aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.mailmap1
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu16
-rw-r--r--Documentation/admin-guide/kernel-parameters.rst1
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt62
-rw-r--r--Documentation/admin-guide/thunderbolt.rst2
-rw-r--r--Documentation/arm64/silicon-errata.txt1
-rw-r--r--Documentation/cgroup-v2.txt7
-rw-r--r--Documentation/core-api/genericirq.rst16
-rw-r--r--Documentation/devicetree/bindings/arm/ccn.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/omap/crossbar.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-mc.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/axi-clkgen.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/brcm,bcm2835-aux-clock.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/exynos4-clock.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/exynos5250-clock.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/exynos5410-clock.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/exynos5420-clock.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/exynos5440-clock.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/ti-keystone-pllctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/zx296702-clk.txt4
-rw-r--r--Documentation/devicetree/bindings/crypto/fsl-sec4.txt4
-rw-r--r--Documentation/devicetree/bindings/devfreq/event/rockchip-dfi.txt2
-rw-r--r--Documentation/devicetree/bindings/display/atmel,lcdc.txt4
-rw-r--r--Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt4
-rw-r--r--Documentation/devicetree/bindings/dma/zxdma.txt2
-rw-r--r--Documentation/devicetree/bindings/eeprom/at25.txt13
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-altera.txt2
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-pca953x.txt2
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-jz4780.txt2
-rw-r--r--Documentation/devicetree/bindings/iio/pressure/hp03.txt2
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/bu21013.txt2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt4
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/img,meta-intc.txt2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/img,pdc-intc.txt2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/st,spear3xx-shirq.txt2
-rw-r--r--Documentation/devicetree/bindings/mailbox/altera-mailbox.txt6
-rw-r--r--Documentation/devicetree/bindings/mailbox/brcm,iproc-pdc-mbox.txt2
-rw-r--r--Documentation/devicetree/bindings/media/exynos5-gsc.txt2
-rw-r--r--Documentation/devicetree/bindings/media/mediatek-vcodec.txt2
-rw-r--r--Documentation/devicetree/bindings/media/rcar_vin.txt2
-rw-r--r--Documentation/devicetree/bindings/media/samsung-fimc.txt2
-rw-r--r--Documentation/devicetree/bindings/media/sh_mobile_ceu.txt2
-rw-r--r--Documentation/devicetree/bindings/media/video-interfaces.txt10
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/ti/emif.txt2
-rw-r--r--Documentation/devicetree/bindings/mfd/ti-keystone-devctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/misc/brcm,kona-smc.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/brcm,kona-sdhci.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/brcm,sdhci-iproc.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt4
-rw-r--r--Documentation/devicetree/bindings/mtd/gpmc-nor.txt6
-rw-r--r--Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt2
-rw-r--r--Documentation/devicetree/bindings/mtd/mtk-nand.txt2
-rw-r--r--Documentation/devicetree/bindings/net/altera_tse.txt4
-rw-r--r--Documentation/devicetree/bindings/net/mdio.txt2
-rw-r--r--Documentation/devicetree/bindings/net/socfpga-dwmac.txt2
-rw-r--r--Documentation/devicetree/bindings/nios2/nios2.txt2
-rw-r--r--Documentation/devicetree/bindings/pci/altera-pcie.txt2
-rw-r--r--Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt2
-rw-r--r--Documentation/devicetree/bindings/pci/hisilicon-pcie.txt2
-rw-r--r--Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/brcm,cygnus-pinmux.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-atlas7.txt4
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-sirf.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt4
-rw-r--r--Documentation/devicetree/bindings/regulator/regulator.txt2
-rw-r--r--Documentation/devicetree/bindings/serial/efm32-uart.txt2
-rw-r--r--Documentation/devicetree/bindings/serio/allwinner,sun4i-ps2.txt2
-rw-r--r--Documentation/devicetree/bindings/soc/ti/keystone-navigator-qmss.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/adi,axi-i2s.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/adi,axi-spdif-tx.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/ak4613.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/ak4642.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/da7218.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/da7219.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/max98371.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/max9867.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/renesas,fsi.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/rockchip-spdif.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt8
-rw-r--r--Documentation/devicetree/bindings/spi/efm32-spi.txt2
-rw-r--r--Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt18
-rw-r--r--Documentation/devicetree/bindings/thermal/thermal.txt12
-rw-r--r--Documentation/devicetree/bindings/ufs/ufs-qcom.txt4
-rw-r--r--Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/am33xx-usb.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/ehci-st.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/ohci-st.txt2
-rw-r--r--Documentation/devicetree/bindings/watchdog/ingenic,jz4740-wdt.txt2
-rw-r--r--Documentation/driver-api/dmaengine/client.rst2
-rw-r--r--Documentation/driver-api/pci.rst3
-rw-r--r--Documentation/filesystems/nilfs2.txt4
-rw-r--r--Documentation/filesystems/overlayfs.txt34
-rw-r--r--Documentation/gpu/i915.rst5
-rw-r--r--Documentation/kbuild/kconfig-language.txt23
-rw-r--r--Documentation/locking/crossrelease.txt874
-rw-r--r--Documentation/media/dvb-drivers/frontends.rst30
-rw-r--r--Documentation/media/dvb-drivers/index.rst1
-rw-r--r--Documentation/networking/index.rst2
-rw-r--r--Documentation/networking/msg_zerocopy.rst4
-rw-r--r--Documentation/scsi/scsi_mid_low_api.txt6
-rw-r--r--Documentation/usb/gadget-testing.txt2
-rw-r--r--Documentation/virtual/kvm/api.txt61
-rw-r--r--Documentation/vm/zswap.txt22
-rw-r--r--Documentation/x86/pti.txt186
-rw-r--r--Documentation/x86/x86_64/mm.txt37
-rw-r--r--MAINTAINERS98
-rw-r--r--Makefile48
-rw-r--r--arch/alpha/include/uapi/asm/Kbuild2
-rw-r--r--arch/alpha/kernel/sys_sio.c35
-rw-r--r--arch/alpha/lib/ev6-memset.S12
-rw-r--r--arch/arc/boot/dts/axc003.dtsi8
-rw-r--r--arch/arc/boot/dts/axc003_idu.dtsi8
-rw-r--r--arch/arc/boot/dts/hsdk.dts8
-rw-r--r--arch/arc/configs/hsdk_defconfig5
-rw-r--r--arch/arc/include/asm/uaccess.h5
-rw-r--r--arch/arc/include/uapi/asm/Kbuild1
-rw-r--r--arch/arc/kernel/setup.c2
-rw-r--r--arch/arc/kernel/stacktrace.c2
-rw-r--r--arch/arc/kernel/traps.c14
-rw-r--r--arch/arc/kernel/troubleshoot.c3
-rw-r--r--arch/arc/plat-axs10x/axs10x.c18
-rw-r--r--arch/arc/plat-hsdk/platform.c42
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi2
-rw-r--r--arch/arm/boot/dts/am4372.dtsi6
-rw-r--r--arch/arm/boot/dts/am437x-cm-t43.dts4
-rw-r--r--arch/arm/boot/dts/armada-385-db-ap.dts1
-rw-r--r--arch/arm/boot/dts/armada-385-linksys.dtsi1
-rw-r--r--arch/arm/boot/dts/armada-385-synology-ds116.dts2
-rw-r--r--arch/arm/boot/dts/armada-388-gp.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-g4.dtsi2
-rw-r--r--arch/arm/boot/dts/at91-tse850-3.dts1
-rw-r--r--arch/arm/boot/dts/bcm-nsp.dtsi4
-rw-r--r--arch/arm/boot/dts/bcm283x.dtsi1
-rw-r--r--arch/arm/boot/dts/bcm958623hr.dts4
-rw-r--r--arch/arm/boot/dts/bcm958625hr.dts4
-rw-r--r--arch/arm/boot/dts/da850-lcdk.dts4
-rw-r--r--arch/arm/boot/dts/da850-lego-ev3.dts4
-rw-r--r--arch/arm/boot/dts/dm814x.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos5800-peach-pi.dts4
-rw-r--r--arch/arm/boot/dts/imx53.dtsi9
-rw-r--r--arch/arm/boot/dts/kirkwood-openblocks_a7.dts10
-rw-r--r--arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts3
-rw-r--r--arch/arm/boot/dts/logicpd-som-lv.dtsi17
-rw-r--r--arch/arm/boot/dts/ls1021a-qds.dts2
-rw-r--r--arch/arm/boot/dts/ls1021a-twr.dts2
-rw-r--r--arch/arm/boot/dts/meson.dtsi18
-rw-r--r--arch/arm/boot/dts/nspire.dtsi1
-rw-r--r--arch/arm/boot/dts/omap3-beagle-xm.dts1
-rw-r--r--arch/arm/boot/dts/omap3-beagle.dts1
-rw-r--r--arch/arm/boot/dts/omap3-cm-t3x.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-evm-common.dtsi1
-rw-r--r--arch/arm/boot/dts/omap3-gta04.dtsi1
-rw-r--r--arch/arm/boot/dts/omap3-igep0020-common.dtsi1
-rw-r--r--arch/arm/boot/dts/omap3-igep0030-common.dtsi1
-rw-r--r--arch/arm/boot/dts/omap3-lilly-a83x.dtsi1
-rw-r--r--arch/arm/boot/dts/omap3-overo-base.dtsi1
-rw-r--r--arch/arm/boot/dts/omap3-pandora-common.dtsi1
-rw-r--r--arch/arm/boot/dts/omap3-tao3530.dtsi1
-rw-r--r--arch/arm/boot/dts/omap3.dtsi1
-rw-r--r--arch/arm/boot/dts/omap4-droid4-xt894.dts1
-rw-r--r--arch/arm/boot/dts/omap4-duovero.dtsi1
-rw-r--r--arch/arm/boot/dts/omap4-panda-common.dtsi1
-rw-r--r--arch/arm/boot/dts/omap4-var-som-om44.dtsi1
-rw-r--r--arch/arm/boot/dts/omap4.dtsi5
-rw-r--r--arch/arm/boot/dts/omap5-board-common.dtsi2
-rw-r--r--arch/arm/boot/dts/omap5-cm-t54.dts2
-rw-r--r--arch/arm/boot/dts/omap5.dtsi1
-rw-r--r--arch/arm/boot/dts/r8a7790.dtsi1
-rw-r--r--arch/arm/boot/dts/r8a7792.dtsi1
-rw-r--r--arch/arm/boot/dts/r8a7793.dtsi1
-rw-r--r--arch/arm/boot/dts/r8a7794.dtsi1
-rw-r--r--arch/arm/boot/dts/rk3066a-marsboard.dts4
-rw-r--r--arch/arm/boot/dts/rk3288.dtsi2
-rw-r--r--arch/arm/boot/dts/sun4i-a10.dtsi6
-rw-r--r--arch/arm/boot/dts/sun5i-a10s.dtsi4
-rw-r--r--arch/arm/boot/dts/sun6i-a31.dtsi4
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi6
-rw-r--r--arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts1
-rw-r--r--arch/arm/boot/dts/tango4-common.dtsi1
-rw-r--r--arch/arm/boot/dts/vf610-zii-dev-rev-c.dts6
-rw-r--r--arch/arm/configs/sunxi_defconfig2
-rw-r--r--arch/arm/include/asm/kvm_arm.h3
-rw-r--r--arch/arm/include/asm/kvm_host.h5
-rw-r--r--arch/arm/include/uapi/asm/Kbuild1
-rw-r--r--arch/arm/kernel/traps.c1
-rw-r--r--arch/arm/lib/csumpartialcopyuser.S4
-rw-r--r--arch/arm/mach-davinci/dm365.c29
-rw-r--r--arch/arm/mach-meson/platsmp.c2
-rw-r--r--arch/arm/mach-omap2/cm_common.c6
-rw-r--r--arch/arm/mach-omap2/omap-secure.c21
-rw-r--r--arch/arm/mach-omap2/omap-secure.h4
-rw-r--r--arch/arm/mach-omap2/omap_device.c10
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c1
-rw-r--r--arch/arm/mach-omap2/pm.h4
-rw-r--r--arch/arm/mach-omap2/pm34xx.c13
-rw-r--r--arch/arm/mach-omap2/prcm-common.h1
-rw-r--r--arch/arm/mach-omap2/prm33xx.c12
-rw-r--r--arch/arm/mach-omap2/sleep34xx.S26
-rw-r--r--arch/arm/net/bpf_jit_32.c225
-rw-r--r--arch/arm64/Kconfig12
-rw-r--r--arch/arm64/boot/dts/Makefile2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts1
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts1
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts3
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi11
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts2
-rw-r--r--arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi1
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl.dtsi6
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi13
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi9
-rw-r--r--arch/arm64/boot/dts/renesas/salvator-common.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/ulcb.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-rock64.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi11
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts1
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts1
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts3
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi4
-rw-r--r--arch/arm64/include/asm/assembler.h10
-rw-r--r--arch/arm64/include/asm/cpufeature.h3
-rw-r--r--arch/arm64/include/asm/cputype.h2
-rw-r--r--arch/arm64/include/asm/efi.h4
-rw-r--r--arch/arm64/include/asm/kvm_arm.h3
-rw-r--r--arch/arm64/include/asm/kvm_host.h1
-rw-r--r--arch/arm64/include/asm/mmu_context.h46
-rw-r--r--arch/arm64/include/asm/perf_event.h2
-rw-r--r--arch/arm64/include/asm/pgtable.h41
-rw-r--r--arch/arm64/include/uapi/asm/bpf_perf_event.h9
-rw-r--r--arch/arm64/kernel/cpu-reset.S1
-rw-r--r--arch/arm64/kernel/cpufeature.c3
-rw-r--r--arch/arm64/kernel/efi-entry.S2
-rw-r--r--arch/arm64/kernel/fpsimd.c53
-rw-r--r--arch/arm64/kernel/head.S1
-rw-r--r--arch/arm64/kernel/hw_breakpoint.c2
-rw-r--r--arch/arm64/kernel/process.c9
-rw-r--r--arch/arm64/kernel/relocate_kernel.S1
-rw-r--r--arch/arm64/kvm/debug.c21
-rw-r--r--arch/arm64/kvm/handle_exit.c61
-rw-r--r--arch/arm64/kvm/hyp-init.S1
-rw-r--r--arch/arm64/kvm/hyp/debug-sr.c3
-rw-r--r--arch/arm64/kvm/hyp/switch.c37
-rw-r--r--arch/arm64/mm/dump.c2
-rw-r--r--arch/arm64/mm/fault.c5
-rw-r--r--arch/arm64/mm/init.c3
-rw-r--r--arch/arm64/net/bpf_jit_comp.c20
-rw-r--r--arch/blackfin/include/uapi/asm/Kbuild1
-rw-r--r--arch/c6x/include/uapi/asm/Kbuild1
-rw-r--r--arch/cris/include/uapi/asm/Kbuild1
-rw-r--r--arch/frv/include/uapi/asm/Kbuild2
-rw-r--r--arch/h8300/include/uapi/asm/Kbuild1
-rw-r--r--arch/hexagon/include/uapi/asm/Kbuild1
-rw-r--r--arch/ia64/include/asm/atomic.h37
-rw-r--r--arch/ia64/include/uapi/asm/Kbuild1
-rw-r--r--arch/ia64/kernel/time.c2
-rw-r--r--arch/m32r/include/uapi/asm/Kbuild1
-rw-r--r--arch/m32r/kernel/traps.c1
-rw-r--r--arch/m68k/configs/stmark2_defconfig1
-rw-r--r--arch/m68k/include/uapi/asm/Kbuild1
-rw-r--r--arch/m68k/kernel/vmlinux-nommu.lds2
-rw-r--r--arch/m68k/kernel/vmlinux-std.lds2
-rw-r--r--arch/m68k/kernel/vmlinux-sun3.lds2
-rw-r--r--arch/metag/include/uapi/asm/Kbuild1
-rw-r--r--arch/microblaze/include/uapi/asm/Kbuild1
-rw-r--r--arch/mips/Kconfig12
-rw-r--r--arch/mips/Kconfig.debug14
-rw-r--r--arch/mips/ar7/platform.c2
-rw-r--r--arch/mips/ath25/devices.c2
-rw-r--r--arch/mips/include/asm/Kbuild1
-rw-r--r--arch/mips/include/asm/serial.h22
-rw-r--r--arch/mips/include/uapi/asm/Kbuild1
-rw-r--r--arch/mips/kernel/cps-vec.S2
-rw-r--r--arch/mips/kernel/mips-cm.c1
-rw-r--r--arch/mips/kernel/process.c12
-rw-r--r--arch/mips/kernel/ptrace.c147
-rw-r--r--arch/mips/lib/Makefile3
-rw-r--r--arch/mips/lib/libgcc.h17
-rw-r--r--arch/mips/lib/multi3.c54
-rw-r--r--arch/mips/mm/uasm-micromips.c2
-rw-r--r--arch/mips/ralink/timer.c4
-rw-r--r--arch/mips/rb532/Makefile4
-rw-r--r--arch/mips/rb532/devices.c4
-rw-r--r--arch/mn10300/include/uapi/asm/Kbuild1
-rw-r--r--arch/nios2/include/uapi/asm/Kbuild1
-rw-r--r--arch/openrisc/include/uapi/asm/Kbuild1
-rw-r--r--arch/parisc/boot/compressed/misc.c4
-rw-r--r--arch/parisc/include/asm/ldcw.h2
-rw-r--r--arch/parisc/include/asm/thread_info.h5
-rw-r--r--arch/parisc/include/uapi/asm/Kbuild1
-rw-r--r--arch/parisc/kernel/drivers.c2
-rw-r--r--arch/parisc/kernel/entry.S25
-rw-r--r--arch/parisc/kernel/hpmc.S1
-rw-r--r--arch/parisc/kernel/pacache.S9
-rw-r--r--arch/parisc/kernel/process.c39
-rw-r--r--arch/parisc/kernel/unwind.c1
-rw-r--r--arch/parisc/lib/delay.c2
-rw-r--r--arch/parisc/mm/init.c10
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/powerpc/include/asm/exception-64e.h6
-rw-r--r--arch/powerpc/include/asm/exception-64s.h57
-rw-r--r--arch/powerpc/include/asm/feature-fixups.h13
-rw-r--r--arch/powerpc/include/asm/hvcall.h18
-rw-r--r--arch/powerpc/include/asm/machdep.h1
-rw-r--r--arch/powerpc/include/asm/mmu_context.h5
-rw-r--r--arch/powerpc/include/asm/paca.h10
-rw-r--r--arch/powerpc/include/asm/plpar_wrappers.h14
-rw-r--r--arch/powerpc/include/asm/setup.h14
-rw-r--r--arch/powerpc/include/uapi/asm/Kbuild1
-rw-r--r--arch/powerpc/include/uapi/asm/kvm.h25
-rw-r--r--arch/powerpc/kernel/asm-offsets.c5
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S2
-rw-r--r--arch/powerpc/kernel/entry_64.S44
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S137
-rw-r--r--arch/powerpc/kernel/fadump.c22
-rw-r--r--arch/powerpc/kernel/process.c2
-rw-r--r--arch/powerpc/kernel/setup-common.c38
-rw-r--r--arch/powerpc/kernel/setup_64.c139
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S9
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu.c1
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c90
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S9
-rw-r--r--arch/powerpc/kvm/book3s_pr.c2
-rw-r--r--arch/powerpc/kvm/book3s_rmhandlers.S7
-rw-r--r--arch/powerpc/kvm/book3s_segment.S4
-rw-r--r--arch/powerpc/kvm/book3s_xive.c7
-rw-r--r--arch/powerpc/kvm/powerpc.c131
-rw-r--r--arch/powerpc/lib/feature-fixups.c41
-rw-r--r--arch/powerpc/mm/fault.c7
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c6
-rw-r--r--arch/powerpc/perf/core-book3s.c12
-rw-r--r--arch/powerpc/perf/imc-pmu.c17
-rw-r--r--arch/powerpc/platforms/powernv/setup.c49
-rw-r--r--arch/powerpc/platforms/ps3/setup.c15
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c21
-rw-r--r--arch/powerpc/platforms/pseries/pseries.h2
-rw-r--r--arch/powerpc/platforms/pseries/ras.c3
-rw-r--r--arch/powerpc/platforms/pseries/setup.c36
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c4
-rw-r--r--arch/powerpc/xmon/xmon.c36
-rw-r--r--arch/riscv/configs/defconfig75
-rw-r--r--arch/riscv/include/asm/barrier.h19
-rw-r--r--arch/riscv/include/asm/csr.h8
-rw-r--r--arch/riscv/include/asm/io.h4
-rw-r--r--arch/riscv/include/asm/irqflags.h10
-rw-r--r--arch/riscv/include/asm/pgtable.h4
-rw-r--r--arch/riscv/include/asm/ptrace.h2
-rw-r--r--arch/riscv/include/asm/tlbflush.h4
-rw-r--r--arch/riscv/include/asm/uaccess.h12
-rw-r--r--arch/riscv/include/asm/unistd.h1
-rw-r--r--arch/riscv/include/asm/vdso-syscalls.h28
-rw-r--r--arch/riscv/include/uapi/asm/Kbuild1
-rw-r--r--arch/riscv/include/uapi/asm/syscalls.h26
-rw-r--r--arch/riscv/kernel/entry.S8
-rw-r--r--arch/riscv/kernel/process.c4
-rw-r--r--arch/riscv/kernel/setup.c11
-rw-r--r--arch/riscv/kernel/sys_riscv.c2
-rw-r--r--arch/riscv/kernel/syscall_table.c1
-rw-r--r--arch/riscv/kernel/vdso/flush_icache.S1
-rw-r--r--arch/riscv/mm/fault.c2
-rw-r--r--arch/s390/Kbuild1
-rw-r--r--arch/s390/appldata/Makefile1
-rw-r--r--arch/s390/boot/compressed/vmlinux.scr1
-rw-r--r--arch/s390/crypto/sha1_s390.c7
-rw-r--r--arch/s390/hypfs/Makefile1
-rw-r--r--arch/s390/include/asm/Kbuild1
-rw-r--r--arch/s390/include/asm/alternative.h1
-rw-r--r--arch/s390/include/asm/ap.h5
-rw-r--r--arch/s390/include/asm/bugs.h1
-rw-r--r--arch/s390/include/asm/kvm_host.h3
-rw-r--r--arch/s390/include/asm/perf_event.h1
-rw-r--r--arch/s390/include/asm/pgtable.h6
-rw-r--r--arch/s390/include/asm/ptrace.h11
-rw-r--r--arch/s390/include/asm/segment.h1
-rw-r--r--arch/s390/include/asm/switch_to.h27
-rw-r--r--arch/s390/include/asm/vga.h1
-rw-r--r--arch/s390/include/uapi/asm/Kbuild1
-rw-r--r--arch/s390/include/uapi/asm/bpf_perf_event.h9
-rw-r--r--arch/s390/include/uapi/asm/kvm.h5
-rw-r--r--arch/s390/include/uapi/asm/perf_regs.h1
-rw-r--r--arch/s390/include/uapi/asm/ptrace.h125
-rw-r--r--arch/s390/include/uapi/asm/sthyi.h1
-rw-r--r--arch/s390/include/uapi/asm/virtio-ccw.h2
-rw-r--r--arch/s390/include/uapi/asm/vmcp.h1
-rw-r--r--arch/s390/kernel/alternative.c1
-rw-r--r--arch/s390/kernel/compat_linux.c1
-rw-r--r--arch/s390/kernel/perf_regs.c1
-rw-r--r--arch/s390/kernel/syscalls.S6
-rw-r--r--arch/s390/kernel/vdso64/note.S1
-rw-r--r--arch/s390/kvm/Makefile5
-rw-r--r--arch/s390/kvm/diag.c5
-rw-r--r--arch/s390/kvm/gaccess.h5
-rw-r--r--arch/s390/kvm/guestdbg.c5
-rw-r--r--arch/s390/kvm/intercept.c5
-rw-r--r--arch/s390/kvm/interrupt.c5
-rw-r--r--arch/s390/kvm/irq.h5
-rw-r--r--arch/s390/kvm/kvm-s390.c50
-rw-r--r--arch/s390/kvm/kvm-s390.h5
-rw-r--r--arch/s390/kvm/priv.c18
-rw-r--r--arch/s390/kvm/sigp.c5
-rw-r--r--arch/s390/kvm/vsie.c15
-rw-r--r--arch/s390/lib/uaccess.c2
-rw-r--r--arch/s390/mm/pgalloc.c2
-rw-r--r--arch/s390/net/Makefile1
-rw-r--r--arch/s390/net/bpf_jit_comp.c11
-rw-r--r--arch/s390/numa/Makefile1
-rw-r--r--arch/s390/pci/Makefile1
-rw-r--r--arch/s390/pci/pci_dma.c21
-rw-r--r--arch/s390/pci/pci_insn.c3
-rw-r--r--arch/s390/tools/gen_opcode_table.c1
-rw-r--r--arch/score/include/uapi/asm/Kbuild1
-rw-r--r--arch/sh/boards/mach-se/770x/setup.c24
-rw-r--r--arch/sh/include/mach-se/mach/se.h1
-rw-r--r--arch/sh/include/uapi/asm/Kbuild1
-rw-r--r--arch/sparc/crypto/Makefile2
-rw-r--r--arch/sparc/include/uapi/asm/Kbuild1
-rw-r--r--arch/sparc/lib/hweight.S4
-rw-r--r--arch/sparc/mm/fault_32.c2
-rw-r--r--arch/sparc/mm/fault_64.c2
-rw-r--r--arch/sparc/mm/gup.c4
-rw-r--r--arch/sparc/net/bpf_jit_comp_64.c6
-rw-r--r--arch/tile/include/uapi/asm/Kbuild1
-rw-r--r--arch/um/include/asm/Kbuild1
-rw-r--r--arch/um/include/asm/mmu_context.h3
-rw-r--r--arch/um/kernel/trap.c2
-rw-r--r--arch/unicore32/include/asm/mmu_context.h5
-rw-r--r--arch/unicore32/include/uapi/asm/Kbuild1
-rw-r--r--arch/unicore32/kernel/traps.c1
-rw-r--r--arch/x86/Kconfig18
-rw-r--r--arch/x86/Kconfig.debug1
-rw-r--r--arch/x86/Makefile8
-rw-r--r--arch/x86/boot/compressed/Makefile1
-rw-r--r--arch/x86/boot/compressed/head_64.S16
-rw-r--r--arch/x86/boot/compressed/misc.c16
-rw-r--r--arch/x86/boot/compressed/pagetable.c3
-rw-r--r--arch/x86/boot/compressed/pgtable_64.c28
-rw-r--r--arch/x86/boot/genimage.sh32
-rw-r--r--arch/x86/crypto/aesni-intel_asm.S5
-rw-r--r--arch/x86/crypto/camellia-aesni-avx-asm_64.S3
-rw-r--r--arch/x86/crypto/camellia-aesni-avx2-asm_64.S3
-rw-r--r--arch/x86/crypto/crc32c-pcl-intel-asm_64.S3
-rw-r--r--arch/x86/crypto/salsa20_glue.c7
-rw-r--r--arch/x86/entry/calling.h147
-rw-r--r--arch/x86/entry/entry_32.S30
-rw-r--r--arch/x86/entry/entry_64.S260
-rw-r--r--arch/x86/entry/entry_64_compat.S32
-rw-r--r--arch/x86/entry/vdso/vclock_gettime.c2
-rw-r--r--arch/x86/entry/vsyscall/vsyscall_64.c38
-rw-r--r--arch/x86/events/amd/power.c2
-rw-r--r--arch/x86/events/intel/bts.c18
-rw-r--r--arch/x86/events/intel/core.c5
-rw-r--r--arch/x86/events/intel/ds.c175
-rw-r--r--arch/x86/events/intel/rapl.c4
-rw-r--r--arch/x86/events/perf_event.h23
-rw-r--r--arch/x86/include/asm/alternative.h4
-rw-r--r--arch/x86/include/asm/apic.h1
-rw-r--r--arch/x86/include/asm/asm-prototypes.h24
-rw-r--r--arch/x86/include/asm/asm.h2
-rw-r--r--arch/x86/include/asm/cpu_entry_area.h81
-rw-r--r--arch/x86/include/asm/cpufeature.h2
-rw-r--r--arch/x86/include/asm/cpufeatures.h12
-rw-r--r--arch/x86/include/asm/desc.h14
-rw-r--r--arch/x86/include/asm/disabled-features.h8
-rw-r--r--arch/x86/include/asm/espfix.h7
-rw-r--r--arch/x86/include/asm/fixmap.h7
-rw-r--r--arch/x86/include/asm/hypervisor.h25
-rw-r--r--arch/x86/include/asm/intel_ds.h36
-rw-r--r--arch/x86/include/asm/invpcid.h53
-rw-r--r--arch/x86/include/asm/irqdomain.h2
-rw-r--r--arch/x86/include/asm/irqflags.h3
-rw-r--r--arch/x86/include/asm/kdebug.h1
-rw-r--r--arch/x86/include/asm/kmemcheck.h1
-rw-r--r--arch/x86/include/asm/kvm_emulate.h2
-rw-r--r--arch/x86/include/asm/kvm_host.h16
-rw-r--r--arch/x86/include/asm/mem_encrypt.h4
-rw-r--r--arch/x86/include/asm/mmu.h4
-rw-r--r--arch/x86/include/asm/mmu_context.h113
-rw-r--r--arch/x86/include/asm/mshyperv.h18
-rw-r--r--arch/x86/include/asm/msr-index.h3
-rw-r--r--arch/x86/include/asm/nospec-branch.h222
-rw-r--r--arch/x86/include/asm/paravirt.h9
-rw-r--r--arch/x86/include/asm/pci_x86.h1
-rw-r--r--arch/x86/include/asm/pgalloc.h11
-rw-r--r--arch/x86/include/asm/pgtable.h30
-rw-r--r--arch/x86/include/asm/pgtable_32_types.h15
-rw-r--r--arch/x86/include/asm/pgtable_64.h92
-rw-r--r--arch/x86/include/asm/pgtable_64_types.h59
-rw-r--r--arch/x86/include/asm/processor-flags.h5
-rw-r--r--arch/x86/include/asm/processor.h82
-rw-r--r--arch/x86/include/asm/pti.h14
-rw-r--r--arch/x86/include/asm/segment.h12
-rw-r--r--arch/x86/include/asm/stacktrace.h3
-rw-r--r--arch/x86/include/asm/suspend_32.h8
-rw-r--r--arch/x86/include/asm/suspend_64.h19
-rw-r--r--arch/x86/include/asm/switch_to.h13
-rw-r--r--arch/x86/include/asm/thread_info.h2
-rw-r--r--arch/x86/include/asm/tlbflush.h347
-rw-r--r--arch/x86/include/asm/trace/irq_vectors.h16
-rw-r--r--arch/x86/include/asm/traps.h2
-rw-r--r--arch/x86/include/asm/unwind.h20
-rw-r--r--arch/x86/include/asm/vsyscall.h1
-rw-r--r--arch/x86/include/asm/xen/hypercall.h5
-rw-r--r--arch/x86/include/uapi/asm/Kbuild1
-rw-r--r--arch/x86/include/uapi/asm/processor-flags.h7
-rw-r--r--arch/x86/kernel/Makefile5
-rw-r--r--arch/x86/kernel/alternative.c7
-rw-r--r--arch/x86/kernel/apic/apic.c51
-rw-r--r--arch/x86/kernel/apic/apic_flat_64.c2
-rw-r--r--arch/x86/kernel/apic/apic_noop.c2
-rw-r--r--arch/x86/kernel/apic/io_apic.c2
-rw-r--r--arch/x86/kernel/apic/msi.c8
-rw-r--r--arch/x86/kernel/apic/probe_32.c2
-rw-r--r--arch/x86/kernel/apic/vector.c31
-rw-r--r--arch/x86/kernel/apic/x2apic_cluster.c2
-rw-r--r--arch/x86/kernel/asm-offsets.c10
-rw-r--r--arch/x86/kernel/asm-offsets_32.c9
-rw-r--r--arch/x86/kernel/asm-offsets_64.c4
-rw-r--r--arch/x86/kernel/cpu/amd.c35
-rw-r--r--arch/x86/kernel/cpu/bugs.c221
-rw-r--r--arch/x86/kernel/cpu/common.c106
-rw-r--r--arch/x86/kernel/cpu/intel_rdt.c8
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c5
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c4
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c2
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c42
-rw-r--r--arch/x86/kernel/cpu/scattered.c1
-rw-r--r--arch/x86/kernel/doublefault.c36
-rw-r--r--arch/x86/kernel/dumpstack.c98
-rw-r--r--arch/x86/kernel/dumpstack_32.c6
-rw-r--r--arch/x86/kernel/dumpstack_64.c12
-rw-r--r--arch/x86/kernel/ftrace_32.S6
-rw-r--r--arch/x86/kernel/ftrace_64.S34
-rw-r--r--arch/x86/kernel/head64.c4
-rw-r--r--arch/x86/kernel/head_64.S30
-rw-r--r--arch/x86/kernel/idt.c12
-rw-r--r--arch/x86/kernel/ioport.c2
-rw-r--r--arch/x86/kernel/irq.c12
-rw-r--r--arch/x86/kernel/irq_32.c9
-rw-r--r--arch/x86/kernel/irq_64.c4
-rw-r--r--arch/x86/kernel/irqinit.c3
-rw-r--r--arch/x86/kernel/kprobes/opt.c23
-rw-r--r--arch/x86/kernel/ldt.c198
-rw-r--r--arch/x86/kernel/machine_kexec_32.c4
-rw-r--r--arch/x86/kernel/paravirt_patch_64.c2
-rw-r--r--arch/x86/kernel/process.c46
-rw-r--r--arch/x86/kernel/process_32.c2
-rw-r--r--arch/x86/kernel/process_64.c14
-rw-r--r--arch/x86/kernel/setup.c15
-rw-r--r--arch/x86/kernel/smpboot.c21
-rw-r--r--arch/x86/kernel/stacktrace.c8
-rw-r--r--arch/x86/kernel/tboot.c11
-rw-r--r--arch/x86/kernel/tls.c11
-rw-r--r--arch/x86/kernel/traps.c77
-rw-r--r--arch/x86/kernel/tsc.c9
-rw-r--r--arch/x86/kernel/unwind_orc.c136
-rw-r--r--arch/x86/kernel/vmlinux.lds.S23
-rw-r--r--arch/x86/kvm/emulate.c56
-rw-r--r--arch/x86/kvm/mmu.c27
-rw-r--r--arch/x86/kvm/svm.c32
-rw-r--r--arch/x86/kvm/vmx.c43
-rw-r--r--arch/x86/kvm/x86.c99
-rw-r--r--arch/x86/lib/Makefile1
-rw-r--r--arch/x86/lib/checksum_32.S7
-rw-r--r--arch/x86/lib/delay.c4
-rw-r--r--arch/x86/lib/retpoline.S48
-rw-r--r--arch/x86/lib/x86-opcode-map.txt13
-rw-r--r--arch/x86/mm/Makefile9
-rw-r--r--arch/x86/mm/cpu_entry_area.c166
-rw-r--r--arch/x86/mm/debug_pagetables.c80
-rw-r--r--arch/x86/mm/dump_pagetables.c141
-rw-r--r--arch/x86/mm/extable.c6
-rw-r--r--arch/x86/mm/fault.c33
-rw-r--r--arch/x86/mm/init.c82
-rw-r--r--arch/x86/mm/init_32.c6
-rw-r--r--arch/x86/mm/ioremap.c4
-rw-r--r--arch/x86/mm/kasan_init_64.c47
-rw-r--r--arch/x86/mm/kaslr.c32
-rw-r--r--arch/x86/mm/kmemcheck/error.c1
-rw-r--r--arch/x86/mm/kmemcheck/error.h1
-rw-r--r--arch/x86/mm/kmemcheck/opcode.c1
-rw-r--r--arch/x86/mm/kmemcheck/opcode.h1
-rw-r--r--arch/x86/mm/kmemcheck/pte.c1
-rw-r--r--arch/x86/mm/kmemcheck/pte.h1
-rw-r--r--arch/x86/mm/kmemcheck/selftest.c1
-rw-r--r--arch/x86/mm/kmemcheck/selftest.h1
-rw-r--r--arch/x86/mm/kmemcheck/shadow.h1
-rw-r--r--arch/x86/mm/kmmio.c12
-rw-r--r--arch/x86/mm/mem_encrypt.c360
-rw-r--r--arch/x86/mm/mem_encrypt_boot.S80
-rw-r--r--arch/x86/mm/pgtable.c5
-rw-r--r--arch/x86/mm/pgtable_32.c1
-rw-r--r--arch/x86/mm/pti.c368
-rw-r--r--arch/x86/mm/tlb.c98
-rw-r--r--arch/x86/pci/broadcom_bus.c2
-rw-r--r--arch/x86/pci/common.c5
-rw-r--r--arch/x86/pci/fixup.c60
-rw-r--r--arch/x86/platform/efi/efi_64.c7
-rw-r--r--arch/x86/platform/efi/quirks.c13
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_bt.c2
-rw-r--r--arch/x86/platform/uv/tlb_uv.c2
-rw-r--r--arch/x86/platform/uv/uv_irq.c2
-rw-r--r--arch/x86/platform/uv/uv_nmi.c4
-rw-r--r--arch/x86/power/cpu.c112
-rw-r--r--arch/x86/xen/apic.c2
-rw-r--r--arch/x86/xen/enlighten.c81
-rw-r--r--arch/x86/xen/enlighten_pv.c42
-rw-r--r--arch/x86/xen/mmu_pv.c22
-rw-r--r--arch/x86/xen/setup.c6
-rw-r--r--arch/x86/xen/xen-asm_64.S14
-rw-r--r--arch/x86/xen/xen-ops.h2
-rw-r--r--arch/xtensa/include/uapi/asm/Kbuild1
-rw-r--r--block/bio.c2
-rw-r--r--block/blk-core.c9
-rw-r--r--block/blk-map.c38
-rw-r--r--block/blk-mq.c2
-rw-r--r--block/blk-throttle.c8
-rw-r--r--block/blk.h2
-rw-r--r--block/bounce.c6
-rw-r--r--block/kyber-iosched.c37
-rw-r--r--crypto/af_alg.c23
-rw-r--r--crypto/algapi.c12
-rw-r--r--crypto/algif_aead.c20
-rw-r--r--crypto/algif_skcipher.c18
-rw-r--r--crypto/asymmetric_keys/pkcs7_parser.c4
-rw-r--r--crypto/asymmetric_keys/pkcs7_trust.c2
-rw-r--r--crypto/asymmetric_keys/pkcs7_verify.c9
-rw-r--r--crypto/asymmetric_keys/public_key.c7
-rw-r--r--crypto/asymmetric_keys/x509_cert_parser.c2
-rw-r--r--crypto/asymmetric_keys/x509_public_key.c8
-rw-r--r--crypto/chacha20poly1305.c6
-rw-r--r--crypto/hmac.c6
-rw-r--r--crypto/mcryptd.c23
-rw-r--r--crypto/pcrypt.c19
-rw-r--r--crypto/rsa_helper.c2
-rw-r--r--crypto/salsa20_generic.c7
-rw-r--r--crypto/shash.c5
-rw-r--r--crypto/skcipher.c10
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/apei/erst.c2
-rw-r--r--drivers/acpi/cppc_acpi.c25
-rw-r--r--drivers/acpi/device_pm.c2
-rw-r--r--drivers/acpi/nfit/core.c9
-rw-r--r--drivers/android/binder.c84
-rw-r--r--drivers/ata/ahci_mtk.c6
-rw-r--r--drivers/ata/ahci_qoriq.c12
-rw-r--r--drivers/ata/libata-core.c13
-rw-r--r--drivers/ata/pata_pdc2027x.c16
-rw-r--r--drivers/base/Kconfig28
-rw-r--r--drivers/base/cacheinfo.c13
-rw-r--r--drivers/base/cpu.c48
-rw-r--r--drivers/base/isa.c10
-rw-r--r--drivers/base/power/main.c15
-rw-r--r--drivers/base/power/runtime.c3
-rw-r--r--drivers/bcma/Kconfig2
-rw-r--r--drivers/block/loop.c10
-rw-r--r--drivers/block/null_blk.c4
-rw-r--r--drivers/block/rbd.c18
-rw-r--r--drivers/bus/arm-cci.c7
-rw-r--r--drivers/bus/arm-ccn.c25
-rw-r--r--drivers/bus/sunxi-rsb.c1
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c44
-rw-r--r--drivers/char/ipmi/ipmi_si_parisc.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_pci.c7
-rw-r--r--drivers/clk/clk.c8
-rw-r--r--drivers/clk/sunxi/clk-sun9i-mmc.c12
-rw-r--r--drivers/cpufreq/cpufreq_governor.c19
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c11
-rw-r--r--drivers/crypto/chelsio/Kconfig1
-rw-r--r--drivers/crypto/inside-secure/safexcel.c1
-rw-r--r--drivers/crypto/inside-secure/safexcel_cipher.c85
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c89
-rw-r--r--drivers/crypto/n2_core.c3
-rw-r--r--drivers/dma/at_hdmac.c4
-rw-r--r--drivers/dma/dma-jz4740.c4
-rw-r--r--drivers/dma/dmatest.c55
-rw-r--r--drivers/dma/fsl-edma.c28
-rw-r--r--drivers/dma/ioat/init.c2
-rw-r--r--drivers/firmware/arm_scpi.c216
-rw-r--r--drivers/firmware/efi/capsule-loader.c45
-rw-r--r--drivers/firmware/efi/efi.c5
-rw-r--r--drivers/firmware/efi/esrt.c17
-rw-r--r--drivers/firmware/efi/runtime-map.c10
-rw-r--r--drivers/firmware/google/vpd.c48
-rw-r--r--drivers/firmware/qemu_fw_cfg.c3
-rw-r--r--drivers/gpio/gpio-74x164.c4
-rw-r--r--drivers/gpio/gpio-bcm-kona.c3
-rw-r--r--drivers/gpio/gpio-brcmstb.c4
-rw-r--r--drivers/gpio/gpio-davinci.c2
-rw-r--r--drivers/gpio/gpio-mmio.c30
-rw-r--r--drivers/gpio/gpio-pca953x.c2
-rw-r--r--drivers/gpio/gpio-reg.c4
-rw-r--r--drivers/gpio/gpio-tegra.c4
-rw-r--r--drivers/gpio/gpio-xgene-sb.c2
-rw-r--r--drivers/gpio/gpiolib-acpi.c2
-rw-r--r--drivers/gpio/gpiolib-devprop.c17
-rw-r--r--drivers/gpio/gpiolib-of.c3
-rw-r--r--drivers/gpio/gpiolib.c62
-rw-r--r--drivers/gpio/gpiolib.h3
-rw-r--r--drivers/gpu/drm/amd/acp/Makefile21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Makefile22
-rw-r--r--drivers/gpu/drm/amd/display/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c13
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c51
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/Makefile23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/Makefile21
-rw-r--r--drivers/gpu/drm/amd/lib/Makefile21
-rw-r--r--drivers/gpu/drm/amd/powerplay/Makefile22
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/Makefile22
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c24
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu72.h24
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h24
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/Makefile22
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h24
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c47
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.h2
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c38
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c2
-rw-r--r--drivers/gpu/drm/drm_connector.c65
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h1
-rw-r--r--drivers/gpu/drm/drm_edid.c52
-rw-r--r--drivers/gpu/drm/drm_lease.c26
-rw-r--r--drivers/gpu/drm/drm_mm.c8
-rw-r--r--drivers/gpu/drm/drm_mode_config.c5
-rw-r--r--drivers/gpu/drm/drm_plane.c42
-rw-r--r--drivers/gpu/drm/drm_syncobj.c77
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c46
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c15
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c21
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c47
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c22
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h3
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c42
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h7
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c3
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c22
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c35
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c5
-rw-r--r--drivers/gpu/drm/i915/intel_display.c373
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h2
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c5
-rw-r--r--drivers/gpu/drm/i915/intel_lpe_audio.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c3
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c13
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c16
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c11
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c83
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c39
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vmm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c49
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c46
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c20
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c9
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c4
-rw-r--r--drivers/gpu/drm/tegra/sor.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c101
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c37
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c41
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-cp2112.c15
-rw-r--r--drivers/hid/hid-holtekff.c8
-rw-r--r--drivers/hv/channel.c10
-rw-r--r--drivers/hv/channel_mgmt.c7
-rw-r--r--drivers/hv/vmbus_drv.c2
-rw-r--r--drivers/hwmon/hwmon.c21
-rw-r--r--drivers/hwtracing/stm/ftrace.c6
-rw-r--r--drivers/i2c/busses/i2c-cht-wc.c2
-rw-r--r--drivers/i2c/busses/i2c-piix4.c2
-rw-r--r--drivers/i2c/busses/i2c-stm32.h3
-rw-r--r--drivers/i2c/busses/i2c-stm32f4.c3
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c3
-rw-r--r--drivers/i2c/i2c-core-base.c6
-rw-r--r--drivers/i2c/i2c-core-smbus.c13
-rw-r--r--drivers/iio/adc/cpcap-adc.c2
-rw-r--r--drivers/iio/adc/meson_saradc.c52
-rw-r--r--drivers/iio/health/max30102.c2
-rw-r--r--drivers/iio/industrialio-core.c4
-rw-r--r--drivers/iio/proximity/sx9500.c9
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c4
-rw-r--r--drivers/infiniband/hw/mlx5/main.c9
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c7
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c3
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c1
-rw-r--r--drivers/input/joystick/analog.c2
-rw-r--r--drivers/input/joystick/xpad.c19
-rw-r--r--drivers/input/misc/ims-pcu.c2
-rw-r--r--drivers/input/misc/twl4030-vibra.c6
-rw-r--r--drivers/input/misc/twl6040-vibra.c3
-rw-r--r--drivers/input/misc/xen-kbdfront.c2
-rw-r--r--drivers/input/mouse/alps.c23
-rw-r--r--drivers/input/mouse/alps.h10
-rw-r--r--drivers/input/mouse/elantech.c2
-rw-r--r--drivers/input/mouse/synaptics.c1
-rw-r--r--drivers/input/mouse/trackpoint.c245
-rw-r--r--drivers/input/mouse/trackpoint.h34
-rw-r--r--drivers/input/rmi4/rmi_driver.c4
-rw-r--r--drivers/input/rmi4/rmi_f01.c12
-rw-r--r--drivers/input/touchscreen/88pm860x-ts.c16
-rw-r--r--drivers/input/touchscreen/elants_i2c.c10
-rw-r--r--drivers/input/touchscreen/hideep.c3
-rw-r--r--drivers/input/touchscreen/of_touchscreen.c4
-rw-r--r--drivers/input/touchscreen/s6sy761.c15
-rw-r--r--drivers/input/touchscreen/stmfts.c15
-rw-r--r--drivers/iommu/amd_iommu.c2
-rw-r--r--drivers/iommu/arm-smmu-v3.c17
-rw-r--r--drivers/iommu/intel-iommu.c8
-rw-r--r--drivers/iommu/intel_irq_remapping.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c4
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c6
-rw-r--r--drivers/leds/led-core.c1
-rw-r--r--drivers/md/dm-bufio.c8
-rw-r--r--drivers/md/dm-cache-target.c12
-rw-r--r--drivers/md/dm-crypt.c20
-rw-r--r--drivers/md/dm-integrity.c49
-rw-r--r--drivers/md/dm-mpath.c67
-rw-r--r--drivers/md/dm-snap.c48
-rw-r--r--drivers/md/dm-table.c5
-rw-r--r--drivers/md/dm-thin-metadata.c6
-rw-r--r--drivers/md/dm-thin.c22
-rw-r--r--drivers/md/md.c4
-rw-r--r--drivers/md/persistent-data/dm-btree.c19
-rw-r--r--drivers/md/raid1.c4
-rw-r--r--drivers/md/raid10.c4
-rw-r--r--drivers/md/raid5-cache.c22
-rw-r--r--drivers/md/raid5.c2
-rw-r--r--drivers/media/common/siano/smscoreapi.c66
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c68
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c13
-rw-r--r--drivers/media/dvb-core/dvb_net.c15
-rw-r--r--drivers/media/dvb-frontends/af9013.h24
-rw-r--r--drivers/media/dvb-frontends/ascot2e.h9
-rw-r--r--drivers/media/dvb-frontends/cxd2820r.h24
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/bsp_i2c.h12
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drx_driver.h878
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c248
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.h220
-rw-r--r--drivers/media/dvb-frontends/drxk.h13
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c32
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.h13
-rw-r--r--drivers/media/dvb-frontends/helene.h30
-rw-r--r--drivers/media/dvb-frontends/horus3a.h9
-rw-r--r--drivers/media/dvb-frontends/ix2505v.c6
-rw-r--r--drivers/media/dvb-frontends/ix2505v.h28
-rw-r--r--drivers/media/dvb-frontends/l64781.c2
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.h155
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.h17
-rw-r--r--drivers/media/dvb-frontends/mn88472.h16
-rw-r--r--drivers/media/dvb-frontends/rtl2830.h1
-rw-r--r--drivers/media/dvb-frontends/rtl2832.h1
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.h6
-rw-r--r--drivers/media/dvb-frontends/sp887x.c6
-rw-r--r--drivers/media/dvb-frontends/stb6000.h11
-rw-r--r--drivers/media/dvb-frontends/stv0299.c2
-rw-r--r--drivers/media/dvb-frontends/tda10071.h1
-rw-r--r--drivers/media/dvb-frontends/tda826x.h11
-rw-r--r--drivers/media/dvb-frontends/tua6100.c2
-rw-r--r--drivers/media/dvb-frontends/tua6100.h2
-rw-r--r--drivers/media/dvb-frontends/zd1301_demod.h13
-rw-r--r--drivers/media/dvb-frontends/zl10036.c8
-rw-r--r--drivers/media/dvb-frontends/zl10036.h16
-rw-r--r--drivers/media/i2c/Kconfig1
-rw-r--r--drivers/media/i2c/et8ek8/Kconfig1
-rw-r--r--drivers/media/i2c/imx274.c5
-rw-r--r--drivers/media/i2c/lm3560.c1
-rw-r--r--drivers/media/i2c/m5mols/m5mols_capture.c5
-rw-r--r--drivers/media/i2c/m5mols/m5mols_controls.c1
-rw-r--r--drivers/media/i2c/m5mols/m5mols_core.c20
-rw-r--r--drivers/media/i2c/ov5647.c4
-rw-r--r--drivers/media/i2c/s5k6a3.c3
-rw-r--r--drivers/media/i2c/s5k6aa.c5
-rw-r--r--drivers/media/i2c/tvp514x.c12
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c8
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-enc.c2
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c11
-rw-r--r--drivers/media/pci/tw68/tw68-risc.c33
-rw-r--r--drivers/media/platform/davinci/vpif.c3
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c27
-rw-r--r--drivers/media/platform/davinci/vpif_display.c16
-rw-r--r--drivers/media/platform/exynos4-is/fimc-capture.c3
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c11
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c2
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c1
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c1
-rw-r--r--drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c4
-rw-r--r--drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c2
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.c3
-rw-r--r--drivers/media/platform/pxa_camera.c9
-rw-r--r--drivers/media/platform/rcar_fdp1.c2
-rw-r--r--drivers/media/platform/rcar_jpu.c4
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c1
-rw-r--r--drivers/media/platform/sh_veu.c2
-rw-r--r--drivers/media/platform/soc_camera/soc_scale_crop.c21
-rw-r--r--drivers/media/platform/sti/hva/hva-h264.c18
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c2
-rw-r--r--drivers/media/platform/vim2m.c2
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.c1
-rw-r--r--drivers/media/radio/radio-si476x.c18
-rw-r--r--drivers/media/radio/radio-wl1273.c2
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.c2
-rw-r--r--drivers/media/rc/imon.c40
-rw-r--r--drivers/media/rc/ir-jvc-decoder.c2
-rw-r--r--drivers/media/rc/ir-lirc-codec.c4
-rw-r--r--drivers/media/rc/ir-nec-decoder.c3
-rw-r--r--drivers/media/rc/ir-sanyo-decoder.c2
-rw-r--r--drivers/media/rc/ir-sharp-decoder.c2
-rw-r--r--drivers/media/rc/ir-xmp-decoder.c2
-rw-r--r--drivers/media/rc/rc-ir-raw.c2
-rw-r--r--drivers/media/rc/rc-main.c78
-rw-r--r--drivers/media/rc/sir_ir.c40
-rw-r--r--drivers/media/rc/st_rc.c6
-rw-r--r--drivers/media/rc/streamzap.c6
-rw-r--r--drivers/media/tuners/mt2063.c6
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-fe.c2
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c8
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-common.c16
-rw-r--r--drivers/media/usb/dvb-usb/friio-fe.c2
-rw-r--r--drivers/media/usb/dvb-usb/friio.c2
-rw-r--r--drivers/media/usb/gspca/ov519.c2
-rw-r--r--drivers/media/usb/pwc/pwc-dec23.c7
-rw-r--r--drivers/media/usb/siano/smsusb.c4
-rw-r--r--drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c6
-rw-r--r--drivers/media/usb/usbtv/usbtv-core.c1
-rw-r--r--drivers/media/v4l2-core/tuner-core.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c10
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c10
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c2
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c2
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c56
-rw-r--r--drivers/media/v4l2-core/videobuf2-memops.c2
-rw-r--r--drivers/media/v4l2-core/videobuf2-v4l2.c10
-rw-r--r--drivers/mfd/arizona-irq.c4
-rw-r--r--drivers/mfd/cros_ec_spi.c53
-rw-r--r--drivers/mfd/rtsx_pcr.c3
-rw-r--r--drivers/mfd/twl4030-audio.c9
-rw-r--r--drivers/mfd/twl6040.c12
-rw-r--r--drivers/misc/eeprom/at24.c26
-rw-r--r--drivers/misc/pti.c2
-rw-r--r--drivers/mmc/core/card.h2
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/core/quirks.h8
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c3
-rw-r--r--drivers/mmc/host/s3cmci.c6
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c14
-rw-r--r--drivers/mtd/mtdcore.c2
-rw-r--r--drivers/mtd/nand/brcmnand/brcmnand.c2
-rw-r--r--drivers/mtd/nand/gpio.c6
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c6
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c1
-rw-r--r--drivers/mux/core.c4
-rw-r--r--drivers/net/can/flexcan.c11
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c9
-rw-r--r--drivers/net/can/peak_canfd/peak_pciefd_main.c5
-rw-r--r--drivers/net/can/sja1000/peak_pci.c5
-rw-r--r--drivers/net/can/ti_hecc.c3
-rw-r--r--drivers/net/can/usb/ems_usb.c3
-rw-r--r--drivers/net/can/usb/esd_usb2.c2
-rw-r--r--drivers/net/can/usb/gs_usb.c2
-rw-r--r--drivers/net/can/usb/kvaser_usb.c13
-rw-r--r--drivers/net/can/usb/mcba_usb.c4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c21
-rw-r--r--drivers/net/can/usb/usb_8dev.c2
-rw-r--r--drivers/net/can/vxcan.c2
-rw-r--r--drivers/net/dsa/b53/b53_common.c9
-rw-r--r--drivers/net/dsa/bcm_sf2.c1
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c4
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c34
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c1
-rw-r--r--drivers/net/ethernet/3com/3c59x.c90
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c45
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c16
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h29
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c82
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c17
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c29
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c80
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/ver.h6
-rw-r--r--drivers/net/ethernet/arc/emac.h2
-rw-r--r--drivers/net/ethernet/arc/emac_main.c164
-rw-r--r--drivers/net/ethernet/arc/emac_rockchip.c13
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c57
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c21
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h7
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c17
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c9
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c13
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c16
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet.h1
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c11
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c3
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c6
-rw-r--r--drivers/net/ethernet/ibm/emac/emac.h4
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c110
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h3
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c27
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c11
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c39
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c26
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c3
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c8
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c4
-rw-r--r--drivers/net/ethernet/marvell/skge.c1
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c111
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c103
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rl.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vxlan.c64
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vxlan.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c55
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c8
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-phy.c7
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c6
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c1
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c8
-rw-r--r--drivers/net/ethernet/realtek/r8169.c9
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c27
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c76
-rw-r--r--drivers/net/ethernet/sfc/tx.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c9
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c2
-rw-r--r--drivers/net/geneve.c14
-rw-r--r--drivers/net/hippi/rrunner.c2
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c1
-rw-r--r--drivers/net/macvlan.c7
-rw-r--r--drivers/net/phy/at803x.c4
-rw-r--r--drivers/net/phy/marvell.c18
-rw-r--r--drivers/net/phy/mdio-sun4i.c6
-rw-r--r--drivers/net/phy/mdio-xgene.c21
-rw-r--r--drivers/net/phy/mdio_bus.c1
-rw-r--r--drivers/net/phy/meson-gxl.c74
-rw-r--r--drivers/net/phy/micrel.c7
-rw-r--r--drivers/net/phy/phy.c9
-rw-r--r--drivers/net/phy/phy_device.c10
-rw-r--r--drivers/net/phy/phylink.c8
-rw-r--r--drivers/net/phy/sfp-bus.c6
-rw-r--r--drivers/net/phy/sfp.c41
-rw-r--r--drivers/net/ppp/ppp_generic.c5
-rw-r--r--drivers/net/ppp/pppoe.c11
-rw-r--r--drivers/net/tap.c14
-rw-r--r--drivers/net/tun.c39
-rw-r--r--drivers/net/usb/lan78xx.c1
-rw-r--r--drivers/net/usb/qmi_wwan.c5
-rw-r--r--drivers/net/usb/r8152.c13
-rw-r--r--drivers/net/usb/usbnet.c13
-rw-r--r--drivers/net/virtio_net.c2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/vrf.c5
-rw-r--r--drivers/net/vxlan.c17
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c23
-rw-r--r--drivers/net/wireless/ath/wcn36xx/pmc.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/txq.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c8
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c20
-rw-r--r--drivers/net/xen-netback/interface.c2
-rw-r--r--drivers/net/xen-netfront.c1
-rw-r--r--drivers/nvdimm/btt.c201
-rw-r--r--drivers/nvdimm/btt.h47
-rw-r--r--drivers/nvdimm/pfn_devs.c20
-rw-r--r--drivers/nvme/host/core.c18
-rw-r--r--drivers/nvme/host/fabrics.c1
-rw-r--r--drivers/nvme/host/fc.c1
-rw-r--r--drivers/nvme/host/nvme.h12
-rw-r--r--drivers/nvme/host/pci.c66
-rw-r--r--drivers/nvme/host/rdma.c14
-rw-r--r--drivers/nvme/target/fcloop.c2
-rw-r--r--drivers/nvmem/meson-mx-efuse.c4
-rw-r--r--drivers/of/dynamic.c4
-rw-r--r--drivers/of/of_mdio.c12
-rw-r--r--drivers/of/overlay.c84
-rw-r--r--drivers/of/unittest.c1
-rw-r--r--drivers/parisc/dino.c10
-rw-r--r--drivers/parisc/eisa_eeprom.c2
-rw-r--r--drivers/parisc/lba_pci.c33
-rw-r--r--drivers/pci/host/pci-hyperv.c8
-rw-r--r--drivers/pci/host/pcie-rcar.c8
-rw-r--r--drivers/pci/pci-driver.c9
-rw-r--r--drivers/phy/motorola/phy-cpcap-usb.c2
-rw-r--r--drivers/phy/phy-core.c4
-rw-r--r--drivers/phy/renesas/Kconfig2
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c2
-rw-r--r--drivers/phy/tegra/xusb.c58
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c16
-rw-r--r--drivers/pinctrl/intel/pinctrl-denverton.c2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c13
-rw-r--r--drivers/pinctrl/pinctrl-gemini.c2
-rw-r--r--drivers/pinctrl/pinctrl-single.c5
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c6
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c6
-rw-r--r--drivers/platform/x86/asus-wireless.c1
-rw-r--r--drivers/platform/x86/dell-laptop.c17
-rw-r--r--drivers/platform/x86/dell-wmi.c2
-rw-r--r--drivers/platform/x86/wmi.c2
-rw-r--r--drivers/s390/Makefile1
-rw-r--r--drivers/s390/block/Kconfig1
-rw-r--r--drivers/s390/block/dasd.c10
-rw-r--r--drivers/s390/block/dasd_3990_erp.c10
-rw-r--r--drivers/s390/block/dasd_eckd.c16
-rw-r--r--drivers/s390/block/dasd_int.h2
-rw-r--r--drivers/s390/char/Kconfig1
-rw-r--r--drivers/s390/char/Makefile2
-rw-r--r--drivers/s390/char/defkeymap.map1
-rw-r--r--drivers/s390/cio/blacklist.h1
-rw-r--r--drivers/s390/cio/qdio_main.c7
-rw-r--r--drivers/s390/net/Kconfig1
-rw-r--r--drivers/s390/net/qeth_core.h9
-rw-r--r--drivers/s390/net/qeth_core_main.c46
-rw-r--r--drivers/s390/net/qeth_l2_main.c4
-rw-r--r--drivers/s390/net/qeth_l3.h2
-rw-r--r--drivers/s390/net/qeth_l3_main.c43
-rw-r--r--drivers/s390/net/qeth_l3_sys.c75
-rw-r--r--drivers/s390/scsi/Makefile1
-rw-r--r--drivers/scsi/aacraid/aacraid.h2
-rw-r--r--drivers/scsi/aacraid/commsup.c43
-rw-r--r--drivers/scsi/aacraid/linit.c5
-rw-r--r--drivers/scsi/aacraid/rx.c15
-rw-r--r--drivers/scsi/aacraid/src.c20
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c6
-rw-r--r--drivers/scsi/bfa/bfad_im.c6
-rw-r--r--drivers/scsi/bfa/bfad_im.h10
-rw-r--r--drivers/scsi/libfc/fc_lport.c4
-rw-r--r--drivers/scsi/libsas/sas_expander.c10
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c17
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c2
-rw-r--r--drivers/scsi/osd/osd_initiator.c4
-rw-r--r--drivers/scsi/scsi_debugfs.c6
-rw-r--r--drivers/scsi/scsi_devinfo.c33
-rw-r--r--drivers/scsi/scsi_lib.c12
-rw-r--r--drivers/scsi/scsi_scan.c13
-rw-r--r--drivers/scsi/scsi_sysfs.c10
-rw-r--r--drivers/scsi/scsi_transport_spi.c12
-rw-r--r--drivers/scsi/sd.c4
-rw-r--r--drivers/scsi/storvsc_drv.c3
-rw-r--r--drivers/scsi/ufs/ufshcd.c7
-rw-r--r--drivers/soc/amlogic/meson-gx-socinfo.c4
-rw-r--r--drivers/spi/spi-armada-3700.c8
-rw-r--r--drivers/spi/spi-atmel.c2
-rw-r--r--drivers/spi/spi-rspi.c4
-rw-r--r--drivers/spi/spi-sun4i.c2
-rw-r--r--drivers/spi/spi-xilinx.c11
-rw-r--r--drivers/ssb/Kconfig2
-rw-r--r--drivers/staging/android/ashmem.c2
-rw-r--r--drivers/staging/android/ion/Kconfig2
-rw-r--r--drivers/staging/android/ion/ion.c4
-rw-r--r--drivers/staging/android/ion/ion_cma_heap.c15
-rw-r--r--drivers/staging/ccree/ssi_hash.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio.c5
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c23
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-socket.c8
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp.h34
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h16
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/src/circbuf.c26
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_binarydesc.h34
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_util.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/interface/ia_css_util.h18
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_private.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_private.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_private.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_private.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_dma_global.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/pixelgen_global.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/system_global.h8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_api_version.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ibuf_ctrl_public.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h98
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h78
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_stream2mmio_public.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/pixelgen_public.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h144
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_3a.h38
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_acc_types.h216
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_buffer.h32
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_control.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_dvs.h52
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_env.h40
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_err.h18
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_event_public.h68
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_firmware.h14
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frac.h10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_format.h62
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_public.h120
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_input_port.h32
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_irq.h112
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_metadata.h24
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mipi.h10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_morph.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe_public.h128
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_prbs.h12
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_properties.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_shading.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_format.h90
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_public.h148
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_timer.h30
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_tpg.h8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_types.h258
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_types.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_types.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_types.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr_param.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bayer_ls/bayer_ls_1.0/ia_css_bayer_ls_param.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_types.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_types.h36
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h34
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_param.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_types.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_types.h8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h12
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h38
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_types.h10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_types.h10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_types.h8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_types.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_types.h82
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_types.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h14
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_types.h32
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_types.h14
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_types.h26
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h16
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_types.h12
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_types.h12
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_types.h26
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_param.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_types.h8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_types.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_param.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_types.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h98
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a_stat_ls/ia_css_s3a_stat_ls_param.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_types.h42
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common_types.h104
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h40
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h38
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h26
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_param.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_types.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_types.h14
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h30
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h40
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/yuv_ls/yuv_ls_1.0/ia_css_yuv_ls_param.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/memory_realloc.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/interface/ia_css_binary.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h30
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/src/event.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/src/eventq.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/src/frame.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param_types.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/src/isp_param.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_init.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/rx.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.c8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline.h28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c26
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl_comm.h14
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/timer/src/timer.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c68
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_legacy.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.c18
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_struct.h2
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c6
-rw-r--r--drivers/staging/pi433/rf69.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c14
-rw-r--r--drivers/target/target_core_pscsi.c4
-rw-r--r--drivers/tee/optee/core.c1
-rw-r--r--drivers/thunderbolt/nhi.c2
-rw-r--r--drivers/tty/n_tty.c4
-rw-r--r--drivers/tty/serdev/serdev-ttyport.c26
-rw-r--r--drivers/tty/serial/8250/8250_early.c14
-rw-r--r--drivers/tty/serial/8250/8250_pci.c3
-rw-r--r--drivers/usb/chipidea/ci_hdrc_msm.c2
-rw-r--r--drivers/usb/common/ulpi.c4
-rw-r--r--drivers/usb/core/config.c32
-rw-r--r--drivers/usb/core/devio.c14
-rw-r--r--drivers/usb/core/hub.c9
-rw-r--r--drivers/usb/core/quirks.c9
-rw-r--r--drivers/usb/dwc2/core.h4
-rw-r--r--drivers/usb/dwc2/gadget.c42
-rw-r--r--drivers/usb/dwc2/params.c29
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c5
-rw-r--r--drivers/usb/dwc3/gadget.c4
-rw-r--r--drivers/usb/gadget/composite.c7
-rw-r--r--drivers/usb/gadget/function/f_fs.c15
-rw-r--r--drivers/usb/gadget/legacy/Kconfig2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c1
-rw-r--r--drivers/usb/gadget/udc/core.c36
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c2
-rw-r--r--drivers/usb/host/ehci-dbg.c2
-rw-r--r--drivers/usb/host/xhci-debugfs.c16
-rw-r--r--drivers/usb/host/xhci-mem.c22
-rw-r--r--drivers/usb/host/xhci-pci.c3
-rw-r--r--drivers/usb/host/xhci-ring.c18
-rw-r--r--drivers/usb/host/xhci.c6
-rw-r--r--drivers/usb/misc/usb3503.c2
-rw-r--r--drivers/usb/mon/mon_bin.c8
-rw-r--r--drivers/usb/musb/da8xx.c10
-rw-r--r--drivers/usb/serial/cp210x.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h6
-rw-r--r--drivers/usb/serial/option.c20
-rw-r--r--drivers/usb/serial/qcserial.c3
-rw-r--r--drivers/usb/serial/usb_debug.c2
-rw-r--r--drivers/usb/storage/uas-detect.h4
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/usb/storage/unusual_uas.h14
-rw-r--r--drivers/usb/typec/Kconfig54
-rw-r--r--drivers/usb/typec/ucsi/Kconfig1
-rw-r--r--drivers/usb/usbip/stub_dev.c3
-rw-r--r--drivers/usb/usbip/stub_main.c5
-rw-r--r--drivers/usb/usbip/stub_rx.c47
-rw-r--r--drivers/usb/usbip/stub_tx.c13
-rw-r--r--drivers/usb/usbip/usbip_common.c33
-rw-r--r--drivers/usb/usbip/usbip_common.h1
-rw-r--r--drivers/usb/usbip/vhci_hcd.c13
-rw-r--r--drivers/usb/usbip/vhci_rx.c23
-rw-r--r--drivers/usb/usbip/vhci_sysfs.c25
-rw-r--r--drivers/usb/usbip/vhci_tx.c3
-rw-r--r--drivers/usb/usbip/vudc_rx.c19
-rw-r--r--drivers/usb/usbip/vudc_tx.c11
-rw-r--r--drivers/vhost/net.c20
-rw-r--r--drivers/vhost/vhost.c6
-rw-r--r--drivers/virtio/virtio.c2
-rw-r--r--drivers/virtio/virtio_balloon.c3
-rw-r--r--drivers/virtio/virtio_mmio.c22
-rw-r--r--drivers/xen/Kconfig2
-rw-r--r--drivers/xen/balloon.c65
-rw-r--r--drivers/xen/gntdev.c8
-rw-r--r--drivers/xen/pvcalls-front.c6
-rw-r--r--fs/afs/dir.c37
-rw-r--r--fs/afs/inode.c4
-rw-r--r--fs/afs/rxrpc.c2
-rw-r--r--fs/afs/write.c8
-rw-r--r--fs/autofs4/waitq.c1
-rw-r--r--fs/btrfs/ctree.c18
-rw-r--r--fs/btrfs/delayed-inode.c71
-rw-r--r--fs/btrfs/disk-io.c12
-rw-r--r--fs/btrfs/extent-tree.c1
-rw-r--r--fs/btrfs/inode.c2
-rw-r--r--fs/btrfs/ioctl.c2
-rw-r--r--fs/btrfs/volumes.c1
-rw-r--r--fs/ceph/mds_client.c42
-rw-r--r--fs/cifs/smb2ops.c3
-rw-r--r--fs/cifs/smb2pdu.c30
-rw-r--r--fs/cramfs/Kconfig1
-rw-r--r--fs/dax.c3
-rw-r--r--fs/exec.c23
-rw-r--r--fs/ext4/extents.c1
-rw-r--r--fs/ext4/ialloc.c2
-rw-r--r--fs/ext4/inode.c9
-rw-r--r--fs/ext4/namei.c4
-rw-r--r--fs/hpfs/dir.c1
-rw-r--r--fs/hpfs/dnode.c2
-rw-r--r--fs/hpfs/super.c1
-rw-r--r--fs/namespace.c1
-rw-r--r--fs/nfs/client.c11
-rw-r--r--fs/nfs/nfs4client.c17
-rw-r--r--fs/nfs/write.c2
-rw-r--r--fs/nfsd/auth.c3
-rw-r--r--fs/orangefs/devorangefs-req.c3
-rw-r--r--fs/orangefs/file.c7
-rw-r--r--fs/orangefs/orangefs-kernel.h11
-rw-r--r--fs/orangefs/waitqueue.c4
-rw-r--r--fs/overlayfs/Kconfig10
-rw-r--r--fs/overlayfs/dir.c3
-rw-r--r--fs/overlayfs/namei.c18
-rw-r--r--fs/overlayfs/overlayfs.h2
-rw-r--r--fs/overlayfs/ovl_entry.h2
-rw-r--r--fs/overlayfs/readdir.c7
-rw-r--r--fs/overlayfs/super.c87
-rw-r--r--fs/proc/array.c7
-rw-r--r--fs/proc/base.c2
-rw-r--r--fs/super.c43
-rw-r--r--fs/userfaultfd.c20
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c4
-rw-r--r--fs/xfs/libxfs/xfs_attr.c20
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c9
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.h3
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c2
-rw-r--r--fs/xfs/libxfs/xfs_defer.c39
-rw-r--r--fs/xfs/libxfs/xfs_defer.h5
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c10
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.h1
-rw-r--r--fs/xfs/libxfs/xfs_iext_tree.c4
-rw-r--r--fs/xfs/libxfs/xfs_refcount.c52
-rw-r--r--fs/xfs/libxfs/xfs_rmap.c99
-rw-r--r--fs/xfs/libxfs/xfs_rmap.h16
-rw-r--r--fs/xfs/scrub/scrub.c1
-rw-r--r--fs/xfs/scrub/trace.c1
-rw-r--r--fs/xfs/xfs_aops.c4
-rw-r--r--fs/xfs/xfs_extfree_item.c2
-rw-r--r--fs/xfs/xfs_fsops.c5
-rw-r--r--fs/xfs/xfs_icache.c35
-rw-r--r--fs/xfs/xfs_icache.h1
-rw-r--r--fs/xfs/xfs_inode.c61
-rw-r--r--fs/xfs/xfs_inode.h3
-rw-r--r--fs/xfs/xfs_iomap.c4
-rw-r--r--fs/xfs/xfs_qm.c50
-rw-r--r--fs/xfs/xfs_reflink.c23
-rw-r--r--fs/xfs/xfs_super.c9
-rw-r--r--fs/xfs/xfs_symlink.c15
-rw-r--r--fs/xfs/xfs_trace.c1
-rw-r--r--include/asm-generic/mm_hooks.h5
-rw-r--r--include/asm-generic/pgtable.h5
-rw-r--r--include/crypto/if_alg.h5
-rw-r--r--include/crypto/internal/hash.h8
-rw-r--r--include/crypto/mcryptd.h1
-rw-r--r--include/drm/drm_connector.h10
-rw-r--r--include/drm/drm_edid.h2
-rw-r--r--include/drm/drm_mode_config.h18
-rw-r--r--include/kvm/arm_arch_timer.h5
-rw-r--r--include/linux/bio.h2
-rw-r--r--include/linux/blk_types.h9
-rw-r--r--include/linux/blkdev.h25
-rw-r--r--include/linux/bpf.h36
-rw-r--r--include/linux/bpf_verifier.h4
-rw-r--r--include/linux/compiler-gcc.h2
-rw-r--r--include/linux/compiler.h47
-rw-r--r--include/linux/completion.h46
-rw-r--r--include/linux/cpu.h7
-rw-r--r--include/linux/cpuhotplug.h2
-rw-r--r--include/linux/crash_core.h2
-rw-r--r--include/linux/cred.h1
-rw-r--r--include/linux/debugfs.h2
-rw-r--r--include/linux/delayacct.h8
-rw-r--r--include/linux/dma-mapping.h2
-rw-r--r--include/linux/efi.h4
-rw-r--r--include/linux/fscache.h2
-rw-r--r--include/linux/ftrace.h2
-rw-r--r--include/linux/gpio/driver.h33
-rw-r--r--include/linux/hyperv.h1
-rw-r--r--include/linux/idr.h1
-rw-r--r--include/linux/iio/timer/stm32-lptim-trigger.h5
-rw-r--r--include/linux/intel-pti.h43
-rw-r--r--include/linux/ipv6.h3
-rw-r--r--include/linux/irq.h17
-rw-r--r--include/linux/irqdesc.h15
-rw-r--r--include/linux/irqdomain.h2
-rw-r--r--include/linux/irqflags.h4
-rw-r--r--include/linux/jump_label.h7
-rw-r--r--include/linux/kmemcheck.h1
-rw-r--r--include/linux/kvm_host.h2
-rw-r--r--include/linux/lockdep.h127
-rw-r--r--include/linux/mfd/rtsx_pci.h2
-rw-r--r--include/linux/mlx5/driver.h22
-rw-r--r--include/linux/mlx5/mlx5_ifc.h13
-rw-r--r--include/linux/netlink.h4
-rw-r--r--include/linux/oom.h9
-rw-r--r--include/linux/pci.h3
-rw-r--r--include/linux/perf_event.h6
-rw-r--r--include/linux/pm.h1
-rw-r--r--include/linux/pti.h50
-rw-r--r--include/linux/ptr_ring.h23
-rw-r--r--include/linux/rbtree.h2
-rw-r--r--include/linux/rculist_nulls.h38
-rw-r--r--include/linux/rwlock_types.h3
-rw-r--r--include/linux/sched.h17
-rw-r--r--include/linux/sched/coredump.h1
-rw-r--r--include/linux/serdev.h2
-rw-r--r--include/linux/sh_eth.h1
-rw-r--r--include/linux/skbuff.h3
-rw-r--r--include/linux/spi/spi.h2
-rw-r--r--include/linux/spinlock.h5
-rw-r--r--include/linux/spinlock_types.h3
-rw-r--r--include/linux/string.h5
-rw-r--r--include/linux/swapops.h21
-rw-r--r--include/linux/sysfs.h6
-rw-r--r--include/linux/tcp.h3
-rw-r--r--include/linux/tick.h1
-rw-r--r--include/linux/timer.h4
-rw-r--r--include/linux/trace.h2
-rw-r--r--include/linux/usb/usbnet.h1
-rw-r--r--include/net/arp.h3
-rw-r--r--include/net/cfg80211.h3
-rw-r--r--include/net/dst.h8
-rw-r--r--include/net/gue.h18
-rw-r--r--include/net/ip.h1
-rw-r--r--include/net/ipv6.h1
-rw-r--r--include/net/net_namespace.h10
-rw-r--r--include/net/pkt_cls.h7
-rw-r--r--include/net/red.h13
-rw-r--r--include/net/sch_generic.h3
-rw-r--r--include/net/sctp/structs.h5
-rw-r--r--include/net/sock.h11
-rw-r--r--include/net/tc_act/tc_sample.h1
-rw-r--r--include/net/tcp.h5
-rw-r--r--include/net/tls.h2
-rw-r--r--include/net/vxlan.h2
-rw-r--r--include/net/xfrm.h3
-rw-r--r--include/scsi/libsas.h2
-rw-r--r--include/trace/events/clk.h4
-rw-r--r--include/trace/events/kvm.h7
-rw-r--r--include/trace/events/preemptirq.h11
-rw-r--r--include/trace/events/tcp.h97
-rw-r--r--include/trace/events/xdp.h1
-rw-r--r--include/uapi/asm-generic/bpf_perf_event.h9
-rw-r--r--include/uapi/linux/bpf_perf_event.h5
-rw-r--r--include/uapi/linux/if_ether.h3
-rw-r--r--include/uapi/linux/kvm.h8
-rw-r--r--include/uapi/linux/libc-compat.h61
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_common.h2
-rw-r--r--include/uapi/linux/openvswitch.h1
-rw-r--r--include/uapi/linux/pkt_sched.h1
-rw-r--r--include/uapi/linux/rtnetlink.h1
-rw-r--r--include/uapi/linux/usb/ch9.h3
-rw-r--r--include/xen/balloon.h5
-rw-r--r--init/Kconfig14
-rw-r--r--init/main.c16
-rw-r--r--kernel/acct.c2
-rw-r--r--kernel/bpf/arraymap.c61
-rw-r--r--kernel/bpf/core.c26
-rw-r--r--kernel/bpf/hashtab.c2
-rw-r--r--kernel/bpf/inode.c40
-rw-r--r--kernel/bpf/offload.c15
-rw-r--r--kernel/bpf/sockmap.c11
-rw-r--r--kernel/bpf/syscall.c2
-rw-r--r--kernel/bpf/verifier.c388
-rw-r--r--kernel/cgroup/cgroup-v1.c6
-rw-r--r--kernel/cgroup/cgroup.c21
-rw-r--r--kernel/cgroup/debug.c4
-rw-r--r--kernel/cgroup/stat.c8
-rw-r--r--kernel/cpu.c26
-rw-r--r--kernel/crash_core.c2
-rw-r--r--kernel/debug/kdb/kdb_io.c2
-rw-r--r--kernel/delayacct.c42
-rw-r--r--kernel/events/core.c54
-rw-r--r--kernel/exit.c9
-rw-r--r--kernel/fork.c3
-rw-r--r--kernel/futex.c96
-rw-r--r--kernel/groups.c5
-rw-r--r--kernel/irq/debug.h5
-rw-r--r--kernel/irq/debugfs.c1
-rw-r--r--kernel/irq/generic-chip.c11
-rw-r--r--kernel/irq/internals.h2
-rw-r--r--kernel/irq/irqdomain.c13
-rw-r--r--kernel/irq/matrix.c24
-rw-r--r--kernel/irq/msi.c64
-rw-r--r--kernel/jump_label.c12
-rw-r--r--kernel/kcov.c4
-rw-r--r--kernel/locking/lockdep.c653
-rw-r--r--kernel/locking/rtmutex.c26
-rw-r--r--kernel/locking/rtmutex_common.h1
-rw-r--r--kernel/locking/spinlock.c13
-rw-r--r--kernel/pid.c8
-rw-r--r--kernel/printk/printk.c3
-rw-r--r--kernel/sched/completion.c5
-rw-r--r--kernel/sched/core.c28
-rw-r--r--kernel/sched/cpufreq_schedutil.c2
-rw-r--r--kernel/sched/fair.c106
-rw-r--r--kernel/sched/membarrier.c2
-rw-r--r--kernel/sched/rt.c8
-rw-r--r--kernel/sched/wait.c2
-rw-r--r--kernel/time/Kconfig1
-rw-r--r--kernel/time/hrtimer.c3
-rw-r--r--kernel/time/posix-timers.c29
-rw-r--r--kernel/time/tick-sched.c32
-rw-r--r--kernel/time/timer.c37
-rw-r--r--kernel/trace/Kconfig3
-rw-r--r--kernel/trace/bpf_trace.c27
-rw-r--r--kernel/trace/ftrace.c29
-rw-r--r--kernel/trace/ring_buffer.c79
-rw-r--r--kernel/trace/trace.c88
-rw-r--r--kernel/trace/trace_events.c16
-rw-r--r--kernel/trace/trace_events_trigger.c13
-rw-r--r--kernel/trace/trace_functions.c49
-rw-r--r--kernel/trace/trace_stack.c4
-rw-r--r--kernel/uid16.c1
-rw-r--r--kernel/workqueue.c46
-rw-r--r--lib/Kconfig.debug33
-rw-r--r--lib/asn1_decoder.c49
-rw-r--r--lib/kobject_uevent.c16
-rw-r--r--lib/mpi/longlong.h18
-rw-r--r--lib/nlattr.c22
-rw-r--r--lib/oid_registry.c16
-rw-r--r--lib/rbtree.c10
-rw-r--r--lib/test_bpf.c54
-rw-r--r--lib/timerqueue.c8
-rw-r--r--mm/backing-dev.c5
-rw-r--r--mm/debug.c28
-rw-r--r--mm/early_ioremap.c2
-rw-r--r--mm/frame_vector.c6
-rw-r--r--mm/gup.c2
-rw-r--r--mm/hmm.c8
-rw-r--r--mm/huge_memory.c6
-rw-r--r--mm/kmemcheck.c1
-rw-r--r--mm/kmemleak.c4
-rw-r--r--mm/memory.c21
-rw-r--r--mm/mmap.c10
-rw-r--r--mm/mprotect.c6
-rw-r--r--mm/oom_kill.c4
-rw-r--r--mm/page_alloc.c13
-rw-r--r--mm/page_owner.c1
-rw-r--r--mm/page_vma_mapped.c66
-rw-r--r--mm/percpu.c4
-rw-r--r--mm/slab.c23
-rw-r--r--mm/sparse.c2
-rw-r--r--mm/vmscan.c3
-rw-r--r--mm/zsmalloc.c1
-rw-r--r--net/8021q/vlan.c7
-rw-r--r--net/9p/trans_fd.c1
-rw-r--r--net/9p/trans_xen.c4
-rw-r--r--net/batman-adv/bat_iv_ogm.c4
-rw-r--r--net/batman-adv/bat_v.c2
-rw-r--r--net/batman-adv/fragmentation.c2
-rw-r--r--net/batman-adv/tp_meter.c4
-rw-r--r--net/bluetooth/l2cap_core.c20
-rw-r--r--net/bridge/br_netlink.c11
-rw-r--r--net/caif/caif_dev.c5
-rw-r--r--net/caif/caif_usb.c4
-rw-r--r--net/caif/cfcnfg.c10
-rw-r--r--net/caif/cfctrl.c4
-rw-r--r--net/can/af_can.c36
-rw-r--r--net/core/dev.c37
-rw-r--r--net/core/ethtool.c15
-rw-r--r--net/core/filter.c10
-rw-r--r--net/core/flow_dissector.c3
-rw-r--r--net/core/neighbour.c4
-rw-r--r--net/core/net_namespace.c2
-rw-r--r--net/core/netprio_cgroup.c1
-rw-r--r--net/core/rtnetlink.c10
-rw-r--r--net/core/skbuff.c17
-rw-r--r--net/core/sock_diag.c2
-rw-r--r--net/core/sysctl_net_core.c6
-rw-r--r--net/dccp/ccids/ccid2.c3
-rw-r--r--net/dccp/minisocks.c6
-rw-r--r--net/dccp/proto.c5
-rw-r--r--net/dsa/slave.c1
-rw-r--r--net/ipv4/arp.c7
-rw-r--r--net/ipv4/devinet.c2
-rw-r--r--net/ipv4/esp4.c1
-rw-r--r--net/ipv4/esp4_offload.c6
-rw-r--r--net/ipv4/fib_frontend.c9
-rw-r--r--net/ipv4/fib_semantics.c8
-rw-r--r--net/ipv4/igmp.c44
-rw-r--r--net/ipv4/inet_timewait_sock.c6
-rw-r--r--net/ipv4/ip_gre.c3
-rw-r--r--net/ipv4/ip_tunnel.c7
-rw-r--r--net/ipv4/ip_vti.c2
-rw-r--r--net/ipv4/netfilter/arp_tables.c1
-rw-r--r--net/ipv4/netfilter/ip_tables.c1
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c3
-rw-r--r--net/ipv4/raw.c17
-rw-r--r--net/ipv4/route.c1
-rw-r--r--net/ipv4/tcp.c4
-rw-r--r--net/ipv4/tcp_bbr.c12
-rw-r--r--net/ipv4/tcp_input.c22
-rw-r--r--net/ipv4/tcp_ipv4.c61
-rw-r--r--net/ipv4/tcp_minisocks.c6
-rw-r--r--net/ipv4/tcp_offload.c3
-rw-r--r--net/ipv4/tcp_rate.c10
-rw-r--r--net/ipv4/tcp_recovery.c28
-rw-r--r--net/ipv4/tcp_timer.c17
-rw-r--r--net/ipv4/udp_offload.c3
-rw-r--r--net/ipv4/xfrm4_input.c12
-rw-r--r--net/ipv4/xfrm4_mode_tunnel.c1
-rw-r--r--net/ipv6/af_inet6.c1
-rw-r--r--net/ipv6/esp6.c3
-rw-r--r--net/ipv6/esp6_offload.c6
-rw-r--r--net/ipv6/exthdrs.c9
-rw-r--r--net/ipv6/ip6_fib.c83
-rw-r--r--net/ipv6/ip6_gre.c72
-rw-r--r--net/ipv6/ip6_output.c24
-rw-r--r--net/ipv6/ip6_tunnel.c32
-rw-r--r--net/ipv6/ip6_vti.c2
-rw-r--r--net/ipv6/ipv6_sockglue.c3
-rw-r--r--net/ipv6/mcast.c25
-rw-r--r--net/ipv6/netfilter/ip6_tables.c1
-rw-r--r--net/ipv6/netfilter/ip6t_MASQUERADE.c8
-rw-r--r--net/ipv6/route.c20
-rw-r--r--net/ipv6/sit.c5
-rw-r--r--net/ipv6/tcp_ipv6.c13
-rw-r--r--net/ipv6/tcpv6_offload.c3
-rw-r--r--net/ipv6/udp_offload.c3
-rw-r--r--net/ipv6/xfrm6_input.c10
-rw-r--r--net/ipv6/xfrm6_mode_tunnel.c1
-rw-r--r--net/kcm/kcmsock.c93
-rw-r--r--net/key/af_key.c12
-rw-r--r--net/mac80211/ht.c5
-rw-r--r--net/mac80211/rx.c2
-rw-r--r--net/netfilter/nf_conntrack_h323_asn1.c128
-rw-r--r--net/netfilter/nf_conntrack_netlink.c13
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c3
-rw-r--r--net/netfilter/nf_tables_api.c15
-rw-r--r--net/netfilter/nfnetlink_cthelper.c10
-rw-r--r--net/netfilter/nfnetlink_log.c5
-rw-r--r--net/netfilter/nfnetlink_queue.c5
-rw-r--r--net/netfilter/nft_exthdr.c2
-rw-r--r--net/netfilter/x_tables.c9
-rw-r--r--net/netfilter/xt_bpf.c20
-rw-r--r--net/netfilter/xt_osf.c7
-rw-r--r--net/netlink/af_netlink.c6
-rw-r--r--net/openvswitch/flow.c15
-rw-r--r--net/openvswitch/flow_netlink.c51
-rw-r--r--net/rds/rdma.c6
-rw-r--r--net/rds/send.c3
-rw-r--r--net/rds/tcp.c5
-rw-r--r--net/rds/tcp.h2
-rw-r--r--net/rds/tcp_send.c4
-rw-r--r--net/rxrpc/af_rxrpc.c5
-rw-r--r--net/rxrpc/call_event.c4
-rw-r--r--net/rxrpc/conn_event.c50
-rw-r--r--net/rxrpc/conn_object.c2
-rw-r--r--net/rxrpc/input.c4
-rw-r--r--net/rxrpc/sendmsg.c2
-rw-r--r--net/sched/act_gact.c2
-rw-r--r--net/sched/act_meta_mark.c1
-rw-r--r--net/sched/act_meta_skbtcindex.c1
-rw-r--r--net/sched/act_mirred.c2
-rw-r--r--net/sched/act_sample.c14
-rw-r--r--net/sched/cls_api.c3
-rw-r--r--net/sched/cls_bpf.c100
-rw-r--r--net/sched/cls_u32.c1
-rw-r--r--net/sched/em_nbyte.c2
-rw-r--r--net/sched/sch_api.c17
-rw-r--r--net/sched/sch_choke.c3
-rw-r--r--net/sched/sch_generic.c25
-rw-r--r--net/sched/sch_gred.c3
-rw-r--r--net/sched/sch_ingress.c32
-rw-r--r--net/sched/sch_red.c33
-rw-r--r--net/sched/sch_sfq.c3
-rw-r--r--net/sctp/chunk.c11
-rw-r--r--net/sctp/debug.c3
-rw-r--r--net/sctp/input.c28
-rw-r--r--net/sctp/ipv6.c1
-rw-r--r--net/sctp/offload.c3
-rw-r--r--net/sctp/outqueue.c23
-rw-r--r--net/sctp/socket.c79
-rw-r--r--net/sctp/stream.c22
-rw-r--r--net/sctp/transport.c29
-rw-r--r--net/sctp/ulpqueue.c24
-rw-r--r--net/socket.c123
-rw-r--r--net/strparser/strparser.c2
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_xdr.c1
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c1
-rw-r--r--net/sunrpc/svcauth_unix.c2
-rw-r--r--net/sunrpc/xprt.c28
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c6
-rw-r--r--net/sunrpc/xprtrdma/transport.c2
-rw-r--r--net/sunrpc/xprtrdma/verbs.c2
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h1
-rw-r--r--net/tipc/bearer.c5
-rw-r--r--net/tipc/group.c69
-rw-r--r--net/tipc/monitor.c6
-rw-r--r--net/tipc/node.c26
-rw-r--r--net/tipc/server.c3
-rw-r--r--net/tipc/socket.c4
-rw-r--r--net/tipc/udp_media.c4
-rw-r--r--net/tls/tls_main.c17
-rw-r--r--net/tls/tls_sw.c18
-rw-r--r--net/vmw_vsock/af_vsock.c2
-rw-r--r--net/vmw_vsock/hyperv_transport.c2
-rw-r--r--net/wireless/Makefile39
-rw-r--r--net/wireless/certs/sforshee.hex86
-rw-r--r--net/wireless/certs/sforshee.x509bin680 -> 0 bytes
-rw-r--r--net/wireless/core.c8
-rw-r--r--net/wireless/core.h2
-rw-r--r--net/wireless/nl80211.c20
-rw-r--r--net/wireless/reg.c3
-rw-r--r--net/wireless/wext-compat.c3
-rw-r--r--net/xfrm/xfrm_device.c1
-rw-r--r--net/xfrm/xfrm_input.c69
-rw-r--r--net/xfrm/xfrm_policy.c24
-rw-r--r--net/xfrm/xfrm_state.c24
-rw-r--r--net/xfrm/xfrm_user.c44
-rw-r--r--samples/bpf/bpf_load.c14
-rw-r--r--scripts/Makefile.build14
-rwxr-xr-xscripts/checkpatch.pl22
-rwxr-xr-xscripts/decodecode8
-rwxr-xr-xscripts/faddr2line8
-rw-r--r--scripts/gdb/linux/tasks.py2
-rw-r--r--scripts/genksyms/.gitignore1
-rw-r--r--scripts/kconfig/expr.c5
-rwxr-xr-xscripts/kernel-doc2
-rw-r--r--security/Kconfig11
-rw-r--r--security/apparmor/domain.c9
-rw-r--r--security/apparmor/include/perms.h3
-rw-r--r--security/apparmor/ipc.c53
-rw-r--r--security/apparmor/mount.c12
-rw-r--r--security/commoncap.c21
-rw-r--r--security/keys/key.c1
-rw-r--r--security/keys/keyctl.c24
-rw-r--r--security/keys/request_key.c48
-rw-r--r--sound/core/oss/pcm_oss.c41
-rw-r--r--sound/core/oss/pcm_plugin.c14
-rw-r--r--sound/core/pcm.c2
-rw-r--r--sound/core/pcm_lib.c5
-rw-r--r--sound/core/pcm_native.c9
-rw-r--r--sound/core/rawmidi.c15
-rw-r--r--sound/core/seq/seq_clientmgr.c3
-rw-r--r--sound/core/seq/seq_clientmgr.h1
-rw-r--r--sound/core/seq/seq_timer.c2
-rw-r--r--sound/drivers/aloop.c98
-rw-r--r--sound/hda/hdac_i915.c2
-rw-r--r--sound/pci/hda/patch_cirrus.c1
-rw-r--r--sound/pci/hda/patch_conexant.c29
-rw-r--r--sound/pci/hda/patch_hdmi.c6
-rw-r--r--sound/pci/hda/patch_realtek.c58
-rw-r--r--sound/soc/amd/acp-pcm-dma.c7
-rw-r--r--sound/soc/atmel/Kconfig2
-rw-r--r--sound/soc/codecs/da7218.c2
-rw-r--r--sound/soc/codecs/msm8916-wcd-analog.c2
-rw-r--r--sound/soc/codecs/msm8916-wcd-digital.c4
-rw-r--r--sound/soc/codecs/nau8825.c1
-rw-r--r--sound/soc/codecs/rt5514-spi.c15
-rw-r--r--sound/soc/codecs/rt5514.c2
-rw-r--r--sound/soc/codecs/rt5645.c2
-rw-r--r--sound/soc/codecs/rt5663.c4
-rw-r--r--sound/soc/codecs/rt5663.h4
-rw-r--r--sound/soc/codecs/tlv320aic31xx.h2
-rw-r--r--sound/soc/codecs/twl4030.c4
-rw-r--r--sound/soc/codecs/wm_adsp.c12
-rw-r--r--sound/soc/fsl/fsl_asrc.h4
-rw-r--r--sound/soc/fsl/fsl_ssi.c44
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_max98927.c2
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c2
-rw-r--r--sound/soc/intel/skylake/skl-nhlt.c15
-rw-r--r--sound/soc/intel/skylake/skl-topology.c2
-rw-r--r--sound/soc/rockchip/rockchip_spdif.c18
-rw-r--r--sound/soc/sh/rcar/adg.c6
-rw-r--r--sound/soc/sh/rcar/core.c4
-rw-r--r--sound/soc/sh/rcar/dma.c86
-rw-r--r--sound/soc/sh/rcar/ssi.c16
-rw-r--r--sound/soc/sh/rcar/ssiu.c5
-rw-r--r--sound/usb/mixer.c26
-rw-r--r--sound/usb/quirks.c7
-rw-r--r--tools/arch/arm/include/uapi/asm/kvm.h7
-rw-r--r--tools/arch/arm64/include/uapi/asm/bpf_perf_event.h9
-rw-r--r--tools/arch/arm64/include/uapi/asm/kvm.h7
-rw-r--r--tools/arch/s390/include/uapi/asm/bpf_perf_event.h9
-rw-r--r--tools/arch/s390/include/uapi/asm/kvm.h4
-rw-r--r--tools/arch/s390/include/uapi/asm/kvm_perf.h4
-rw-r--r--tools/arch/s390/include/uapi/asm/perf_regs.h44
-rw-r--r--tools/arch/s390/include/uapi/asm/ptrace.h457
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h538
-rw-r--r--tools/arch/x86/include/asm/disabled-features.h8
-rw-r--r--tools/bpf/bpftool/Documentation/Makefile2
-rw-r--r--tools/bpf/bpftool/Makefile7
-rw-r--r--tools/bpf/bpftool/main.c36
-rw-r--r--tools/bpf/bpftool/main.h5
-rw-r--r--tools/bpf/bpftool/map.c8
-rw-r--r--tools/bpf/bpftool/prog.c2
-rw-r--r--tools/hv/hv_kvp_daemon.c70
-rw-r--r--tools/include/linux/compiler.h21
-rw-r--r--tools/include/linux/kmemcheck.h1
-rw-r--r--tools/include/linux/lockdep.h1
-rw-r--r--tools/include/uapi/asm-generic/bpf_perf_event.h9
-rw-r--r--tools/include/uapi/asm-generic/mman.h1
-rw-r--r--tools/include/uapi/asm/bpf_perf_event.h7
-rw-r--r--tools/include/uapi/drm/drm.h41
-rw-r--r--tools/include/uapi/drm/i915_drm.h33
-rw-r--r--tools/include/uapi/linux/bpf_perf_event.h6
-rw-r--r--tools/include/uapi/linux/kcmp.h1
-rw-r--r--tools/include/uapi/linux/kvm.h5
-rw-r--r--tools/include/uapi/linux/perf_event.h1
-rw-r--r--tools/include/uapi/linux/prctl.h10
-rwxr-xr-xtools/kvm/kvm_stat/kvm_stat74
-rw-r--r--tools/kvm/kvm_stat/kvm_stat.txt4
-rw-r--r--tools/objtool/Makefile10
-rw-r--r--tools/objtool/arch/x86/decode.c2
-rw-r--r--tools/objtool/arch/x86/lib/x86-opcode-map.txt15
-rw-r--r--tools/objtool/builtin-orc.c4
-rw-r--r--tools/objtool/check.c69
-rw-r--r--tools/objtool/check.h2
-rw-r--r--tools/objtool/elf.c4
-rw-r--r--tools/objtool/orc_dump.c7
-rw-r--r--tools/objtool/orc_gen.c2
-rw-r--r--tools/perf/Makefile.config9
-rw-r--r--tools/perf/arch/s390/Makefile1
-rw-r--r--tools/perf/arch/s390/include/perf_regs.h2
-rw-r--r--tools/perf/arch/s390/util/dwarf-regs.c32
-rw-r--r--tools/perf/bench/numa.c56
-rw-r--r--tools/perf/builtin-help.c4
-rw-r--r--tools/perf/builtin-record.c42
-rw-r--r--tools/perf/builtin-report.c3
-rw-r--r--tools/perf/builtin-script.c31
-rw-r--r--tools/perf/builtin-top.c36
-rw-r--r--tools/perf/builtin-trace.c6
-rwxr-xr-xtools/perf/check-headers.sh2
-rw-r--r--tools/perf/jvmti/jvmti_agent.c16
-rw-r--r--tools/perf/jvmti/jvmti_agent.h7
-rw-r--r--tools/perf/jvmti/libjvmti.c147
-rwxr-xr-xtools/perf/tests/shell/trace+probe_libc_inet_pton.sh7
-rwxr-xr-xtools/perf/tests/shell/trace+probe_vfs_getname.sh6
-rw-r--r--tools/perf/tests/task-exit.c4
-rw-r--r--tools/perf/trace/beauty/mmap.c3
-rw-r--r--tools/perf/util/annotate.c18
-rw-r--r--tools/perf/util/evlist.c14
-rw-r--r--tools/perf/util/evlist.h2
-rw-r--r--tools/perf/util/evsel.c14
-rw-r--r--tools/perf/util/evsel.h1
-rw-r--r--tools/perf/util/intel-pt-decoder/inat.h10
-rw-r--r--tools/perf/util/intel-pt-decoder/x86-opcode-map.txt15
-rw-r--r--tools/perf/util/machine.c3
-rw-r--r--tools/perf/util/mmap.h2
-rw-r--r--tools/perf/util/parse-events.c2
-rw-r--r--tools/perf/util/parse-events.h3
-rw-r--r--tools/perf/util/pmu.c5
-rw-r--r--tools/testing/selftests/bpf/Makefile5
-rw-r--r--tools/testing/selftests/bpf/test_align.c22
-rw-r--r--tools/testing/selftests/bpf/test_progs.c8
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c819
-rw-r--r--tools/testing/selftests/bpf/test_verifier_log.c7
-rw-r--r--tools/testing/selftests/net/config1
-rw-r--r--tools/testing/selftests/x86/Makefile2
-rw-r--r--tools/testing/selftests/x86/ldt_gdt.c12
-rw-r--r--tools/testing/selftests/x86/test_vsyscall.c500
-rw-r--r--tools/usb/usbip/libsrc/vhci_driver.c24
-rw-r--r--tools/usb/usbip/src/utils.c9
-rw-r--r--tools/virtio/ringtest/ptr_ring.c29
-rw-r--r--tools/vm/slabinfo-gnuplot.sh2
-rw-r--r--virt/kvm/arm/arch_timer.c43
-rw-r--r--virt/kvm/arm/arm.c9
-rw-r--r--virt/kvm/arm/hyp/timer-sr.c48
-rw-r--r--virt/kvm/arm/hyp/vgic-v2-sr.c4
-rw-r--r--virt/kvm/arm/mmio.c6
-rw-r--r--virt/kvm/arm/mmu.c12
-rw-r--r--virt/kvm/arm/vgic/vgic-init.c8
-rw-r--r--virt/kvm/arm/vgic/vgic-irqfd.c3
-rw-r--r--virt/kvm/arm/vgic/vgic-its.c4
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c2
-rw-r--r--virt/kvm/arm/vgic/vgic-v4.c8
-rw-r--r--virt/kvm/arm/vgic/vgic.c8
-rw-r--r--virt/kvm/kvm_main.c8
2113 files changed, 25692 insertions, 13477 deletions
diff --git a/.mailmap b/.mailmap
index 1469ff0d3f4d..e18cab73e209 100644
--- a/.mailmap
+++ b/.mailmap
@@ -107,6 +107,7 @@ Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
107Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com> 107Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
108Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com> 108Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
109Mark Brown <broonie@sirena.org.uk> 109Mark Brown <broonie@sirena.org.uk>
110Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com>
110Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com> 111Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
111Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com> 112Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
112Matthieu CASTET <castet.matthieu@free.fr> 113Matthieu CASTET <castet.matthieu@free.fr>
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index d6d862db3b5d..bfd29bc8d37a 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -375,3 +375,19 @@ Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
375Description: information about CPUs heterogeneity. 375Description: information about CPUs heterogeneity.
376 376
377 cpu_capacity: capacity of cpu#. 377 cpu_capacity: capacity of cpu#.
378
379What: /sys/devices/system/cpu/vulnerabilities
380 /sys/devices/system/cpu/vulnerabilities/meltdown
381 /sys/devices/system/cpu/vulnerabilities/spectre_v1
382 /sys/devices/system/cpu/vulnerabilities/spectre_v2
383Date: January 2018
384Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
385Description: Information about CPU vulnerabilities
386
387 The files are named after the code names of CPU
388 vulnerabilities. The output of those files reflects the
389 state of the CPUs in the system. Possible output values:
390
391 "Not affected" CPU is not affected by the vulnerability
392 "Vulnerable" CPU is affected and no mitigation in effect
393 "Mitigation: $M" CPU is affected and mitigation $M is in effect
diff --git a/Documentation/admin-guide/kernel-parameters.rst b/Documentation/admin-guide/kernel-parameters.rst
index b2598cc9834c..7242cbda15dd 100644
--- a/Documentation/admin-guide/kernel-parameters.rst
+++ b/Documentation/admin-guide/kernel-parameters.rst
@@ -109,6 +109,7 @@ parameter is applicable::
109 IPV6 IPv6 support is enabled. 109 IPV6 IPv6 support is enabled.
110 ISAPNP ISA PnP code is enabled. 110 ISAPNP ISA PnP code is enabled.
111 ISDN Appropriate ISDN support is enabled. 111 ISDN Appropriate ISDN support is enabled.
112 ISOL CPU Isolation is enabled.
112 JOY Appropriate joystick support is enabled. 113 JOY Appropriate joystick support is enabled.
113 KGDB Kernel debugger support is enabled. 114 KGDB Kernel debugger support is enabled.
114 KVM Kernel Virtual Machine support is enabled. 115 KVM Kernel Virtual Machine support is enabled.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 6571fbfdb2a1..46b26bfee27b 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -328,11 +328,15 @@
328 not play well with APC CPU idle - disable it if you have 328 not play well with APC CPU idle - disable it if you have
329 APC and your system crashes randomly. 329 APC and your system crashes randomly.
330 330
331 apic= [APIC,X86-32] Advanced Programmable Interrupt Controller 331 apic= [APIC,X86] Advanced Programmable Interrupt Controller
332 Change the output verbosity whilst booting 332 Change the output verbosity whilst booting
333 Format: { quiet (default) | verbose | debug } 333 Format: { quiet (default) | verbose | debug }
334 Change the amount of debugging information output 334 Change the amount of debugging information output
335 when initialising the APIC and IO-APIC components. 335 when initialising the APIC and IO-APIC components.
336 For X86-32, this can also be used to specify an APIC
337 driver name.
338 Format: apic=driver_name
339 Examples: apic=bigsmp
336 340
337 apic_extnmi= [APIC,X86] External NMI delivery setting 341 apic_extnmi= [APIC,X86] External NMI delivery setting
338 Format: { bsp (default) | all | none } 342 Format: { bsp (default) | all | none }
@@ -709,9 +713,6 @@
709 It will be ignored when crashkernel=X,high is not used 713 It will be ignored when crashkernel=X,high is not used
710 or memory reserved is below 4G. 714 or memory reserved is below 4G.
711 715
712 crossrelease_fullstack
713 [KNL] Allow to record full stack trace in cross-release
714
715 cryptomgr.notests 716 cryptomgr.notests
716 [KNL] Disable crypto self-tests 717 [KNL] Disable crypto self-tests
717 718
@@ -1737,7 +1738,7 @@
1737 isapnp= [ISAPNP] 1738 isapnp= [ISAPNP]
1738 Format: <RDP>,<reset>,<pci_scan>,<verbosity> 1739 Format: <RDP>,<reset>,<pci_scan>,<verbosity>
1739 1740
1740 isolcpus= [KNL,SMP] Isolate a given set of CPUs from disturbance. 1741 isolcpus= [KNL,SMP,ISOL] Isolate a given set of CPUs from disturbance.
1741 [Deprecated - use cpusets instead] 1742 [Deprecated - use cpusets instead]
1742 Format: [flag-list,]<cpu-list> 1743 Format: [flag-list,]<cpu-list>
1743 1744
@@ -2622,6 +2623,11 @@
2622 nosmt [KNL,S390] Disable symmetric multithreading (SMT). 2623 nosmt [KNL,S390] Disable symmetric multithreading (SMT).
2623 Equivalent to smt=1. 2624 Equivalent to smt=1.
2624 2625
2626 nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2
2627 (indirect branch prediction) vulnerability. System may
2628 allow data leaks with this option, which is equivalent
2629 to spectre_v2=off.
2630
2625 noxsave [BUGS=X86] Disables x86 extended register state save 2631 noxsave [BUGS=X86] Disables x86 extended register state save
2626 and restore using xsave. The kernel will fallback to 2632 and restore using xsave. The kernel will fallback to
2627 enabling legacy floating-point and sse state. 2633 enabling legacy floating-point and sse state.
@@ -2662,7 +2668,7 @@
2662 Valid arguments: on, off 2668 Valid arguments: on, off
2663 Default: on 2669 Default: on
2664 2670
2665 nohz_full= [KNL,BOOT] 2671 nohz_full= [KNL,BOOT,SMP,ISOL]
2666 The argument is a cpu list, as described above. 2672 The argument is a cpu list, as described above.
2667 In kernels built with CONFIG_NO_HZ_FULL=y, set 2673 In kernels built with CONFIG_NO_HZ_FULL=y, set
2668 the specified list of CPUs whose tick will be stopped 2674 the specified list of CPUs whose tick will be stopped
@@ -3094,6 +3100,12 @@
3094 pcie_scan_all Scan all possible PCIe devices. Otherwise we 3100 pcie_scan_all Scan all possible PCIe devices. Otherwise we
3095 only look for one device below a PCIe downstream 3101 only look for one device below a PCIe downstream
3096 port. 3102 port.
3103 big_root_window Try to add a big 64bit memory window to the PCIe
3104 root complex on AMD CPUs. Some GFX hardware
3105 can resize a BAR to allow access to all VRAM.
3106 Adding the window is slightly risky (it may
3107 conflict with unreported devices), so this
3108 taints the kernel.
3097 3109
3098 pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power 3110 pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power
3099 Management. 3111 Management.
@@ -3282,6 +3294,21 @@
3282 pt. [PARIDE] 3294 pt. [PARIDE]
3283 See Documentation/blockdev/paride.txt. 3295 See Documentation/blockdev/paride.txt.
3284 3296
3297 pti= [X86_64] Control Page Table Isolation of user and
3298 kernel address spaces. Disabling this feature
3299 removes hardening, but improves performance of
3300 system calls and interrupts.
3301
3302 on - unconditionally enable
3303 off - unconditionally disable
3304 auto - kernel detects whether your CPU model is
3305 vulnerable to issues that PTI mitigates
3306
3307 Not specifying this option is equivalent to pti=auto.
3308
3309 nopti [X86_64]
3310 Equivalent to pti=off
3311
3285 pty.legacy_count= 3312 pty.legacy_count=
3286 [KNL] Number of legacy pty's. Overwrites compiled-in 3313 [KNL] Number of legacy pty's. Overwrites compiled-in
3287 default number. 3314 default number.
@@ -3931,6 +3958,29 @@
3931 sonypi.*= [HW] Sony Programmable I/O Control Device driver 3958 sonypi.*= [HW] Sony Programmable I/O Control Device driver
3932 See Documentation/laptops/sonypi.txt 3959 See Documentation/laptops/sonypi.txt
3933 3960
3961 spectre_v2= [X86] Control mitigation of Spectre variant 2
3962 (indirect branch speculation) vulnerability.
3963
3964 on - unconditionally enable
3965 off - unconditionally disable
3966 auto - kernel detects whether your CPU model is
3967 vulnerable
3968
3969 Selecting 'on' will, and 'auto' may, choose a
3970 mitigation method at run time according to the
3971 CPU, the available microcode, the setting of the
3972 CONFIG_RETPOLINE configuration option, and the
3973 compiler with which the kernel was built.
3974
3975 Specific mitigations can also be selected manually:
3976
3977 retpoline - replace indirect branches
3978 retpoline,generic - google's original retpoline
3979 retpoline,amd - AMD-specific minimal thunk
3980
3981 Not specifying this option is equivalent to
3982 spectre_v2=auto.
3983
3934 spia_io_base= [HW,MTD] 3984 spia_io_base= [HW,MTD]
3935 spia_fio_base= 3985 spia_fio_base=
3936 spia_pedr= 3986 spia_pedr=
diff --git a/Documentation/admin-guide/thunderbolt.rst b/Documentation/admin-guide/thunderbolt.rst
index de50a8561774..9b55952039a6 100644
--- a/Documentation/admin-guide/thunderbolt.rst
+++ b/Documentation/admin-guide/thunderbolt.rst
@@ -230,7 +230,7 @@ If supported by your machine this will be exposed by the WMI bus with
230a sysfs attribute called "force_power". 230a sysfs attribute called "force_power".
231 231
232For example the intel-wmi-thunderbolt driver exposes this attribute in: 232For example the intel-wmi-thunderbolt driver exposes this attribute in:
233 /sys/devices/platform/PNP0C14:00/wmi_bus/wmi_bus-PNP0C14:00/86CCFD48-205E-4A77-9C48-2021CBEDE341/force_power 233 /sys/bus/wmi/devices/86CCFD48-205E-4A77-9C48-2021CBEDE341/force_power
234 234
235 To force the power to on, write 1 to this attribute file. 235 To force the power to on, write 1 to this attribute file.
236 To disable force power, write 0 to this attribute file. 236 To disable force power, write 0 to this attribute file.
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index 304bf22bb83c..fc1c884fea10 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -75,3 +75,4 @@ stable kernels.
75| Qualcomm Tech. | Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 | 75| Qualcomm Tech. | Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 |
76| Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 | 76| Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 |
77| Qualcomm Tech. | QDF2400 ITS | E0065 | QCOM_QDF2400_ERRATUM_0065 | 77| Qualcomm Tech. | QDF2400 ITS | E0065 | QCOM_QDF2400_ERRATUM_0065 |
78| Qualcomm Tech. | Falkor v{1,2} | E1041 | QCOM_FALKOR_ERRATUM_1041 |
diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt
index 779211fbb69f..2cddab7efb20 100644
--- a/Documentation/cgroup-v2.txt
+++ b/Documentation/cgroup-v2.txt
@@ -898,6 +898,13 @@ controller implements weight and absolute bandwidth limit models for
898normal scheduling policy and absolute bandwidth allocation model for 898normal scheduling policy and absolute bandwidth allocation model for
899realtime scheduling policy. 899realtime scheduling policy.
900 900
901WARNING: cgroup2 doesn't yet support control of realtime processes and
902the cpu controller can only be enabled when all RT processes are in
903the root cgroup. Be aware that system management software may already
904have placed RT processes into nonroot cgroups during the system boot
905process, and these processes may need to be moved to the root cgroup
906before the cpu controller can be enabled.
907
901 908
902CPU Interface Files 909CPU Interface Files
903~~~~~~~~~~~~~~~~~~~ 910~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/core-api/genericirq.rst b/Documentation/core-api/genericirq.rst
index 0054bd48be84..4da67b65cecf 100644
--- a/Documentation/core-api/genericirq.rst
+++ b/Documentation/core-api/genericirq.rst
@@ -225,9 +225,9 @@ interrupts.
225 225
226The following control flow is implemented (simplified excerpt):: 226The following control flow is implemented (simplified excerpt)::
227 227
228 :c:func:`desc->irq_data.chip->irq_mask_ack`; 228 desc->irq_data.chip->irq_mask_ack();
229 handle_irq_event(desc->action); 229 handle_irq_event(desc->action);
230 :c:func:`desc->irq_data.chip->irq_unmask`; 230 desc->irq_data.chip->irq_unmask();
231 231
232 232
233Default Fast EOI IRQ flow handler 233Default Fast EOI IRQ flow handler
@@ -239,7 +239,7 @@ which only need an EOI at the end of the handler.
239The following control flow is implemented (simplified excerpt):: 239The following control flow is implemented (simplified excerpt)::
240 240
241 handle_irq_event(desc->action); 241 handle_irq_event(desc->action);
242 :c:func:`desc->irq_data.chip->irq_eoi`; 242 desc->irq_data.chip->irq_eoi();
243 243
244 244
245Default Edge IRQ flow handler 245Default Edge IRQ flow handler
@@ -251,15 +251,15 @@ interrupts.
251The following control flow is implemented (simplified excerpt):: 251The following control flow is implemented (simplified excerpt)::
252 252
253 if (desc->status & running) { 253 if (desc->status & running) {
254 :c:func:`desc->irq_data.chip->irq_mask_ack`; 254 desc->irq_data.chip->irq_mask_ack();
255 desc->status |= pending | masked; 255 desc->status |= pending | masked;
256 return; 256 return;
257 } 257 }
258 :c:func:`desc->irq_data.chip->irq_ack`; 258 desc->irq_data.chip->irq_ack();
259 desc->status |= running; 259 desc->status |= running;
260 do { 260 do {
261 if (desc->status & masked) 261 if (desc->status & masked)
262 :c:func:`desc->irq_data.chip->irq_unmask`; 262 desc->irq_data.chip->irq_unmask();
263 desc->status &= ~pending; 263 desc->status &= ~pending;
264 handle_irq_event(desc->action); 264 handle_irq_event(desc->action);
265 } while (status & pending); 265 } while (status & pending);
@@ -293,10 +293,10 @@ simplified version without locking.
293The following control flow is implemented (simplified excerpt):: 293The following control flow is implemented (simplified excerpt)::
294 294
295 if (desc->irq_data.chip->irq_ack) 295 if (desc->irq_data.chip->irq_ack)
296 :c:func:`desc->irq_data.chip->irq_ack`; 296 desc->irq_data.chip->irq_ack();
297 handle_irq_event(desc->action); 297 handle_irq_event(desc->action);
298 if (desc->irq_data.chip->irq_eoi) 298 if (desc->irq_data.chip->irq_eoi)
299 :c:func:`desc->irq_data.chip->irq_eoi`; 299 desc->irq_data.chip->irq_eoi();
300 300
301 301
302EOI Edge IRQ flow handler 302EOI Edge IRQ flow handler
diff --git a/Documentation/devicetree/bindings/arm/ccn.txt b/Documentation/devicetree/bindings/arm/ccn.txt
index 29801456c9ee..43b5a71a5a9d 100644
--- a/Documentation/devicetree/bindings/arm/ccn.txt
+++ b/Documentation/devicetree/bindings/arm/ccn.txt
@@ -15,7 +15,7 @@ Required properties:
15 15
16Example: 16Example:
17 17
18 ccn@0x2000000000 { 18 ccn@2000000000 {
19 compatible = "arm,ccn-504"; 19 compatible = "arm,ccn-504";
20 reg = <0x20 0x00000000 0 0x1000000>; 20 reg = <0x20 0x00000000 0 0x1000000>;
21 interrupts = <0 181 4>; 21 interrupts = <0 181 4>;
diff --git a/Documentation/devicetree/bindings/arm/omap/crossbar.txt b/Documentation/devicetree/bindings/arm/omap/crossbar.txt
index bb5727ae004a..ecb360ed0e33 100644
--- a/Documentation/devicetree/bindings/arm/omap/crossbar.txt
+++ b/Documentation/devicetree/bindings/arm/omap/crossbar.txt
@@ -49,7 +49,7 @@ An interrupt consumer on an SoC using crossbar will use:
49 interrupts = <GIC_SPI request_number interrupt_level> 49 interrupts = <GIC_SPI request_number interrupt_level>
50 50
51Example: 51Example:
52 device_x@0x4a023000 { 52 device_x@4a023000 {
53 /* Crossbar 8 used */ 53 /* Crossbar 8 used */
54 interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>; 54 interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
55 ... 55 ...
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-mc.txt b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-mc.txt
index 866d93421eba..f9632bacbd04 100644
--- a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-mc.txt
+++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-mc.txt
@@ -8,7 +8,7 @@ Required properties:
8- interrupts : Should contain MC General interrupt. 8- interrupts : Should contain MC General interrupt.
9 9
10Example: 10Example:
11 memory-controller@0x7000f000 { 11 memory-controller@7000f000 {
12 compatible = "nvidia,tegra20-mc"; 12 compatible = "nvidia,tegra20-mc";
13 reg = <0x7000f000 0x024 13 reg = <0x7000f000 0x024
14 0x7000f03c 0x3c4>; 14 0x7000f03c 0x3c4>;
diff --git a/Documentation/devicetree/bindings/clock/axi-clkgen.txt b/Documentation/devicetree/bindings/clock/axi-clkgen.txt
index fb40da303d25..aca94fe9416f 100644
--- a/Documentation/devicetree/bindings/clock/axi-clkgen.txt
+++ b/Documentation/devicetree/bindings/clock/axi-clkgen.txt
@@ -17,7 +17,7 @@ Optional properties:
17- clock-output-names : From common clock binding. 17- clock-output-names : From common clock binding.
18 18
19Example: 19Example:
20 clock@0xff000000 { 20 clock@ff000000 {
21 compatible = "adi,axi-clkgen"; 21 compatible = "adi,axi-clkgen";
22 #clock-cells = <0>; 22 #clock-cells = <0>;
23 reg = <0xff000000 0x1000>; 23 reg = <0xff000000 0x1000>;
diff --git a/Documentation/devicetree/bindings/clock/brcm,bcm2835-aux-clock.txt b/Documentation/devicetree/bindings/clock/brcm,bcm2835-aux-clock.txt
index 7a837d2182ac..4acfc8f641b6 100644
--- a/Documentation/devicetree/bindings/clock/brcm,bcm2835-aux-clock.txt
+++ b/Documentation/devicetree/bindings/clock/brcm,bcm2835-aux-clock.txt
@@ -23,7 +23,7 @@ Example:
23 clocks = <&clk_osc>; 23 clocks = <&clk_osc>;
24 }; 24 };
25 25
26 aux: aux@0x7e215004 { 26 aux: aux@7e215004 {
27 compatible = "brcm,bcm2835-aux"; 27 compatible = "brcm,bcm2835-aux";
28 #clock-cells = <1>; 28 #clock-cells = <1>;
29 reg = <0x7e215000 0x8>; 29 reg = <0x7e215000 0x8>;
diff --git a/Documentation/devicetree/bindings/clock/exynos4-clock.txt b/Documentation/devicetree/bindings/clock/exynos4-clock.txt
index bc61c952cb0b..17bb11365354 100644
--- a/Documentation/devicetree/bindings/clock/exynos4-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos4-clock.txt
@@ -24,7 +24,7 @@ tree sources.
24 24
25Example 1: An example of a clock controller node is listed below. 25Example 1: An example of a clock controller node is listed below.
26 26
27 clock: clock-controller@0x10030000 { 27 clock: clock-controller@10030000 {
28 compatible = "samsung,exynos4210-clock"; 28 compatible = "samsung,exynos4210-clock";
29 reg = <0x10030000 0x20000>; 29 reg = <0x10030000 0x20000>;
30 #clock-cells = <1>; 30 #clock-cells = <1>;
diff --git a/Documentation/devicetree/bindings/clock/exynos5250-clock.txt b/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
index 536eacd1063f..aff266a12eeb 100644
--- a/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
@@ -22,7 +22,7 @@ tree sources.
22 22
23Example 1: An example of a clock controller node is listed below. 23Example 1: An example of a clock controller node is listed below.
24 24
25 clock: clock-controller@0x10010000 { 25 clock: clock-controller@10010000 {
26 compatible = "samsung,exynos5250-clock"; 26 compatible = "samsung,exynos5250-clock";
27 reg = <0x10010000 0x30000>; 27 reg = <0x10010000 0x30000>;
28 #clock-cells = <1>; 28 #clock-cells = <1>;
diff --git a/Documentation/devicetree/bindings/clock/exynos5410-clock.txt b/Documentation/devicetree/bindings/clock/exynos5410-clock.txt
index 4527de3ea205..c68b0d29b3d0 100644
--- a/Documentation/devicetree/bindings/clock/exynos5410-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos5410-clock.txt
@@ -30,7 +30,7 @@ Example 1: An example of a clock controller node is listed below.
30 #clock-cells = <0>; 30 #clock-cells = <0>;
31 }; 31 };
32 32
33 clock: clock-controller@0x10010000 { 33 clock: clock-controller@10010000 {
34 compatible = "samsung,exynos5410-clock"; 34 compatible = "samsung,exynos5410-clock";
35 reg = <0x10010000 0x30000>; 35 reg = <0x10010000 0x30000>;
36 #clock-cells = <1>; 36 #clock-cells = <1>;
diff --git a/Documentation/devicetree/bindings/clock/exynos5420-clock.txt b/Documentation/devicetree/bindings/clock/exynos5420-clock.txt
index d54f42cf0440..717a7b1531c7 100644
--- a/Documentation/devicetree/bindings/clock/exynos5420-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos5420-clock.txt
@@ -23,7 +23,7 @@ tree sources.
23 23
24Example 1: An example of a clock controller node is listed below. 24Example 1: An example of a clock controller node is listed below.
25 25
26 clock: clock-controller@0x10010000 { 26 clock: clock-controller@10010000 {
27 compatible = "samsung,exynos5420-clock"; 27 compatible = "samsung,exynos5420-clock";
28 reg = <0x10010000 0x30000>; 28 reg = <0x10010000 0x30000>;
29 #clock-cells = <1>; 29 #clock-cells = <1>;
diff --git a/Documentation/devicetree/bindings/clock/exynos5440-clock.txt b/Documentation/devicetree/bindings/clock/exynos5440-clock.txt
index 5f7005f73058..c7d227c31e95 100644
--- a/Documentation/devicetree/bindings/clock/exynos5440-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos5440-clock.txt
@@ -21,7 +21,7 @@ tree sources.
21 21
22Example: An example of a clock controller node is listed below. 22Example: An example of a clock controller node is listed below.
23 23
24 clock: clock-controller@0x10010000 { 24 clock: clock-controller@10010000 {
25 compatible = "samsung,exynos5440-clock"; 25 compatible = "samsung,exynos5440-clock";
26 reg = <0x160000 0x10000>; 26 reg = <0x160000 0x10000>;
27 #clock-cells = <1>; 27 #clock-cells = <1>;
diff --git a/Documentation/devicetree/bindings/clock/ti-keystone-pllctrl.txt b/Documentation/devicetree/bindings/clock/ti-keystone-pllctrl.txt
index 3e6a81e99804..c35cb6c4af4d 100644
--- a/Documentation/devicetree/bindings/clock/ti-keystone-pllctrl.txt
+++ b/Documentation/devicetree/bindings/clock/ti-keystone-pllctrl.txt
@@ -14,7 +14,7 @@ Required properties:
14 14
15Example: 15Example:
16 16
17pllctrl: pll-controller@0x02310000 { 17pllctrl: pll-controller@02310000 {
18 compatible = "ti,keystone-pllctrl", "syscon"; 18 compatible = "ti,keystone-pllctrl", "syscon";
19 reg = <0x02310000 0x200>; 19 reg = <0x02310000 0x200>;
20}; 20};
diff --git a/Documentation/devicetree/bindings/clock/zx296702-clk.txt b/Documentation/devicetree/bindings/clock/zx296702-clk.txt
index e85ecb510d56..5c91c9e4f1be 100644
--- a/Documentation/devicetree/bindings/clock/zx296702-clk.txt
+++ b/Documentation/devicetree/bindings/clock/zx296702-clk.txt
@@ -20,13 +20,13 @@ ID in its "clocks" phandle cell. See include/dt-bindings/clock/zx296702-clock.h
20for the full list of zx296702 clock IDs. 20for the full list of zx296702 clock IDs.
21 21
22 22
23topclk: topcrm@0x09800000 { 23topclk: topcrm@09800000 {
24 compatible = "zte,zx296702-topcrm-clk"; 24 compatible = "zte,zx296702-topcrm-clk";
25 reg = <0x09800000 0x1000>; 25 reg = <0x09800000 0x1000>;
26 #clock-cells = <1>; 26 #clock-cells = <1>;
27}; 27};
28 28
29uart0: serial@0x09405000 { 29uart0: serial@09405000 {
30 compatible = "zte,zx296702-uart"; 30 compatible = "zte,zx296702-uart";
31 reg = <0x09405000 0x1000>; 31 reg = <0x09405000 0x1000>;
32 interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>; 32 interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
index 7aef0eae58d4..76aec8a3724d 100644
--- a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
+++ b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
@@ -456,7 +456,7 @@ System ON/OFF key driver
456 Definition: this is phandle to the register map node. 456 Definition: this is phandle to the register map node.
457 457
458EXAMPLE: 458EXAMPLE:
459 snvs-pwrkey@0x020cc000 { 459 snvs-pwrkey@020cc000 {
460 compatible = "fsl,sec-v4.0-pwrkey"; 460 compatible = "fsl,sec-v4.0-pwrkey";
461 regmap = <&snvs>; 461 regmap = <&snvs>;
462 interrupts = <0 4 0x4> 462 interrupts = <0 4 0x4>
@@ -545,7 +545,7 @@ FULL EXAMPLE
545 interrupts = <93 2>; 545 interrupts = <93 2>;
546 }; 546 };
547 547
548 snvs-pwrkey@0x020cc000 { 548 snvs-pwrkey@020cc000 {
549 compatible = "fsl,sec-v4.0-pwrkey"; 549 compatible = "fsl,sec-v4.0-pwrkey";
550 regmap = <&sec_mon>; 550 regmap = <&sec_mon>;
551 interrupts = <0 4 0x4>; 551 interrupts = <0 4 0x4>;
diff --git a/Documentation/devicetree/bindings/devfreq/event/rockchip-dfi.txt b/Documentation/devicetree/bindings/devfreq/event/rockchip-dfi.txt
index 001dd63979a9..148191b0fc15 100644
--- a/Documentation/devicetree/bindings/devfreq/event/rockchip-dfi.txt
+++ b/Documentation/devicetree/bindings/devfreq/event/rockchip-dfi.txt
@@ -9,7 +9,7 @@ Required properties:
9- clock-names : the name of clock used by the DFI, must be "pclk_ddr_mon"; 9- clock-names : the name of clock used by the DFI, must be "pclk_ddr_mon";
10 10
11Example: 11Example:
12 dfi: dfi@0xff630000 { 12 dfi: dfi@ff630000 {
13 compatible = "rockchip,rk3399-dfi"; 13 compatible = "rockchip,rk3399-dfi";
14 reg = <0x00 0xff630000 0x00 0x4000>; 14 reg = <0x00 0xff630000 0x00 0x4000>;
15 rockchip,pmu = <&pmugrf>; 15 rockchip,pmu = <&pmugrf>;
diff --git a/Documentation/devicetree/bindings/display/atmel,lcdc.txt b/Documentation/devicetree/bindings/display/atmel,lcdc.txt
index 1a21202778ee..acb5a0132127 100644
--- a/Documentation/devicetree/bindings/display/atmel,lcdc.txt
+++ b/Documentation/devicetree/bindings/display/atmel,lcdc.txt
@@ -27,7 +27,7 @@ Optional properties:
27 27
28Example: 28Example:
29 29
30 fb0: fb@0x00500000 { 30 fb0: fb@00500000 {
31 compatible = "atmel,at91sam9g45-lcdc"; 31 compatible = "atmel,at91sam9g45-lcdc";
32 reg = <0x00500000 0x1000>; 32 reg = <0x00500000 0x1000>;
33 interrupts = <23 3 0>; 33 interrupts = <23 3 0>;
@@ -41,7 +41,7 @@ Example:
41 41
42Example for fixed framebuffer memory: 42Example for fixed framebuffer memory:
43 43
44 fb0: fb@0x00500000 { 44 fb0: fb@00500000 {
45 compatible = "atmel,at91sam9263-lcdc"; 45 compatible = "atmel,at91sam9263-lcdc";
46 reg = <0x00700000 0x1000 0x70000000 0x200000>; 46 reg = <0x00700000 0x1000 0x70000000 0x200000>;
47 [...] 47 [...]
diff --git a/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt b/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
index 55492c264d17..b3408cc57be6 100644
--- a/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
+++ b/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
@@ -73,7 +73,7 @@ Hypervisor OS configuration:
73 max-read-transactions = <31>; 73 max-read-transactions = <31>;
74 channel-reset-timeout-cycles = <0x500>; 74 channel-reset-timeout-cycles = <0x500>;
75 75
76 hidma_24: dma-controller@0x5c050000 { 76 hidma_24: dma-controller@5c050000 {
77 compatible = "qcom,hidma-1.0"; 77 compatible = "qcom,hidma-1.0";
78 reg = <0 0x5c050000 0x0 0x1000>, 78 reg = <0 0x5c050000 0x0 0x1000>,
79 <0 0x5c0b0000 0x0 0x1000>; 79 <0 0x5c0b0000 0x0 0x1000>;
@@ -85,7 +85,7 @@ Hypervisor OS configuration:
85 85
86Guest OS configuration: 86Guest OS configuration:
87 87
88 hidma_24: dma-controller@0x5c050000 { 88 hidma_24: dma-controller@5c050000 {
89 compatible = "qcom,hidma-1.0"; 89 compatible = "qcom,hidma-1.0";
90 reg = <0 0x5c050000 0x0 0x1000>, 90 reg = <0 0x5c050000 0x0 0x1000>,
91 <0 0x5c0b0000 0x0 0x1000>; 91 <0 0x5c0b0000 0x0 0x1000>;
diff --git a/Documentation/devicetree/bindings/dma/zxdma.txt b/Documentation/devicetree/bindings/dma/zxdma.txt
index abec59f35fde..0ab80f69e566 100644
--- a/Documentation/devicetree/bindings/dma/zxdma.txt
+++ b/Documentation/devicetree/bindings/dma/zxdma.txt
@@ -13,7 +13,7 @@ Required properties:
13Example: 13Example:
14 14
15Controller: 15Controller:
16 dma: dma-controller@0x09c00000{ 16 dma: dma-controller@09c00000{
17 compatible = "zte,zx296702-dma"; 17 compatible = "zte,zx296702-dma";
18 reg = <0x09c00000 0x1000>; 18 reg = <0x09c00000 0x1000>;
19 clocks = <&topclk ZX296702_DMA_ACLK>; 19 clocks = <&topclk ZX296702_DMA_ACLK>;
diff --git a/Documentation/devicetree/bindings/eeprom/at25.txt b/Documentation/devicetree/bindings/eeprom/at25.txt
index 1d3447165c37..e823d90b802f 100644
--- a/Documentation/devicetree/bindings/eeprom/at25.txt
+++ b/Documentation/devicetree/bindings/eeprom/at25.txt
@@ -1,7 +1,12 @@
1EEPROMs (SPI) compatible with Atmel at25. 1EEPROMs (SPI) compatible with Atmel at25.
2 2
3Required properties: 3Required properties:
4- compatible : "atmel,at25". 4- compatible : Should be "<vendor>,<type>", and generic value "atmel,at25".
5 Example "<vendor>,<type>" values:
6 "microchip,25lc040"
7 "st,m95m02"
8 "st,m95256"
9
5- reg : chip select number 10- reg : chip select number
6- spi-max-frequency : max spi frequency to use 11- spi-max-frequency : max spi frequency to use
7- pagesize : size of the eeprom page 12- pagesize : size of the eeprom page
@@ -13,7 +18,7 @@ Optional properties:
13- spi-cpol : SPI inverse clock polarity, as per spi-bus bindings. 18- spi-cpol : SPI inverse clock polarity, as per spi-bus bindings.
14- read-only : this parameter-less property disables writes to the eeprom 19- read-only : this parameter-less property disables writes to the eeprom
15 20
16Obsolete legacy properties are can be used in place of "size", "pagesize", 21Obsolete legacy properties can be used in place of "size", "pagesize",
17"address-width", and "read-only": 22"address-width", and "read-only":
18- at25,byte-len : total eeprom size in bytes 23- at25,byte-len : total eeprom size in bytes
19- at25,addr-mode : addr-mode flags, as defined in include/linux/spi/eeprom.h 24- at25,addr-mode : addr-mode flags, as defined in include/linux/spi/eeprom.h
@@ -22,8 +27,8 @@ Obsolete legacy properties are can be used in place of "size", "pagesize",
22Additional compatible properties are also allowed. 27Additional compatible properties are also allowed.
23 28
24Example: 29Example:
25 at25@0 { 30 eeprom@0 {
26 compatible = "atmel,at25", "st,m95256"; 31 compatible = "st,m95256", "atmel,at25";
27 reg = <0> 32 reg = <0>
28 spi-max-frequency = <5000000>; 33 spi-max-frequency = <5000000>;
29 spi-cpha; 34 spi-cpha;
diff --git a/Documentation/devicetree/bindings/gpio/gpio-altera.txt b/Documentation/devicetree/bindings/gpio/gpio-altera.txt
index 826a7208ca93..146e554b3c67 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-altera.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-altera.txt
@@ -30,7 +30,7 @@ Optional properties:
30 30
31Example: 31Example:
32 32
33gpio_altr: gpio@0xff200000 { 33gpio_altr: gpio@ff200000 {
34 compatible = "altr,pio-1.0"; 34 compatible = "altr,pio-1.0";
35 reg = <0xff200000 0x10>; 35 reg = <0xff200000 0x10>;
36 interrupts = <0 45 4>; 36 interrupts = <0 45 4>;
diff --git a/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt b/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
index 7f57271df2bc..0d0158728f89 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
@@ -27,7 +27,7 @@ Required properties:
27 ti,tca6424 27 ti,tca6424
28 ti,tca9539 28 ti,tca9539
29 ti,tca9554 29 ti,tca9554
30 onsemi,pca9654 30 onnn,pca9654
31 exar,xra1202 31 exar,xra1202
32 32
33Optional properties: 33Optional properties:
diff --git a/Documentation/devicetree/bindings/i2c/i2c-jz4780.txt b/Documentation/devicetree/bindings/i2c/i2c-jz4780.txt
index 231e4cc4008c..d4a082acf92f 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-jz4780.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-jz4780.txt
@@ -18,7 +18,7 @@ Optional properties:
18Example 18Example
19 19
20/ { 20/ {
21 i2c4: i2c4@0x10054000 { 21 i2c4: i2c4@10054000 {
22 compatible = "ingenic,jz4780-i2c"; 22 compatible = "ingenic,jz4780-i2c";
23 reg = <0x10054000 0x1000>; 23 reg = <0x10054000 0x1000>;
24 24
diff --git a/Documentation/devicetree/bindings/iio/pressure/hp03.txt b/Documentation/devicetree/bindings/iio/pressure/hp03.txt
index 54e7e70bcea5..831dbee7a5c3 100644
--- a/Documentation/devicetree/bindings/iio/pressure/hp03.txt
+++ b/Documentation/devicetree/bindings/iio/pressure/hp03.txt
@@ -10,7 +10,7 @@ Required properties:
10 10
11Example: 11Example:
12 12
13hp03@0x77 { 13hp03@77 {
14 compatible = "hoperf,hp03"; 14 compatible = "hoperf,hp03";
15 reg = <0x77>; 15 reg = <0x77>;
16 xclr-gpio = <&portc 0 0x0>; 16 xclr-gpio = <&portc 0 0x0>;
diff --git a/Documentation/devicetree/bindings/input/touchscreen/bu21013.txt b/Documentation/devicetree/bindings/input/touchscreen/bu21013.txt
index ca5a2c86480c..56d835242af2 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/bu21013.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/bu21013.txt
@@ -15,7 +15,7 @@ Optional properties:
15Example: 15Example:
16 16
17 i2c@80110000 { 17 i2c@80110000 {
18 bu21013_tp@0x5c { 18 bu21013_tp@5c {
19 compatible = "rohm,bu21013_tp"; 19 compatible = "rohm,bu21013_tp";
20 reg = <0x5c>; 20 reg = <0x5c>;
21 touch-gpio = <&gpio2 20 0x4>; 21 touch-gpio = <&gpio2 20 0x4>;
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
index 560d8a727b8f..2f3244648646 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
@@ -155,7 +155,7 @@ Example:
155 <0x0 0xe112f000 0 0x02000>, 155 <0x0 0xe112f000 0 0x02000>,
156 <0x0 0xe1140000 0 0x10000>, 156 <0x0 0xe1140000 0 0x10000>,
157 <0x0 0xe1160000 0 0x10000>; 157 <0x0 0xe1160000 0 0x10000>;
158 v2m0: v2m@0x8000 { 158 v2m0: v2m@8000 {
159 compatible = "arm,gic-v2m-frame"; 159 compatible = "arm,gic-v2m-frame";
160 msi-controller; 160 msi-controller;
161 reg = <0x0 0x80000 0 0x1000>; 161 reg = <0x0 0x80000 0 0x1000>;
@@ -163,7 +163,7 @@ Example:
163 163
164 .... 164 ....
165 165
166 v2mN: v2m@0x9000 { 166 v2mN: v2m@9000 {
167 compatible = "arm,gic-v2m-frame"; 167 compatible = "arm,gic-v2m-frame";
168 msi-controller; 168 msi-controller;
169 reg = <0x0 0x90000 0 0x1000>; 169 reg = <0x0 0x90000 0 0x1000>;
diff --git a/Documentation/devicetree/bindings/interrupt-controller/img,meta-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/img,meta-intc.txt
index 80994adab392..42431f44697f 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/img,meta-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/img,meta-intc.txt
@@ -71,7 +71,7 @@ Example 2:
71 * An interrupt generating device that is wired to a Meta external 71 * An interrupt generating device that is wired to a Meta external
72 * trigger block. 72 * trigger block.
73 */ 73 */
74 uart1: uart@0x02004c00 { 74 uart1: uart@02004c00 {
75 // Interrupt source '5' that is level-sensitive. 75 // Interrupt source '5' that is level-sensitive.
76 // Note that there are only two cells as specified in the 76 // Note that there are only two cells as specified in the
77 // interrupt parent's '#interrupt-cells' property. 77 // interrupt parent's '#interrupt-cells' property.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/img,pdc-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/img,pdc-intc.txt
index a69118550344..5dc2a55ad811 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/img,pdc-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/img,pdc-intc.txt
@@ -51,7 +51,7 @@ Example 1:
51 /* 51 /*
52 * TZ1090 PDC block 52 * TZ1090 PDC block
53 */ 53 */
54 pdc: pdc@0x02006000 { 54 pdc: pdc@02006000 {
55 // This is an interrupt controller node. 55 // This is an interrupt controller node.
56 interrupt-controller; 56 interrupt-controller;
57 57
diff --git a/Documentation/devicetree/bindings/interrupt-controller/st,spear3xx-shirq.txt b/Documentation/devicetree/bindings/interrupt-controller/st,spear3xx-shirq.txt
index 715a013ed4bd..2ab0ea39867b 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/st,spear3xx-shirq.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/st,spear3xx-shirq.txt
@@ -39,7 +39,7 @@ Example:
39 39
40The following is an example from the SPEAr320 SoC dtsi file. 40The following is an example from the SPEAr320 SoC dtsi file.
41 41
42shirq: interrupt-controller@0xb3000000 { 42shirq: interrupt-controller@b3000000 {
43 compatible = "st,spear320-shirq"; 43 compatible = "st,spear320-shirq";
44 reg = <0xb3000000 0x1000>; 44 reg = <0xb3000000 0x1000>;
45 interrupts = <28 29 30 1>; 45 interrupts = <28 29 30 1>;
diff --git a/Documentation/devicetree/bindings/mailbox/altera-mailbox.txt b/Documentation/devicetree/bindings/mailbox/altera-mailbox.txt
index c2619797ce0c..49cfc8c337c4 100644
--- a/Documentation/devicetree/bindings/mailbox/altera-mailbox.txt
+++ b/Documentation/devicetree/bindings/mailbox/altera-mailbox.txt
@@ -14,7 +14,7 @@ Optional properties:
14 depends on the interrupt controller parent. 14 depends on the interrupt controller parent.
15 15
16Example: 16Example:
17 mbox_tx: mailbox@0x100 { 17 mbox_tx: mailbox@100 {
18 compatible = "altr,mailbox-1.0"; 18 compatible = "altr,mailbox-1.0";
19 reg = <0x100 0x8>; 19 reg = <0x100 0x8>;
20 interrupt-parent = < &gic_0 >; 20 interrupt-parent = < &gic_0 >;
@@ -22,7 +22,7 @@ Example:
22 #mbox-cells = <1>; 22 #mbox-cells = <1>;
23 }; 23 };
24 24
25 mbox_rx: mailbox@0x200 { 25 mbox_rx: mailbox@200 {
26 compatible = "altr,mailbox-1.0"; 26 compatible = "altr,mailbox-1.0";
27 reg = <0x200 0x8>; 27 reg = <0x200 0x8>;
28 interrupt-parent = < &gic_0 >; 28 interrupt-parent = < &gic_0 >;
@@ -40,7 +40,7 @@ support only one channel).The equivalent "mbox-names" property value can be
40used to give a name to the communication channel to be used by the client user. 40used to give a name to the communication channel to be used by the client user.
41 41
42Example: 42Example:
43 mclient0: mclient0@0x400 { 43 mclient0: mclient0@400 {
44 compatible = "client-1.0"; 44 compatible = "client-1.0";
45 reg = <0x400 0x10>; 45 reg = <0x400 0x10>;
46 mbox-names = "mbox-tx", "mbox-rx"; 46 mbox-names = "mbox-tx", "mbox-rx";
diff --git a/Documentation/devicetree/bindings/mailbox/brcm,iproc-pdc-mbox.txt b/Documentation/devicetree/bindings/mailbox/brcm,iproc-pdc-mbox.txt
index 0f3ee81d92c2..9bcdf2087625 100644
--- a/Documentation/devicetree/bindings/mailbox/brcm,iproc-pdc-mbox.txt
+++ b/Documentation/devicetree/bindings/mailbox/brcm,iproc-pdc-mbox.txt
@@ -15,7 +15,7 @@ Optional properties:
15- brcm,use-bcm-hdr: present if a BCM header precedes each frame. 15- brcm,use-bcm-hdr: present if a BCM header precedes each frame.
16 16
17Example: 17Example:
18 pdc0: iproc-pdc0@0x612c0000 { 18 pdc0: iproc-pdc0@612c0000 {
19 compatible = "brcm,iproc-pdc-mbox"; 19 compatible = "brcm,iproc-pdc-mbox";
20 reg = <0 0x612c0000 0 0x445>; /* PDC FS0 regs */ 20 reg = <0 0x612c0000 0 0x445>; /* PDC FS0 regs */
21 interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>; 21 interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/media/exynos5-gsc.txt b/Documentation/devicetree/bindings/media/exynos5-gsc.txt
index 0d4fdaedc6f1..bc963a6d305a 100644
--- a/Documentation/devicetree/bindings/media/exynos5-gsc.txt
+++ b/Documentation/devicetree/bindings/media/exynos5-gsc.txt
@@ -17,7 +17,7 @@ Optional properties:
17 17
18Example: 18Example:
19 19
20gsc_0: gsc@0x13e00000 { 20gsc_0: gsc@13e00000 {
21 compatible = "samsung,exynos5250-gsc"; 21 compatible = "samsung,exynos5250-gsc";
22 reg = <0x13e00000 0x1000>; 22 reg = <0x13e00000 0x1000>;
23 interrupts = <0 85 0>; 23 interrupts = <0 85 0>;
diff --git a/Documentation/devicetree/bindings/media/mediatek-vcodec.txt b/Documentation/devicetree/bindings/media/mediatek-vcodec.txt
index 46c15c54175d..2a615d84a682 100644
--- a/Documentation/devicetree/bindings/media/mediatek-vcodec.txt
+++ b/Documentation/devicetree/bindings/media/mediatek-vcodec.txt
@@ -68,7 +68,7 @@ vcodec_dec: vcodec@16000000 {
68 "vdec_bus_clk_src"; 68 "vdec_bus_clk_src";
69 }; 69 };
70 70
71 vcodec_enc: vcodec@0x18002000 { 71 vcodec_enc: vcodec@18002000 {
72 compatible = "mediatek,mt8173-vcodec-enc"; 72 compatible = "mediatek,mt8173-vcodec-enc";
73 reg = <0 0x18002000 0 0x1000>, /*VENC_SYS*/ 73 reg = <0 0x18002000 0 0x1000>, /*VENC_SYS*/
74 <0 0x19002000 0 0x1000>; /*VENC_LT_SYS*/ 74 <0 0x19002000 0 0x1000>; /*VENC_LT_SYS*/
diff --git a/Documentation/devicetree/bindings/media/rcar_vin.txt b/Documentation/devicetree/bindings/media/rcar_vin.txt
index 6e4ef8caf759..19357d0bbe65 100644
--- a/Documentation/devicetree/bindings/media/rcar_vin.txt
+++ b/Documentation/devicetree/bindings/media/rcar_vin.txt
@@ -44,7 +44,7 @@ Device node example
44 vin0 = &vin0; 44 vin0 = &vin0;
45 }; 45 };
46 46
47 vin0: vin@0xe6ef0000 { 47 vin0: vin@e6ef0000 {
48 compatible = "renesas,vin-r8a7790", "renesas,rcar-gen2-vin"; 48 compatible = "renesas,vin-r8a7790", "renesas,rcar-gen2-vin";
49 clocks = <&mstp8_clks R8A7790_CLK_VIN0>; 49 clocks = <&mstp8_clks R8A7790_CLK_VIN0>;
50 reg = <0 0xe6ef0000 0 0x1000>; 50 reg = <0 0xe6ef0000 0 0x1000>;
diff --git a/Documentation/devicetree/bindings/media/samsung-fimc.txt b/Documentation/devicetree/bindings/media/samsung-fimc.txt
index e4e15d8d7521..48c599dacbdf 100644
--- a/Documentation/devicetree/bindings/media/samsung-fimc.txt
+++ b/Documentation/devicetree/bindings/media/samsung-fimc.txt
@@ -138,7 +138,7 @@ Example:
138 }; 138 };
139 139
140 /* MIPI CSI-2 bus IF sensor */ 140 /* MIPI CSI-2 bus IF sensor */
141 s5c73m3: sensor@0x1a { 141 s5c73m3: sensor@1a {
142 compatible = "samsung,s5c73m3"; 142 compatible = "samsung,s5c73m3";
143 reg = <0x1a>; 143 reg = <0x1a>;
144 vddio-supply = <...>; 144 vddio-supply = <...>;
diff --git a/Documentation/devicetree/bindings/media/sh_mobile_ceu.txt b/Documentation/devicetree/bindings/media/sh_mobile_ceu.txt
index 1ce4e46bcbb7..17a8e81ca0cc 100644
--- a/Documentation/devicetree/bindings/media/sh_mobile_ceu.txt
+++ b/Documentation/devicetree/bindings/media/sh_mobile_ceu.txt
@@ -8,7 +8,7 @@ Bindings, specific for the sh_mobile_ceu_camera.c driver:
8 8
9Example: 9Example:
10 10
11ceu0: ceu@0xfe910000 { 11ceu0: ceu@fe910000 {
12 compatible = "renesas,sh-mobile-ceu"; 12 compatible = "renesas,sh-mobile-ceu";
13 reg = <0xfe910000 0xa0>; 13 reg = <0xfe910000 0xa0>;
14 interrupt-parent = <&intcs>; 14 interrupt-parent = <&intcs>;
diff --git a/Documentation/devicetree/bindings/media/video-interfaces.txt b/Documentation/devicetree/bindings/media/video-interfaces.txt
index 3994b0143dd1..258b8dfddf48 100644
--- a/Documentation/devicetree/bindings/media/video-interfaces.txt
+++ b/Documentation/devicetree/bindings/media/video-interfaces.txt
@@ -154,7 +154,7 @@ imx074 is linked to ceu0 through the MIPI CSI-2 receiver (csi2). ceu0 has a
154'port' node which may indicate that at any time only one of the following data 154'port' node which may indicate that at any time only one of the following data
155pipelines can be active: ov772x -> ceu0 or imx074 -> csi2 -> ceu0. 155pipelines can be active: ov772x -> ceu0 or imx074 -> csi2 -> ceu0.
156 156
157 ceu0: ceu@0xfe910000 { 157 ceu0: ceu@fe910000 {
158 compatible = "renesas,sh-mobile-ceu"; 158 compatible = "renesas,sh-mobile-ceu";
159 reg = <0xfe910000 0xa0>; 159 reg = <0xfe910000 0xa0>;
160 interrupts = <0x880>; 160 interrupts = <0x880>;
@@ -193,9 +193,9 @@ pipelines can be active: ov772x -> ceu0 or imx074 -> csi2 -> ceu0.
193 }; 193 };
194 }; 194 };
195 195
196 i2c0: i2c@0xfff20000 { 196 i2c0: i2c@fff20000 {
197 ... 197 ...
198 ov772x_1: camera@0x21 { 198 ov772x_1: camera@21 {
199 compatible = "ovti,ov772x"; 199 compatible = "ovti,ov772x";
200 reg = <0x21>; 200 reg = <0x21>;
201 vddio-supply = <&regulator1>; 201 vddio-supply = <&regulator1>;
@@ -219,7 +219,7 @@ pipelines can be active: ov772x -> ceu0 or imx074 -> csi2 -> ceu0.
219 }; 219 };
220 }; 220 };
221 221
222 imx074: camera@0x1a { 222 imx074: camera@1a {
223 compatible = "sony,imx074"; 223 compatible = "sony,imx074";
224 reg = <0x1a>; 224 reg = <0x1a>;
225 vddio-supply = <&regulator1>; 225 vddio-supply = <&regulator1>;
@@ -239,7 +239,7 @@ pipelines can be active: ov772x -> ceu0 or imx074 -> csi2 -> ceu0.
239 }; 239 };
240 }; 240 };
241 241
242 csi2: csi2@0xffc90000 { 242 csi2: csi2@ffc90000 {
243 compatible = "renesas,sh-mobile-csi2"; 243 compatible = "renesas,sh-mobile-csi2";
244 reg = <0xffc90000 0x1000>; 244 reg = <0xffc90000 0x1000>;
245 interrupts = <0x17a0>; 245 interrupts = <0x17a0>;
diff --git a/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt b/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt
index fd823d6091b2..152eeccbde1c 100644
--- a/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt
@@ -46,7 +46,7 @@ Optional properties:
46 46
47Example: 47Example:
48 48
49emif1: emif@0x4c000000 { 49emif1: emif@4c000000 {
50 compatible = "ti,emif-4d"; 50 compatible = "ti,emif-4d";
51 ti,hwmods = "emif2"; 51 ti,hwmods = "emif2";
52 phy-type = <1>; 52 phy-type = <1>;
diff --git a/Documentation/devicetree/bindings/mfd/ti-keystone-devctrl.txt b/Documentation/devicetree/bindings/mfd/ti-keystone-devctrl.txt
index 20963c76b4bc..71a1f5963936 100644
--- a/Documentation/devicetree/bindings/mfd/ti-keystone-devctrl.txt
+++ b/Documentation/devicetree/bindings/mfd/ti-keystone-devctrl.txt
@@ -13,7 +13,7 @@ Required properties:
13 13
14Example: 14Example:
15 15
16devctrl: device-state-control@0x02620000 { 16devctrl: device-state-control@02620000 {
17 compatible = "ti,keystone-devctrl", "syscon"; 17 compatible = "ti,keystone-devctrl", "syscon";
18 reg = <0x02620000 0x1000>; 18 reg = <0x02620000 0x1000>;
19}; 19};
diff --git a/Documentation/devicetree/bindings/misc/brcm,kona-smc.txt b/Documentation/devicetree/bindings/misc/brcm,kona-smc.txt
index 6c9f176f3571..05b47232ed9e 100644
--- a/Documentation/devicetree/bindings/misc/brcm,kona-smc.txt
+++ b/Documentation/devicetree/bindings/misc/brcm,kona-smc.txt
@@ -9,7 +9,7 @@ Required properties:
9- reg : Location and size of bounce buffer 9- reg : Location and size of bounce buffer
10 10
11Example: 11Example:
12 smc@0x3404c000 { 12 smc@3404c000 {
13 compatible = "brcm,bcm11351-smc", "brcm,kona-smc"; 13 compatible = "brcm,bcm11351-smc", "brcm,kona-smc";
14 reg = <0x3404c000 0x400>; //1 KiB in SRAM 14 reg = <0x3404c000 0x400>; //1 KiB in SRAM
15 }; 15 };
diff --git a/Documentation/devicetree/bindings/mmc/brcm,kona-sdhci.txt b/Documentation/devicetree/bindings/mmc/brcm,kona-sdhci.txt
index aaba2483b4ff..7f5dd83f5bd9 100644
--- a/Documentation/devicetree/bindings/mmc/brcm,kona-sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/brcm,kona-sdhci.txt
@@ -12,7 +12,7 @@ Refer to clocks/clock-bindings.txt for generic clock consumer properties.
12 12
13Example: 13Example:
14 14
15sdio2: sdio@0x3f1a0000 { 15sdio2: sdio@3f1a0000 {
16 compatible = "brcm,kona-sdhci"; 16 compatible = "brcm,kona-sdhci";
17 reg = <0x3f1a0000 0x10000>; 17 reg = <0x3f1a0000 0x10000>;
18 clocks = <&sdio3_clk>; 18 clocks = <&sdio3_clk>;
diff --git a/Documentation/devicetree/bindings/mmc/brcm,sdhci-iproc.txt b/Documentation/devicetree/bindings/mmc/brcm,sdhci-iproc.txt
index 954561d09a8e..fa90d253dc7e 100644
--- a/Documentation/devicetree/bindings/mmc/brcm,sdhci-iproc.txt
+++ b/Documentation/devicetree/bindings/mmc/brcm,sdhci-iproc.txt
@@ -24,7 +24,7 @@ Optional properties:
24 24
25Example: 25Example:
26 26
27sdhci0: sdhci@0x18041000 { 27sdhci0: sdhci@18041000 {
28 compatible = "brcm,sdhci-iproc-cygnus"; 28 compatible = "brcm,sdhci-iproc-cygnus";
29 reg = <0x18041000 0x100>; 29 reg = <0x18041000 0x100>;
30 interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>; 30 interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
index 3a4ac401e6f9..19f5508a7569 100644
--- a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
+++ b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
@@ -55,7 +55,7 @@ Examples:
55 55
56[hwmod populated DMA resources] 56[hwmod populated DMA resources]
57 57
58 mmc1: mmc@0x4809c000 { 58 mmc1: mmc@4809c000 {
59 compatible = "ti,omap4-hsmmc"; 59 compatible = "ti,omap4-hsmmc";
60 reg = <0x4809c000 0x400>; 60 reg = <0x4809c000 0x400>;
61 ti,hwmods = "mmc1"; 61 ti,hwmods = "mmc1";
@@ -67,7 +67,7 @@ Examples:
67 67
68[generic DMA request binding] 68[generic DMA request binding]
69 69
70 mmc1: mmc@0x4809c000 { 70 mmc1: mmc@4809c000 {
71 compatible = "ti,omap4-hsmmc"; 71 compatible = "ti,omap4-hsmmc";
72 reg = <0x4809c000 0x400>; 72 reg = <0x4809c000 0x400>;
73 ti,hwmods = "mmc1"; 73 ti,hwmods = "mmc1";
diff --git a/Documentation/devicetree/bindings/mtd/gpmc-nor.txt b/Documentation/devicetree/bindings/mtd/gpmc-nor.txt
index 131d3a74d0bd..c8567b40fe13 100644
--- a/Documentation/devicetree/bindings/mtd/gpmc-nor.txt
+++ b/Documentation/devicetree/bindings/mtd/gpmc-nor.txt
@@ -82,15 +82,15 @@ gpmc: gpmc@6e000000 {
82 label = "bootloader-nor"; 82 label = "bootloader-nor";
83 reg = <0 0x40000>; 83 reg = <0 0x40000>;
84 }; 84 };
85 partition@0x40000 { 85 partition@40000 {
86 label = "params-nor"; 86 label = "params-nor";
87 reg = <0x40000 0x40000>; 87 reg = <0x40000 0x40000>;
88 }; 88 };
89 partition@0x80000 { 89 partition@80000 {
90 label = "kernel-nor"; 90 label = "kernel-nor";
91 reg = <0x80000 0x200000>; 91 reg = <0x80000 0x200000>;
92 }; 92 };
93 partition@0x280000 { 93 partition@280000 {
94 label = "filesystem-nor"; 94 label = "filesystem-nor";
95 reg = <0x240000 0x7d80000>; 95 reg = <0x240000 0x7d80000>;
96 }; 96 };
diff --git a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
index 376fa2f50e6b..956bb046e599 100644
--- a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
+++ b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
@@ -13,7 +13,6 @@ Required properties:
13 at25df321a 13 at25df321a
14 at25df641 14 at25df641
15 at26df081a 15 at26df081a
16 en25s64
17 mr25h128 16 mr25h128
18 mr25h256 17 mr25h256
19 mr25h10 18 mr25h10
@@ -33,7 +32,6 @@ Required properties:
33 s25fl008k 32 s25fl008k
34 s25fl064k 33 s25fl064k
35 sst25vf040b 34 sst25vf040b
36 sst25wf040b
37 m25p40 35 m25p40
38 m25p80 36 m25p80
39 m25p16 37 m25p16
diff --git a/Documentation/devicetree/bindings/mtd/mtk-nand.txt b/Documentation/devicetree/bindings/mtd/mtk-nand.txt
index dbf9e054c11c..0431841de781 100644
--- a/Documentation/devicetree/bindings/mtd/mtk-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/mtk-nand.txt
@@ -131,7 +131,7 @@ Example:
131 read-only; 131 read-only;
132 reg = <0x00000000 0x00400000>; 132 reg = <0x00000000 0x00400000>;
133 }; 133 };
134 android@0x00400000 { 134 android@00400000 {
135 label = "android"; 135 label = "android";
136 reg = <0x00400000 0x12c00000>; 136 reg = <0x00400000 0x12c00000>;
137 }; 137 };
diff --git a/Documentation/devicetree/bindings/net/altera_tse.txt b/Documentation/devicetree/bindings/net/altera_tse.txt
index a706297998e9..0e21df94a53f 100644
--- a/Documentation/devicetree/bindings/net/altera_tse.txt
+++ b/Documentation/devicetree/bindings/net/altera_tse.txt
@@ -52,7 +52,7 @@ Optional properties:
52 52
53Example: 53Example:
54 54
55 tse_sub_0_eth_tse_0: ethernet@0x1,00000000 { 55 tse_sub_0_eth_tse_0: ethernet@1,00000000 {
56 compatible = "altr,tse-msgdma-1.0"; 56 compatible = "altr,tse-msgdma-1.0";
57 reg = <0x00000001 0x00000000 0x00000400>, 57 reg = <0x00000001 0x00000000 0x00000400>,
58 <0x00000001 0x00000460 0x00000020>, 58 <0x00000001 0x00000460 0x00000020>,
@@ -90,7 +90,7 @@ Example:
90 }; 90 };
91 }; 91 };
92 92
93 tse_sub_1_eth_tse_0: ethernet@0x1,00001000 { 93 tse_sub_1_eth_tse_0: ethernet@1,00001000 {
94 compatible = "altr,tse-msgdma-1.0"; 94 compatible = "altr,tse-msgdma-1.0";
95 reg = <0x00000001 0x00001000 0x00000400>, 95 reg = <0x00000001 0x00001000 0x00000400>,
96 <0x00000001 0x00001460 0x00000020>, 96 <0x00000001 0x00001460 0x00000020>,
diff --git a/Documentation/devicetree/bindings/net/mdio.txt b/Documentation/devicetree/bindings/net/mdio.txt
index 96a53f89aa6e..e3e1603f256c 100644
--- a/Documentation/devicetree/bindings/net/mdio.txt
+++ b/Documentation/devicetree/bindings/net/mdio.txt
@@ -18,7 +18,7 @@ Example :
18This example shows these optional properties, plus other properties 18This example shows these optional properties, plus other properties
19required for the TI Davinci MDIO driver. 19required for the TI Davinci MDIO driver.
20 20
21 davinci_mdio: ethernet@0x5c030000 { 21 davinci_mdio: ethernet@5c030000 {
22 compatible = "ti,davinci_mdio"; 22 compatible = "ti,davinci_mdio";
23 reg = <0x5c030000 0x1000>; 23 reg = <0x5c030000 0x1000>;
24 #address-cells = <1>; 24 #address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/net/socfpga-dwmac.txt b/Documentation/devicetree/bindings/net/socfpga-dwmac.txt
index b30d04b54ee9..17d6819669c8 100644
--- a/Documentation/devicetree/bindings/net/socfpga-dwmac.txt
+++ b/Documentation/devicetree/bindings/net/socfpga-dwmac.txt
@@ -28,7 +28,7 @@ Required properties:
28 28
29Example: 29Example:
30 30
31gmii_to_sgmii_converter: phy@0x100000240 { 31gmii_to_sgmii_converter: phy@100000240 {
32 compatible = "altr,gmii-to-sgmii-2.0"; 32 compatible = "altr,gmii-to-sgmii-2.0";
33 reg = <0x00000001 0x00000240 0x00000008>, 33 reg = <0x00000001 0x00000240 0x00000008>,
34 <0x00000001 0x00000200 0x00000040>; 34 <0x00000001 0x00000200 0x00000040>;
diff --git a/Documentation/devicetree/bindings/nios2/nios2.txt b/Documentation/devicetree/bindings/nios2/nios2.txt
index d6d0a94cb3bb..b95e831bcba3 100644
--- a/Documentation/devicetree/bindings/nios2/nios2.txt
+++ b/Documentation/devicetree/bindings/nios2/nios2.txt
@@ -36,7 +36,7 @@ Optional properties:
36 36
37Example: 37Example:
38 38
39cpu@0x0 { 39cpu@0 {
40 device_type = "cpu"; 40 device_type = "cpu";
41 compatible = "altr,nios2-1.0"; 41 compatible = "altr,nios2-1.0";
42 reg = <0>; 42 reg = <0>;
diff --git a/Documentation/devicetree/bindings/pci/altera-pcie.txt b/Documentation/devicetree/bindings/pci/altera-pcie.txt
index 495880193adc..a1dc9366a8fc 100644
--- a/Documentation/devicetree/bindings/pci/altera-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/altera-pcie.txt
@@ -25,7 +25,7 @@ Optional properties:
25- bus-range: PCI bus numbers covered 25- bus-range: PCI bus numbers covered
26 26
27Example 27Example
28 pcie_0: pcie@0xc00000000 { 28 pcie_0: pcie@c00000000 {
29 compatible = "altr,pcie-root-port-1.0"; 29 compatible = "altr,pcie-root-port-1.0";
30 reg = <0xc0000000 0x20000000>, 30 reg = <0xc0000000 0x20000000>,
31 <0xff220000 0x00004000>; 31 <0xff220000 0x00004000>;
diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
index 7b1e48bf172b..149d8f7f86b0 100644
--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
@@ -52,7 +52,7 @@ Additional required properties for imx7d-pcie:
52 52
53Example: 53Example:
54 54
55 pcie@0x01000000 { 55 pcie@01000000 {
56 compatible = "fsl,imx6q-pcie", "snps,dw-pcie"; 56 compatible = "fsl,imx6q-pcie", "snps,dw-pcie";
57 reg = <0x01ffc000 0x04000>, 57 reg = <0x01ffc000 0x04000>,
58 <0x01f00000 0x80000>; 58 <0x01f00000 0x80000>;
diff --git a/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt b/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
index bdb7ab39d2d7..7bf9df047a1e 100644
--- a/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
@@ -21,7 +21,7 @@ Optional properties:
21- dma-coherent: Present if DMA operations are coherent. 21- dma-coherent: Present if DMA operations are coherent.
22 22
23Hip05 Example (note that Hip06 is the same except compatible): 23Hip05 Example (note that Hip06 is the same except compatible):
24 pcie@0xb0080000 { 24 pcie@b0080000 {
25 compatible = "hisilicon,hip05-pcie", "snps,dw-pcie"; 25 compatible = "hisilicon,hip05-pcie", "snps,dw-pcie";
26 reg = <0 0xb0080000 0 0x10000>, <0x220 0x00000000 0 0x2000>; 26 reg = <0 0xb0080000 0 0x10000>, <0x220 0x00000000 0 0x2000>;
27 reg-names = "rc_dbi", "config"; 27 reg-names = "rc_dbi", "config";
diff --git a/Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt b/Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt
index cbc7847dbf6c..c1ce5a0a652e 100644
--- a/Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt
+++ b/Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt
@@ -45,7 +45,7 @@ Optional properties:
45- usb3_vbus-supply : regulator phandle for controller usb3 vbus 45- usb3_vbus-supply : regulator phandle for controller usb3 vbus
46 46
47Example: 47Example:
48 usbphy: phy@0x01c13400 { 48 usbphy: phy@01c13400 {
49 #phy-cells = <1>; 49 #phy-cells = <1>;
50 compatible = "allwinner,sun4i-a10-usb-phy"; 50 compatible = "allwinner,sun4i-a10-usb-phy";
51 /* phy base regs, phy1 pmu reg, phy2 pmu reg */ 51 /* phy base regs, phy1 pmu reg, phy2 pmu reg */
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,cygnus-pinmux.txt b/Documentation/devicetree/bindings/pinctrl/brcm,cygnus-pinmux.txt
index 3600d5c6c4d7..3914529a3214 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,cygnus-pinmux.txt
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,cygnus-pinmux.txt
@@ -25,7 +25,7 @@ Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
25 25
26For example: 26For example:
27 27
28 pinmux: pinmux@0x0301d0c8 { 28 pinmux: pinmux@0301d0c8 {
29 compatible = "brcm,cygnus-pinmux"; 29 compatible = "brcm,cygnus-pinmux";
30 reg = <0x0301d0c8 0x1b0>; 30 reg = <0x0301d0c8 0x1b0>;
31 31
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-atlas7.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-atlas7.txt
index eecf028ff485..bf9b07016c87 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-atlas7.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-atlas7.txt
@@ -96,14 +96,14 @@ For example, pinctrl might have subnodes like the following:
96 96
97For a specific board, if it wants to use sd1, 97For a specific board, if it wants to use sd1,
98it can add the following to its board-specific .dts file. 98it can add the following to its board-specific .dts file.
99sd1: sd@0x12340000 { 99sd1: sd@12340000 {
100 pinctrl-names = "default"; 100 pinctrl-names = "default";
101 pinctrl-0 = <&sd1_pmx0>; 101 pinctrl-0 = <&sd1_pmx0>;
102} 102}
103 103
104or 104or
105 105
106sd1: sd@0x12340000 { 106sd1: sd@12340000 {
107 pinctrl-names = "default"; 107 pinctrl-names = "default";
108 pinctrl-0 = <&sd1_pmx1>; 108 pinctrl-0 = <&sd1_pmx1>;
109} 109}
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-sirf.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-sirf.txt
index 5f55be59d914..f8420520e14b 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-sirf.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-sirf.txt
@@ -41,7 +41,7 @@ For example, pinctrl might have subnodes like the following:
41 41
42For a specific board, if it wants to use uart2 without hardware flow control, 42For a specific board, if it wants to use uart2 without hardware flow control,
43it can add the following to its board-specific .dts file. 43it can add the following to its board-specific .dts file.
44uart2: uart@0xb0070000 { 44uart2: uart@b0070000 {
45 pinctrl-names = "default"; 45 pinctrl-names = "default";
46 pinctrl-0 = <&uart2_noflow_pins_a>; 46 pinctrl-0 = <&uart2_noflow_pins_a>;
47} 47}
diff --git a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
index 4864e3a74de3..a01a3b8a2363 100644
--- a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
@@ -136,7 +136,7 @@ Example for rk3188:
136 #size-cells = <1>; 136 #size-cells = <1>;
137 ranges; 137 ranges;
138 138
139 gpio0: gpio0@0x2000a000 { 139 gpio0: gpio0@2000a000 {
140 compatible = "rockchip,rk3188-gpio-bank0"; 140 compatible = "rockchip,rk3188-gpio-bank0";
141 reg = <0x2000a000 0x100>; 141 reg = <0x2000a000 0x100>;
142 interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>; 142 interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
@@ -149,7 +149,7 @@ Example for rk3188:
149 #interrupt-cells = <2>; 149 #interrupt-cells = <2>;
150 }; 150 };
151 151
152 gpio1: gpio1@0x2003c000 { 152 gpio1: gpio1@2003c000 {
153 compatible = "rockchip,gpio-bank"; 153 compatible = "rockchip,gpio-bank";
154 reg = <0x2003c000 0x100>; 154 reg = <0x2003c000 0x100>;
155 interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>; 155 interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/regulator/regulator.txt b/Documentation/devicetree/bindings/regulator/regulator.txt
index 378f6dc8b8bd..3cbf56ce66ea 100644
--- a/Documentation/devicetree/bindings/regulator/regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/regulator.txt
@@ -107,7 +107,7 @@ regulators (twl_reg1 and twl_reg2),
107 ... 107 ...
108 }; 108 };
109 109
110 mmc: mmc@0x0 { 110 mmc: mmc@0 {
111 ... 111 ...
112 ... 112 ...
113 vmmc-supply = <&twl_reg1>; 113 vmmc-supply = <&twl_reg1>;
diff --git a/Documentation/devicetree/bindings/serial/efm32-uart.txt b/Documentation/devicetree/bindings/serial/efm32-uart.txt
index 8adbab268ca3..4f8d8fde0c1c 100644
--- a/Documentation/devicetree/bindings/serial/efm32-uart.txt
+++ b/Documentation/devicetree/bindings/serial/efm32-uart.txt
@@ -12,7 +12,7 @@ Optional properties:
12 12
13Example: 13Example:
14 14
15uart@0x4000c400 { 15uart@4000c400 {
16 compatible = "energymicro,efm32-uart"; 16 compatible = "energymicro,efm32-uart";
17 reg = <0x4000c400 0x400>; 17 reg = <0x4000c400 0x400>;
18 interrupts = <15>; 18 interrupts = <15>;
diff --git a/Documentation/devicetree/bindings/serio/allwinner,sun4i-ps2.txt b/Documentation/devicetree/bindings/serio/allwinner,sun4i-ps2.txt
index f311472990a7..75996b6111bb 100644
--- a/Documentation/devicetree/bindings/serio/allwinner,sun4i-ps2.txt
+++ b/Documentation/devicetree/bindings/serio/allwinner,sun4i-ps2.txt
@@ -14,7 +14,7 @@ Required properties:
14 14
15 15
16Example: 16Example:
17 ps20: ps2@0x01c2a000 { 17 ps20: ps2@01c2a000 {
18 compatible = "allwinner,sun4i-a10-ps2"; 18 compatible = "allwinner,sun4i-a10-ps2";
19 reg = <0x01c2a000 0x400>; 19 reg = <0x01c2a000 0x400>;
20 interrupts = <0 62 4>; 20 interrupts = <0 62 4>;
diff --git a/Documentation/devicetree/bindings/soc/ti/keystone-navigator-qmss.txt b/Documentation/devicetree/bindings/soc/ti/keystone-navigator-qmss.txt
index 64c66a5644e7..77cd42cc5f54 100644
--- a/Documentation/devicetree/bindings/soc/ti/keystone-navigator-qmss.txt
+++ b/Documentation/devicetree/bindings/soc/ti/keystone-navigator-qmss.txt
@@ -220,7 +220,7 @@ qmss: qmss@2a40000 {
220 #address-cells = <1>; 220 #address-cells = <1>;
221 #size-cells = <1>; 221 #size-cells = <1>;
222 ranges; 222 ranges;
223 pdsp0@0x2a10000 { 223 pdsp0@2a10000 {
224 reg = <0x2a10000 0x1000>, 224 reg = <0x2a10000 0x1000>,
225 <0x2a0f000 0x100>, 225 <0x2a0f000 0x100>,
226 <0x2a0c000 0x3c8>, 226 <0x2a0c000 0x3c8>,
diff --git a/Documentation/devicetree/bindings/sound/adi,axi-i2s.txt b/Documentation/devicetree/bindings/sound/adi,axi-i2s.txt
index 5875ca459ed1..4248b662deff 100644
--- a/Documentation/devicetree/bindings/sound/adi,axi-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/adi,axi-i2s.txt
@@ -21,7 +21,7 @@ please check:
21 21
22Example: 22Example:
23 23
24 i2s: i2s@0x77600000 { 24 i2s: i2s@77600000 {
25 compatible = "adi,axi-i2s-1.00.a"; 25 compatible = "adi,axi-i2s-1.00.a";
26 reg = <0x77600000 0x1000>; 26 reg = <0x77600000 0x1000>;
27 clocks = <&clk 15>, <&audio_clock>; 27 clocks = <&clk 15>, <&audio_clock>;
diff --git a/Documentation/devicetree/bindings/sound/adi,axi-spdif-tx.txt b/Documentation/devicetree/bindings/sound/adi,axi-spdif-tx.txt
index 4eb7997674a0..7b664e7cb4ae 100644
--- a/Documentation/devicetree/bindings/sound/adi,axi-spdif-tx.txt
+++ b/Documentation/devicetree/bindings/sound/adi,axi-spdif-tx.txt
@@ -20,7 +20,7 @@ please check:
20 20
21Example: 21Example:
22 22
23 spdif: spdif@0x77400000 { 23 spdif: spdif@77400000 {
24 compatible = "adi,axi-spdif-tx-1.00.a"; 24 compatible = "adi,axi-spdif-tx-1.00.a";
25 reg = <0x77600000 0x1000>; 25 reg = <0x77600000 0x1000>;
26 clocks = <&clk 15>, <&audio_clock>; 26 clocks = <&clk 15>, <&audio_clock>;
diff --git a/Documentation/devicetree/bindings/sound/ak4613.txt b/Documentation/devicetree/bindings/sound/ak4613.txt
index 1783f9ef0930..49a2e74fd9cb 100644
--- a/Documentation/devicetree/bindings/sound/ak4613.txt
+++ b/Documentation/devicetree/bindings/sound/ak4613.txt
@@ -20,7 +20,7 @@ Optional properties:
20Example: 20Example:
21 21
22&i2c { 22&i2c {
23 ak4613: ak4613@0x10 { 23 ak4613: ak4613@10 {
24 compatible = "asahi-kasei,ak4613"; 24 compatible = "asahi-kasei,ak4613";
25 reg = <0x10>; 25 reg = <0x10>;
26 }; 26 };
diff --git a/Documentation/devicetree/bindings/sound/ak4642.txt b/Documentation/devicetree/bindings/sound/ak4642.txt
index 340784db6808..58e48ee97175 100644
--- a/Documentation/devicetree/bindings/sound/ak4642.txt
+++ b/Documentation/devicetree/bindings/sound/ak4642.txt
@@ -17,7 +17,7 @@ Optional properties:
17Example 1: 17Example 1:
18 18
19&i2c { 19&i2c {
20 ak4648: ak4648@0x12 { 20 ak4648: ak4648@12 {
21 compatible = "asahi-kasei,ak4642"; 21 compatible = "asahi-kasei,ak4642";
22 reg = <0x12>; 22 reg = <0x12>;
23 }; 23 };
diff --git a/Documentation/devicetree/bindings/sound/da7218.txt b/Documentation/devicetree/bindings/sound/da7218.txt
index 5ca5a709b6aa..3ab9dfef38d1 100644
--- a/Documentation/devicetree/bindings/sound/da7218.txt
+++ b/Documentation/devicetree/bindings/sound/da7218.txt
@@ -73,7 +73,7 @@ Example:
73 compatible = "dlg,da7218"; 73 compatible = "dlg,da7218";
74 reg = <0x1a>; 74 reg = <0x1a>;
75 interrupt-parent = <&gpio6>; 75 interrupt-parent = <&gpio6>;
76 interrupts = <11 IRQ_TYPE_LEVEL_HIGH>; 76 interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
77 wakeup-source; 77 wakeup-source;
78 78
79 VDD-supply = <&reg_audio>; 79 VDD-supply = <&reg_audio>;
diff --git a/Documentation/devicetree/bindings/sound/da7219.txt b/Documentation/devicetree/bindings/sound/da7219.txt
index cf61681826b6..5b54d2d045c3 100644
--- a/Documentation/devicetree/bindings/sound/da7219.txt
+++ b/Documentation/devicetree/bindings/sound/da7219.txt
@@ -77,7 +77,7 @@ Example:
77 reg = <0x1a>; 77 reg = <0x1a>;
78 78
79 interrupt-parent = <&gpio6>; 79 interrupt-parent = <&gpio6>;
80 interrupts = <11 IRQ_TYPE_LEVEL_HIGH>; 80 interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
81 81
82 VDD-supply = <&reg_audio>; 82 VDD-supply = <&reg_audio>;
83 VDDMIC-supply = <&reg_audio>; 83 VDDMIC-supply = <&reg_audio>;
diff --git a/Documentation/devicetree/bindings/sound/max98371.txt b/Documentation/devicetree/bindings/sound/max98371.txt
index 6c285235e64b..8b2b2704b574 100644
--- a/Documentation/devicetree/bindings/sound/max98371.txt
+++ b/Documentation/devicetree/bindings/sound/max98371.txt
@@ -10,7 +10,7 @@ Required properties:
10Example: 10Example:
11 11
12&i2c { 12&i2c {
13 max98371: max98371@0x31 { 13 max98371: max98371@31 {
14 compatible = "maxim,max98371"; 14 compatible = "maxim,max98371";
15 reg = <0x31>; 15 reg = <0x31>;
16 }; 16 };
diff --git a/Documentation/devicetree/bindings/sound/max9867.txt b/Documentation/devicetree/bindings/sound/max9867.txt
index 394cd4eb17ec..b8bd914ee697 100644
--- a/Documentation/devicetree/bindings/sound/max9867.txt
+++ b/Documentation/devicetree/bindings/sound/max9867.txt
@@ -10,7 +10,7 @@ Required properties:
10Example: 10Example:
11 11
12&i2c { 12&i2c {
13 max9867: max9867@0x18 { 13 max9867: max9867@18 {
14 compatible = "maxim,max9867"; 14 compatible = "maxim,max9867";
15 reg = <0x18>; 15 reg = <0x18>;
16 }; 16 };
diff --git a/Documentation/devicetree/bindings/sound/renesas,fsi.txt b/Documentation/devicetree/bindings/sound/renesas,fsi.txt
index 0d0ab51105b0..0cf0f819b823 100644
--- a/Documentation/devicetree/bindings/sound/renesas,fsi.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,fsi.txt
@@ -20,7 +20,7 @@ Required properties:
20 20
21Example: 21Example:
22 22
23sh_fsi2: sh_fsi2@0xec230000 { 23sh_fsi2: sh_fsi2@ec230000 {
24 compatible = "renesas,sh_fsi2"; 24 compatible = "renesas,sh_fsi2";
25 reg = <0xec230000 0x400>; 25 reg = <0xec230000 0x400>;
26 interrupts = <0 146 0x4>; 26 interrupts = <0 146 0x4>;
diff --git a/Documentation/devicetree/bindings/sound/rockchip-spdif.txt b/Documentation/devicetree/bindings/sound/rockchip-spdif.txt
index 0a1dc4e1815c..ec20c1271e92 100644
--- a/Documentation/devicetree/bindings/sound/rockchip-spdif.txt
+++ b/Documentation/devicetree/bindings/sound/rockchip-spdif.txt
@@ -33,7 +33,7 @@ Required properties on RK3288:
33 33
34Example for the rk3188 SPDIF controller: 34Example for the rk3188 SPDIF controller:
35 35
36spdif: spdif@0x1011e000 { 36spdif: spdif@1011e000 {
37 compatible = "rockchip,rk3188-spdif", "rockchip,rk3066-spdif"; 37 compatible = "rockchip,rk3188-spdif", "rockchip,rk3066-spdif";
38 reg = <0x1011e000 0x2000>; 38 reg = <0x1011e000 0x2000>;
39 interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>; 39 interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt b/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
index 40068ec0e9a5..9c1ee52fed5b 100644
--- a/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
+++ b/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
@@ -51,7 +51,7 @@ Optional properties:
51 51
52Example: 52Example:
53 53
54 sti_uni_player1: sti-uni-player@0x8D81000 { 54 sti_uni_player1: sti-uni-player@8D81000 {
55 compatible = "st,stih407-uni-player-hdmi"; 55 compatible = "st,stih407-uni-player-hdmi";
56 #sound-dai-cells = <0>; 56 #sound-dai-cells = <0>;
57 st,syscfg = <&syscfg_core>; 57 st,syscfg = <&syscfg_core>;
@@ -63,7 +63,7 @@ Example:
63 st,tdm-mode = <1>; 63 st,tdm-mode = <1>;
64 }; 64 };
65 65
66 sti_uni_player2: sti-uni-player@0x8D82000 { 66 sti_uni_player2: sti-uni-player@8D82000 {
67 compatible = "st,stih407-uni-player-pcm-out"; 67 compatible = "st,stih407-uni-player-pcm-out";
68 #sound-dai-cells = <0>; 68 #sound-dai-cells = <0>;
69 st,syscfg = <&syscfg_core>; 69 st,syscfg = <&syscfg_core>;
@@ -74,7 +74,7 @@ Example:
74 dma-names = "tx"; 74 dma-names = "tx";
75 }; 75 };
76 76
77 sti_uni_player3: sti-uni-player@0x8D85000 { 77 sti_uni_player3: sti-uni-player@8D85000 {
78 compatible = "st,stih407-uni-player-spdif"; 78 compatible = "st,stih407-uni-player-spdif";
79 #sound-dai-cells = <0>; 79 #sound-dai-cells = <0>;
80 st,syscfg = <&syscfg_core>; 80 st,syscfg = <&syscfg_core>;
@@ -85,7 +85,7 @@ Example:
85 dma-names = "tx"; 85 dma-names = "tx";
86 }; 86 };
87 87
88 sti_uni_reader1: sti-uni-reader@0x8D84000 { 88 sti_uni_reader1: sti-uni-reader@8D84000 {
89 compatible = "st,stih407-uni-reader-hdmi"; 89 compatible = "st,stih407-uni-reader-hdmi";
90 #sound-dai-cells = <0>; 90 #sound-dai-cells = <0>;
91 st,syscfg = <&syscfg_core>; 91 st,syscfg = <&syscfg_core>;
diff --git a/Documentation/devicetree/bindings/spi/efm32-spi.txt b/Documentation/devicetree/bindings/spi/efm32-spi.txt
index 2c1e6a43930b..e0fa61a1be0c 100644
--- a/Documentation/devicetree/bindings/spi/efm32-spi.txt
+++ b/Documentation/devicetree/bindings/spi/efm32-spi.txt
@@ -19,7 +19,7 @@ Recommended properties :
19 19
20Example: 20Example:
21 21
22spi1: spi@0x4000c400 { /* USART1 */ 22spi1: spi@4000c400 { /* USART1 */
23 #address-cells = <1>; 23 #address-cells = <1>;
24 #size-cells = <0>; 24 #size-cells = <0>;
25 compatible = "energymicro,efm32-spi"; 25 compatible = "energymicro,efm32-spi";
diff --git a/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
index 5bf13960f7f4..e3c48b20b1a6 100644
--- a/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
+++ b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
@@ -12,24 +12,30 @@ Required properties:
12 - "fsl,imx53-ecspi" for SPI compatible with the one integrated on i.MX53 and later Soc 12 - "fsl,imx53-ecspi" for SPI compatible with the one integrated on i.MX53 and later Soc
13- reg : Offset and length of the register set for the device 13- reg : Offset and length of the register set for the device
14- interrupts : Should contain CSPI/eCSPI interrupt 14- interrupts : Should contain CSPI/eCSPI interrupt
15- cs-gpios : Specifies the gpio pins to be used for chipselects.
16- clocks : Clock specifiers for both ipg and per clocks. 15- clocks : Clock specifiers for both ipg and per clocks.
17- clock-names : Clock names should include both "ipg" and "per" 16- clock-names : Clock names should include both "ipg" and "per"
18See the clock consumer binding, 17See the clock consumer binding,
19 Documentation/devicetree/bindings/clock/clock-bindings.txt 18 Documentation/devicetree/bindings/clock/clock-bindings.txt
20- dmas: DMA specifiers for tx and rx dma. See the DMA client binding,
21 Documentation/devicetree/bindings/dma/dma.txt
22- dma-names: DMA request names should include "tx" and "rx" if present.
23 19
24Obsolete properties: 20Recommended properties:
25- fsl,spi-num-chipselects : Contains the number of the chipselect 21- cs-gpios : GPIOs to use as chip selects, see spi-bus.txt. While the native chip
22select lines can be used, they appear to always generate a pulse between each
23word of a transfer. Most use cases will require GPIO based chip selects to
24generate a valid transaction.
26 25
27Optional properties: 26Optional properties:
27- num-cs : Number of total chip selects, see spi-bus.txt.
28- dmas: DMA specifiers for tx and rx dma. See the DMA client binding,
29Documentation/devicetree/bindings/dma/dma.txt.
30- dma-names: DMA request names, if present, should include "tx" and "rx".
28- fsl,spi-rdy-drctl: Integer, representing the value of DRCTL, the register 31- fsl,spi-rdy-drctl: Integer, representing the value of DRCTL, the register
29controlling the SPI_READY handling. Note that to enable the DRCTL consideration, 32controlling the SPI_READY handling. Note that to enable the DRCTL consideration,
30the SPI_READY mode-flag needs to be set too. 33the SPI_READY mode-flag needs to be set too.
31Valid values are: 0 (disabled), 1 (edge-triggered burst) and 2 (level-triggered burst). 34Valid values are: 0 (disabled), 1 (edge-triggered burst) and 2 (level-triggered burst).
32 35
36Obsolete properties:
37- fsl,spi-num-chipselects : Contains the number of the chipselect
38
33Example: 39Example:
34 40
35ecspi@70010000 { 41ecspi@70010000 {
diff --git a/Documentation/devicetree/bindings/thermal/thermal.txt b/Documentation/devicetree/bindings/thermal/thermal.txt
index 88b6ea1ad290..44d7cb2cb2c0 100644
--- a/Documentation/devicetree/bindings/thermal/thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/thermal.txt
@@ -239,7 +239,7 @@ cpus {
239 * A simple fan controller which supports 10 speeds of operation 239 * A simple fan controller which supports 10 speeds of operation
240 * (represented as 0-9). 240 * (represented as 0-9).
241 */ 241 */
242 fan0: fan@0x48 { 242 fan0: fan@48 {
243 ... 243 ...
244 cooling-min-level = <0>; 244 cooling-min-level = <0>;
245 cooling-max-level = <9>; 245 cooling-max-level = <9>;
@@ -252,7 +252,7 @@ ocp {
252 /* 252 /*
253 * A simple IC with a single bandgap temperature sensor. 253 * A simple IC with a single bandgap temperature sensor.
254 */ 254 */
255 bandgap0: bandgap@0x0000ED00 { 255 bandgap0: bandgap@0000ED00 {
256 ... 256 ...
257 #thermal-sensor-cells = <0>; 257 #thermal-sensor-cells = <0>;
258 }; 258 };
@@ -330,7 +330,7 @@ ocp {
330 /* 330 /*
331 * A simple IC with several bandgap temperature sensors. 331 * A simple IC with several bandgap temperature sensors.
332 */ 332 */
333 bandgap0: bandgap@0x0000ED00 { 333 bandgap0: bandgap@0000ED00 {
334 ... 334 ...
335 #thermal-sensor-cells = <1>; 335 #thermal-sensor-cells = <1>;
336 }; 336 };
@@ -447,7 +447,7 @@ one thermal zone.
447 /* 447 /*
448 * A simple IC with a single temperature sensor. 448 * A simple IC with a single temperature sensor.
449 */ 449 */
450 adc: sensor@0x49 { 450 adc: sensor@49 {
451 ... 451 ...
452 #thermal-sensor-cells = <0>; 452 #thermal-sensor-cells = <0>;
453 }; 453 };
@@ -458,7 +458,7 @@ ocp {
458 /* 458 /*
459 * A simple IC with a single bandgap temperature sensor. 459 * A simple IC with a single bandgap temperature sensor.
460 */ 460 */
461 bandgap0: bandgap@0x0000ED00 { 461 bandgap0: bandgap@0000ED00 {
462 ... 462 ...
463 #thermal-sensor-cells = <0>; 463 #thermal-sensor-cells = <0>;
464 }; 464 };
@@ -516,7 +516,7 @@ with many sensors and many cooling devices.
516 /* 516 /*
517 * An IC with several temperature sensor. 517 * An IC with several temperature sensor.
518 */ 518 */
519 adc_dummy: sensor@0x50 { 519 adc_dummy: sensor@50 {
520 ... 520 ...
521 #thermal-sensor-cells = <1>; /* sensor internal ID */ 521 #thermal-sensor-cells = <1>; /* sensor internal ID */
522 }; 522 };
diff --git a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
index 1f69ee1a61ea..21d9a93db2e9 100644
--- a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
+++ b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
@@ -32,7 +32,7 @@ Optional properties:
32 32
33Example: 33Example:
34 34
35 ufsphy1: ufsphy@0xfc597000 { 35 ufsphy1: ufsphy@fc597000 {
36 compatible = "qcom,ufs-phy-qmp-20nm"; 36 compatible = "qcom,ufs-phy-qmp-20nm";
37 reg = <0xfc597000 0x800>; 37 reg = <0xfc597000 0x800>;
38 reg-names = "phy_mem"; 38 reg-names = "phy_mem";
@@ -53,7 +53,7 @@ Example:
53 <&clock_gcc clk_gcc_ufs_rx_cfg_clk>; 53 <&clock_gcc clk_gcc_ufs_rx_cfg_clk>;
54 }; 54 };
55 55
56 ufshc@0xfc598000 { 56 ufshc@fc598000 {
57 ... 57 ...
58 phys = <&ufsphy1>; 58 phys = <&ufsphy1>;
59 phy-names = "ufsphy"; 59 phy-names = "ufsphy";
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index a99ed5565b26..c39dfef76a18 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -46,7 +46,7 @@ Note: If above properties are not defined it can be assumed that the supply
46regulators or clocks are always on. 46regulators or clocks are always on.
47 47
48Example: 48Example:
49 ufshc@0xfc598000 { 49 ufshc@fc598000 {
50 compatible = "jedec,ufs-1.1"; 50 compatible = "jedec,ufs-1.1";
51 reg = <0xfc598000 0x800>; 51 reg = <0xfc598000 0x800>;
52 interrupts = <0 28 0>; 52 interrupts = <0 28 0>;
diff --git a/Documentation/devicetree/bindings/usb/am33xx-usb.txt b/Documentation/devicetree/bindings/usb/am33xx-usb.txt
index 7a33f22c815a..7a198a30408a 100644
--- a/Documentation/devicetree/bindings/usb/am33xx-usb.txt
+++ b/Documentation/devicetree/bindings/usb/am33xx-usb.txt
@@ -95,6 +95,7 @@ usb: usb@47400000 {
95 reg = <0x47401300 0x100>; 95 reg = <0x47401300 0x100>;
96 reg-names = "phy"; 96 reg-names = "phy";
97 ti,ctrl_mod = <&ctrl_mod>; 97 ti,ctrl_mod = <&ctrl_mod>;
98 #phy-cells = <0>;
98 }; 99 };
99 100
100 usb0: usb@47401000 { 101 usb0: usb@47401000 {
@@ -141,6 +142,7 @@ usb: usb@47400000 {
141 reg = <0x47401b00 0x100>; 142 reg = <0x47401b00 0x100>;
142 reg-names = "phy"; 143 reg-names = "phy";
143 ti,ctrl_mod = <&ctrl_mod>; 144 ti,ctrl_mod = <&ctrl_mod>;
145 #phy-cells = <0>;
144 }; 146 };
145 147
146 usb1: usb@47401800 { 148 usb1: usb@47401800 {
diff --git a/Documentation/devicetree/bindings/usb/ehci-st.txt b/Documentation/devicetree/bindings/usb/ehci-st.txt
index 9feea6c3e4d9..065c91d955ad 100644
--- a/Documentation/devicetree/bindings/usb/ehci-st.txt
+++ b/Documentation/devicetree/bindings/usb/ehci-st.txt
@@ -22,7 +22,7 @@ See: Documentation/devicetree/bindings/reset/reset.txt
22 22
23Example: 23Example:
24 24
25 ehci1: usb@0xfe203e00 { 25 ehci1: usb@fe203e00 {
26 compatible = "st,st-ehci-300x"; 26 compatible = "st,st-ehci-300x";
27 reg = <0xfe203e00 0x100>; 27 reg = <0xfe203e00 0x100>;
28 interrupts = <GIC_SPI 148 IRQ_TYPE_NONE>; 28 interrupts = <GIC_SPI 148 IRQ_TYPE_NONE>;
diff --git a/Documentation/devicetree/bindings/usb/ohci-st.txt b/Documentation/devicetree/bindings/usb/ohci-st.txt
index d893ec9131c3..44c998c16f85 100644
--- a/Documentation/devicetree/bindings/usb/ohci-st.txt
+++ b/Documentation/devicetree/bindings/usb/ohci-st.txt
@@ -20,7 +20,7 @@ See: Documentation/devicetree/bindings/reset/reset.txt
20 20
21Example: 21Example:
22 22
23 ohci0: usb@0xfe1ffc00 { 23 ohci0: usb@fe1ffc00 {
24 compatible = "st,st-ohci-300x"; 24 compatible = "st,st-ohci-300x";
25 reg = <0xfe1ffc00 0x100>; 25 reg = <0xfe1ffc00 0x100>;
26 interrupts = <GIC_SPI 149 IRQ_TYPE_NONE>; 26 interrupts = <GIC_SPI 149 IRQ_TYPE_NONE>;
diff --git a/Documentation/devicetree/bindings/watchdog/ingenic,jz4740-wdt.txt b/Documentation/devicetree/bindings/watchdog/ingenic,jz4740-wdt.txt
index e27763ef0049..3c7a1cd13b10 100644
--- a/Documentation/devicetree/bindings/watchdog/ingenic,jz4740-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/ingenic,jz4740-wdt.txt
@@ -6,7 +6,7 @@ reg: Register address and length for watchdog registers
6 6
7Example: 7Example:
8 8
9watchdog: jz4740-watchdog@0x10002000 { 9watchdog: jz4740-watchdog@10002000 {
10 compatible = "ingenic,jz4740-watchdog"; 10 compatible = "ingenic,jz4740-watchdog";
11 reg = <0x10002000 0x100>; 11 reg = <0x10002000 0x100>;
12}; 12};
diff --git a/Documentation/driver-api/dmaengine/client.rst b/Documentation/driver-api/dmaengine/client.rst
index 6245c99af8c1..fbbb2831f29f 100644
--- a/Documentation/driver-api/dmaengine/client.rst
+++ b/Documentation/driver-api/dmaengine/client.rst
@@ -185,7 +185,7 @@ The details of these operations are:
185 void dma_async_issue_pending(struct dma_chan *chan); 185 void dma_async_issue_pending(struct dma_chan *chan);
186 186
187Further APIs: 187Further APIs:
188------------ 188-------------
189 189
1901. Terminate APIs 1901. Terminate APIs
191 191
diff --git a/Documentation/driver-api/pci.rst b/Documentation/driver-api/pci.rst
index 01a6c8b7d3a7..ca85e5e78b2c 100644
--- a/Documentation/driver-api/pci.rst
+++ b/Documentation/driver-api/pci.rst
@@ -25,9 +25,6 @@ PCI Support Library
25.. kernel-doc:: drivers/pci/irq.c 25.. kernel-doc:: drivers/pci/irq.c
26 :export: 26 :export:
27 27
28.. kernel-doc:: drivers/pci/htirq.c
29 :export:
30
31.. kernel-doc:: drivers/pci/probe.c 28.. kernel-doc:: drivers/pci/probe.c
32 :export: 29 :export:
33 30
diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt
index c0727dc36271..f2f3f8592a6f 100644
--- a/Documentation/filesystems/nilfs2.txt
+++ b/Documentation/filesystems/nilfs2.txt
@@ -25,8 +25,8 @@ available from the following download page. At least "mkfs.nilfs2",
25cleaner or garbage collector) are required. Details on the tools are 25cleaner or garbage collector) are required. Details on the tools are
26described in the man pages included in the package. 26described in the man pages included in the package.
27 27
28Project web page: http://nilfs.sourceforge.net/ 28Project web page: https://nilfs.sourceforge.io/
29Download page: http://nilfs.sourceforge.net/en/download.html 29Download page: https://nilfs.sourceforge.io/en/download.html
30List info: http://vger.kernel.org/vger-lists.html#linux-nilfs 30List info: http://vger.kernel.org/vger-lists.html#linux-nilfs
31 31
32Caveats 32Caveats
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
index 8caa60734647..e6a5f4912b6d 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -156,6 +156,40 @@ handle it in two different ways:
156 root of the overlay. Finally the directory is moved to the new 156 root of the overlay. Finally the directory is moved to the new
157 location. 157 location.
158 158
159There are several ways to tune the "redirect_dir" feature.
160
161Kernel config options:
162
163- OVERLAY_FS_REDIRECT_DIR:
164 If this is enabled, then redirect_dir is turned on by default.
165- OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW:
166 If this is enabled, then redirects are always followed by default. Enabling
167 this results in a less secure configuration. Enable this option only when
168 worried about backward compatibility with kernels that have the redirect_dir
169 feature and follow redirects even if turned off.
170
171Module options (can also be changed through /sys/module/overlay/parameters/*):
172
173- "redirect_dir=BOOL":
174 See OVERLAY_FS_REDIRECT_DIR kernel config option above.
175- "redirect_always_follow=BOOL":
176 See OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW kernel config option above.
177- "redirect_max=NUM":
178 The maximum number of bytes in an absolute redirect (default is 256).
179
180Mount options:
181
182- "redirect_dir=on":
183 Redirects are enabled.
184- "redirect_dir=follow":
185 Redirects are not created, but followed.
186- "redirect_dir=off":
187 Redirects are not created and only followed if "redirect_always_follow"
188 feature is enabled in the kernel/module config.
189- "redirect_dir=nofollow":
190 Redirects are not created and not followed (equivalent to "redirect_dir=off"
191 if "redirect_always_follow" feature is not enabled).
192
159Non-directories 193Non-directories
160--------------- 194---------------
161 195
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index 2e7ee0313c1c..e94d3ac2bdd0 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -341,10 +341,7 @@ GuC
341GuC-specific firmware loader 341GuC-specific firmware loader
342---------------------------- 342----------------------------
343 343
344.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_loader.c 344.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_fw.c
345 :doc: GuC-specific firmware loader
346
347.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_loader.c
348 :internal: 345 :internal:
349 346
350GuC-based command submission 347GuC-based command submission
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index 262722d8867b..c4a293a03c33 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -200,10 +200,14 @@ module state. Dependency expressions have the following syntax:
200<expr> ::= <symbol> (1) 200<expr> ::= <symbol> (1)
201 <symbol> '=' <symbol> (2) 201 <symbol> '=' <symbol> (2)
202 <symbol> '!=' <symbol> (3) 202 <symbol> '!=' <symbol> (3)
203 '(' <expr> ')' (4) 203 <symbol1> '<' <symbol2> (4)
204 '!' <expr> (5) 204 <symbol1> '>' <symbol2> (4)
205 <expr> '&&' <expr> (6) 205 <symbol1> '<=' <symbol2> (4)
206 <expr> '||' <expr> (7) 206 <symbol1> '>=' <symbol2> (4)
207 '(' <expr> ')' (5)
208 '!' <expr> (6)
209 <expr> '&&' <expr> (7)
210 <expr> '||' <expr> (8)
207 211
208Expressions are listed in decreasing order of precedence. 212Expressions are listed in decreasing order of precedence.
209 213
@@ -214,10 +218,13 @@ Expressions are listed in decreasing order of precedence.
214 otherwise 'n'. 218 otherwise 'n'.
215(3) If the values of both symbols are equal, it returns 'n', 219(3) If the values of both symbols are equal, it returns 'n',
216 otherwise 'y'. 220 otherwise 'y'.
217(4) Returns the value of the expression. Used to override precedence. 221(4) If value of <symbol1> is respectively lower, greater, lower-or-equal,
218(5) Returns the result of (2-/expr/). 222 or greater-or-equal than value of <symbol2>, it returns 'y',
219(6) Returns the result of min(/expr/, /expr/). 223 otherwise 'n'.
220(7) Returns the result of max(/expr/, /expr/). 224(5) Returns the value of the expression. Used to override precedence.
225(6) Returns the result of (2-/expr/).
226(7) Returns the result of min(/expr/, /expr/).
227(8) Returns the result of max(/expr/, /expr/).
221 228
222An expression can have a value of 'n', 'm' or 'y' (or 0, 1, 2 229An expression can have a value of 'n', 'm' or 'y' (or 0, 1, 2
223respectively for calculations). A menu entry becomes visible when its 230respectively for calculations). A menu entry becomes visible when its
diff --git a/Documentation/locking/crossrelease.txt b/Documentation/locking/crossrelease.txt
deleted file mode 100644
index bdf1423d5f99..000000000000
--- a/Documentation/locking/crossrelease.txt
+++ /dev/null
@@ -1,874 +0,0 @@
1Crossrelease
2============
3
4Started by Byungchul Park <byungchul.park@lge.com>
5
6Contents:
7
8 (*) Background
9
10 - What causes deadlock
11 - How lockdep works
12
13 (*) Limitation
14
15 - Limit lockdep
16 - Pros from the limitation
17 - Cons from the limitation
18 - Relax the limitation
19
20 (*) Crossrelease
21
22 - Introduce crossrelease
23 - Introduce commit
24
25 (*) Implementation
26
27 - Data structures
28 - How crossrelease works
29
30 (*) Optimizations
31
32 - Avoid duplication
33 - Lockless for hot paths
34
 35 (*) APPENDIX A: What lockdep does to work aggressively
36
37 (*) APPENDIX B: How to avoid adding false dependencies
38
39
40==========
41Background
42==========
43
44What causes deadlock
45--------------------
46
47A deadlock occurs when a context is waiting for an event to happen,
48which is impossible because another (or the) context who can trigger the
49event is also waiting for another (or the) event to happen, which is
50also impossible due to the same reason.
51
52For example:
53
54 A context going to trigger event C is waiting for event A to happen.
55 A context going to trigger event A is waiting for event B to happen.
56 A context going to trigger event B is waiting for event C to happen.
57
58A deadlock occurs when these three wait operations run at the same time,
59because event C cannot be triggered if event A does not happen, which in
60turn cannot be triggered if event B does not happen, which in turn
61cannot be triggered if event C does not happen. After all, no event can
62be triggered since any of them never meets its condition to wake up.
63
64A dependency might exist between two waiters and a deadlock might happen
 65due to an incorrect relationship between dependencies. Thus, we must
66define what a dependency is first. A dependency exists between them if:
67
68 1. There are two waiters waiting for each event at a given time.
69 2. The only way to wake up each waiter is to trigger its event.
70 3. Whether one can be woken up depends on whether the other can.
71
72Each wait in the example creates its dependency like:
73
74 Event C depends on event A.
75 Event A depends on event B.
76 Event B depends on event C.
77
78 NOTE: Precisely speaking, a dependency is one between whether a
79 waiter for an event can be woken up and whether another waiter for
80 another event can be woken up. However from now on, we will describe
81 a dependency as if it's one between an event and another event for
82 simplicity.
83
84And they form circular dependencies like:
85
86 -> C -> A -> B -
87 / \
88 \ /
89 ----------------
90
91 where 'A -> B' means that event A depends on event B.
92
93Such circular dependencies lead to a deadlock since no waiter can meet
94its condition to wake up as described.
95
96CONCLUSION
97
98Circular dependencies cause a deadlock.
99
100
101How lockdep works
102-----------------
103
104Lockdep tries to detect a deadlock by checking dependencies created by
105lock operations, acquire and release. Waiting for a lock corresponds to
106waiting for an event, and releasing a lock corresponds to triggering an
107event in the previous section.
108
109In short, lockdep does:
110
111 1. Detect a new dependency.
112 2. Add the dependency into a global graph.
113 3. Check if that makes dependencies circular.
114 4. Report a deadlock or its possibility if so.
115
116For example, consider a graph built by lockdep that looks like:
117
118 A -> B -
119 \
120 -> E
121 /
122 C -> D -
123
124 where A, B,..., E are different lock classes.
125
126Lockdep will add a dependency into the graph on detection of a new
127dependency. For example, it will add a dependency 'E -> C' when a new
128dependency between lock E and lock C is detected. Then the graph will be:
129
130 A -> B -
131 \
132 -> E -
133 / \
134 -> C -> D - \
135 / /
136 \ /
137 ------------------
138
139 where A, B,..., E are different lock classes.
140
141This graph contains a subgraph which demonstrates circular dependencies:
142
143 -> E -
144 / \
145 -> C -> D - \
146 / /
147 \ /
148 ------------------
149
150 where C, D and E are different lock classes.
151
152This is the condition under which a deadlock might occur. Lockdep
153reports it on detection after adding a new dependency. This is the way
154how lockdep works.
155
156CONCLUSION
157
158Lockdep detects a deadlock or its possibility by checking if circular
159dependencies were created after adding each new dependency.
160
161
162==========
163Limitation
164==========
165
166Limit lockdep
167-------------
168
169Limiting lockdep to work on only typical locks e.g. spin locks and
170mutexes, which are released within the acquire context, the
171implementation becomes simple but its capacity for detection becomes
 172limited. Let's check pros and cons in the next section.
173
174
175Pros from the limitation
176------------------------
177
178Given the limitation, when acquiring a lock, locks in a held_locks
179cannot be released if the context cannot acquire it so has to wait to
180acquire it, which means all waiters for the locks in the held_locks are
181stuck. It's an exact case to create dependencies between each lock in
182the held_locks and the lock to acquire.
183
184For example:
185
186 CONTEXT X
187 ---------
188 acquire A
189 acquire B /* Add a dependency 'A -> B' */
190 release B
191 release A
192
193 where A and B are different lock classes.
194
195When acquiring lock A, the held_locks of CONTEXT X is empty thus no
196dependency is added. But when acquiring lock B, lockdep detects and adds
197a new dependency 'A -> B' between lock A in the held_locks and lock B.
198They can be simply added whenever acquiring each lock.
199
200And data required by lockdep exists in a local structure, held_locks
201embedded in task_struct. Forcing to access the data within the context,
202lockdep can avoid racy problems without explicit locks while handling
203the local data.
204
205Lastly, lockdep only needs to keep locks currently being held, to build
206a dependency graph. However, relaxing the limitation, it needs to keep
207even locks already released, because a decision whether they created
208dependencies might be long-deferred.
209
210To sum up, we can expect several advantages from the limitation:
211
212 1. Lockdep can easily identify a dependency when acquiring a lock.
213 2. Races are avoidable while accessing local locks in a held_locks.
214 3. Lockdep only needs to keep locks currently being held.
215
216CONCLUSION
217
218Given the limitation, the implementation becomes simple and efficient.
219
220
221Cons from the limitation
222------------------------
223
224Given the limitation, lockdep is applicable only to typical locks. For
225example, page locks for page access or completions for synchronization
226cannot work with lockdep.
227
228Can we detect deadlocks below, under the limitation?
229
230Example 1:
231
232 CONTEXT X CONTEXT Y CONTEXT Z
233 --------- --------- ----------
234 mutex_lock A
235 lock_page B
236 lock_page B
237 mutex_lock A /* DEADLOCK */
238 unlock_page B held by X
239 unlock_page B
240 mutex_unlock A
241 mutex_unlock A
242
243 where A and B are different lock classes.
244
245No, we cannot.
246
247Example 2:
248
249 CONTEXT X CONTEXT Y
250 --------- ---------
251 mutex_lock A
252 mutex_lock A
253 wait_for_complete B /* DEADLOCK */
254 complete B
255 mutex_unlock A
256 mutex_unlock A
257
258 where A is a lock class and B is a completion variable.
259
260No, we cannot.
261
262CONCLUSION
263
264Given the limitation, lockdep cannot detect a deadlock or its
265possibility caused by page locks or completions.
266
267
268Relax the limitation
269--------------------
270
271Under the limitation, things to create dependencies are limited to
272typical locks. However, synchronization primitives like page locks and
273completions, which are allowed to be released in any context, also
274create dependencies and can cause a deadlock. So lockdep should track
275these locks to do a better job. We have to relax the limitation for
276these locks to work with lockdep.
277
278Detecting dependencies is very important for lockdep to work because
279adding a dependency means adding an opportunity to check whether it
280causes a deadlock. The more lockdep adds dependencies, the more it
281thoroughly works. Thus Lockdep has to do its best to detect and add as
282many true dependencies into a graph as possible.
283
284For example, considering only typical locks, lockdep builds a graph like:
285
286 A -> B -
287 \
288 -> E
289 /
290 C -> D -
291
292 where A, B,..., E are different lock classes.
293
294On the other hand, under the relaxation, additional dependencies might
295be created and added. Assuming additional 'FX -> C' and 'E -> GX' are
296added thanks to the relaxation, the graph will be:
297
298 A -> B -
299 \
300 -> E -> GX
301 /
302 FX -> C -> D -
303
304 where A, B,..., E, FX and GX are different lock classes, and a suffix
305 'X' is added on non-typical locks.
306
307The latter graph gives us more chances to check circular dependencies
308than the former. However, it might suffer performance degradation since
309relaxing the limitation, with which design and implementation of lockdep
310can be efficient, might introduce inefficiency inevitably. So lockdep
311should provide two options, strong detection and efficient detection.
312
313Choosing efficient detection:
314
315 Lockdep works with only locks restricted to be released within the
316 acquire context. However, lockdep works efficiently.
317
318Choosing strong detection:
319
320 Lockdep works with all synchronization primitives. However, lockdep
321 suffers performance degradation.
322
323CONCLUSION
324
325Relaxing the limitation, lockdep can add additional dependencies giving
326additional opportunities to check circular dependencies.
327
328
329============
330Crossrelease
331============
332
333Introduce crossrelease
334----------------------
335
336In order to allow lockdep to handle additional dependencies by what
337might be released in any context, namely 'crosslock', we have to be able
338to identify those created by crosslocks. The proposed 'crossrelease'
 339feature provides a way to do that.
340
341Crossrelease feature has to do:
342
343 1. Identify dependencies created by crosslocks.
344 2. Add the dependencies into a dependency graph.
345
346That's all. Once a meaningful dependency is added into graph, then
347lockdep would work with the graph as it did. The most important thing
348crossrelease feature has to do is to correctly identify and add true
349dependencies into the global graph.
350
351A dependency e.g. 'A -> B' can be identified only in the A's release
352context because a decision required to identify the dependency can be
353made only in the release context. That is to decide whether A can be
354released so that a waiter for A can be woken up. It cannot be made in
355other than the A's release context.
356
357It's no matter for typical locks because each acquire context is same as
358its release context, thus lockdep can decide whether a lock can be
359released in the acquire context. However for crosslocks, lockdep cannot
360make the decision in the acquire context but has to wait until the
361release context is identified.
362
363Therefore, deadlocks by crosslocks cannot be detected just when it
364happens, because those cannot be identified until the crosslocks are
 365released. However, deadlock possibilities can be detected and it's very
 366worthwhile. See 'APPENDIX A' section to check why.
367
368CONCLUSION
369
370Using crossrelease feature, lockdep can work with what might be released
371in any context, namely crosslock.
372
373
374Introduce commit
375----------------
376
377Since crossrelease defers the work adding true dependencies of
378crosslocks until they are actually released, crossrelease has to queue
379all acquisitions which might create dependencies with the crosslocks.
380Then it identifies dependencies using the queued data in batches at a
381proper time. We call it 'commit'.
382
383There are four types of dependencies:
384
3851. TT type: 'typical lock A -> typical lock B'
386
387 Just when acquiring B, lockdep can see it's in the A's release
388 context. So the dependency between A and B can be identified
389 immediately. Commit is unnecessary.
390
3912. TC type: 'typical lock A -> crosslock BX'
392
393 Just when acquiring BX, lockdep can see it's in the A's release
394 context. So the dependency between A and BX can be identified
395 immediately. Commit is unnecessary, too.
396
3973. CT type: 'crosslock AX -> typical lock B'
398
399 When acquiring B, lockdep cannot identify the dependency because
400 there's no way to know if it's in the AX's release context. It has
401 to wait until the decision can be made. Commit is necessary.
402
4034. CC type: 'crosslock AX -> crosslock BX'
404
405 When acquiring BX, lockdep cannot identify the dependency because
406 there's no way to know if it's in the AX's release context. It has
407 to wait until the decision can be made. Commit is necessary.
408 But, handling CC type is not implemented yet. It's a future work.
409
410Lockdep can work without commit for typical locks, but commit step is
411necessary once crosslocks are involved. Introducing commit, lockdep
412performs three steps. What lockdep does in each step is:
413
4141. Acquisition: For typical locks, lockdep does what it originally did
415 and queues the lock so that CT type dependencies can be checked using
416 it at the commit step. For crosslocks, it saves data which will be
417 used at the commit step and increases a reference count for it.
418
 4192. Commit: No action is required for typical locks. For crosslocks,
420 lockdep adds CT type dependencies using the data saved at the
421 acquisition step.
422
4233. Release: No changes are required for typical locks. When a crosslock
424 is released, it decreases a reference count for it.
425
426CONCLUSION
427
428Crossrelease introduces commit step to handle dependencies of crosslocks
429in batches at a proper time.
430
431
432==============
433Implementation
434==============
435
436Data structures
437---------------
438
439Crossrelease introduces two main data structures.
440
4411. hist_lock
442
443 This is an array embedded in task_struct, for keeping lock history so
444 that dependencies can be added using them at the commit step. Since
445 it's local data, it can be accessed locklessly in the owner context.
446 The array is filled at the acquisition step and consumed at the
447 commit step. And it's managed in circular manner.
448
4492. cross_lock
450
451 One per lockdep_map exists. This is for keeping data of crosslocks
452 and used at the commit step.
453
454
455How crossrelease works
456----------------------
457
 458It's the key of how crossrelease works, to defer necessary work to an
 459appropriate point in time and perform it at once at the commit step.
460Let's take a look with examples step by step, starting from how lockdep
461works without crossrelease for typical locks.
462
463 acquire A /* Push A onto held_locks */
464 acquire B /* Push B onto held_locks and add 'A -> B' */
465 acquire C /* Push C onto held_locks and add 'B -> C' */
466 release C /* Pop C from held_locks */
467 release B /* Pop B from held_locks */
468 release A /* Pop A from held_locks */
469
470 where A, B and C are different lock classes.
471
472 NOTE: This document assumes that readers already understand how
473 lockdep works without crossrelease thus omits details. But there's
474 one thing to note. Lockdep pretends to pop a lock from held_locks
475 when releasing it. But it's subtly different from the original pop
 476 operation because lockdep allows other than the top to be popped.
477
478In this case, lockdep adds 'the top of held_locks -> the lock to acquire'
479dependency every time acquiring a lock.
480
481After adding 'A -> B', a dependency graph will be:
482
483 A -> B
484
485 where A and B are different lock classes.
486
487And after adding 'B -> C', the graph will be:
488
489 A -> B -> C
490
491 where A, B and C are different lock classes.
492
 493Let's perform commit step even for typical locks to add dependencies.
494Of course, commit step is not necessary for them, however, it would work
495well because this is a more general way.
496
497 acquire A
498 /*
499 * Queue A into hist_locks
500 *
501 * In hist_locks: A
502 * In graph: Empty
503 */
504
505 acquire B
506 /*
507 * Queue B into hist_locks
508 *
509 * In hist_locks: A, B
510 * In graph: Empty
511 */
512
513 acquire C
514 /*
515 * Queue C into hist_locks
516 *
517 * In hist_locks: A, B, C
518 * In graph: Empty
519 */
520
521 commit C
522 /*
523 * Add 'C -> ?'
524 * Answer the following to decide '?'
525 * What has been queued since acquire C: Nothing
526 *
527 * In hist_locks: A, B, C
528 * In graph: Empty
529 */
530
531 release C
532
533 commit B
534 /*
535 * Add 'B -> ?'
536 * Answer the following to decide '?'
537 * What has been queued since acquire B: C
538 *
539 * In hist_locks: A, B, C
540 * In graph: 'B -> C'
541 */
542
543 release B
544
545 commit A
546 /*
547 * Add 'A -> ?'
548 * Answer the following to decide '?'
549 * What has been queued since acquire A: B, C
550 *
551 * In hist_locks: A, B, C
552 * In graph: 'B -> C', 'A -> B', 'A -> C'
553 */
554
555 release A
556
557 where A, B and C are different lock classes.
558
559In this case, dependencies are added at the commit step as described.
560
561After commits for A, B and C, the graph will be:
562
563 A -> B -> C
564
565 where A, B and C are different lock classes.
566
567 NOTE: A dependency 'A -> C' is optimized out.
568
569We can see the former graph built without commit step is same as the
570latter graph built using commit steps. Of course the former way leads to
571earlier finish for building the graph, which means we can detect a
 572deadlock or its possibility sooner. So the former way would be preferred
573when possible. But we cannot avoid using the latter way for crosslocks.
574
575Let's look at how commit steps work for crosslocks. In this case, the
576commit step is performed only on crosslock AX as real. And it assumes
577that the AX release context is different from the AX acquire context.
578
579 BX RELEASE CONTEXT BX ACQUIRE CONTEXT
580 ------------------ ------------------
581 acquire A
582 /*
583 * Push A onto held_locks
584 * Queue A into hist_locks
585 *
586 * In held_locks: A
587 * In hist_locks: A
588 * In graph: Empty
589 */
590
591 acquire BX
592 /*
593 * Add 'the top of held_locks -> BX'
594 *
595 * In held_locks: A
596 * In hist_locks: A
597 * In graph: 'A -> BX'
598 */
599
600 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
601 It must be guaranteed that the following operations are seen after
602 acquiring BX globally. It can be done by things like barrier.
603 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
604
605 acquire C
606 /*
607 * Push C onto held_locks
608 * Queue C into hist_locks
609 *
610 * In held_locks: C
611 * In hist_locks: C
612 * In graph: 'A -> BX'
613 */
614
615 release C
616 /*
617 * Pop C from held_locks
618 *
619 * In held_locks: Empty
620 * In hist_locks: C
621 * In graph: 'A -> BX'
622 */
623 acquire D
624 /*
625 * Push D onto held_locks
626 * Queue D into hist_locks
627 * Add 'the top of held_locks -> D'
628 *
629 * In held_locks: A, D
630 * In hist_locks: A, D
631 * In graph: 'A -> BX', 'A -> D'
632 */
633 acquire E
634 /*
635 * Push E onto held_locks
636 * Queue E into hist_locks
637 *
638 * In held_locks: E
639 * In hist_locks: C, E
640 * In graph: 'A -> BX', 'A -> D'
641 */
642
643 release E
644 /*
645 * Pop E from held_locks
646 *
647 * In held_locks: Empty
648 * In hist_locks: D, E
649 * In graph: 'A -> BX', 'A -> D'
650 */
651 release D
652 /*
653 * Pop D from held_locks
654 *
655 * In held_locks: A
656 * In hist_locks: A, D
657 * In graph: 'A -> BX', 'A -> D'
658 */
659 commit BX
660 /*
661 * Add 'BX -> ?'
662 * What has been queued since acquire BX: C, E
663 *
664 * In held_locks: Empty
665 * In hist_locks: D, E
666 * In graph: 'A -> BX', 'A -> D',
667 * 'BX -> C', 'BX -> E'
668 */
669
670 release BX
671 /*
672 * In held_locks: Empty
673 * In hist_locks: D, E
674 * In graph: 'A -> BX', 'A -> D',
675 * 'BX -> C', 'BX -> E'
676 */
677 release A
678 /*
679 * Pop A from held_locks
680 *
681 * In held_locks: Empty
682 * In hist_locks: A, D
683 * In graph: 'A -> BX', 'A -> D',
684 * 'BX -> C', 'BX -> E'
685 */
686
687 where A, BX, C,..., E are different lock classes, and a suffix 'X' is
688 added on crosslocks.
689
 690Crossrelease considers all acquisitions after acquiring BX are
691candidates which might create dependencies with BX. True dependencies
692will be determined when identifying the release context of BX. Meanwhile,
693all typical locks are queued so that they can be used at the commit step.
694And then two dependencies 'BX -> C' and 'BX -> E' are added at the
695commit step when identifying the release context.
696
697The final graph will be, with crossrelease:
698
699 -> C
700 /
701 -> BX -
702 / \
703 A - -> E
704 \
705 -> D
706
707 where A, BX, C,..., E are different lock classes, and a suffix 'X' is
708 added on crosslocks.
709
710However, the final graph will be, without crossrelease:
711
712 A -> D
713
714 where A and D are different lock classes.
715
716The former graph has three more dependencies, 'A -> BX', 'BX -> C' and
717'BX -> E' giving additional opportunities to check if they cause
718deadlocks. This way lockdep can detect a deadlock or its possibility
719caused by crosslocks.
720
721CONCLUSION
722
723We checked how crossrelease works with several examples.
724
725
726=============
727Optimizations
728=============
729
730Avoid duplication
731-----------------
732
733Crossrelease feature uses a cache like what lockdep already uses for
734dependency chains, but this time it's for caching CT type dependencies.
735Once that dependency is cached, the same will never be added again.
736
737
738Lockless for hot paths
739----------------------
740
741To keep all locks for later use at the commit step, crossrelease adopts
742a local array embedded in task_struct, which makes access to the data
743lockless by forcing it to happen only within the owner context. It's
 744like how lockdep handles held_locks. Lockless implementation is important
745since typical locks are very frequently acquired and released.
746
747
 748==================================================
 749APPENDIX A: What lockdep does to work aggressively
 750==================================================
751
752A deadlock actually occurs when all wait operations creating circular
753dependencies run at the same time. Even though they don't, a potential
754deadlock exists if the problematic dependencies exist. Thus it's
755meaningful to detect not only an actual deadlock but also its potential
756possibility. The latter is rather valuable. When a deadlock occurs
757actually, we can identify what happens in the system by some means or
 758other even without lockdep. However, there's no way to detect possibility
759without lockdep unless the whole code is parsed in head. It's terrible.
760Lockdep does the both, and crossrelease only focuses on the latter.
761
762Whether or not a deadlock actually occurs depends on several factors.
763For example, what order contexts are switched in is a factor. Assuming
764circular dependencies exist, a deadlock would occur when contexts are
765switched so that all wait operations creating the dependencies run
766simultaneously. Thus to detect a deadlock possibility even in the case
 767that it has not occurred yet, lockdep should consider all possible
768combinations of dependencies, trying to:
769
7701. Use a global dependency graph.
771
772 Lockdep combines all dependencies into one global graph and uses them,
773 regardless of which context generates them or what order contexts are
774 switched in. Aggregated dependencies are only considered so they are
775 prone to be circular if a problem exists.
776
7772. Check dependencies between classes instead of instances.
778
779 What actually causes a deadlock are instances of lock. However,
780 lockdep checks dependencies between classes instead of instances.
781 This way lockdep can detect a deadlock which has not happened but
782 might happen in future by others but the same class.
783
7843. Assume all acquisitions lead to waiting.
785
786 Although locks might be acquired without waiting which is essential
787 to create dependencies, lockdep assumes all acquisitions lead to
788 waiting since it might be true some time or another.
789
790CONCLUSION
791
792Lockdep detects not only an actual deadlock but also its possibility,
793and the latter is more valuable.
794
795
796==================================================
797APPENDIX B: How to avoid adding false dependencies
798==================================================
799
800Remind what a dependency is. A dependency exists if:
801
802 1. There are two waiters waiting for each event at a given time.
803 2. The only way to wake up each waiter is to trigger its event.
804 3. Whether one can be woken up depends on whether the other can.
805
806For example:
807
808 acquire A
809 acquire B /* A dependency 'A -> B' exists */
810 release B
811 release A
812
813 where A and B are different lock classes.
814
 815A dependency 'A -> B' exists since:
816
817 1. A waiter for A and a waiter for B might exist when acquiring B.
818 2. Only way to wake up each is to release what it waits for.
819 3. Whether the waiter for A can be woken up depends on whether the
820 other can. IOW, TASK X cannot release A if it fails to acquire B.
821
822For another example:
823
824 TASK X TASK Y
825 ------ ------
826 acquire AX
827 acquire B /* A dependency 'AX -> B' exists */
828 release B
829 release AX held by Y
830
831 where AX and B are different lock classes, and a suffix 'X' is added
832 on crosslocks.
833
834Even in this case involving crosslocks, the same rule can be applied. A
 835dependency 'AX -> B' exists since:
836
837 1. A waiter for AX and a waiter for B might exist when acquiring B.
838 2. Only way to wake up each is to release what it waits for.
839 3. Whether the waiter for AX can be woken up depends on whether the
840 other can. IOW, TASK X cannot release AX if it fails to acquire B.
841
842Let's take a look at more complicated example:
843
844 TASK X TASK Y
845 ------ ------
846 acquire B
847 release B
848 fork Y
849 acquire AX
850 acquire C /* A dependency 'AX -> C' exists */
851 release C
852 release AX held by Y
853
854 where AX, B and C are different lock classes, and a suffix 'X' is
855 added on crosslocks.
856
857Does a dependency 'AX -> B' exist? Nope.
858
859Two waiters are essential to create a dependency. However, waiters for
860AX and B to create 'AX -> B' cannot exist at the same time in this
861example. Thus the dependency 'AX -> B' cannot be created.
862
863It would be ideal if the full set of true ones can be considered. But
864we can ensure nothing but what actually happened. Relying on what
865actually happens at runtime, we can anyway add only true ones, though
866they might be a subset of true ones. It's similar to how lockdep works
867for typical locks. There might be more true dependencies than what
868lockdep has detected in runtime. Lockdep has no choice but to rely on
869what actually happens. Crossrelease also relies on it.
870
871CONCLUSION
872
873Relying on what actually happens, lockdep can avoid adding false
874dependencies.
diff --git a/Documentation/media/dvb-drivers/frontends.rst b/Documentation/media/dvb-drivers/frontends.rst
new file mode 100644
index 000000000000..1f5f57989196
--- /dev/null
+++ b/Documentation/media/dvb-drivers/frontends.rst
@@ -0,0 +1,30 @@
1****************
2Frontend drivers
3****************
4
5Frontend attach headers
6***********************
7
8.. Keep it on alphabetic order
9
10.. kernel-doc:: drivers/media/dvb-frontends/a8293.h
11.. kernel-doc:: drivers/media/dvb-frontends/af9013.h
12.. kernel-doc:: drivers/media/dvb-frontends/ascot2e.h
13.. kernel-doc:: drivers/media/dvb-frontends/cxd2820r.h
14.. kernel-doc:: drivers/media/dvb-frontends/drxk.h
15.. kernel-doc:: drivers/media/dvb-frontends/dvb-pll.h
16.. kernel-doc:: drivers/media/dvb-frontends/helene.h
17.. kernel-doc:: drivers/media/dvb-frontends/horus3a.h
18.. kernel-doc:: drivers/media/dvb-frontends/ix2505v.h
19.. kernel-doc:: drivers/media/dvb-frontends/m88ds3103.h
20.. kernel-doc:: drivers/media/dvb-frontends/mb86a20s.h
21.. kernel-doc:: drivers/media/dvb-frontends/mn88472.h
22.. kernel-doc:: drivers/media/dvb-frontends/rtl2830.h
23.. kernel-doc:: drivers/media/dvb-frontends/rtl2832.h
24.. kernel-doc:: drivers/media/dvb-frontends/rtl2832_sdr.h
25.. kernel-doc:: drivers/media/dvb-frontends/stb6000.h
26.. kernel-doc:: drivers/media/dvb-frontends/tda10071.h
27.. kernel-doc:: drivers/media/dvb-frontends/tda826x.h
28.. kernel-doc:: drivers/media/dvb-frontends/zd1301_demod.h
29.. kernel-doc:: drivers/media/dvb-frontends/zl10036.h
30
diff --git a/Documentation/media/dvb-drivers/index.rst b/Documentation/media/dvb-drivers/index.rst
index 376141143ae9..314e127d82e3 100644
--- a/Documentation/media/dvb-drivers/index.rst
+++ b/Documentation/media/dvb-drivers/index.rst
@@ -41,4 +41,5 @@ For more details see the file COPYING in the source distribution of Linux.
41 technisat 41 technisat
42 ttusb-dec 42 ttusb-dec
43 udev 43 udev
44 frontends
44 contributors 45 contributors
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 66e620866245..7d4b15977d61 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -9,6 +9,7 @@ Contents:
9 batman-adv 9 batman-adv
10 kapi 10 kapi
11 z8530book 11 z8530book
12 msg_zerocopy
12 13
13.. only:: subproject 14.. only:: subproject
14 15
@@ -16,4 +17,3 @@ Contents:
16 ======= 17 =======
17 18
18 * :ref:`genindex` 19 * :ref:`genindex`
19
diff --git a/Documentation/networking/msg_zerocopy.rst b/Documentation/networking/msg_zerocopy.rst
index 77f6d7e25cfd..291a01264967 100644
--- a/Documentation/networking/msg_zerocopy.rst
+++ b/Documentation/networking/msg_zerocopy.rst
@@ -72,6 +72,10 @@ this flag, a process must first signal intent by setting a socket option:
72 if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one))) 72 if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)))
73 error(1, errno, "setsockopt zerocopy"); 73 error(1, errno, "setsockopt zerocopy");
74 74
75Setting the socket option only works when the socket is in its initial
76(TCP_CLOSED) state. Trying to set the option for a socket returned by accept(),
77for example, will lead to an EBUSY error. In this case, the option should be set
78to the listening socket and it will be inherited by the accepted sockets.
75 79
76Transmission 80Transmission
77------------ 81------------
diff --git a/Documentation/scsi/scsi_mid_low_api.txt b/Documentation/scsi/scsi_mid_low_api.txt
index 6338400eed73..2c31d9ee6776 100644
--- a/Documentation/scsi/scsi_mid_low_api.txt
+++ b/Documentation/scsi/scsi_mid_low_api.txt
@@ -319,12 +319,12 @@ struct Scsi_Host:
319 instance. If the reference count reaches 0 then the given instance 319 instance. If the reference count reaches 0 then the given instance
320 is freed 320 is freed
321 321
322The Scsi_device structure has had reference counting infrastructure added. 322The scsi_device structure has had reference counting infrastructure added.
323This effectively spreads the ownership of struct Scsi_device instances 323This effectively spreads the ownership of struct scsi_device instances
324across the various SCSI layers which use them. Previously such instances 324across the various SCSI layers which use them. Previously such instances
325were exclusively owned by the mid level. See the access functions declared 325were exclusively owned by the mid level. See the access functions declared
326towards the end of include/scsi/scsi_device.h . If an LLD wants to keep 326towards the end of include/scsi/scsi_device.h . If an LLD wants to keep
327a copy of a pointer to a Scsi_device instance it should use scsi_device_get() 327a copy of a pointer to a scsi_device instance it should use scsi_device_get()
328to bump its reference count. When it is finished with the pointer it can 328to bump its reference count. When it is finished with the pointer it can
329use scsi_device_put() to decrement its reference count (and potentially 329use scsi_device_put() to decrement its reference count (and potentially
330delete it). 330delete it).
diff --git a/Documentation/usb/gadget-testing.txt b/Documentation/usb/gadget-testing.txt
index 441a4b9b666f..5908a21fddb6 100644
--- a/Documentation/usb/gadget-testing.txt
+++ b/Documentation/usb/gadget-testing.txt
@@ -693,7 +693,7 @@ such specification consists of a number of lines with an interval value
693in each line. The rules stated above are best illustrated with an example: 693in each line. The rules stated above are best illustrated with an example:
694 694
695# mkdir functions/uvc.usb0/control/header/h 695# mkdir functions/uvc.usb0/control/header/h
696# cd functions/uvc.usb0/control/header/h 696# cd functions/uvc.usb0/control/
697# ln -s header/h class/fs 697# ln -s header/h class/fs
698# ln -s header/h class/ss 698# ln -s header/h class/ss
699# mkdir -p functions/uvc.usb0/streaming/uncompressed/u/360p 699# mkdir -p functions/uvc.usb0/streaming/uncompressed/u/360p
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index f670e4b9e7f3..fc3ae951bc07 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2901,14 +2901,19 @@ userspace buffer and its length:
2901 2901
2902struct kvm_s390_irq_state { 2902struct kvm_s390_irq_state {
2903 __u64 buf; 2903 __u64 buf;
2904 __u32 flags; 2904 __u32 flags; /* will stay unused for compatibility reasons */
2905 __u32 len; 2905 __u32 len;
2906 __u32 reserved[4]; 2906 __u32 reserved[4]; /* will stay unused for compatibility reasons */
2907}; 2907};
2908 2908
2909Userspace passes in the above struct and for each pending interrupt a 2909Userspace passes in the above struct and for each pending interrupt a
2910struct kvm_s390_irq is copied to the provided buffer. 2910struct kvm_s390_irq is copied to the provided buffer.
2911 2911
2912The structure contains a flags and a reserved field for future extensions. As
2913the kernel never checked for flags == 0 and QEMU never pre-zeroed flags and
2914reserved, these fields cannot be used in the future without breaking
2915compatibility.
2916
2912If -ENOBUFS is returned the buffer provided was too small and userspace 2917If -ENOBUFS is returned the buffer provided was too small and userspace
2913may retry with a bigger buffer. 2918may retry with a bigger buffer.
2914 2919
@@ -2932,10 +2937,14 @@ containing a struct kvm_s390_irq_state:
2932 2937
2933struct kvm_s390_irq_state { 2938struct kvm_s390_irq_state {
2934 __u64 buf; 2939 __u64 buf;
2940 __u32 flags; /* will stay unused for compatibility reasons */
2935 __u32 len; 2941 __u32 len;
2936 __u32 pad; 2942 __u32 reserved[4]; /* will stay unused for compatibility reasons */
2937}; 2943};
2938 2944
2945The restrictions for flags and reserved apply as well.
2946(see KVM_S390_GET_IRQ_STATE)
2947
2939The userspace memory referenced by buf contains a struct kvm_s390_irq 2948The userspace memory referenced by buf contains a struct kvm_s390_irq
2940for each interrupt to be injected into the guest. 2949for each interrupt to be injected into the guest.
2941If one of the interrupts could not be injected for some reason the 2950If one of the interrupts could not be injected for some reason the
@@ -3394,6 +3403,52 @@ invalid, if invalid pages are written to (e.g. after the end of memory)
3394or if no page table is present for the addresses (e.g. when using 3403or if no page table is present for the addresses (e.g. when using
3395hugepages). 3404hugepages).
3396 3405
34064.108 KVM_PPC_GET_CPU_CHAR
3407
3408Capability: KVM_CAP_PPC_GET_CPU_CHAR
3409Architectures: powerpc
3410Type: vm ioctl
3411Parameters: struct kvm_ppc_cpu_char (out)
3412Returns: 0 on successful completion
3413 -EFAULT if struct kvm_ppc_cpu_char cannot be written
3414
3415This ioctl gives userspace information about certain characteristics
3416of the CPU relating to speculative execution of instructions and
3417possible information leakage resulting from speculative execution (see
3418CVE-2017-5715, CVE-2017-5753 and CVE-2017-5754). The information is
3419returned in struct kvm_ppc_cpu_char, which looks like this:
3420
3421struct kvm_ppc_cpu_char {
3422 __u64 character; /* characteristics of the CPU */
3423 __u64 behaviour; /* recommended software behaviour */
3424 __u64 character_mask; /* valid bits in character */
3425 __u64 behaviour_mask; /* valid bits in behaviour */
3426};
3427
3428For extensibility, the character_mask and behaviour_mask fields
3429indicate which bits of character and behaviour have been filled in by
3430the kernel. If the set of defined bits is extended in future then
3431userspace will be able to tell whether it is running on a kernel that
3432knows about the new bits.
3433
3434The character field describes attributes of the CPU which can help
3435with preventing inadvertent information disclosure - specifically,
3436whether there is an instruction to flash-invalidate the L1 data cache
3437(ori 30,30,0 or mtspr SPRN_TRIG2,rN), whether the L1 data cache is set
3438to a mode where entries can only be used by the thread that created
3439them, whether the bcctr[l] instruction prevents speculation, and
3440whether a speculation barrier instruction (ori 31,31,0) is provided.
3441
3442The behaviour field describes actions that software should take to
3443prevent inadvertent information disclosure, and thus describes which
3444vulnerabilities the hardware is subject to; specifically whether the
3445L1 data cache should be flushed when returning to user mode from the
3446kernel, and whether a speculation barrier should be placed between an
3447array bounds check and the array access.
3448
3449These fields use the same bit definitions as the new
3450H_GET_CPU_CHARACTERISTICS hypercall.
3451
33975. The kvm_run structure 34525. The kvm_run structure
3398------------------------ 3453------------------------
3399 3454
diff --git a/Documentation/vm/zswap.txt b/Documentation/vm/zswap.txt
index 89fff7d611cc..0b3a1148f9f0 100644
--- a/Documentation/vm/zswap.txt
+++ b/Documentation/vm/zswap.txt
@@ -98,5 +98,25 @@ request is made for a page in an old zpool, it is uncompressed using its
98original compressor. Once all pages are removed from an old zpool, the zpool 98original compressor. Once all pages are removed from an old zpool, the zpool
99and its compressor are freed. 99and its compressor are freed.
100 100
101Some of the pages in zswap are same-value filled pages (i.e. contents of the
102page have same value or repetitive pattern). These pages include zero-filled
103pages and they are handled differently. During store operation, a page is
104checked if it is a same-value filled page before compressing it. If true, the
105compressed length of the page is set to zero and the pattern or same-filled
106value is stored.
107
108Same-value filled pages identification feature is enabled by default and can be
109disabled at boot time by setting the "same_filled_pages_enabled" attribute to 0,
110e.g. zswap.same_filled_pages_enabled=0. It can also be enabled and disabled at
111runtime using the sysfs "same_filled_pages_enabled" attribute, e.g.
112
113echo 1 > /sys/module/zswap/parameters/same_filled_pages_enabled
114
115When zswap same-filled page identification is disabled at runtime, it will stop
116checking for the same-value filled pages during store operation. However, the
117existing pages which are marked as same-value filled pages remain stored
118unchanged in zswap until they are either loaded or invalidated.
119
101A debugfs interface is provided for various statistic about pool size, number 120A debugfs interface is provided for various statistic about pool size, number
102of pages stored, and various counters for the reasons pages are rejected. 121of pages stored, same-value filled pages and various counters for the reasons
122pages are rejected.
diff --git a/Documentation/x86/pti.txt b/Documentation/x86/pti.txt
new file mode 100644
index 000000000000..5cd58439ad2d
--- /dev/null
+++ b/Documentation/x86/pti.txt
@@ -0,0 +1,186 @@
1Overview
2========
3
4Page Table Isolation (pti, previously known as KAISER[1]) is a
5countermeasure against attacks on the shared user/kernel address
6space such as the "Meltdown" approach[2].
7
8To mitigate this class of attacks, we create an independent set of
9page tables for use only when running userspace applications. When
10the kernel is entered via syscalls, interrupts or exceptions, the
11page tables are switched to the full "kernel" copy. When the system
12switches back to user mode, the user copy is used again.
13
14The userspace page tables contain only a minimal amount of kernel
15data: only what is needed to enter/exit the kernel such as the
16entry/exit functions themselves and the interrupt descriptor table
17(IDT). There are a few strictly unnecessary things that get mapped
18such as the first C function when entering an interrupt (see
19comments in pti.c).
20
21This approach helps to ensure that side-channel attacks leveraging
22the paging structures do not function when PTI is enabled. It can be
23enabled by setting CONFIG_PAGE_TABLE_ISOLATION=y at compile time.
24Once enabled at compile-time, it can be disabled at boot with the
25'nopti' or 'pti=' kernel parameters (see kernel-parameters.txt).
26
27Page Table Management
28=====================
29
30When PTI is enabled, the kernel manages two sets of page tables.
31The first set is very similar to the single set which is present in
32kernels without PTI. This includes a complete mapping of userspace
33that the kernel can use for things like copy_to_user().
34
35Although _complete_, the user portion of the kernel page tables is
36crippled by setting the NX bit in the top level. This ensures
37that any missed kernel->user CR3 switch will immediately crash
38userspace upon executing its first instruction.
39
40The userspace page tables map only the kernel data needed to enter
41and exit the kernel. This data is entirely contained in the 'struct
42cpu_entry_area' structure which is placed in the fixmap which gives
43each CPU's copy of the area a compile-time-fixed virtual address.
44
45For new userspace mappings, the kernel makes the entries in its
46page tables like normal. The only difference is when the kernel
47makes entries in the top (PGD) level. In addition to setting the
48entry in the main kernel PGD, a copy of the entry is made in the
49userspace page tables' PGD.
50
51This sharing at the PGD level also inherently shares all the lower
52layers of the page tables. This leaves a single, shared set of
53userspace page tables to manage. One PTE to lock, one set of
54accessed bits, dirty bits, etc...
55
56Overhead
57========
58
59Protection against side-channel attacks is important. But,
60this protection comes at a cost:
61
621. Increased Memory Use
63 a. Each process now needs an order-1 PGD instead of order-0.
64 (Consumes an additional 4k per process).
65 b. The 'cpu_entry_area' structure must be 2MB in size and 2MB
66 aligned so that it can be mapped by setting a single PMD
67 entry. This consumes nearly 2MB of RAM once the kernel
68 is decompressed, but no space in the kernel image itself.
69
702. Runtime Cost
71 a. CR3 manipulation to switch between the page table copies
72 must be done at interrupt, syscall, and exception entry
73 and exit (it can be skipped when the kernel is interrupted,
74 though.) Moves to CR3 are on the order of a hundred
75 cycles, and are required at every entry and exit.
76 b. A "trampoline" must be used for SYSCALL entry. This
77 trampoline depends on a smaller set of resources than the
78 non-PTI SYSCALL entry code, so requires mapping fewer
79 things into the userspace page tables. The downside is
80 that stacks must be switched at entry time.
81 c. Global pages are disabled for all kernel structures not
82 mapped into both kernel and userspace page tables. This
83 feature of the MMU allows different processes to share TLB
84 entries mapping the kernel. Losing the feature means more
85 TLB misses after a context switch. The actual loss of
86 performance is very small, however, never exceeding 1%.
87 d. Process Context IDentifiers (PCID) is a CPU feature that
88 allows us to skip flushing the entire TLB when switching page
89 tables by setting a special bit in CR3 when the page tables
90 are changed. This makes switching the page tables (at context
91 switch, or kernel entry/exit) cheaper. But, on systems with
92 PCID support, the context switch code must flush both the user
93 and kernel entries out of the TLB. The user PCID TLB flush is
94 deferred until the exit to userspace, minimizing the cost.
95 See intel.com/sdm for the gory PCID/INVPCID details.
96 e. The userspace page tables must be populated for each new
97 process. Even without PTI, the shared kernel mappings
98 are created by copying top-level (PGD) entries into each
99 new process. But, with PTI, there are now *two* kernel
100 mappings: one in the kernel page tables that maps everything
101 and one for the entry/exit structures. At fork(), we need to
102 copy both.
103 f. In addition to the fork()-time copying, there must also
104 be an update to the userspace PGD any time a set_pgd() is done
105 on a PGD used to map userspace. This ensures that the kernel
106 and userspace copies always map the same userspace
107 memory.
108 g. On systems without PCID support, each CR3 write flushes
109 the entire TLB. That means that each syscall, interrupt
110 or exception flushes the TLB.
111 h. INVPCID is a TLB-flushing instruction which allows flushing
112 of TLB entries for non-current PCIDs. Some systems support
113 PCIDs, but do not support INVPCID. On these systems, addresses
114 can only be flushed from the TLB for the current PCID. When
115 flushing a kernel address, we need to flush all PCIDs, so a
116 single kernel address flush will require a TLB-flushing CR3
117 write upon the next use of every PCID.
118
119Possible Future Work
120====================
1211. We can be more careful about not actually writing to CR3
122 unless its value is actually changed.
1232. Allow PTI to be enabled/disabled at runtime in addition to the
124 boot-time switching.
125
126Testing
127========
128
129To test stability of PTI, the following test procedure is recommended,
130ideally doing all of these in parallel:
131
1321. Set CONFIG_DEBUG_ENTRY=y
1332. Run several copies of all of the tools/testing/selftests/x86/ tests
134 (excluding MPX and protection_keys) in a loop on multiple CPUs for
135 several minutes. These tests frequently uncover corner cases in the
136 kernel entry code. In general, old kernels might cause these tests
137 themselves to crash, but they should never crash the kernel.
1383. Run the 'perf' tool in a mode (top or record) that generates many
139 frequent performance monitoring non-maskable interrupts (see "NMI"
140 in /proc/interrupts). This exercises the NMI entry/exit code which
141 is known to trigger bugs in code paths that did not expect to be
142 interrupted, including nested NMIs. Using "-c" boosts the rate of
143 NMIs, and using two -c with separate counters encourages nested NMIs
144 and less deterministic behavior.
145
146 while true; do perf record -c 10000 -e instructions,cycles -a sleep 10; done
147
1484. Launch a KVM virtual machine.
1495. Run 32-bit binaries on systems supporting the SYSCALL instruction.
150 This has been a lightly-tested code path and needs extra scrutiny.
151
152Debugging
153=========
154
155Bugs in PTI cause a few different signatures of crashes
156that are worth noting here.
157
158 * Failures of the selftests/x86 code. Usually a bug in one of the
159 more obscure corners of entry_64.S
160 * Crashes in early boot, especially around CPU bringup. Bugs
161 in the trampoline code or mappings cause these.
162 * Crashes at the first interrupt. Caused by bugs in entry_64.S,
163 like screwing up a page table switch. Also caused by
164 incorrectly mapping the IRQ handler entry code.
165 * Crashes at the first NMI. The NMI code is separate from main
166 interrupt handlers and can have bugs that do not affect
167 normal interrupts. Also caused by incorrectly mapping NMI
168 code. NMIs that interrupt the entry code must be very
169 careful and can be the cause of crashes that show up when
170 running perf.
171 * Kernel crashes at the first exit to userspace. entry_64.S
172 bugs, or failing to map some of the exit code.
173 * Crashes at first interrupt that interrupts userspace. The paths
174 in entry_64.S that return to userspace are sometimes separate
175 from the ones that return to the kernel.
176 * Double faults: overflowing the kernel stack because of page
177 faults upon page faults. Caused by touching non-pti-mapped
178 data in the entry code, or forgetting to switch to kernel
179 CR3 before calling into C functions which are not pti-mapped.
180 * Userspace segfaults early in boot, sometimes manifesting
181 as mount(8) failing to mount the rootfs. These have
182 tended to be TLB invalidation issues. Usually invalidating
183 the wrong PCID, or otherwise missing an invalidation.
184
1851. https://gruss.cc/files/kaiser.pdf
1862. https://meltdownattack.com/meltdown.pdf
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index 3448e675b462..ea91cb61a602 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -1,6 +1,4 @@
1 1
2<previous description obsolete, deleted>
3
4Virtual memory map with 4 level page tables: 2Virtual memory map with 4 level page tables:
5 3
60000000000000000 - 00007fffffffffff (=47 bits) user space, different per mm 40000000000000000 - 00007fffffffffff (=47 bits) user space, different per mm
@@ -14,13 +12,17 @@ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
14... unused hole ... 12... unused hole ...
15ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB) 13ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB)
16... unused hole ... 14... unused hole ...
15 vaddr_end for KASLR
16fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping
17fffffe8000000000 - fffffeffffffffff (=39 bits) LDT remap for PTI
17ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks 18ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
18... unused hole ... 19... unused hole ...
19ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space 20ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
20... unused hole ... 21... unused hole ...
21ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0 22ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0
22ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space (variable) 23ffffffffa0000000 - [fixmap start] (~1526 MB) module mapping space (variable)
23ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls 24[fixmap start] - ffffffffff5fffff kernel-internal fixmap range
25ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI
24ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole 26ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
25 27
26Virtual memory map with 5 level page tables: 28Virtual memory map with 5 level page tables:
@@ -29,26 +31,31 @@ Virtual memory map with 5 level page tables:
29hole caused by [56:63] sign extension 31hole caused by [56:63] sign extension
30ff00000000000000 - ff0fffffffffffff (=52 bits) guard hole, reserved for hypervisor 32ff00000000000000 - ff0fffffffffffff (=52 bits) guard hole, reserved for hypervisor
31ff10000000000000 - ff8fffffffffffff (=55 bits) direct mapping of all phys. memory 33ff10000000000000 - ff8fffffffffffff (=55 bits) direct mapping of all phys. memory
32ff90000000000000 - ff91ffffffffffff (=49 bits) hole 34ff90000000000000 - ff9fffffffffffff (=52 bits) LDT remap for PTI
33ff92000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space 35ffa0000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space (12800 TB)
34ffd2000000000000 - ffd3ffffffffffff (=49 bits) hole 36ffd2000000000000 - ffd3ffffffffffff (=49 bits) hole
35ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB) 37ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB)
36... unused hole ... 38... unused hole ...
37ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB) 39ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB)
38... unused hole ... 40... unused hole ...
41 vaddr_end for KASLR
42fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping
43... unused hole ...
39ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks 44ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
40... unused hole ... 45... unused hole ...
41ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space 46ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
42... unused hole ... 47... unused hole ...
43ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0 48ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0
44ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space 49ffffffffa0000000 - fffffffffeffffff (1520 MB) module mapping space
45ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls 50[fixmap start] - ffffffffff5fffff kernel-internal fixmap range
51ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI
46ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole 52ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
47 53
48Architecture defines a 64-bit virtual address. Implementations can support 54Architecture defines a 64-bit virtual address. Implementations can support
49less. Currently supported are 48- and 57-bit virtual addresses. Bits 63 55less. Currently supported are 48- and 57-bit virtual addresses. Bits 63
50through to the most-significant implemented bit are set to either all ones 56through to the most-significant implemented bit are sign extended.
51or all zero. This causes a hole between user space and kernel addresses. 57This causes a hole between user space and kernel addresses if you interpret them
58as unsigned.
52 59
53The direct mapping covers all memory in the system up to the highest 60The direct mapping covers all memory in the system up to the highest
54memory address (this means in some cases it can also include PCI memory 61memory address (this means in some cases it can also include PCI memory
@@ -58,19 +65,15 @@ vmalloc space is lazily synchronized into the different PML4/PML5 pages of
58the processes using the page fault handler, with init_top_pgt as 65the processes using the page fault handler, with init_top_pgt as
59reference. 66reference.
60 67
61Current X86-64 implementations support up to 46 bits of address space (64 TB),
62which is our current limit. This expands into MBZ space in the page tables.
63
64We map EFI runtime services in the 'efi_pgd' PGD in a 64Gb large virtual 68We map EFI runtime services in the 'efi_pgd' PGD in a 64Gb large virtual
65memory window (this size is arbitrary, it can be raised later if needed). 69memory window (this size is arbitrary, it can be raised later if needed).
66The mappings are not part of any other kernel PGD and are only available 70The mappings are not part of any other kernel PGD and are only available
67during EFI runtime calls. 71during EFI runtime calls.
68 72
69The module mapping space size changes based on the CONFIG requirements for the
70following fixmap section.
71
72Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all 73Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all
73physical memory, vmalloc/ioremap space and virtual memory map are randomized. 74physical memory, vmalloc/ioremap space and virtual memory map are randomized.
74Their order is preserved but their base will be offset early at boot time. 75Their order is preserved but their base will be offset early at boot time.
75 76
76-Andi Kleen, Jul 2004 77Be very careful vs. KASLR when changing anything here. The KASLR address
78range must not overlap with anything except the KASAN shadow area, which is
79correct as KASAN disables KASLR.
diff --git a/MAINTAINERS b/MAINTAINERS
index 1facaa8cd6c9..c588625c2250 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -62,7 +62,15 @@ trivial patch so apply some common sense.
62 62
637. When sending security related changes or reports to a maintainer 637. When sending security related changes or reports to a maintainer
64 please Cc: security@kernel.org, especially if the maintainer 64 please Cc: security@kernel.org, especially if the maintainer
65 does not respond. 65 does not respond. Please keep in mind that the security team is
66 a small set of people who can be efficient only when working on
67 verified bugs. Please only Cc: this list when you have identified
68 that the bug would present a short-term risk to other users if it
69 were publicly disclosed. For example, reports of address leaks do
70 not represent an immediate threat and are better handled publicly,
71 and ideally, should come with a patch proposal. Please do not send
72 automated reports to this list either. Such bugs will be handled
73 better and faster in the usual public places.
66 74
678. Happy hacking. 758. Happy hacking.
68 76
@@ -554,13 +562,13 @@ S: Orphan
554F: Documentation/filesystems/affs.txt 562F: Documentation/filesystems/affs.txt
555F: fs/affs/ 563F: fs/affs/
556 564
557AFS FILESYSTEM & AF_RXRPC SOCKET DOMAIN 565AFS FILESYSTEM
558M: David Howells <dhowells@redhat.com> 566M: David Howells <dhowells@redhat.com>
559L: linux-afs@lists.infradead.org 567L: linux-afs@lists.infradead.org
560S: Supported 568S: Supported
561F: fs/afs/ 569F: fs/afs/
562F: include/net/af_rxrpc.h 570F: include/trace/events/afs.h
563F: net/rxrpc/af_rxrpc.c 571F: Documentation/filesystems/afs.txt
564W: https://www.infradead.org/~dhowells/kafs/ 572W: https://www.infradead.org/~dhowells/kafs/
565 573
566AGPGART DRIVER 574AGPGART DRIVER
@@ -859,7 +867,8 @@ F: kernel/configs/android*
859ANDROID DRIVERS 867ANDROID DRIVERS
860M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 868M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
861M: Arve Hjønnevåg <arve@android.com> 869M: Arve Hjønnevåg <arve@android.com>
862M: Riley Andrews <riandrews@android.com> 870M: Todd Kjos <tkjos@android.com>
871M: Martijn Coenen <maco@android.com>
863T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git 872T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
864L: devel@driverdev.osuosl.org 873L: devel@driverdev.osuosl.org
865S: Supported 874S: Supported
@@ -2046,7 +2055,7 @@ F: arch/arm/boot/dts/uniphier*
2046F: arch/arm/include/asm/hardware/cache-uniphier.h 2055F: arch/arm/include/asm/hardware/cache-uniphier.h
2047F: arch/arm/mach-uniphier/ 2056F: arch/arm/mach-uniphier/
2048F: arch/arm/mm/cache-uniphier.c 2057F: arch/arm/mm/cache-uniphier.c
2049F: arch/arm64/boot/dts/socionext/ 2058F: arch/arm64/boot/dts/socionext/uniphier*
2050F: drivers/bus/uniphier-system-bus.c 2059F: drivers/bus/uniphier-system-bus.c
2051F: drivers/clk/uniphier/ 2060F: drivers/clk/uniphier/
2052F: drivers/gpio/gpio-uniphier.c 2061F: drivers/gpio/gpio-uniphier.c
@@ -2620,24 +2629,22 @@ F: fs/bfs/
2620F: include/uapi/linux/bfs_fs.h 2629F: include/uapi/linux/bfs_fs.h
2621 2630
2622BLACKFIN ARCHITECTURE 2631BLACKFIN ARCHITECTURE
2623M: Steven Miao <realmz6@gmail.com>
2624L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers) 2632L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
2625T: git git://git.code.sf.net/p/adi-linux/code 2633T: git git://git.code.sf.net/p/adi-linux/code
2626W: http://blackfin.uclinux.org 2634W: http://blackfin.uclinux.org
2627S: Supported 2635S: Orphan
2628F: arch/blackfin/ 2636F: arch/blackfin/
2629 2637
2630BLACKFIN EMAC DRIVER 2638BLACKFIN EMAC DRIVER
2631L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers) 2639L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
2632W: http://blackfin.uclinux.org 2640W: http://blackfin.uclinux.org
2633S: Supported 2641S: Orphan
2634F: drivers/net/ethernet/adi/ 2642F: drivers/net/ethernet/adi/
2635 2643
2636BLACKFIN MEDIA DRIVER 2644BLACKFIN MEDIA DRIVER
2637M: Scott Jiang <scott.jiang.linux@gmail.com>
2638L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers) 2645L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
2639W: http://blackfin.uclinux.org/ 2646W: http://blackfin.uclinux.org/
2640S: Supported 2647S: Orphan
2641F: drivers/media/platform/blackfin/ 2648F: drivers/media/platform/blackfin/
2642F: drivers/media/i2c/adv7183* 2649F: drivers/media/i2c/adv7183*
2643F: drivers/media/i2c/vs6624* 2650F: drivers/media/i2c/vs6624*
@@ -2645,25 +2652,25 @@ F: drivers/media/i2c/vs6624*
2645BLACKFIN RTC DRIVER 2652BLACKFIN RTC DRIVER
2646L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers) 2653L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
2647W: http://blackfin.uclinux.org 2654W: http://blackfin.uclinux.org
2648S: Supported 2655S: Orphan
2649F: drivers/rtc/rtc-bfin.c 2656F: drivers/rtc/rtc-bfin.c
2650 2657
2651BLACKFIN SDH DRIVER 2658BLACKFIN SDH DRIVER
2652L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers) 2659L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
2653W: http://blackfin.uclinux.org 2660W: http://blackfin.uclinux.org
2654S: Supported 2661S: Orphan
2655F: drivers/mmc/host/bfin_sdh.c 2662F: drivers/mmc/host/bfin_sdh.c
2656 2663
2657BLACKFIN SERIAL DRIVER 2664BLACKFIN SERIAL DRIVER
2658L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers) 2665L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
2659W: http://blackfin.uclinux.org 2666W: http://blackfin.uclinux.org
2660S: Supported 2667S: Orphan
2661F: drivers/tty/serial/bfin_uart.c 2668F: drivers/tty/serial/bfin_uart.c
2662 2669
2663BLACKFIN WATCHDOG DRIVER 2670BLACKFIN WATCHDOG DRIVER
2664L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers) 2671L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
2665W: http://blackfin.uclinux.org 2672W: http://blackfin.uclinux.org
2666S: Supported 2673S: Orphan
2667F: drivers/watchdog/bfin_wdt.c 2674F: drivers/watchdog/bfin_wdt.c
2668 2675
2669BLINKM RGB LED DRIVER 2676BLINKM RGB LED DRIVER
@@ -5150,15 +5157,15 @@ F: sound/usb/misc/ua101.c
5150EFI TEST DRIVER 5157EFI TEST DRIVER
5151L: linux-efi@vger.kernel.org 5158L: linux-efi@vger.kernel.org
5152M: Ivan Hu <ivan.hu@canonical.com> 5159M: Ivan Hu <ivan.hu@canonical.com>
5153M: Matt Fleming <matt@codeblueprint.co.uk> 5160M: Ard Biesheuvel <ard.biesheuvel@linaro.org>
5154S: Maintained 5161S: Maintained
5155F: drivers/firmware/efi/test/ 5162F: drivers/firmware/efi/test/
5156 5163
5157EFI VARIABLE FILESYSTEM 5164EFI VARIABLE FILESYSTEM
5158M: Matthew Garrett <matthew.garrett@nebula.com> 5165M: Matthew Garrett <matthew.garrett@nebula.com>
5159M: Jeremy Kerr <jk@ozlabs.org> 5166M: Jeremy Kerr <jk@ozlabs.org>
5160M: Matt Fleming <matt@codeblueprint.co.uk> 5167M: Ard Biesheuvel <ard.biesheuvel@linaro.org>
5161T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git 5168T: git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
5162L: linux-efi@vger.kernel.org 5169L: linux-efi@vger.kernel.org
5163S: Maintained 5170S: Maintained
5164F: fs/efivarfs/ 5171F: fs/efivarfs/
@@ -5319,7 +5326,6 @@ S: Supported
5319F: security/integrity/evm/ 5326F: security/integrity/evm/
5320 5327
5321EXTENSIBLE FIRMWARE INTERFACE (EFI) 5328EXTENSIBLE FIRMWARE INTERFACE (EFI)
5322M: Matt Fleming <matt@codeblueprint.co.uk>
5323M: Ard Biesheuvel <ard.biesheuvel@linaro.org> 5329M: Ard Biesheuvel <ard.biesheuvel@linaro.org>
5324L: linux-efi@vger.kernel.org 5330L: linux-efi@vger.kernel.org
5325T: git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git 5331T: git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
@@ -5430,7 +5436,7 @@ F: drivers/media/tuners/fc2580*
5430 5436
5431FCOE SUBSYSTEM (libfc, libfcoe, fcoe) 5437FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
5432M: Johannes Thumshirn <jth@kernel.org> 5438M: Johannes Thumshirn <jth@kernel.org>
5433L: fcoe-devel@open-fcoe.org 5439L: linux-scsi@vger.kernel.org
5434W: www.Open-FCoE.org 5440W: www.Open-FCoE.org
5435S: Supported 5441S: Supported
5436F: drivers/scsi/libfc/ 5442F: drivers/scsi/libfc/
@@ -6611,16 +6617,6 @@ L: linux-i2c@vger.kernel.org
6611S: Maintained 6617S: Maintained
6612F: drivers/i2c/i2c-stub.c 6618F: drivers/i2c/i2c-stub.c
6613 6619
6614i386 BOOT CODE
6615M: "H. Peter Anvin" <hpa@zytor.com>
6616S: Maintained
6617F: arch/x86/boot/
6618
6619i386 SETUP CODE / CPU ERRATA WORKAROUNDS
6620M: "H. Peter Anvin" <hpa@zytor.com>
6621T: git git://git.kernel.org/pub/scm/linux/kernel/git/hpa/linux-2.6-x86setup.git
6622S: Maintained
6623
6624IA64 (Itanium) PLATFORM 6620IA64 (Itanium) PLATFORM
6625M: Tony Luck <tony.luck@intel.com> 6621M: Tony Luck <tony.luck@intel.com>
6626M: Fenghua Yu <fenghua.yu@intel.com> 6622M: Fenghua Yu <fenghua.yu@intel.com>
@@ -7766,6 +7762,7 @@ F: security/keys/
7766 7762
7767KGDB / KDB /debug_core 7763KGDB / KDB /debug_core
7768M: Jason Wessel <jason.wessel@windriver.com> 7764M: Jason Wessel <jason.wessel@windriver.com>
7765M: Daniel Thompson <daniel.thompson@linaro.org>
7769W: http://kgdb.wiki.kernel.org/ 7766W: http://kgdb.wiki.kernel.org/
7770L: kgdb-bugreport@lists.sourceforge.net 7767L: kgdb-bugreport@lists.sourceforge.net
7771T: git git://git.kernel.org/pub/scm/linux/kernel/git/jwessel/kgdb.git 7768T: git git://git.kernel.org/pub/scm/linux/kernel/git/jwessel/kgdb.git
@@ -9086,6 +9083,7 @@ F: drivers/usb/image/microtek.*
9086 9083
9087MIPS 9084MIPS
9088M: Ralf Baechle <ralf@linux-mips.org> 9085M: Ralf Baechle <ralf@linux-mips.org>
9086M: James Hogan <jhogan@kernel.org>
9089L: linux-mips@linux-mips.org 9087L: linux-mips@linux-mips.org
9090W: http://www.linux-mips.org/ 9088W: http://www.linux-mips.org/
9091T: git git://git.linux-mips.org/pub/scm/ralf/linux.git 9089T: git git://git.linux-mips.org/pub/scm/ralf/linux.git
@@ -9639,8 +9637,8 @@ F: include/uapi/linux/sunrpc/
9639NILFS2 FILESYSTEM 9637NILFS2 FILESYSTEM
9640M: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp> 9638M: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
9641L: linux-nilfs@vger.kernel.org 9639L: linux-nilfs@vger.kernel.org
9642W: http://nilfs.sourceforge.net/ 9640W: https://nilfs.sourceforge.io/
9643W: http://nilfs.osdn.jp/ 9641W: https://nilfs.osdn.jp/
9644T: git git://github.com/konis/nilfs2.git 9642T: git git://github.com/konis/nilfs2.git
9645S: Supported 9643S: Supported
9646F: Documentation/filesystems/nilfs2.txt 9644F: Documentation/filesystems/nilfs2.txt
@@ -10135,7 +10133,7 @@ F: drivers/irqchip/irq-ompic.c
10135F: drivers/irqchip/irq-or1k-* 10133F: drivers/irqchip/irq-or1k-*
10136 10134
10137OPENVSWITCH 10135OPENVSWITCH
10138M: Pravin Shelar <pshelar@nicira.com> 10136M: Pravin B Shelar <pshelar@ovn.org>
10139L: netdev@vger.kernel.org 10137L: netdev@vger.kernel.org
10140L: dev@openvswitch.org 10138L: dev@openvswitch.org
10141W: http://openvswitch.org 10139W: http://openvswitch.org
@@ -11652,8 +11650,8 @@ F: drivers/mtd/nand/r852.h
11652RISC-V ARCHITECTURE 11650RISC-V ARCHITECTURE
11653M: Palmer Dabbelt <palmer@sifive.com> 11651M: Palmer Dabbelt <palmer@sifive.com>
11654M: Albert Ou <albert@sifive.com> 11652M: Albert Ou <albert@sifive.com>
11655L: patches@groups.riscv.org 11653L: linux-riscv@lists.infradead.org
11656T: git https://github.com/riscv/riscv-linux 11654T: git git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux.git
11657S: Supported 11655S: Supported
11658F: arch/riscv/ 11656F: arch/riscv/
11659K: riscv 11657K: riscv
@@ -11777,6 +11775,18 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8xxxu-deve
11777S: Maintained 11775S: Maintained
11778F: drivers/net/wireless/realtek/rtl8xxxu/ 11776F: drivers/net/wireless/realtek/rtl8xxxu/
11779 11777
11778RXRPC SOCKETS (AF_RXRPC)
11779M: David Howells <dhowells@redhat.com>
11780L: linux-afs@lists.infradead.org
11781S: Supported
11782F: net/rxrpc/
11783F: include/keys/rxrpc-type.h
11784F: include/net/af_rxrpc.h
11785F: include/trace/events/rxrpc.h
11786F: include/uapi/linux/rxrpc.h
11787F: Documentation/networking/rxrpc.txt
11788W: https://www.infradead.org/~dhowells/kafs/
11789
11780S3 SAVAGE FRAMEBUFFER DRIVER 11790S3 SAVAGE FRAMEBUFFER DRIVER
11781M: Antonino Daplas <adaplas@gmail.com> 11791M: Antonino Daplas <adaplas@gmail.com>
11782L: linux-fbdev@vger.kernel.org 11792L: linux-fbdev@vger.kernel.org
@@ -12222,7 +12232,7 @@ M: Security Officers <security@kernel.org>
12222S: Supported 12232S: Supported
12223 12233
12224SECURITY SUBSYSTEM 12234SECURITY SUBSYSTEM
12225M: James Morris <james.l.morris@oracle.com> 12235M: James Morris <jmorris@namei.org>
12226M: "Serge E. Hallyn" <serge@hallyn.com> 12236M: "Serge E. Hallyn" <serge@hallyn.com>
12227L: linux-security-module@vger.kernel.org (suggested Cc:) 12237L: linux-security-module@vger.kernel.org (suggested Cc:)
12228T: git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security.git 12238T: git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security.git
@@ -12630,6 +12640,14 @@ S: Maintained
12630F: drivers/ssb/ 12640F: drivers/ssb/
12631F: include/linux/ssb/ 12641F: include/linux/ssb/
12632 12642
12643SONY IMX274 SENSOR DRIVER
12644M: Leon Luo <leonl@leopardimaging.com>
12645L: linux-media@vger.kernel.org
12646T: git git://linuxtv.org/media_tree.git
12647S: Maintained
12648F: drivers/media/i2c/imx274.c
12649F: Documentation/devicetree/bindings/media/i2c/imx274.txt
12650
12633SONY MEMORYSTICK CARD SUPPORT 12651SONY MEMORYSTICK CARD SUPPORT
12634M: Alex Dubov <oakad@yahoo.com> 12652M: Alex Dubov <oakad@yahoo.com>
12635W: http://tifmxx.berlios.de/ 12653W: http://tifmxx.berlios.de/
@@ -13096,6 +13114,7 @@ F: drivers/dma/dw/
13096 13114
13097SYNOPSYS DESIGNWARE ENTERPRISE ETHERNET DRIVER 13115SYNOPSYS DESIGNWARE ENTERPRISE ETHERNET DRIVER
13098M: Jie Deng <jiedeng@synopsys.com> 13116M: Jie Deng <jiedeng@synopsys.com>
13117M: Jose Abreu <Jose.Abreu@synopsys.com>
13099L: netdev@vger.kernel.org 13118L: netdev@vger.kernel.org
13100S: Supported 13119S: Supported
13101F: drivers/net/ethernet/synopsys/ 13120F: drivers/net/ethernet/synopsys/
@@ -13471,6 +13490,7 @@ M: Mika Westerberg <mika.westerberg@linux.intel.com>
13471M: Yehezkel Bernat <yehezkel.bernat@intel.com> 13490M: Yehezkel Bernat <yehezkel.bernat@intel.com>
13472T: git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git 13491T: git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git
13473S: Maintained 13492S: Maintained
13493F: Documentation/admin-guide/thunderbolt.rst
13474F: drivers/thunderbolt/ 13494F: drivers/thunderbolt/
13475F: include/linux/thunderbolt.h 13495F: include/linux/thunderbolt.h
13476 13496
@@ -13648,10 +13668,8 @@ F: drivers/net/wireless/ti/
13648F: include/linux/wl12xx.h 13668F: include/linux/wl12xx.h
13649 13669
13650TILE ARCHITECTURE 13670TILE ARCHITECTURE
13651M: Chris Metcalf <cmetcalf@mellanox.com>
13652W: http://www.mellanox.com/repository/solutions/tile-scm/ 13671W: http://www.mellanox.com/repository/solutions/tile-scm/
13653T: git git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile.git 13672S: Orphan
13654S: Supported
13655F: arch/tile/ 13673F: arch/tile/
13656F: drivers/char/tile-srom.c 13674F: drivers/char/tile-srom.c
13657F: drivers/edac/tile_edac.c 13675F: drivers/edac/tile_edac.c
@@ -14839,7 +14857,7 @@ F: net/x25/
14839X86 ARCHITECTURE (32-BIT AND 64-BIT) 14857X86 ARCHITECTURE (32-BIT AND 64-BIT)
14840M: Thomas Gleixner <tglx@linutronix.de> 14858M: Thomas Gleixner <tglx@linutronix.de>
14841M: Ingo Molnar <mingo@redhat.com> 14859M: Ingo Molnar <mingo@redhat.com>
14842M: "H. Peter Anvin" <hpa@zytor.com> 14860R: "H. Peter Anvin" <hpa@zytor.com>
14843M: x86@kernel.org 14861M: x86@kernel.org
14844L: linux-kernel@vger.kernel.org 14862L: linux-kernel@vger.kernel.org
14845T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core 14863T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
diff --git a/Makefile b/Makefile
index c988e46a53cd..c8b8e902d5a4 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
2VERSION = 4 2VERSION = 4
3PATCHLEVEL = 15 3PATCHLEVEL = 15
4SUBLEVEL = 0 4SUBLEVEL = 0
5EXTRAVERSION = -rc2 5EXTRAVERSION =
6NAME = Fearless Coyote 6NAME = Fearless Coyote
7 7
8# *DOCUMENTATION* 8# *DOCUMENTATION*
@@ -484,26 +484,6 @@ CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
484endif 484endif
485KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) 485KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
486KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) 486KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
487KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
488KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
489KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
490KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
491KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
492# Quiet clang warning: comparison of unsigned expression < 0 is always false
493KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
494# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
495# source of a reference will be _MergedGlobals and not on of the whitelisted names.
496# See modpost pattern 2
497KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
498KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
499KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
500KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
501else
502
503# These warnings generated too much noise in a regular build.
504# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
505KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
506KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
507endif 487endif
508 488
509ifeq ($(config-targets),1) 489ifeq ($(config-targets),1)
@@ -716,6 +696,29 @@ ifdef CONFIG_CC_STACKPROTECTOR
716endif 696endif
717KBUILD_CFLAGS += $(stackp-flag) 697KBUILD_CFLAGS += $(stackp-flag)
718 698
699ifeq ($(cc-name),clang)
700KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
701KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
702KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
703KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
704KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
705# Quiet clang warning: comparison of unsigned expression < 0 is always false
706KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
707# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
708# source of a reference will be _MergedGlobals and not on of the whitelisted names.
709# See modpost pattern 2
710KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
711KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
712KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
713KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
714else
715
716# These warnings generated too much noise in a regular build.
717# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
718KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
719KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
720endif
721
719ifdef CONFIG_FRAME_POINTER 722ifdef CONFIG_FRAME_POINTER
720KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls 723KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
721else 724else
@@ -789,6 +792,9 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
789# disable invalid "can't wrap" optimizations for signed / pointers 792# disable invalid "can't wrap" optimizations for signed / pointers
790KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) 793KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
791 794
795# Make sure -fstack-check isn't enabled (like gentoo apparently did)
796KBUILD_CFLAGS += $(call cc-option,-fno-stack-check,)
797
792# conserve stack if available 798# conserve stack if available
793KBUILD_CFLAGS += $(call cc-option,-fconserve-stack) 799KBUILD_CFLAGS += $(call cc-option,-fconserve-stack)
794 800
diff --git a/arch/alpha/include/uapi/asm/Kbuild b/arch/alpha/include/uapi/asm/Kbuild
index b15bf6bc0e94..14a2e9af97e9 100644
--- a/arch/alpha/include/uapi/asm/Kbuild
+++ b/arch/alpha/include/uapi/asm/Kbuild
@@ -1,2 +1,4 @@
1# UAPI Header export list 1# UAPI Header export list
2include include/uapi/asm-generic/Kbuild.asm 2include include/uapi/asm-generic/Kbuild.asm
3
4generic-y += bpf_perf_event.h
diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c
index 37bd6d9b8eb9..a6bdc1da47ad 100644
--- a/arch/alpha/kernel/sys_sio.c
+++ b/arch/alpha/kernel/sys_sio.c
@@ -102,6 +102,15 @@ sio_pci_route(void)
102 alpha_mv.sys.sio.route_tab); 102 alpha_mv.sys.sio.route_tab);
103} 103}
104 104
105static bool sio_pci_dev_irq_needs_level(const struct pci_dev *dev)
106{
107 if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) &&
108 (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA))
109 return false;
110
111 return true;
112}
113
105static unsigned int __init 114static unsigned int __init
106sio_collect_irq_levels(void) 115sio_collect_irq_levels(void)
107{ 116{
@@ -110,8 +119,7 @@ sio_collect_irq_levels(void)
110 119
111 /* Iterate through the devices, collecting IRQ levels. */ 120 /* Iterate through the devices, collecting IRQ levels. */
112 for_each_pci_dev(dev) { 121 for_each_pci_dev(dev) {
113 if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) && 122 if (!sio_pci_dev_irq_needs_level(dev))
114 (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA))
115 continue; 123 continue;
116 124
117 if (dev->irq) 125 if (dev->irq)
@@ -120,8 +128,7 @@ sio_collect_irq_levels(void)
120 return level_bits; 128 return level_bits;
121} 129}
122 130
123static void __init 131static void __sio_fixup_irq_levels(unsigned int level_bits, bool reset)
124sio_fixup_irq_levels(unsigned int level_bits)
125{ 132{
126 unsigned int old_level_bits; 133 unsigned int old_level_bits;
127 134
@@ -139,12 +146,21 @@ sio_fixup_irq_levels(unsigned int level_bits)
139 */ 146 */
140 old_level_bits = inb(0x4d0) | (inb(0x4d1) << 8); 147 old_level_bits = inb(0x4d0) | (inb(0x4d1) << 8);
141 148
142 level_bits |= (old_level_bits & 0x71ff); 149 if (reset)
150 old_level_bits &= 0x71ff;
151
152 level_bits |= old_level_bits;
143 153
144 outb((level_bits >> 0) & 0xff, 0x4d0); 154 outb((level_bits >> 0) & 0xff, 0x4d0);
145 outb((level_bits >> 8) & 0xff, 0x4d1); 155 outb((level_bits >> 8) & 0xff, 0x4d1);
146} 156}
147 157
158static inline void
159sio_fixup_irq_levels(unsigned int level_bits)
160{
161 __sio_fixup_irq_levels(level_bits, true);
162}
163
148static inline int 164static inline int
149noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 165noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
150{ 166{
@@ -181,7 +197,14 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
181 const long min_idsel = 6, max_idsel = 14, irqs_per_slot = 5; 197 const long min_idsel = 6, max_idsel = 14, irqs_per_slot = 5;
182 int irq = COMMON_TABLE_LOOKUP, tmp; 198 int irq = COMMON_TABLE_LOOKUP, tmp;
183 tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq); 199 tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq);
184 return irq >= 0 ? tmp : -1; 200
201 irq = irq >= 0 ? tmp : -1;
202
203 /* Fixup IRQ level if an actual IRQ mapping is detected */
204 if (sio_pci_dev_irq_needs_level(dev) && irq >= 0)
205 __sio_fixup_irq_levels(1 << irq, false);
206
207 return irq;
185} 208}
186 209
187static inline int 210static inline int
diff --git a/arch/alpha/lib/ev6-memset.S b/arch/alpha/lib/ev6-memset.S
index 316a99aa9efe..1cfcfbbea6f0 100644
--- a/arch/alpha/lib/ev6-memset.S
+++ b/arch/alpha/lib/ev6-memset.S
@@ -18,7 +18,7 @@
18 * The algorithm for the leading and trailing quadwords remains the same, 18 * The algorithm for the leading and trailing quadwords remains the same,
19 * however the loop has been unrolled to enable better memory throughput, 19 * however the loop has been unrolled to enable better memory throughput,
20 * and the code has been replicated for each of the entry points: __memset 20 * and the code has been replicated for each of the entry points: __memset
21 * and __memsetw to permit better scheduling to eliminate the stalling 21 * and __memset16 to permit better scheduling to eliminate the stalling
22 * encountered during the mask replication. 22 * encountered during the mask replication.
23 * A future enhancement might be to put in a byte store loop for really 23 * A future enhancement might be to put in a byte store loop for really
24 * small (say < 32 bytes) memset()s. Whether or not that change would be 24 * small (say < 32 bytes) memset()s. Whether or not that change would be
@@ -34,7 +34,7 @@
34 .globl memset 34 .globl memset
35 .globl __memset 35 .globl __memset
36 .globl ___memset 36 .globl ___memset
37 .globl __memsetw 37 .globl __memset16
38 .globl __constant_c_memset 38 .globl __constant_c_memset
39 39
40 .ent ___memset 40 .ent ___memset
@@ -415,9 +415,9 @@ end:
415 * to mask stalls. Note that entry point names also had to change 415 * to mask stalls. Note that entry point names also had to change
416 */ 416 */
417 .align 5 417 .align 5
418 .ent __memsetw 418 .ent __memset16
419 419
420__memsetw: 420__memset16:
421 .frame $30,0,$26,0 421 .frame $30,0,$26,0
422 .prologue 0 422 .prologue 0
423 423
@@ -596,8 +596,8 @@ end_w:
596 nop 596 nop
597 ret $31,($26),1 # L0 : 597 ret $31,($26),1 # L0 :
598 598
599 .end __memsetw 599 .end __memset16
600 EXPORT_SYMBOL(__memsetw) 600 EXPORT_SYMBOL(__memset16)
601 601
602memset = ___memset 602memset = ___memset
603__memset = ___memset 603__memset = ___memset
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
index 4e6e9f57e790..dc91c663bcc0 100644
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -35,6 +35,14 @@
35 reg = <0x80 0x10>, <0x100 0x10>; 35 reg = <0x80 0x10>, <0x100 0x10>;
36 #clock-cells = <0>; 36 #clock-cells = <0>;
37 clocks = <&input_clk>; 37 clocks = <&input_clk>;
38
39 /*
40 * Set initial core pll output frequency to 90MHz.
41 * It will be applied at the core pll driver probing
42 * on early boot.
43 */
44 assigned-clocks = <&core_clk>;
45 assigned-clock-rates = <90000000>;
38 }; 46 };
39 47
40 core_intc: archs-intc@cpu { 48 core_intc: archs-intc@cpu {
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi
index 63954a8b0100..69ff4895f2ba 100644
--- a/arch/arc/boot/dts/axc003_idu.dtsi
+++ b/arch/arc/boot/dts/axc003_idu.dtsi
@@ -35,6 +35,14 @@
35 reg = <0x80 0x10>, <0x100 0x10>; 35 reg = <0x80 0x10>, <0x100 0x10>;
36 #clock-cells = <0>; 36 #clock-cells = <0>;
37 clocks = <&input_clk>; 37 clocks = <&input_clk>;
38
39 /*
40 * Set initial core pll output frequency to 100MHz.
41 * It will be applied at the core pll driver probing
42 * on early boot.
43 */
44 assigned-clocks = <&core_clk>;
45 assigned-clock-rates = <100000000>;
38 }; 46 };
39 47
40 core_intc: archs-intc@cpu { 48 core_intc: archs-intc@cpu {
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 8f627c200d60..006aa3de5348 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -114,6 +114,14 @@
114 reg = <0x00 0x10>, <0x14B8 0x4>; 114 reg = <0x00 0x10>, <0x14B8 0x4>;
115 #clock-cells = <0>; 115 #clock-cells = <0>;
116 clocks = <&input_clk>; 116 clocks = <&input_clk>;
117
118 /*
119 * Set initial core pll output frequency to 1GHz.
120 * It will be applied at the core pll driver probing
121 * on early boot.
122 */
123 assigned-clocks = <&core_clk>;
124 assigned-clock-rates = <1000000000>;
117 }; 125 };
118 126
119 serial: serial@5000 { 127 serial: serial@5000 {
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
index 7b8f8faf8a24..ac6b0ed8341e 100644
--- a/arch/arc/configs/hsdk_defconfig
+++ b/arch/arc/configs/hsdk_defconfig
@@ -49,10 +49,11 @@ CONFIG_SERIAL_8250_DW=y
49CONFIG_SERIAL_OF_PLATFORM=y 49CONFIG_SERIAL_OF_PLATFORM=y
50# CONFIG_HW_RANDOM is not set 50# CONFIG_HW_RANDOM is not set
51# CONFIG_HWMON is not set 51# CONFIG_HWMON is not set
52CONFIG_DRM=y
53# CONFIG_DRM_FBDEV_EMULATION is not set
54CONFIG_DRM_UDL=y
52CONFIG_FB=y 55CONFIG_FB=y
53CONFIG_FB_UDL=y
54CONFIG_FRAMEBUFFER_CONSOLE=y 56CONFIG_FRAMEBUFFER_CONSOLE=y
55CONFIG_USB=y
56CONFIG_USB_EHCI_HCD=y 57CONFIG_USB_EHCI_HCD=y
57CONFIG_USB_EHCI_HCD_PLATFORM=y 58CONFIG_USB_EHCI_HCD_PLATFORM=y
58CONFIG_USB_OHCI_HCD=y 59CONFIG_USB_OHCI_HCD=y
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index f35974ee7264..c9173c02081c 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -668,6 +668,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
668 return 0; 668 return 0;
669 669
670 __asm__ __volatile__( 670 __asm__ __volatile__(
671 " mov lp_count, %5 \n"
671 " lp 3f \n" 672 " lp 3f \n"
672 "1: ldb.ab %3, [%2, 1] \n" 673 "1: ldb.ab %3, [%2, 1] \n"
673 " breq.d %3, 0, 3f \n" 674 " breq.d %3, 0, 3f \n"
@@ -684,8 +685,8 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
684 " .word 1b, 4b \n" 685 " .word 1b, 4b \n"
685 " .previous \n" 686 " .previous \n"
686 : "+r"(res), "+r"(dst), "+r"(src), "=r"(val) 687 : "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
687 : "g"(-EFAULT), "l"(count) 688 : "g"(-EFAULT), "r"(count)
688 : "memory"); 689 : "lp_count", "lp_start", "lp_end", "memory");
689 690
690 return res; 691 return res;
691} 692}
diff --git a/arch/arc/include/uapi/asm/Kbuild b/arch/arc/include/uapi/asm/Kbuild
index fa6d0ff4ff89..170b5db64afe 100644
--- a/arch/arc/include/uapi/asm/Kbuild
+++ b/arch/arc/include/uapi/asm/Kbuild
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += auxvec.h 4generic-y += auxvec.h
5generic-y += bitsperlong.h 5generic-y += bitsperlong.h
6generic-y += bpf_perf_event.h
6generic-y += errno.h 7generic-y += errno.h
7generic-y += fcntl.h 8generic-y += fcntl.h
8generic-y += ioctl.h 9generic-y += ioctl.h
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 7ef7d9a8ff89..9d27331fe69a 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -199,7 +199,7 @@ static void read_arc_build_cfg_regs(void)
199 unsigned int exec_ctrl; 199 unsigned int exec_ctrl;
200 200
201 READ_BCR(AUX_EXEC_CTRL, exec_ctrl); 201 READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
202 cpu->extn.dual_enb = exec_ctrl & 1; 202 cpu->extn.dual_enb = !(exec_ctrl & 1);
203 203
204 /* dual issue always present for this core */ 204 /* dual issue always present for this core */
205 cpu->extn.dual = 1; 205 cpu->extn.dual = 1;
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index 74315f302971..bf40e06f3fb8 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -163,7 +163,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
163 */ 163 */
164static int __print_sym(unsigned int address, void *unused) 164static int __print_sym(unsigned int address, void *unused)
165{ 165{
166 __print_symbol(" %s\n", address); 166 printk(" %pS\n", (void *)address);
167 return 0; 167 return 0;
168} 168}
169 169
diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
index bcd7c9fc5d0f..133a4dae41fe 100644
--- a/arch/arc/kernel/traps.c
+++ b/arch/arc/kernel/traps.c
@@ -83,6 +83,7 @@ DO_ERROR_INFO(SIGILL, "Illegal Insn (or Seq)", insterror_is_error, ILL_ILLOPC)
83DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", __weak do_memory_error, BUS_ADRERR) 83DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", __weak do_memory_error, BUS_ADRERR)
84DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT) 84DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT)
85DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN) 85DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN)
86DO_ERROR_INFO(SIGSEGV, "gcc generated __builtin_trap", do_trap5_error, 0)
86 87
87/* 88/*
88 * Entry Point for Misaligned Data access Exception, for emulating in software 89 * Entry Point for Misaligned Data access Exception, for emulating in software
@@ -115,6 +116,8 @@ void do_machine_check_fault(unsigned long address, struct pt_regs *regs)
115 * Thus TRAP_S <n> can be used for specific purpose 116 * Thus TRAP_S <n> can be used for specific purpose
116 * -1 used for software breakpointing (gdb) 117 * -1 used for software breakpointing (gdb)
117 * -2 used by kprobes 118 * -2 used by kprobes
119 * -5 __builtin_trap() generated by gcc (2018.03 onwards) for toggle such as
120 * -fno-isolate-erroneous-paths-dereference
118 */ 121 */
119void do_non_swi_trap(unsigned long address, struct pt_regs *regs) 122void do_non_swi_trap(unsigned long address, struct pt_regs *regs)
120{ 123{
@@ -134,6 +137,9 @@ void do_non_swi_trap(unsigned long address, struct pt_regs *regs)
134 kgdb_trap(regs); 137 kgdb_trap(regs);
135 break; 138 break;
136 139
140 case 5:
141 do_trap5_error(address, regs);
142 break;
137 default: 143 default:
138 break; 144 break;
139 } 145 }
@@ -155,3 +161,11 @@ void do_insterror_or_kprobe(unsigned long address, struct pt_regs *regs)
155 161
156 insterror_is_error(address, regs); 162 insterror_is_error(address, regs);
157} 163}
164
165/*
166 * abort() call generated by older gcc for __builtin_trap()
167 */
168void abort(void)
169{
170 __asm__ __volatile__("trap_s 5\n");
171}
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 7d8c1d6c2f60..6e9a0a9a6a04 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -163,6 +163,9 @@ static void show_ecr_verbose(struct pt_regs *regs)
163 else 163 else
164 pr_cont("Bus Error, check PRM\n"); 164 pr_cont("Bus Error, check PRM\n");
165#endif 165#endif
166 } else if (vec == ECR_V_TRAP) {
167 if (regs->ecr_param == 5)
168 pr_cont("gcc generated __builtin_trap\n");
166 } else { 169 } else {
167 pr_cont("Check Programmer's Manual\n"); 170 pr_cont("Check Programmer's Manual\n");
168 } 171 }
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index f1ac6790da5f..46544e88492d 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -317,25 +317,23 @@ static void __init axs103_early_init(void)
317 * Instead of duplicating defconfig/DT for SMP/QUAD, add a small hack 317 * Instead of duplicating defconfig/DT for SMP/QUAD, add a small hack
318 * of fudging the freq in DT 318 * of fudging the freq in DT
319 */ 319 */
320#define AXS103_QUAD_CORE_CPU_FREQ_HZ 50000000
321
320 unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F; 322 unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F;
321 if (num_cores > 2) { 323 if (num_cores > 2) {
322 u32 freq = 50, orig; 324 u32 freq;
323 /*
324 * TODO: use cpu node "cpu-freq" param instead of platform-specific
325 * "/cpu_card/core_clk" as it works only if we use fixed-clock for cpu.
326 */
327 int off = fdt_path_offset(initial_boot_params, "/cpu_card/core_clk"); 325 int off = fdt_path_offset(initial_boot_params, "/cpu_card/core_clk");
328 const struct fdt_property *prop; 326 const struct fdt_property *prop;
329 327
330 prop = fdt_get_property(initial_boot_params, off, 328 prop = fdt_get_property(initial_boot_params, off,
331 "clock-frequency", NULL); 329 "assigned-clock-rates", NULL);
332 orig = be32_to_cpu(*(u32*)(prop->data)) / 1000000; 330 freq = be32_to_cpu(*(u32 *)(prop->data));
333 331
334 /* Patching .dtb in-place with new core clock value */ 332 /* Patching .dtb in-place with new core clock value */
335 if (freq != orig ) { 333 if (freq != AXS103_QUAD_CORE_CPU_FREQ_HZ) {
336 freq = cpu_to_be32(freq * 1000000); 334 freq = cpu_to_be32(AXS103_QUAD_CORE_CPU_FREQ_HZ);
337 fdt_setprop_inplace(initial_boot_params, off, 335 fdt_setprop_inplace(initial_boot_params, off,
338 "clock-frequency", &freq, sizeof(freq)); 336 "assigned-clock-rates", &freq, sizeof(freq));
339 } 337 }
340 } 338 }
341#endif 339#endif
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c
index fd0ae5e38639..2958aedb649a 100644
--- a/arch/arc/plat-hsdk/platform.c
+++ b/arch/arc/plat-hsdk/platform.c
@@ -38,42 +38,6 @@ static void __init hsdk_init_per_cpu(unsigned int cpu)
38#define CREG_PAE (CREG_BASE + 0x180) 38#define CREG_PAE (CREG_BASE + 0x180)
39#define CREG_PAE_UPDATE (CREG_BASE + 0x194) 39#define CREG_PAE_UPDATE (CREG_BASE + 0x194)
40 40
41#define CREG_CORE_IF_CLK_DIV (CREG_BASE + 0x4B8)
42#define CREG_CORE_IF_CLK_DIV_2 0x1
43#define CGU_BASE ARC_PERIPHERAL_BASE
44#define CGU_PLL_STATUS (ARC_PERIPHERAL_BASE + 0x4)
45#define CGU_PLL_CTRL (ARC_PERIPHERAL_BASE + 0x0)
46#define CGU_PLL_STATUS_LOCK BIT(0)
47#define CGU_PLL_STATUS_ERR BIT(1)
48#define CGU_PLL_CTRL_1GHZ 0x3A10
49#define HSDK_PLL_LOCK_TIMEOUT 500
50
51#define HSDK_PLL_LOCKED() \
52 !!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK)
53
54#define HSDK_PLL_ERR() \
55 !!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR)
56
57static void __init hsdk_set_cpu_freq_1ghz(void)
58{
59 u32 timeout = HSDK_PLL_LOCK_TIMEOUT;
60
61 /*
62 * As we set cpu clock which exceeds 500MHz, the divider for the interface
63 * clock must be programmed to div-by-2.
64 */
65 iowrite32(CREG_CORE_IF_CLK_DIV_2, (void __iomem *) CREG_CORE_IF_CLK_DIV);
66
67 /* Set cpu clock to 1GHz */
68 iowrite32(CGU_PLL_CTRL_1GHZ, (void __iomem *) CGU_PLL_CTRL);
69
70 while (!HSDK_PLL_LOCKED() && timeout--)
71 cpu_relax();
72
73 if (!HSDK_PLL_LOCKED() || HSDK_PLL_ERR())
74 pr_err("Failed to setup CPU frequency to 1GHz!");
75}
76
77#define SDIO_BASE (ARC_PERIPHERAL_BASE + 0xA000) 41#define SDIO_BASE (ARC_PERIPHERAL_BASE + 0xA000)
78#define SDIO_UHS_REG_EXT (SDIO_BASE + 0x108) 42#define SDIO_UHS_REG_EXT (SDIO_BASE + 0x108)
79#define SDIO_UHS_REG_EXT_DIV_2 (2 << 30) 43#define SDIO_UHS_REG_EXT_DIV_2 (2 << 30)
@@ -98,12 +62,6 @@ static void __init hsdk_init_early(void)
98 * minimum possible div-by-2. 62 * minimum possible div-by-2.
99 */ 63 */
100 iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT); 64 iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT);
101
102 /*
103 * Setup CPU frequency to 1GHz.
104 * TODO: remove it after smart hsdk pll driver will be introduced.
105 */
106 hsdk_set_cpu_freq_1ghz();
107} 65}
108 66
109static const char *hsdk_compat[] __initconst = { 67static const char *hsdk_compat[] __initconst = {
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 1b81c4e75772..d37f95025807 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -630,6 +630,7 @@
630 reg-names = "phy"; 630 reg-names = "phy";
631 status = "disabled"; 631 status = "disabled";
632 ti,ctrl_mod = <&usb_ctrl_mod>; 632 ti,ctrl_mod = <&usb_ctrl_mod>;
633 #phy-cells = <0>;
633 }; 634 };
634 635
635 usb0: usb@47401000 { 636 usb0: usb@47401000 {
@@ -678,6 +679,7 @@
678 reg-names = "phy"; 679 reg-names = "phy";
679 status = "disabled"; 680 status = "disabled";
680 ti,ctrl_mod = <&usb_ctrl_mod>; 681 ti,ctrl_mod = <&usb_ctrl_mod>;
682 #phy-cells = <0>;
681 }; 683 };
682 684
683 usb1: usb@47401800 { 685 usb1: usb@47401800 {
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index e5b061469bf8..4714a59fd86d 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -927,7 +927,8 @@
927 reg = <0x48038000 0x2000>, 927 reg = <0x48038000 0x2000>,
928 <0x46000000 0x400000>; 928 <0x46000000 0x400000>;
929 reg-names = "mpu", "dat"; 929 reg-names = "mpu", "dat";
930 interrupts = <80>, <81>; 930 interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>,
931 <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
931 interrupt-names = "tx", "rx"; 932 interrupt-names = "tx", "rx";
932 status = "disabled"; 933 status = "disabled";
933 dmas = <&edma 8 2>, 934 dmas = <&edma 8 2>,
@@ -941,7 +942,8 @@
941 reg = <0x4803C000 0x2000>, 942 reg = <0x4803C000 0x2000>,
942 <0x46400000 0x400000>; 943 <0x46400000 0x400000>;
943 reg-names = "mpu", "dat"; 944 reg-names = "mpu", "dat";
944 interrupts = <82>, <83>; 945 interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>,
946 <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
945 interrupt-names = "tx", "rx"; 947 interrupt-names = "tx", "rx";
946 status = "disabled"; 948 status = "disabled";
947 dmas = <&edma 10 2>, 949 dmas = <&edma 10 2>,
diff --git a/arch/arm/boot/dts/am437x-cm-t43.dts b/arch/arm/boot/dts/am437x-cm-t43.dts
index 9e92d480576b..3b9a94c274a7 100644
--- a/arch/arm/boot/dts/am437x-cm-t43.dts
+++ b/arch/arm/boot/dts/am437x-cm-t43.dts
@@ -301,8 +301,8 @@
301 status = "okay"; 301 status = "okay";
302 pinctrl-names = "default"; 302 pinctrl-names = "default";
303 pinctrl-0 = <&spi0_pins>; 303 pinctrl-0 = <&spi0_pins>;
304 dmas = <&edma 16 304 dmas = <&edma 16 0
305 &edma 17>; 305 &edma 17 0>;
306 dma-names = "tx0", "rx0"; 306 dma-names = "tx0", "rx0";
307 307
308 flash: w25q64cvzpig@0 { 308 flash: w25q64cvzpig@0 {
diff --git a/arch/arm/boot/dts/armada-385-db-ap.dts b/arch/arm/boot/dts/armada-385-db-ap.dts
index 25d2d720dc0e..678aa023335d 100644
--- a/arch/arm/boot/dts/armada-385-db-ap.dts
+++ b/arch/arm/boot/dts/armada-385-db-ap.dts
@@ -236,6 +236,7 @@
236 usb3_phy: usb3_phy { 236 usb3_phy: usb3_phy {
237 compatible = "usb-nop-xceiv"; 237 compatible = "usb-nop-xceiv";
238 vcc-supply = <&reg_xhci0_vbus>; 238 vcc-supply = <&reg_xhci0_vbus>;
239 #phy-cells = <0>;
239 }; 240 };
240 241
241 reg_xhci0_vbus: xhci0-vbus { 242 reg_xhci0_vbus: xhci0-vbus {
diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
index e1f355ffc8f7..434dc9aaa5e4 100644
--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
+++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
@@ -66,6 +66,7 @@
66 usb3_1_phy: usb3_1-phy { 66 usb3_1_phy: usb3_1-phy {
67 compatible = "usb-nop-xceiv"; 67 compatible = "usb-nop-xceiv";
68 vcc-supply = <&usb3_1_vbus>; 68 vcc-supply = <&usb3_1_vbus>;
69 #phy-cells = <0>;
69 }; 70 };
70 71
71 usb3_1_vbus: usb3_1-vbus { 72 usb3_1_vbus: usb3_1-vbus {
diff --git a/arch/arm/boot/dts/armada-385-synology-ds116.dts b/arch/arm/boot/dts/armada-385-synology-ds116.dts
index 36ad571e76f3..0a3552ebda3b 100644
--- a/arch/arm/boot/dts/armada-385-synology-ds116.dts
+++ b/arch/arm/boot/dts/armada-385-synology-ds116.dts
@@ -191,11 +191,13 @@
191 usb3_0_phy: usb3_0_phy { 191 usb3_0_phy: usb3_0_phy {
192 compatible = "usb-nop-xceiv"; 192 compatible = "usb-nop-xceiv";
193 vcc-supply = <&reg_usb3_0_vbus>; 193 vcc-supply = <&reg_usb3_0_vbus>;
194 #phy-cells = <0>;
194 }; 195 };
195 196
196 usb3_1_phy: usb3_1_phy { 197 usb3_1_phy: usb3_1_phy {
197 compatible = "usb-nop-xceiv"; 198 compatible = "usb-nop-xceiv";
198 vcc-supply = <&reg_usb3_1_vbus>; 199 vcc-supply = <&reg_usb3_1_vbus>;
200 #phy-cells = <0>;
199 }; 201 };
200 202
201 reg_usb3_0_vbus: usb3-vbus0 { 203 reg_usb3_0_vbus: usb3-vbus0 {
diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts
index f503955dbd3b..51b4ee6df130 100644
--- a/arch/arm/boot/dts/armada-388-gp.dts
+++ b/arch/arm/boot/dts/armada-388-gp.dts
@@ -276,11 +276,13 @@
276 usb2_1_phy: usb2_1_phy { 276 usb2_1_phy: usb2_1_phy {
277 compatible = "usb-nop-xceiv"; 277 compatible = "usb-nop-xceiv";
278 vcc-supply = <&reg_usb2_1_vbus>; 278 vcc-supply = <&reg_usb2_1_vbus>;
279 #phy-cells = <0>;
279 }; 280 };
280 281
281 usb3_phy: usb3_phy { 282 usb3_phy: usb3_phy {
282 compatible = "usb-nop-xceiv"; 283 compatible = "usb-nop-xceiv";
283 vcc-supply = <&reg_usb3_vbus>; 284 vcc-supply = <&reg_usb3_vbus>;
285 #phy-cells = <0>;
284 }; 286 };
285 287
286 reg_usb3_vbus: usb3-vbus { 288 reg_usb3_vbus: usb3-vbus {
diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi
index 45d815a86d42..de08d9045cb8 100644
--- a/arch/arm/boot/dts/aspeed-g4.dtsi
+++ b/arch/arm/boot/dts/aspeed-g4.dtsi
@@ -219,7 +219,7 @@
219 compatible = "aspeed,ast2400-vuart"; 219 compatible = "aspeed,ast2400-vuart";
220 reg = <0x1e787000 0x40>; 220 reg = <0x1e787000 0x40>;
221 reg-shift = <2>; 221 reg-shift = <2>;
222 interrupts = <10>; 222 interrupts = <8>;
223 clocks = <&clk_uart>; 223 clocks = <&clk_uart>;
224 no-loopback-test; 224 no-loopback-test;
225 status = "disabled"; 225 status = "disabled";
diff --git a/arch/arm/boot/dts/at91-tse850-3.dts b/arch/arm/boot/dts/at91-tse850-3.dts
index 5f29010cdbd8..9b82cc8843e1 100644
--- a/arch/arm/boot/dts/at91-tse850-3.dts
+++ b/arch/arm/boot/dts/at91-tse850-3.dts
@@ -221,6 +221,7 @@
221 jc42@18 { 221 jc42@18 {
222 compatible = "nxp,se97b", "jedec,jc-42.4-temp"; 222 compatible = "nxp,se97b", "jedec,jc-42.4-temp";
223 reg = <0x18>; 223 reg = <0x18>;
224 smbus-timeout-disable;
224 }; 225 };
225 226
226 dpot: mcp4651-104@28 { 227 dpot: mcp4651-104@28 {
diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index 528b9e3bc1da..dcc55aa84583 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -85,7 +85,7 @@
85 timer@20200 { 85 timer@20200 {
86 compatible = "arm,cortex-a9-global-timer"; 86 compatible = "arm,cortex-a9-global-timer";
87 reg = <0x20200 0x100>; 87 reg = <0x20200 0x100>;
88 interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>; 88 interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
89 clocks = <&periph_clk>; 89 clocks = <&periph_clk>;
90 }; 90 };
91 91
@@ -93,7 +93,7 @@
93 compatible = "arm,cortex-a9-twd-timer"; 93 compatible = "arm,cortex-a9-twd-timer";
94 reg = <0x20600 0x20>; 94 reg = <0x20600 0x20>;
95 interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | 95 interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
96 IRQ_TYPE_LEVEL_HIGH)>; 96 IRQ_TYPE_EDGE_RISING)>;
97 clocks = <&periph_clk>; 97 clocks = <&periph_clk>;
98 }; 98 };
99 99
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 013431e3d7c3..dcde93c85c2d 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -639,5 +639,6 @@
639 639
640 usbphy: phy { 640 usbphy: phy {
641 compatible = "usb-nop-xceiv"; 641 compatible = "usb-nop-xceiv";
642 #phy-cells = <0>;
642 }; 643 };
643}; 644};
diff --git a/arch/arm/boot/dts/bcm958623hr.dts b/arch/arm/boot/dts/bcm958623hr.dts
index 3bc50849d013..b8bde13de90a 100644
--- a/arch/arm/boot/dts/bcm958623hr.dts
+++ b/arch/arm/boot/dts/bcm958623hr.dts
@@ -141,10 +141,6 @@
141 status = "okay"; 141 status = "okay";
142}; 142};
143 143
144&sata {
145 status = "okay";
146};
147
148&qspi { 144&qspi {
149 bspi-sel = <0>; 145 bspi-sel = <0>;
150 flash: m25p80@0 { 146 flash: m25p80@0 {
diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
index d94d14b3c745..6a44b8021702 100644
--- a/arch/arm/boot/dts/bcm958625hr.dts
+++ b/arch/arm/boot/dts/bcm958625hr.dts
@@ -177,10 +177,6 @@
177 status = "okay"; 177 status = "okay";
178}; 178};
179 179
180&sata {
181 status = "okay";
182};
183
184&srab { 180&srab {
185 compatible = "brcm,bcm58625-srab", "brcm,nsp-srab"; 181 compatible = "brcm,bcm58625-srab", "brcm,nsp-srab";
186 status = "okay"; 182 status = "okay";
diff --git a/arch/arm/boot/dts/da850-lcdk.dts b/arch/arm/boot/dts/da850-lcdk.dts
index eed89e659143..a1f4d6d5a569 100644
--- a/arch/arm/boot/dts/da850-lcdk.dts
+++ b/arch/arm/boot/dts/da850-lcdk.dts
@@ -293,12 +293,12 @@
293 label = "u-boot env"; 293 label = "u-boot env";
294 reg = <0 0x020000>; 294 reg = <0 0x020000>;
295 }; 295 };
296 partition@0x020000 { 296 partition@20000 {
297 /* The LCDK defaults to booting from this partition */ 297 /* The LCDK defaults to booting from this partition */
298 label = "u-boot"; 298 label = "u-boot";
299 reg = <0x020000 0x080000>; 299 reg = <0x020000 0x080000>;
300 }; 300 };
301 partition@0x0a0000 { 301 partition@a0000 {
302 label = "free space"; 302 label = "free space";
303 reg = <0x0a0000 0>; 303 reg = <0x0a0000 0>;
304 }; 304 };
diff --git a/arch/arm/boot/dts/da850-lego-ev3.dts b/arch/arm/boot/dts/da850-lego-ev3.dts
index 413dbd5d9f64..81942ae83e1f 100644
--- a/arch/arm/boot/dts/da850-lego-ev3.dts
+++ b/arch/arm/boot/dts/da850-lego-ev3.dts
@@ -178,7 +178,7 @@
178 */ 178 */
179 battery { 179 battery {
180 pinctrl-names = "default"; 180 pinctrl-names = "default";
181 pintctrl-0 = <&battery_pins>; 181 pinctrl-0 = <&battery_pins>;
182 compatible = "lego,ev3-battery"; 182 compatible = "lego,ev3-battery";
183 io-channels = <&adc 4>, <&adc 3>; 183 io-channels = <&adc 4>, <&adc 3>;
184 io-channel-names = "voltage", "current"; 184 io-channel-names = "voltage", "current";
@@ -392,7 +392,7 @@
392 batt_volt_en { 392 batt_volt_en {
393 gpio-hog; 393 gpio-hog;
394 gpios = <6 GPIO_ACTIVE_HIGH>; 394 gpios = <6 GPIO_ACTIVE_HIGH>;
395 output-low; 395 output-high;
396 }; 396 };
397}; 397};
398 398
diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi
index 9708157f5daf..681f5487406e 100644
--- a/arch/arm/boot/dts/dm814x.dtsi
+++ b/arch/arm/boot/dts/dm814x.dtsi
@@ -75,6 +75,7 @@
75 reg = <0x47401300 0x100>; 75 reg = <0x47401300 0x100>;
76 reg-names = "phy"; 76 reg-names = "phy";
77 ti,ctrl_mod = <&usb_ctrl_mod>; 77 ti,ctrl_mod = <&usb_ctrl_mod>;
78 #phy-cells = <0>;
78 }; 79 };
79 80
80 usb0: usb@47401000 { 81 usb0: usb@47401000 {
@@ -385,6 +386,7 @@
385 reg = <0x1b00 0x100>; 386 reg = <0x1b00 0x100>;
386 reg-names = "phy"; 387 reg-names = "phy";
387 ti,ctrl_mod = <&usb_ctrl_mod>; 388 ti,ctrl_mod = <&usb_ctrl_mod>;
389 #phy-cells = <0>;
388 }; 390 };
389 }; 391 };
390 392
diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts
index b2b95ff205e8..0029ec27819c 100644
--- a/arch/arm/boot/dts/exynos5800-peach-pi.dts
+++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts
@@ -664,6 +664,10 @@
664 status = "okay"; 664 status = "okay";
665}; 665};
666 666
667&mixer {
668 status = "okay";
669};
670
667/* eMMC flash */ 671/* eMMC flash */
668&mmc_0 { 672&mmc_0 {
669 status = "okay"; 673 status = "okay";
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 589a67c5f796..84f17f7abb71 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -433,15 +433,6 @@
433 clock-names = "ipg", "per"; 433 clock-names = "ipg", "per";
434 }; 434 };
435 435
436 srtc: srtc@53fa4000 {
437 compatible = "fsl,imx53-rtc", "fsl,imx25-rtc";
438 reg = <0x53fa4000 0x4000>;
439 interrupts = <24>;
440 interrupt-parent = <&tzic>;
441 clocks = <&clks IMX5_CLK_SRTC_GATE>;
442 clock-names = "ipg";
443 };
444
445 iomuxc: iomuxc@53fa8000 { 436 iomuxc: iomuxc@53fa8000 {
446 compatible = "fsl,imx53-iomuxc"; 437 compatible = "fsl,imx53-iomuxc";
447 reg = <0x53fa8000 0x4000>; 438 reg = <0x53fa8000 0x4000>;
diff --git a/arch/arm/boot/dts/kirkwood-openblocks_a7.dts b/arch/arm/boot/dts/kirkwood-openblocks_a7.dts
index cf2f5240e176..27cc913ca0f5 100644
--- a/arch/arm/boot/dts/kirkwood-openblocks_a7.dts
+++ b/arch/arm/boot/dts/kirkwood-openblocks_a7.dts
@@ -53,7 +53,8 @@
53 }; 53 };
54 54
55 pinctrl: pin-controller@10000 { 55 pinctrl: pin-controller@10000 {
56 pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header>; 56 pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header
57 &pmx_gpio_header_gpo>;
57 pinctrl-names = "default"; 58 pinctrl-names = "default";
58 59
59 pmx_uart0: pmx-uart0 { 60 pmx_uart0: pmx-uart0 {
@@ -85,11 +86,16 @@
85 * ground. 86 * ground.
86 */ 87 */
87 pmx_gpio_header: pmx-gpio-header { 88 pmx_gpio_header: pmx-gpio-header {
88 marvell,pins = "mpp17", "mpp7", "mpp29", "mpp28", 89 marvell,pins = "mpp17", "mpp29", "mpp28",
89 "mpp35", "mpp34", "mpp40"; 90 "mpp35", "mpp34", "mpp40";
90 marvell,function = "gpio"; 91 marvell,function = "gpio";
91 }; 92 };
92 93
94 pmx_gpio_header_gpo: pxm-gpio-header-gpo {
95 marvell,pins = "mpp7";
96 marvell,function = "gpo";
97 };
98
93 pmx_gpio_init: pmx-init { 99 pmx_gpio_init: pmx-init {
94 marvell,pins = "mpp38"; 100 marvell,pins = "mpp38";
95 marvell,function = "gpio"; 101 marvell,function = "gpio";
diff --git a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
index 38faa90007d7..2fa5eb4bd402 100644
--- a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
@@ -72,7 +72,8 @@
72}; 72};
73 73
74&gpmc { 74&gpmc {
75 ranges = <1 0 0x08000000 0x1000000>; /* CS1: 16MB for LAN9221 */ 75 ranges = <0 0 0x30000000 0x1000000 /* CS0: 16MB for NAND */
76 1 0 0x2c000000 0x1000000>; /* CS1: 16MB for LAN9221 */
76 77
77 ethernet@gpmc { 78 ethernet@gpmc {
78 pinctrl-names = "default"; 79 pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
index 26cce4d18405..29cb804d10cc 100644
--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
@@ -33,11 +33,12 @@
33 hsusb2_phy: hsusb2_phy { 33 hsusb2_phy: hsusb2_phy {
34 compatible = "usb-nop-xceiv"; 34 compatible = "usb-nop-xceiv";
35 reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; /* gpio_4 */ 35 reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; /* gpio_4 */
36 #phy-cells = <0>;
36 }; 37 };
37}; 38};
38 39
39&gpmc { 40&gpmc {
40 ranges = <0 0 0x00000000 0x1000000>; /* CS0: 16MB for NAND */ 41 ranges = <0 0 0x30000000 0x1000000>; /* CS0: 16MB for NAND */
41 42
42 nand@0,0 { 43 nand@0,0 {
43 compatible = "ti,omap2-nand"; 44 compatible = "ti,omap2-nand";
@@ -121,7 +122,7 @@
121 122
122&mmc3 { 123&mmc3 {
123 interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>; 124 interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>;
124 pinctrl-0 = <&mmc3_pins>; 125 pinctrl-0 = <&mmc3_pins &wl127x_gpio>;
125 pinctrl-names = "default"; 126 pinctrl-names = "default";
126 vmmc-supply = <&wl12xx_vmmc>; 127 vmmc-supply = <&wl12xx_vmmc>;
127 non-removable; 128 non-removable;
@@ -132,8 +133,8 @@
132 wlcore: wlcore@2 { 133 wlcore: wlcore@2 {
133 compatible = "ti,wl1273"; 134 compatible = "ti,wl1273";
134 reg = <2>; 135 reg = <2>;
135 interrupt-parent = <&gpio5>; 136 interrupt-parent = <&gpio1>;
136 interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; /* gpio 152 */ 137 interrupts = <2 IRQ_TYPE_LEVEL_HIGH>; /* gpio 2 */
137 ref-clock-frequency = <26000000>; 138 ref-clock-frequency = <26000000>;
138 }; 139 };
139}; 140};
@@ -157,8 +158,6 @@
157 OMAP3_CORE1_IOPAD(0x2166, PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc2_dat5.sdmmc3_dat1 */ 158 OMAP3_CORE1_IOPAD(0x2166, PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc2_dat5.sdmmc3_dat1 */
158 OMAP3_CORE1_IOPAD(0x2168, PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc2_dat6.sdmmc3_dat2 */ 159 OMAP3_CORE1_IOPAD(0x2168, PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc2_dat6.sdmmc3_dat2 */
159 OMAP3_CORE1_IOPAD(0x216a, PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc2_dat6.sdmmc3_dat3 */ 160 OMAP3_CORE1_IOPAD(0x216a, PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc2_dat6.sdmmc3_dat3 */
160 OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT_PULLUP | MUX_MODE4) /* mcbsp4_clkx.gpio_152 */
161 OMAP3_CORE1_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4) /* sys_boot1.gpio_3 */
162 OMAP3_CORE1_IOPAD(0x21d0, PIN_INPUT_PULLUP | MUX_MODE3) /* mcspi1_cs1.sdmmc3_cmd */ 161 OMAP3_CORE1_IOPAD(0x21d0, PIN_INPUT_PULLUP | MUX_MODE3) /* mcspi1_cs1.sdmmc3_cmd */
163 OMAP3_CORE1_IOPAD(0x21d2, PIN_INPUT_PULLUP | MUX_MODE3) /* mcspi1_cs2.sdmmc_clk */ 162 OMAP3_CORE1_IOPAD(0x21d2, PIN_INPUT_PULLUP | MUX_MODE3) /* mcspi1_cs2.sdmmc_clk */
164 >; 163 >;
@@ -228,6 +227,12 @@
228 OMAP3_WKUP_IOPAD(0x2a0e, PIN_OUTPUT | MUX_MODE4) /* sys_boot2.gpio_4 */ 227 OMAP3_WKUP_IOPAD(0x2a0e, PIN_OUTPUT | MUX_MODE4) /* sys_boot2.gpio_4 */
229 >; 228 >;
230 }; 229 };
230 wl127x_gpio: pinmux_wl127x_gpio_pin {
231 pinctrl-single,pins = <
232 OMAP3_WKUP_IOPAD(0x2a0c, PIN_INPUT | MUX_MODE4) /* sys_boot0.gpio_2 */
233 OMAP3_WKUP_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4) /* sys_boot1.gpio_3 */
234 >;
235 };
231}; 236};
232 237
233&omap3_pmx_core2 { 238&omap3_pmx_core2 {
diff --git a/arch/arm/boot/dts/ls1021a-qds.dts b/arch/arm/boot/dts/ls1021a-qds.dts
index 940875316d0f..67b4de0e3439 100644
--- a/arch/arm/boot/dts/ls1021a-qds.dts
+++ b/arch/arm/boot/dts/ls1021a-qds.dts
@@ -215,7 +215,7 @@
215 reg = <0x2a>; 215 reg = <0x2a>;
216 VDDA-supply = <&reg_3p3v>; 216 VDDA-supply = <&reg_3p3v>;
217 VDDIO-supply = <&reg_3p3v>; 217 VDDIO-supply = <&reg_3p3v>;
218 clocks = <&sys_mclk 1>; 218 clocks = <&sys_mclk>;
219 }; 219 };
220 }; 220 };
221 }; 221 };
diff --git a/arch/arm/boot/dts/ls1021a-twr.dts b/arch/arm/boot/dts/ls1021a-twr.dts
index a8b148ad1dd2..44715c8ef756 100644
--- a/arch/arm/boot/dts/ls1021a-twr.dts
+++ b/arch/arm/boot/dts/ls1021a-twr.dts
@@ -187,7 +187,7 @@
187 reg = <0x0a>; 187 reg = <0x0a>;
188 VDDA-supply = <&reg_3p3v>; 188 VDDA-supply = <&reg_3p3v>;
189 VDDIO-supply = <&reg_3p3v>; 189 VDDIO-supply = <&reg_3p3v>;
190 clocks = <&sys_mclk 1>; 190 clocks = <&sys_mclk>;
191 }; 191 };
192}; 192};
193 193
diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi
index 4926133077b3..0d9faf1a51ea 100644
--- a/arch/arm/boot/dts/meson.dtsi
+++ b/arch/arm/boot/dts/meson.dtsi
@@ -85,15 +85,6 @@
85 reg = <0x7c00 0x200>; 85 reg = <0x7c00 0x200>;
86 }; 86 };
87 87
88 gpio_intc: interrupt-controller@9880 {
89 compatible = "amlogic,meson-gpio-intc";
90 reg = <0xc1109880 0x10>;
91 interrupt-controller;
92 #interrupt-cells = <2>;
93 amlogic,channel-interrupts = <64 65 66 67 68 69 70 71>;
94 status = "disabled";
95 };
96
97 hwrng: rng@8100 { 88 hwrng: rng@8100 {
98 compatible = "amlogic,meson-rng"; 89 compatible = "amlogic,meson-rng";
99 reg = <0x8100 0x8>; 90 reg = <0x8100 0x8>;
@@ -191,6 +182,15 @@
191 status = "disabled"; 182 status = "disabled";
192 }; 183 };
193 184
185 gpio_intc: interrupt-controller@9880 {
186 compatible = "amlogic,meson-gpio-intc";
187 reg = <0x9880 0x10>;
188 interrupt-controller;
189 #interrupt-cells = <2>;
190 amlogic,channel-interrupts = <64 65 66 67 68 69 70 71>;
191 status = "disabled";
192 };
193
194 wdt: watchdog@9900 { 194 wdt: watchdog@9900 {
195 compatible = "amlogic,meson6-wdt"; 195 compatible = "amlogic,meson6-wdt";
196 reg = <0x9900 0x8>; 196 reg = <0x9900 0x8>;
diff --git a/arch/arm/boot/dts/nspire.dtsi b/arch/arm/boot/dts/nspire.dtsi
index ec2283b1a638..1a5ae4cd107f 100644
--- a/arch/arm/boot/dts/nspire.dtsi
+++ b/arch/arm/boot/dts/nspire.dtsi
@@ -56,6 +56,7 @@
56 56
57 usb_phy: usb_phy { 57 usb_phy: usb_phy {
58 compatible = "usb-nop-xceiv"; 58 compatible = "usb-nop-xceiv";
59 #phy-cells = <0>;
59 }; 60 };
60 61
61 vbus_reg: vbus_reg { 62 vbus_reg: vbus_reg {
diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts
index 683b96a8f73e..0349fcc9dc26 100644
--- a/arch/arm/boot/dts/omap3-beagle-xm.dts
+++ b/arch/arm/boot/dts/omap3-beagle-xm.dts
@@ -90,6 +90,7 @@
90 compatible = "usb-nop-xceiv"; 90 compatible = "usb-nop-xceiv";
91 reset-gpios = <&gpio5 19 GPIO_ACTIVE_LOW>; /* gpio_147 */ 91 reset-gpios = <&gpio5 19 GPIO_ACTIVE_LOW>; /* gpio_147 */
92 vcc-supply = <&hsusb2_power>; 92 vcc-supply = <&hsusb2_power>;
93 #phy-cells = <0>;
93 }; 94 };
94 95
95 tfp410: encoder0 { 96 tfp410: encoder0 {
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index 4d2eaf843fa9..3ca8991a6c3e 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -64,6 +64,7 @@
64 compatible = "usb-nop-xceiv"; 64 compatible = "usb-nop-xceiv";
65 reset-gpios = <&gpio5 19 GPIO_ACTIVE_LOW>; /* gpio_147 */ 65 reset-gpios = <&gpio5 19 GPIO_ACTIVE_LOW>; /* gpio_147 */
66 vcc-supply = <&hsusb2_power>; 66 vcc-supply = <&hsusb2_power>;
67 #phy-cells = <0>;
67 }; 68 };
68 69
69 sound { 70 sound {
diff --git a/arch/arm/boot/dts/omap3-cm-t3x.dtsi b/arch/arm/boot/dts/omap3-cm-t3x.dtsi
index 31d5ebf38892..ab6003fe5a43 100644
--- a/arch/arm/boot/dts/omap3-cm-t3x.dtsi
+++ b/arch/arm/boot/dts/omap3-cm-t3x.dtsi
@@ -43,12 +43,14 @@
43 hsusb1_phy: hsusb1_phy { 43 hsusb1_phy: hsusb1_phy {
44 compatible = "usb-nop-xceiv"; 44 compatible = "usb-nop-xceiv";
45 vcc-supply = <&hsusb1_power>; 45 vcc-supply = <&hsusb1_power>;
46 #phy-cells = <0>;
46 }; 47 };
47 48
48 /* HS USB Host PHY on PORT 2 */ 49 /* HS USB Host PHY on PORT 2 */
49 hsusb2_phy: hsusb2_phy { 50 hsusb2_phy: hsusb2_phy {
50 compatible = "usb-nop-xceiv"; 51 compatible = "usb-nop-xceiv";
51 vcc-supply = <&hsusb2_power>; 52 vcc-supply = <&hsusb2_power>;
53 #phy-cells = <0>;
52 }; 54 };
53 55
54 ads7846reg: ads7846-reg { 56 ads7846reg: ads7846-reg {
diff --git a/arch/arm/boot/dts/omap3-evm-common.dtsi b/arch/arm/boot/dts/omap3-evm-common.dtsi
index dbc3f030a16c..ee64191e41ca 100644
--- a/arch/arm/boot/dts/omap3-evm-common.dtsi
+++ b/arch/arm/boot/dts/omap3-evm-common.dtsi
@@ -29,6 +29,7 @@
29 compatible = "usb-nop-xceiv"; 29 compatible = "usb-nop-xceiv";
30 reset-gpios = <&gpio1 21 GPIO_ACTIVE_LOW>; /* gpio_21 */ 30 reset-gpios = <&gpio1 21 GPIO_ACTIVE_LOW>; /* gpio_21 */
31 vcc-supply = <&hsusb2_power>; 31 vcc-supply = <&hsusb2_power>;
32 #phy-cells = <0>;
32 }; 33 };
33 34
34 leds { 35 leds {
diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi
index 4504908c23fe..3dc56fb156b7 100644
--- a/arch/arm/boot/dts/omap3-gta04.dtsi
+++ b/arch/arm/boot/dts/omap3-gta04.dtsi
@@ -120,6 +120,7 @@
120 hsusb2_phy: hsusb2_phy { 120 hsusb2_phy: hsusb2_phy {
121 compatible = "usb-nop-xceiv"; 121 compatible = "usb-nop-xceiv";
122 reset-gpios = <&gpio6 14 GPIO_ACTIVE_LOW>; 122 reset-gpios = <&gpio6 14 GPIO_ACTIVE_LOW>;
123 #phy-cells = <0>;
123 }; 124 };
124 125
125 tv0: connector { 126 tv0: connector {
diff --git a/arch/arm/boot/dts/omap3-igep0020-common.dtsi b/arch/arm/boot/dts/omap3-igep0020-common.dtsi
index 667f96245729..ecbec23af49f 100644
--- a/arch/arm/boot/dts/omap3-igep0020-common.dtsi
+++ b/arch/arm/boot/dts/omap3-igep0020-common.dtsi
@@ -58,6 +58,7 @@
58 compatible = "usb-nop-xceiv"; 58 compatible = "usb-nop-xceiv";
59 reset-gpios = <&gpio1 24 GPIO_ACTIVE_LOW>; /* gpio_24 */ 59 reset-gpios = <&gpio1 24 GPIO_ACTIVE_LOW>; /* gpio_24 */
60 vcc-supply = <&hsusb1_power>; 60 vcc-supply = <&hsusb1_power>;
61 #phy-cells = <0>;
61 }; 62 };
62 63
63 tfp410: encoder { 64 tfp410: encoder {
diff --git a/arch/arm/boot/dts/omap3-igep0030-common.dtsi b/arch/arm/boot/dts/omap3-igep0030-common.dtsi
index e94d9427450c..443f71707437 100644
--- a/arch/arm/boot/dts/omap3-igep0030-common.dtsi
+++ b/arch/arm/boot/dts/omap3-igep0030-common.dtsi
@@ -37,6 +37,7 @@
37 hsusb2_phy: hsusb2_phy { 37 hsusb2_phy: hsusb2_phy {
38 compatible = "usb-nop-xceiv"; 38 compatible = "usb-nop-xceiv";
39 reset-gpios = <&gpio2 22 GPIO_ACTIVE_LOW>; /* gpio_54 */ 39 reset-gpios = <&gpio2 22 GPIO_ACTIVE_LOW>; /* gpio_54 */
40 #phy-cells = <0>;
40 }; 41 };
41}; 42};
42 43
diff --git a/arch/arm/boot/dts/omap3-lilly-a83x.dtsi b/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
index 343a36d8031d..7ada1e93e166 100644
--- a/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
+++ b/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
@@ -51,6 +51,7 @@
51 hsusb1_phy: hsusb1_phy { 51 hsusb1_phy: hsusb1_phy {
52 compatible = "usb-nop-xceiv"; 52 compatible = "usb-nop-xceiv";
53 vcc-supply = <&reg_vcc3>; 53 vcc-supply = <&reg_vcc3>;
54 #phy-cells = <0>;
54 }; 55 };
55}; 56};
56 57
diff --git a/arch/arm/boot/dts/omap3-overo-base.dtsi b/arch/arm/boot/dts/omap3-overo-base.dtsi
index f25e158e7163..ac141fcd1742 100644
--- a/arch/arm/boot/dts/omap3-overo-base.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-base.dtsi
@@ -51,6 +51,7 @@
51 compatible = "usb-nop-xceiv"; 51 compatible = "usb-nop-xceiv";
52 reset-gpios = <&gpio6 23 GPIO_ACTIVE_LOW>; /* gpio_183 */ 52 reset-gpios = <&gpio6 23 GPIO_ACTIVE_LOW>; /* gpio_183 */
53 vcc-supply = <&hsusb2_power>; 53 vcc-supply = <&hsusb2_power>;
54 #phy-cells = <0>;
54 }; 55 };
55 56
56 /* Regulator to trigger the nPoweron signal of the Wifi module */ 57 /* Regulator to trigger the nPoweron signal of the Wifi module */
diff --git a/arch/arm/boot/dts/omap3-pandora-common.dtsi b/arch/arm/boot/dts/omap3-pandora-common.dtsi
index 53e007abdc71..cd53dc6c0051 100644
--- a/arch/arm/boot/dts/omap3-pandora-common.dtsi
+++ b/arch/arm/boot/dts/omap3-pandora-common.dtsi
@@ -205,6 +205,7 @@
205 compatible = "usb-nop-xceiv"; 205 compatible = "usb-nop-xceiv";
206 reset-gpios = <&gpio1 16 GPIO_ACTIVE_LOW>; /* GPIO_16 */ 206 reset-gpios = <&gpio1 16 GPIO_ACTIVE_LOW>; /* GPIO_16 */
207 vcc-supply = <&vaux2>; 207 vcc-supply = <&vaux2>;
208 #phy-cells = <0>;
208 }; 209 };
209 210
210 /* HS USB Host VBUS supply 211 /* HS USB Host VBUS supply
diff --git a/arch/arm/boot/dts/omap3-tao3530.dtsi b/arch/arm/boot/dts/omap3-tao3530.dtsi
index 9a601d15247b..6f5bd027b717 100644
--- a/arch/arm/boot/dts/omap3-tao3530.dtsi
+++ b/arch/arm/boot/dts/omap3-tao3530.dtsi
@@ -46,6 +46,7 @@
46 compatible = "usb-nop-xceiv"; 46 compatible = "usb-nop-xceiv";
47 reset-gpios = <&gpio6 2 GPIO_ACTIVE_LOW>; /* gpio_162 */ 47 reset-gpios = <&gpio6 2 GPIO_ACTIVE_LOW>; /* gpio_162 */
48 vcc-supply = <&hsusb2_power>; 48 vcc-supply = <&hsusb2_power>;
49 #phy-cells = <0>;
49 }; 50 };
50 51
51 sound { 52 sound {
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index 90b5c7148feb..bb33935df7b0 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -715,6 +715,7 @@
715 compatible = "ti,ohci-omap3"; 715 compatible = "ti,ohci-omap3";
716 reg = <0x48064400 0x400>; 716 reg = <0x48064400 0x400>;
717 interrupts = <76>; 717 interrupts = <76>;
718 remote-wakeup-connected;
718 }; 719 };
719 720
720 usbhsehci: ehci@48064800 { 721 usbhsehci: ehci@48064800 {
diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts
index 8b93d37310f2..24a463f8641f 100644
--- a/arch/arm/boot/dts/omap4-droid4-xt894.dts
+++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts
@@ -73,6 +73,7 @@
73 /* HS USB Host PHY on PORT 1 */ 73 /* HS USB Host PHY on PORT 1 */
74 hsusb1_phy: hsusb1_phy { 74 hsusb1_phy: hsusb1_phy {
75 compatible = "usb-nop-xceiv"; 75 compatible = "usb-nop-xceiv";
76 #phy-cells = <0>;
76 }; 77 };
77 78
78 /* LCD regulator from sw5 source */ 79 /* LCD regulator from sw5 source */
diff --git a/arch/arm/boot/dts/omap4-duovero.dtsi b/arch/arm/boot/dts/omap4-duovero.dtsi
index 6e6810c258eb..eb123b24c8e3 100644
--- a/arch/arm/boot/dts/omap4-duovero.dtsi
+++ b/arch/arm/boot/dts/omap4-duovero.dtsi
@@ -43,6 +43,7 @@
43 hsusb1_phy: hsusb1_phy { 43 hsusb1_phy: hsusb1_phy {
44 compatible = "usb-nop-xceiv"; 44 compatible = "usb-nop-xceiv";
45 reset-gpios = <&gpio2 30 GPIO_ACTIVE_LOW>; /* gpio_62 */ 45 reset-gpios = <&gpio2 30 GPIO_ACTIVE_LOW>; /* gpio_62 */
46 #phy-cells = <0>;
46 47
47 pinctrl-names = "default"; 48 pinctrl-names = "default";
48 pinctrl-0 = <&hsusb1phy_pins>; 49 pinctrl-0 = <&hsusb1phy_pins>;
diff --git a/arch/arm/boot/dts/omap4-panda-common.dtsi b/arch/arm/boot/dts/omap4-panda-common.dtsi
index 22c1eee9b07a..5501d1b4e6cd 100644
--- a/arch/arm/boot/dts/omap4-panda-common.dtsi
+++ b/arch/arm/boot/dts/omap4-panda-common.dtsi
@@ -89,6 +89,7 @@
89 hsusb1_phy: hsusb1_phy { 89 hsusb1_phy: hsusb1_phy {
90 compatible = "usb-nop-xceiv"; 90 compatible = "usb-nop-xceiv";
91 reset-gpios = <&gpio2 30 GPIO_ACTIVE_LOW>; /* gpio_62 */ 91 reset-gpios = <&gpio2 30 GPIO_ACTIVE_LOW>; /* gpio_62 */
92 #phy-cells = <0>;
92 vcc-supply = <&hsusb1_power>; 93 vcc-supply = <&hsusb1_power>;
93 clocks = <&auxclk3_ck>; 94 clocks = <&auxclk3_ck>;
94 clock-names = "main_clk"; 95 clock-names = "main_clk";
diff --git a/arch/arm/boot/dts/omap4-var-som-om44.dtsi b/arch/arm/boot/dts/omap4-var-som-om44.dtsi
index 6500bfc8d130..10fce28ceb5b 100644
--- a/arch/arm/boot/dts/omap4-var-som-om44.dtsi
+++ b/arch/arm/boot/dts/omap4-var-som-om44.dtsi
@@ -44,6 +44,7 @@
44 44
45 reset-gpios = <&gpio6 17 GPIO_ACTIVE_LOW>; /* gpio 177 */ 45 reset-gpios = <&gpio6 17 GPIO_ACTIVE_LOW>; /* gpio 177 */
46 vcc-supply = <&vbat>; 46 vcc-supply = <&vbat>;
47 #phy-cells = <0>;
47 48
48 clocks = <&auxclk3_ck>; 49 clocks = <&auxclk3_ck>;
49 clock-names = "main_clk"; 50 clock-names = "main_clk";
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 1dc5a76b3c71..cc1a07a3620f 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -398,7 +398,7 @@
398 elm: elm@48078000 { 398 elm: elm@48078000 {
399 compatible = "ti,am3352-elm"; 399 compatible = "ti,am3352-elm";
400 reg = <0x48078000 0x2000>; 400 reg = <0x48078000 0x2000>;
401 interrupts = <4>; 401 interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
402 ti,hwmods = "elm"; 402 ti,hwmods = "elm";
403 status = "disabled"; 403 status = "disabled";
404 }; 404 };
@@ -1081,14 +1081,13 @@
1081 usbhsohci: ohci@4a064800 { 1081 usbhsohci: ohci@4a064800 {
1082 compatible = "ti,ohci-omap3"; 1082 compatible = "ti,ohci-omap3";
1083 reg = <0x4a064800 0x400>; 1083 reg = <0x4a064800 0x400>;
1084 interrupt-parent = <&gic>;
1085 interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>; 1084 interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
1085 remote-wakeup-connected;
1086 }; 1086 };
1087 1087
1088 usbhsehci: ehci@4a064c00 { 1088 usbhsehci: ehci@4a064c00 {
1089 compatible = "ti,ehci-omap"; 1089 compatible = "ti,ehci-omap";
1090 reg = <0x4a064c00 0x400>; 1090 reg = <0x4a064c00 0x400>;
1091 interrupt-parent = <&gic>;
1092 interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>; 1091 interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
1093 }; 1092 };
1094 }; 1093 };
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
index 575ecffb0e9e..1b20838bb9a4 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
@@ -73,12 +73,14 @@
73 clocks = <&auxclk1_ck>; 73 clocks = <&auxclk1_ck>;
74 clock-names = "main_clk"; 74 clock-names = "main_clk";
75 clock-frequency = <19200000>; 75 clock-frequency = <19200000>;
76 #phy-cells = <0>;
76 }; 77 };
77 78
78 /* HS USB Host PHY on PORT 3 */ 79 /* HS USB Host PHY on PORT 3 */
79 hsusb3_phy: hsusb3_phy { 80 hsusb3_phy: hsusb3_phy {
80 compatible = "usb-nop-xceiv"; 81 compatible = "usb-nop-xceiv";
81 reset-gpios = <&gpio3 15 GPIO_ACTIVE_LOW>; /* gpio3_79 ETH_NRESET */ 82 reset-gpios = <&gpio3 15 GPIO_ACTIVE_LOW>; /* gpio3_79 ETH_NRESET */
83 #phy-cells = <0>;
82 }; 84 };
83 85
84 tpd12s015: encoder { 86 tpd12s015: encoder {
diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts
index 5b172a04b6f1..5e21fb430a65 100644
--- a/arch/arm/boot/dts/omap5-cm-t54.dts
+++ b/arch/arm/boot/dts/omap5-cm-t54.dts
@@ -63,12 +63,14 @@
63 hsusb2_phy: hsusb2_phy { 63 hsusb2_phy: hsusb2_phy {
64 compatible = "usb-nop-xceiv"; 64 compatible = "usb-nop-xceiv";
65 reset-gpios = <&gpio3 12 GPIO_ACTIVE_LOW>; /* gpio3_76 HUB_RESET */ 65 reset-gpios = <&gpio3 12 GPIO_ACTIVE_LOW>; /* gpio3_76 HUB_RESET */
66 #phy-cells = <0>;
66 }; 67 };
67 68
68 /* HS USB Host PHY on PORT 3 */ 69 /* HS USB Host PHY on PORT 3 */
69 hsusb3_phy: hsusb3_phy { 70 hsusb3_phy: hsusb3_phy {
70 compatible = "usb-nop-xceiv"; 71 compatible = "usb-nop-xceiv";
71 reset-gpios = <&gpio3 19 GPIO_ACTIVE_LOW>; /* gpio3_83 ETH_RESET */ 72 reset-gpios = <&gpio3 19 GPIO_ACTIVE_LOW>; /* gpio3_83 ETH_RESET */
73 #phy-cells = <0>;
72 }; 74 };
73 75
74 leds { 76 leds {
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 4cd0005e462f..51a7fb3d7b9a 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -940,6 +940,7 @@
940 compatible = "ti,ohci-omap3"; 940 compatible = "ti,ohci-omap3";
941 reg = <0x4a064800 0x400>; 941 reg = <0x4a064800 0x400>;
942 interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>; 942 interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
943 remote-wakeup-connected;
943 }; 944 };
944 945
945 usbhsehci: ehci@4a064c00 { 946 usbhsehci: ehci@4a064c00 {
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index 2f017fee4009..62baabd757b6 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -1201,6 +1201,7 @@
1201 clock-names = "extal", "usb_extal"; 1201 clock-names = "extal", "usb_extal";
1202 #clock-cells = <2>; 1202 #clock-cells = <2>;
1203 #power-domain-cells = <0>; 1203 #power-domain-cells = <0>;
1204 #reset-cells = <1>;
1204 }; 1205 };
1205 1206
1206 prr: chipid@ff000044 { 1207 prr: chipid@ff000044 {
diff --git a/arch/arm/boot/dts/r8a7792.dtsi b/arch/arm/boot/dts/r8a7792.dtsi
index 131f65b0426e..3d080e07374c 100644
--- a/arch/arm/boot/dts/r8a7792.dtsi
+++ b/arch/arm/boot/dts/r8a7792.dtsi
@@ -829,6 +829,7 @@
829 clock-names = "extal"; 829 clock-names = "extal";
830 #clock-cells = <2>; 830 #clock-cells = <2>;
831 #power-domain-cells = <0>; 831 #power-domain-cells = <0>;
832 #reset-cells = <1>;
832 }; 833 };
833 }; 834 };
834 835
diff --git a/arch/arm/boot/dts/r8a7793.dtsi b/arch/arm/boot/dts/r8a7793.dtsi
index 58eae569b4e0..0cd1035de1a4 100644
--- a/arch/arm/boot/dts/r8a7793.dtsi
+++ b/arch/arm/boot/dts/r8a7793.dtsi
@@ -1088,6 +1088,7 @@
1088 clock-names = "extal", "usb_extal"; 1088 clock-names = "extal", "usb_extal";
1089 #clock-cells = <2>; 1089 #clock-cells = <2>;
1090 #power-domain-cells = <0>; 1090 #power-domain-cells = <0>;
1091 #reset-cells = <1>;
1091 }; 1092 };
1092 1093
1093 rst: reset-controller@e6160000 { 1094 rst: reset-controller@e6160000 {
diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi
index 905e50c9b524..5643976c1356 100644
--- a/arch/arm/boot/dts/r8a7794.dtsi
+++ b/arch/arm/boot/dts/r8a7794.dtsi
@@ -1099,6 +1099,7 @@
1099 clock-names = "extal", "usb_extal"; 1099 clock-names = "extal", "usb_extal";
1100 #clock-cells = <2>; 1100 #clock-cells = <2>;
1101 #power-domain-cells = <0>; 1101 #power-domain-cells = <0>;
1102 #reset-cells = <1>;
1102 }; 1103 };
1103 1104
1104 rst: reset-controller@e6160000 { 1105 rst: reset-controller@e6160000 {
diff --git a/arch/arm/boot/dts/rk3066a-marsboard.dts b/arch/arm/boot/dts/rk3066a-marsboard.dts
index c6d92c25df42..d23ee6d911ac 100644
--- a/arch/arm/boot/dts/rk3066a-marsboard.dts
+++ b/arch/arm/boot/dts/rk3066a-marsboard.dts
@@ -83,6 +83,10 @@
83 }; 83 };
84}; 84};
85 85
86&cpu0 {
87 cpu0-supply = <&vdd_arm>;
88};
89
86&i2c1 { 90&i2c1 {
87 status = "okay"; 91 status = "okay";
88 clock-frequency = <400000>; 92 clock-frequency = <400000>;
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index cd24894ee5c6..6102e4e7f35c 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -956,7 +956,7 @@
956 iep_mmu: iommu@ff900800 { 956 iep_mmu: iommu@ff900800 {
957 compatible = "rockchip,iommu"; 957 compatible = "rockchip,iommu";
958 reg = <0x0 0xff900800 0x0 0x40>; 958 reg = <0x0 0xff900800 0x0 0x40>;
959 interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH 0>; 959 interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
960 interrupt-names = "iep_mmu"; 960 interrupt-names = "iep_mmu";
961 #iommu-cells = <0>; 961 #iommu-cells = <0>;
962 status = "disabled"; 962 status = "disabled";
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index b91300d49a31..4f2f2eea0755 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -502,8 +502,8 @@
502 reg = <0x01c16000 0x1000>; 502 reg = <0x01c16000 0x1000>;
503 interrupts = <58>; 503 interrupts = <58>;
504 clocks = <&ccu CLK_AHB_HDMI0>, <&ccu CLK_HDMI>, 504 clocks = <&ccu CLK_AHB_HDMI0>, <&ccu CLK_HDMI>,
505 <&ccu 9>, 505 <&ccu CLK_PLL_VIDEO0_2X>,
506 <&ccu 18>; 506 <&ccu CLK_PLL_VIDEO1_2X>;
507 clock-names = "ahb", "mod", "pll-0", "pll-1"; 507 clock-names = "ahb", "mod", "pll-0", "pll-1";
508 dmas = <&dma SUN4I_DMA_NORMAL 16>, 508 dmas = <&dma SUN4I_DMA_NORMAL 16>,
509 <&dma SUN4I_DMA_NORMAL 16>, 509 <&dma SUN4I_DMA_NORMAL 16>,
@@ -1104,7 +1104,7 @@
1104 1104
1105 be1_out_tcon0: endpoint@0 { 1105 be1_out_tcon0: endpoint@0 {
1106 reg = <0>; 1106 reg = <0>;
1107 remote-endpoint = <&tcon1_in_be0>; 1107 remote-endpoint = <&tcon0_in_be1>;
1108 }; 1108 };
1109 1109
1110 be1_out_tcon1: endpoint@1 { 1110 be1_out_tcon1: endpoint@1 {
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index 6ae4d95e230e..316cb8b2945b 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -82,8 +82,8 @@
82 reg = <0x01c16000 0x1000>; 82 reg = <0x01c16000 0x1000>;
83 interrupts = <58>; 83 interrupts = <58>;
84 clocks = <&ccu CLK_AHB_HDMI>, <&ccu CLK_HDMI>, 84 clocks = <&ccu CLK_AHB_HDMI>, <&ccu CLK_HDMI>,
85 <&ccu 9>, 85 <&ccu CLK_PLL_VIDEO0_2X>,
86 <&ccu 16>; 86 <&ccu CLK_PLL_VIDEO1_2X>;
87 clock-names = "ahb", "mod", "pll-0", "pll-1"; 87 clock-names = "ahb", "mod", "pll-0", "pll-1";
88 dmas = <&dma SUN4I_DMA_NORMAL 16>, 88 dmas = <&dma SUN4I_DMA_NORMAL 16>,
89 <&dma SUN4I_DMA_NORMAL 16>, 89 <&dma SUN4I_DMA_NORMAL 16>,
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 8bfa12b548e0..72d3fe44ecaf 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -429,8 +429,8 @@
429 interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>; 429 interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
430 clocks = <&ccu CLK_AHB1_HDMI>, <&ccu CLK_HDMI>, 430 clocks = <&ccu CLK_AHB1_HDMI>, <&ccu CLK_HDMI>,
431 <&ccu CLK_HDMI_DDC>, 431 <&ccu CLK_HDMI_DDC>,
432 <&ccu 7>, 432 <&ccu CLK_PLL_VIDEO0_2X>,
433 <&ccu 13>; 433 <&ccu CLK_PLL_VIDEO1_2X>;
434 clock-names = "ahb", "mod", "ddc", "pll-0", "pll-1"; 434 clock-names = "ahb", "mod", "ddc", "pll-0", "pll-1";
435 resets = <&ccu RST_AHB1_HDMI>; 435 resets = <&ccu RST_AHB1_HDMI>;
436 reset-names = "ahb"; 436 reset-names = "ahb";
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 68dfa82544fc..bd0cd3204273 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -581,8 +581,8 @@
581 reg = <0x01c16000 0x1000>; 581 reg = <0x01c16000 0x1000>;
582 interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>; 582 interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
583 clocks = <&ccu CLK_AHB_HDMI0>, <&ccu CLK_HDMI>, 583 clocks = <&ccu CLK_AHB_HDMI0>, <&ccu CLK_HDMI>,
584 <&ccu 9>, 584 <&ccu CLK_PLL_VIDEO0_2X>,
585 <&ccu 18>; 585 <&ccu CLK_PLL_VIDEO1_2X>;
586 clock-names = "ahb", "mod", "pll-0", "pll-1"; 586 clock-names = "ahb", "mod", "pll-0", "pll-1";
587 dmas = <&dma SUN4I_DMA_NORMAL 16>, 587 dmas = <&dma SUN4I_DMA_NORMAL 16>,
588 <&dma SUN4I_DMA_NORMAL 16>, 588 <&dma SUN4I_DMA_NORMAL 16>,
@@ -1354,7 +1354,7 @@
1354 1354
1355 be1_out_tcon0: endpoint@0 { 1355 be1_out_tcon0: endpoint@0 {
1356 reg = <0>; 1356 reg = <0>;
1357 remote-endpoint = <&tcon1_in_be0>; 1357 remote-endpoint = <&tcon0_in_be1>;
1358 }; 1358 };
1359 1359
1360 be1_out_tcon1: endpoint@1 { 1360 be1_out_tcon1: endpoint@1 {
diff --git a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
index 98715538932f..a021ee6da396 100644
--- a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
@@ -146,6 +146,7 @@
146 status = "okay"; 146 status = "okay";
147 147
148 axp81x: pmic@3a3 { 148 axp81x: pmic@3a3 {
149 compatible = "x-powers,axp813";
149 reg = <0x3a3>; 150 reg = <0x3a3>;
150 interrupt-parent = <&r_intc>; 151 interrupt-parent = <&r_intc>;
151 interrupts = <0 IRQ_TYPE_LEVEL_LOW>; 152 interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
diff --git a/arch/arm/boot/dts/tango4-common.dtsi b/arch/arm/boot/dts/tango4-common.dtsi
index 0ec1b0a317b4..ff72a8efb73d 100644
--- a/arch/arm/boot/dts/tango4-common.dtsi
+++ b/arch/arm/boot/dts/tango4-common.dtsi
@@ -156,7 +156,6 @@
156 reg = <0x6e000 0x400>; 156 reg = <0x6e000 0x400>;
157 ranges = <0 0x6e000 0x400>; 157 ranges = <0 0x6e000 0x400>;
158 interrupt-parent = <&gic>; 158 interrupt-parent = <&gic>;
159 interrupt-controller;
160 #address-cells = <1>; 159 #address-cells = <1>;
161 #size-cells = <1>; 160 #size-cells = <1>;
162 161
diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts
index 02a6227c717c..4b8edc8982cf 100644
--- a/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts
+++ b/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts
@@ -121,7 +121,7 @@
121 switch0port10: port@10 { 121 switch0port10: port@10 {
122 reg = <10>; 122 reg = <10>;
123 label = "dsa"; 123 label = "dsa";
124 phy-mode = "xgmii"; 124 phy-mode = "xaui";
125 link = <&switch1port10>; 125 link = <&switch1port10>;
126 }; 126 };
127 }; 127 };
@@ -208,7 +208,7 @@
208 switch1port10: port@10 { 208 switch1port10: port@10 {
209 reg = <10>; 209 reg = <10>;
210 label = "dsa"; 210 label = "dsa";
211 phy-mode = "xgmii"; 211 phy-mode = "xaui";
212 link = <&switch0port10>; 212 link = <&switch0port10>;
213 }; 213 };
214 }; 214 };
@@ -359,7 +359,7 @@
359}; 359};
360 360
361&i2c1 { 361&i2c1 {
362 at24mac602@0 { 362 at24mac602@50 {
363 compatible = "atmel,24c02"; 363 compatible = "atmel,24c02";
364 reg = <0x50>; 364 reg = <0x50>;
365 read-only; 365 read-only;
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index 5caaf971fb50..df433abfcb02 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -10,6 +10,7 @@ CONFIG_SMP=y
10CONFIG_NR_CPUS=8 10CONFIG_NR_CPUS=8
11CONFIG_AEABI=y 11CONFIG_AEABI=y
12CONFIG_HIGHMEM=y 12CONFIG_HIGHMEM=y
13CONFIG_CMA=y
13CONFIG_ARM_APPENDED_DTB=y 14CONFIG_ARM_APPENDED_DTB=y
14CONFIG_ARM_ATAG_DTB_COMPAT=y 15CONFIG_ARM_ATAG_DTB_COMPAT=y
15CONFIG_CPU_FREQ=y 16CONFIG_CPU_FREQ=y
@@ -33,6 +34,7 @@ CONFIG_CAN_SUN4I=y
33# CONFIG_WIRELESS is not set 34# CONFIG_WIRELESS is not set
34CONFIG_DEVTMPFS=y 35CONFIG_DEVTMPFS=y
35CONFIG_DEVTMPFS_MOUNT=y 36CONFIG_DEVTMPFS_MOUNT=y
37CONFIG_DMA_CMA=y
36CONFIG_BLK_DEV_SD=y 38CONFIG_BLK_DEV_SD=y
37CONFIG_ATA=y 39CONFIG_ATA=y
38CONFIG_AHCI_SUNXI=y 40CONFIG_AHCI_SUNXI=y
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index c8781450905b..3ab8b3781bfe 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -161,8 +161,7 @@
161#else 161#else
162#define VTTBR_X (5 - KVM_T0SZ) 162#define VTTBR_X (5 - KVM_T0SZ)
163#endif 163#endif
164#define VTTBR_BADDR_SHIFT (VTTBR_X - 1) 164#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X)
165#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
166#define VTTBR_VMID_SHIFT _AC(48, ULL) 165#define VTTBR_VMID_SHIFT _AC(48, ULL)
167#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT) 166#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
168 167
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 242151ea6908..a9f7d3f47134 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -285,6 +285,11 @@ static inline void kvm_arm_init_debug(void) {}
285static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {} 285static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
286static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {} 286static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
287static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {} 287static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
288static inline bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu,
289 struct kvm_run *run)
290{
291 return false;
292}
288 293
289int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, 294int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
290 struct kvm_device_attr *attr); 295 struct kvm_device_attr *attr);
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index 4d53de308ee0..4d1cc1847edf 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -7,6 +7,7 @@ generated-y += unistd-oabi.h
7generated-y += unistd-eabi.h 7generated-y += unistd-eabi.h
8 8
9generic-y += bitsperlong.h 9generic-y += bitsperlong.h
10generic-y += bpf_perf_event.h
10generic-y += errno.h 11generic-y += errno.h
11generic-y += ioctl.h 12generic-y += ioctl.h
12generic-y += ipcbuf.h 13generic-y += ipcbuf.h
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 5cf04888c581..3e26c6f7a191 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -793,7 +793,6 @@ void abort(void)
793 /* if that doesn't kill us, halt */ 793 /* if that doesn't kill us, halt */
794 panic("Oops failed to kill thread"); 794 panic("Oops failed to kill thread");
795} 795}
796EXPORT_SYMBOL(abort);
797 796
798void __init trap_init(void) 797void __init trap_init(void)
799{ 798{
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 1712f132b80d..b83fdc06286a 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -85,7 +85,11 @@
85 .pushsection .text.fixup,"ax" 85 .pushsection .text.fixup,"ax"
86 .align 4 86 .align 4
879001: mov r4, #-EFAULT 879001: mov r4, #-EFAULT
88#ifdef CONFIG_CPU_SW_DOMAIN_PAN
89 ldr r5, [sp, #9*4] @ *err_ptr
90#else
88 ldr r5, [sp, #8*4] @ *err_ptr 91 ldr r5, [sp, #8*4] @ *err_ptr
92#endif
89 str r4, [r5] 93 str r4, [r5]
90 ldmia sp, {r1, r2} @ retrieve dst, len 94 ldmia sp, {r1, r2} @ retrieve dst, len
91 add r2, r2, r1 95 add r2, r2, r1
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 8be04ec95adf..5ace9380626a 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -868,10 +868,10 @@ static const struct dma_slave_map dm365_edma_map[] = {
868 { "spi_davinci.0", "rx", EDMA_FILTER_PARAM(0, 17) }, 868 { "spi_davinci.0", "rx", EDMA_FILTER_PARAM(0, 17) },
869 { "spi_davinci.3", "tx", EDMA_FILTER_PARAM(0, 18) }, 869 { "spi_davinci.3", "tx", EDMA_FILTER_PARAM(0, 18) },
870 { "spi_davinci.3", "rx", EDMA_FILTER_PARAM(0, 19) }, 870 { "spi_davinci.3", "rx", EDMA_FILTER_PARAM(0, 19) },
871 { "dm6441-mmc.0", "rx", EDMA_FILTER_PARAM(0, 26) }, 871 { "da830-mmc.0", "rx", EDMA_FILTER_PARAM(0, 26) },
872 { "dm6441-mmc.0", "tx", EDMA_FILTER_PARAM(0, 27) }, 872 { "da830-mmc.0", "tx", EDMA_FILTER_PARAM(0, 27) },
873 { "dm6441-mmc.1", "rx", EDMA_FILTER_PARAM(0, 30) }, 873 { "da830-mmc.1", "rx", EDMA_FILTER_PARAM(0, 30) },
874 { "dm6441-mmc.1", "tx", EDMA_FILTER_PARAM(0, 31) }, 874 { "da830-mmc.1", "tx", EDMA_FILTER_PARAM(0, 31) },
875}; 875};
876 876
877static struct edma_soc_info dm365_edma_pdata = { 877static struct edma_soc_info dm365_edma_pdata = {
@@ -925,12 +925,14 @@ static struct resource edma_resources[] = {
925 /* not using TC*_ERR */ 925 /* not using TC*_ERR */
926}; 926};
927 927
928static struct platform_device dm365_edma_device = { 928static const struct platform_device_info dm365_edma_device __initconst = {
929 .name = "edma", 929 .name = "edma",
930 .id = 0, 930 .id = 0,
931 .dev.platform_data = &dm365_edma_pdata, 931 .dma_mask = DMA_BIT_MASK(32),
932 .num_resources = ARRAY_SIZE(edma_resources), 932 .res = edma_resources,
933 .resource = edma_resources, 933 .num_res = ARRAY_SIZE(edma_resources),
934 .data = &dm365_edma_pdata,
935 .size_data = sizeof(dm365_edma_pdata),
934}; 936};
935 937
936static struct resource dm365_asp_resources[] = { 938static struct resource dm365_asp_resources[] = {
@@ -1428,13 +1430,18 @@ int __init dm365_init_video(struct vpfe_config *vpfe_cfg,
1428 1430
1429static int __init dm365_init_devices(void) 1431static int __init dm365_init_devices(void)
1430{ 1432{
1433 struct platform_device *edma_pdev;
1431 int ret = 0; 1434 int ret = 0;
1432 1435
1433 if (!cpu_is_davinci_dm365()) 1436 if (!cpu_is_davinci_dm365())
1434 return 0; 1437 return 0;
1435 1438
1436 davinci_cfg_reg(DM365_INT_EDMA_CC); 1439 davinci_cfg_reg(DM365_INT_EDMA_CC);
1437 platform_device_register(&dm365_edma_device); 1440 edma_pdev = platform_device_register_full(&dm365_edma_device);
1441 if (IS_ERR(edma_pdev)) {
1442 pr_warn("%s: Failed to register eDMA\n", __func__);
1443 return PTR_ERR(edma_pdev);
1444 }
1438 1445
1439 platform_device_register(&dm365_mdio_device); 1446 platform_device_register(&dm365_mdio_device);
1440 platform_device_register(&dm365_emac_device); 1447 platform_device_register(&dm365_emac_device);
diff --git a/arch/arm/mach-meson/platsmp.c b/arch/arm/mach-meson/platsmp.c
index 2555f9056a33..cad7ee8f0d6b 100644
--- a/arch/arm/mach-meson/platsmp.c
+++ b/arch/arm/mach-meson/platsmp.c
@@ -102,7 +102,7 @@ static void __init meson_smp_prepare_cpus(const char *scu_compatible,
102 102
103 scu_base = of_iomap(node, 0); 103 scu_base = of_iomap(node, 0);
104 if (!scu_base) { 104 if (!scu_base) {
105 pr_err("Couln't map SCU registers\n"); 105 pr_err("Couldn't map SCU registers\n");
106 return; 106 return;
107 } 107 }
108 108
diff --git a/arch/arm/mach-omap2/cm_common.c b/arch/arm/mach-omap2/cm_common.c
index d555791cf349..83c6fa74cc31 100644
--- a/arch/arm/mach-omap2/cm_common.c
+++ b/arch/arm/mach-omap2/cm_common.c
@@ -68,14 +68,17 @@ void __init omap2_set_globals_cm(void __iomem *cm, void __iomem *cm2)
68int cm_split_idlest_reg(struct clk_omap_reg *idlest_reg, s16 *prcm_inst, 68int cm_split_idlest_reg(struct clk_omap_reg *idlest_reg, s16 *prcm_inst,
69 u8 *idlest_reg_id) 69 u8 *idlest_reg_id)
70{ 70{
71 int ret;
71 if (!cm_ll_data->split_idlest_reg) { 72 if (!cm_ll_data->split_idlest_reg) {
72 WARN_ONCE(1, "cm: %s: no low-level function defined\n", 73 WARN_ONCE(1, "cm: %s: no low-level function defined\n",
73 __func__); 74 __func__);
74 return -EINVAL; 75 return -EINVAL;
75 } 76 }
76 77
77 return cm_ll_data->split_idlest_reg(idlest_reg, prcm_inst, 78 ret = cm_ll_data->split_idlest_reg(idlest_reg, prcm_inst,
78 idlest_reg_id); 79 idlest_reg_id);
80 *prcm_inst -= cm_base.offset;
81 return ret;
79} 82}
80 83
81/** 84/**
@@ -337,6 +340,7 @@ int __init omap2_cm_base_init(void)
337 if (mem) { 340 if (mem) {
338 mem->pa = res.start + data->offset; 341 mem->pa = res.start + data->offset;
339 mem->va = data->mem + data->offset; 342 mem->va = data->mem + data->offset;
343 mem->offset = data->offset;
340 } 344 }
341 345
342 data->np = np; 346 data->np = np;
diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c
index 5ac122e88f67..fa7f308c9027 100644
--- a/arch/arm/mach-omap2/omap-secure.c
+++ b/arch/arm/mach-omap2/omap-secure.c
@@ -73,6 +73,27 @@ phys_addr_t omap_secure_ram_mempool_base(void)
73 return omap_secure_memblock_base; 73 return omap_secure_memblock_base;
74} 74}
75 75
76#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
77u32 omap3_save_secure_ram(void __iomem *addr, int size)
78{
79 u32 ret;
80 u32 param[5];
81
82 if (size != OMAP3_SAVE_SECURE_RAM_SZ)
83 return OMAP3_SAVE_SECURE_RAM_SZ;
84
85 param[0] = 4; /* Number of arguments */
86 param[1] = __pa(addr); /* Physical address for saving */
87 param[2] = 0;
88 param[3] = 1;
89 param[4] = 1;
90
91 ret = save_secure_ram_context(__pa(param));
92
93 return ret;
94}
95#endif
96
76/** 97/**
77 * rx51_secure_dispatcher: Routine to dispatch secure PPA API calls 98 * rx51_secure_dispatcher: Routine to dispatch secure PPA API calls
78 * @idx: The PPA API index 99 * @idx: The PPA API index
diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h
index bae263fba640..c509cde71f93 100644
--- a/arch/arm/mach-omap2/omap-secure.h
+++ b/arch/arm/mach-omap2/omap-secure.h
@@ -31,6 +31,8 @@
31/* Maximum Secure memory storage size */ 31/* Maximum Secure memory storage size */
32#define OMAP_SECURE_RAM_STORAGE (88 * SZ_1K) 32#define OMAP_SECURE_RAM_STORAGE (88 * SZ_1K)
33 33
34#define OMAP3_SAVE_SECURE_RAM_SZ 0x803F
35
34/* Secure low power HAL API index */ 36/* Secure low power HAL API index */
35#define OMAP4_HAL_SAVESECURERAM_INDEX 0x1a 37#define OMAP4_HAL_SAVESECURERAM_INDEX 0x1a
36#define OMAP4_HAL_SAVEHW_INDEX 0x1b 38#define OMAP4_HAL_SAVEHW_INDEX 0x1b
@@ -65,6 +67,8 @@ extern u32 omap_smc2(u32 id, u32 falg, u32 pargs);
65extern u32 omap_smc3(u32 id, u32 process, u32 flag, u32 pargs); 67extern u32 omap_smc3(u32 id, u32 process, u32 flag, u32 pargs);
66extern phys_addr_t omap_secure_ram_mempool_base(void); 68extern phys_addr_t omap_secure_ram_mempool_base(void);
67extern int omap_secure_ram_reserve_memblock(void); 69extern int omap_secure_ram_reserve_memblock(void);
70extern u32 save_secure_ram_context(u32 args_pa);
71extern u32 omap3_save_secure_ram(void __iomem *save_regs, int size);
68 72
69extern u32 rx51_secure_dispatcher(u32 idx, u32 process, u32 flag, u32 nargs, 73extern u32 rx51_secure_dispatcher(u32 idx, u32 process, u32 flag, u32 nargs,
70 u32 arg1, u32 arg2, u32 arg3, u32 arg4); 74 u32 arg1, u32 arg2, u32 arg3, u32 arg4);
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index d45cbfdb4be6..f0388058b7da 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -391,10 +391,8 @@ omap_device_copy_resources(struct omap_hwmod *oh,
391 const char *name; 391 const char *name;
392 int error, irq = 0; 392 int error, irq = 0;
393 393
394 if (!oh || !oh->od || !oh->od->pdev) { 394 if (!oh || !oh->od || !oh->od->pdev)
395 error = -EINVAL; 395 return -EINVAL;
396 goto error;
397 }
398 396
399 np = oh->od->pdev->dev.of_node; 397 np = oh->od->pdev->dev.of_node;
400 if (!np) { 398 if (!np) {
@@ -516,8 +514,10 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
516 goto odbs_exit1; 514 goto odbs_exit1;
517 515
518 od = omap_device_alloc(pdev, &oh, 1); 516 od = omap_device_alloc(pdev, &oh, 1);
519 if (IS_ERR(od)) 517 if (IS_ERR(od)) {
518 ret = PTR_ERR(od);
520 goto odbs_exit1; 519 goto odbs_exit1;
520 }
521 521
522 ret = platform_device_add_data(pdev, pdata, pdata_len); 522 ret = platform_device_add_data(pdev, pdata, pdata_len);
523 if (ret) 523 if (ret)
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index d2106ae4410a..52c9d585b44d 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -1646,6 +1646,7 @@ static struct omap_hwmod omap3xxx_mmc3_hwmod = {
1646 .main_clk = "mmchs3_fck", 1646 .main_clk = "mmchs3_fck",
1647 .prcm = { 1647 .prcm = {
1648 .omap2 = { 1648 .omap2 = {
1649 .module_offs = CORE_MOD,
1649 .prcm_reg_id = 1, 1650 .prcm_reg_id = 1,
1650 .module_bit = OMAP3430_EN_MMC3_SHIFT, 1651 .module_bit = OMAP3430_EN_MMC3_SHIFT,
1651 .idlest_reg_id = 1, 1652 .idlest_reg_id = 1,
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index b668719b9b25..8e30772cfe32 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -81,10 +81,6 @@ extern unsigned int omap3_do_wfi_sz;
81/* ... and its pointer from SRAM after copy */ 81/* ... and its pointer from SRAM after copy */
82extern void (*omap3_do_wfi_sram)(void); 82extern void (*omap3_do_wfi_sram)(void);
83 83
84/* save_secure_ram_context function pointer and size, for copy to SRAM */
85extern int save_secure_ram_context(u32 *addr);
86extern unsigned int save_secure_ram_context_sz;
87
88extern void omap3_save_scratchpad_contents(void); 84extern void omap3_save_scratchpad_contents(void);
89 85
90#define PM_RTA_ERRATUM_i608 (1 << 0) 86#define PM_RTA_ERRATUM_i608 (1 << 0)
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 841ba19d64a6..36c55547137c 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -48,6 +48,7 @@
48#include "prm3xxx.h" 48#include "prm3xxx.h"
49#include "pm.h" 49#include "pm.h"
50#include "sdrc.h" 50#include "sdrc.h"
51#include "omap-secure.h"
51#include "sram.h" 52#include "sram.h"
52#include "control.h" 53#include "control.h"
53#include "vc.h" 54#include "vc.h"
@@ -66,7 +67,6 @@ struct power_state {
66 67
67static LIST_HEAD(pwrst_list); 68static LIST_HEAD(pwrst_list);
68 69
69static int (*_omap_save_secure_sram)(u32 *addr);
70void (*omap3_do_wfi_sram)(void); 70void (*omap3_do_wfi_sram)(void);
71 71
72static struct powerdomain *mpu_pwrdm, *neon_pwrdm; 72static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
@@ -121,8 +121,8 @@ static void omap3_save_secure_ram_context(void)
121 * will hang the system. 121 * will hang the system.
122 */ 122 */
123 pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON); 123 pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
124 ret = _omap_save_secure_sram((u32 *)(unsigned long) 124 ret = omap3_save_secure_ram(omap3_secure_ram_storage,
125 __pa(omap3_secure_ram_storage)); 125 OMAP3_SAVE_SECURE_RAM_SZ);
126 pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state); 126 pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
127 /* Following is for error tracking, it should not happen */ 127 /* Following is for error tracking, it should not happen */
128 if (ret) { 128 if (ret) {
@@ -434,15 +434,10 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
434 * 434 *
435 * The minimum set of functions is pushed to SRAM for execution: 435 * The minimum set of functions is pushed to SRAM for execution:
436 * - omap3_do_wfi for erratum i581 WA, 436 * - omap3_do_wfi for erratum i581 WA,
437 * - save_secure_ram_context for security extensions.
438 */ 437 */
439void omap_push_sram_idle(void) 438void omap_push_sram_idle(void)
440{ 439{
441 omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz); 440 omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);
442
443 if (omap_type() != OMAP2_DEVICE_TYPE_GP)
444 _omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
445 save_secure_ram_context_sz);
446} 441}
447 442
448static void __init pm_errata_configure(void) 443static void __init pm_errata_configure(void)
@@ -553,7 +548,7 @@ int __init omap3_pm_init(void)
553 clkdm_add_wkdep(neon_clkdm, mpu_clkdm); 548 clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
554 if (omap_type() != OMAP2_DEVICE_TYPE_GP) { 549 if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
555 omap3_secure_ram_storage = 550 omap3_secure_ram_storage =
556 kmalloc(0x803F, GFP_KERNEL); 551 kmalloc(OMAP3_SAVE_SECURE_RAM_SZ, GFP_KERNEL);
557 if (!omap3_secure_ram_storage) 552 if (!omap3_secure_ram_storage)
558 pr_err("Memory allocation failed when allocating for secure sram context\n"); 553 pr_err("Memory allocation failed when allocating for secure sram context\n");
559 554
diff --git a/arch/arm/mach-omap2/prcm-common.h b/arch/arm/mach-omap2/prcm-common.h
index 0592b23902c6..0977da0dab76 100644
--- a/arch/arm/mach-omap2/prcm-common.h
+++ b/arch/arm/mach-omap2/prcm-common.h
@@ -528,6 +528,7 @@ struct omap_prcm_irq_setup {
528struct omap_domain_base { 528struct omap_domain_base {
529 u32 pa; 529 u32 pa;
530 void __iomem *va; 530 void __iomem *va;
531 s16 offset;
531}; 532};
532 533
533/** 534/**
diff --git a/arch/arm/mach-omap2/prm33xx.c b/arch/arm/mach-omap2/prm33xx.c
index d2c5bcabdbeb..ebaf80d72a10 100644
--- a/arch/arm/mach-omap2/prm33xx.c
+++ b/arch/arm/mach-omap2/prm33xx.c
@@ -176,17 +176,6 @@ static int am33xx_pwrdm_read_pwrst(struct powerdomain *pwrdm)
176 return v; 176 return v;
177} 177}
178 178
179static int am33xx_pwrdm_read_prev_pwrst(struct powerdomain *pwrdm)
180{
181 u32 v;
182
183 v = am33xx_prm_read_reg(pwrdm->prcm_offs, pwrdm->pwrstst_offs);
184 v &= AM33XX_LASTPOWERSTATEENTERED_MASK;
185 v >>= AM33XX_LASTPOWERSTATEENTERED_SHIFT;
186
187 return v;
188}
189
190static int am33xx_pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm) 179static int am33xx_pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm)
191{ 180{
192 am33xx_prm_rmw_reg_bits(AM33XX_LOWPOWERSTATECHANGE_MASK, 181 am33xx_prm_rmw_reg_bits(AM33XX_LOWPOWERSTATECHANGE_MASK,
@@ -357,7 +346,6 @@ struct pwrdm_ops am33xx_pwrdm_operations = {
357 .pwrdm_set_next_pwrst = am33xx_pwrdm_set_next_pwrst, 346 .pwrdm_set_next_pwrst = am33xx_pwrdm_set_next_pwrst,
358 .pwrdm_read_next_pwrst = am33xx_pwrdm_read_next_pwrst, 347 .pwrdm_read_next_pwrst = am33xx_pwrdm_read_next_pwrst,
359 .pwrdm_read_pwrst = am33xx_pwrdm_read_pwrst, 348 .pwrdm_read_pwrst = am33xx_pwrdm_read_pwrst,
360 .pwrdm_read_prev_pwrst = am33xx_pwrdm_read_prev_pwrst,
361 .pwrdm_set_logic_retst = am33xx_pwrdm_set_logic_retst, 349 .pwrdm_set_logic_retst = am33xx_pwrdm_set_logic_retst,
362 .pwrdm_read_logic_pwrst = am33xx_pwrdm_read_logic_pwrst, 350 .pwrdm_read_logic_pwrst = am33xx_pwrdm_read_logic_pwrst,
363 .pwrdm_read_logic_retst = am33xx_pwrdm_read_logic_retst, 351 .pwrdm_read_logic_retst = am33xx_pwrdm_read_logic_retst,
diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
index fa5fd24f524c..22daf4efed68 100644
--- a/arch/arm/mach-omap2/sleep34xx.S
+++ b/arch/arm/mach-omap2/sleep34xx.S
@@ -93,20 +93,13 @@ ENTRY(enable_omap3630_toggle_l2_on_restore)
93ENDPROC(enable_omap3630_toggle_l2_on_restore) 93ENDPROC(enable_omap3630_toggle_l2_on_restore)
94 94
95/* 95/*
96 * Function to call rom code to save secure ram context. This gets 96 * Function to call rom code to save secure ram context.
97 * relocated to SRAM, so it can be all in .data section. Otherwise 97 *
98 * we need to initialize api_params separately. 98 * r0 = physical address of the parameters
99 */ 99 */
100 .data
101 .align 3
102ENTRY(save_secure_ram_context) 100ENTRY(save_secure_ram_context)
103 stmfd sp!, {r4 - r11, lr} @ save registers on stack 101 stmfd sp!, {r4 - r11, lr} @ save registers on stack
104 adr r3, api_params @ r3 points to parameters 102 mov r3, r0 @ physical address of parameters
105 str r0, [r3,#0x4] @ r0 has sdram address
106 ldr r12, high_mask
107 and r3, r3, r12
108 ldr r12, sram_phy_addr_mask
109 orr r3, r3, r12
110 mov r0, #25 @ set service ID for PPA 103 mov r0, #25 @ set service ID for PPA
111 mov r12, r0 @ copy secure service ID in r12 104 mov r12, r0 @ copy secure service ID in r12
112 mov r1, #0 @ set task id for ROM code in r1 105 mov r1, #0 @ set task id for ROM code in r1
@@ -120,18 +113,7 @@ ENTRY(save_secure_ram_context)
120 nop 113 nop
121 nop 114 nop
122 ldmfd sp!, {r4 - r11, pc} 115 ldmfd sp!, {r4 - r11, pc}
123 .align
124sram_phy_addr_mask:
125 .word SRAM_BASE_P
126high_mask:
127 .word 0xffff
128api_params:
129 .word 0x4, 0x0, 0x0, 0x1, 0x1
130ENDPROC(save_secure_ram_context) 116ENDPROC(save_secure_ram_context)
131ENTRY(save_secure_ram_context_sz)
132 .word . - save_secure_ram_context
133
134 .text
135 117
136/* 118/*
137 * ====================== 119 * ======================
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index c199990e12b6..323a4df59a6c 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -27,14 +27,58 @@
27 27
28int bpf_jit_enable __read_mostly; 28int bpf_jit_enable __read_mostly;
29 29
30/*
31 * eBPF prog stack layout:
32 *
33 * high
34 * original ARM_SP => +-----+
35 * | | callee saved registers
36 * +-----+ <= (BPF_FP + SCRATCH_SIZE)
37 * | ... | eBPF JIT scratch space
38 * eBPF fp register => +-----+
39 * (BPF_FP) | ... | eBPF prog stack
40 * +-----+
41 * |RSVD | JIT scratchpad
42 * current ARM_SP => +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE)
43 * | |
44 * | ... | Function call stack
45 * | |
46 * +-----+
47 * low
48 *
49 * The callee saved registers depends on whether frame pointers are enabled.
50 * With frame pointers (to be compliant with the ABI):
51 *
52 * high
53 * original ARM_SP => +------------------+ \
54 * | pc | |
55 * current ARM_FP => +------------------+ } callee saved registers
56 * |r4-r8,r10,fp,ip,lr| |
57 * +------------------+ /
58 * low
59 *
60 * Without frame pointers:
61 *
62 * high
63 * original ARM_SP => +------------------+
64 * | r4-r8,r10,fp,lr | callee saved registers
65 * current ARM_FP => +------------------+
66 * low
67 *
68 * When popping registers off the stack at the end of a BPF function, we
69 * reference them via the current ARM_FP register.
70 */
71#define CALLEE_MASK (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
72 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R10 | \
73 1 << ARM_FP)
74#define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
75#define CALLEE_POP_MASK (CALLEE_MASK | 1 << ARM_PC)
76
30#define STACK_OFFSET(k) (k) 77#define STACK_OFFSET(k) (k)
31#define TMP_REG_1 (MAX_BPF_JIT_REG + 0) /* TEMP Register 1 */ 78#define TMP_REG_1 (MAX_BPF_JIT_REG + 0) /* TEMP Register 1 */
32#define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* TEMP Register 2 */ 79#define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* TEMP Register 2 */
33#define TCALL_CNT (MAX_BPF_JIT_REG + 2) /* Tail Call Count */ 80#define TCALL_CNT (MAX_BPF_JIT_REG + 2) /* Tail Call Count */
34 81
35/* Flags used for JIT optimization */
36#define SEEN_CALL (1 << 0)
37
38#define FLAG_IMM_OVERFLOW (1 << 0) 82#define FLAG_IMM_OVERFLOW (1 << 0)
39 83
40/* 84/*
@@ -95,7 +139,6 @@ static const u8 bpf2a32[][2] = {
95 * idx : index of current last JITed instruction. 139 * idx : index of current last JITed instruction.
96 * prologue_bytes : bytes used in prologue. 140 * prologue_bytes : bytes used in prologue.
97 * epilogue_offset : offset of epilogue starting. 141 * epilogue_offset : offset of epilogue starting.
98 * seen : bit mask used for JIT optimization.
99 * offsets : array of eBPF instruction offsets in 142 * offsets : array of eBPF instruction offsets in
100 * JITed code. 143 * JITed code.
101 * target : final JITed code. 144 * target : final JITed code.
@@ -110,7 +153,6 @@ struct jit_ctx {
110 unsigned int idx; 153 unsigned int idx;
111 unsigned int prologue_bytes; 154 unsigned int prologue_bytes;
112 unsigned int epilogue_offset; 155 unsigned int epilogue_offset;
113 u32 seen;
114 u32 flags; 156 u32 flags;
115 u32 *offsets; 157 u32 *offsets;
116 u32 *target; 158 u32 *target;
@@ -179,8 +221,13 @@ static void jit_fill_hole(void *area, unsigned int size)
179 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF); 221 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
180} 222}
181 223
182/* Stack must be multiples of 16 Bytes */ 224#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
183#define STACK_ALIGN(sz) (((sz) + 3) & ~3) 225/* EABI requires the stack to be aligned to 64-bit boundaries */
226#define STACK_ALIGNMENT 8
227#else
228/* Stack must be aligned to 32-bit boundaries */
229#define STACK_ALIGNMENT 4
230#endif
184 231
185/* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4, 232/* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4,
186 * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9, 233 * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9,
@@ -194,7 +241,7 @@ static void jit_fill_hole(void *area, unsigned int size)
194 + SCRATCH_SIZE + \ 241 + SCRATCH_SIZE + \
195 + 4 /* extra for skb_copy_bits buffer */) 242 + 4 /* extra for skb_copy_bits buffer */)
196 243
197#define STACK_SIZE STACK_ALIGN(_STACK_SIZE) 244#define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT)
198 245
199/* Get the offset of eBPF REGISTERs stored on scratch space. */ 246/* Get the offset of eBPF REGISTERs stored on scratch space. */
200#define STACK_VAR(off) (STACK_SIZE-off-4) 247#define STACK_VAR(off) (STACK_SIZE-off-4)
@@ -285,16 +332,19 @@ static inline void emit_mov_i(const u8 rd, u32 val, struct jit_ctx *ctx)
285 emit_mov_i_no8m(rd, val, ctx); 332 emit_mov_i_no8m(rd, val, ctx);
286} 333}
287 334
288static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx) 335static void emit_bx_r(u8 tgt_reg, struct jit_ctx *ctx)
289{ 336{
290 ctx->seen |= SEEN_CALL;
291#if __LINUX_ARM_ARCH__ < 5
292 emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
293
294 if (elf_hwcap & HWCAP_THUMB) 337 if (elf_hwcap & HWCAP_THUMB)
295 emit(ARM_BX(tgt_reg), ctx); 338 emit(ARM_BX(tgt_reg), ctx);
296 else 339 else
297 emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx); 340 emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
341}
342
343static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
344{
345#if __LINUX_ARM_ARCH__ < 5
346 emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
347 emit_bx_r(tgt_reg, ctx);
298#else 348#else
299 emit(ARM_BLX_R(tgt_reg), ctx); 349 emit(ARM_BLX_R(tgt_reg), ctx);
300#endif 350#endif
@@ -354,7 +404,6 @@ static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
354 } 404 }
355 405
356 /* Call appropriate function */ 406 /* Call appropriate function */
357 ctx->seen |= SEEN_CALL;
358 emit_mov_i(ARM_IP, op == BPF_DIV ? 407 emit_mov_i(ARM_IP, op == BPF_DIV ?
359 (u32)jit_udiv32 : (u32)jit_mod32, ctx); 408 (u32)jit_udiv32 : (u32)jit_mod32, ctx);
360 emit_blx_r(ARM_IP, ctx); 409 emit_blx_r(ARM_IP, ctx);
@@ -620,8 +669,6 @@ static inline void emit_a32_lsh_r64(const u8 dst[], const u8 src[], bool dstk,
620 /* Do LSH operation */ 669 /* Do LSH operation */
621 emit(ARM_SUB_I(ARM_IP, rt, 32), ctx); 670 emit(ARM_SUB_I(ARM_IP, rt, 32), ctx);
622 emit(ARM_RSB_I(tmp2[0], rt, 32), ctx); 671 emit(ARM_RSB_I(tmp2[0], rt, 32), ctx);
623 /* As we are using ARM_LR */
624 ctx->seen |= SEEN_CALL;
625 emit(ARM_MOV_SR(ARM_LR, rm, SRTYPE_ASL, rt), ctx); 672 emit(ARM_MOV_SR(ARM_LR, rm, SRTYPE_ASL, rt), ctx);
626 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd, SRTYPE_ASL, ARM_IP), ctx); 673 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd, SRTYPE_ASL, ARM_IP), ctx);
627 emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd, SRTYPE_LSR, tmp2[0]), ctx); 674 emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd, SRTYPE_LSR, tmp2[0]), ctx);
@@ -656,8 +703,6 @@ static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk,
656 /* Do the ARSH operation */ 703 /* Do the ARSH operation */
657 emit(ARM_RSB_I(ARM_IP, rt, 32), ctx); 704 emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
658 emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx); 705 emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
659 /* As we are using ARM_LR */
660 ctx->seen |= SEEN_CALL;
661 emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx); 706 emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
662 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx); 707 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
663 _emit(ARM_COND_MI, ARM_B(0), ctx); 708 _emit(ARM_COND_MI, ARM_B(0), ctx);
@@ -692,8 +737,6 @@ static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk,
692 /* Do LSH operation */ 737 /* Do LSH operation */
693 emit(ARM_RSB_I(ARM_IP, rt, 32), ctx); 738 emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
694 emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx); 739 emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
695 /* As we are using ARM_LR */
696 ctx->seen |= SEEN_CALL;
697 emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx); 740 emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
698 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx); 741 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
699 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_LSR, tmp2[0]), ctx); 742 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_LSR, tmp2[0]), ctx);
@@ -828,8 +871,6 @@ static inline void emit_a32_mul_r64(const u8 dst[], const u8 src[], bool dstk,
828 /* Do Multiplication */ 871 /* Do Multiplication */
829 emit(ARM_MUL(ARM_IP, rd, rn), ctx); 872 emit(ARM_MUL(ARM_IP, rd, rn), ctx);
830 emit(ARM_MUL(ARM_LR, rm, rt), ctx); 873 emit(ARM_MUL(ARM_LR, rm, rt), ctx);
831 /* As we are using ARM_LR */
832 ctx->seen |= SEEN_CALL;
833 emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx); 874 emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx);
834 875
835 emit(ARM_UMULL(ARM_IP, rm, rd, rt), ctx); 876 emit(ARM_UMULL(ARM_IP, rm, rd, rt), ctx);
@@ -872,33 +913,53 @@ static inline void emit_str_r(const u8 dst, const u8 src, bool dstk,
872} 913}
873 914
874/* dst = *(size*)(src + off) */ 915/* dst = *(size*)(src + off) */
875static inline void emit_ldx_r(const u8 dst, const u8 src, bool dstk, 916static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk,
876 const s32 off, struct jit_ctx *ctx, const u8 sz){ 917 s32 off, struct jit_ctx *ctx, const u8 sz){
877 const u8 *tmp = bpf2a32[TMP_REG_1]; 918 const u8 *tmp = bpf2a32[TMP_REG_1];
878 u8 rd = dstk ? tmp[1] : dst; 919 const u8 *rd = dstk ? tmp : dst;
879 u8 rm = src; 920 u8 rm = src;
921 s32 off_max;
880 922
881 if (off) { 923 if (sz == BPF_H)
924 off_max = 0xff;
925 else
926 off_max = 0xfff;
927
928 if (off < 0 || off > off_max) {
882 emit_a32_mov_i(tmp[0], off, false, ctx); 929 emit_a32_mov_i(tmp[0], off, false, ctx);
883 emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx); 930 emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
884 rm = tmp[0]; 931 rm = tmp[0];
932 off = 0;
933 } else if (rd[1] == rm) {
934 emit(ARM_MOV_R(tmp[0], rm), ctx);
935 rm = tmp[0];
885 } 936 }
886 switch (sz) { 937 switch (sz) {
887 case BPF_W: 938 case BPF_B:
888 /* Load a Word */ 939 /* Load a Byte */
889 emit(ARM_LDR_I(rd, rm, 0), ctx); 940 emit(ARM_LDRB_I(rd[1], rm, off), ctx);
941 emit_a32_mov_i(dst[0], 0, dstk, ctx);
890 break; 942 break;
891 case BPF_H: 943 case BPF_H:
892 /* Load a HalfWord */ 944 /* Load a HalfWord */
893 emit(ARM_LDRH_I(rd, rm, 0), ctx); 945 emit(ARM_LDRH_I(rd[1], rm, off), ctx);
946 emit_a32_mov_i(dst[0], 0, dstk, ctx);
894 break; 947 break;
895 case BPF_B: 948 case BPF_W:
896 /* Load a Byte */ 949 /* Load a Word */
897 emit(ARM_LDRB_I(rd, rm, 0), ctx); 950 emit(ARM_LDR_I(rd[1], rm, off), ctx);
951 emit_a32_mov_i(dst[0], 0, dstk, ctx);
952 break;
953 case BPF_DW:
954 /* Load a Double Word */
955 emit(ARM_LDR_I(rd[1], rm, off), ctx);
956 emit(ARM_LDR_I(rd[0], rm, off + 4), ctx);
898 break; 957 break;
899 } 958 }
900 if (dstk) 959 if (dstk)
901 emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx); 960 emit(ARM_STR_I(rd[1], ARM_SP, STACK_VAR(dst[1])), ctx);
961 if (dstk && sz == BPF_DW)
962 emit(ARM_STR_I(rd[0], ARM_SP, STACK_VAR(dst[0])), ctx);
902} 963}
903 964
904/* Arithmatic Operation */ 965/* Arithmatic Operation */
@@ -906,7 +967,6 @@ static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
906 const u8 rn, struct jit_ctx *ctx, u8 op) { 967 const u8 rn, struct jit_ctx *ctx, u8 op) {
907 switch (op) { 968 switch (op) {
908 case BPF_JSET: 969 case BPF_JSET:
909 ctx->seen |= SEEN_CALL;
910 emit(ARM_AND_R(ARM_IP, rt, rn), ctx); 970 emit(ARM_AND_R(ARM_IP, rt, rn), ctx);
911 emit(ARM_AND_R(ARM_LR, rd, rm), ctx); 971 emit(ARM_AND_R(ARM_LR, rd, rm), ctx);
912 emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx); 972 emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx);
@@ -945,7 +1005,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
945 const u8 *tcc = bpf2a32[TCALL_CNT]; 1005 const u8 *tcc = bpf2a32[TCALL_CNT];
946 const int idx0 = ctx->idx; 1006 const int idx0 = ctx->idx;
947#define cur_offset (ctx->idx - idx0) 1007#define cur_offset (ctx->idx - idx0)
948#define jmp_offset (out_offset - (cur_offset)) 1008#define jmp_offset (out_offset - (cur_offset) - 2)
949 u32 off, lo, hi; 1009 u32 off, lo, hi;
950 1010
951 /* if (index >= array->map.max_entries) 1011 /* if (index >= array->map.max_entries)
@@ -956,7 +1016,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
956 emit_a32_mov_i(tmp[1], off, false, ctx); 1016 emit_a32_mov_i(tmp[1], off, false, ctx);
957 emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx); 1017 emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx);
958 emit(ARM_LDR_R(tmp[1], tmp2[1], tmp[1]), ctx); 1018 emit(ARM_LDR_R(tmp[1], tmp2[1], tmp[1]), ctx);
959 /* index (64 bit) */ 1019 /* index is 32-bit for arrays */
960 emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx); 1020 emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx);
961 /* index >= array->map.max_entries */ 1021 /* index >= array->map.max_entries */
962 emit(ARM_CMP_R(tmp2[1], tmp[1]), ctx); 1022 emit(ARM_CMP_R(tmp2[1], tmp[1]), ctx);
@@ -997,7 +1057,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
997 emit_a32_mov_i(tmp2[1], off, false, ctx); 1057 emit_a32_mov_i(tmp2[1], off, false, ctx);
998 emit(ARM_LDR_R(tmp[1], tmp[1], tmp2[1]), ctx); 1058 emit(ARM_LDR_R(tmp[1], tmp[1], tmp2[1]), ctx);
999 emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx); 1059 emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx);
1000 emit(ARM_BX(tmp[1]), ctx); 1060 emit_bx_r(tmp[1], ctx);
1001 1061
1002 /* out: */ 1062 /* out: */
1003 if (out_offset == -1) 1063 if (out_offset == -1)
@@ -1070,54 +1130,22 @@ static void build_prologue(struct jit_ctx *ctx)
1070 const u8 r2 = bpf2a32[BPF_REG_1][1]; 1130 const u8 r2 = bpf2a32[BPF_REG_1][1];
1071 const u8 r3 = bpf2a32[BPF_REG_1][0]; 1131 const u8 r3 = bpf2a32[BPF_REG_1][0];
1072 const u8 r4 = bpf2a32[BPF_REG_6][1]; 1132 const u8 r4 = bpf2a32[BPF_REG_6][1];
1073 const u8 r5 = bpf2a32[BPF_REG_6][0];
1074 const u8 r6 = bpf2a32[TMP_REG_1][1];
1075 const u8 r7 = bpf2a32[TMP_REG_1][0];
1076 const u8 r8 = bpf2a32[TMP_REG_2][1];
1077 const u8 r10 = bpf2a32[TMP_REG_2][0];
1078 const u8 fplo = bpf2a32[BPF_REG_FP][1]; 1133 const u8 fplo = bpf2a32[BPF_REG_FP][1];
1079 const u8 fphi = bpf2a32[BPF_REG_FP][0]; 1134 const u8 fphi = bpf2a32[BPF_REG_FP][0];
1080 const u8 sp = ARM_SP;
1081 const u8 *tcc = bpf2a32[TCALL_CNT]; 1135 const u8 *tcc = bpf2a32[TCALL_CNT];
1082 1136
1083 u16 reg_set = 0;
1084
1085 /*
1086 * eBPF prog stack layout
1087 *
1088 * high
1089 * original ARM_SP => +-----+ eBPF prologue
1090 * |FP/LR|
1091 * current ARM_FP => +-----+
1092 * | ... | callee saved registers
1093 * eBPF fp register => +-----+ <= (BPF_FP)
1094 * | ... | eBPF JIT scratch space
1095 * | | eBPF prog stack
1096 * +-----+
1097 * |RSVD | JIT scratchpad
1098 * current A64_SP => +-----+ <= (BPF_FP - STACK_SIZE)
1099 * | |
1100 * | ... | Function call stack
1101 * | |
1102 * +-----+
1103 * low
1104 */
1105
1106 /* Save callee saved registers. */ 1137 /* Save callee saved registers. */
1107 reg_set |= (1<<r4) | (1<<r5) | (1<<r6) | (1<<r7) | (1<<r8) | (1<<r10);
1108#ifdef CONFIG_FRAME_POINTER 1138#ifdef CONFIG_FRAME_POINTER
1109 reg_set |= (1<<ARM_FP) | (1<<ARM_IP) | (1<<ARM_LR) | (1<<ARM_PC); 1139 u16 reg_set = CALLEE_PUSH_MASK | 1 << ARM_IP | 1 << ARM_PC;
1110 emit(ARM_MOV_R(ARM_IP, sp), ctx); 1140 emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
1111 emit(ARM_PUSH(reg_set), ctx); 1141 emit(ARM_PUSH(reg_set), ctx);
1112 emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx); 1142 emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
1113#else 1143#else
1114 /* Check if call instruction exists in BPF body */ 1144 emit(ARM_PUSH(CALLEE_PUSH_MASK), ctx);
1115 if (ctx->seen & SEEN_CALL) 1145 emit(ARM_MOV_R(ARM_FP, ARM_SP), ctx);
1116 reg_set |= (1<<ARM_LR);
1117 emit(ARM_PUSH(reg_set), ctx);
1118#endif 1146#endif
1119 /* Save frame pointer for later */ 1147 /* Save frame pointer for later */
1120 emit(ARM_SUB_I(ARM_IP, sp, SCRATCH_SIZE), ctx); 1148 emit(ARM_SUB_I(ARM_IP, ARM_SP, SCRATCH_SIZE), ctx);
1121 1149
1122 ctx->stack_size = imm8m(STACK_SIZE); 1150 ctx->stack_size = imm8m(STACK_SIZE);
1123 1151
@@ -1140,33 +1168,19 @@ static void build_prologue(struct jit_ctx *ctx)
1140 /* end of prologue */ 1168 /* end of prologue */
1141} 1169}
1142 1170
1171/* restore callee saved registers. */
1143static void build_epilogue(struct jit_ctx *ctx) 1172static void build_epilogue(struct jit_ctx *ctx)
1144{ 1173{
1145 const u8 r4 = bpf2a32[BPF_REG_6][1];
1146 const u8 r5 = bpf2a32[BPF_REG_6][0];
1147 const u8 r6 = bpf2a32[TMP_REG_1][1];
1148 const u8 r7 = bpf2a32[TMP_REG_1][0];
1149 const u8 r8 = bpf2a32[TMP_REG_2][1];
1150 const u8 r10 = bpf2a32[TMP_REG_2][0];
1151 u16 reg_set = 0;
1152
1153 /* unwind function call stack */
1154 emit(ARM_ADD_I(ARM_SP, ARM_SP, ctx->stack_size), ctx);
1155
1156 /* restore callee saved registers. */
1157 reg_set |= (1<<r4) | (1<<r5) | (1<<r6) | (1<<r7) | (1<<r8) | (1<<r10);
1158#ifdef CONFIG_FRAME_POINTER 1174#ifdef CONFIG_FRAME_POINTER
1159 /* the first instruction of the prologue was: mov ip, sp */ 1175 /* When using frame pointers, some additional registers need to
1160 reg_set |= (1<<ARM_FP) | (1<<ARM_SP) | (1<<ARM_PC); 1176 * be loaded. */
1177 u16 reg_set = CALLEE_POP_MASK | 1 << ARM_SP;
1178 emit(ARM_SUB_I(ARM_SP, ARM_FP, hweight16(reg_set) * 4), ctx);
1161 emit(ARM_LDM(ARM_SP, reg_set), ctx); 1179 emit(ARM_LDM(ARM_SP, reg_set), ctx);
1162#else 1180#else
1163 if (ctx->seen & SEEN_CALL)
1164 reg_set |= (1<<ARM_PC);
1165 /* Restore callee saved registers. */ 1181 /* Restore callee saved registers. */
1166 emit(ARM_POP(reg_set), ctx); 1182 emit(ARM_MOV_R(ARM_SP, ARM_FP), ctx);
1167 /* Return back to the callee function */ 1183 emit(ARM_POP(CALLEE_POP_MASK), ctx);
1168 if (!(ctx->seen & SEEN_CALL))
1169 emit(ARM_BX(ARM_LR), ctx);
1170#endif 1184#endif
1171} 1185}
1172 1186
@@ -1394,8 +1408,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
1394 emit_rev32(rt, rt, ctx); 1408 emit_rev32(rt, rt, ctx);
1395 goto emit_bswap_uxt; 1409 goto emit_bswap_uxt;
1396 case 64: 1410 case 64:
1397 /* Because of the usage of ARM_LR */
1398 ctx->seen |= SEEN_CALL;
1399 emit_rev32(ARM_LR, rt, ctx); 1411 emit_rev32(ARM_LR, rt, ctx);
1400 emit_rev32(rt, rd, ctx); 1412 emit_rev32(rt, rd, ctx);
1401 emit(ARM_MOV_R(rd, ARM_LR), ctx); 1413 emit(ARM_MOV_R(rd, ARM_LR), ctx);
@@ -1448,22 +1460,7 @@ exit:
1448 rn = sstk ? tmp2[1] : src_lo; 1460 rn = sstk ? tmp2[1] : src_lo;
1449 if (sstk) 1461 if (sstk)
1450 emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx); 1462 emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
1451 switch (BPF_SIZE(code)) { 1463 emit_ldx_r(dst, rn, dstk, off, ctx, BPF_SIZE(code));
1452 case BPF_W:
1453 /* Load a Word */
1454 case BPF_H:
1455 /* Load a Half-Word */
1456 case BPF_B:
1457 /* Load a Byte */
1458 emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_SIZE(code));
1459 emit_a32_mov_i(dst_hi, 0, dstk, ctx);
1460 break;
1461 case BPF_DW:
1462 /* Load a double word */
1463 emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_W);
1464 emit_ldx_r(dst_hi, rn, dstk, off+4, ctx, BPF_W);
1465 break;
1466 }
1467 break; 1464 break;
1468 /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */ 1465 /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
1469 case BPF_LD | BPF_ABS | BPF_W: 1466 case BPF_LD | BPF_ABS | BPF_W:
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a93339f5178f..c9a7e9e1414f 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -557,7 +557,6 @@ config QCOM_QDF2400_ERRATUM_0065
557 557
558 If unsure, say Y. 558 If unsure, say Y.
559 559
560
561config SOCIONEXT_SYNQUACER_PREITS 560config SOCIONEXT_SYNQUACER_PREITS
562 bool "Socionext Synquacer: Workaround for GICv3 pre-ITS" 561 bool "Socionext Synquacer: Workaround for GICv3 pre-ITS"
563 default y 562 default y
@@ -576,6 +575,17 @@ config HISILICON_ERRATUM_161600802
576 a 128kB offset to be applied to the target address in this commands. 575 a 128kB offset to be applied to the target address in this commands.
577 576
578 If unsure, say Y. 577 If unsure, say Y.
578
579config QCOM_FALKOR_ERRATUM_E1041
580 bool "Falkor E1041: Speculative instruction fetches might cause errant memory access"
581 default y
582 help
583 Falkor CPU may speculatively fetch instructions from an improper
584 memory location when MMU translation is changed from SCTLR_ELn[M]=1
585 to SCTLR_ELn[M]=0. Prefix an ISB instruction to fix the problem.
586
587 If unsure, say Y.
588
579endmenu 589endmenu
580 590
581 591
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index d7c22d51bc50..4aa50b9b26bc 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -12,6 +12,7 @@ subdir-y += cavium
12subdir-y += exynos 12subdir-y += exynos
13subdir-y += freescale 13subdir-y += freescale
14subdir-y += hisilicon 14subdir-y += hisilicon
15subdir-y += lg
15subdir-y += marvell 16subdir-y += marvell
16subdir-y += mediatek 17subdir-y += mediatek
17subdir-y += nvidia 18subdir-y += nvidia
@@ -22,5 +23,4 @@ subdir-y += rockchip
22subdir-y += socionext 23subdir-y += socionext
23subdir-y += sprd 24subdir-y += sprd
24subdir-y += xilinx 25subdir-y += xilinx
25subdir-y += lg
26subdir-y += zte 26subdir-y += zte
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
index 45bdbfb96126..4a8d3f83a36e 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
@@ -75,6 +75,7 @@
75 pinctrl-0 = <&rgmii_pins>; 75 pinctrl-0 = <&rgmii_pins>;
76 phy-mode = "rgmii"; 76 phy-mode = "rgmii";
77 phy-handle = <&ext_rgmii_phy>; 77 phy-handle = <&ext_rgmii_phy>;
78 phy-supply = <&reg_dc1sw>;
78 status = "okay"; 79 status = "okay";
79}; 80};
80 81
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
index 806442d3e846..604cdaedac38 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
@@ -77,6 +77,7 @@
77 pinctrl-0 = <&rmii_pins>; 77 pinctrl-0 = <&rmii_pins>;
78 phy-mode = "rmii"; 78 phy-mode = "rmii";
79 phy-handle = <&ext_rmii_phy1>; 79 phy-handle = <&ext_rmii_phy1>;
80 phy-supply = <&reg_dc1sw>;
80 status = "okay"; 81 status = "okay";
81 82
82}; 83};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
index 0eb2acedf8c3..abe179de35d7 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
@@ -82,6 +82,7 @@
82 pinctrl-0 = <&rgmii_pins>; 82 pinctrl-0 = <&rgmii_pins>;
83 phy-mode = "rgmii"; 83 phy-mode = "rgmii";
84 phy-handle = <&ext_rgmii_phy>; 84 phy-handle = <&ext_rgmii_phy>;
85 phy-supply = <&reg_dc1sw>;
85 status = "okay"; 86 status = "okay";
86}; 87};
87 88
@@ -95,7 +96,7 @@
95&mmc2 { 96&mmc2 {
96 pinctrl-names = "default"; 97 pinctrl-names = "default";
97 pinctrl-0 = <&mmc2_pins>; 98 pinctrl-0 = <&mmc2_pins>;
98 vmmc-supply = <&reg_vcc3v3>; 99 vmmc-supply = <&reg_dcdc1>;
99 vqmmc-supply = <&reg_vcc1v8>; 100 vqmmc-supply = <&reg_vcc1v8>;
100 bus-width = <8>; 101 bus-width = <8>;
101 non-removable; 102 non-removable;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
index a5da18a6f286..43418bd881d8 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
@@ -45,19 +45,10 @@
45 45
46#include "sun50i-a64.dtsi" 46#include "sun50i-a64.dtsi"
47 47
48/ {
49 reg_vcc3v3: vcc3v3 {
50 compatible = "regulator-fixed";
51 regulator-name = "vcc3v3";
52 regulator-min-microvolt = <3300000>;
53 regulator-max-microvolt = <3300000>;
54 };
55};
56
57&mmc0 { 48&mmc0 {
58 pinctrl-names = "default"; 49 pinctrl-names = "default";
59 pinctrl-0 = <&mmc0_pins>; 50 pinctrl-0 = <&mmc0_pins>;
60 vmmc-supply = <&reg_vcc3v3>; 51 vmmc-supply = <&reg_dcdc1>;
61 non-removable; 52 non-removable;
62 disable-wp; 53 disable-wp;
63 bus-width = <4>; 54 bus-width = <4>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts
index b6b7a561df8c..a42fd79a62a3 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts
@@ -71,7 +71,7 @@
71 pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>; 71 pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
72 vmmc-supply = <&reg_vcc3v3>; 72 vmmc-supply = <&reg_vcc3v3>;
73 bus-width = <4>; 73 bus-width = <4>;
74 cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; 74 cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>;
75 status = "okay"; 75 status = "okay";
76}; 76};
77 77
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
index 7c9bdc7ab50b..9db19314c60c 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
@@ -66,6 +66,7 @@
66 <&cpu1>, 66 <&cpu1>,
67 <&cpu2>, 67 <&cpu2>,
68 <&cpu3>; 68 <&cpu3>;
69 interrupt-parent = <&intc>;
69 }; 70 };
70 71
71 psci { 72 psci {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index ead895a4e9a5..1fb8b9d6cb4e 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
@@ -753,12 +753,12 @@
753 753
754&uart_B { 754&uart_B {
755 clocks = <&xtal>, <&clkc CLKID_UART1>, <&xtal>; 755 clocks = <&xtal>, <&clkc CLKID_UART1>, <&xtal>;
756 clock-names = "xtal", "core", "baud"; 756 clock-names = "xtal", "pclk", "baud";
757}; 757};
758 758
759&uart_C { 759&uart_C {
760 clocks = <&xtal>, <&clkc CLKID_UART2>, <&xtal>; 760 clocks = <&xtal>, <&clkc CLKID_UART2>, <&xtal>;
761 clock-names = "xtal", "core", "baud"; 761 clock-names = "xtal", "pclk", "baud";
762}; 762};
763 763
764&vpu { 764&vpu {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
index 8ed981f59e5a..6524b89e7115 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
@@ -688,7 +688,7 @@
688 688
689&uart_A { 689&uart_A {
690 clocks = <&xtal>, <&clkc CLKID_UART0>, <&xtal>; 690 clocks = <&xtal>, <&clkc CLKID_UART0>, <&xtal>;
691 clock-names = "xtal", "core", "baud"; 691 clock-names = "xtal", "pclk", "baud";
692}; 692};
693 693
694&uart_AO { 694&uart_AO {
@@ -703,12 +703,12 @@
703 703
704&uart_B { 704&uart_B {
705 clocks = <&xtal>, <&clkc CLKID_UART1>, <&xtal>; 705 clocks = <&xtal>, <&clkc CLKID_UART1>, <&xtal>;
706 clock-names = "xtal", "core", "baud"; 706 clock-names = "xtal", "pclk", "baud";
707}; 707};
708 708
709&uart_C { 709&uart_C {
710 clocks = <&xtal>, <&clkc CLKID_UART2>, <&xtal>; 710 clocks = <&xtal>, <&clkc CLKID_UART2>, <&xtal>;
711 clock-names = "xtal", "core", "baud"; 711 clock-names = "xtal", "pclk", "baud";
712}; 712};
713 713
714&vpu { 714&vpu {
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index e3b64d03fbd8..9c7724e82aff 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -63,8 +63,10 @@
63 cpm_ethernet: ethernet@0 { 63 cpm_ethernet: ethernet@0 {
64 compatible = "marvell,armada-7k-pp22"; 64 compatible = "marvell,armada-7k-pp22";
65 reg = <0x0 0x100000>, <0x129000 0xb000>; 65 reg = <0x0 0x100000>, <0x129000 0xb000>;
66 clocks = <&cpm_clk 1 3>, <&cpm_clk 1 9>, <&cpm_clk 1 5>; 66 clocks = <&cpm_clk 1 3>, <&cpm_clk 1 9>,
67 clock-names = "pp_clk", "gop_clk", "mg_clk"; 67 <&cpm_clk 1 5>, <&cpm_clk 1 18>;
68 clock-names = "pp_clk", "gop_clk",
69 "mg_clk","axi_clk";
68 marvell,system-controller = <&cpm_syscon0>; 70 marvell,system-controller = <&cpm_syscon0>;
69 status = "disabled"; 71 status = "disabled";
70 dma-coherent; 72 dma-coherent;
@@ -155,7 +157,8 @@
155 #size-cells = <0>; 157 #size-cells = <0>;
156 compatible = "marvell,orion-mdio"; 158 compatible = "marvell,orion-mdio";
157 reg = <0x12a200 0x10>; 159 reg = <0x12a200 0x10>;
158 clocks = <&cpm_clk 1 9>, <&cpm_clk 1 5>; 160 clocks = <&cpm_clk 1 9>, <&cpm_clk 1 5>,
161 <&cpm_clk 1 6>, <&cpm_clk 1 18>;
159 status = "disabled"; 162 status = "disabled";
160 }; 163 };
161 164
@@ -338,8 +341,8 @@
338 compatible = "marvell,armada-cp110-sdhci"; 341 compatible = "marvell,armada-cp110-sdhci";
339 reg = <0x780000 0x300>; 342 reg = <0x780000 0x300>;
340 interrupts = <ICU_GRP_NSR 27 IRQ_TYPE_LEVEL_HIGH>; 343 interrupts = <ICU_GRP_NSR 27 IRQ_TYPE_LEVEL_HIGH>;
341 clock-names = "core"; 344 clock-names = "core","axi";
342 clocks = <&cpm_clk 1 4>; 345 clocks = <&cpm_clk 1 4>, <&cpm_clk 1 18>;
343 dma-coherent; 346 dma-coherent;
344 status = "disabled"; 347 status = "disabled";
345 }; 348 };
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
index 0d51096c69f8..87ac68b2cf37 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
@@ -63,8 +63,10 @@
63 cps_ethernet: ethernet@0 { 63 cps_ethernet: ethernet@0 {
64 compatible = "marvell,armada-7k-pp22"; 64 compatible = "marvell,armada-7k-pp22";
65 reg = <0x0 0x100000>, <0x129000 0xb000>; 65 reg = <0x0 0x100000>, <0x129000 0xb000>;
66 clocks = <&cps_clk 1 3>, <&cps_clk 1 9>, <&cps_clk 1 5>; 66 clocks = <&cps_clk 1 3>, <&cps_clk 1 9>,
67 clock-names = "pp_clk", "gop_clk", "mg_clk"; 67 <&cps_clk 1 5>, <&cps_clk 1 18>;
68 clock-names = "pp_clk", "gop_clk",
69 "mg_clk", "axi_clk";
68 marvell,system-controller = <&cps_syscon0>; 70 marvell,system-controller = <&cps_syscon0>;
69 status = "disabled"; 71 status = "disabled";
70 dma-coherent; 72 dma-coherent;
@@ -155,7 +157,8 @@
155 #size-cells = <0>; 157 #size-cells = <0>;
156 compatible = "marvell,orion-mdio"; 158 compatible = "marvell,orion-mdio";
157 reg = <0x12a200 0x10>; 159 reg = <0x12a200 0x10>;
158 clocks = <&cps_clk 1 9>, <&cps_clk 1 5>; 160 clocks = <&cps_clk 1 9>, <&cps_clk 1 5>,
161 <&cps_clk 1 6>, <&cps_clk 1 18>;
159 status = "disabled"; 162 status = "disabled";
160 }; 163 };
161 164
diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
index a298df74ca6c..dbe2648649db 100644
--- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
@@ -255,7 +255,6 @@
255&avb { 255&avb {
256 pinctrl-0 = <&avb_pins>; 256 pinctrl-0 = <&avb_pins>;
257 pinctrl-names = "default"; 257 pinctrl-names = "default";
258 renesas,no-ether-link;
259 phy-handle = <&phy0>; 258 phy-handle = <&phy0>;
260 status = "okay"; 259 status = "okay";
261 260
diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi
index 0d85b315ce71..73439cf48659 100644
--- a/arch/arm64/boot/dts/renesas/ulcb.dtsi
+++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi
@@ -145,7 +145,6 @@
145&avb { 145&avb {
146 pinctrl-0 = <&avb_pins>; 146 pinctrl-0 = <&avb_pins>;
147 pinctrl-names = "default"; 147 pinctrl-names = "default";
148 renesas,no-ether-link;
149 phy-handle = <&phy0>; 148 phy-handle = <&phy0>;
150 status = "okay"; 149 status = "okay";
151 150
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
index d4f80786e7c2..3890468678ce 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -132,6 +132,8 @@
132 assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>; 132 assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>;
133 assigned-clock-parents = <&gmac_clkin>, <&gmac_clkin>; 133 assigned-clock-parents = <&gmac_clkin>, <&gmac_clkin>;
134 clock_in_out = "input"; 134 clock_in_out = "input";
135 /* shows instability at 1GBit right now */
136 max-speed = <100>;
135 phy-supply = <&vcc_io>; 137 phy-supply = <&vcc_io>;
136 phy-mode = "rgmii"; 138 phy-mode = "rgmii";
137 pinctrl-names = "default"; 139 pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index 41d61840fb99..2426da631938 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -514,7 +514,7 @@
514 tsadc: tsadc@ff250000 { 514 tsadc: tsadc@ff250000 {
515 compatible = "rockchip,rk3328-tsadc"; 515 compatible = "rockchip,rk3328-tsadc";
516 reg = <0x0 0xff250000 0x0 0x100>; 516 reg = <0x0 0xff250000 0x0 0x100>;
517 interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH 0>; 517 interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
518 assigned-clocks = <&cru SCLK_TSADC>; 518 assigned-clocks = <&cru SCLK_TSADC>;
519 assigned-clock-rates = <50000>; 519 assigned-clock-rates = <50000>;
520 clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>; 520 clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index 910628d18add..1fc5060d7027 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -155,17 +155,6 @@
155 regulator-min-microvolt = <5000000>; 155 regulator-min-microvolt = <5000000>;
156 regulator-max-microvolt = <5000000>; 156 regulator-max-microvolt = <5000000>;
157 }; 157 };
158
159 vdd_log: vdd-log {
160 compatible = "pwm-regulator";
161 pwms = <&pwm2 0 25000 0>;
162 regulator-name = "vdd_log";
163 regulator-min-microvolt = <800000>;
164 regulator-max-microvolt = <1400000>;
165 regulator-always-on;
166 regulator-boot-on;
167 status = "okay";
168 };
169}; 158};
170 159
171&cpu_b0 { 160&cpu_b0 {
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts
index dd7193acc7df..6bdefb26b329 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts
@@ -40,7 +40,6 @@
40}; 40};
41 41
42&ethsc { 42&ethsc {
43 interrupt-parent = <&gpio>;
44 interrupts = <0 8>; 43 interrupts = <0 8>;
45}; 44};
46 45
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts
index d99e3731358c..254d6795c67e 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts
@@ -40,7 +40,6 @@
40}; 40};
41 41
42&ethsc { 42&ethsc {
43 interrupt-parent = <&gpio>;
44 interrupts = <0 8>; 43 interrupts = <0 8>;
45}; 44};
46 45
diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts
index 864feeb35180..f9f06fcfb94a 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts
@@ -38,8 +38,7 @@
38}; 38};
39 39
40&ethsc { 40&ethsc {
41 interrupt-parent = <&gpio>; 41 interrupts = <4 8>;
42 interrupts = <0 8>;
43}; 42};
44 43
45&serial0 { 44&serial0 {
diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
index 48e733136db4..0ac2ace82435 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
@@ -198,8 +198,8 @@
198 gpio-controller; 198 gpio-controller;
199 #gpio-cells = <2>; 199 #gpio-cells = <2>;
200 gpio-ranges = <&pinctrl 0 0 0>, 200 gpio-ranges = <&pinctrl 0 0 0>,
201 <&pinctrl 96 0 0>, 201 <&pinctrl 104 0 0>,
202 <&pinctrl 160 0 0>; 202 <&pinctrl 168 0 0>;
203 gpio-ranges-group-names = "gpio_range0", 203 gpio-ranges-group-names = "gpio_range0",
204 "gpio_range1", 204 "gpio_range1",
205 "gpio_range2"; 205 "gpio_range2";
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index aef72d886677..8b168280976f 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -512,4 +512,14 @@ alternative_else_nop_endif
512#endif 512#endif
513 .endm 513 .endm
514 514
515/**
516 * Errata workaround prior to disable MMU. Insert an ISB immediately prior
517 * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0.
518 */
519 .macro pre_disable_mmu_workaround
520#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
521 isb
522#endif
523 .endm
524
515#endif /* __ASM_ASSEMBLER_H */ 525#endif /* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index ac67cfc2585a..060e3a4008ab 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -60,6 +60,9 @@ enum ftr_type {
60#define FTR_VISIBLE true /* Feature visible to the user space */ 60#define FTR_VISIBLE true /* Feature visible to the user space */
61#define FTR_HIDDEN false /* Feature is hidden from the user */ 61#define FTR_HIDDEN false /* Feature is hidden from the user */
62 62
63#define FTR_VISIBLE_IF_IS_ENABLED(config) \
64 (IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN)
65
63struct arm64_ftr_bits { 66struct arm64_ftr_bits {
64 bool sign; /* Value is signed ? */ 67 bool sign; /* Value is signed ? */
65 bool visible; 68 bool visible;
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 235e77d98261..cbf08d7cbf30 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -91,6 +91,7 @@
91#define BRCM_CPU_PART_VULCAN 0x516 91#define BRCM_CPU_PART_VULCAN 0x516
92 92
93#define QCOM_CPU_PART_FALKOR_V1 0x800 93#define QCOM_CPU_PART_FALKOR_V1 0x800
94#define QCOM_CPU_PART_FALKOR 0xC00
94 95
95#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) 96#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
96#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) 97#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
@@ -99,6 +100,7 @@
99#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) 100#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
100#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) 101#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
101#define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1) 102#define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
103#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
102 104
103#ifndef __ASSEMBLY__ 105#ifndef __ASSEMBLY__
104 106
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 650344d01124..c4cd5081d78b 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -132,11 +132,9 @@ static inline void efi_set_pgd(struct mm_struct *mm)
132 * Defer the switch to the current thread's TTBR0_EL1 132 * Defer the switch to the current thread's TTBR0_EL1
133 * until uaccess_enable(). Restore the current 133 * until uaccess_enable(). Restore the current
134 * thread's saved ttbr0 corresponding to its active_mm 134 * thread's saved ttbr0 corresponding to its active_mm
135 * (if different from init_mm).
136 */ 135 */
137 cpu_set_reserved_ttbr0(); 136 cpu_set_reserved_ttbr0();
138 if (current->active_mm != &init_mm) 137 update_saved_ttbr0(current, current->active_mm);
139 update_saved_ttbr0(current, current->active_mm);
140 } 138 }
141 } 139 }
142} 140}
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 7f069ff37f06..715d395ef45b 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -170,8 +170,7 @@
170#define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS) 170#define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS)
171#define VTTBR_X (VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA) 171#define VTTBR_X (VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA)
172 172
173#define VTTBR_BADDR_SHIFT (VTTBR_X - 1) 173#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X)
174#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
175#define VTTBR_VMID_SHIFT (UL(48)) 174#define VTTBR_VMID_SHIFT (UL(48))
176#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT) 175#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
177 176
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 674912d7a571..ea6cb5b24258 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -370,6 +370,7 @@ void kvm_arm_init_debug(void);
370void kvm_arm_setup_debug(struct kvm_vcpu *vcpu); 370void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
371void kvm_arm_clear_debug(struct kvm_vcpu *vcpu); 371void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
372void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu); 372void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
373bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run);
373int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, 374int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
374 struct kvm_device_attr *attr); 375 struct kvm_device_attr *attr);
375int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, 376int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 3257895a9b5e..9d155fa9a507 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -156,29 +156,21 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
156 156
157#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; }) 157#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; })
158 158
159/*
160 * This is called when "tsk" is about to enter lazy TLB mode.
161 *
162 * mm: describes the currently active mm context
163 * tsk: task which is entering lazy tlb
164 * cpu: cpu number which is entering lazy tlb
165 *
166 * tsk->mm will be NULL
167 */
168static inline void
169enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
170{
171}
172
173#ifdef CONFIG_ARM64_SW_TTBR0_PAN 159#ifdef CONFIG_ARM64_SW_TTBR0_PAN
174static inline void update_saved_ttbr0(struct task_struct *tsk, 160static inline void update_saved_ttbr0(struct task_struct *tsk,
175 struct mm_struct *mm) 161 struct mm_struct *mm)
176{ 162{
177 if (system_uses_ttbr0_pan()) { 163 u64 ttbr;
178 BUG_ON(mm->pgd == swapper_pg_dir); 164
179 task_thread_info(tsk)->ttbr0 = 165 if (!system_uses_ttbr0_pan())
180 virt_to_phys(mm->pgd) | ASID(mm) << 48; 166 return;
181 } 167
168 if (mm == &init_mm)
169 ttbr = __pa_symbol(empty_zero_page);
170 else
171 ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
172
173 task_thread_info(tsk)->ttbr0 = ttbr;
182} 174}
183#else 175#else
184static inline void update_saved_ttbr0(struct task_struct *tsk, 176static inline void update_saved_ttbr0(struct task_struct *tsk,
@@ -187,6 +179,16 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
187} 179}
188#endif 180#endif
189 181
182static inline void
183enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
184{
185 /*
186 * We don't actually care about the ttbr0 mapping, so point it at the
187 * zero page.
188 */
189 update_saved_ttbr0(tsk, &init_mm);
190}
191
190static inline void __switch_mm(struct mm_struct *next) 192static inline void __switch_mm(struct mm_struct *next)
191{ 193{
192 unsigned int cpu = smp_processor_id(); 194 unsigned int cpu = smp_processor_id();
@@ -214,11 +216,9 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
214 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous 216 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
215 * value may have not been initialised yet (activate_mm caller) or the 217 * value may have not been initialised yet (activate_mm caller) or the
216 * ASID has changed since the last run (following the context switch 218 * ASID has changed since the last run (following the context switch
217 * of another thread of the same process). Avoid setting the reserved 219 * of another thread of the same process).
218 * TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit).
219 */ 220 */
220 if (next != &init_mm) 221 update_saved_ttbr0(tsk, next);
221 update_saved_ttbr0(tsk, next);
222} 222}
223 223
224#define deactivate_mm(tsk,mm) do { } while (0) 224#define deactivate_mm(tsk,mm) do { } while (0)
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 8d5cbec17d80..f9ccc36d3dc3 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -18,6 +18,7 @@
18#define __ASM_PERF_EVENT_H 18#define __ASM_PERF_EVENT_H
19 19
20#include <asm/stack_pointer.h> 20#include <asm/stack_pointer.h>
21#include <asm/ptrace.h>
21 22
22#define ARMV8_PMU_MAX_COUNTERS 32 23#define ARMV8_PMU_MAX_COUNTERS 32
23#define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1) 24#define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1)
@@ -79,6 +80,7 @@ struct pt_regs;
79extern unsigned long perf_instruction_pointer(struct pt_regs *regs); 80extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
80extern unsigned long perf_misc_flags(struct pt_regs *regs); 81extern unsigned long perf_misc_flags(struct pt_regs *regs);
81#define perf_misc_flags(regs) perf_misc_flags(regs) 82#define perf_misc_flags(regs) perf_misc_flags(regs)
83#define perf_arch_bpf_user_pt_regs(regs) &regs->user_regs
82#endif 84#endif
83 85
84#define perf_arch_fetch_caller_regs(regs, __ip) { \ 86#define perf_arch_fetch_caller_regs(regs, __ip) { \
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 149d05fb9421..bdcc7f1c9d06 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -42,6 +42,8 @@
42#include <asm/cmpxchg.h> 42#include <asm/cmpxchg.h>
43#include <asm/fixmap.h> 43#include <asm/fixmap.h>
44#include <linux/mmdebug.h> 44#include <linux/mmdebug.h>
45#include <linux/mm_types.h>
46#include <linux/sched.h>
45 47
46extern void __pte_error(const char *file, int line, unsigned long val); 48extern void __pte_error(const char *file, int line, unsigned long val);
47extern void __pmd_error(const char *file, int line, unsigned long val); 49extern void __pmd_error(const char *file, int line, unsigned long val);
@@ -149,12 +151,20 @@ static inline pte_t pte_mkwrite(pte_t pte)
149 151
150static inline pte_t pte_mkclean(pte_t pte) 152static inline pte_t pte_mkclean(pte_t pte)
151{ 153{
152 return clear_pte_bit(pte, __pgprot(PTE_DIRTY)); 154 pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
155 pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
156
157 return pte;
153} 158}
154 159
155static inline pte_t pte_mkdirty(pte_t pte) 160static inline pte_t pte_mkdirty(pte_t pte)
156{ 161{
157 return set_pte_bit(pte, __pgprot(PTE_DIRTY)); 162 pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
163
164 if (pte_write(pte))
165 pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
166
167 return pte;
158} 168}
159 169
160static inline pte_t pte_mkold(pte_t pte) 170static inline pte_t pte_mkold(pte_t pte)
@@ -207,9 +217,6 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
207 } 217 }
208} 218}
209 219
210struct mm_struct;
211struct vm_area_struct;
212
213extern void __sync_icache_dcache(pte_t pteval, unsigned long addr); 220extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
214 221
215/* 222/*
@@ -238,7 +245,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
238 * hardware updates of the pte (ptep_set_access_flags safely changes 245 * hardware updates of the pte (ptep_set_access_flags safely changes
239 * valid ptes without going through an invalid entry). 246 * valid ptes without going through an invalid entry).
240 */ 247 */
241 if (pte_valid(*ptep) && pte_valid(pte)) { 248 if (IS_ENABLED(CONFIG_DEBUG_VM) && pte_valid(*ptep) && pte_valid(pte) &&
249 (mm == current->active_mm || atomic_read(&mm->mm_users) > 1)) {
242 VM_WARN_ONCE(!pte_young(pte), 250 VM_WARN_ONCE(!pte_young(pte),
243 "%s: racy access flag clearing: 0x%016llx -> 0x%016llx", 251 "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
244 __func__, pte_val(*ptep), pte_val(pte)); 252 __func__, pte_val(*ptep), pte_val(pte));
@@ -641,28 +649,23 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
641#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 649#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
642 650
643/* 651/*
644 * ptep_set_wrprotect - mark read-only while preserving the hardware update of 652 * ptep_set_wrprotect - mark read-only while trasferring potential hardware
645 * the Access Flag. 653 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
646 */ 654 */
647#define __HAVE_ARCH_PTEP_SET_WRPROTECT 655#define __HAVE_ARCH_PTEP_SET_WRPROTECT
648static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) 656static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
649{ 657{
650 pte_t old_pte, pte; 658 pte_t old_pte, pte;
651 659
652 /*
653 * ptep_set_wrprotect() is only called on CoW mappings which are
654 * private (!VM_SHARED) with the pte either read-only (!PTE_WRITE &&
655 * PTE_RDONLY) or writable and software-dirty (PTE_WRITE &&
656 * !PTE_RDONLY && PTE_DIRTY); see is_cow_mapping() and
657 * protection_map[]. There is no race with the hardware update of the
658 * dirty state: clearing of PTE_RDONLY when PTE_WRITE (a.k.a. PTE_DBM)
659 * is set.
660 */
661 VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(*ptep),
662 "%s: potential race with hardware DBM", __func__);
663 pte = READ_ONCE(*ptep); 660 pte = READ_ONCE(*ptep);
664 do { 661 do {
665 old_pte = pte; 662 old_pte = pte;
663 /*
664 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
665 * clear), set the PTE_DIRTY bit.
666 */
667 if (pte_hw_dirty(pte))
668 pte = pte_mkdirty(pte);
666 pte = pte_wrprotect(pte); 669 pte = pte_wrprotect(pte);
667 pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep), 670 pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
668 pte_val(old_pte), pte_val(pte)); 671 pte_val(old_pte), pte_val(pte));
diff --git a/arch/arm64/include/uapi/asm/bpf_perf_event.h b/arch/arm64/include/uapi/asm/bpf_perf_event.h
new file mode 100644
index 000000000000..b551b741653d
--- /dev/null
+++ b/arch/arm64/include/uapi/asm/bpf_perf_event.h
@@ -0,0 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
3#define _UAPI__ASM_BPF_PERF_EVENT_H__
4
5#include <asm/ptrace.h>
6
7typedef struct user_pt_regs bpf_user_pt_regs_t;
8
9#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S
index 65f42d257414..2a752cb2a0f3 100644
--- a/arch/arm64/kernel/cpu-reset.S
+++ b/arch/arm64/kernel/cpu-reset.S
@@ -37,6 +37,7 @@ ENTRY(__cpu_soft_restart)
37 mrs x12, sctlr_el1 37 mrs x12, sctlr_el1
38 ldr x13, =SCTLR_ELx_FLAGS 38 ldr x13, =SCTLR_ELx_FLAGS
39 bic x12, x12, x13 39 bic x12, x12, x13
40 pre_disable_mmu_workaround
40 msr sctlr_el1, x12 41 msr sctlr_el1, x12
41 isb 42 isb
42 43
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index c5ba0097887f..a73a5928f09b 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -145,7 +145,8 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
145}; 145};
146 146
147static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { 147static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
148 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0), 148 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
149 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
149 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0), 150 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
150 S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI), 151 S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
151 S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI), 152 S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
index 4e6ad355bd05..6b9736c3fb56 100644
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -96,6 +96,7 @@ ENTRY(entry)
96 mrs x0, sctlr_el2 96 mrs x0, sctlr_el2
97 bic x0, x0, #1 << 0 // clear SCTLR.M 97 bic x0, x0, #1 << 0 // clear SCTLR.M
98 bic x0, x0, #1 << 2 // clear SCTLR.C 98 bic x0, x0, #1 << 2 // clear SCTLR.C
99 pre_disable_mmu_workaround
99 msr sctlr_el2, x0 100 msr sctlr_el2, x0
100 isb 101 isb
101 b 2f 102 b 2f
@@ -103,6 +104,7 @@ ENTRY(entry)
103 mrs x0, sctlr_el1 104 mrs x0, sctlr_el1
104 bic x0, x0, #1 << 0 // clear SCTLR.M 105 bic x0, x0, #1 << 0 // clear SCTLR.M
105 bic x0, x0, #1 << 2 // clear SCTLR.C 106 bic x0, x0, #1 << 2 // clear SCTLR.C
107 pre_disable_mmu_workaround
106 msr sctlr_el1, x0 108 msr sctlr_el1, x0
107 isb 109 isb
1082: 1102:
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 5084e699447a..fae81f7964b4 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -114,7 +114,12 @@
114 * returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so 114 * returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
115 * whatever is in the FPSIMD registers is not saved to memory, but discarded. 115 * whatever is in the FPSIMD registers is not saved to memory, but discarded.
116 */ 116 */
117static DEFINE_PER_CPU(struct fpsimd_state *, fpsimd_last_state); 117struct fpsimd_last_state_struct {
118 struct fpsimd_state *st;
119 bool sve_in_use;
120};
121
122static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
118 123
119/* Default VL for tasks that don't set it explicitly: */ 124/* Default VL for tasks that don't set it explicitly: */
120static int sve_default_vl = -1; 125static int sve_default_vl = -1;
@@ -905,7 +910,7 @@ void fpsimd_thread_switch(struct task_struct *next)
905 */ 910 */
906 struct fpsimd_state *st = &next->thread.fpsimd_state; 911 struct fpsimd_state *st = &next->thread.fpsimd_state;
907 912
908 if (__this_cpu_read(fpsimd_last_state) == st 913 if (__this_cpu_read(fpsimd_last_state.st) == st
909 && st->cpu == smp_processor_id()) 914 && st->cpu == smp_processor_id())
910 clear_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE); 915 clear_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
911 else 916 else
@@ -992,6 +997,21 @@ void fpsimd_signal_preserve_current_state(void)
992} 997}
993 998
994/* 999/*
1000 * Associate current's FPSIMD context with this cpu
1001 * Preemption must be disabled when calling this function.
1002 */
1003static void fpsimd_bind_to_cpu(void)
1004{
1005 struct fpsimd_last_state_struct *last =
1006 this_cpu_ptr(&fpsimd_last_state);
1007 struct fpsimd_state *st = &current->thread.fpsimd_state;
1008
1009 last->st = st;
1010 last->sve_in_use = test_thread_flag(TIF_SVE);
1011 st->cpu = smp_processor_id();
1012}
1013
1014/*
995 * Load the userland FPSIMD state of 'current' from memory, but only if the 1015 * Load the userland FPSIMD state of 'current' from memory, but only if the
996 * FPSIMD state already held in the registers is /not/ the most recent FPSIMD 1016 * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
997 * state of 'current' 1017 * state of 'current'
@@ -1004,11 +1024,8 @@ void fpsimd_restore_current_state(void)
1004 local_bh_disable(); 1024 local_bh_disable();
1005 1025
1006 if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) { 1026 if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
1007 struct fpsimd_state *st = &current->thread.fpsimd_state;
1008
1009 task_fpsimd_load(); 1027 task_fpsimd_load();
1010 __this_cpu_write(fpsimd_last_state, st); 1028 fpsimd_bind_to_cpu();
1011 st->cpu = smp_processor_id();
1012 } 1029 }
1013 1030
1014 local_bh_enable(); 1031 local_bh_enable();
@@ -1026,18 +1043,14 @@ void fpsimd_update_current_state(struct fpsimd_state *state)
1026 1043
1027 local_bh_disable(); 1044 local_bh_disable();
1028 1045
1029 current->thread.fpsimd_state = *state; 1046 current->thread.fpsimd_state.user_fpsimd = state->user_fpsimd;
1030 if (system_supports_sve() && test_thread_flag(TIF_SVE)) 1047 if (system_supports_sve() && test_thread_flag(TIF_SVE))
1031 fpsimd_to_sve(current); 1048 fpsimd_to_sve(current);
1032 1049
1033 task_fpsimd_load(); 1050 task_fpsimd_load();
1034 1051
1035 if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) { 1052 if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE))
1036 struct fpsimd_state *st = &current->thread.fpsimd_state; 1053 fpsimd_bind_to_cpu();
1037
1038 __this_cpu_write(fpsimd_last_state, st);
1039 st->cpu = smp_processor_id();
1040 }
1041 1054
1042 local_bh_enable(); 1055 local_bh_enable();
1043} 1056}
@@ -1052,7 +1065,7 @@ void fpsimd_flush_task_state(struct task_struct *t)
1052 1065
1053static inline void fpsimd_flush_cpu_state(void) 1066static inline void fpsimd_flush_cpu_state(void)
1054{ 1067{
1055 __this_cpu_write(fpsimd_last_state, NULL); 1068 __this_cpu_write(fpsimd_last_state.st, NULL);
1056} 1069}
1057 1070
1058/* 1071/*
@@ -1065,14 +1078,10 @@ static inline void fpsimd_flush_cpu_state(void)
1065#ifdef CONFIG_ARM64_SVE 1078#ifdef CONFIG_ARM64_SVE
1066void sve_flush_cpu_state(void) 1079void sve_flush_cpu_state(void)
1067{ 1080{
1068 struct fpsimd_state *const fpstate = __this_cpu_read(fpsimd_last_state); 1081 struct fpsimd_last_state_struct const *last =
1069 struct task_struct *tsk; 1082 this_cpu_ptr(&fpsimd_last_state);
1070
1071 if (!fpstate)
1072 return;
1073 1083
1074 tsk = container_of(fpstate, struct task_struct, thread.fpsimd_state); 1084 if (last->st && last->sve_in_use)
1075 if (test_tsk_thread_flag(tsk, TIF_SVE))
1076 fpsimd_flush_cpu_state(); 1085 fpsimd_flush_cpu_state();
1077} 1086}
1078#endif /* CONFIG_ARM64_SVE */ 1087#endif /* CONFIG_ARM64_SVE */
@@ -1267,7 +1276,7 @@ static inline void fpsimd_pm_init(void) { }
1267#ifdef CONFIG_HOTPLUG_CPU 1276#ifdef CONFIG_HOTPLUG_CPU
1268static int fpsimd_cpu_dead(unsigned int cpu) 1277static int fpsimd_cpu_dead(unsigned int cpu)
1269{ 1278{
1270 per_cpu(fpsimd_last_state, cpu) = NULL; 1279 per_cpu(fpsimd_last_state.st, cpu) = NULL;
1271 return 0; 1280 return 0;
1272} 1281}
1273 1282
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 67e86a0f57ac..e3cb9fbf96b6 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -750,6 +750,7 @@ __primary_switch:
750 * to take into account by discarding the current kernel mapping and 750 * to take into account by discarding the current kernel mapping and
751 * creating a new one. 751 * creating a new one.
752 */ 752 */
753 pre_disable_mmu_workaround
753 msr sctlr_el1, x20 // disable the MMU 754 msr sctlr_el1, x20 // disable the MMU
754 isb 755 isb
755 bl __create_page_tables // recreate kernel mapping 756 bl __create_page_tables // recreate kernel mapping
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 749f81779420..74bb56f656ef 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -28,6 +28,7 @@
28#include <linux/perf_event.h> 28#include <linux/perf_event.h>
29#include <linux/ptrace.h> 29#include <linux/ptrace.h>
30#include <linux/smp.h> 30#include <linux/smp.h>
31#include <linux/uaccess.h>
31 32
32#include <asm/compat.h> 33#include <asm/compat.h>
33#include <asm/current.h> 34#include <asm/current.h>
@@ -36,7 +37,6 @@
36#include <asm/traps.h> 37#include <asm/traps.h>
37#include <asm/cputype.h> 38#include <asm/cputype.h>
38#include <asm/system_misc.h> 39#include <asm/system_misc.h>
39#include <asm/uaccess.h>
40 40
41/* Breakpoint currently in use for each BRP. */ 41/* Breakpoint currently in use for each BRP. */
42static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]); 42static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index b2adcce7bc18..6b7dcf4310ac 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -314,6 +314,15 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
314 clear_tsk_thread_flag(p, TIF_SVE); 314 clear_tsk_thread_flag(p, TIF_SVE);
315 p->thread.sve_state = NULL; 315 p->thread.sve_state = NULL;
316 316
317 /*
318 * In case p was allocated the same task_struct pointer as some
319 * other recently-exited task, make sure p is disassociated from
320 * any cpu that may have run that now-exited task recently.
321 * Otherwise we could erroneously skip reloading the FPSIMD
322 * registers for p.
323 */
324 fpsimd_flush_task_state(p);
325
317 if (likely(!(p->flags & PF_KTHREAD))) { 326 if (likely(!(p->flags & PF_KTHREAD))) {
318 *childregs = *current_pt_regs(); 327 *childregs = *current_pt_regs();
319 childregs->regs[0] = 0; 328 childregs->regs[0] = 0;
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index ce704a4aeadd..f407e422a720 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -45,6 +45,7 @@ ENTRY(arm64_relocate_new_kernel)
45 mrs x0, sctlr_el2 45 mrs x0, sctlr_el2
46 ldr x1, =SCTLR_ELx_FLAGS 46 ldr x1, =SCTLR_ELx_FLAGS
47 bic x0, x0, x1 47 bic x0, x0, x1
48 pre_disable_mmu_workaround
48 msr sctlr_el2, x0 49 msr sctlr_el2, x0
49 isb 50 isb
501: 511:
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index dbadfaf850a7..fa63b28c65e0 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -221,3 +221,24 @@ void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
221 } 221 }
222 } 222 }
223} 223}
224
225
226/*
227 * After successfully emulating an instruction, we might want to
228 * return to user space with a KVM_EXIT_DEBUG. We can only do this
229 * once the emulation is complete, though, so for userspace emulations
230 * we have to wait until we have re-entered KVM before calling this
231 * helper.
232 *
233 * Return true (and set exit_reason) to return to userspace or false
234 * if no further action is required.
235 */
236bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
237{
238 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
239 run->exit_reason = KVM_EXIT_DEBUG;
240 run->debug.arch.hsr = ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT;
241 return true;
242 }
243 return false;
244}
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index b71247995469..e60494f1eef9 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -28,6 +28,7 @@
28#include <asm/kvm_emulate.h> 28#include <asm/kvm_emulate.h>
29#include <asm/kvm_mmu.h> 29#include <asm/kvm_mmu.h>
30#include <asm/kvm_psci.h> 30#include <asm/kvm_psci.h>
31#include <asm/debug-monitors.h>
31 32
32#define CREATE_TRACE_POINTS 33#define CREATE_TRACE_POINTS
33#include "trace.h" 34#include "trace.h"
@@ -44,7 +45,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
44 45
45 ret = kvm_psci_call(vcpu); 46 ret = kvm_psci_call(vcpu);
46 if (ret < 0) { 47 if (ret < 0) {
47 kvm_inject_undefined(vcpu); 48 vcpu_set_reg(vcpu, 0, ~0UL);
48 return 1; 49 return 1;
49 } 50 }
50 51
@@ -53,7 +54,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
53 54
54static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) 55static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
55{ 56{
56 kvm_inject_undefined(vcpu); 57 vcpu_set_reg(vcpu, 0, ~0UL);
57 return 1; 58 return 1;
58} 59}
59 60
@@ -187,14 +188,46 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
187} 188}
188 189
189/* 190/*
191 * We may be single-stepping an emulated instruction. If the emulation
192 * has been completed in the kernel, we can return to userspace with a
193 * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
194 * emulation first.
195 */
196static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run)
197{
198 int handled;
199
200 /*
201 * See ARM ARM B1.14.1: "Hyp traps on instructions
202 * that fail their condition code check"
203 */
204 if (!kvm_condition_valid(vcpu)) {
205 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
206 handled = 1;
207 } else {
208 exit_handle_fn exit_handler;
209
210 exit_handler = kvm_get_exit_handler(vcpu);
211 handled = exit_handler(vcpu, run);
212 }
213
214 /*
215 * kvm_arm_handle_step_debug() sets the exit_reason on the kvm_run
216 * structure if we need to return to userspace.
217 */
218 if (handled > 0 && kvm_arm_handle_step_debug(vcpu, run))
219 handled = 0;
220
221 return handled;
222}
223
224/*
190 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on 225 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
191 * proper exit to userspace. 226 * proper exit to userspace.
192 */ 227 */
193int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, 228int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
194 int exception_index) 229 int exception_index)
195{ 230{
196 exit_handle_fn exit_handler;
197
198 if (ARM_SERROR_PENDING(exception_index)) { 231 if (ARM_SERROR_PENDING(exception_index)) {
199 u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu)); 232 u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
200 233
@@ -220,20 +253,14 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
220 return 1; 253 return 1;
221 case ARM_EXCEPTION_EL1_SERROR: 254 case ARM_EXCEPTION_EL1_SERROR:
222 kvm_inject_vabt(vcpu); 255 kvm_inject_vabt(vcpu);
223 return 1; 256 /* We may still need to return for single-step */
224 case ARM_EXCEPTION_TRAP: 257 if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS)
225 /* 258 && kvm_arm_handle_step_debug(vcpu, run))
226 * See ARM ARM B1.14.1: "Hyp traps on instructions 259 return 0;
227 * that fail their condition code check" 260 else
228 */
229 if (!kvm_condition_valid(vcpu)) {
230 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
231 return 1; 261 return 1;
232 } 262 case ARM_EXCEPTION_TRAP:
233 263 return handle_trap_exceptions(vcpu, run);
234 exit_handler = kvm_get_exit_handler(vcpu);
235
236 return exit_handler(vcpu, run);
237 case ARM_EXCEPTION_HYP_GONE: 264 case ARM_EXCEPTION_HYP_GONE:
238 /* 265 /*
239 * EL2 has been reset to the hyp-stub. This happens when a guest 266 * EL2 has been reset to the hyp-stub. This happens when a guest
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 3f9615582377..870828c364c5 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -151,6 +151,7 @@ reset:
151 mrs x5, sctlr_el2 151 mrs x5, sctlr_el2
152 ldr x6, =SCTLR_ELx_FLAGS 152 ldr x6, =SCTLR_ELx_FLAGS
153 bic x5, x5, x6 // Clear SCTL_M and etc 153 bic x5, x5, x6 // Clear SCTL_M and etc
154 pre_disable_mmu_workaround
154 msr sctlr_el2, x5 155 msr sctlr_el2, x5
155 isb 156 isb
156 157
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index 321c9c05dd9e..f4363d40e2cd 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -74,6 +74,9 @@ static void __hyp_text __debug_save_spe_nvhe(u64 *pmscr_el1)
74{ 74{
75 u64 reg; 75 u64 reg;
76 76
77 /* Clear pmscr in case of early return */
78 *pmscr_el1 = 0;
79
77 /* SPE present on this CPU? */ 80 /* SPE present on this CPU? */
78 if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1), 81 if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1),
79 ID_AA64DFR0_PMSVER_SHIFT)) 82 ID_AA64DFR0_PMSVER_SHIFT))
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 525c01f48867..f7c651f3a8c0 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -22,6 +22,7 @@
22#include <asm/kvm_emulate.h> 22#include <asm/kvm_emulate.h>
23#include <asm/kvm_hyp.h> 23#include <asm/kvm_hyp.h>
24#include <asm/fpsimd.h> 24#include <asm/fpsimd.h>
25#include <asm/debug-monitors.h>
25 26
26static bool __hyp_text __fpsimd_enabled_nvhe(void) 27static bool __hyp_text __fpsimd_enabled_nvhe(void)
27{ 28{
@@ -269,7 +270,11 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
269 return true; 270 return true;
270} 271}
271 272
272static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu) 273/* Skip an instruction which has been emulated. Returns true if
274 * execution can continue or false if we need to exit hyp mode because
275 * single-step was in effect.
276 */
277static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
273{ 278{
274 *vcpu_pc(vcpu) = read_sysreg_el2(elr); 279 *vcpu_pc(vcpu) = read_sysreg_el2(elr);
275 280
@@ -282,6 +287,14 @@ static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
282 } 287 }
283 288
284 write_sysreg_el2(*vcpu_pc(vcpu), elr); 289 write_sysreg_el2(*vcpu_pc(vcpu), elr);
290
291 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
292 vcpu->arch.fault.esr_el2 =
293 (ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT) | 0x22;
294 return false;
295 } else {
296 return true;
297 }
285} 298}
286 299
287int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) 300int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
@@ -342,13 +355,21 @@ again:
342 int ret = __vgic_v2_perform_cpuif_access(vcpu); 355 int ret = __vgic_v2_perform_cpuif_access(vcpu);
343 356
344 if (ret == 1) { 357 if (ret == 1) {
345 __skip_instr(vcpu); 358 if (__skip_instr(vcpu))
346 goto again; 359 goto again;
360 else
361 exit_code = ARM_EXCEPTION_TRAP;
347 } 362 }
348 363
349 if (ret == -1) { 364 if (ret == -1) {
350 /* Promote an illegal access to an SError */ 365 /* Promote an illegal access to an
351 __skip_instr(vcpu); 366 * SError. If we would be returning
367 * due to single-step clear the SS
368 * bit so handle_exit knows what to
369 * do after dealing with the error.
370 */
371 if (!__skip_instr(vcpu))
372 *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
352 exit_code = ARM_EXCEPTION_EL1_SERROR; 373 exit_code = ARM_EXCEPTION_EL1_SERROR;
353 } 374 }
354 375
@@ -363,8 +384,10 @@ again:
363 int ret = __vgic_v3_perform_cpuif_access(vcpu); 384 int ret = __vgic_v3_perform_cpuif_access(vcpu);
364 385
365 if (ret == 1) { 386 if (ret == 1) {
366 __skip_instr(vcpu); 387 if (__skip_instr(vcpu))
367 goto again; 388 goto again;
389 else
390 exit_code = ARM_EXCEPTION_TRAP;
368 } 391 }
369 392
370 /* 0 falls through to be handled out of EL2 */ 393 /* 0 falls through to be handled out of EL2 */
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index ca74a2aace42..7b60d62ac593 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -389,7 +389,7 @@ void ptdump_check_wx(void)
389 .check_wx = true, 389 .check_wx = true,
390 }; 390 };
391 391
392 walk_pgd(&st, &init_mm, 0); 392 walk_pgd(&st, &init_mm, VA_START);
393 note_page(&st, 0, 0, 0); 393 note_page(&st, 0, 0, 0);
394 if (st.wx_pages || st.uxn_pages) 394 if (st.wx_pages || st.uxn_pages)
395 pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n", 395 pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n",
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 22168cd0dde7..9b7f89df49db 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -574,7 +574,6 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
574{ 574{
575 struct siginfo info; 575 struct siginfo info;
576 const struct fault_info *inf; 576 const struct fault_info *inf;
577 int ret = 0;
578 577
579 inf = esr_to_fault_info(esr); 578 inf = esr_to_fault_info(esr);
580 pr_err("Synchronous External Abort: %s (0x%08x) at 0x%016lx\n", 579 pr_err("Synchronous External Abort: %s (0x%08x) at 0x%016lx\n",
@@ -589,7 +588,7 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
589 if (interrupts_enabled(regs)) 588 if (interrupts_enabled(regs))
590 nmi_enter(); 589 nmi_enter();
591 590
592 ret = ghes_notify_sea(); 591 ghes_notify_sea();
593 592
594 if (interrupts_enabled(regs)) 593 if (interrupts_enabled(regs))
595 nmi_exit(); 594 nmi_exit();
@@ -604,7 +603,7 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
604 info.si_addr = (void __user *)addr; 603 info.si_addr = (void __user *)addr;
605 arm64_notify_die("", regs, &info, esr); 604 arm64_notify_die("", regs, &info, esr);
606 605
607 return ret; 606 return 0;
608} 607}
609 608
610static const struct fault_info fault_info[] = { 609static const struct fault_info fault_info[] = {
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 5960bef0170d..00e7b900ca41 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -476,6 +476,8 @@ void __init arm64_memblock_init(void)
476 476
477 reserve_elfcorehdr(); 477 reserve_elfcorehdr();
478 478
479 high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
480
479 dma_contiguous_reserve(arm64_dma_phys_limit); 481 dma_contiguous_reserve(arm64_dma_phys_limit);
480 482
481 memblock_allow_resize(); 483 memblock_allow_resize();
@@ -502,7 +504,6 @@ void __init bootmem_init(void)
502 sparse_init(); 504 sparse_init();
503 zone_sizes_init(min, max); 505 zone_sizes_init(min, max);
504 506
505 high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
506 memblock_dump_all(); 507 memblock_dump_all();
507} 508}
508 509
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index ba38d403abb2..bb32f7f6dd0f 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -148,7 +148,8 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
148/* Stack must be multiples of 16B */ 148/* Stack must be multiples of 16B */
149#define STACK_ALIGN(sz) (((sz) + 15) & ~15) 149#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
150 150
151#define PROLOGUE_OFFSET 8 151/* Tail call offset to jump into */
152#define PROLOGUE_OFFSET 7
152 153
153static int build_prologue(struct jit_ctx *ctx) 154static int build_prologue(struct jit_ctx *ctx)
154{ 155{
@@ -200,19 +201,19 @@ static int build_prologue(struct jit_ctx *ctx)
200 /* Initialize tail_call_cnt */ 201 /* Initialize tail_call_cnt */
201 emit(A64_MOVZ(1, tcc, 0, 0), ctx); 202 emit(A64_MOVZ(1, tcc, 0, 0), ctx);
202 203
203 /* 4 byte extra for skb_copy_bits buffer */
204 ctx->stack_size = prog->aux->stack_depth + 4;
205 ctx->stack_size = STACK_ALIGN(ctx->stack_size);
206
207 /* Set up function call stack */
208 emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
209
210 cur_offset = ctx->idx - idx0; 204 cur_offset = ctx->idx - idx0;
211 if (cur_offset != PROLOGUE_OFFSET) { 205 if (cur_offset != PROLOGUE_OFFSET) {
212 pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n", 206 pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
213 cur_offset, PROLOGUE_OFFSET); 207 cur_offset, PROLOGUE_OFFSET);
214 return -1; 208 return -1;
215 } 209 }
210
211 /* 4 byte extra for skb_copy_bits buffer */
212 ctx->stack_size = prog->aux->stack_depth + 4;
213 ctx->stack_size = STACK_ALIGN(ctx->stack_size);
214
215 /* Set up function call stack */
216 emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
216 return 0; 217 return 0;
217} 218}
218 219
@@ -260,11 +261,12 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
260 emit(A64_LDR64(prg, tmp, prg), ctx); 261 emit(A64_LDR64(prg, tmp, prg), ctx);
261 emit(A64_CBZ(1, prg, jmp_offset), ctx); 262 emit(A64_CBZ(1, prg, jmp_offset), ctx);
262 263
263 /* goto *(prog->bpf_func + prologue_size); */ 264 /* goto *(prog->bpf_func + prologue_offset); */
264 off = offsetof(struct bpf_prog, bpf_func); 265 off = offsetof(struct bpf_prog, bpf_func);
265 emit_a64_mov_i64(tmp, off, ctx); 266 emit_a64_mov_i64(tmp, off, ctx);
266 emit(A64_LDR64(tmp, prg, tmp), ctx); 267 emit(A64_LDR64(tmp, prg, tmp), ctx);
267 emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx); 268 emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
269 emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
268 emit(A64_BR(tmp), ctx); 270 emit(A64_BR(tmp), ctx);
269 271
270 /* out: */ 272 /* out: */
diff --git a/arch/blackfin/include/uapi/asm/Kbuild b/arch/blackfin/include/uapi/asm/Kbuild
index aa624b4ab655..2240b38c2915 100644
--- a/arch/blackfin/include/uapi/asm/Kbuild
+++ b/arch/blackfin/include/uapi/asm/Kbuild
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += auxvec.h 4generic-y += auxvec.h
5generic-y += bitsperlong.h 5generic-y += bitsperlong.h
6generic-y += bpf_perf_event.h
6generic-y += errno.h 7generic-y += errno.h
7generic-y += ioctl.h 8generic-y += ioctl.h
8generic-y += ipcbuf.h 9generic-y += ipcbuf.h
diff --git a/arch/c6x/include/uapi/asm/Kbuild b/arch/c6x/include/uapi/asm/Kbuild
index 67ee896a76a7..26644e15d854 100644
--- a/arch/c6x/include/uapi/asm/Kbuild
+++ b/arch/c6x/include/uapi/asm/Kbuild
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += auxvec.h 4generic-y += auxvec.h
5generic-y += bitsperlong.h 5generic-y += bitsperlong.h
6generic-y += bpf_perf_event.h
6generic-y += errno.h 7generic-y += errno.h
7generic-y += fcntl.h 8generic-y += fcntl.h
8generic-y += ioctl.h 9generic-y += ioctl.h
diff --git a/arch/cris/include/uapi/asm/Kbuild b/arch/cris/include/uapi/asm/Kbuild
index 3687b54bb18e..3470c6e9c7b9 100644
--- a/arch/cris/include/uapi/asm/Kbuild
+++ b/arch/cris/include/uapi/asm/Kbuild
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += auxvec.h 4generic-y += auxvec.h
5generic-y += bitsperlong.h 5generic-y += bitsperlong.h
6generic-y += bpf_perf_event.h
6generic-y += errno.h 7generic-y += errno.h
7generic-y += fcntl.h 8generic-y += fcntl.h
8generic-y += ioctl.h 9generic-y += ioctl.h
diff --git a/arch/frv/include/uapi/asm/Kbuild b/arch/frv/include/uapi/asm/Kbuild
index b15bf6bc0e94..14a2e9af97e9 100644
--- a/arch/frv/include/uapi/asm/Kbuild
+++ b/arch/frv/include/uapi/asm/Kbuild
@@ -1,2 +1,4 @@
1# UAPI Header export list 1# UAPI Header export list
2include include/uapi/asm-generic/Kbuild.asm 2include include/uapi/asm-generic/Kbuild.asm
3
4generic-y += bpf_perf_event.h
diff --git a/arch/h8300/include/uapi/asm/Kbuild b/arch/h8300/include/uapi/asm/Kbuild
index 187aed820e71..2f65f78792cb 100644
--- a/arch/h8300/include/uapi/asm/Kbuild
+++ b/arch/h8300/include/uapi/asm/Kbuild
@@ -2,6 +2,7 @@
2include include/uapi/asm-generic/Kbuild.asm 2include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += auxvec.h 4generic-y += auxvec.h
5generic-y += bpf_perf_event.h
5generic-y += errno.h 6generic-y += errno.h
6generic-y += fcntl.h 7generic-y += fcntl.h
7generic-y += ioctl.h 8generic-y += ioctl.h
diff --git a/arch/hexagon/include/uapi/asm/Kbuild b/arch/hexagon/include/uapi/asm/Kbuild
index cb5df3aad3a8..41a176dbb53e 100644
--- a/arch/hexagon/include/uapi/asm/Kbuild
+++ b/arch/hexagon/include/uapi/asm/Kbuild
@@ -2,6 +2,7 @@
2include include/uapi/asm-generic/Kbuild.asm 2include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += auxvec.h 4generic-y += auxvec.h
5generic-y += bpf_perf_event.h
5generic-y += errno.h 6generic-y += errno.h
6generic-y += fcntl.h 7generic-y += fcntl.h
7generic-y += ioctl.h 8generic-y += ioctl.h
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 28e02c99be6d..762eeb0fcc1d 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -65,29 +65,30 @@ ia64_atomic_fetch_##op (int i, atomic_t *v) \
65ATOMIC_OPS(add, +) 65ATOMIC_OPS(add, +)
66ATOMIC_OPS(sub, -) 66ATOMIC_OPS(sub, -)
67 67
68#define atomic_add_return(i,v) \ 68#ifdef __OPTIMIZE__
69#define __ia64_atomic_const(i) __builtin_constant_p(i) ? \
70 ((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 || \
71 (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0
72
73#define atomic_add_return(i, v) \
69({ \ 74({ \
70 int __ia64_aar_i = (i); \ 75 int __i = (i); \
71 (__builtin_constant_p(i) \ 76 static const int __ia64_atomic_p = __ia64_atomic_const(i); \
72 && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \ 77 __ia64_atomic_p ? ia64_fetch_and_add(__i, &(v)->counter) : \
73 || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \ 78 ia64_atomic_add(__i, v); \
74 || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
75 || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
76 ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
77 : ia64_atomic_add(__ia64_aar_i, v); \
78}) 79})
79 80
80#define atomic_sub_return(i,v) \ 81#define atomic_sub_return(i, v) \
81({ \ 82({ \
82 int __ia64_asr_i = (i); \ 83 int __i = (i); \
83 (__builtin_constant_p(i) \ 84 static const int __ia64_atomic_p = __ia64_atomic_const(i); \
84 && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \ 85 __ia64_atomic_p ? ia64_fetch_and_add(-__i, &(v)->counter) : \
85 || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \ 86 ia64_atomic_sub(__i, v); \
86 || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
87 || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
88 ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
89 : ia64_atomic_sub(__ia64_asr_i, v); \
90}) 87})
88#else
89#define atomic_add_return(i, v) ia64_atomic_add(i, v)
90#define atomic_sub_return(i, v) ia64_atomic_sub(i, v)
91#endif
91 92
92#define atomic_fetch_add(i,v) \ 93#define atomic_fetch_add(i,v) \
93({ \ 94({ \
diff --git a/arch/ia64/include/uapi/asm/Kbuild b/arch/ia64/include/uapi/asm/Kbuild
index 13a97aa2285f..f5c6967a93bb 100644
--- a/arch/ia64/include/uapi/asm/Kbuild
+++ b/arch/ia64/include/uapi/asm/Kbuild
@@ -1,4 +1,5 @@
1# UAPI Header export list 1# UAPI Header export list
2include include/uapi/asm-generic/Kbuild.asm 2include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += bpf_perf_event.h
4generic-y += kvm_para.h 5generic-y += kvm_para.h
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index c6ecb97151a2..9025699049ca 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -88,7 +88,7 @@ void vtime_flush(struct task_struct *tsk)
88 } 88 }
89 89
90 if (ti->softirq_time) { 90 if (ti->softirq_time) {
91 delta = cycle_to_nsec(ti->softirq_time)); 91 delta = cycle_to_nsec(ti->softirq_time);
92 account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ); 92 account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ);
93 } 93 }
94 94
diff --git a/arch/m32r/include/uapi/asm/Kbuild b/arch/m32r/include/uapi/asm/Kbuild
index 1c44d3b3eba0..451bf6071c6e 100644
--- a/arch/m32r/include/uapi/asm/Kbuild
+++ b/arch/m32r/include/uapi/asm/Kbuild
@@ -1,5 +1,6 @@
1# UAPI Header export list 1# UAPI Header export list
2include include/uapi/asm-generic/Kbuild.asm 2include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += bpf_perf_event.h
4generic-y += kvm_para.h 5generic-y += kvm_para.h
5generic-y += siginfo.h 6generic-y += siginfo.h
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c
index cb79fba79d43..b88a8dd14933 100644
--- a/arch/m32r/kernel/traps.c
+++ b/arch/m32r/kernel/traps.c
@@ -122,7 +122,6 @@ void abort(void)
122 /* if that doesn't kill us, halt */ 122 /* if that doesn't kill us, halt */
123 panic("Oops failed to kill thread"); 123 panic("Oops failed to kill thread");
124} 124}
125EXPORT_SYMBOL(abort);
126 125
127void __init trap_init(void) 126void __init trap_init(void)
128{ 127{
diff --git a/arch/m68k/configs/stmark2_defconfig b/arch/m68k/configs/stmark2_defconfig
index 55e55dbc2fb6..3d07b1de7eb0 100644
--- a/arch/m68k/configs/stmark2_defconfig
+++ b/arch/m68k/configs/stmark2_defconfig
@@ -5,7 +5,6 @@ CONFIG_SYSVIPC=y
5CONFIG_LOG_BUF_SHIFT=14 5CONFIG_LOG_BUF_SHIFT=14
6CONFIG_NAMESPACES=y 6CONFIG_NAMESPACES=y
7CONFIG_BLK_DEV_INITRD=y 7CONFIG_BLK_DEV_INITRD=y
8CONFIG_INITRAMFS_SOURCE="../uClinux-dist/romfs"
9# CONFIG_RD_BZIP2 is not set 8# CONFIG_RD_BZIP2 is not set
10# CONFIG_RD_LZMA is not set 9# CONFIG_RD_LZMA is not set
11# CONFIG_RD_XZ is not set 10# CONFIG_RD_XZ is not set
diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild
index 3717b64a620d..c2e26a44c482 100644
--- a/arch/m68k/include/uapi/asm/Kbuild
+++ b/arch/m68k/include/uapi/asm/Kbuild
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += auxvec.h 4generic-y += auxvec.h
5generic-y += bitsperlong.h 5generic-y += bitsperlong.h
6generic-y += bpf_perf_event.h
6generic-y += errno.h 7generic-y += errno.h
7generic-y += ioctl.h 8generic-y += ioctl.h
8generic-y += ipcbuf.h 9generic-y += ipcbuf.h
diff --git a/arch/m68k/kernel/vmlinux-nommu.lds b/arch/m68k/kernel/vmlinux-nommu.lds
index 3aa571a513b5..cf6edda38971 100644
--- a/arch/m68k/kernel/vmlinux-nommu.lds
+++ b/arch/m68k/kernel/vmlinux-nommu.lds
@@ -45,6 +45,8 @@ SECTIONS {
45 .text : { 45 .text : {
46 HEAD_TEXT 46 HEAD_TEXT
47 TEXT_TEXT 47 TEXT_TEXT
48 IRQENTRY_TEXT
49 SOFTIRQENTRY_TEXT
48 SCHED_TEXT 50 SCHED_TEXT
49 CPUIDLE_TEXT 51 CPUIDLE_TEXT
50 LOCK_TEXT 52 LOCK_TEXT
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
index 89172b8974b9..625a5785804f 100644
--- a/arch/m68k/kernel/vmlinux-std.lds
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -16,6 +16,8 @@ SECTIONS
16 .text : { 16 .text : {
17 HEAD_TEXT 17 HEAD_TEXT
18 TEXT_TEXT 18 TEXT_TEXT
19 IRQENTRY_TEXT
20 SOFTIRQENTRY_TEXT
19 SCHED_TEXT 21 SCHED_TEXT
20 CPUIDLE_TEXT 22 CPUIDLE_TEXT
21 LOCK_TEXT 23 LOCK_TEXT
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
index 293990efc917..9868270b0984 100644
--- a/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -16,6 +16,8 @@ SECTIONS
16 .text : { 16 .text : {
17 HEAD_TEXT 17 HEAD_TEXT
18 TEXT_TEXT 18 TEXT_TEXT
19 IRQENTRY_TEXT
20 SOFTIRQENTRY_TEXT
19 SCHED_TEXT 21 SCHED_TEXT
20 CPUIDLE_TEXT 22 CPUIDLE_TEXT
21 LOCK_TEXT 23 LOCK_TEXT
diff --git a/arch/metag/include/uapi/asm/Kbuild b/arch/metag/include/uapi/asm/Kbuild
index 6ac763d9a3e3..f9eaf07d29f8 100644
--- a/arch/metag/include/uapi/asm/Kbuild
+++ b/arch/metag/include/uapi/asm/Kbuild
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += auxvec.h 4generic-y += auxvec.h
5generic-y += bitsperlong.h 5generic-y += bitsperlong.h
6generic-y += bpf_perf_event.h
6generic-y += errno.h 7generic-y += errno.h
7generic-y += fcntl.h 8generic-y += fcntl.h
8generic-y += ioctl.h 9generic-y += ioctl.h
diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild
index 06609ca36115..2c6a6bffea32 100644
--- a/arch/microblaze/include/uapi/asm/Kbuild
+++ b/arch/microblaze/include/uapi/asm/Kbuild
@@ -2,6 +2,7 @@
2include include/uapi/asm-generic/Kbuild.asm 2include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += bitsperlong.h 4generic-y += bitsperlong.h
5generic-y += bpf_perf_event.h
5generic-y += errno.h 6generic-y += errno.h
6generic-y += fcntl.h 7generic-y += fcntl.h
7generic-y += ioctl.h 8generic-y += ioctl.h
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 350a990fc719..8e0b3702f1c0 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -259,6 +259,7 @@ config BCM47XX
259 select LEDS_GPIO_REGISTER 259 select LEDS_GPIO_REGISTER
260 select BCM47XX_NVRAM 260 select BCM47XX_NVRAM
261 select BCM47XX_SPROM 261 select BCM47XX_SPROM
262 select BCM47XX_SSB if !BCM47XX_BCMA
262 help 263 help
263 Support for BCM47XX based boards 264 Support for BCM47XX based boards
264 265
@@ -389,6 +390,7 @@ config LANTIQ
389 select SYS_SUPPORTS_32BIT_KERNEL 390 select SYS_SUPPORTS_32BIT_KERNEL
390 select SYS_SUPPORTS_MIPS16 391 select SYS_SUPPORTS_MIPS16
391 select SYS_SUPPORTS_MULTITHREADING 392 select SYS_SUPPORTS_MULTITHREADING
393 select SYS_SUPPORTS_VPE_LOADER
392 select SYS_HAS_EARLY_PRINTK 394 select SYS_HAS_EARLY_PRINTK
393 select GPIOLIB 395 select GPIOLIB
394 select SWAP_IO_SPACE 396 select SWAP_IO_SPACE
@@ -516,6 +518,7 @@ config MIPS_MALTA
516 select SYS_SUPPORTS_MIPS16 518 select SYS_SUPPORTS_MIPS16
517 select SYS_SUPPORTS_MULTITHREADING 519 select SYS_SUPPORTS_MULTITHREADING
518 select SYS_SUPPORTS_SMARTMIPS 520 select SYS_SUPPORTS_SMARTMIPS
521 select SYS_SUPPORTS_VPE_LOADER
519 select SYS_SUPPORTS_ZBOOT 522 select SYS_SUPPORTS_ZBOOT
520 select SYS_SUPPORTS_RELOCATABLE 523 select SYS_SUPPORTS_RELOCATABLE
521 select USE_OF 524 select USE_OF
@@ -2281,9 +2284,16 @@ config MIPSR2_TO_R6_EMULATOR
2281 The only reason this is a build-time option is to save ~14K from the 2284 The only reason this is a build-time option is to save ~14K from the
2282 final kernel image. 2285 final kernel image.
2283 2286
2287config SYS_SUPPORTS_VPE_LOADER
2288 bool
2289 depends on SYS_SUPPORTS_MULTITHREADING
2290 help
2291 Indicates that the platform supports the VPE loader, and provides
2292 physical_memsize.
2293
2284config MIPS_VPE_LOADER 2294config MIPS_VPE_LOADER
2285 bool "VPE loader support." 2295 bool "VPE loader support."
2286 depends on SYS_SUPPORTS_MULTITHREADING && MODULES 2296 depends on SYS_SUPPORTS_VPE_LOADER && MODULES
2287 select CPU_MIPSR2_IRQ_VI 2297 select CPU_MIPSR2_IRQ_VI
2288 select CPU_MIPSR2_IRQ_EI 2298 select CPU_MIPSR2_IRQ_EI
2289 select MIPS_MT 2299 select MIPS_MT
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 464af5e025d6..0749c3724543 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -124,30 +124,36 @@ config SCACHE_DEBUGFS
124 124
125 If unsure, say N. 125 If unsure, say N.
126 126
127menuconfig MIPS_CPS_NS16550 127menuconfig MIPS_CPS_NS16550_BOOL
128 bool "CPS SMP NS16550 UART output" 128 bool "CPS SMP NS16550 UART output"
129 depends on MIPS_CPS 129 depends on MIPS_CPS
130 help 130 help
131 Output debug information via an ns16550 compatible UART if exceptions 131 Output debug information via an ns16550 compatible UART if exceptions
132 occur early in the boot process of a secondary core. 132 occur early in the boot process of a secondary core.
133 133
134if MIPS_CPS_NS16550 134if MIPS_CPS_NS16550_BOOL
135
136config MIPS_CPS_NS16550
137 def_bool MIPS_CPS_NS16550_BASE != 0
135 138
136config MIPS_CPS_NS16550_BASE 139config MIPS_CPS_NS16550_BASE
137 hex "UART Base Address" 140 hex "UART Base Address"
138 default 0x1b0003f8 if MIPS_MALTA 141 default 0x1b0003f8 if MIPS_MALTA
142 default 0
139 help 143 help
140 The base address of the ns16550 compatible UART on which to output 144 The base address of the ns16550 compatible UART on which to output
141 debug information from the early stages of core startup. 145 debug information from the early stages of core startup.
142 146
147 This is only used if non-zero.
148
143config MIPS_CPS_NS16550_SHIFT 149config MIPS_CPS_NS16550_SHIFT
144 int "UART Register Shift" 150 int "UART Register Shift"
145 default 0 if MIPS_MALTA 151 default 0
146 help 152 help
147 The number of bits to shift ns16550 register indices by in order to 153 The number of bits to shift ns16550 register indices by in order to
148 form their addresses. That is, log base 2 of the span between 154 form their addresses. That is, log base 2 of the span between
149 adjacent ns16550 registers in the system. 155 adjacent ns16550 registers in the system.
150 156
151endif # MIPS_CPS_NS16550 157endif # MIPS_CPS_NS16550_BOOL
152 158
153endmenu 159endmenu
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index 4674f1efbe7a..e1675c25d5d4 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -575,7 +575,7 @@ static int __init ar7_register_uarts(void)
575 uart_port.type = PORT_AR7; 575 uart_port.type = PORT_AR7;
576 uart_port.uartclk = clk_get_rate(bus_clk) / 2; 576 uart_port.uartclk = clk_get_rate(bus_clk) / 2;
577 uart_port.iotype = UPIO_MEM32; 577 uart_port.iotype = UPIO_MEM32;
578 uart_port.flags = UPF_FIXED_TYPE; 578 uart_port.flags = UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF;
579 uart_port.regshift = 2; 579 uart_port.regshift = 2;
580 580
581 uart_port.line = 0; 581 uart_port.line = 0;
diff --git a/arch/mips/ath25/devices.c b/arch/mips/ath25/devices.c
index e1156347da53..301a9028273c 100644
--- a/arch/mips/ath25/devices.c
+++ b/arch/mips/ath25/devices.c
@@ -73,6 +73,7 @@ const char *get_system_type(void)
73 73
74void __init ath25_serial_setup(u32 mapbase, int irq, unsigned int uartclk) 74void __init ath25_serial_setup(u32 mapbase, int irq, unsigned int uartclk)
75{ 75{
76#ifdef CONFIG_SERIAL_8250_CONSOLE
76 struct uart_port s; 77 struct uart_port s;
77 78
78 memset(&s, 0, sizeof(s)); 79 memset(&s, 0, sizeof(s));
@@ -85,6 +86,7 @@ void __init ath25_serial_setup(u32 mapbase, int irq, unsigned int uartclk)
85 s.uartclk = uartclk; 86 s.uartclk = uartclk;
86 87
87 early_serial_setup(&s); 88 early_serial_setup(&s);
89#endif /* CONFIG_SERIAL_8250_CONSOLE */
88} 90}
89 91
90int __init ath25_add_wmac(int nr, u32 base, int irq) 92int __init ath25_add_wmac(int nr, u32 base, int irq)
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 7c8aab23bce8..b1f66699677d 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -16,7 +16,6 @@ generic-y += qrwlock.h
16generic-y += qspinlock.h 16generic-y += qspinlock.h
17generic-y += sections.h 17generic-y += sections.h
18generic-y += segment.h 18generic-y += segment.h
19generic-y += serial.h
20generic-y += trace_clock.h 19generic-y += trace_clock.h
21generic-y += unaligned.h 20generic-y += unaligned.h
22generic-y += user.h 21generic-y += user.h
diff --git a/arch/mips/include/asm/serial.h b/arch/mips/include/asm/serial.h
new file mode 100644
index 000000000000..1d830c6666c2
--- /dev/null
+++ b/arch/mips/include/asm/serial.h
@@ -0,0 +1,22 @@
1/*
2 * Copyright (C) 2017 MIPS Tech, LLC
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 */
9#ifndef __ASM__SERIAL_H
10#define __ASM__SERIAL_H
11
12#ifdef CONFIG_MIPS_GENERIC
13/*
14 * Generic kernels cannot know a correct value for all platforms at
15 * compile time. Set it to 0 to prevent 8250_early using it
16 */
17#define BASE_BAUD 0
18#else
19#include <asm-generic/serial.h>
20#endif
21
22#endif /* __ASM__SERIAL_H */
diff --git a/arch/mips/include/uapi/asm/Kbuild b/arch/mips/include/uapi/asm/Kbuild
index a0266feba9e6..7a4becd8963a 100644
--- a/arch/mips/include/uapi/asm/Kbuild
+++ b/arch/mips/include/uapi/asm/Kbuild
@@ -1,4 +1,5 @@
1# UAPI Header export list 1# UAPI Header export list
2include include/uapi/asm-generic/Kbuild.asm 2include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += bpf_perf_event.h
4generic-y += ipcbuf.h 5generic-y += ipcbuf.h
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index c7ed26029cbb..e68e6e04063a 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -235,6 +235,7 @@ LEAF(mips_cps_core_init)
235 has_mt t0, 3f 235 has_mt t0, 3f
236 236
237 .set push 237 .set push
238 .set MIPS_ISA_LEVEL_RAW
238 .set mt 239 .set mt
239 240
240 /* Only allow 1 TC per VPE to execute... */ 241 /* Only allow 1 TC per VPE to execute... */
@@ -388,6 +389,7 @@ LEAF(mips_cps_boot_vpes)
388#elif defined(CONFIG_MIPS_MT) 389#elif defined(CONFIG_MIPS_MT)
389 390
390 .set push 391 .set push
392 .set MIPS_ISA_LEVEL_RAW
391 .set mt 393 .set mt
392 394
393 /* If the core doesn't support MT then return */ 395 /* If the core doesn't support MT then return */
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index dd5567b1e305..8f5bd04f320a 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -292,7 +292,6 @@ void mips_cm_lock_other(unsigned int cluster, unsigned int core,
292 *this_cpu_ptr(&cm_core_lock_flags)); 292 *this_cpu_ptr(&cm_core_lock_flags));
293 } else { 293 } else {
294 WARN_ON(cluster != 0); 294 WARN_ON(cluster != 0);
295 WARN_ON(vp != 0);
296 WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL); 295 WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
297 296
298 /* 297 /*
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 45d0b6b037ee..57028d49c202 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -705,6 +705,18 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
705 struct task_struct *t; 705 struct task_struct *t;
706 int max_users; 706 int max_users;
707 707
708 /* If nothing to change, return right away, successfully. */
709 if (value == mips_get_process_fp_mode(task))
710 return 0;
711
712 /* Only accept a mode change if 64-bit FP enabled for o32. */
713 if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
714 return -EOPNOTSUPP;
715
716 /* And only for o32 tasks. */
717 if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
718 return -EOPNOTSUPP;
719
708 /* Check the value is valid */ 720 /* Check the value is valid */
709 if (value & ~known_bits) 721 if (value & ~known_bits)
710 return -EOPNOTSUPP; 722 return -EOPNOTSUPP;
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index efbd8df8b665..0b23b1ad99e6 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -419,63 +419,160 @@ static int gpr64_set(struct task_struct *target,
419 419
420#endif /* CONFIG_64BIT */ 420#endif /* CONFIG_64BIT */
421 421
422/*
423 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
424 * !CONFIG_CPU_HAS_MSA variant. FP context's general register slots
425 * correspond 1:1 to buffer slots. Only general registers are copied.
426 */
427static int fpr_get_fpa(struct task_struct *target,
428 unsigned int *pos, unsigned int *count,
429 void **kbuf, void __user **ubuf)
430{
431 return user_regset_copyout(pos, count, kbuf, ubuf,
432 &target->thread.fpu,
433 0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
434}
435
436/*
437 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
438 * CONFIG_CPU_HAS_MSA variant. Only lower 64 bits of FP context's
439 * general register slots are copied to buffer slots. Only general
440 * registers are copied.
441 */
442static int fpr_get_msa(struct task_struct *target,
443 unsigned int *pos, unsigned int *count,
444 void **kbuf, void __user **ubuf)
445{
446 unsigned int i;
447 u64 fpr_val;
448 int err;
449
450 BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
451 for (i = 0; i < NUM_FPU_REGS; i++) {
452 fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
453 err = user_regset_copyout(pos, count, kbuf, ubuf,
454 &fpr_val, i * sizeof(elf_fpreg_t),
455 (i + 1) * sizeof(elf_fpreg_t));
456 if (err)
457 return err;
458 }
459
460 return 0;
461}
462
463/*
464 * Copy the floating-point context to the supplied NT_PRFPREG buffer.
465 * Choose the appropriate helper for general registers, and then copy
466 * the FCSR register separately.
467 */
422static int fpr_get(struct task_struct *target, 468static int fpr_get(struct task_struct *target,
423 const struct user_regset *regset, 469 const struct user_regset *regset,
424 unsigned int pos, unsigned int count, 470 unsigned int pos, unsigned int count,
425 void *kbuf, void __user *ubuf) 471 void *kbuf, void __user *ubuf)
426{ 472{
427 unsigned i; 473 const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
428 int err; 474 int err;
429 u64 fpr_val;
430 475
431 /* XXX fcr31 */ 476 if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
477 err = fpr_get_fpa(target, &pos, &count, &kbuf, &ubuf);
478 else
479 err = fpr_get_msa(target, &pos, &count, &kbuf, &ubuf);
480 if (err)
481 return err;
432 482
433 if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t)) 483 err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
434 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 484 &target->thread.fpu.fcr31,
435 &target->thread.fpu, 485 fcr31_pos, fcr31_pos + sizeof(u32));
436 0, sizeof(elf_fpregset_t));
437 486
438 for (i = 0; i < NUM_FPU_REGS; i++) { 487 return err;
439 fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0); 488}
440 err = user_regset_copyout(&pos, &count, &kbuf, &ubuf, 489
441 &fpr_val, i * sizeof(elf_fpreg_t), 490/*
442 (i + 1) * sizeof(elf_fpreg_t)); 491 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
492 * !CONFIG_CPU_HAS_MSA variant. Buffer slots correspond 1:1 to FP
493 * context's general register slots. Only general registers are copied.
494 */
495static int fpr_set_fpa(struct task_struct *target,
496 unsigned int *pos, unsigned int *count,
497 const void **kbuf, const void __user **ubuf)
498{
499 return user_regset_copyin(pos, count, kbuf, ubuf,
500 &target->thread.fpu,
501 0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
502}
503
504/*
505 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
506 * CONFIG_CPU_HAS_MSA variant. Buffer slots are copied to lower 64
507 * bits only of FP context's general register slots. Only general
508 * registers are copied.
509 */
510static int fpr_set_msa(struct task_struct *target,
511 unsigned int *pos, unsigned int *count,
512 const void **kbuf, const void __user **ubuf)
513{
514 unsigned int i;
515 u64 fpr_val;
516 int err;
517
518 BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
519 for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
520 err = user_regset_copyin(pos, count, kbuf, ubuf,
521 &fpr_val, i * sizeof(elf_fpreg_t),
522 (i + 1) * sizeof(elf_fpreg_t));
443 if (err) 523 if (err)
444 return err; 524 return err;
525 set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
445 } 526 }
446 527
447 return 0; 528 return 0;
448} 529}
449 530
531/*
532 * Copy the supplied NT_PRFPREG buffer to the floating-point context.
533 * Choose the appropriate helper for general registers, and then copy
534 * the FCSR register separately.
535 *
536 * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
537 * which is supposed to have been guaranteed by the kernel before
538 * calling us, e.g. in `ptrace_regset'. We enforce that requirement,
539 * so that we can safely avoid preinitializing temporaries for
540 * partial register writes.
541 */
450static int fpr_set(struct task_struct *target, 542static int fpr_set(struct task_struct *target,
451 const struct user_regset *regset, 543 const struct user_regset *regset,
452 unsigned int pos, unsigned int count, 544 unsigned int pos, unsigned int count,
453 const void *kbuf, const void __user *ubuf) 545 const void *kbuf, const void __user *ubuf)
454{ 546{
455 unsigned i; 547 const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
548 u32 fcr31;
456 int err; 549 int err;
457 u64 fpr_val;
458 550
459 /* XXX fcr31 */ 551 BUG_ON(count % sizeof(elf_fpreg_t));
552
553 if (pos + count > sizeof(elf_fpregset_t))
554 return -EIO;
460 555
461 init_fp_ctx(target); 556 init_fp_ctx(target);
462 557
463 if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t)) 558 if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
464 return user_regset_copyin(&pos, &count, &kbuf, &ubuf, 559 err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
465 &target->thread.fpu, 560 else
466 0, sizeof(elf_fpregset_t)); 561 err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
562 if (err)
563 return err;
467 564
468 BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t)); 565 if (count > 0) {
469 for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) {
470 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 566 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
471 &fpr_val, i * sizeof(elf_fpreg_t), 567 &fcr31,
472 (i + 1) * sizeof(elf_fpreg_t)); 568 fcr31_pos, fcr31_pos + sizeof(u32));
473 if (err) 569 if (err)
474 return err; 570 return err;
475 set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val); 571
572 ptrace_setfcr31(target, fcr31);
476 } 573 }
477 574
478 return 0; 575 return err;
479} 576}
480 577
481enum mips_regset { 578enum mips_regset {
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index 78c2affeabf8..e84e12655fa8 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -16,4 +16,5 @@ obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o
16obj-$(CONFIG_CPU_TX39XX) += r3k_dump_tlb.o 16obj-$(CONFIG_CPU_TX39XX) += r3k_dump_tlb.o
17 17
18# libgcc-style stuff needed in the kernel 18# libgcc-style stuff needed in the kernel
19obj-y += ashldi3.o ashrdi3.o bswapsi.o bswapdi.o cmpdi2.o lshrdi3.o ucmpdi2.o 19obj-y += ashldi3.o ashrdi3.o bswapsi.o bswapdi.o cmpdi2.o lshrdi3.o multi3.o \
20 ucmpdi2.o
diff --git a/arch/mips/lib/libgcc.h b/arch/mips/lib/libgcc.h
index 28002ed90c2c..199a7f96282f 100644
--- a/arch/mips/lib/libgcc.h
+++ b/arch/mips/lib/libgcc.h
@@ -10,10 +10,18 @@ typedef int word_type __attribute__ ((mode (__word__)));
10struct DWstruct { 10struct DWstruct {
11 int high, low; 11 int high, low;
12}; 12};
13
14struct TWstruct {
15 long long high, low;
16};
13#elif defined(__LITTLE_ENDIAN) 17#elif defined(__LITTLE_ENDIAN)
14struct DWstruct { 18struct DWstruct {
15 int low, high; 19 int low, high;
16}; 20};
21
22struct TWstruct {
23 long long low, high;
24};
17#else 25#else
18#error I feel sick. 26#error I feel sick.
19#endif 27#endif
@@ -23,4 +31,13 @@ typedef union {
23 long long ll; 31 long long ll;
24} DWunion; 32} DWunion;
25 33
34#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6)
35typedef int ti_type __attribute__((mode(TI)));
36
37typedef union {
38 struct TWstruct s;
39 ti_type ti;
40} TWunion;
41#endif
42
26#endif /* __ASM_LIBGCC_H */ 43#endif /* __ASM_LIBGCC_H */
diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c
new file mode 100644
index 000000000000..111ad475aa0c
--- /dev/null
+++ b/arch/mips/lib/multi3.c
@@ -0,0 +1,54 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/export.h>
3
4#include "libgcc.h"
5
6/*
7 * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that
8 * specific case only we'll implement it here.
9 *
10 * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981
11 */
12#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7)
13
14/* multiply 64-bit values, low 64-bits returned */
15static inline long long notrace dmulu(long long a, long long b)
16{
17 long long res;
18
19 asm ("dmulu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b));
20 return res;
21}
22
23/* multiply 64-bit unsigned values, high 64-bits of 128-bit result returned */
24static inline long long notrace dmuhu(long long a, long long b)
25{
26 long long res;
27
28 asm ("dmuhu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b));
29 return res;
30}
31
32/* multiply 128-bit values, low 128-bits returned */
33ti_type notrace __multi3(ti_type a, ti_type b)
34{
35 TWunion res, aa, bb;
36
37 aa.ti = a;
38 bb.ti = b;
39
40 /*
41 * a * b = (a.lo * b.lo)
42 * + 2^64 * (a.hi * b.lo + a.lo * b.hi)
43 * [+ 2^128 * (a.hi * b.hi)]
44 */
45 res.s.low = dmulu(aa.s.low, bb.s.low);
46 res.s.high = dmuhu(aa.s.low, bb.s.low);
47 res.s.high += dmulu(aa.s.high, bb.s.low);
48 res.s.high += dmulu(aa.s.low, bb.s.high);
49
50 return res.ti;
51}
52EXPORT_SYMBOL(__multi3);
53
54#endif /* 64BIT && CPU_MIPSR6 && GCC7 */
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index cdb5a191b9d5..9bb6baa45da3 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -40,7 +40,7 @@
40 40
41#include "uasm.c" 41#include "uasm.c"
42 42
43static const struct insn const insn_table_MM[insn_invalid] = { 43static const struct insn insn_table_MM[insn_invalid] = {
44 [insn_addu] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD}, 44 [insn_addu] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD},
45 [insn_addiu] = {M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM}, 45 [insn_addiu] = {M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
46 [insn_and] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD}, 46 [insn_and] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD},
diff --git a/arch/mips/ralink/timer.c b/arch/mips/ralink/timer.c
index d4469b20d176..4f46a4509f79 100644
--- a/arch/mips/ralink/timer.c
+++ b/arch/mips/ralink/timer.c
@@ -109,9 +109,9 @@ static int rt_timer_probe(struct platform_device *pdev)
109 } 109 }
110 110
111 rt->irq = platform_get_irq(pdev, 0); 111 rt->irq = platform_get_irq(pdev, 0);
112 if (!rt->irq) { 112 if (rt->irq < 0) {
113 dev_err(&pdev->dev, "failed to load irq\n"); 113 dev_err(&pdev->dev, "failed to load irq\n");
114 return -ENOENT; 114 return rt->irq;
115 } 115 }
116 116
117 rt->membase = devm_ioremap_resource(&pdev->dev, res); 117 rt->membase = devm_ioremap_resource(&pdev->dev, res);
diff --git a/arch/mips/rb532/Makefile b/arch/mips/rb532/Makefile
index efdecdb6e3ea..8186afca2234 100644
--- a/arch/mips/rb532/Makefile
+++ b/arch/mips/rb532/Makefile
@@ -2,4 +2,6 @@
2# Makefile for the RB532 board specific parts of the kernel 2# Makefile for the RB532 board specific parts of the kernel
3# 3#
4 4
5obj-y += irq.o time.o setup.o serial.o prom.o gpio.o devices.o 5obj-$(CONFIG_SERIAL_8250_CONSOLE) += serial.o
6
7obj-y += irq.o time.o setup.o prom.o gpio.o devices.o
diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
index 32ea3e6731d6..354d258396ff 100644
--- a/arch/mips/rb532/devices.c
+++ b/arch/mips/rb532/devices.c
@@ -310,6 +310,8 @@ static int __init plat_setup_devices(void)
310 return platform_add_devices(rb532_devs, ARRAY_SIZE(rb532_devs)); 310 return platform_add_devices(rb532_devs, ARRAY_SIZE(rb532_devs));
311} 311}
312 312
313#ifdef CONFIG_NET
314
313static int __init setup_kmac(char *s) 315static int __init setup_kmac(char *s)
314{ 316{
315 printk(KERN_INFO "korina mac = %s\n", s); 317 printk(KERN_INFO "korina mac = %s\n", s);
@@ -322,4 +324,6 @@ static int __init setup_kmac(char *s)
322 324
323__setup("kmac=", setup_kmac); 325__setup("kmac=", setup_kmac);
324 326
327#endif /* CONFIG_NET */
328
325arch_initcall(plat_setup_devices); 329arch_initcall(plat_setup_devices);
diff --git a/arch/mn10300/include/uapi/asm/Kbuild b/arch/mn10300/include/uapi/asm/Kbuild
index c94ee54210bc..81271d3af47c 100644
--- a/arch/mn10300/include/uapi/asm/Kbuild
+++ b/arch/mn10300/include/uapi/asm/Kbuild
@@ -1,4 +1,5 @@
1# UAPI Header export list 1# UAPI Header export list
2include include/uapi/asm-generic/Kbuild.asm 2include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += bpf_perf_event.h
4generic-y += siginfo.h 5generic-y += siginfo.h
diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild
index ffca24da7647..13a3d77b4d7b 100644
--- a/arch/nios2/include/uapi/asm/Kbuild
+++ b/arch/nios2/include/uapi/asm/Kbuild
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += auxvec.h 4generic-y += auxvec.h
5generic-y += bitsperlong.h 5generic-y += bitsperlong.h
6generic-y += bpf_perf_event.h
6generic-y += errno.h 7generic-y += errno.h
7generic-y += fcntl.h 8generic-y += fcntl.h
8generic-y += ioctl.h 9generic-y += ioctl.h
diff --git a/arch/openrisc/include/uapi/asm/Kbuild b/arch/openrisc/include/uapi/asm/Kbuild
index 62286dbeb904..130c16ccba0a 100644
--- a/arch/openrisc/include/uapi/asm/Kbuild
+++ b/arch/openrisc/include/uapi/asm/Kbuild
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += auxvec.h 4generic-y += auxvec.h
5generic-y += bitsperlong.h 5generic-y += bitsperlong.h
6generic-y += bpf_perf_event.h
6generic-y += errno.h 7generic-y += errno.h
7generic-y += fcntl.h 8generic-y += fcntl.h
8generic-y += ioctl.h 9generic-y += ioctl.h
diff --git a/arch/parisc/boot/compressed/misc.c b/arch/parisc/boot/compressed/misc.c
index 9345b44b86f0..f57118e1f6b4 100644
--- a/arch/parisc/boot/compressed/misc.c
+++ b/arch/parisc/boot/compressed/misc.c
@@ -123,8 +123,8 @@ int puts(const char *s)
123 while ((nuline = strchr(s, '\n')) != NULL) { 123 while ((nuline = strchr(s, '\n')) != NULL) {
124 if (nuline != s) 124 if (nuline != s)
125 pdc_iodc_print(s, nuline - s); 125 pdc_iodc_print(s, nuline - s);
126 pdc_iodc_print("\r\n", 2); 126 pdc_iodc_print("\r\n", 2);
127 s = nuline + 1; 127 s = nuline + 1;
128 } 128 }
129 if (*s != '\0') 129 if (*s != '\0')
130 pdc_iodc_print(s, strlen(s)); 130 pdc_iodc_print(s, strlen(s));
diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
index dd5a08aaa4da..3eb4bfc1fb36 100644
--- a/arch/parisc/include/asm/ldcw.h
+++ b/arch/parisc/include/asm/ldcw.h
@@ -12,6 +12,7 @@
12 for the semaphore. */ 12 for the semaphore. */
13 13
14#define __PA_LDCW_ALIGNMENT 16 14#define __PA_LDCW_ALIGNMENT 16
15#define __PA_LDCW_ALIGN_ORDER 4
15#define __ldcw_align(a) ({ \ 16#define __ldcw_align(a) ({ \
16 unsigned long __ret = (unsigned long) &(a)->lock[0]; \ 17 unsigned long __ret = (unsigned long) &(a)->lock[0]; \
17 __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \ 18 __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \
@@ -29,6 +30,7 @@
29 ldcd). */ 30 ldcd). */
30 31
31#define __PA_LDCW_ALIGNMENT 4 32#define __PA_LDCW_ALIGNMENT 4
33#define __PA_LDCW_ALIGN_ORDER 2
32#define __ldcw_align(a) (&(a)->slock) 34#define __ldcw_align(a) (&(a)->slock)
33#define __LDCW "ldcw,co" 35#define __LDCW "ldcw,co"
34 36
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index c980a02a52bc..598c8d60fa5e 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -35,7 +35,12 @@ struct thread_info {
35 35
36/* thread information allocation */ 36/* thread information allocation */
37 37
38#ifdef CONFIG_IRQSTACKS
39#define THREAD_SIZE_ORDER 2 /* PA-RISC requires at least 16k stack */
40#else
38#define THREAD_SIZE_ORDER 3 /* PA-RISC requires at least 32k stack */ 41#define THREAD_SIZE_ORDER 3 /* PA-RISC requires at least 32k stack */
42#endif
43
39/* Be sure to hunt all references to this down when you change the size of 44/* Be sure to hunt all references to this down when you change the size of
40 * the kernel stack */ 45 * the kernel stack */
41#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) 46#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
diff --git a/arch/parisc/include/uapi/asm/Kbuild b/arch/parisc/include/uapi/asm/Kbuild
index 196d2a4efb31..286ef5a5904b 100644
--- a/arch/parisc/include/uapi/asm/Kbuild
+++ b/arch/parisc/include/uapi/asm/Kbuild
@@ -2,6 +2,7 @@
2include include/uapi/asm-generic/Kbuild.asm 2include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += auxvec.h 4generic-y += auxvec.h
5generic-y += bpf_perf_event.h
5generic-y += kvm_para.h 6generic-y += kvm_para.h
6generic-y += param.h 7generic-y += param.h
7generic-y += poll.h 8generic-y += poll.h
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index d8f77358e2ba..29b99b8964aa 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -870,7 +870,7 @@ static void print_parisc_device(struct parisc_device *dev)
870 static int count; 870 static int count;
871 871
872 print_pa_hwpath(dev, hw_path); 872 print_pa_hwpath(dev, hw_path);
873 printk(KERN_INFO "%d. %s at 0x%p [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }", 873 printk(KERN_INFO "%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
874 ++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type, 874 ++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type,
875 dev->id.hversion_rev, dev->id.hversion, dev->id.sversion); 875 dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
876 876
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index a4fd296c958e..e95207c0565e 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -35,6 +35,7 @@
35#include <asm/pgtable.h> 35#include <asm/pgtable.h>
36#include <asm/signal.h> 36#include <asm/signal.h>
37#include <asm/unistd.h> 37#include <asm/unistd.h>
38#include <asm/ldcw.h>
38#include <asm/thread_info.h> 39#include <asm/thread_info.h>
39 40
40#include <linux/linkage.h> 41#include <linux/linkage.h>
@@ -46,6 +47,14 @@
46#endif 47#endif
47 48
48 .import pa_tlb_lock,data 49 .import pa_tlb_lock,data
50 .macro load_pa_tlb_lock reg
51#if __PA_LDCW_ALIGNMENT > 4
52 load32 PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
53 depi 0,31,__PA_LDCW_ALIGN_ORDER, \reg
54#else
55 load32 PA(pa_tlb_lock), \reg
56#endif
57 .endm
49 58
50 /* space_to_prot macro creates a prot id from a space id */ 59 /* space_to_prot macro creates a prot id from a space id */
51 60
@@ -457,7 +466,7 @@
457 .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault 466 .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault
458#ifdef CONFIG_SMP 467#ifdef CONFIG_SMP
459 cmpib,COND(=),n 0,\spc,2f 468 cmpib,COND(=),n 0,\spc,2f
460 load32 PA(pa_tlb_lock),\tmp 469 load_pa_tlb_lock \tmp
4611: LDCW 0(\tmp),\tmp1 4701: LDCW 0(\tmp),\tmp1
462 cmpib,COND(=) 0,\tmp1,1b 471 cmpib,COND(=) 0,\tmp1,1b
463 nop 472 nop
@@ -480,7 +489,7 @@
480 /* Release pa_tlb_lock lock. */ 489 /* Release pa_tlb_lock lock. */
481 .macro tlb_unlock1 spc,tmp 490 .macro tlb_unlock1 spc,tmp
482#ifdef CONFIG_SMP 491#ifdef CONFIG_SMP
483 load32 PA(pa_tlb_lock),\tmp 492 load_pa_tlb_lock \tmp
484 tlb_unlock0 \spc,\tmp 493 tlb_unlock0 \spc,\tmp
485#endif 494#endif
486 .endm 495 .endm
@@ -878,9 +887,6 @@ ENTRY_CFI(syscall_exit_rfi)
878 STREG %r19,PT_SR7(%r16) 887 STREG %r19,PT_SR7(%r16)
879 888
880intr_return: 889intr_return:
881 /* NOTE: Need to enable interrupts incase we schedule. */
882 ssm PSW_SM_I, %r0
883
884 /* check for reschedule */ 890 /* check for reschedule */
885 mfctl %cr30,%r1 891 mfctl %cr30,%r1
886 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */ 892 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
@@ -907,6 +913,11 @@ intr_check_sig:
907 LDREG PT_IASQ1(%r16), %r20 913 LDREG PT_IASQ1(%r16), %r20
908 cmpib,COND(=),n 0,%r20,intr_restore /* backward */ 914 cmpib,COND(=),n 0,%r20,intr_restore /* backward */
909 915
916 /* NOTE: We need to enable interrupts if we have to deliver
917 * signals. We used to do this earlier but it caused kernel
918 * stack overflows. */
919 ssm PSW_SM_I, %r0
920
910 copy %r0, %r25 /* long in_syscall = 0 */ 921 copy %r0, %r25 /* long in_syscall = 0 */
911#ifdef CONFIG_64BIT 922#ifdef CONFIG_64BIT
912 ldo -16(%r30),%r29 /* Reference param save area */ 923 ldo -16(%r30),%r29 /* Reference param save area */
@@ -958,6 +969,10 @@ intr_do_resched:
958 cmpib,COND(=) 0, %r20, intr_do_preempt 969 cmpib,COND(=) 0, %r20, intr_do_preempt
959 nop 970 nop
960 971
972 /* NOTE: We need to enable interrupts if we schedule. We used
973 * to do this earlier but it caused kernel stack overflows. */
974 ssm PSW_SM_I, %r0
975
961#ifdef CONFIG_64BIT 976#ifdef CONFIG_64BIT
962 ldo -16(%r30),%r29 /* Reference param save area */ 977 ldo -16(%r30),%r29 /* Reference param save area */
963#endif 978#endif
diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
index e3a8e5e4d5de..8d072c44f300 100644
--- a/arch/parisc/kernel/hpmc.S
+++ b/arch/parisc/kernel/hpmc.S
@@ -305,6 +305,7 @@ ENDPROC_CFI(os_hpmc)
305 305
306 306
307 __INITRODATA 307 __INITRODATA
308 .align 4
308 .export os_hpmc_size 309 .export os_hpmc_size
309os_hpmc_size: 310os_hpmc_size:
310 .word .os_hpmc_end-.os_hpmc 311 .word .os_hpmc_end-.os_hpmc
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index adf7187f8951..2d40c4ff3f69 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -36,6 +36,7 @@
36#include <asm/assembly.h> 36#include <asm/assembly.h>
37#include <asm/pgtable.h> 37#include <asm/pgtable.h>
38#include <asm/cache.h> 38#include <asm/cache.h>
39#include <asm/ldcw.h>
39#include <linux/linkage.h> 40#include <linux/linkage.h>
40 41
41 .text 42 .text
@@ -333,8 +334,12 @@ ENDPROC_CFI(flush_data_cache_local)
333 334
334 .macro tlb_lock la,flags,tmp 335 .macro tlb_lock la,flags,tmp
335#ifdef CONFIG_SMP 336#ifdef CONFIG_SMP
336 ldil L%pa_tlb_lock,%r1 337#if __PA_LDCW_ALIGNMENT > 4
337 ldo R%pa_tlb_lock(%r1),\la 338 load32 pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
339 depi 0,31,__PA_LDCW_ALIGN_ORDER, \la
340#else
341 load32 pa_tlb_lock, \la
342#endif
338 rsm PSW_SM_I,\flags 343 rsm PSW_SM_I,\flags
3391: LDCW 0(\la),\tmp 3441: LDCW 0(\la),\tmp
340 cmpib,<>,n 0,\tmp,3f 345 cmpib,<>,n 0,\tmp,3f
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 30f92391a93e..cad3e8661cd6 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -39,6 +39,7 @@
39#include <linux/kernel.h> 39#include <linux/kernel.h>
40#include <linux/mm.h> 40#include <linux/mm.h>
41#include <linux/fs.h> 41#include <linux/fs.h>
42#include <linux/cpu.h>
42#include <linux/module.h> 43#include <linux/module.h>
43#include <linux/personality.h> 44#include <linux/personality.h>
44#include <linux/ptrace.h> 45#include <linux/ptrace.h>
@@ -184,6 +185,44 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
184} 185}
185 186
186/* 187/*
188 * Idle thread support
189 *
190 * Detect when running on QEMU with SeaBIOS PDC Firmware and let
191 * QEMU idle the host too.
192 */
193
194int running_on_qemu __read_mostly;
195
196void __cpuidle arch_cpu_idle_dead(void)
197{
198 /* nop on real hardware, qemu will offline CPU. */
199 asm volatile("or %%r31,%%r31,%%r31\n":::);
200}
201
202void __cpuidle arch_cpu_idle(void)
203{
204 local_irq_enable();
205
206 /* nop on real hardware, qemu will idle sleep. */
207 asm volatile("or %%r10,%%r10,%%r10\n":::);
208}
209
210static int __init parisc_idle_init(void)
211{
212 const char *marker;
213
214 /* check QEMU/SeaBIOS marker in PAGE0 */
215 marker = (char *) &PAGE0->pad0;
216 running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
217
218 if (!running_on_qemu)
219 cpu_idle_poll_ctrl(1);
220
221 return 0;
222}
223arch_initcall(parisc_idle_init);
224
225/*
187 * Copy architecture-specific thread state 226 * Copy architecture-specific thread state
188 */ 227 */
189int 228int
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 5a657986ebbf..143f90e2f9f3 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -15,7 +15,6 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/kallsyms.h> 16#include <linux/kallsyms.h>
17#include <linux/sort.h> 17#include <linux/sort.h>
18#include <linux/sched.h>
19 18
20#include <linux/uaccess.h> 19#include <linux/uaccess.h>
21#include <asm/assembly.h> 20#include <asm/assembly.h>
diff --git a/arch/parisc/lib/delay.c b/arch/parisc/lib/delay.c
index 7eab4bb8abe6..66e506520505 100644
--- a/arch/parisc/lib/delay.c
+++ b/arch/parisc/lib/delay.c
@@ -16,9 +16,7 @@
16#include <linux/preempt.h> 16#include <linux/preempt.h>
17#include <linux/init.h> 17#include <linux/init.h>
18 18
19#include <asm/processor.h>
20#include <asm/delay.h> 19#include <asm/delay.h>
21
22#include <asm/special_insns.h> /* for mfctl() */ 20#include <asm/special_insns.h> /* for mfctl() */
23#include <asm/processor.h> /* for boot_cpu_data */ 21#include <asm/processor.h> /* for boot_cpu_data */
24 22
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 13f7854e0d49..48f41399fc0b 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -631,11 +631,11 @@ void __init mem_init(void)
631 mem_init_print_info(NULL); 631 mem_init_print_info(NULL);
632#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */ 632#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
633 printk("virtual kernel memory layout:\n" 633 printk("virtual kernel memory layout:\n"
634 " vmalloc : 0x%p - 0x%p (%4ld MB)\n" 634 " vmalloc : 0x%px - 0x%px (%4ld MB)\n"
635 " memory : 0x%p - 0x%p (%4ld MB)\n" 635 " memory : 0x%px - 0x%px (%4ld MB)\n"
636 " .init : 0x%p - 0x%p (%4ld kB)\n" 636 " .init : 0x%px - 0x%px (%4ld kB)\n"
637 " .data : 0x%p - 0x%p (%4ld kB)\n" 637 " .data : 0x%px - 0x%px (%4ld kB)\n"
638 " .text : 0x%p - 0x%p (%4ld kB)\n", 638 " .text : 0x%px - 0x%px (%4ld kB)\n",
639 639
640 (void*)VMALLOC_START, (void*)VMALLOC_END, 640 (void*)VMALLOC_START, (void*)VMALLOC_END,
641 (VMALLOC_END - VMALLOC_START) >> 20, 641 (VMALLOC_END - VMALLOC_START) >> 20,
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index c51e6ce42e7a..2ed525a44734 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -166,6 +166,7 @@ config PPC
166 select GENERIC_CLOCKEVENTS_BROADCAST if SMP 166 select GENERIC_CLOCKEVENTS_BROADCAST if SMP
167 select GENERIC_CMOS_UPDATE 167 select GENERIC_CMOS_UPDATE
168 select GENERIC_CPU_AUTOPROBE 168 select GENERIC_CPU_AUTOPROBE
169 select GENERIC_CPU_VULNERABILITIES if PPC_BOOK3S_64
169 select GENERIC_IRQ_SHOW 170 select GENERIC_IRQ_SHOW
170 select GENERIC_IRQ_SHOW_LEVEL 171 select GENERIC_IRQ_SHOW_LEVEL
171 select GENERIC_SMP_IDLE_THREAD 172 select GENERIC_SMP_IDLE_THREAD
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
index a703452d67b6..555e22d5e07f 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -209,5 +209,11 @@ exc_##label##_book3e:
209 ori r3,r3,vector_offset@l; \ 209 ori r3,r3,vector_offset@l; \
210 mtspr SPRN_IVOR##vector_number,r3; 210 mtspr SPRN_IVOR##vector_number,r3;
211 211
212#define RFI_TO_KERNEL \
213 rfi
214
215#define RFI_TO_USER \
216 rfi
217
212#endif /* _ASM_POWERPC_EXCEPTION_64E_H */ 218#endif /* _ASM_POWERPC_EXCEPTION_64E_H */
213 219
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index b27205297e1d..7197b179c1b1 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -74,6 +74,59 @@
74 */ 74 */
75#define EX_R3 EX_DAR 75#define EX_R3 EX_DAR
76 76
77/*
78 * Macros for annotating the expected destination of (h)rfid
79 *
80 * The nop instructions allow us to insert one or more instructions to flush the
81 * L1-D cache when returning to userspace or a guest.
82 */
83#define RFI_FLUSH_SLOT \
84 RFI_FLUSH_FIXUP_SECTION; \
85 nop; \
86 nop; \
87 nop
88
89#define RFI_TO_KERNEL \
90 rfid
91
92#define RFI_TO_USER \
93 RFI_FLUSH_SLOT; \
94 rfid; \
95 b rfi_flush_fallback
96
97#define RFI_TO_USER_OR_KERNEL \
98 RFI_FLUSH_SLOT; \
99 rfid; \
100 b rfi_flush_fallback
101
102#define RFI_TO_GUEST \
103 RFI_FLUSH_SLOT; \
104 rfid; \
105 b rfi_flush_fallback
106
107#define HRFI_TO_KERNEL \
108 hrfid
109
110#define HRFI_TO_USER \
111 RFI_FLUSH_SLOT; \
112 hrfid; \
113 b hrfi_flush_fallback
114
115#define HRFI_TO_USER_OR_KERNEL \
116 RFI_FLUSH_SLOT; \
117 hrfid; \
118 b hrfi_flush_fallback
119
120#define HRFI_TO_GUEST \
121 RFI_FLUSH_SLOT; \
122 hrfid; \
123 b hrfi_flush_fallback
124
125#define HRFI_TO_UNKNOWN \
126 RFI_FLUSH_SLOT; \
127 hrfid; \
128 b hrfi_flush_fallback
129
77#ifdef CONFIG_RELOCATABLE 130#ifdef CONFIG_RELOCATABLE
78#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \ 131#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
79 mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \ 132 mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
@@ -218,7 +271,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
218 mtspr SPRN_##h##SRR0,r12; \ 271 mtspr SPRN_##h##SRR0,r12; \
219 mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ 272 mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
220 mtspr SPRN_##h##SRR1,r10; \ 273 mtspr SPRN_##h##SRR1,r10; \
221 h##rfid; \ 274 h##RFI_TO_KERNEL; \
222 b . /* prevent speculative execution */ 275 b . /* prevent speculative execution */
223#define EXCEPTION_PROLOG_PSERIES_1(label, h) \ 276#define EXCEPTION_PROLOG_PSERIES_1(label, h) \
224 __EXCEPTION_PROLOG_PSERIES_1(label, h) 277 __EXCEPTION_PROLOG_PSERIES_1(label, h)
@@ -232,7 +285,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
232 mtspr SPRN_##h##SRR0,r12; \ 285 mtspr SPRN_##h##SRR0,r12; \
233 mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ 286 mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
234 mtspr SPRN_##h##SRR1,r10; \ 287 mtspr SPRN_##h##SRR1,r10; \
235 h##rfid; \ 288 h##RFI_TO_KERNEL; \
236 b . /* prevent speculative execution */ 289 b . /* prevent speculative execution */
237 290
238#define EXCEPTION_PROLOG_PSERIES_1_NORI(label, h) \ 291#define EXCEPTION_PROLOG_PSERIES_1_NORI(label, h) \
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index 8f88f771cc55..1e82eb3caabd 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -187,7 +187,20 @@ label##3: \
187 FTR_ENTRY_OFFSET label##1b-label##3b; \ 187 FTR_ENTRY_OFFSET label##1b-label##3b; \
188 .popsection; 188 .popsection;
189 189
190#define RFI_FLUSH_FIXUP_SECTION \
191951: \
192 .pushsection __rfi_flush_fixup,"a"; \
193 .align 2; \
194952: \
195 FTR_ENTRY_OFFSET 951b-952b; \
196 .popsection;
197
198
190#ifndef __ASSEMBLY__ 199#ifndef __ASSEMBLY__
200#include <linux/types.h>
201
202extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
203
191void apply_feature_fixups(void); 204void apply_feature_fixups(void);
192void setup_feature_keys(void); 205void setup_feature_keys(void);
193#endif 206#endif
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index a409177be8bd..eca3f9c68907 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -241,6 +241,7 @@
241#define H_GET_HCA_INFO 0x1B8 241#define H_GET_HCA_INFO 0x1B8
242#define H_GET_PERF_COUNT 0x1BC 242#define H_GET_PERF_COUNT 0x1BC
243#define H_MANAGE_TRACE 0x1C0 243#define H_MANAGE_TRACE 0x1C0
244#define H_GET_CPU_CHARACTERISTICS 0x1C8
244#define H_FREE_LOGICAL_LAN_BUFFER 0x1D4 245#define H_FREE_LOGICAL_LAN_BUFFER 0x1D4
245#define H_QUERY_INT_STATE 0x1E4 246#define H_QUERY_INT_STATE 0x1E4
246#define H_POLL_PENDING 0x1D8 247#define H_POLL_PENDING 0x1D8
@@ -330,6 +331,17 @@
330#define H_SIGNAL_SYS_RESET_ALL_OTHERS -2 331#define H_SIGNAL_SYS_RESET_ALL_OTHERS -2
331/* >= 0 values are CPU number */ 332/* >= 0 values are CPU number */
332 333
334/* H_GET_CPU_CHARACTERISTICS return values */
335#define H_CPU_CHAR_SPEC_BAR_ORI31 (1ull << 63) // IBM bit 0
336#define H_CPU_CHAR_BCCTRL_SERIALISED (1ull << 62) // IBM bit 1
337#define H_CPU_CHAR_L1D_FLUSH_ORI30 (1ull << 61) // IBM bit 2
338#define H_CPU_CHAR_L1D_FLUSH_TRIG2 (1ull << 60) // IBM bit 3
339#define H_CPU_CHAR_L1D_THREAD_PRIV (1ull << 59) // IBM bit 4
340
341#define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0
342#define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1
343#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2
344
333/* Flag values used in H_REGISTER_PROC_TBL hcall */ 345/* Flag values used in H_REGISTER_PROC_TBL hcall */
334#define PROC_TABLE_OP_MASK 0x18 346#define PROC_TABLE_OP_MASK 0x18
335#define PROC_TABLE_DEREG 0x10 347#define PROC_TABLE_DEREG 0x10
@@ -341,6 +353,7 @@
341#define PROC_TABLE_GTSE 0x01 353#define PROC_TABLE_GTSE 0x01
342 354
343#ifndef __ASSEMBLY__ 355#ifndef __ASSEMBLY__
356#include <linux/types.h>
344 357
345/** 358/**
346 * plpar_hcall_norets: - Make a pseries hypervisor call with no return arguments 359 * plpar_hcall_norets: - Make a pseries hypervisor call with no return arguments
@@ -436,6 +449,11 @@ static inline unsigned int get_longbusy_msecs(int longbusy_rc)
436 } 449 }
437} 450}
438 451
452struct h_cpu_char_result {
453 u64 character;
454 u64 behaviour;
455};
456
439#endif /* __ASSEMBLY__ */ 457#endif /* __ASSEMBLY__ */
440#endif /* __KERNEL__ */ 458#endif /* __KERNEL__ */
441#endif /* _ASM_POWERPC_HVCALL_H */ 459#endif /* _ASM_POWERPC_HVCALL_H */
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 73b92017b6d7..cd2fc1cc1cc7 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -76,6 +76,7 @@ struct machdep_calls {
76 76
77 void __noreturn (*restart)(char *cmd); 77 void __noreturn (*restart)(char *cmd);
78 void __noreturn (*halt)(void); 78 void __noreturn (*halt)(void);
79 void (*panic)(char *str);
79 void (*cpu_die)(void); 80 void (*cpu_die)(void);
80 81
81 long (*time_init)(void); /* Optional, may be NULL */ 82 long (*time_init)(void); /* Optional, may be NULL */
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 6177d43f0ce8..e2a2b8400490 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -160,9 +160,10 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
160#endif 160#endif
161} 161}
162 162
163static inline void arch_dup_mmap(struct mm_struct *oldmm, 163static inline int arch_dup_mmap(struct mm_struct *oldmm,
164 struct mm_struct *mm) 164 struct mm_struct *mm)
165{ 165{
166 return 0;
166} 167}
167 168
168#ifndef CONFIG_PPC_BOOK3S_64 169#ifndef CONFIG_PPC_BOOK3S_64
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 3892db93b837..23ac7fc0af23 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -232,6 +232,16 @@ struct paca_struct {
232 struct sibling_subcore_state *sibling_subcore_state; 232 struct sibling_subcore_state *sibling_subcore_state;
233#endif 233#endif
234#endif 234#endif
235#ifdef CONFIG_PPC_BOOK3S_64
236 /*
237 * rfi fallback flush must be in its own cacheline to prevent
238 * other paca data leaking into the L1d
239 */
240 u64 exrfi[EX_SIZE] __aligned(0x80);
241 void *rfi_flush_fallback_area;
242 u64 l1d_flush_congruence;
243 u64 l1d_flush_sets;
244#endif
235}; 245};
236 246
237extern void copy_mm_to_paca(struct mm_struct *mm); 247extern void copy_mm_to_paca(struct mm_struct *mm);
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index 7f01b22fa6cb..55eddf50d149 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -326,4 +326,18 @@ static inline long plapr_signal_sys_reset(long cpu)
326 return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu); 326 return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu);
327} 327}
328 328
329static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
330{
331 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
332 long rc;
333
334 rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf);
335 if (rc == H_SUCCESS) {
336 p->character = retbuf[0];
337 p->behaviour = retbuf[1];
338 }
339
340 return rc;
341}
342
329#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */ 343#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 257d23dbf55d..469b7fdc9be4 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -24,6 +24,7 @@ extern void reloc_got2(unsigned long);
24 24
25void check_for_initrd(void); 25void check_for_initrd(void);
26void initmem_init(void); 26void initmem_init(void);
27void setup_panic(void);
27#define ARCH_PANIC_TIMEOUT 180 28#define ARCH_PANIC_TIMEOUT 180
28 29
29#ifdef CONFIG_PPC_PSERIES 30#ifdef CONFIG_PPC_PSERIES
@@ -38,6 +39,19 @@ static inline void pseries_big_endian_exceptions(void) {}
38static inline void pseries_little_endian_exceptions(void) {} 39static inline void pseries_little_endian_exceptions(void) {}
39#endif /* CONFIG_PPC_PSERIES */ 40#endif /* CONFIG_PPC_PSERIES */
40 41
42void rfi_flush_enable(bool enable);
43
44/* These are bit flags */
45enum l1d_flush_type {
46 L1D_FLUSH_NONE = 0x1,
47 L1D_FLUSH_FALLBACK = 0x2,
48 L1D_FLUSH_ORI = 0x4,
49 L1D_FLUSH_MTTRIG = 0x8,
50};
51
52void __init setup_rfi_flush(enum l1d_flush_type, bool enable);
53void do_rfi_flush_fixups(enum l1d_flush_type types);
54
41#endif /* !__ASSEMBLY__ */ 55#endif /* !__ASSEMBLY__ */
42 56
43#endif /* _ASM_POWERPC_SETUP_H */ 57#endif /* _ASM_POWERPC_SETUP_H */
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index 0d960ef78a9a..1a6ed5919ffd 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -1,6 +1,7 @@
1# UAPI Header export list 1# UAPI Header export list
2include include/uapi/asm-generic/Kbuild.asm 2include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += bpf_perf_event.h
4generic-y += param.h 5generic-y += param.h
5generic-y += poll.h 6generic-y += poll.h
6generic-y += resource.h 7generic-y += resource.h
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 61d6049f4c1e..637b7263cb86 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -443,6 +443,31 @@ struct kvm_ppc_rmmu_info {
443 __u32 ap_encodings[8]; 443 __u32 ap_encodings[8];
444}; 444};
445 445
446/* For KVM_PPC_GET_CPU_CHAR */
447struct kvm_ppc_cpu_char {
448 __u64 character; /* characteristics of the CPU */
449 __u64 behaviour; /* recommended software behaviour */
450 __u64 character_mask; /* valid bits in character */
451 __u64 behaviour_mask; /* valid bits in behaviour */
452};
453
454/*
455 * Values for character and character_mask.
456 * These are identical to the values used by H_GET_CPU_CHARACTERISTICS.
457 */
458#define KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 (1ULL << 63)
459#define KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED (1ULL << 62)
460#define KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 (1ULL << 61)
461#define KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 (1ULL << 60)
462#define KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV (1ULL << 59)
463#define KVM_PPC_CPU_CHAR_BR_HINT_HONOURED (1ULL << 58)
464#define KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF (1ULL << 57)
465#define KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS (1ULL << 56)
466
467#define KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY (1ULL << 63)
468#define KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR (1ULL << 62)
469#define KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ULL << 61)
470
446/* Per-vcpu XICS interrupt controller state */ 471/* Per-vcpu XICS interrupt controller state */
447#define KVM_REG_PPC_ICP_STATE (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c) 472#define KVM_REG_PPC_ICP_STATE (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c)
448 473
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 6b958414b4e0..f390d57cf2e1 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -237,6 +237,11 @@ int main(void)
237 OFFSET(PACA_NMI_EMERG_SP, paca_struct, nmi_emergency_sp); 237 OFFSET(PACA_NMI_EMERG_SP, paca_struct, nmi_emergency_sp);
238 OFFSET(PACA_IN_MCE, paca_struct, in_mce); 238 OFFSET(PACA_IN_MCE, paca_struct, in_mce);
239 OFFSET(PACA_IN_NMI, paca_struct, in_nmi); 239 OFFSET(PACA_IN_NMI, paca_struct, in_nmi);
240 OFFSET(PACA_RFI_FLUSH_FALLBACK_AREA, paca_struct, rfi_flush_fallback_area);
241 OFFSET(PACA_EXRFI, paca_struct, exrfi);
242 OFFSET(PACA_L1D_FLUSH_CONGRUENCE, paca_struct, l1d_flush_congruence);
243 OFFSET(PACA_L1D_FLUSH_SETS, paca_struct, l1d_flush_sets);
244
240#endif 245#endif
241 OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id); 246 OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id);
242 OFFSET(PACAKEXECSTATE, paca_struct, kexec_state); 247 OFFSET(PACAKEXECSTATE, paca_struct, kexec_state);
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 610955fe8b81..679bbe714e85 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -102,6 +102,7 @@ _GLOBAL(__setup_cpu_power9)
102 li r0,0 102 li r0,0
103 mtspr SPRN_PSSCR,r0 103 mtspr SPRN_PSSCR,r0
104 mtspr SPRN_LPID,r0 104 mtspr SPRN_LPID,r0
105 mtspr SPRN_PID,r0
105 mfspr r3,SPRN_LPCR 106 mfspr r3,SPRN_LPCR
106 LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) 107 LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
107 or r3, r3, r4 108 or r3, r3, r4
@@ -126,6 +127,7 @@ _GLOBAL(__restore_cpu_power9)
126 li r0,0 127 li r0,0
127 mtspr SPRN_PSSCR,r0 128 mtspr SPRN_PSSCR,r0
128 mtspr SPRN_LPID,r0 129 mtspr SPRN_LPID,r0
130 mtspr SPRN_PID,r0
129 mfspr r3,SPRN_LPCR 131 mfspr r3,SPRN_LPCR
130 LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) 132 LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
131 or r3, r3, r4 133 or r3, r3, r4
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 3320bcac7192..2748584b767d 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -37,6 +37,11 @@
37#include <asm/tm.h> 37#include <asm/tm.h>
38#include <asm/ppc-opcode.h> 38#include <asm/ppc-opcode.h>
39#include <asm/export.h> 39#include <asm/export.h>
40#ifdef CONFIG_PPC_BOOK3S
41#include <asm/exception-64s.h>
42#else
43#include <asm/exception-64e.h>
44#endif
40 45
41/* 46/*
42 * System calls. 47 * System calls.
@@ -262,13 +267,23 @@ BEGIN_FTR_SECTION
262END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 267END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
263 268
264 ld r13,GPR13(r1) /* only restore r13 if returning to usermode */ 269 ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
270 ld r2,GPR2(r1)
271 ld r1,GPR1(r1)
272 mtlr r4
273 mtcr r5
274 mtspr SPRN_SRR0,r7
275 mtspr SPRN_SRR1,r8
276 RFI_TO_USER
277 b . /* prevent speculative execution */
278
279 /* exit to kernel */
2651: ld r2,GPR2(r1) 2801: ld r2,GPR2(r1)
266 ld r1,GPR1(r1) 281 ld r1,GPR1(r1)
267 mtlr r4 282 mtlr r4
268 mtcr r5 283 mtcr r5
269 mtspr SPRN_SRR0,r7 284 mtspr SPRN_SRR0,r7
270 mtspr SPRN_SRR1,r8 285 mtspr SPRN_SRR1,r8
271 RFI 286 RFI_TO_KERNEL
272 b . /* prevent speculative execution */ 287 b . /* prevent speculative execution */
273 288
274.Lsyscall_error: 289.Lsyscall_error:
@@ -397,8 +412,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
397 mtmsrd r10, 1 412 mtmsrd r10, 1
398 mtspr SPRN_SRR0, r11 413 mtspr SPRN_SRR0, r11
399 mtspr SPRN_SRR1, r12 414 mtspr SPRN_SRR1, r12
400 415 RFI_TO_USER
401 rfid
402 b . /* prevent speculative execution */ 416 b . /* prevent speculative execution */
403#endif 417#endif
404_ASM_NOKPROBE_SYMBOL(system_call_common); 418_ASM_NOKPROBE_SYMBOL(system_call_common);
@@ -878,7 +892,7 @@ BEGIN_FTR_SECTION
878END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 892END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
879 ACCOUNT_CPU_USER_EXIT(r13, r2, r4) 893 ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
880 REST_GPR(13, r1) 894 REST_GPR(13, r1)
8811: 895
882 mtspr SPRN_SRR1,r3 896 mtspr SPRN_SRR1,r3
883 897
884 ld r2,_CCR(r1) 898 ld r2,_CCR(r1)
@@ -891,8 +905,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
891 ld r3,GPR3(r1) 905 ld r3,GPR3(r1)
892 ld r4,GPR4(r1) 906 ld r4,GPR4(r1)
893 ld r1,GPR1(r1) 907 ld r1,GPR1(r1)
908 RFI_TO_USER
909 b . /* prevent speculative execution */
894 910
895 rfid 9111: mtspr SPRN_SRR1,r3
912
913 ld r2,_CCR(r1)
914 mtcrf 0xFF,r2
915 ld r2,_NIP(r1)
916 mtspr SPRN_SRR0,r2
917
918 ld r0,GPR0(r1)
919 ld r2,GPR2(r1)
920 ld r3,GPR3(r1)
921 ld r4,GPR4(r1)
922 ld r1,GPR1(r1)
923 RFI_TO_KERNEL
896 b . /* prevent speculative execution */ 924 b . /* prevent speculative execution */
897 925
898#endif /* CONFIG_PPC_BOOK3E */ 926#endif /* CONFIG_PPC_BOOK3E */
@@ -1073,7 +1101,7 @@ __enter_rtas:
1073 1101
1074 mtspr SPRN_SRR0,r5 1102 mtspr SPRN_SRR0,r5
1075 mtspr SPRN_SRR1,r6 1103 mtspr SPRN_SRR1,r6
1076 rfid 1104 RFI_TO_KERNEL
1077 b . /* prevent speculative execution */ 1105 b . /* prevent speculative execution */
1078 1106
1079rtas_return_loc: 1107rtas_return_loc:
@@ -1098,7 +1126,7 @@ rtas_return_loc:
1098 1126
1099 mtspr SPRN_SRR0,r3 1127 mtspr SPRN_SRR0,r3
1100 mtspr SPRN_SRR1,r4 1128 mtspr SPRN_SRR1,r4
1101 rfid 1129 RFI_TO_KERNEL
1102 b . /* prevent speculative execution */ 1130 b . /* prevent speculative execution */
1103_ASM_NOKPROBE_SYMBOL(__enter_rtas) 1131_ASM_NOKPROBE_SYMBOL(__enter_rtas)
1104_ASM_NOKPROBE_SYMBOL(rtas_return_loc) 1132_ASM_NOKPROBE_SYMBOL(rtas_return_loc)
@@ -1171,7 +1199,7 @@ _GLOBAL(enter_prom)
1171 LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE) 1199 LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
1172 andc r11,r11,r12 1200 andc r11,r11,r12
1173 mtsrr1 r11 1201 mtsrr1 r11
1174 rfid 1202 RFI_TO_KERNEL
1175#endif /* CONFIG_PPC_BOOK3E */ 1203#endif /* CONFIG_PPC_BOOK3E */
1176 1204
11771: /* Return from OF */ 12051: /* Return from OF */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e441b469dc8f..2dc10bf646b8 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -256,7 +256,7 @@ BEGIN_FTR_SECTION
256 LOAD_HANDLER(r12, machine_check_handle_early) 256 LOAD_HANDLER(r12, machine_check_handle_early)
2571: mtspr SPRN_SRR0,r12 2571: mtspr SPRN_SRR0,r12
258 mtspr SPRN_SRR1,r11 258 mtspr SPRN_SRR1,r11
259 rfid 259 RFI_TO_KERNEL
260 b . /* prevent speculative execution */ 260 b . /* prevent speculative execution */
2612: 2612:
262 /* Stack overflow. Stay on emergency stack and panic. 262 /* Stack overflow. Stay on emergency stack and panic.
@@ -445,7 +445,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
445 li r3,MSR_ME 445 li r3,MSR_ME
446 andc r10,r10,r3 /* Turn off MSR_ME */ 446 andc r10,r10,r3 /* Turn off MSR_ME */
447 mtspr SPRN_SRR1,r10 447 mtspr SPRN_SRR1,r10
448 rfid 448 RFI_TO_KERNEL
449 b . 449 b .
4502: 4502:
451 /* 451 /*
@@ -463,7 +463,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
463 */ 463 */
464 bl machine_check_queue_event 464 bl machine_check_queue_event
465 MACHINE_CHECK_HANDLER_WINDUP 465 MACHINE_CHECK_HANDLER_WINDUP
466 rfid 466 RFI_TO_USER_OR_KERNEL
4679: 4679:
468 /* Deliver the machine check to host kernel in V mode. */ 468 /* Deliver the machine check to host kernel in V mode. */
469 MACHINE_CHECK_HANDLER_WINDUP 469 MACHINE_CHECK_HANDLER_WINDUP
@@ -598,6 +598,9 @@ EXC_COMMON_BEGIN(slb_miss_common)
598 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ 598 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
599 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ 599 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
600 600
601 andi. r9,r11,MSR_PR // Check for exception from userspace
602 cmpdi cr4,r9,MSR_PR // And save the result in CR4 for later
603
601 /* 604 /*
602 * Test MSR_RI before calling slb_allocate_realmode, because the 605 * Test MSR_RI before calling slb_allocate_realmode, because the
603 * MSR in r11 gets clobbered. However we still want to allocate 606 * MSR in r11 gets clobbered. However we still want to allocate
@@ -624,9 +627,12 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
624 627
625 /* All done -- return from exception. */ 628 /* All done -- return from exception. */
626 629
630 bne cr4,1f /* returning to kernel */
631
627.machine push 632.machine push
628.machine "power4" 633.machine "power4"
629 mtcrf 0x80,r9 634 mtcrf 0x80,r9
635 mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */
630 mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */ 636 mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */
631 mtcrf 0x02,r9 /* I/D indication is in cr6 */ 637 mtcrf 0x02,r9 /* I/D indication is in cr6 */
632 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ 638 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
@@ -640,9 +646,30 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
640 ld r11,PACA_EXSLB+EX_R11(r13) 646 ld r11,PACA_EXSLB+EX_R11(r13)
641 ld r12,PACA_EXSLB+EX_R12(r13) 647 ld r12,PACA_EXSLB+EX_R12(r13)
642 ld r13,PACA_EXSLB+EX_R13(r13) 648 ld r13,PACA_EXSLB+EX_R13(r13)
643 rfid 649 RFI_TO_USER
650 b . /* prevent speculative execution */
6511:
652.machine push
653.machine "power4"
654 mtcrf 0x80,r9
655 mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */
656 mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */
657 mtcrf 0x02,r9 /* I/D indication is in cr6 */
658 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
659.machine pop
660
661 RESTORE_CTR(r9, PACA_EXSLB)
662 RESTORE_PPR_PACA(PACA_EXSLB, r9)
663 mr r3,r12
664 ld r9,PACA_EXSLB+EX_R9(r13)
665 ld r10,PACA_EXSLB+EX_R10(r13)
666 ld r11,PACA_EXSLB+EX_R11(r13)
667 ld r12,PACA_EXSLB+EX_R12(r13)
668 ld r13,PACA_EXSLB+EX_R13(r13)
669 RFI_TO_KERNEL
644 b . /* prevent speculative execution */ 670 b . /* prevent speculative execution */
645 671
672
6462: std r3,PACA_EXSLB+EX_DAR(r13) 6732: std r3,PACA_EXSLB+EX_DAR(r13)
647 mr r3,r12 674 mr r3,r12
648 mfspr r11,SPRN_SRR0 675 mfspr r11,SPRN_SRR0
@@ -651,7 +678,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
651 mtspr SPRN_SRR0,r10 678 mtspr SPRN_SRR0,r10
652 ld r10,PACAKMSR(r13) 679 ld r10,PACAKMSR(r13)
653 mtspr SPRN_SRR1,r10 680 mtspr SPRN_SRR1,r10
654 rfid 681 RFI_TO_KERNEL
655 b . 682 b .
656 683
6578: std r3,PACA_EXSLB+EX_DAR(r13) 6848: std r3,PACA_EXSLB+EX_DAR(r13)
@@ -662,7 +689,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
662 mtspr SPRN_SRR0,r10 689 mtspr SPRN_SRR0,r10
663 ld r10,PACAKMSR(r13) 690 ld r10,PACAKMSR(r13)
664 mtspr SPRN_SRR1,r10 691 mtspr SPRN_SRR1,r10
665 rfid 692 RFI_TO_KERNEL
666 b . 693 b .
667 694
668EXC_COMMON_BEGIN(unrecov_slb) 695EXC_COMMON_BEGIN(unrecov_slb)
@@ -901,7 +928,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
901 mtspr SPRN_SRR0,r10 ; \ 928 mtspr SPRN_SRR0,r10 ; \
902 ld r10,PACAKMSR(r13) ; \ 929 ld r10,PACAKMSR(r13) ; \
903 mtspr SPRN_SRR1,r10 ; \ 930 mtspr SPRN_SRR1,r10 ; \
904 rfid ; \ 931 RFI_TO_KERNEL ; \
905 b . ; /* prevent speculative execution */ 932 b . ; /* prevent speculative execution */
906 933
907#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH 934#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
@@ -917,7 +944,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
917 xori r12,r12,MSR_LE ; \ 944 xori r12,r12,MSR_LE ; \
918 mtspr SPRN_SRR1,r12 ; \ 945 mtspr SPRN_SRR1,r12 ; \
919 mr r13,r9 ; \ 946 mr r13,r9 ; \
920 rfid ; /* return to userspace */ \ 947 RFI_TO_USER ; /* return to userspace */ \
921 b . ; /* prevent speculative execution */ 948 b . ; /* prevent speculative execution */
922#else 949#else
923#define SYSCALL_FASTENDIAN_TEST 950#define SYSCALL_FASTENDIAN_TEST
@@ -1063,7 +1090,7 @@ TRAMP_REAL_BEGIN(hmi_exception_early)
1063 mtcr r11 1090 mtcr r11
1064 REST_GPR(11, r1) 1091 REST_GPR(11, r1)
1065 ld r1,GPR1(r1) 1092 ld r1,GPR1(r1)
1066 hrfid 1093 HRFI_TO_USER_OR_KERNEL
1067 1094
10681: mtcr r11 10951: mtcr r11
1069 REST_GPR(11, r1) 1096 REST_GPR(11, r1)
@@ -1314,7 +1341,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1314 ld r11,PACA_EXGEN+EX_R11(r13) 1341 ld r11,PACA_EXGEN+EX_R11(r13)
1315 ld r12,PACA_EXGEN+EX_R12(r13) 1342 ld r12,PACA_EXGEN+EX_R12(r13)
1316 ld r13,PACA_EXGEN+EX_R13(r13) 1343 ld r13,PACA_EXGEN+EX_R13(r13)
1317 HRFID 1344 HRFI_TO_UNKNOWN
1318 b . 1345 b .
1319#endif 1346#endif
1320 1347
@@ -1418,10 +1445,94 @@ masked_##_H##interrupt: \
1418 ld r10,PACA_EXGEN+EX_R10(r13); \ 1445 ld r10,PACA_EXGEN+EX_R10(r13); \
1419 ld r11,PACA_EXGEN+EX_R11(r13); \ 1446 ld r11,PACA_EXGEN+EX_R11(r13); \
1420 /* returns to kernel where r13 must be set up, so don't restore it */ \ 1447 /* returns to kernel where r13 must be set up, so don't restore it */ \
1421 ##_H##rfid; \ 1448 ##_H##RFI_TO_KERNEL; \
1422 b .; \ 1449 b .; \
1423 MASKED_DEC_HANDLER(_H) 1450 MASKED_DEC_HANDLER(_H)
1424 1451
1452TRAMP_REAL_BEGIN(rfi_flush_fallback)
1453 SET_SCRATCH0(r13);
1454 GET_PACA(r13);
1455 std r9,PACA_EXRFI+EX_R9(r13)
1456 std r10,PACA_EXRFI+EX_R10(r13)
1457 std r11,PACA_EXRFI+EX_R11(r13)
1458 std r12,PACA_EXRFI+EX_R12(r13)
1459 std r8,PACA_EXRFI+EX_R13(r13)
1460 mfctr r9
1461 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
1462 ld r11,PACA_L1D_FLUSH_SETS(r13)
1463 ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
1464 /*
 1465	 * The load addresses are at staggered offsets within cachelines,
1466 * which suits some pipelines better (on others it should not
1467 * hurt).
1468 */
1469 addi r12,r12,8
1470 mtctr r11
1471 DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
1472
1473 /* order ld/st prior to dcbt stop all streams with flushing */
1474 sync
14751: li r8,0
1476 .rept 8 /* 8-way set associative */
1477 ldx r11,r10,r8
1478 add r8,r8,r12
1479 xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not
1480 add r8,r8,r11 // Add 0, this creates a dependency on the ldx
1481 .endr
1482 addi r10,r10,128 /* 128 byte cache line */
1483 bdnz 1b
1484
1485 mtctr r9
1486 ld r9,PACA_EXRFI+EX_R9(r13)
1487 ld r10,PACA_EXRFI+EX_R10(r13)
1488 ld r11,PACA_EXRFI+EX_R11(r13)
1489 ld r12,PACA_EXRFI+EX_R12(r13)
1490 ld r8,PACA_EXRFI+EX_R13(r13)
1491 GET_SCRATCH0(r13);
1492 rfid
1493
1494TRAMP_REAL_BEGIN(hrfi_flush_fallback)
1495 SET_SCRATCH0(r13);
1496 GET_PACA(r13);
1497 std r9,PACA_EXRFI+EX_R9(r13)
1498 std r10,PACA_EXRFI+EX_R10(r13)
1499 std r11,PACA_EXRFI+EX_R11(r13)
1500 std r12,PACA_EXRFI+EX_R12(r13)
1501 std r8,PACA_EXRFI+EX_R13(r13)
1502 mfctr r9
1503 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
1504 ld r11,PACA_L1D_FLUSH_SETS(r13)
1505 ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
1506 /*
 1507	 * The load addresses are at staggered offsets within cachelines,
1508 * which suits some pipelines better (on others it should not
1509 * hurt).
1510 */
1511 addi r12,r12,8
1512 mtctr r11
1513 DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
1514
1515 /* order ld/st prior to dcbt stop all streams with flushing */
1516 sync
15171: li r8,0
1518 .rept 8 /* 8-way set associative */
1519 ldx r11,r10,r8
1520 add r8,r8,r12
1521 xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not
1522 add r8,r8,r11 // Add 0, this creates a dependency on the ldx
1523 .endr
1524 addi r10,r10,128 /* 128 byte cache line */
1525 bdnz 1b
1526
1527 mtctr r9
1528 ld r9,PACA_EXRFI+EX_R9(r13)
1529 ld r10,PACA_EXRFI+EX_R10(r13)
1530 ld r11,PACA_EXRFI+EX_R11(r13)
1531 ld r12,PACA_EXRFI+EX_R12(r13)
1532 ld r8,PACA_EXRFI+EX_R13(r13)
1533 GET_SCRATCH0(r13);
1534 hrfid
1535
1425/* 1536/*
1426 * Real mode exceptions actually use this too, but alternate 1537 * Real mode exceptions actually use this too, but alternate
1427 * instruction code patches (which end up in the common .text area) 1538 * instruction code patches (which end up in the common .text area)
@@ -1441,7 +1552,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
1441 addi r13, r13, 4 1552 addi r13, r13, 4
1442 mtspr SPRN_SRR0, r13 1553 mtspr SPRN_SRR0, r13
1443 GET_SCRATCH0(r13) 1554 GET_SCRATCH0(r13)
1444 rfid 1555 RFI_TO_KERNEL
1445 b . 1556 b .
1446 1557
1447TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt) 1558TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
@@ -1453,7 +1564,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
1453 addi r13, r13, 4 1564 addi r13, r13, 4
1454 mtspr SPRN_HSRR0, r13 1565 mtspr SPRN_HSRR0, r13
1455 GET_SCRATCH0(r13) 1566 GET_SCRATCH0(r13)
1456 hrfid 1567 HRFI_TO_KERNEL
1457 b . 1568 b .
1458#endif 1569#endif
1459 1570
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 04ea5c04fd24..3c2c2688918f 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -1462,25 +1462,6 @@ static void fadump_init_files(void)
1462 return; 1462 return;
1463} 1463}
1464 1464
1465static int fadump_panic_event(struct notifier_block *this,
1466 unsigned long event, void *ptr)
1467{
1468 /*
1469 * If firmware-assisted dump has been registered then trigger
1470 * firmware-assisted dump and let firmware handle everything
1471 * else. If this returns, then fadump was not registered, so
1472 * go through the rest of the panic path.
1473 */
1474 crash_fadump(NULL, ptr);
1475
1476 return NOTIFY_DONE;
1477}
1478
1479static struct notifier_block fadump_panic_block = {
1480 .notifier_call = fadump_panic_event,
1481 .priority = INT_MIN /* may not return; must be done last */
1482};
1483
1484/* 1465/*
1485 * Prepare for firmware-assisted dump. 1466 * Prepare for firmware-assisted dump.
1486 */ 1467 */
@@ -1513,9 +1494,6 @@ int __init setup_fadump(void)
1513 init_fadump_mem_struct(&fdm, fw_dump.reserve_dump_area_start); 1494 init_fadump_mem_struct(&fdm, fw_dump.reserve_dump_area_start);
1514 fadump_init_files(); 1495 fadump_init_files();
1515 1496
1516 atomic_notifier_chain_register(&panic_notifier_list,
1517 &fadump_panic_block);
1518
1519 return 1; 1497 return 1;
1520} 1498}
1521subsys_initcall(setup_fadump); 1499subsys_initcall(setup_fadump);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 5acb5a176dbe..72be0c32e902 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1403,7 +1403,7 @@ void show_regs(struct pt_regs * regs)
1403 1403
1404 printk("NIP: "REG" LR: "REG" CTR: "REG"\n", 1404 printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
1405 regs->nip, regs->link, regs->ctr); 1405 regs->nip, regs->link, regs->ctr);
1406 printk("REGS: %p TRAP: %04lx %s (%s)\n", 1406 printk("REGS: %px TRAP: %04lx %s (%s)\n",
1407 regs, regs->trap, print_tainted(), init_utsname()->release); 1407 regs, regs->trap, print_tainted(), init_utsname()->release);
1408 printk("MSR: "REG" ", regs->msr); 1408 printk("MSR: "REG" ", regs->msr);
1409 print_msr_bits(regs->msr); 1409 print_msr_bits(regs->msr);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 2075322cd225..8fd3a70047f1 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -242,14 +242,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
242 unsigned short maj; 242 unsigned short maj;
243 unsigned short min; 243 unsigned short min;
244 244
245 /* We only show online cpus: disable preempt (overzealous, I
246 * knew) to prevent cpu going down. */
247 preempt_disable();
248 if (!cpu_online(cpu_id)) {
249 preempt_enable();
250 return 0;
251 }
252
253#ifdef CONFIG_SMP 245#ifdef CONFIG_SMP
254 pvr = per_cpu(cpu_pvr, cpu_id); 246 pvr = per_cpu(cpu_pvr, cpu_id);
255#else 247#else
@@ -358,9 +350,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
358#ifdef CONFIG_SMP 350#ifdef CONFIG_SMP
359 seq_printf(m, "\n"); 351 seq_printf(m, "\n");
360#endif 352#endif
361
362 preempt_enable();
363
364 /* If this is the last cpu, print the summary */ 353 /* If this is the last cpu, print the summary */
365 if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids) 354 if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
366 show_cpuinfo_summary(m); 355 show_cpuinfo_summary(m);
@@ -704,6 +693,30 @@ int check_legacy_ioport(unsigned long base_port)
704} 693}
705EXPORT_SYMBOL(check_legacy_ioport); 694EXPORT_SYMBOL(check_legacy_ioport);
706 695
696static int ppc_panic_event(struct notifier_block *this,
697 unsigned long event, void *ptr)
698{
699 /*
700 * If firmware-assisted dump has been registered then trigger
701 * firmware-assisted dump and let firmware handle everything else.
702 */
703 crash_fadump(NULL, ptr);
704 ppc_md.panic(ptr); /* May not return */
705 return NOTIFY_DONE;
706}
707
708static struct notifier_block ppc_panic_block = {
709 .notifier_call = ppc_panic_event,
710 .priority = INT_MIN /* may not return; must be done last */
711};
712
713void __init setup_panic(void)
714{
715 if (!ppc_md.panic)
716 return;
717 atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
718}
719
707#ifdef CONFIG_CHECK_CACHE_COHERENCY 720#ifdef CONFIG_CHECK_CACHE_COHERENCY
708/* 721/*
709 * For platforms that have configurable cache-coherency. This function 722 * For platforms that have configurable cache-coherency. This function
@@ -848,6 +861,9 @@ void __init setup_arch(char **cmdline_p)
848 /* Probe the machine type, establish ppc_md. */ 861 /* Probe the machine type, establish ppc_md. */
849 probe_machine(); 862 probe_machine();
850 863
864 /* Setup panic notifier if requested by the platform. */
865 setup_panic();
866
851 /* 867 /*
852 * Configure ppc_md.power_save (ppc32 only, 64-bit machines do 868 * Configure ppc_md.power_save (ppc32 only, 64-bit machines do
853 * it from their respective probe() function. 869 * it from their respective probe() function.
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 8956a9856604..e67413f4a8f0 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -38,6 +38,7 @@
38#include <linux/memory.h> 38#include <linux/memory.h>
39#include <linux/nmi.h> 39#include <linux/nmi.h>
40 40
41#include <asm/debugfs.h>
41#include <asm/io.h> 42#include <asm/io.h>
42#include <asm/kdump.h> 43#include <asm/kdump.h>
43#include <asm/prom.h> 44#include <asm/prom.h>
@@ -801,3 +802,141 @@ static int __init disable_hardlockup_detector(void)
801 return 0; 802 return 0;
802} 803}
803early_initcall(disable_hardlockup_detector); 804early_initcall(disable_hardlockup_detector);
805
806#ifdef CONFIG_PPC_BOOK3S_64
807static enum l1d_flush_type enabled_flush_types;
808static void *l1d_flush_fallback_area;
809static bool no_rfi_flush;
810bool rfi_flush;
811
812static int __init handle_no_rfi_flush(char *p)
813{
814 pr_info("rfi-flush: disabled on command line.");
815 no_rfi_flush = true;
816 return 0;
817}
818early_param("no_rfi_flush", handle_no_rfi_flush);
819
820/*
821 * The RFI flush is not KPTI, but because users will see doco that says to use
822 * nopti we hijack that option here to also disable the RFI flush.
823 */
824static int __init handle_no_pti(char *p)
825{
826 pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
827 handle_no_rfi_flush(NULL);
828 return 0;
829}
830early_param("nopti", handle_no_pti);
831
832static void do_nothing(void *unused)
833{
834 /*
835 * We don't need to do the flush explicitly, just enter+exit kernel is
836 * sufficient, the RFI exit handlers will do the right thing.
837 */
838}
839
840void rfi_flush_enable(bool enable)
841{
842 if (rfi_flush == enable)
843 return;
844
845 if (enable) {
846 do_rfi_flush_fixups(enabled_flush_types);
847 on_each_cpu(do_nothing, NULL, 1);
848 } else
849 do_rfi_flush_fixups(L1D_FLUSH_NONE);
850
851 rfi_flush = enable;
852}
853
854static void init_fallback_flush(void)
855{
856 u64 l1d_size, limit;
857 int cpu;
858
859 l1d_size = ppc64_caches.l1d.size;
860 limit = min(safe_stack_limit(), ppc64_rma_size);
861
862 /*
863 * Align to L1d size, and size it at 2x L1d size, to catch possible
864 * hardware prefetch runoff. We don't have a recipe for load patterns to
865 * reliably avoid the prefetcher.
866 */
867 l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit));
868 memset(l1d_flush_fallback_area, 0, l1d_size * 2);
869
870 for_each_possible_cpu(cpu) {
871 /*
872 * The fallback flush is currently coded for 8-way
873 * associativity. Different associativity is possible, but it
874 * will be treated as 8-way and may not evict the lines as
875 * effectively.
876 *
877 * 128 byte lines are mandatory.
878 */
879 u64 c = l1d_size / 8;
880
881 paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area;
882 paca[cpu].l1d_flush_congruence = c;
883 paca[cpu].l1d_flush_sets = c / 128;
884 }
885}
886
887void __init setup_rfi_flush(enum l1d_flush_type types, bool enable)
888{
889 if (types & L1D_FLUSH_FALLBACK) {
890 pr_info("rfi-flush: Using fallback displacement flush\n");
891 init_fallback_flush();
892 }
893
894 if (types & L1D_FLUSH_ORI)
895 pr_info("rfi-flush: Using ori type flush\n");
896
897 if (types & L1D_FLUSH_MTTRIG)
898 pr_info("rfi-flush: Using mttrig type flush\n");
899
900 enabled_flush_types = types;
901
902 if (!no_rfi_flush)
903 rfi_flush_enable(enable);
904}
905
906#ifdef CONFIG_DEBUG_FS
907static int rfi_flush_set(void *data, u64 val)
908{
909 if (val == 1)
910 rfi_flush_enable(true);
911 else if (val == 0)
912 rfi_flush_enable(false);
913 else
914 return -EINVAL;
915
916 return 0;
917}
918
919static int rfi_flush_get(void *data, u64 *val)
920{
921 *val = rfi_flush ? 1 : 0;
922 return 0;
923}
924
925DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
926
927static __init int rfi_flush_debugfs_init(void)
928{
929 debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
930 return 0;
931}
932device_initcall(rfi_flush_debugfs_init);
933#endif
934
935ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
936{
937 if (rfi_flush)
938 return sprintf(buf, "Mitigation: RFI Flush\n");
939
940 return sprintf(buf, "Vulnerable\n");
941}
942#endif /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 0494e1566ee2..307843d23682 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -132,6 +132,15 @@ SECTIONS
132 /* Read-only data */ 132 /* Read-only data */
133 RO_DATA(PAGE_SIZE) 133 RO_DATA(PAGE_SIZE)
134 134
135#ifdef CONFIG_PPC64
136 . = ALIGN(8);
137 __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
138 __start___rfi_flush_fixup = .;
139 *(__rfi_flush_fixup)
140 __stop___rfi_flush_fixup = .;
141 }
142#endif
143
135 EXCEPTION_TABLE(0) 144 EXCEPTION_TABLE(0)
136 145
137 NOTES :kernel :notes 146 NOTES :kernel :notes
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 29ebe2fd5867..a93d719edc90 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -235,6 +235,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
235 gpte->may_read = true; 235 gpte->may_read = true;
236 gpte->may_write = true; 236 gpte->may_write = true;
237 gpte->page_size = MMU_PAGE_4K; 237 gpte->page_size = MMU_PAGE_4K;
238 gpte->wimg = HPTE_R_M;
238 239
239 return 0; 240 return 0;
240 } 241 }
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 966097232d21..b73dbc9e797d 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -65,11 +65,17 @@ struct kvm_resize_hpt {
65 u32 order; 65 u32 order;
66 66
67 /* These fields protected by kvm->lock */ 67 /* These fields protected by kvm->lock */
68
69 /* Possible values and their usage:
70 * <0 an error occurred during allocation,
71 * -EBUSY allocation is in the progress,
72 * 0 allocation made successfuly.
73 */
68 int error; 74 int error;
69 bool prepare_done;
70 75
71 /* Private to the work thread, until prepare_done is true, 76 /* Private to the work thread, until error != -EBUSY,
72 * then protected by kvm->resize_hpt_sem */ 77 * then protected by kvm->lock.
78 */
73 struct kvm_hpt_info hpt; 79 struct kvm_hpt_info hpt;
74}; 80};
75 81
@@ -159,8 +165,6 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
159 * Reset all the reverse-mapping chains for all memslots 165 * Reset all the reverse-mapping chains for all memslots
160 */ 166 */
161 kvmppc_rmap_reset(kvm); 167 kvmppc_rmap_reset(kvm);
162 /* Ensure that each vcpu will flush its TLB on next entry. */
163 cpumask_setall(&kvm->arch.need_tlb_flush);
164 err = 0; 168 err = 0;
165 goto out; 169 goto out;
166 } 170 }
@@ -176,6 +180,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
176 kvmppc_set_hpt(kvm, &info); 180 kvmppc_set_hpt(kvm, &info);
177 181
178out: 182out:
183 if (err == 0)
184 /* Ensure that each vcpu will flush its TLB on next entry. */
185 cpumask_setall(&kvm->arch.need_tlb_flush);
186
179 mutex_unlock(&kvm->lock); 187 mutex_unlock(&kvm->lock);
180 return err; 188 return err;
181} 189}
@@ -1413,16 +1421,20 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize)
1413 1421
1414static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize) 1422static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize)
1415{ 1423{
1416 BUG_ON(kvm->arch.resize_hpt != resize); 1424 if (WARN_ON(!mutex_is_locked(&kvm->lock)))
1425 return;
1417 1426
1418 if (!resize) 1427 if (!resize)
1419 return; 1428 return;
1420 1429
1421 if (resize->hpt.virt) 1430 if (resize->error != -EBUSY) {
1422 kvmppc_free_hpt(&resize->hpt); 1431 if (resize->hpt.virt)
1432 kvmppc_free_hpt(&resize->hpt);
1433 kfree(resize);
1434 }
1423 1435
1424 kvm->arch.resize_hpt = NULL; 1436 if (kvm->arch.resize_hpt == resize)
1425 kfree(resize); 1437 kvm->arch.resize_hpt = NULL;
1426} 1438}
1427 1439
1428static void resize_hpt_prepare_work(struct work_struct *work) 1440static void resize_hpt_prepare_work(struct work_struct *work)
@@ -1431,17 +1443,41 @@ static void resize_hpt_prepare_work(struct work_struct *work)
1431 struct kvm_resize_hpt, 1443 struct kvm_resize_hpt,
1432 work); 1444 work);
1433 struct kvm *kvm = resize->kvm; 1445 struct kvm *kvm = resize->kvm;
1434 int err; 1446 int err = 0;
1435 1447
1436 resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n", 1448 if (WARN_ON(resize->error != -EBUSY))
1437 resize->order); 1449 return;
1438
1439 err = resize_hpt_allocate(resize);
1440 1450
1441 mutex_lock(&kvm->lock); 1451 mutex_lock(&kvm->lock);
1442 1452
1453 /* Request is still current? */
1454 if (kvm->arch.resize_hpt == resize) {
1455 /* We may request large allocations here:
1456 * do not sleep with kvm->lock held for a while.
1457 */
1458 mutex_unlock(&kvm->lock);
1459
1460 resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
1461 resize->order);
1462
1463 err = resize_hpt_allocate(resize);
1464
1465 /* We have strict assumption about -EBUSY
1466 * when preparing for HPT resize.
1467 */
1468 if (WARN_ON(err == -EBUSY))
1469 err = -EINPROGRESS;
1470
1471 mutex_lock(&kvm->lock);
1472 /* It is possible that kvm->arch.resize_hpt != resize
1473 * after we grab kvm->lock again.
1474 */
1475 }
1476
1443 resize->error = err; 1477 resize->error = err;
1444 resize->prepare_done = true; 1478
1479 if (kvm->arch.resize_hpt != resize)
1480 resize_hpt_release(kvm, resize);
1445 1481
1446 mutex_unlock(&kvm->lock); 1482 mutex_unlock(&kvm->lock);
1447} 1483}
@@ -1466,14 +1502,12 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
1466 1502
1467 if (resize) { 1503 if (resize) {
1468 if (resize->order == shift) { 1504 if (resize->order == shift) {
1469 /* Suitable resize in progress */ 1505 /* Suitable resize in progress? */
1470 if (resize->prepare_done) { 1506 ret = resize->error;
1471 ret = resize->error; 1507 if (ret == -EBUSY)
1472 if (ret != 0)
1473 resize_hpt_release(kvm, resize);
1474 } else {
1475 ret = 100; /* estimated time in ms */ 1508 ret = 100; /* estimated time in ms */
1476 } 1509 else if (ret)
1510 resize_hpt_release(kvm, resize);
1477 1511
1478 goto out; 1512 goto out;
1479 } 1513 }
@@ -1493,6 +1527,8 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
1493 ret = -ENOMEM; 1527 ret = -ENOMEM;
1494 goto out; 1528 goto out;
1495 } 1529 }
1530
1531 resize->error = -EBUSY;
1496 resize->order = shift; 1532 resize->order = shift;
1497 resize->kvm = kvm; 1533 resize->kvm = kvm;
1498 INIT_WORK(&resize->work, resize_hpt_prepare_work); 1534 INIT_WORK(&resize->work, resize_hpt_prepare_work);
@@ -1547,16 +1583,12 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
1547 if (!resize || (resize->order != shift)) 1583 if (!resize || (resize->order != shift))
1548 goto out; 1584 goto out;
1549 1585
1550 ret = -EBUSY;
1551 if (!resize->prepare_done)
1552 goto out;
1553
1554 ret = resize->error; 1586 ret = resize->error;
1555 if (ret != 0) 1587 if (ret)
1556 goto out; 1588 goto out;
1557 1589
1558 ret = resize_hpt_rehash(resize); 1590 ret = resize_hpt_rehash(resize);
1559 if (ret != 0) 1591 if (ret)
1560 goto out; 1592 goto out;
1561 1593
1562 resize_hpt_pivot(resize); 1594 resize_hpt_pivot(resize);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 2659844784b8..9c61f736c75b 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -79,7 +79,7 @@ _GLOBAL_TOC(kvmppc_hv_entry_trampoline)
79 mtmsrd r0,1 /* clear RI in MSR */ 79 mtmsrd r0,1 /* clear RI in MSR */
80 mtsrr0 r5 80 mtsrr0 r5
81 mtsrr1 r6 81 mtsrr1 r6
82 RFI 82 RFI_TO_KERNEL
83 83
84kvmppc_call_hv_entry: 84kvmppc_call_hv_entry:
85BEGIN_FTR_SECTION 85BEGIN_FTR_SECTION
@@ -199,7 +199,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
199 mtmsrd r6, 1 /* Clear RI in MSR */ 199 mtmsrd r6, 1 /* Clear RI in MSR */
200 mtsrr0 r8 200 mtsrr0 r8
201 mtsrr1 r7 201 mtsrr1 r7
202 RFI 202 RFI_TO_KERNEL
203 203
204 /* Virtual-mode return */ 204 /* Virtual-mode return */
205.Lvirt_return: 205.Lvirt_return:
@@ -1167,8 +1167,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1167 1167
1168 ld r0, VCPU_GPR(R0)(r4) 1168 ld r0, VCPU_GPR(R0)(r4)
1169 ld r4, VCPU_GPR(R4)(r4) 1169 ld r4, VCPU_GPR(R4)(r4)
1170 1170 HRFI_TO_GUEST
1171 hrfid
1172 b . 1171 b .
1173 1172
1174secondary_too_late: 1173secondary_too_late:
@@ -3320,7 +3319,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
3320 ld r4, PACAKMSR(r13) 3319 ld r4, PACAKMSR(r13)
3321 mtspr SPRN_SRR0, r3 3320 mtspr SPRN_SRR0, r3
3322 mtspr SPRN_SRR1, r4 3321 mtspr SPRN_SRR1, r4
3323 rfid 3322 RFI_TO_KERNEL
33249: addi r3, r1, STACK_FRAME_OVERHEAD 33239: addi r3, r1, STACK_FRAME_OVERHEAD
3325 bl kvmppc_bad_interrupt 3324 bl kvmppc_bad_interrupt
3326 b 9b 3325 b 9b
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index d0dc8624198f..7deaeeb14b93 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -60,6 +60,7 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
60#define MSR_USER32 MSR_USER 60#define MSR_USER32 MSR_USER
61#define MSR_USER64 MSR_USER 61#define MSR_USER64 MSR_USER
62#define HW_PAGE_SIZE PAGE_SIZE 62#define HW_PAGE_SIZE PAGE_SIZE
63#define HPTE_R_M _PAGE_COHERENT
63#endif 64#endif
64 65
65static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu) 66static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
@@ -557,6 +558,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
557 pte.eaddr = eaddr; 558 pte.eaddr = eaddr;
558 pte.vpage = eaddr >> 12; 559 pte.vpage = eaddr >> 12;
559 pte.page_size = MMU_PAGE_64K; 560 pte.page_size = MMU_PAGE_64K;
561 pte.wimg = HPTE_R_M;
560 } 562 }
561 563
562 switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) { 564 switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 42a4b237df5f..34a5adeff084 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -46,6 +46,9 @@
46 46
47#define FUNC(name) name 47#define FUNC(name) name
48 48
49#define RFI_TO_KERNEL RFI
50#define RFI_TO_GUEST RFI
51
49.macro INTERRUPT_TRAMPOLINE intno 52.macro INTERRUPT_TRAMPOLINE intno
50 53
51.global kvmppc_trampoline_\intno 54.global kvmppc_trampoline_\intno
@@ -141,7 +144,7 @@ kvmppc_handler_skip_ins:
141 GET_SCRATCH0(r13) 144 GET_SCRATCH0(r13)
142 145
143 /* And get back into the code */ 146 /* And get back into the code */
144 RFI 147 RFI_TO_KERNEL
145#endif 148#endif
146 149
147/* 150/*
@@ -164,6 +167,6 @@ _GLOBAL_TOC(kvmppc_entry_trampoline)
164 ori r5, r5, MSR_EE 167 ori r5, r5, MSR_EE
165 mtsrr0 r7 168 mtsrr0 r7
166 mtsrr1 r6 169 mtsrr1 r6
167 RFI 170 RFI_TO_KERNEL
168 171
169#include "book3s_segment.S" 172#include "book3s_segment.S"
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 2a2b96d53999..93a180ceefad 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -156,7 +156,7 @@ no_dcbz32_on:
156 PPC_LL r9, SVCPU_R9(r3) 156 PPC_LL r9, SVCPU_R9(r3)
157 PPC_LL r3, (SVCPU_R3)(r3) 157 PPC_LL r3, (SVCPU_R3)(r3)
158 158
159 RFI 159 RFI_TO_GUEST
160kvmppc_handler_trampoline_enter_end: 160kvmppc_handler_trampoline_enter_end:
161 161
162 162
@@ -407,5 +407,5 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
407 cmpwi r12, BOOK3S_INTERRUPT_DOORBELL 407 cmpwi r12, BOOK3S_INTERRUPT_DOORBELL
408 beqa BOOK3S_INTERRUPT_DOORBELL 408 beqa BOOK3S_INTERRUPT_DOORBELL
409 409
410 RFI 410 RFI_TO_KERNEL
411kvmppc_handler_trampoline_exit_end: 411kvmppc_handler_trampoline_exit_end:
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index bf457843e032..0d750d274c4e 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -725,7 +725,8 @@ u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
725 725
726 /* Return the per-cpu state for state saving/migration */ 726 /* Return the per-cpu state for state saving/migration */
727 return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT | 727 return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
728 (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT; 728 (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
729 (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
729} 730}
730 731
731int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) 732int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
@@ -1558,7 +1559,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
1558 1559
1559 /* 1560 /*
1560 * Restore P and Q. If the interrupt was pending, we 1561 * Restore P and Q. If the interrupt was pending, we
1561 * force both P and Q, which will trigger a resend. 1562 * force Q and !P, which will trigger a resend.
1562 * 1563 *
1563 * That means that a guest that had both an interrupt 1564 * That means that a guest that had both an interrupt
1564 * pending (queued) and Q set will restore with only 1565 * pending (queued) and Q set will restore with only
@@ -1566,7 +1567,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
1566 * is perfectly fine as coalescing interrupts that haven't 1567 * is perfectly fine as coalescing interrupts that haven't
1567 * been presented yet is always allowed. 1568 * been presented yet is always allowed.
1568 */ 1569 */
1569 if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING) 1570 if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
1570 state->old_p = true; 1571 state->old_p = true;
1571 if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING) 1572 if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
1572 state->old_q = true; 1573 state->old_q = true;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 1915e86cef6f..0a7c88786ec0 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -39,6 +39,10 @@
39#include <asm/iommu.h> 39#include <asm/iommu.h>
40#include <asm/switch_to.h> 40#include <asm/switch_to.h>
41#include <asm/xive.h> 41#include <asm/xive.h>
42#ifdef CONFIG_PPC_PSERIES
43#include <asm/hvcall.h>
44#include <asm/plpar_wrappers.h>
45#endif
42 46
43#include "timing.h" 47#include "timing.h"
44#include "irq.h" 48#include "irq.h"
@@ -548,6 +552,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
548#ifdef CONFIG_KVM_XICS 552#ifdef CONFIG_KVM_XICS
549 case KVM_CAP_IRQ_XICS: 553 case KVM_CAP_IRQ_XICS:
550#endif 554#endif
555 case KVM_CAP_PPC_GET_CPU_CHAR:
551 r = 1; 556 r = 1;
552 break; 557 break;
553 558
@@ -1759,6 +1764,124 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
1759 return r; 1764 return r;
1760} 1765}
1761 1766
1767#ifdef CONFIG_PPC_BOOK3S_64
1768/*
1769 * These functions check whether the underlying hardware is safe
1770 * against attacks based on observing the effects of speculatively
1771 * executed instructions, and whether it supplies instructions for
1772 * use in workarounds. The information comes from firmware, either
1773 * via the device tree on powernv platforms or from an hcall on
1774 * pseries platforms.
1775 */
1776#ifdef CONFIG_PPC_PSERIES
1777static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
1778{
1779 struct h_cpu_char_result c;
1780 unsigned long rc;
1781
1782 if (!machine_is(pseries))
1783 return -ENOTTY;
1784
1785 rc = plpar_get_cpu_characteristics(&c);
1786 if (rc == H_SUCCESS) {
1787 cp->character = c.character;
1788 cp->behaviour = c.behaviour;
1789 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
1790 KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
1791 KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
1792 KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
1793 KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
1794 KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
1795 KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
1796 KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
1797 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
1798 KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
1799 KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
1800 }
1801 return 0;
1802}
1803#else
1804static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
1805{
1806 return -ENOTTY;
1807}
1808#endif
1809
1810static inline bool have_fw_feat(struct device_node *fw_features,
1811 const char *state, const char *name)
1812{
1813 struct device_node *np;
1814 bool r = false;
1815
1816 np = of_get_child_by_name(fw_features, name);
1817 if (np) {
1818 r = of_property_read_bool(np, state);
1819 of_node_put(np);
1820 }
1821 return r;
1822}
1823
1824static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
1825{
1826 struct device_node *np, *fw_features;
1827 int r;
1828
1829 memset(cp, 0, sizeof(*cp));
1830 r = pseries_get_cpu_char(cp);
1831 if (r != -ENOTTY)
1832 return r;
1833
1834 np = of_find_node_by_name(NULL, "ibm,opal");
1835 if (np) {
1836 fw_features = of_get_child_by_name(np, "fw-features");
1837 of_node_put(np);
1838 if (!fw_features)
1839 return 0;
1840 if (have_fw_feat(fw_features, "enabled",
1841 "inst-spec-barrier-ori31,31,0"))
1842 cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
1843 if (have_fw_feat(fw_features, "enabled",
1844 "fw-bcctrl-serialized"))
1845 cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
1846 if (have_fw_feat(fw_features, "enabled",
1847 "inst-l1d-flush-ori30,30,0"))
1848 cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
1849 if (have_fw_feat(fw_features, "enabled",
1850 "inst-l1d-flush-trig2"))
1851 cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
1852 if (have_fw_feat(fw_features, "enabled",
1853 "fw-l1d-thread-split"))
1854 cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
1855 if (have_fw_feat(fw_features, "enabled",
1856 "fw-count-cache-disabled"))
1857 cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
1858 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
1859 KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
1860 KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
1861 KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
1862 KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
1863 KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
1864
1865 if (have_fw_feat(fw_features, "enabled",
1866 "speculation-policy-favor-security"))
1867 cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
1868 if (!have_fw_feat(fw_features, "disabled",
1869 "needs-l1d-flush-msr-pr-0-to-1"))
1870 cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
1871 if (!have_fw_feat(fw_features, "disabled",
1872 "needs-spec-barrier-for-bound-checks"))
1873 cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
1874 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
1875 KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
1876 KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
1877
1878 of_node_put(fw_features);
1879 }
1880
1881 return 0;
1882}
1883#endif
1884
1762long kvm_arch_vm_ioctl(struct file *filp, 1885long kvm_arch_vm_ioctl(struct file *filp,
1763 unsigned int ioctl, unsigned long arg) 1886 unsigned int ioctl, unsigned long arg)
1764{ 1887{
@@ -1861,6 +1984,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
1861 r = -EFAULT; 1984 r = -EFAULT;
1862 break; 1985 break;
1863 } 1986 }
1987 case KVM_PPC_GET_CPU_CHAR: {
1988 struct kvm_ppc_cpu_char cpuchar;
1989
1990 r = kvmppc_get_cpu_char(&cpuchar);
1991 if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
1992 r = -EFAULT;
1993 break;
1994 }
1864 default: { 1995 default: {
1865 struct kvm *kvm = filp->private_data; 1996 struct kvm *kvm = filp->private_data;
1866 r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg); 1997 r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 41cf5ae273cf..a95ea007d654 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -116,6 +116,47 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
116 } 116 }
117} 117}
118 118
119#ifdef CONFIG_PPC_BOOK3S_64
120void do_rfi_flush_fixups(enum l1d_flush_type types)
121{
122 unsigned int instrs[3], *dest;
123 long *start, *end;
124 int i;
125
126 start = PTRRELOC(&__start___rfi_flush_fixup),
127 end = PTRRELOC(&__stop___rfi_flush_fixup);
128
129 instrs[0] = 0x60000000; /* nop */
130 instrs[1] = 0x60000000; /* nop */
131 instrs[2] = 0x60000000; /* nop */
132
133 if (types & L1D_FLUSH_FALLBACK)
134 /* b .+16 to fallback flush */
135 instrs[0] = 0x48000010;
136
137 i = 0;
138 if (types & L1D_FLUSH_ORI) {
139 instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
140 instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
141 }
142
143 if (types & L1D_FLUSH_MTTRIG)
144 instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
145
146 for (i = 0; start < end; start++, i++) {
147 dest = (void *)start + *start;
148
149 pr_devel("patching dest %lx\n", (unsigned long)dest);
150
151 patch_instruction(dest, instrs[0]);
152 patch_instruction(dest + 1, instrs[1]);
153 patch_instruction(dest + 2, instrs[2]);
154 }
155
156 printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i);
157}
158#endif /* CONFIG_PPC_BOOK3S_64 */
159
119void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) 160void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
120{ 161{
121 long *start, *end; 162 long *start, *end;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 4797d08581ce..6e1e39035380 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -145,6 +145,11 @@ static noinline int bad_area(struct pt_regs *regs, unsigned long address)
145 return __bad_area(regs, address, SEGV_MAPERR); 145 return __bad_area(regs, address, SEGV_MAPERR);
146} 146}
147 147
148static noinline int bad_access(struct pt_regs *regs, unsigned long address)
149{
150 return __bad_area(regs, address, SEGV_ACCERR);
151}
152
148static int do_sigbus(struct pt_regs *regs, unsigned long address, 153static int do_sigbus(struct pt_regs *regs, unsigned long address,
149 unsigned int fault) 154 unsigned int fault)
150{ 155{
@@ -490,7 +495,7 @@ retry:
490 495
491good_area: 496good_area:
492 if (unlikely(access_error(is_write, is_exec, vma))) 497 if (unlikely(access_error(is_write, is_exec, vma)))
493 return bad_area(regs, address); 498 return bad_access(regs, address);
494 499
495 /* 500 /*
496 * If for any reason at all we couldn't handle the fault, 501 * If for any reason at all we couldn't handle the fault,
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 46d74e81aff1..d183b4801bdb 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -763,7 +763,8 @@ emit_clear:
763 func = (u8 *) __bpf_call_base + imm; 763 func = (u8 *) __bpf_call_base + imm;
764 764
765 /* Save skb pointer if we need to re-cache skb data */ 765 /* Save skb pointer if we need to re-cache skb data */
766 if (bpf_helper_changes_pkt_data(func)) 766 if ((ctx->seen & SEEN_SKB) &&
767 bpf_helper_changes_pkt_data(func))
767 PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx)); 768 PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx));
768 769
769 bpf_jit_emit_func_call(image, ctx, (u64)func); 770 bpf_jit_emit_func_call(image, ctx, (u64)func);
@@ -772,7 +773,8 @@ emit_clear:
772 PPC_MR(b2p[BPF_REG_0], 3); 773 PPC_MR(b2p[BPF_REG_0], 3);
773 774
774 /* refresh skb cache */ 775 /* refresh skb cache */
775 if (bpf_helper_changes_pkt_data(func)) { 776 if ((ctx->seen & SEEN_SKB) &&
777 bpf_helper_changes_pkt_data(func)) {
776 /* reload skb pointer to r3 */ 778 /* reload skb pointer to r3 */
777 PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx)); 779 PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx));
778 bpf_jit_emit_skb_loads(image, ctx); 780 bpf_jit_emit_skb_loads(image, ctx);
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 9e3da168d54c..fce545774d50 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -410,8 +410,12 @@ static __u64 power_pmu_bhrb_to(u64 addr)
410 int ret; 410 int ret;
411 __u64 target; 411 __u64 target;
412 412
413 if (is_kernel_addr(addr)) 413 if (is_kernel_addr(addr)) {
414 return branch_target((unsigned int *)addr); 414 if (probe_kernel_read(&instr, (void *)addr, sizeof(instr)))
415 return 0;
416
417 return branch_target(&instr);
418 }
415 419
416 /* Userspace: need copy instruction here then translate it */ 420 /* Userspace: need copy instruction here then translate it */
417 pagefault_disable(); 421 pagefault_disable();
@@ -1415,7 +1419,7 @@ static int collect_events(struct perf_event *group, int max_count,
1415 int n = 0; 1419 int n = 0;
1416 struct perf_event *event; 1420 struct perf_event *event;
1417 1421
1418 if (!is_software_event(group)) { 1422 if (group->pmu->task_ctx_nr == perf_hw_context) {
1419 if (n >= max_count) 1423 if (n >= max_count)
1420 return -1; 1424 return -1;
1421 ctrs[n] = group; 1425 ctrs[n] = group;
@@ -1423,7 +1427,7 @@ static int collect_events(struct perf_event *group, int max_count,
1423 events[n++] = group->hw.config; 1427 events[n++] = group->hw.config;
1424 } 1428 }
1425 list_for_each_entry(event, &group->sibling_list, group_entry) { 1429 list_for_each_entry(event, &group->sibling_list, group_entry) {
1426 if (!is_software_event(event) && 1430 if (event->pmu->task_ctx_nr == perf_hw_context &&
1427 event->state != PERF_EVENT_STATE_OFF) { 1431 event->state != PERF_EVENT_STATE_OFF) {
1428 if (n >= max_count) 1432 if (n >= max_count)
1429 return -1; 1433 return -1;
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index 0ead3cd73caa..be4e7f84f70a 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -310,6 +310,19 @@ static int ppc_nest_imc_cpu_offline(unsigned int cpu)
310 return 0; 310 return 0;
311 311
312 /* 312 /*
313 * Check whether nest_imc is registered. We could end up here if the
314 * cpuhotplug callback registration fails. i.e, callback invokes the
315 * offline path for all successfully registered nodes. At this stage,
316 * nest_imc pmu will not be registered and we should return here.
317 *
318 * We return with a zero since this is not an offline failure. And
319 * cpuhp_setup_state() returns the actual failure reason to the caller,
320 * which in turn will call the cleanup routine.
321 */
322 if (!nest_pmus)
323 return 0;
324
325 /*
313 * Now that this cpu is one of the designated, 326 * Now that this cpu is one of the designated,
314 * find a next cpu a) which is online and b) in same chip. 327 * find a next cpu a) which is online and b) in same chip.
315 */ 328 */
@@ -1171,6 +1184,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
1171 if (nest_pmus == 1) { 1184 if (nest_pmus == 1) {
1172 cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE); 1185 cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
1173 kfree(nest_imc_refc); 1186 kfree(nest_imc_refc);
1187 kfree(per_nest_pmu_arr);
1174 } 1188 }
1175 1189
1176 if (nest_pmus > 0) 1190 if (nest_pmus > 0)
@@ -1195,7 +1209,6 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
1195 kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs); 1209 kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
1196 kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]); 1210 kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
1197 kfree(pmu_ptr); 1211 kfree(pmu_ptr);
1198 kfree(per_nest_pmu_arr);
1199 return; 1212 return;
1200} 1213}
1201 1214
@@ -1309,6 +1322,8 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
1309 ret = nest_pmu_cpumask_init(); 1322 ret = nest_pmu_cpumask_init();
1310 if (ret) { 1323 if (ret) {
1311 mutex_unlock(&nest_init_lock); 1324 mutex_unlock(&nest_init_lock);
1325 kfree(nest_imc_refc);
1326 kfree(per_nest_pmu_arr);
1312 goto err_free; 1327 goto err_free;
1313 } 1328 }
1314 } 1329 }
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 1edfbc1e40f4..4fb21e17504a 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -37,13 +37,62 @@
37#include <asm/kexec.h> 37#include <asm/kexec.h>
38#include <asm/smp.h> 38#include <asm/smp.h>
39#include <asm/tm.h> 39#include <asm/tm.h>
40#include <asm/setup.h>
40 41
41#include "powernv.h" 42#include "powernv.h"
42 43
44static void pnv_setup_rfi_flush(void)
45{
46 struct device_node *np, *fw_features;
47 enum l1d_flush_type type;
48 int enable;
49
50 /* Default to fallback in case fw-features are not available */
51 type = L1D_FLUSH_FALLBACK;
52 enable = 1;
53
54 np = of_find_node_by_name(NULL, "ibm,opal");
55 fw_features = of_get_child_by_name(np, "fw-features");
56 of_node_put(np);
57
58 if (fw_features) {
59 np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2");
60 if (np && of_property_read_bool(np, "enabled"))
61 type = L1D_FLUSH_MTTRIG;
62
63 of_node_put(np);
64
65 np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0");
66 if (np && of_property_read_bool(np, "enabled"))
67 type = L1D_FLUSH_ORI;
68
69 of_node_put(np);
70
71 /* Enable unless firmware says NOT to */
72 enable = 2;
73 np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0");
74 if (np && of_property_read_bool(np, "disabled"))
75 enable--;
76
77 of_node_put(np);
78
79 np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1");
80 if (np && of_property_read_bool(np, "disabled"))
81 enable--;
82
83 of_node_put(np);
84 of_node_put(fw_features);
85 }
86
87 setup_rfi_flush(type, enable > 0);
88}
89
43static void __init pnv_setup_arch(void) 90static void __init pnv_setup_arch(void)
44{ 91{
45 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); 92 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
46 93
94 pnv_setup_rfi_flush();
95
47 /* Initialize SMP */ 96 /* Initialize SMP */
48 pnv_smp_init(); 97 pnv_smp_init();
49 98
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 9dabea6e1443..6244bc849469 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -104,6 +104,20 @@ static void __noreturn ps3_halt(void)
104 ps3_sys_manager_halt(); /* never returns */ 104 ps3_sys_manager_halt(); /* never returns */
105} 105}
106 106
107static void ps3_panic(char *str)
108{
109 DBG("%s:%d %s\n", __func__, __LINE__, str);
110
111 smp_send_stop();
112 printk("\n");
113 printk(" System does not reboot automatically.\n");
114 printk(" Please press POWER button.\n");
115 printk("\n");
116
117 while(1)
118 lv1_pause(1);
119}
120
107#if defined(CONFIG_FB_PS3) || defined(CONFIG_FB_PS3_MODULE) || \ 121#if defined(CONFIG_FB_PS3) || defined(CONFIG_FB_PS3_MODULE) || \
108 defined(CONFIG_PS3_FLASH) || defined(CONFIG_PS3_FLASH_MODULE) 122 defined(CONFIG_PS3_FLASH) || defined(CONFIG_PS3_FLASH_MODULE)
109static void __init prealloc(struct ps3_prealloc *p) 123static void __init prealloc(struct ps3_prealloc *p)
@@ -255,6 +269,7 @@ define_machine(ps3) {
255 .probe = ps3_probe, 269 .probe = ps3_probe,
256 .setup_arch = ps3_setup_arch, 270 .setup_arch = ps3_setup_arch,
257 .init_IRQ = ps3_init_IRQ, 271 .init_IRQ = ps3_init_IRQ,
272 .panic = ps3_panic,
258 .get_boot_time = ps3_get_boot_time, 273 .get_boot_time = ps3_get_boot_time,
259 .set_dabr = ps3_set_dabr, 274 .set_dabr = ps3_set_dabr,
260 .calibrate_decr = ps3_calibrate_decr, 275 .calibrate_decr = ps3_calibrate_decr,
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 6e35780c5962..a0b20c03f078 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -574,11 +574,26 @@ static ssize_t dlpar_show(struct class *class, struct class_attribute *attr,
574 574
575static CLASS_ATTR_RW(dlpar); 575static CLASS_ATTR_RW(dlpar);
576 576
577static int __init pseries_dlpar_init(void) 577int __init dlpar_workqueue_init(void)
578{ 578{
579 if (pseries_hp_wq)
580 return 0;
581
579 pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue", 582 pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue",
580 WQ_UNBOUND, 1); 583 WQ_UNBOUND, 1);
584
585 return pseries_hp_wq ? 0 : -ENOMEM;
586}
587
588static int __init dlpar_sysfs_init(void)
589{
590 int rc;
591
592 rc = dlpar_workqueue_init();
593 if (rc)
594 return rc;
595
581 return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr); 596 return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
582} 597}
583machine_device_initcall(pseries, pseries_dlpar_init); 598machine_device_initcall(pseries, dlpar_sysfs_init);
584 599
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 4470a3194311..1ae1d9f4dbe9 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -98,4 +98,6 @@ static inline unsigned long cmo_get_page_size(void)
98 return CMO_PageSize; 98 return CMO_PageSize;
99} 99}
100 100
101int dlpar_workqueue_init(void);
102
101#endif /* _PSERIES_PSERIES_H */ 103#endif /* _PSERIES_PSERIES_H */
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 4923ffe230cf..81d8614e7379 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -69,7 +69,8 @@ static int __init init_ras_IRQ(void)
69 /* Hotplug Events */ 69 /* Hotplug Events */
70 np = of_find_node_by_path("/event-sources/hot-plug-events"); 70 np = of_find_node_by_path("/event-sources/hot-plug-events");
71 if (np != NULL) { 71 if (np != NULL) {
72 request_event_sources_irqs(np, ras_hotplug_interrupt, 72 if (dlpar_workqueue_init() == 0)
73 request_event_sources_irqs(np, ras_hotplug_interrupt,
73 "RAS_HOTPLUG"); 74 "RAS_HOTPLUG");
74 of_node_put(np); 75 of_node_put(np);
75 } 76 }
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 5f1beb8367ac..ae4f596273b5 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -459,6 +459,39 @@ static void __init find_and_init_phbs(void)
459 of_pci_check_probe_only(); 459 of_pci_check_probe_only();
460} 460}
461 461
462static void pseries_setup_rfi_flush(void)
463{
464 struct h_cpu_char_result result;
465 enum l1d_flush_type types;
466 bool enable;
467 long rc;
468
469 /* Enable by default */
470 enable = true;
471
472 rc = plpar_get_cpu_characteristics(&result);
473 if (rc == H_SUCCESS) {
474 types = L1D_FLUSH_NONE;
475
476 if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
477 types |= L1D_FLUSH_MTTRIG;
478 if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
479 types |= L1D_FLUSH_ORI;
480
481 /* Use fallback if nothing set in hcall */
482 if (types == L1D_FLUSH_NONE)
483 types = L1D_FLUSH_FALLBACK;
484
485 if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
486 enable = false;
487 } else {
488 /* Default to fallback if case hcall is not available */
489 types = L1D_FLUSH_FALLBACK;
490 }
491
492 setup_rfi_flush(types, enable);
493}
494
462static void __init pSeries_setup_arch(void) 495static void __init pSeries_setup_arch(void)
463{ 496{
464 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); 497 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
@@ -476,6 +509,8 @@ static void __init pSeries_setup_arch(void)
476 509
477 fwnmi_init(); 510 fwnmi_init();
478 511
512 pseries_setup_rfi_flush();
513
479 /* By default, only probe PCI (can be overridden by rtas_pci) */ 514 /* By default, only probe PCI (can be overridden by rtas_pci) */
480 pci_add_flags(PCI_PROBE_ONLY); 515 pci_add_flags(PCI_PROBE_ONLY);
481 516
@@ -726,6 +761,7 @@ define_machine(pseries) {
726 .pcibios_fixup = pSeries_final_fixup, 761 .pcibios_fixup = pSeries_final_fixup,
727 .restart = rtas_restart, 762 .restart = rtas_restart,
728 .halt = rtas_halt, 763 .halt = rtas_halt,
764 .panic = rtas_os_term,
729 .get_boot_time = rtas_get_boot_time, 765 .get_boot_time = rtas_get_boot_time,
730 .get_rtc_time = rtas_get_rtc_time, 766 .get_rtc_time = rtas_get_rtc_time,
731 .set_rtc_time = rtas_set_rtc_time, 767 .set_rtc_time = rtas_set_rtc_time,
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 44cbf4c12ea1..df95102e732c 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -354,6 +354,7 @@ static int fsl_of_msi_remove(struct platform_device *ofdev)
354} 354}
355 355
356static struct lock_class_key fsl_msi_irq_class; 356static struct lock_class_key fsl_msi_irq_class;
357static struct lock_class_key fsl_msi_irq_request_class;
357 358
358static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev, 359static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
359 int offset, int irq_index) 360 int offset, int irq_index)
@@ -373,7 +374,8 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
373 dev_err(&dev->dev, "No memory for MSI cascade data\n"); 374 dev_err(&dev->dev, "No memory for MSI cascade data\n");
374 return -ENOMEM; 375 return -ENOMEM;
375 } 376 }
376 irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class); 377 irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class,
378 &fsl_msi_irq_request_class);
377 cascade_data->index = offset; 379 cascade_data->index = offset;
378 cascade_data->msi_data = msi; 380 cascade_data->msi_data = msi;
379 cascade_data->virq = virt_msir; 381 cascade_data->virq = virt_msir;
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 1b2d8cb49abb..0ddc7ac6c5f1 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -1590,7 +1590,7 @@ static void print_bug_trap(struct pt_regs *regs)
1590 printf("kernel BUG at %s:%u!\n", 1590 printf("kernel BUG at %s:%u!\n",
1591 bug->file, bug->line); 1591 bug->file, bug->line);
1592#else 1592#else
1593 printf("kernel BUG at %p!\n", (void *)bug->bug_addr); 1593 printf("kernel BUG at %px!\n", (void *)bug->bug_addr);
1594#endif 1594#endif
1595#endif /* CONFIG_BUG */ 1595#endif /* CONFIG_BUG */
1596} 1596}
@@ -2329,7 +2329,7 @@ static void dump_one_paca(int cpu)
2329 2329
2330 p = &paca[cpu]; 2330 p = &paca[cpu];
2331 2331
2332 printf("paca for cpu 0x%x @ %p:\n", cpu, p); 2332 printf("paca for cpu 0x%x @ %px:\n", cpu, p);
2333 2333
2334 printf(" %-*s = %s\n", 20, "possible", cpu_possible(cpu) ? "yes" : "no"); 2334 printf(" %-*s = %s\n", 20, "possible", cpu_possible(cpu) ? "yes" : "no");
2335 printf(" %-*s = %s\n", 20, "present", cpu_present(cpu) ? "yes" : "no"); 2335 printf(" %-*s = %s\n", 20, "present", cpu_present(cpu) ? "yes" : "no");
@@ -2344,10 +2344,10 @@ static void dump_one_paca(int cpu)
2344 DUMP(p, kernel_toc, "lx"); 2344 DUMP(p, kernel_toc, "lx");
2345 DUMP(p, kernelbase, "lx"); 2345 DUMP(p, kernelbase, "lx");
2346 DUMP(p, kernel_msr, "lx"); 2346 DUMP(p, kernel_msr, "lx");
2347 DUMP(p, emergency_sp, "p"); 2347 DUMP(p, emergency_sp, "px");
2348#ifdef CONFIG_PPC_BOOK3S_64 2348#ifdef CONFIG_PPC_BOOK3S_64
2349 DUMP(p, nmi_emergency_sp, "p"); 2349 DUMP(p, nmi_emergency_sp, "px");
2350 DUMP(p, mc_emergency_sp, "p"); 2350 DUMP(p, mc_emergency_sp, "px");
2351 DUMP(p, in_nmi, "x"); 2351 DUMP(p, in_nmi, "x");
2352 DUMP(p, in_mce, "x"); 2352 DUMP(p, in_mce, "x");
2353 DUMP(p, hmi_event_available, "x"); 2353 DUMP(p, hmi_event_available, "x");
@@ -2375,17 +2375,21 @@ static void dump_one_paca(int cpu)
2375 DUMP(p, slb_cache_ptr, "x"); 2375 DUMP(p, slb_cache_ptr, "x");
2376 for (i = 0; i < SLB_CACHE_ENTRIES; i++) 2376 for (i = 0; i < SLB_CACHE_ENTRIES; i++)
2377 printf(" slb_cache[%d]: = 0x%016lx\n", i, p->slb_cache[i]); 2377 printf(" slb_cache[%d]: = 0x%016lx\n", i, p->slb_cache[i]);
2378
2379 DUMP(p, rfi_flush_fallback_area, "px");
2380 DUMP(p, l1d_flush_congruence, "llx");
2381 DUMP(p, l1d_flush_sets, "llx");
2378#endif 2382#endif
2379 DUMP(p, dscr_default, "llx"); 2383 DUMP(p, dscr_default, "llx");
2380#ifdef CONFIG_PPC_BOOK3E 2384#ifdef CONFIG_PPC_BOOK3E
2381 DUMP(p, pgd, "p"); 2385 DUMP(p, pgd, "px");
2382 DUMP(p, kernel_pgd, "p"); 2386 DUMP(p, kernel_pgd, "px");
2383 DUMP(p, tcd_ptr, "p"); 2387 DUMP(p, tcd_ptr, "px");
2384 DUMP(p, mc_kstack, "p"); 2388 DUMP(p, mc_kstack, "px");
2385 DUMP(p, crit_kstack, "p"); 2389 DUMP(p, crit_kstack, "px");
2386 DUMP(p, dbg_kstack, "p"); 2390 DUMP(p, dbg_kstack, "px");
2387#endif 2391#endif
2388 DUMP(p, __current, "p"); 2392 DUMP(p, __current, "px");
2389 DUMP(p, kstack, "lx"); 2393 DUMP(p, kstack, "lx");
2390 printf(" kstack_base = 0x%016lx\n", p->kstack & ~(THREAD_SIZE - 1)); 2394 printf(" kstack_base = 0x%016lx\n", p->kstack & ~(THREAD_SIZE - 1));
2391 DUMP(p, stab_rr, "lx"); 2395 DUMP(p, stab_rr, "lx");
@@ -2403,7 +2407,7 @@ static void dump_one_paca(int cpu)
2403#endif 2407#endif
2404 2408
2405#ifdef CONFIG_PPC_POWERNV 2409#ifdef CONFIG_PPC_POWERNV
2406 DUMP(p, core_idle_state_ptr, "p"); 2410 DUMP(p, core_idle_state_ptr, "px");
2407 DUMP(p, thread_idle_state, "x"); 2411 DUMP(p, thread_idle_state, "x");
2408 DUMP(p, thread_mask, "x"); 2412 DUMP(p, thread_mask, "x");
2409 DUMP(p, subcore_sibling_mask, "x"); 2413 DUMP(p, subcore_sibling_mask, "x");
@@ -2945,7 +2949,7 @@ static void show_task(struct task_struct *tsk)
2945 (tsk->exit_state & EXIT_DEAD) ? 'E' : 2949 (tsk->exit_state & EXIT_DEAD) ? 'E' :
2946 (tsk->state & TASK_INTERRUPTIBLE) ? 'S' : '?'; 2950 (tsk->state & TASK_INTERRUPTIBLE) ? 'S' : '?';
2947 2951
2948 printf("%p %016lx %6d %6d %c %2d %s\n", tsk, 2952 printf("%px %016lx %6d %6d %c %2d %s\n", tsk,
2949 tsk->thread.ksp, 2953 tsk->thread.ksp,
2950 tsk->pid, tsk->parent->pid, 2954 tsk->pid, tsk->parent->pid,
2951 state, task_thread_info(tsk)->cpu, 2955 state, task_thread_info(tsk)->cpu,
@@ -2988,7 +2992,7 @@ static void show_pte(unsigned long addr)
2988 2992
2989 if (setjmp(bus_error_jmp) != 0) { 2993 if (setjmp(bus_error_jmp) != 0) {
2990 catch_memory_errors = 0; 2994 catch_memory_errors = 0;
2991 printf("*** Error dumping pte for task %p\n", tsk); 2995 printf("*** Error dumping pte for task %px\n", tsk);
2992 return; 2996 return;
2993 } 2997 }
2994 2998
@@ -3074,7 +3078,7 @@ static void show_tasks(void)
3074 3078
3075 if (setjmp(bus_error_jmp) != 0) { 3079 if (setjmp(bus_error_jmp) != 0) {
3076 catch_memory_errors = 0; 3080 catch_memory_errors = 0;
3077 printf("*** Error dumping task %p\n", tsk); 3081 printf("*** Error dumping task %px\n", tsk);
3078 return; 3082 return;
3079 } 3083 }
3080 3084
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index e69de29bb2d1..47dacf06c679 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -0,0 +1,75 @@
1CONFIG_SMP=y
2CONFIG_PCI=y
3CONFIG_PCIE_XILINX=y
4CONFIG_SYSVIPC=y
5CONFIG_POSIX_MQUEUE=y
6CONFIG_IKCONFIG=y
7CONFIG_IKCONFIG_PROC=y
8CONFIG_CGROUPS=y
9CONFIG_CGROUP_SCHED=y
10CONFIG_CFS_BANDWIDTH=y
11CONFIG_CGROUP_BPF=y
12CONFIG_NAMESPACES=y
13CONFIG_USER_NS=y
14CONFIG_BLK_DEV_INITRD=y
15CONFIG_EXPERT=y
16CONFIG_CHECKPOINT_RESTORE=y
17CONFIG_BPF_SYSCALL=y
18CONFIG_NET=y
19CONFIG_PACKET=y
20CONFIG_UNIX=y
21CONFIG_INET=y
22CONFIG_IP_MULTICAST=y
23CONFIG_IP_ADVANCED_ROUTER=y
24CONFIG_IP_PNP=y
25CONFIG_IP_PNP_DHCP=y
26CONFIG_IP_PNP_BOOTP=y
27CONFIG_IP_PNP_RARP=y
28CONFIG_NETLINK_DIAG=y
29CONFIG_DEVTMPFS=y
30CONFIG_BLK_DEV_LOOP=y
31CONFIG_VIRTIO_BLK=y
32CONFIG_BLK_DEV_SD=y
33CONFIG_BLK_DEV_SR=y
34CONFIG_ATA=y
35CONFIG_SATA_AHCI=y
36CONFIG_SATA_AHCI_PLATFORM=y
37CONFIG_NETDEVICES=y
38CONFIG_VIRTIO_NET=y
39CONFIG_MACB=y
40CONFIG_E1000E=y
41CONFIG_R8169=y
42CONFIG_MICROSEMI_PHY=y
43CONFIG_INPUT_MOUSEDEV=y
44CONFIG_SERIAL_8250=y
45CONFIG_SERIAL_8250_CONSOLE=y
46CONFIG_SERIAL_OF_PLATFORM=y
47# CONFIG_PTP_1588_CLOCK is not set
48CONFIG_DRM=y
49CONFIG_DRM_RADEON=y
50CONFIG_FRAMEBUFFER_CONSOLE=y
51CONFIG_USB=y
52CONFIG_USB_XHCI_HCD=y
53CONFIG_USB_XHCI_PLATFORM=y
54CONFIG_USB_EHCI_HCD=y
55CONFIG_USB_EHCI_HCD_PLATFORM=y
56CONFIG_USB_OHCI_HCD=y
57CONFIG_USB_OHCI_HCD_PLATFORM=y
58CONFIG_USB_STORAGE=y
59CONFIG_USB_UAS=y
60CONFIG_VIRTIO_MMIO=y
61CONFIG_RAS=y
62CONFIG_EXT4_FS=y
63CONFIG_EXT4_FS_POSIX_ACL=y
64CONFIG_AUTOFS4_FS=y
65CONFIG_MSDOS_FS=y
66CONFIG_VFAT_FS=y
67CONFIG_TMPFS=y
68CONFIG_TMPFS_POSIX_ACL=y
69CONFIG_NFS_FS=y
70CONFIG_NFS_V4=y
71CONFIG_NFS_V4_1=y
72CONFIG_NFS_V4_2=y
73CONFIG_ROOT_NFS=y
74# CONFIG_RCU_TRACE is not set
75CONFIG_CRYPTO_USER_API_HASH=y
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
index 773c4e039cd7..c0319cbf1eec 100644
--- a/arch/riscv/include/asm/barrier.h
+++ b/arch/riscv/include/asm/barrier.h
@@ -38,6 +38,25 @@
38#define smp_rmb() RISCV_FENCE(r,r) 38#define smp_rmb() RISCV_FENCE(r,r)
39#define smp_wmb() RISCV_FENCE(w,w) 39#define smp_wmb() RISCV_FENCE(w,w)
40 40
41/*
42 * This is a very specific barrier: it's currently only used in two places in
43 * the kernel, both in the scheduler. See include/linux/spinlock.h for the two
44 * orderings it guarantees, but the "critical section is RCsc" guarantee
45 * mandates a barrier on RISC-V. The sequence looks like:
46 *
47 * lr.aq lock
48 * sc lock <= LOCKED
49 * smp_mb__after_spinlock()
50 * // critical section
51 * lr lock
52 * sc.rl lock <= UNLOCKED
53 *
54 * The AQ/RL pair provides a RCpc critical section, but there's not really any
55 * way we can take advantage of that here because the ordering is only enforced
56 * on that one lock. Thus, we're just doing a full fence.
57 */
58#define smp_mb__after_spinlock() RISCV_FENCE(rw,rw)
59
41#include <asm-generic/barrier.h> 60#include <asm-generic/barrier.h>
42 61
43#endif /* __ASSEMBLY__ */ 62#endif /* __ASSEMBLY__ */
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 0d64bc9f4f91..3c7a2c97e377 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -17,10 +17,10 @@
17#include <linux/const.h> 17#include <linux/const.h>
18 18
19/* Status register flags */ 19/* Status register flags */
20#define SR_IE _AC(0x00000002, UL) /* Interrupt Enable */ 20#define SR_SIE _AC(0x00000002, UL) /* Supervisor Interrupt Enable */
21#define SR_PIE _AC(0x00000020, UL) /* Previous IE */ 21#define SR_SPIE _AC(0x00000020, UL) /* Previous Supervisor IE */
22#define SR_PS _AC(0x00000100, UL) /* Previously Supervisor */ 22#define SR_SPP _AC(0x00000100, UL) /* Previously Supervisor */
23#define SR_SUM _AC(0x00040000, UL) /* Supervisor may access User Memory */ 23#define SR_SUM _AC(0x00040000, UL) /* Supervisor may access User Memory */
24 24
25#define SR_FS _AC(0x00006000, UL) /* Floating-point Status */ 25#define SR_FS _AC(0x00006000, UL) /* Floating-point Status */
26#define SR_FS_OFF _AC(0x00000000, UL) 26#define SR_FS_OFF _AC(0x00000000, UL)
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index a82ce599b639..b269451e7e85 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -21,8 +21,6 @@
21 21
22#include <linux/types.h> 22#include <linux/types.h>
23 23
24#ifdef CONFIG_MMU
25
26extern void __iomem *ioremap(phys_addr_t offset, unsigned long size); 24extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
27 25
28/* 26/*
@@ -36,8 +34,6 @@ extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
36 34
37extern void iounmap(volatile void __iomem *addr); 35extern void iounmap(volatile void __iomem *addr);
38 36
39#endif /* CONFIG_MMU */
40
41/* Generic IO read/write. These perform native-endian accesses. */ 37/* Generic IO read/write. These perform native-endian accesses. */
42#define __raw_writeb __raw_writeb 38#define __raw_writeb __raw_writeb
43static inline void __raw_writeb(u8 val, volatile void __iomem *addr) 39static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
diff --git a/arch/riscv/include/asm/irqflags.h b/arch/riscv/include/asm/irqflags.h
index 6fdc860d7f84..07a3c6d5706f 100644
--- a/arch/riscv/include/asm/irqflags.h
+++ b/arch/riscv/include/asm/irqflags.h
@@ -27,25 +27,25 @@ static inline unsigned long arch_local_save_flags(void)
27/* unconditionally enable interrupts */ 27/* unconditionally enable interrupts */
28static inline void arch_local_irq_enable(void) 28static inline void arch_local_irq_enable(void)
29{ 29{
30 csr_set(sstatus, SR_IE); 30 csr_set(sstatus, SR_SIE);
31} 31}
32 32
33/* unconditionally disable interrupts */ 33/* unconditionally disable interrupts */
34static inline void arch_local_irq_disable(void) 34static inline void arch_local_irq_disable(void)
35{ 35{
36 csr_clear(sstatus, SR_IE); 36 csr_clear(sstatus, SR_SIE);
37} 37}
38 38
39/* get status and disable interrupts */ 39/* get status and disable interrupts */
40static inline unsigned long arch_local_irq_save(void) 40static inline unsigned long arch_local_irq_save(void)
41{ 41{
42 return csr_read_clear(sstatus, SR_IE); 42 return csr_read_clear(sstatus, SR_SIE);
43} 43}
44 44
45/* test flags */ 45/* test flags */
46static inline int arch_irqs_disabled_flags(unsigned long flags) 46static inline int arch_irqs_disabled_flags(unsigned long flags)
47{ 47{
48 return !(flags & SR_IE); 48 return !(flags & SR_SIE);
49} 49}
50 50
51/* test hardware interrupt enable bit */ 51/* test hardware interrupt enable bit */
@@ -57,7 +57,7 @@ static inline int arch_irqs_disabled(void)
57/* set interrupt enabled status */ 57/* set interrupt enabled status */
58static inline void arch_local_irq_restore(unsigned long flags) 58static inline void arch_local_irq_restore(unsigned long flags)
59{ 59{
60 csr_set(sstatus, flags & SR_IE); 60 csr_set(sstatus, flags & SR_SIE);
61} 61}
62 62
63#endif /* _ASM_RISCV_IRQFLAGS_H */ 63#endif /* _ASM_RISCV_IRQFLAGS_H */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 2cbd92ed1629..16301966d65b 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -20,8 +20,6 @@
20 20
21#ifndef __ASSEMBLY__ 21#ifndef __ASSEMBLY__
22 22
23#ifdef CONFIG_MMU
24
25/* Page Upper Directory not used in RISC-V */ 23/* Page Upper Directory not used in RISC-V */
26#include <asm-generic/pgtable-nopud.h> 24#include <asm-generic/pgtable-nopud.h>
27#include <asm/page.h> 25#include <asm/page.h>
@@ -413,8 +411,6 @@ static inline void pgtable_cache_init(void)
413 /* No page table caches to initialize */ 411 /* No page table caches to initialize */
414} 412}
415 413
416#endif /* CONFIG_MMU */
417
418#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1) 414#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
419#define VMALLOC_END (PAGE_OFFSET - 1) 415#define VMALLOC_END (PAGE_OFFSET - 1)
420#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE) 416#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h
index 93b8956e25e4..2c5df945d43c 100644
--- a/arch/riscv/include/asm/ptrace.h
+++ b/arch/riscv/include/asm/ptrace.h
@@ -66,7 +66,7 @@ struct pt_regs {
66#define REG_FMT "%08lx" 66#define REG_FMT "%08lx"
67#endif 67#endif
68 68
69#define user_mode(regs) (((regs)->sstatus & SR_PS) == 0) 69#define user_mode(regs) (((regs)->sstatus & SR_SPP) == 0)
70 70
71 71
72/* Helpers for working with the instruction pointer */ 72/* Helpers for working with the instruction pointer */
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 715b0f10af58..7b9c24ebdf52 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -15,8 +15,6 @@
15#ifndef _ASM_RISCV_TLBFLUSH_H 15#ifndef _ASM_RISCV_TLBFLUSH_H
16#define _ASM_RISCV_TLBFLUSH_H 16#define _ASM_RISCV_TLBFLUSH_H
17 17
18#ifdef CONFIG_MMU
19
20#include <linux/mm_types.h> 18#include <linux/mm_types.h>
21 19
22/* 20/*
@@ -64,6 +62,4 @@ static inline void flush_tlb_kernel_range(unsigned long start,
64 flush_tlb_all(); 62 flush_tlb_all();
65} 63}
66 64
67#endif /* CONFIG_MMU */
68
69#endif /* _ASM_RISCV_TLBFLUSH_H */ 65#endif /* _ASM_RISCV_TLBFLUSH_H */
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index 27b90d64814b..14b0b22fb578 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -127,7 +127,6 @@ extern int fixup_exception(struct pt_regs *state);
127 * call. 127 * call.
128 */ 128 */
129 129
130#ifdef CONFIG_MMU
131#define __get_user_asm(insn, x, ptr, err) \ 130#define __get_user_asm(insn, x, ptr, err) \
132do { \ 131do { \
133 uintptr_t __tmp; \ 132 uintptr_t __tmp; \
@@ -153,13 +152,11 @@ do { \
153 __disable_user_access(); \ 152 __disable_user_access(); \
154 (x) = __x; \ 153 (x) = __x; \
155} while (0) 154} while (0)
156#endif /* CONFIG_MMU */
157 155
158#ifdef CONFIG_64BIT 156#ifdef CONFIG_64BIT
159#define __get_user_8(x, ptr, err) \ 157#define __get_user_8(x, ptr, err) \
160 __get_user_asm("ld", x, ptr, err) 158 __get_user_asm("ld", x, ptr, err)
161#else /* !CONFIG_64BIT */ 159#else /* !CONFIG_64BIT */
162#ifdef CONFIG_MMU
163#define __get_user_8(x, ptr, err) \ 160#define __get_user_8(x, ptr, err) \
164do { \ 161do { \
165 u32 __user *__ptr = (u32 __user *)(ptr); \ 162 u32 __user *__ptr = (u32 __user *)(ptr); \
@@ -193,7 +190,6 @@ do { \
193 (x) = (__typeof__(x))((__typeof__((x)-(x)))( \ 190 (x) = (__typeof__(x))((__typeof__((x)-(x)))( \
194 (((u64)__hi << 32) | __lo))); \ 191 (((u64)__hi << 32) | __lo))); \
195} while (0) 192} while (0)
196#endif /* CONFIG_MMU */
197#endif /* CONFIG_64BIT */ 193#endif /* CONFIG_64BIT */
198 194
199 195
@@ -267,8 +263,6 @@ do { \
267 ((x) = 0, -EFAULT); \ 263 ((x) = 0, -EFAULT); \
268}) 264})
269 265
270
271#ifdef CONFIG_MMU
272#define __put_user_asm(insn, x, ptr, err) \ 266#define __put_user_asm(insn, x, ptr, err) \
273do { \ 267do { \
274 uintptr_t __tmp; \ 268 uintptr_t __tmp; \
@@ -292,14 +286,11 @@ do { \
292 : "rJ" (__x), "i" (-EFAULT)); \ 286 : "rJ" (__x), "i" (-EFAULT)); \
293 __disable_user_access(); \ 287 __disable_user_access(); \
294} while (0) 288} while (0)
295#endif /* CONFIG_MMU */
296
297 289
298#ifdef CONFIG_64BIT 290#ifdef CONFIG_64BIT
299#define __put_user_8(x, ptr, err) \ 291#define __put_user_8(x, ptr, err) \
300 __put_user_asm("sd", x, ptr, err) 292 __put_user_asm("sd", x, ptr, err)
301#else /* !CONFIG_64BIT */ 293#else /* !CONFIG_64BIT */
302#ifdef CONFIG_MMU
303#define __put_user_8(x, ptr, err) \ 294#define __put_user_8(x, ptr, err) \
304do { \ 295do { \
305 u32 __user *__ptr = (u32 __user *)(ptr); \ 296 u32 __user *__ptr = (u32 __user *)(ptr); \
@@ -329,7 +320,6 @@ do { \
329 : "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT)); \ 320 : "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT)); \
330 __disable_user_access(); \ 321 __disable_user_access(); \
331} while (0) 322} while (0)
332#endif /* CONFIG_MMU */
333#endif /* CONFIG_64BIT */ 323#endif /* CONFIG_64BIT */
334 324
335 325
@@ -438,7 +428,6 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
438 * will set "err" to -EFAULT, while successful accesses return the previous 428 * will set "err" to -EFAULT, while successful accesses return the previous
439 * value. 429 * value.
440 */ 430 */
441#ifdef CONFIG_MMU
442#define __cmpxchg_user(ptr, old, new, err, size, lrb, scb) \ 431#define __cmpxchg_user(ptr, old, new, err, size, lrb, scb) \
443({ \ 432({ \
444 __typeof__(ptr) __ptr = (ptr); \ 433 __typeof__(ptr) __ptr = (ptr); \
@@ -508,6 +497,5 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
508 (err) = __err; \ 497 (err) = __err; \
509 __ret; \ 498 __ret; \
510}) 499})
511#endif /* CONFIG_MMU */
512 500
513#endif /* _ASM_RISCV_UACCESS_H */ 501#endif /* _ASM_RISCV_UACCESS_H */
diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h
index 9f250ed007cd..2f704a5c4196 100644
--- a/arch/riscv/include/asm/unistd.h
+++ b/arch/riscv/include/asm/unistd.h
@@ -14,3 +14,4 @@
14#define __ARCH_HAVE_MMU 14#define __ARCH_HAVE_MMU
15#define __ARCH_WANT_SYS_CLONE 15#define __ARCH_WANT_SYS_CLONE
16#include <uapi/asm/unistd.h> 16#include <uapi/asm/unistd.h>
17#include <uapi/asm/syscalls.h>
diff --git a/arch/riscv/include/asm/vdso-syscalls.h b/arch/riscv/include/asm/vdso-syscalls.h
deleted file mode 100644
index a2ccf1894929..000000000000
--- a/arch/riscv/include/asm/vdso-syscalls.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * Copyright (C) 2017 SiFive
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef _ASM_RISCV_VDSO_SYSCALLS_H
18#define _ASM_RISCV_VDSO_SYSCALLS_H
19
20#ifdef CONFIG_SMP
21
22/* These syscalls are only used by the vDSO and are not in the uapi. */
23#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
24__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
25
26#endif
27
28#endif /* _ASM_RISCV_VDSO_H */
diff --git a/arch/riscv/include/uapi/asm/Kbuild b/arch/riscv/include/uapi/asm/Kbuild
index 5ded96b06352..7e91f4850475 100644
--- a/arch/riscv/include/uapi/asm/Kbuild
+++ b/arch/riscv/include/uapi/asm/Kbuild
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += setup.h 4generic-y += setup.h
5generic-y += unistd.h 5generic-y += unistd.h
6generic-y += bpf_perf_event.h
6generic-y += errno.h 7generic-y += errno.h
7generic-y += fcntl.h 8generic-y += fcntl.h
8generic-y += ioctl.h 9generic-y += ioctl.h
diff --git a/arch/riscv/include/uapi/asm/syscalls.h b/arch/riscv/include/uapi/asm/syscalls.h
new file mode 100644
index 000000000000..818655b0d535
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/syscalls.h
@@ -0,0 +1,26 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (C) 2017 SiFive
4 */
5
6#ifndef _ASM__UAPI__SYSCALLS_H
7#define _ASM__UAPI__SYSCALLS_H
8
9/*
10 * Allows the instruction cache to be flushed from userspace. Despite RISC-V
11 * having a direct 'fence.i' instruction available to userspace (which we
12 * can't trap!), that's not actually viable when running on Linux because the
13 * kernel might schedule a process on another hart. There is no way for
14 * userspace to handle this without invoking the kernel (as it doesn't know the
15 * thread->hart mappings), so we've defined a RISC-V specific system call to
16 * flush the instruction cache.
17 *
18 * __NR_riscv_flush_icache is defined to flush the instruction cache over an
19 * address range, with the flush applying to either all threads or just the
20 * caller. We don't currently do anything with the address range, that's just
21 * in there for forwards compatibility.
22 */
23#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
24__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
25
26#endif
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 20ee86f782a9..7404ec222406 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -196,7 +196,7 @@ handle_syscall:
196 addi s2, s2, 0x4 196 addi s2, s2, 0x4
197 REG_S s2, PT_SEPC(sp) 197 REG_S s2, PT_SEPC(sp)
198 /* System calls run with interrupts enabled */ 198 /* System calls run with interrupts enabled */
199 csrs sstatus, SR_IE 199 csrs sstatus, SR_SIE
200 /* Trace syscalls, but only if requested by the user. */ 200 /* Trace syscalls, but only if requested by the user. */
201 REG_L t0, TASK_TI_FLAGS(tp) 201 REG_L t0, TASK_TI_FLAGS(tp)
202 andi t0, t0, _TIF_SYSCALL_TRACE 202 andi t0, t0, _TIF_SYSCALL_TRACE
@@ -224,8 +224,8 @@ ret_from_syscall:
224 224
225ret_from_exception: 225ret_from_exception:
226 REG_L s0, PT_SSTATUS(sp) 226 REG_L s0, PT_SSTATUS(sp)
227 csrc sstatus, SR_IE 227 csrc sstatus, SR_SIE
228 andi s0, s0, SR_PS 228 andi s0, s0, SR_SPP
229 bnez s0, restore_all 229 bnez s0, restore_all
230 230
231resume_userspace: 231resume_userspace:
@@ -255,7 +255,7 @@ work_pending:
255 bnez s1, work_resched 255 bnez s1, work_resched
256work_notifysig: 256work_notifysig:
257 /* Handle pending signals and notify-resume requests */ 257 /* Handle pending signals and notify-resume requests */
258 csrs sstatus, SR_IE /* Enable interrupts for do_notify_resume() */ 258 csrs sstatus, SR_SIE /* Enable interrupts for do_notify_resume() */
259 move a0, sp /* pt_regs */ 259 move a0, sp /* pt_regs */
260 move a1, s0 /* current_thread_info->flags */ 260 move a1, s0 /* current_thread_info->flags */
261 tail do_notify_resume 261 tail do_notify_resume
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 0d90dcc1fbd3..d74d4adf2d54 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -76,7 +76,7 @@ void show_regs(struct pt_regs *regs)
76void start_thread(struct pt_regs *regs, unsigned long pc, 76void start_thread(struct pt_regs *regs, unsigned long pc,
77 unsigned long sp) 77 unsigned long sp)
78{ 78{
79 regs->sstatus = SR_PIE /* User mode, irqs on */ | SR_FS_INITIAL; 79 regs->sstatus = SR_SPIE /* User mode, irqs on */ | SR_FS_INITIAL;
80 regs->sepc = pc; 80 regs->sepc = pc;
81 regs->sp = sp; 81 regs->sp = sp;
82 set_fs(USER_DS); 82 set_fs(USER_DS);
@@ -110,7 +110,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
110 const register unsigned long gp __asm__ ("gp"); 110 const register unsigned long gp __asm__ ("gp");
111 memset(childregs, 0, sizeof(struct pt_regs)); 111 memset(childregs, 0, sizeof(struct pt_regs));
112 childregs->gp = gp; 112 childregs->gp = gp;
113 childregs->sstatus = SR_PS | SR_PIE; /* Supervisor, irqs on */ 113 childregs->sstatus = SR_SPP | SR_SPIE; /* Supervisor, irqs on */
114 114
115 p->thread.ra = (unsigned long)ret_from_kernel_thread; 115 p->thread.ra = (unsigned long)ret_from_kernel_thread;
116 p->thread.s[0] = usp; /* fn */ 116 p->thread.s[0] = usp; /* fn */
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 8fbb6749910d..cb7b0c63014e 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -38,10 +38,6 @@
38#include <asm/tlbflush.h> 38#include <asm/tlbflush.h>
39#include <asm/thread_info.h> 39#include <asm/thread_info.h>
40 40
41#ifdef CONFIG_HVC_RISCV_SBI
42#include <asm/hvc_riscv_sbi.h>
43#endif
44
45#ifdef CONFIG_DUMMY_CONSOLE 41#ifdef CONFIG_DUMMY_CONSOLE
46struct screen_info screen_info = { 42struct screen_info screen_info = {
47 .orig_video_lines = 30, 43 .orig_video_lines = 30,
@@ -212,13 +208,6 @@ static void __init setup_bootmem(void)
212 208
213void __init setup_arch(char **cmdline_p) 209void __init setup_arch(char **cmdline_p)
214{ 210{
215#if defined(CONFIG_HVC_RISCV_SBI)
216 if (likely(early_console == NULL)) {
217 early_console = &riscv_sbi_early_console_dev;
218 register_console(early_console);
219 }
220#endif
221
222#ifdef CONFIG_CMDLINE_BOOL 211#ifdef CONFIG_CMDLINE_BOOL
223#ifdef CONFIG_CMDLINE_OVERRIDE 212#ifdef CONFIG_CMDLINE_OVERRIDE
224 strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); 213 strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index a2ae936a093e..79c78668258e 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -70,7 +70,7 @@ SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
70 bool local = (flags & SYS_RISCV_FLUSH_ICACHE_LOCAL) != 0; 70 bool local = (flags & SYS_RISCV_FLUSH_ICACHE_LOCAL) != 0;
71 71
72 /* Check the reserved flags. */ 72 /* Check the reserved flags. */
73 if (unlikely(flags & !SYS_RISCV_FLUSH_ICACHE_ALL)) 73 if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL))
74 return -EINVAL; 74 return -EINVAL;
75 75
76 flush_icache_mm(mm, local); 76 flush_icache_mm(mm, local);
diff --git a/arch/riscv/kernel/syscall_table.c b/arch/riscv/kernel/syscall_table.c
index a5bd6401f95e..ade52b903a43 100644
--- a/arch/riscv/kernel/syscall_table.c
+++ b/arch/riscv/kernel/syscall_table.c
@@ -23,5 +23,4 @@
23void *sys_call_table[__NR_syscalls] = { 23void *sys_call_table[__NR_syscalls] = {
24 [0 ... __NR_syscalls - 1] = sys_ni_syscall, 24 [0 ... __NR_syscalls - 1] = sys_ni_syscall,
25#include <asm/unistd.h> 25#include <asm/unistd.h>
26#include <asm/vdso-syscalls.h>
27}; 26};
diff --git a/arch/riscv/kernel/vdso/flush_icache.S b/arch/riscv/kernel/vdso/flush_icache.S
index b0fbad74e873..023e4d4aef58 100644
--- a/arch/riscv/kernel/vdso/flush_icache.S
+++ b/arch/riscv/kernel/vdso/flush_icache.S
@@ -13,7 +13,6 @@
13 13
14#include <linux/linkage.h> 14#include <linux/linkage.h>
15#include <asm/unistd.h> 15#include <asm/unistd.h>
16#include <asm/vdso-syscalls.h>
17 16
18 .text 17 .text
19/* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */ 18/* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index df2ca3c65048..0713f3c67ab4 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -63,7 +63,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
63 goto vmalloc_fault; 63 goto vmalloc_fault;
64 64
65 /* Enable interrupts if they were enabled in the parent context. */ 65 /* Enable interrupts if they were enabled in the parent context. */
66 if (likely(regs->sstatus & SR_PIE)) 66 if (likely(regs->sstatus & SR_SPIE))
67 local_irq_enable(); 67 local_irq_enable();
68 68
69 /* 69 /*
diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild
index eae2c64cf69d..9fdff3fe1a42 100644
--- a/arch/s390/Kbuild
+++ b/arch/s390/Kbuild
@@ -1,3 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0
1obj-y += kernel/ 2obj-y += kernel/
2obj-y += mm/ 3obj-y += mm/
3obj-$(CONFIG_KVM) += kvm/ 4obj-$(CONFIG_KVM) += kvm/
diff --git a/arch/s390/appldata/Makefile b/arch/s390/appldata/Makefile
index 99f1cf071304..b06def4a4f2f 100644
--- a/arch/s390/appldata/Makefile
+++ b/arch/s390/appldata/Makefile
@@ -1,3 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0
1# 2#
2# Makefile for the Linux - z/VM Monitor Stream. 3# Makefile for the Linux - z/VM Monitor Stream.
3# 4#
diff --git a/arch/s390/boot/compressed/vmlinux.scr b/arch/s390/boot/compressed/vmlinux.scr
index f02382ae5c48..42a242597f34 100644
--- a/arch/s390/boot/compressed/vmlinux.scr
+++ b/arch/s390/boot/compressed/vmlinux.scr
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1SECTIONS 2SECTIONS
2{ 3{
3 .rodata.compressed : { 4 .rodata.compressed : {
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index c7de53d8da75..a00c17f761c1 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * Cryptographic API. 3 * Cryptographic API.
3 * 4 *
@@ -16,12 +17,6 @@
16 * Copyright (c) Alan Smithee. 17 * Copyright (c) Alan Smithee.
17 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> 18 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
18 * Copyright (c) Jean-Francois Dive <jef@linuxbe.org> 19 * Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
19 *
20 * This program is free software; you can redistribute it and/or modify it
21 * under the terms of the GNU General Public License as published by the Free
22 * Software Foundation; either version 2 of the License, or (at your option)
23 * any later version.
24 *
25 */ 20 */
26#include <crypto/internal/hash.h> 21#include <crypto/internal/hash.h>
27#include <linux/init.h> 22#include <linux/init.h>
diff --git a/arch/s390/hypfs/Makefile b/arch/s390/hypfs/Makefile
index 2ee25ba252d6..06f601509ce9 100644
--- a/arch/s390/hypfs/Makefile
+++ b/arch/s390/hypfs/Makefile
@@ -1,3 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0
1# 2#
2# Makefile for the linux hypfs filesystem routines. 3# Makefile for the linux hypfs filesystem routines.
3# 4#
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 41c211a4d8b1..048450869328 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -1,3 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0
1generic-y += asm-offsets.h 2generic-y += asm-offsets.h
2generic-y += cacheflush.h 3generic-y += cacheflush.h
3generic-y += clkdev.h 4generic-y += clkdev.h
diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
index a72002056b54..c2cf7bcdef9b 100644
--- a/arch/s390/include/asm/alternative.h
+++ b/arch/s390/include/asm/alternative.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1#ifndef _ASM_S390_ALTERNATIVE_H 2#ifndef _ASM_S390_ALTERNATIVE_H
2#define _ASM_S390_ALTERNATIVE_H 3#define _ASM_S390_ALTERNATIVE_H
3 4
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index c02f4aba88a6..cfce6835b109 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -1,12 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * Adjunct processor (AP) interfaces 3 * Adjunct processor (AP) interfaces
3 * 4 *
4 * Copyright IBM Corp. 2017 5 * Copyright IBM Corp. 2017
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Tony Krowiak <akrowia@linux.vnet.ibm.com> 7 * Author(s): Tony Krowiak <akrowia@linux.vnet.ibm.com>
11 * Martin Schwidefsky <schwidefsky@de.ibm.com> 8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
12 * Harald Freudenberger <freude@de.ibm.com> 9 * Harald Freudenberger <freude@de.ibm.com>
diff --git a/arch/s390/include/asm/bugs.h b/arch/s390/include/asm/bugs.h
index 0f5bd894f4dc..aa42a179be33 100644
--- a/arch/s390/include/asm/bugs.h
+++ b/arch/s390/include/asm/bugs.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * S390 version 3 * S390 version
3 * Copyright IBM Corp. 1999 4 * Copyright IBM Corp. 1999
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index e14f381757f6..c1b0a9ac1dc8 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -207,7 +207,8 @@ struct kvm_s390_sie_block {
207 __u16 ipa; /* 0x0056 */ 207 __u16 ipa; /* 0x0056 */
208 __u32 ipb; /* 0x0058 */ 208 __u32 ipb; /* 0x0058 */
209 __u32 scaoh; /* 0x005c */ 209 __u32 scaoh; /* 0x005c */
210 __u8 reserved60; /* 0x0060 */ 210#define FPF_BPBC 0x20
211 __u8 fpf; /* 0x0060 */
211#define ECB_GS 0x40 212#define ECB_GS 0x40
212#define ECB_TE 0x10 213#define ECB_TE 0x10
213#define ECB_SRSI 0x04 214#define ECB_SRSI 0x04
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index d6c9d1e0dc2d..b9c0e361748b 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -40,6 +40,7 @@ struct pt_regs;
40extern unsigned long perf_instruction_pointer(struct pt_regs *regs); 40extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
41extern unsigned long perf_misc_flags(struct pt_regs *regs); 41extern unsigned long perf_misc_flags(struct pt_regs *regs);
42#define perf_misc_flags(regs) perf_misc_flags(regs) 42#define perf_misc_flags(regs) perf_misc_flags(regs)
43#define perf_arch_bpf_user_pt_regs(regs) &regs->user_regs
43 44
44/* Perf pt_regs extension for sample-data-entry indicators */ 45/* Perf pt_regs extension for sample-data-entry indicators */
45struct perf_sf_sde_regs { 46struct perf_sf_sde_regs {
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 57d7bc92e0b8..0a6b0286c32e 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1264,12 +1264,6 @@ static inline pud_t pud_mkwrite(pud_t pud)
1264 return pud; 1264 return pud;
1265} 1265}
1266 1266
1267#define pud_write pud_write
1268static inline int pud_write(pud_t pud)
1269{
1270 return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
1271}
1272
1273static inline pud_t pud_mkclean(pud_t pud) 1267static inline pud_t pud_mkclean(pud_t pud)
1274{ 1268{
1275 if (pud_large(pud)) { 1269 if (pud_large(pud)) {
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index a3788dafc0e1..6f70d81c40f2 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -74,9 +74,14 @@ enum {
74 */ 74 */
75struct pt_regs 75struct pt_regs
76{ 76{
77 unsigned long args[1]; 77 union {
78 psw_t psw; 78 user_pt_regs user_regs;
79 unsigned long gprs[NUM_GPRS]; 79 struct {
80 unsigned long args[1];
81 psw_t psw;
82 unsigned long gprs[NUM_GPRS];
83 };
84 };
80 unsigned long orig_gpr2; 85 unsigned long orig_gpr2;
81 unsigned int int_code; 86 unsigned int int_code;
82 unsigned int int_parm; 87 unsigned int int_parm;
diff --git a/arch/s390/include/asm/segment.h b/arch/s390/include/asm/segment.h
index 8bfce3475b1c..97a0582b8d0f 100644
--- a/arch/s390/include/asm/segment.h
+++ b/arch/s390/include/asm/segment.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1#ifndef _ASM_SEGMENT_H 2#ifndef _ASM_SEGMENT_H
2#define _ASM_SEGMENT_H 3#define _ASM_SEGMENT_H
3 4
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index ec7b476c1ac5..c61b2cc1a8a8 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -30,21 +30,20 @@ static inline void restore_access_regs(unsigned int *acrs)
30 asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs)); 30 asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs));
31} 31}
32 32
33#define switch_to(prev,next,last) do { \ 33#define switch_to(prev, next, last) do { \
34 if (prev->mm) { \ 34 /* save_fpu_regs() sets the CIF_FPU flag, which enforces \
35 save_fpu_regs(); \ 35 * a restore of the floating point / vector registers as \
36 save_access_regs(&prev->thread.acrs[0]); \ 36 * soon as the next task returns to user space \
37 save_ri_cb(prev->thread.ri_cb); \ 37 */ \
38 save_gs_cb(prev->thread.gs_cb); \ 38 save_fpu_regs(); \
39 } \ 39 save_access_regs(&prev->thread.acrs[0]); \
40 save_ri_cb(prev->thread.ri_cb); \
41 save_gs_cb(prev->thread.gs_cb); \
40 update_cr_regs(next); \ 42 update_cr_regs(next); \
41 if (next->mm) { \ 43 restore_access_regs(&next->thread.acrs[0]); \
42 set_cpu_flag(CIF_FPU); \ 44 restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
43 restore_access_regs(&next->thread.acrs[0]); \ 45 restore_gs_cb(next->thread.gs_cb); \
44 restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \ 46 prev = __switch_to(prev, next); \
45 restore_gs_cb(next->thread.gs_cb); \
46 } \
47 prev = __switch_to(prev,next); \
48} while (0) 47} while (0)
49 48
50#endif /* __ASM_SWITCH_TO_H */ 49#endif /* __ASM_SWITCH_TO_H */
diff --git a/arch/s390/include/asm/vga.h b/arch/s390/include/asm/vga.h
index d375526c261f..605dc46bac5e 100644
--- a/arch/s390/include/asm/vga.h
+++ b/arch/s390/include/asm/vga.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1#ifndef _ASM_S390_VGA_H 2#ifndef _ASM_S390_VGA_H
2#define _ASM_S390_VGA_H 3#define _ASM_S390_VGA_H
3 4
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
index 098f28778a13..92b7c9b3e641 100644
--- a/arch/s390/include/uapi/asm/Kbuild
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -1,3 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0
1# UAPI Header export list 2# UAPI Header export list
2include include/uapi/asm-generic/Kbuild.asm 3include include/uapi/asm-generic/Kbuild.asm
3 4
diff --git a/arch/s390/include/uapi/asm/bpf_perf_event.h b/arch/s390/include/uapi/asm/bpf_perf_event.h
new file mode 100644
index 000000000000..cefe7c7cd4f6
--- /dev/null
+++ b/arch/s390/include/uapi/asm/bpf_perf_event.h
@@ -0,0 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
3#define _UAPI__ASM_BPF_PERF_EVENT_H__
4
5#include <asm/ptrace.h>
6
7typedef user_pt_regs bpf_user_pt_regs_t;
8
9#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index 38535a57fef8..4cdaa55fabfe 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -224,6 +224,7 @@ struct kvm_guest_debug_arch {
224#define KVM_SYNC_RICCB (1UL << 7) 224#define KVM_SYNC_RICCB (1UL << 7)
225#define KVM_SYNC_FPRS (1UL << 8) 225#define KVM_SYNC_FPRS (1UL << 8)
226#define KVM_SYNC_GSCB (1UL << 9) 226#define KVM_SYNC_GSCB (1UL << 9)
227#define KVM_SYNC_BPBC (1UL << 10)
227/* length and alignment of the sdnx as a power of two */ 228/* length and alignment of the sdnx as a power of two */
228#define SDNXC 8 229#define SDNXC 8
229#define SDNXL (1UL << SDNXC) 230#define SDNXL (1UL << SDNXC)
@@ -247,7 +248,9 @@ struct kvm_sync_regs {
247 }; 248 };
248 __u8 reserved[512]; /* for future vector expansion */ 249 __u8 reserved[512]; /* for future vector expansion */
249 __u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */ 250 __u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
250 __u8 padding1[52]; /* riccb needs to be 64byte aligned */ 251 __u8 bpbc : 1; /* bp mode */
252 __u8 reserved2 : 7;
253 __u8 padding1[51]; /* riccb needs to be 64byte aligned */
251 __u8 riccb[64]; /* runtime instrumentation controls block */ 254 __u8 riccb[64]; /* runtime instrumentation controls block */
252 __u8 padding2[192]; /* sdnx needs to be 256byte aligned */ 255 __u8 padding2[192]; /* sdnx needs to be 256byte aligned */
253 union { 256 union {
diff --git a/arch/s390/include/uapi/asm/perf_regs.h b/arch/s390/include/uapi/asm/perf_regs.h
index 7c8564f98205..d17dd9e5d516 100644
--- a/arch/s390/include/uapi/asm/perf_regs.h
+++ b/arch/s390/include/uapi/asm/perf_regs.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
1#ifndef _ASM_S390_PERF_REGS_H 2#ifndef _ASM_S390_PERF_REGS_H
2#define _ASM_S390_PERF_REGS_H 3#define _ASM_S390_PERF_REGS_H
3 4
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h
index 0d23c8ff2900..543dd70e12c8 100644
--- a/arch/s390/include/uapi/asm/ptrace.h
+++ b/arch/s390/include/uapi/asm/ptrace.h
@@ -162,7 +162,7 @@
162#define GPR_SIZE 8 162#define GPR_SIZE 8
163#define CR_SIZE 8 163#define CR_SIZE 8
164 164
165#define STACK_FRAME_OVERHEAD 160 /* size of minimum stack frame */ 165#define STACK_FRAME_OVERHEAD 160 /* size of minimum stack frame */
166 166
167#endif /* __s390x__ */ 167#endif /* __s390x__ */
168 168
@@ -179,17 +179,16 @@
179#define ACR_SIZE 4 179#define ACR_SIZE 4
180 180
181 181
182#define PTRACE_OLDSETOPTIONS 21 182#define PTRACE_OLDSETOPTIONS 21
183 183
184#ifndef __ASSEMBLY__ 184#ifndef __ASSEMBLY__
185#include <linux/stddef.h> 185#include <linux/stddef.h>
186#include <linux/types.h> 186#include <linux/types.h>
187 187
188typedef union 188typedef union {
189{ 189 float f;
190 float f; 190 double d;
191 double d; 191 __u64 ui;
192 __u64 ui;
193 struct 192 struct
194 { 193 {
195 __u32 hi; 194 __u32 hi;
@@ -197,23 +196,21 @@ typedef union
197 } fp; 196 } fp;
198} freg_t; 197} freg_t;
199 198
200typedef struct 199typedef struct {
201{ 200 __u32 fpc;
202 __u32 fpc;
203 __u32 pad; 201 __u32 pad;
204 freg_t fprs[NUM_FPRS]; 202 freg_t fprs[NUM_FPRS];
205} s390_fp_regs; 203} s390_fp_regs;
206 204
207#define FPC_EXCEPTION_MASK 0xF8000000 205#define FPC_EXCEPTION_MASK 0xF8000000
208#define FPC_FLAGS_MASK 0x00F80000 206#define FPC_FLAGS_MASK 0x00F80000
209#define FPC_DXC_MASK 0x0000FF00 207#define FPC_DXC_MASK 0x0000FF00
210#define FPC_RM_MASK 0x00000003 208#define FPC_RM_MASK 0x00000003
211 209
212/* this typedef defines how a Program Status Word looks like */ 210/* this typedef defines how a Program Status Word looks like */
213typedef struct 211typedef struct {
214{ 212 unsigned long mask;
215 unsigned long mask; 213 unsigned long addr;
216 unsigned long addr;
217} __attribute__ ((aligned(8))) psw_t; 214} __attribute__ ((aligned(8))) psw_t;
218 215
219#ifndef __s390x__ 216#ifndef __s390x__
@@ -282,8 +279,7 @@ typedef struct
282/* 279/*
283 * The s390_regs structure is used to define the elf_gregset_t. 280 * The s390_regs structure is used to define the elf_gregset_t.
284 */ 281 */
285typedef struct 282typedef struct {
286{
287 psw_t psw; 283 psw_t psw;
288 unsigned long gprs[NUM_GPRS]; 284 unsigned long gprs[NUM_GPRS];
289 unsigned int acrs[NUM_ACRS]; 285 unsigned int acrs[NUM_ACRS];
@@ -291,24 +287,32 @@ typedef struct
291} s390_regs; 287} s390_regs;
292 288
293/* 289/*
290 * The user_pt_regs structure exports the beginning of
291 * the in-kernel pt_regs structure to user space.
292 */
293typedef struct {
294 unsigned long args[1];
295 psw_t psw;
296 unsigned long gprs[NUM_GPRS];
297} user_pt_regs;
298
299/*
294 * Now for the user space program event recording (trace) definitions. 300 * Now for the user space program event recording (trace) definitions.
295 * The following structures are used only for the ptrace interface, don't 301 * The following structures are used only for the ptrace interface, don't
296 * touch or even look at it if you don't want to modify the user-space 302 * touch or even look at it if you don't want to modify the user-space
297 * ptrace interface. In particular stay away from it for in-kernel PER. 303 * ptrace interface. In particular stay away from it for in-kernel PER.
298 */ 304 */
299typedef struct 305typedef struct {
300{
301 unsigned long cr[NUM_CR_WORDS]; 306 unsigned long cr[NUM_CR_WORDS];
302} per_cr_words; 307} per_cr_words;
303 308
304#define PER_EM_MASK 0xE8000000UL 309#define PER_EM_MASK 0xE8000000UL
305 310
306typedef struct 311typedef struct {
307{
308#ifdef __s390x__ 312#ifdef __s390x__
309 unsigned : 32; 313 unsigned : 32;
310#endif /* __s390x__ */ 314#endif /* __s390x__ */
311 unsigned em_branching : 1; 315 unsigned em_branching : 1;
312 unsigned em_instruction_fetch : 1; 316 unsigned em_instruction_fetch : 1;
313 /* 317 /*
314 * Switching on storage alteration automatically fixes 318 * Switching on storage alteration automatically fixes
@@ -317,44 +321,41 @@ typedef struct
317 unsigned em_storage_alteration : 1; 321 unsigned em_storage_alteration : 1;
318 unsigned em_gpr_alt_unused : 1; 322 unsigned em_gpr_alt_unused : 1;
319 unsigned em_store_real_address : 1; 323 unsigned em_store_real_address : 1;
320 unsigned : 3; 324 unsigned : 3;
321 unsigned branch_addr_ctl : 1; 325 unsigned branch_addr_ctl : 1;
322 unsigned : 1; 326 unsigned : 1;
323 unsigned storage_alt_space_ctl : 1; 327 unsigned storage_alt_space_ctl : 1;
324 unsigned : 21; 328 unsigned : 21;
325 unsigned long starting_addr; 329 unsigned long starting_addr;
326 unsigned long ending_addr; 330 unsigned long ending_addr;
327} per_cr_bits; 331} per_cr_bits;
328 332
329typedef struct 333typedef struct {
330{
331 unsigned short perc_atmid; 334 unsigned short perc_atmid;
332 unsigned long address; 335 unsigned long address;
333 unsigned char access_id; 336 unsigned char access_id;
334} per_lowcore_words; 337} per_lowcore_words;
335 338
336typedef struct 339typedef struct {
337{ 340 unsigned perc_branching : 1;
338 unsigned perc_branching : 1;
339 unsigned perc_instruction_fetch : 1; 341 unsigned perc_instruction_fetch : 1;
340 unsigned perc_storage_alteration : 1; 342 unsigned perc_storage_alteration : 1;
341 unsigned perc_gpr_alt_unused : 1; 343 unsigned perc_gpr_alt_unused : 1;
342 unsigned perc_store_real_address : 1; 344 unsigned perc_store_real_address : 1;
343 unsigned : 3; 345 unsigned : 3;
344 unsigned atmid_psw_bit_31 : 1; 346 unsigned atmid_psw_bit_31 : 1;
345 unsigned atmid_validity_bit : 1; 347 unsigned atmid_validity_bit : 1;
346 unsigned atmid_psw_bit_32 : 1; 348 unsigned atmid_psw_bit_32 : 1;
347 unsigned atmid_psw_bit_5 : 1; 349 unsigned atmid_psw_bit_5 : 1;
348 unsigned atmid_psw_bit_16 : 1; 350 unsigned atmid_psw_bit_16 : 1;
349 unsigned atmid_psw_bit_17 : 1; 351 unsigned atmid_psw_bit_17 : 1;
350 unsigned si : 2; 352 unsigned si : 2;
351 unsigned long address; 353 unsigned long address;
352 unsigned : 4; 354 unsigned : 4;
353 unsigned access_id : 4; 355 unsigned access_id : 4;
354} per_lowcore_bits; 356} per_lowcore_bits;
355 357
356typedef struct 358typedef struct {
357{
358 union { 359 union {
359 per_cr_words words; 360 per_cr_words words;
360 per_cr_bits bits; 361 per_cr_bits bits;
@@ -364,9 +365,9 @@ typedef struct
364 * the kernel always sets them to zero. To enable single 365 * the kernel always sets them to zero. To enable single
365 * stepping use ptrace(PTRACE_SINGLESTEP) instead. 366 * stepping use ptrace(PTRACE_SINGLESTEP) instead.
366 */ 367 */
367 unsigned single_step : 1; 368 unsigned single_step : 1;
368 unsigned instruction_fetch : 1; 369 unsigned instruction_fetch : 1;
369 unsigned : 30; 370 unsigned : 30;
370 /* 371 /*
371 * These addresses are copied into cr10 & cr11 if single 372 * These addresses are copied into cr10 & cr11 if single
372 * stepping is switched off 373 * stepping is switched off
@@ -376,11 +377,10 @@ typedef struct
376 union { 377 union {
377 per_lowcore_words words; 378 per_lowcore_words words;
378 per_lowcore_bits bits; 379 per_lowcore_bits bits;
379 } lowcore; 380 } lowcore;
380} per_struct; 381} per_struct;
381 382
382typedef struct 383typedef struct {
383{
384 unsigned int len; 384 unsigned int len;
385 unsigned long kernel_addr; 385 unsigned long kernel_addr;
386 unsigned long process_addr; 386 unsigned long process_addr;
@@ -390,12 +390,12 @@ typedef struct
390 * S/390 specific non posix ptrace requests. I chose unusual values so 390 * S/390 specific non posix ptrace requests. I chose unusual values so
391 * they are unlikely to clash with future ptrace definitions. 391 * they are unlikely to clash with future ptrace definitions.
392 */ 392 */
393#define PTRACE_PEEKUSR_AREA 0x5000 393#define PTRACE_PEEKUSR_AREA 0x5000
394#define PTRACE_POKEUSR_AREA 0x5001 394#define PTRACE_POKEUSR_AREA 0x5001
395#define PTRACE_PEEKTEXT_AREA 0x5002 395#define PTRACE_PEEKTEXT_AREA 0x5002
396#define PTRACE_PEEKDATA_AREA 0x5003 396#define PTRACE_PEEKDATA_AREA 0x5003
397#define PTRACE_POKETEXT_AREA 0x5004 397#define PTRACE_POKETEXT_AREA 0x5004
398#define PTRACE_POKEDATA_AREA 0x5005 398#define PTRACE_POKEDATA_AREA 0x5005
399#define PTRACE_GET_LAST_BREAK 0x5006 399#define PTRACE_GET_LAST_BREAK 0x5006
400#define PTRACE_PEEK_SYSTEM_CALL 0x5007 400#define PTRACE_PEEK_SYSTEM_CALL 0x5007
401#define PTRACE_POKE_SYSTEM_CALL 0x5008 401#define PTRACE_POKE_SYSTEM_CALL 0x5008
@@ -413,21 +413,19 @@ typedef struct
413 * PT_PROT definition is loosely based on hppa bsd definition in 413 * PT_PROT definition is loosely based on hppa bsd definition in
414 * gdb/hppab-nat.c 414 * gdb/hppab-nat.c
415 */ 415 */
416#define PTRACE_PROT 21 416#define PTRACE_PROT 21
417 417
418typedef enum 418typedef enum {
419{
420 ptprot_set_access_watchpoint, 419 ptprot_set_access_watchpoint,
421 ptprot_set_write_watchpoint, 420 ptprot_set_write_watchpoint,
422 ptprot_disable_watchpoint 421 ptprot_disable_watchpoint
423} ptprot_flags; 422} ptprot_flags;
424 423
425typedef struct 424typedef struct {
426{
427 unsigned long lowaddr; 425 unsigned long lowaddr;
428 unsigned long hiaddr; 426 unsigned long hiaddr;
429 ptprot_flags prot; 427 ptprot_flags prot;
430} ptprot_area; 428} ptprot_area;
431 429
432/* Sequence of bytes for breakpoint illegal instruction. */ 430/* Sequence of bytes for breakpoint illegal instruction. */
433#define S390_BREAKPOINT {0x0,0x1} 431#define S390_BREAKPOINT {0x0,0x1}
@@ -439,8 +437,7 @@ typedef struct
439 * The user_regs_struct defines the way the user registers are 437 * The user_regs_struct defines the way the user registers are
440 * store on the stack for signal handling. 438 * store on the stack for signal handling.
441 */ 439 */
442struct user_regs_struct 440struct user_regs_struct {
443{
444 psw_t psw; 441 psw_t psw;
445 unsigned long gprs[NUM_GPRS]; 442 unsigned long gprs[NUM_GPRS];
446 unsigned int acrs[NUM_ACRS]; 443 unsigned int acrs[NUM_ACRS];
diff --git a/arch/s390/include/uapi/asm/sthyi.h b/arch/s390/include/uapi/asm/sthyi.h
index ec113db4eb7e..b1b022316983 100644
--- a/arch/s390/include/uapi/asm/sthyi.h
+++ b/arch/s390/include/uapi/asm/sthyi.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
1#ifndef _UAPI_ASM_STHYI_H 2#ifndef _UAPI_ASM_STHYI_H
2#define _UAPI_ASM_STHYI_H 3#define _UAPI_ASM_STHYI_H
3 4
diff --git a/arch/s390/include/uapi/asm/virtio-ccw.h b/arch/s390/include/uapi/asm/virtio-ccw.h
index 3a77833c74dc..2b605f7e8483 100644
--- a/arch/s390/include/uapi/asm/virtio-ccw.h
+++ b/arch/s390/include/uapi/asm/virtio-ccw.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
2/* 2/*
3 * Definitions for virtio-ccw devices. 3 * Definitions for virtio-ccw devices.
4 * 4 *
diff --git a/arch/s390/include/uapi/asm/vmcp.h b/arch/s390/include/uapi/asm/vmcp.h
index 4caf71714a55..aeaaa030030e 100644
--- a/arch/s390/include/uapi/asm/vmcp.h
+++ b/arch/s390/include/uapi/asm/vmcp.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
1/* 2/*
2 * Copyright IBM Corp. 2004, 2005 3 * Copyright IBM Corp. 2004, 2005
3 * Interface implementation for communication with the z/VM control program 4 * Interface implementation for communication with the z/VM control program
diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
index 315986a06cf5..574e77622c04 100644
--- a/arch/s390/kernel/alternative.c
+++ b/arch/s390/kernel/alternative.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0
1#include <linux/module.h> 2#include <linux/module.h>
2#include <asm/alternative.h> 3#include <asm/alternative.h>
3#include <asm/facility.h> 4#include <asm/facility.h>
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index f04db3779b34..59eea9c65d3e 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -263,6 +263,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setgroups16, int, gidsetsize, u16 __user *, grouplis
263 return retval; 263 return retval;
264 } 264 }
265 265
266 groups_sort(group_info);
266 retval = set_current_groups(group_info); 267 retval = set_current_groups(group_info);
267 put_group_info(group_info); 268 put_group_info(group_info);
268 269
diff --git a/arch/s390/kernel/perf_regs.c b/arch/s390/kernel/perf_regs.c
index f8603ebed669..54e2d634b849 100644
--- a/arch/s390/kernel/perf_regs.c
+++ b/arch/s390/kernel/perf_regs.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0
1#include <linux/perf_event.h> 2#include <linux/perf_event.h>
2#include <linux/perf_regs.h> 3#include <linux/perf_regs.h>
3#include <linux/kernel.h> 4#include <linux/kernel.h>
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 308a7b63348b..f7fc63385553 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -370,10 +370,10 @@ SYSCALL(sys_recvmmsg,compat_sys_recvmmsg)
370SYSCALL(sys_sendmmsg,compat_sys_sendmmsg) 370SYSCALL(sys_sendmmsg,compat_sys_sendmmsg)
371SYSCALL(sys_socket,sys_socket) 371SYSCALL(sys_socket,sys_socket)
372SYSCALL(sys_socketpair,compat_sys_socketpair) /* 360 */ 372SYSCALL(sys_socketpair,compat_sys_socketpair) /* 360 */
373SYSCALL(sys_bind,sys_bind) 373SYSCALL(sys_bind,compat_sys_bind)
374SYSCALL(sys_connect,sys_connect) 374SYSCALL(sys_connect,compat_sys_connect)
375SYSCALL(sys_listen,sys_listen) 375SYSCALL(sys_listen,sys_listen)
376SYSCALL(sys_accept4,sys_accept4) 376SYSCALL(sys_accept4,compat_sys_accept4)
377SYSCALL(sys_getsockopt,compat_sys_getsockopt) /* 365 */ 377SYSCALL(sys_getsockopt,compat_sys_getsockopt) /* 365 */
378SYSCALL(sys_setsockopt,compat_sys_setsockopt) 378SYSCALL(sys_setsockopt,compat_sys_setsockopt)
379SYSCALL(sys_getsockname,compat_sys_getsockname) 379SYSCALL(sys_getsockname,compat_sys_getsockname)
diff --git a/arch/s390/kernel/vdso64/note.S b/arch/s390/kernel/vdso64/note.S
index 79a071e4357e..db19d0680a0a 100644
--- a/arch/s390/kernel/vdso64/note.S
+++ b/arch/s390/kernel/vdso64/note.S
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. 3 * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
3 * Here we can supply some information useful to userland. 4 * Here we can supply some information useful to userland.
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index 6048b1c6e580..05ee90a5ea08 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -1,10 +1,7 @@
1# SPDX-License-Identifier: GPL-2.0
1# Makefile for kernel virtual machines on s390 2# Makefile for kernel virtual machines on s390
2# 3#
3# Copyright IBM Corp. 2008 4# Copyright IBM Corp. 2008
4#
5# This program is free software; you can redistribute it and/or modify
6# it under the terms of the GNU General Public License (version 2 only)
7# as published by the Free Software Foundation.
8 5
9KVM := ../../../virt/kvm 6KVM := ../../../virt/kvm
10common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqchip.o $(KVM)/vfio.o 7common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqchip.o $(KVM)/vfio.o
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index d93a2c0474bf..89aa114a2cba 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -1,12 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * handling diagnose instructions 3 * handling diagnose instructions
3 * 4 *
4 * Copyright IBM Corp. 2008, 2011 5 * Copyright IBM Corp. 2008, 2011
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com> 8 * Christian Borntraeger <borntraeger@de.ibm.com>
12 */ 9 */
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index bec42b852246..f4c51756c462 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -1,12 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * access guest memory 3 * access guest memory
3 * 4 *
4 * Copyright IBM Corp. 2008, 2014 5 * Copyright IBM Corp. 2008, 2014
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 */ 8 */
12 9
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c
index bcbd86621d01..b5f3e82006d0 100644
--- a/arch/s390/kvm/guestdbg.c
+++ b/arch/s390/kvm/guestdbg.c
@@ -1,12 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * kvm guest debug support 3 * kvm guest debug support
3 * 4 *
4 * Copyright IBM Corp. 2014 5 * Copyright IBM Corp. 2014
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com> 7 * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
11 */ 8 */
12#include <linux/kvm_host.h> 9#include <linux/kvm_host.h>
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 8fe034beb623..9c7d70715862 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -1,12 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * in-kernel handling for sie intercepts 3 * in-kernel handling for sie intercepts
3 * 4 *
4 * Copyright IBM Corp. 2008, 2014 5 * Copyright IBM Corp. 2008, 2014
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com> 8 * Christian Borntraeger <borntraeger@de.ibm.com>
12 */ 9 */
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index fa557372d600..024ad8bcc516 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1,12 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * handling kvm guest interrupts 3 * handling kvm guest interrupts
3 * 4 *
4 * Copyright IBM Corp. 2008, 2015 5 * Copyright IBM Corp. 2008, 2015
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 */ 8 */
12 9
diff --git a/arch/s390/kvm/irq.h b/arch/s390/kvm/irq.h
index d98e4159643d..484608c71dd0 100644
--- a/arch/s390/kvm/irq.h
+++ b/arch/s390/kvm/irq.h
@@ -1,12 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * s390 irqchip routines 3 * s390 irqchip routines
3 * 4 *
4 * Copyright IBM Corp. 2014 5 * Copyright IBM Corp. 2014
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> 7 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
11 */ 8 */
12#ifndef __KVM_IRQ_H 9#ifndef __KVM_IRQ_H
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 9614aea5839b..1371dff2b90d 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1,11 +1,8 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * hosting zSeries kernel virtual machines 3 * hosting IBM Z kernel virtual machines (s390x)
3 * 4 *
4 * Copyright IBM Corp. 2008, 2009 5 * Copyright IBM Corp. 2008, 2017
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 * 6 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com> 8 * Christian Borntraeger <borntraeger@de.ibm.com>
@@ -424,6 +421,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
424 case KVM_CAP_S390_GS: 421 case KVM_CAP_S390_GS:
425 r = test_facility(133); 422 r = test_facility(133);
426 break; 423 break;
424 case KVM_CAP_S390_BPB:
425 r = test_facility(82);
426 break;
427 default: 427 default:
428 r = 0; 428 r = 0;
429 } 429 }
@@ -769,7 +769,7 @@ static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
769 769
770/* 770/*
771 * Must be called with kvm->srcu held to avoid races on memslots, and with 771 * Must be called with kvm->srcu held to avoid races on memslots, and with
772 * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration. 772 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
773 */ 773 */
774static int kvm_s390_vm_start_migration(struct kvm *kvm) 774static int kvm_s390_vm_start_migration(struct kvm *kvm)
775{ 775{
@@ -795,11 +795,12 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
795 795
796 if (kvm->arch.use_cmma) { 796 if (kvm->arch.use_cmma) {
797 /* 797 /*
798 * Get the last slot. They should be sorted by base_gfn, so the 798 * Get the first slot. They are reverse sorted by base_gfn, so
799 * last slot is also the one at the end of the address space. 799 * the first slot is also the one at the end of the address
800 * We have verified above that at least one slot is present. 800 * space. We have verified above that at least one slot is
801 * present.
801 */ 802 */
802 ms = slots->memslots + slots->used_slots - 1; 803 ms = slots->memslots;
803 /* round up so we only use full longs */ 804 /* round up so we only use full longs */
804 ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG); 805 ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
805 /* allocate enough bytes to store all the bits */ 806 /* allocate enough bytes to store all the bits */
@@ -824,7 +825,7 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
824} 825}
825 826
826/* 827/*
827 * Must be called with kvm->lock to avoid races with ourselves and 828 * Must be called with kvm->slots_lock to avoid races with ourselves and
828 * kvm_s390_vm_start_migration. 829 * kvm_s390_vm_start_migration.
829 */ 830 */
830static int kvm_s390_vm_stop_migration(struct kvm *kvm) 831static int kvm_s390_vm_stop_migration(struct kvm *kvm)
@@ -839,6 +840,8 @@ static int kvm_s390_vm_stop_migration(struct kvm *kvm)
839 840
840 if (kvm->arch.use_cmma) { 841 if (kvm->arch.use_cmma) {
841 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION); 842 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
843 /* We have to wait for the essa emulation to finish */
844 synchronize_srcu(&kvm->srcu);
842 vfree(mgs->pgste_bitmap); 845 vfree(mgs->pgste_bitmap);
843 } 846 }
844 kfree(mgs); 847 kfree(mgs);
@@ -848,14 +851,12 @@ static int kvm_s390_vm_stop_migration(struct kvm *kvm)
848static int kvm_s390_vm_set_migration(struct kvm *kvm, 851static int kvm_s390_vm_set_migration(struct kvm *kvm,
849 struct kvm_device_attr *attr) 852 struct kvm_device_attr *attr)
850{ 853{
851 int idx, res = -ENXIO; 854 int res = -ENXIO;
852 855
853 mutex_lock(&kvm->lock); 856 mutex_lock(&kvm->slots_lock);
854 switch (attr->attr) { 857 switch (attr->attr) {
855 case KVM_S390_VM_MIGRATION_START: 858 case KVM_S390_VM_MIGRATION_START:
856 idx = srcu_read_lock(&kvm->srcu);
857 res = kvm_s390_vm_start_migration(kvm); 859 res = kvm_s390_vm_start_migration(kvm);
858 srcu_read_unlock(&kvm->srcu, idx);
859 break; 860 break;
860 case KVM_S390_VM_MIGRATION_STOP: 861 case KVM_S390_VM_MIGRATION_STOP:
861 res = kvm_s390_vm_stop_migration(kvm); 862 res = kvm_s390_vm_stop_migration(kvm);
@@ -863,7 +864,7 @@ static int kvm_s390_vm_set_migration(struct kvm *kvm,
863 default: 864 default:
864 break; 865 break;
865 } 866 }
866 mutex_unlock(&kvm->lock); 867 mutex_unlock(&kvm->slots_lock);
867 868
868 return res; 869 return res;
869} 870}
@@ -1753,7 +1754,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
1753 r = -EFAULT; 1754 r = -EFAULT;
1754 if (copy_from_user(&args, argp, sizeof(args))) 1755 if (copy_from_user(&args, argp, sizeof(args)))
1755 break; 1756 break;
1757 mutex_lock(&kvm->slots_lock);
1756 r = kvm_s390_get_cmma_bits(kvm, &args); 1758 r = kvm_s390_get_cmma_bits(kvm, &args);
1759 mutex_unlock(&kvm->slots_lock);
1757 if (!r) { 1760 if (!r) {
1758 r = copy_to_user(argp, &args, sizeof(args)); 1761 r = copy_to_user(argp, &args, sizeof(args));
1759 if (r) 1762 if (r)
@@ -1767,7 +1770,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
1767 r = -EFAULT; 1770 r = -EFAULT;
1768 if (copy_from_user(&args, argp, sizeof(args))) 1771 if (copy_from_user(&args, argp, sizeof(args)))
1769 break; 1772 break;
1773 mutex_lock(&kvm->slots_lock);
1770 r = kvm_s390_set_cmma_bits(kvm, &args); 1774 r = kvm_s390_set_cmma_bits(kvm, &args);
1775 mutex_unlock(&kvm->slots_lock);
1771 break; 1776 break;
1772 } 1777 }
1773 default: 1778 default:
@@ -2200,6 +2205,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
2200 kvm_s390_set_prefix(vcpu, 0); 2205 kvm_s390_set_prefix(vcpu, 0);
2201 if (test_kvm_facility(vcpu->kvm, 64)) 2206 if (test_kvm_facility(vcpu->kvm, 64))
2202 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB; 2207 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
2208 if (test_kvm_facility(vcpu->kvm, 82))
2209 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
2203 if (test_kvm_facility(vcpu->kvm, 133)) 2210 if (test_kvm_facility(vcpu->kvm, 133))
2204 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB; 2211 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
2205 /* fprs can be synchronized via vrs, even if the guest has no vx. With 2212 /* fprs can be synchronized via vrs, even if the guest has no vx. With
@@ -2341,6 +2348,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
2341 current->thread.fpu.fpc = 0; 2348 current->thread.fpu.fpc = 0;
2342 vcpu->arch.sie_block->gbea = 1; 2349 vcpu->arch.sie_block->gbea = 1;
2343 vcpu->arch.sie_block->pp = 0; 2350 vcpu->arch.sie_block->pp = 0;
2351 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
2344 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; 2352 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2345 kvm_clear_async_pf_completion_queue(vcpu); 2353 kvm_clear_async_pf_completion_queue(vcpu);
2346 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) 2354 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
@@ -3300,6 +3308,11 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3300 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; 3308 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3301 vcpu->arch.gs_enabled = 1; 3309 vcpu->arch.gs_enabled = 1;
3302 } 3310 }
3311 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
3312 test_kvm_facility(vcpu->kvm, 82)) {
3313 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
3314 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
3315 }
3303 save_access_regs(vcpu->arch.host_acrs); 3316 save_access_regs(vcpu->arch.host_acrs);
3304 restore_access_regs(vcpu->run->s.regs.acrs); 3317 restore_access_regs(vcpu->run->s.regs.acrs);
3305 /* save host (userspace) fprs/vrs */ 3318 /* save host (userspace) fprs/vrs */
@@ -3346,6 +3359,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3346 kvm_run->s.regs.pft = vcpu->arch.pfault_token; 3359 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
3347 kvm_run->s.regs.pfs = vcpu->arch.pfault_select; 3360 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
3348 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; 3361 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
3362 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
3349 save_access_regs(vcpu->run->s.regs.acrs); 3363 save_access_regs(vcpu->run->s.regs.acrs);
3350 restore_access_regs(vcpu->arch.host_acrs); 3364 restore_access_regs(vcpu->arch.host_acrs);
3351 /* Save guest register state */ 3365 /* Save guest register state */
@@ -3808,6 +3822,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
3808 r = -EINVAL; 3822 r = -EINVAL;
3809 break; 3823 break;
3810 } 3824 }
3825 /* do not use irq_state.flags, it will break old QEMUs */
3811 r = kvm_s390_set_irq_state(vcpu, 3826 r = kvm_s390_set_irq_state(vcpu,
3812 (void __user *) irq_state.buf, 3827 (void __user *) irq_state.buf,
3813 irq_state.len); 3828 irq_state.len);
@@ -3823,6 +3838,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
3823 r = -EINVAL; 3838 r = -EINVAL;
3824 break; 3839 break;
3825 } 3840 }
3841 /* do not use irq_state.flags, it will break old QEMUs */
3826 r = kvm_s390_get_irq_state(vcpu, 3842 r = kvm_s390_get_irq_state(vcpu,
3827 (__u8 __user *) irq_state.buf, 3843 (__u8 __user *) irq_state.buf,
3828 irq_state.len); 3844 irq_state.len);
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 10d65dfbc306..5e46ba429bcb 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -1,12 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * definition for kvm on s390 3 * definition for kvm on s390
3 * 4 *
4 * Copyright IBM Corp. 2008, 2009 5 * Copyright IBM Corp. 2008, 2009
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com> 8 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Christian Ehrhardt <ehrhardt@de.ibm.com> 9 * Christian Ehrhardt <ehrhardt@de.ibm.com>
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index c954ac49eee4..0714bfa56da0 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -1,12 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * handling privileged instructions 3 * handling privileged instructions
3 * 4 *
4 * Copyright IBM Corp. 2008, 2013 5 * Copyright IBM Corp. 2008, 2013
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com> 8 * Christian Borntraeger <borntraeger@de.ibm.com>
12 */ 9 */
@@ -235,8 +232,6 @@ static int try_handle_skey(struct kvm_vcpu *vcpu)
235 VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation"); 232 VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
236 return -EAGAIN; 233 return -EAGAIN;
237 } 234 }
238 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
239 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
240 return 0; 235 return 0;
241} 236}
242 237
@@ -247,6 +242,9 @@ static int handle_iske(struct kvm_vcpu *vcpu)
247 int reg1, reg2; 242 int reg1, reg2;
248 int rc; 243 int rc;
249 244
245 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
246 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
247
250 rc = try_handle_skey(vcpu); 248 rc = try_handle_skey(vcpu);
251 if (rc) 249 if (rc)
252 return rc != -EAGAIN ? rc : 0; 250 return rc != -EAGAIN ? rc : 0;
@@ -276,6 +274,9 @@ static int handle_rrbe(struct kvm_vcpu *vcpu)
276 int reg1, reg2; 274 int reg1, reg2;
277 int rc; 275 int rc;
278 276
277 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
278 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
279
279 rc = try_handle_skey(vcpu); 280 rc = try_handle_skey(vcpu);
280 if (rc) 281 if (rc)
281 return rc != -EAGAIN ? rc : 0; 282 return rc != -EAGAIN ? rc : 0;
@@ -311,6 +312,9 @@ static int handle_sske(struct kvm_vcpu *vcpu)
311 int reg1, reg2; 312 int reg1, reg2;
312 int rc; 313 int rc;
313 314
315 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
316 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
317
314 rc = try_handle_skey(vcpu); 318 rc = try_handle_skey(vcpu);
315 if (rc) 319 if (rc)
316 return rc != -EAGAIN ? rc : 0; 320 return rc != -EAGAIN ? rc : 0;
@@ -1002,7 +1006,7 @@ static inline int do_essa(struct kvm_vcpu *vcpu, const int orc)
1002 cbrlo[entries] = gfn << PAGE_SHIFT; 1006 cbrlo[entries] = gfn << PAGE_SHIFT;
1003 } 1007 }
1004 1008
1005 if (orc) { 1009 if (orc && gfn < ms->bitmap_size) {
1006 /* increment only if we are really flipping the bit to 1 */ 1010 /* increment only if we are really flipping the bit to 1 */
1007 if (!test_and_set_bit(gfn, ms->pgste_bitmap)) 1011 if (!test_and_set_bit(gfn, ms->pgste_bitmap))
1008 atomic64_inc(&ms->dirty_pages); 1012 atomic64_inc(&ms->dirty_pages);
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 9d592ef4104b..c1f5cde2c878 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -1,12 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * handling interprocessor communication 3 * handling interprocessor communication
3 * 4 *
4 * Copyright IBM Corp. 2008, 2013 5 * Copyright IBM Corp. 2008, 2013
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com> 8 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Christian Ehrhardt <ehrhardt@de.ibm.com> 9 * Christian Ehrhardt <ehrhardt@de.ibm.com>
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index a311938b63b3..751348348477 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -1,12 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * kvm nested virtualization support for s390x 3 * kvm nested virtualization support for s390x
3 * 4 *
4 * Copyright IBM Corp. 2016 5 * Copyright IBM Corp. 2016
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com> 7 * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
11 */ 8 */
12#include <linux/vmalloc.h> 9#include <linux/vmalloc.h>
@@ -226,6 +223,12 @@ static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
226 memcpy(scb_o->gcr, scb_s->gcr, 128); 223 memcpy(scb_o->gcr, scb_s->gcr, 128);
227 scb_o->pp = scb_s->pp; 224 scb_o->pp = scb_s->pp;
228 225
226 /* branch prediction */
227 if (test_kvm_facility(vcpu->kvm, 82)) {
228 scb_o->fpf &= ~FPF_BPBC;
229 scb_o->fpf |= scb_s->fpf & FPF_BPBC;
230 }
231
229 /* interrupt intercept */ 232 /* interrupt intercept */
230 switch (scb_s->icptcode) { 233 switch (scb_s->icptcode) {
231 case ICPT_PROGI: 234 case ICPT_PROGI:
@@ -268,6 +271,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
268 scb_s->ecb3 = 0; 271 scb_s->ecb3 = 0;
269 scb_s->ecd = 0; 272 scb_s->ecd = 0;
270 scb_s->fac = 0; 273 scb_s->fac = 0;
274 scb_s->fpf = 0;
271 275
272 rc = prepare_cpuflags(vcpu, vsie_page); 276 rc = prepare_cpuflags(vcpu, vsie_page);
273 if (rc) 277 if (rc)
@@ -327,6 +331,9 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
327 prefix_unmapped(vsie_page); 331 prefix_unmapped(vsie_page);
328 scb_s->ecb |= scb_o->ecb & ECB_TE; 332 scb_s->ecb |= scb_o->ecb & ECB_TE;
329 } 333 }
334 /* branch prediction */
335 if (test_kvm_facility(vcpu->kvm, 82))
336 scb_s->fpf |= scb_o->fpf & FPF_BPBC;
330 /* SIMD */ 337 /* SIMD */
331 if (test_kvm_facility(vcpu->kvm, 129)) { 338 if (test_kvm_facility(vcpu->kvm, 129)) {
332 scb_s->eca |= scb_o->eca & ECA_VX; 339 scb_s->eca |= scb_o->eca & ECA_VX;
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index cae5a1e16cbd..c4f8039a35e8 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -89,11 +89,11 @@ EXPORT_SYMBOL(enable_sacf_uaccess);
89 89
90void disable_sacf_uaccess(mm_segment_t old_fs) 90void disable_sacf_uaccess(mm_segment_t old_fs)
91{ 91{
92 current->thread.mm_segment = old_fs;
92 if (old_fs == USER_DS && test_facility(27)) { 93 if (old_fs == USER_DS && test_facility(27)) {
93 __ctl_load(S390_lowcore.user_asce, 1, 1); 94 __ctl_load(S390_lowcore.user_asce, 1, 1);
94 clear_cpu_flag(CIF_ASCE_PRIMARY); 95 clear_cpu_flag(CIF_ASCE_PRIMARY);
95 } 96 }
96 current->thread.mm_segment = old_fs;
97} 97}
98EXPORT_SYMBOL(disable_sacf_uaccess); 98EXPORT_SYMBOL(disable_sacf_uaccess);
99 99
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 434a9564917b..cb364153c43c 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -83,8 +83,6 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
83 83
84 /* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */ 84 /* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
85 VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE); 85 VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
86 if (end >= TASK_SIZE_MAX)
87 return -ENOMEM;
88 rc = 0; 86 rc = 0;
89 notify = 0; 87 notify = 0;
90 while (mm->context.asce_limit < end) { 88 while (mm->context.asce_limit < end) {
diff --git a/arch/s390/net/Makefile b/arch/s390/net/Makefile
index 90568c33ddb0..e0d5f245e42b 100644
--- a/arch/s390/net/Makefile
+++ b/arch/s390/net/Makefile
@@ -1,3 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0
1# 2#
2# Arch-specific network modules 3# Arch-specific network modules
3# 4#
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index e81c16838b90..9557d8b516df 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -55,8 +55,7 @@ struct bpf_jit {
55#define SEEN_LITERAL 8 /* code uses literals */ 55#define SEEN_LITERAL 8 /* code uses literals */
56#define SEEN_FUNC 16 /* calls C functions */ 56#define SEEN_FUNC 16 /* calls C functions */
57#define SEEN_TAIL_CALL 32 /* code uses tail calls */ 57#define SEEN_TAIL_CALL 32 /* code uses tail calls */
58#define SEEN_SKB_CHANGE 64 /* code changes skb data */ 58#define SEEN_REG_AX 64 /* code uses constant blinding */
59#define SEEN_REG_AX 128 /* code uses constant blinding */
60#define SEEN_STACK (SEEN_FUNC | SEEN_MEM | SEEN_SKB) 59#define SEEN_STACK (SEEN_FUNC | SEEN_MEM | SEEN_SKB)
61 60
62/* 61/*
@@ -448,12 +447,12 @@ static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
448 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, 447 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
449 REG_15, 152); 448 REG_15, 152);
450 } 449 }
451 if (jit->seen & SEEN_SKB) 450 if (jit->seen & SEEN_SKB) {
452 emit_load_skb_data_hlen(jit); 451 emit_load_skb_data_hlen(jit);
453 if (jit->seen & SEEN_SKB_CHANGE)
454 /* stg %b1,ST_OFF_SKBP(%r0,%r15) */ 452 /* stg %b1,ST_OFF_SKBP(%r0,%r15) */
455 EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15, 453 EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15,
456 STK_OFF_SKBP); 454 STK_OFF_SKBP);
455 }
457} 456}
458 457
459/* 458/*
@@ -983,8 +982,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
983 EMIT2(0x0d00, REG_14, REG_W1); 982 EMIT2(0x0d00, REG_14, REG_W1);
984 /* lgr %b0,%r2: load return value into %b0 */ 983 /* lgr %b0,%r2: load return value into %b0 */
985 EMIT4(0xb9040000, BPF_REG_0, REG_2); 984 EMIT4(0xb9040000, BPF_REG_0, REG_2);
986 if (bpf_helper_changes_pkt_data((void *)func)) { 985 if ((jit->seen & SEEN_SKB) &&
987 jit->seen |= SEEN_SKB_CHANGE; 986 bpf_helper_changes_pkt_data((void *)func)) {
988 /* lg %b1,ST_OFF_SKBP(%r15) */ 987 /* lg %b1,ST_OFF_SKBP(%r15) */
989 EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0, 988 EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0,
990 REG_15, STK_OFF_SKBP); 989 REG_15, STK_OFF_SKBP);
diff --git a/arch/s390/numa/Makefile b/arch/s390/numa/Makefile
index f94ecaffa71b..66c2dff74895 100644
--- a/arch/s390/numa/Makefile
+++ b/arch/s390/numa/Makefile
@@ -1,3 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0
1obj-y += numa.o 2obj-y += numa.o
2obj-y += toptree.o 3obj-y += toptree.o
3obj-$(CONFIG_NUMA_EMU) += mode_emu.o 4obj-$(CONFIG_NUMA_EMU) += mode_emu.o
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile
index 805d8b29193a..22d0871291ee 100644
--- a/arch/s390/pci/Makefile
+++ b/arch/s390/pci/Makefile
@@ -1,3 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0
1# 2#
2# Makefile for the s390 PCI subsystem. 3# Makefile for the s390 PCI subsystem.
3# 4#
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index f7aa5a77827e..2d15d84c20ed 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -181,6 +181,9 @@ out_unlock:
181static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr, 181static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
182 size_t size, int flags) 182 size_t size, int flags)
183{ 183{
184 unsigned long irqflags;
185 int ret;
186
184 /* 187 /*
185 * With zdev->tlb_refresh == 0, rpcit is not required to establish new 188 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
186 * translations when previously invalid translation-table entries are 189 * translations when previously invalid translation-table entries are
@@ -196,8 +199,22 @@ static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
196 return 0; 199 return 0;
197 } 200 }
198 201
199 return zpci_refresh_trans((u64) zdev->fh << 32, dma_addr, 202 ret = zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
200 PAGE_ALIGN(size)); 203 PAGE_ALIGN(size));
204 if (ret == -ENOMEM && !s390_iommu_strict) {
205 /* enable the hypervisor to free some resources */
206 if (zpci_refresh_global(zdev))
207 goto out;
208
209 spin_lock_irqsave(&zdev->iommu_bitmap_lock, irqflags);
210 bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
211 zdev->lazy_bitmap, zdev->iommu_pages);
212 bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
213 spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, irqflags);
214 ret = 0;
215 }
216out:
217 return ret;
201} 218}
202 219
203static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa, 220static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
index 19bcb3b45a70..f069929e8211 100644
--- a/arch/s390/pci/pci_insn.c
+++ b/arch/s390/pci/pci_insn.c
@@ -89,6 +89,9 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
89 if (cc) 89 if (cc)
90 zpci_err_insn(cc, status, addr, range); 90 zpci_err_insn(cc, status, addr, range);
91 91
92 if (cc == 1 && (status == 4 || status == 16))
93 return -ENOMEM;
94
92 return (cc) ? -EIO : 0; 95 return (cc) ? -EIO : 0;
93} 96}
94 97
diff --git a/arch/s390/tools/gen_opcode_table.c b/arch/s390/tools/gen_opcode_table.c
index 01d4c5a4bfe9..357d42681cef 100644
--- a/arch/s390/tools/gen_opcode_table.c
+++ b/arch/s390/tools/gen_opcode_table.c
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * Generate opcode table initializers for the in-kernel disassembler. 3 * Generate opcode table initializers for the in-kernel disassembler.
3 * 4 *
diff --git a/arch/score/include/uapi/asm/Kbuild b/arch/score/include/uapi/asm/Kbuild
index c94ee54210bc..81271d3af47c 100644
--- a/arch/score/include/uapi/asm/Kbuild
+++ b/arch/score/include/uapi/asm/Kbuild
@@ -1,4 +1,5 @@
1# UAPI Header export list 1# UAPI Header export list
2include include/uapi/asm-generic/Kbuild.asm 2include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += bpf_perf_event.h
4generic-y += siginfo.h 5generic-y += siginfo.h
diff --git a/arch/sh/boards/mach-se/770x/setup.c b/arch/sh/boards/mach-se/770x/setup.c
index 77c35350ee77..412326d59e6f 100644
--- a/arch/sh/boards/mach-se/770x/setup.c
+++ b/arch/sh/boards/mach-se/770x/setup.c
@@ -9,6 +9,7 @@
9 */ 9 */
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/platform_device.h> 11#include <linux/platform_device.h>
12#include <linux/sh_eth.h>
12#include <mach-se/mach/se.h> 13#include <mach-se/mach/se.h>
13#include <mach-se/mach/mrshpc.h> 14#include <mach-se/mach/mrshpc.h>
14#include <asm/machvec.h> 15#include <asm/machvec.h>
@@ -115,13 +116,23 @@ static struct platform_device heartbeat_device = {
115#if defined(CONFIG_CPU_SUBTYPE_SH7710) ||\ 116#if defined(CONFIG_CPU_SUBTYPE_SH7710) ||\
116 defined(CONFIG_CPU_SUBTYPE_SH7712) 117 defined(CONFIG_CPU_SUBTYPE_SH7712)
117/* SH771X Ethernet driver */ 118/* SH771X Ethernet driver */
119static struct sh_eth_plat_data sh_eth_plat = {
120 .phy = PHY_ID,
121 .phy_interface = PHY_INTERFACE_MODE_MII,
122};
123
118static struct resource sh_eth0_resources[] = { 124static struct resource sh_eth0_resources[] = {
119 [0] = { 125 [0] = {
120 .start = SH_ETH0_BASE, 126 .start = SH_ETH0_BASE,
121 .end = SH_ETH0_BASE + 0x1B8, 127 .end = SH_ETH0_BASE + 0x1B8 - 1,
122 .flags = IORESOURCE_MEM, 128 .flags = IORESOURCE_MEM,
123 }, 129 },
124 [1] = { 130 [1] = {
131 .start = SH_TSU_BASE,
132 .end = SH_TSU_BASE + 0x200 - 1,
133 .flags = IORESOURCE_MEM,
134 },
135 [2] = {
125 .start = SH_ETH0_IRQ, 136 .start = SH_ETH0_IRQ,
126 .end = SH_ETH0_IRQ, 137 .end = SH_ETH0_IRQ,
127 .flags = IORESOURCE_IRQ, 138 .flags = IORESOURCE_IRQ,
@@ -132,7 +143,7 @@ static struct platform_device sh_eth0_device = {
132 .name = "sh771x-ether", 143 .name = "sh771x-ether",
133 .id = 0, 144 .id = 0,
134 .dev = { 145 .dev = {
135 .platform_data = PHY_ID, 146 .platform_data = &sh_eth_plat,
136 }, 147 },
137 .num_resources = ARRAY_SIZE(sh_eth0_resources), 148 .num_resources = ARRAY_SIZE(sh_eth0_resources),
138 .resource = sh_eth0_resources, 149 .resource = sh_eth0_resources,
@@ -141,10 +152,15 @@ static struct platform_device sh_eth0_device = {
141static struct resource sh_eth1_resources[] = { 152static struct resource sh_eth1_resources[] = {
142 [0] = { 153 [0] = {
143 .start = SH_ETH1_BASE, 154 .start = SH_ETH1_BASE,
144 .end = SH_ETH1_BASE + 0x1B8, 155 .end = SH_ETH1_BASE + 0x1B8 - 1,
145 .flags = IORESOURCE_MEM, 156 .flags = IORESOURCE_MEM,
146 }, 157 },
147 [1] = { 158 [1] = {
159 .start = SH_TSU_BASE,
160 .end = SH_TSU_BASE + 0x200 - 1,
161 .flags = IORESOURCE_MEM,
162 },
163 [2] = {
148 .start = SH_ETH1_IRQ, 164 .start = SH_ETH1_IRQ,
149 .end = SH_ETH1_IRQ, 165 .end = SH_ETH1_IRQ,
150 .flags = IORESOURCE_IRQ, 166 .flags = IORESOURCE_IRQ,
@@ -155,7 +171,7 @@ static struct platform_device sh_eth1_device = {
155 .name = "sh771x-ether", 171 .name = "sh771x-ether",
156 .id = 1, 172 .id = 1,
157 .dev = { 173 .dev = {
158 .platform_data = PHY_ID, 174 .platform_data = &sh_eth_plat,
159 }, 175 },
160 .num_resources = ARRAY_SIZE(sh_eth1_resources), 176 .num_resources = ARRAY_SIZE(sh_eth1_resources),
161 .resource = sh_eth1_resources, 177 .resource = sh_eth1_resources,
diff --git a/arch/sh/include/mach-se/mach/se.h b/arch/sh/include/mach-se/mach/se.h
index 4246ef9b07a3..aa83fe1ff0b1 100644
--- a/arch/sh/include/mach-se/mach/se.h
+++ b/arch/sh/include/mach-se/mach/se.h
@@ -100,6 +100,7 @@
100/* Base address */ 100/* Base address */
101#define SH_ETH0_BASE 0xA7000000 101#define SH_ETH0_BASE 0xA7000000
102#define SH_ETH1_BASE 0xA7000400 102#define SH_ETH1_BASE 0xA7000400
103#define SH_TSU_BASE 0xA7000800
103/* PHY ID */ 104/* PHY ID */
104#if defined(CONFIG_CPU_SUBTYPE_SH7710) 105#if defined(CONFIG_CPU_SUBTYPE_SH7710)
105# define PHY_ID 0x00 106# define PHY_ID 0x00
diff --git a/arch/sh/include/uapi/asm/Kbuild b/arch/sh/include/uapi/asm/Kbuild
index e28531333efa..ba4d39cb321d 100644
--- a/arch/sh/include/uapi/asm/Kbuild
+++ b/arch/sh/include/uapi/asm/Kbuild
@@ -2,6 +2,7 @@
2include include/uapi/asm-generic/Kbuild.asm 2include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += bitsperlong.h 4generic-y += bitsperlong.h
5generic-y += bpf_perf_event.h
5generic-y += errno.h 6generic-y += errno.h
6generic-y += fcntl.h 7generic-y += fcntl.h
7generic-y += ioctl.h 8generic-y += ioctl.h
diff --git a/arch/sparc/crypto/Makefile b/arch/sparc/crypto/Makefile
index 818d3aa5172e..d257186c27d1 100644
--- a/arch/sparc/crypto/Makefile
+++ b/arch/sparc/crypto/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_CRYPTO_MD5_SPARC64) += md5-sparc64.o
10 10
11obj-$(CONFIG_CRYPTO_AES_SPARC64) += aes-sparc64.o 11obj-$(CONFIG_CRYPTO_AES_SPARC64) += aes-sparc64.o
12obj-$(CONFIG_CRYPTO_DES_SPARC64) += des-sparc64.o 12obj-$(CONFIG_CRYPTO_DES_SPARC64) += des-sparc64.o
13obj-$(CONFIG_CRYPTO_DES_SPARC64) += camellia-sparc64.o 13obj-$(CONFIG_CRYPTO_CAMELLIA_SPARC64) += camellia-sparc64.o
14 14
15obj-$(CONFIG_CRYPTO_CRC32C_SPARC64) += crc32c-sparc64.o 15obj-$(CONFIG_CRYPTO_CRC32C_SPARC64) += crc32c-sparc64.o
16 16
diff --git a/arch/sparc/include/uapi/asm/Kbuild b/arch/sparc/include/uapi/asm/Kbuild
index 2178c78c7c1a..4680ba246b55 100644
--- a/arch/sparc/include/uapi/asm/Kbuild
+++ b/arch/sparc/include/uapi/asm/Kbuild
@@ -1,4 +1,5 @@
1# UAPI Header export list 1# UAPI Header export list
2include include/uapi/asm-generic/Kbuild.asm 2include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += bpf_perf_event.h
4generic-y += types.h 5generic-y += types.h
diff --git a/arch/sparc/lib/hweight.S b/arch/sparc/lib/hweight.S
index e5547b22cd18..0ddbbb031822 100644
--- a/arch/sparc/lib/hweight.S
+++ b/arch/sparc/lib/hweight.S
@@ -44,8 +44,8 @@ EXPORT_SYMBOL(__arch_hweight32)
44 .previous 44 .previous
45 45
46ENTRY(__arch_hweight64) 46ENTRY(__arch_hweight64)
47 sethi %hi(__sw_hweight16), %g1 47 sethi %hi(__sw_hweight64), %g1
48 jmpl %g1 + %lo(__sw_hweight16), %g0 48 jmpl %g1 + %lo(__sw_hweight64), %g0
49 nop 49 nop
50ENDPROC(__arch_hweight64) 50ENDPROC(__arch_hweight64)
51EXPORT_SYMBOL(__arch_hweight64) 51EXPORT_SYMBOL(__arch_hweight64)
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index be3136f142a9..a8103a84b4ac 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -113,7 +113,7 @@ show_signal_msg(struct pt_regs *regs, int sig, int code,
113 if (!printk_ratelimit()) 113 if (!printk_ratelimit())
114 return; 114 return;
115 115
116 printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x", 116 printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
117 task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, 117 task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
118 tsk->comm, task_pid_nr(tsk), address, 118 tsk->comm, task_pid_nr(tsk), address,
119 (void *)regs->pc, (void *)regs->u_regs[UREG_I7], 119 (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 815c03d7a765..41363f46797b 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -154,7 +154,7 @@ show_signal_msg(struct pt_regs *regs, int sig, int code,
154 if (!printk_ratelimit()) 154 if (!printk_ratelimit())
155 return; 155 return;
156 156
157 printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x", 157 printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
158 task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, 158 task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
159 tsk->comm, task_pid_nr(tsk), address, 159 tsk->comm, task_pid_nr(tsk), address,
160 (void *)regs->tpc, (void *)regs->u_regs[UREG_I7], 160 (void *)regs->tpc, (void *)regs->u_regs[UREG_I7],
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index 33c0f8bb0f33..5335ba3c850e 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -75,7 +75,7 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
75 if (!(pmd_val(pmd) & _PAGE_VALID)) 75 if (!(pmd_val(pmd) & _PAGE_VALID))
76 return 0; 76 return 0;
77 77
78 if (!pmd_access_permitted(pmd, write)) 78 if (write && !pmd_write(pmd))
79 return 0; 79 return 0;
80 80
81 refs = 0; 81 refs = 0;
@@ -114,7 +114,7 @@ static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
114 if (!(pud_val(pud) & _PAGE_VALID)) 114 if (!(pud_val(pud) & _PAGE_VALID))
115 return 0; 115 return 0;
116 116
117 if (!pud_access_permitted(pud, write)) 117 if (write && !pud_write(pud))
118 return 0; 118 return 0;
119 119
120 refs = 0; 120 refs = 0;
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index 5765e7e711f7..ff5f9cb3039a 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -1245,14 +1245,16 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
1245 u8 *func = ((u8 *)__bpf_call_base) + imm; 1245 u8 *func = ((u8 *)__bpf_call_base) + imm;
1246 1246
1247 ctx->saw_call = true; 1247 ctx->saw_call = true;
1248 if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func))
1249 emit_reg_move(bpf2sparc[BPF_REG_1], L7, ctx);
1248 1250
1249 emit_call((u32 *)func, ctx); 1251 emit_call((u32 *)func, ctx);
1250 emit_nop(ctx); 1252 emit_nop(ctx);
1251 1253
1252 emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx); 1254 emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx);
1253 1255
1254 if (bpf_helper_changes_pkt_data(func) && ctx->saw_ld_abs_ind) 1256 if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func))
1255 load_skb_regs(ctx, bpf2sparc[BPF_REG_6]); 1257 load_skb_regs(ctx, L7);
1256 break; 1258 break;
1257 } 1259 }
1258 1260
diff --git a/arch/tile/include/uapi/asm/Kbuild b/arch/tile/include/uapi/asm/Kbuild
index 5711de0a1b5e..cc439612bcd5 100644
--- a/arch/tile/include/uapi/asm/Kbuild
+++ b/arch/tile/include/uapi/asm/Kbuild
@@ -1,6 +1,7 @@
1# UAPI Header export list 1# UAPI Header export list
2include include/uapi/asm-generic/Kbuild.asm 2include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += bpf_perf_event.h
4generic-y += errno.h 5generic-y += errno.h
5generic-y += fcntl.h 6generic-y += fcntl.h
6generic-y += ioctl.h 7generic-y += ioctl.h
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 50a32c33d729..73c57f614c9e 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -1,4 +1,5 @@
1generic-y += barrier.h 1generic-y += barrier.h
2generic-y += bpf_perf_event.h
2generic-y += bug.h 3generic-y += bug.h
3generic-y += clkdev.h 4generic-y += clkdev.h
4generic-y += current.h 5generic-y += current.h
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index b668e351fd6c..fca34b2177e2 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -15,9 +15,10 @@ extern void uml_setup_stubs(struct mm_struct *mm);
15/* 15/*
16 * Needed since we do not use the asm-generic/mm_hooks.h: 16 * Needed since we do not use the asm-generic/mm_hooks.h:
17 */ 17 */
18static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) 18static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
19{ 19{
20 uml_setup_stubs(mm); 20 uml_setup_stubs(mm);
21 return 0;
21} 22}
22extern void arch_exit_mmap(struct mm_struct *mm); 23extern void arch_exit_mmap(struct mm_struct *mm);
23static inline void arch_unmap(struct mm_struct *mm, 24static inline void arch_unmap(struct mm_struct *mm,
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 4e6fcb32620f..428644175956 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -150,7 +150,7 @@ static void show_segv_info(struct uml_pt_regs *regs)
150 if (!printk_ratelimit()) 150 if (!printk_ratelimit())
151 return; 151 return;
152 152
153 printk("%s%s[%d]: segfault at %lx ip %p sp %p error %x", 153 printk("%s%s[%d]: segfault at %lx ip %px sp %px error %x",
154 task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, 154 task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
155 tsk->comm, task_pid_nr(tsk), FAULT_ADDRESS(*fi), 155 tsk->comm, task_pid_nr(tsk), FAULT_ADDRESS(*fi),
156 (void *)UPT_IP(regs), (void *)UPT_SP(regs), 156 (void *)UPT_IP(regs), (void *)UPT_SP(regs),
diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h
index 59b06b48f27d..5c205a9cb5a6 100644
--- a/arch/unicore32/include/asm/mmu_context.h
+++ b/arch/unicore32/include/asm/mmu_context.h
@@ -81,9 +81,10 @@ do { \
81 } \ 81 } \
82} while (0) 82} while (0)
83 83
84static inline void arch_dup_mmap(struct mm_struct *oldmm, 84static inline int arch_dup_mmap(struct mm_struct *oldmm,
85 struct mm_struct *mm) 85 struct mm_struct *mm)
86{ 86{
87 return 0;
87} 88}
88 89
89static inline void arch_unmap(struct mm_struct *mm, 90static inline void arch_unmap(struct mm_struct *mm,
diff --git a/arch/unicore32/include/uapi/asm/Kbuild b/arch/unicore32/include/uapi/asm/Kbuild
index 759a71411169..8611ef980554 100644
--- a/arch/unicore32/include/uapi/asm/Kbuild
+++ b/arch/unicore32/include/uapi/asm/Kbuild
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += auxvec.h 4generic-y += auxvec.h
5generic-y += bitsperlong.h 5generic-y += bitsperlong.h
6generic-y += bpf_perf_event.h
6generic-y += errno.h 7generic-y += errno.h
7generic-y += fcntl.h 8generic-y += fcntl.h
8generic-y += ioctl.h 9generic-y += ioctl.h
diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c
index 5f25b39f04d4..c4ac6043ebb0 100644
--- a/arch/unicore32/kernel/traps.c
+++ b/arch/unicore32/kernel/traps.c
@@ -298,7 +298,6 @@ void abort(void)
298 /* if that doesn't kill us, halt */ 298 /* if that doesn't kill us, halt */
299 panic("Oops failed to kill thread"); 299 panic("Oops failed to kill thread");
300} 300}
301EXPORT_SYMBOL(abort);
302 301
303void __init trap_init(void) 302void __init trap_init(void)
304{ 303{
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 8eed3f94bfc7..20da391b5f32 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -55,7 +55,6 @@ config X86
55 select ARCH_HAS_GCOV_PROFILE_ALL 55 select ARCH_HAS_GCOV_PROFILE_ALL
56 select ARCH_HAS_KCOV if X86_64 56 select ARCH_HAS_KCOV if X86_64
57 select ARCH_HAS_PMEM_API if X86_64 57 select ARCH_HAS_PMEM_API if X86_64
58 # Causing hangs/crashes, see the commit that added this change for details.
59 select ARCH_HAS_REFCOUNT 58 select ARCH_HAS_REFCOUNT
60 select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64 59 select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
61 select ARCH_HAS_SET_MEMORY 60 select ARCH_HAS_SET_MEMORY
@@ -89,6 +88,7 @@ config X86
89 select GENERIC_CLOCKEVENTS_MIN_ADJUST 88 select GENERIC_CLOCKEVENTS_MIN_ADJUST
90 select GENERIC_CMOS_UPDATE 89 select GENERIC_CMOS_UPDATE
91 select GENERIC_CPU_AUTOPROBE 90 select GENERIC_CPU_AUTOPROBE
91 select GENERIC_CPU_VULNERABILITIES
92 select GENERIC_EARLY_IOREMAP 92 select GENERIC_EARLY_IOREMAP
93 select GENERIC_FIND_FIRST_BIT 93 select GENERIC_FIND_FIRST_BIT
94 select GENERIC_IOMAP 94 select GENERIC_IOMAP
@@ -429,6 +429,19 @@ config GOLDFISH
429 def_bool y 429 def_bool y
430 depends on X86_GOLDFISH 430 depends on X86_GOLDFISH
431 431
432config RETPOLINE
433 bool "Avoid speculative indirect branches in kernel"
434 default y
435 help
436 Compile kernel with the retpoline compiler options to guard against
437 kernel-to-user data leaks by avoiding speculative indirect
438 branches. Requires a compiler with -mindirect-branch=thunk-extern
439 support for full protection. The kernel may run slower.
440
441 Without compiler support, at least indirect branches in assembler
442 code are eliminated. Since this includes the syscall entry path,
443 it is not entirely pointless.
444
432config INTEL_RDT 445config INTEL_RDT
433 bool "Intel Resource Director Technology support" 446 bool "Intel Resource Director Technology support"
434 default n 447 default n
@@ -926,7 +939,8 @@ config MAXSMP
926config NR_CPUS 939config NR_CPUS
927 int "Maximum number of CPUs" if SMP && !MAXSMP 940 int "Maximum number of CPUs" if SMP && !MAXSMP
928 range 2 8 if SMP && X86_32 && !X86_BIGSMP 941 range 2 8 if SMP && X86_32 && !X86_BIGSMP
929 range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK 942 range 2 64 if SMP && X86_32 && X86_BIGSMP
943 range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK && X86_64
930 range 2 8192 if SMP && !MAXSMP && CPUMASK_OFFSTACK && X86_64 944 range 2 8192 if SMP && !MAXSMP && CPUMASK_OFFSTACK && X86_64
931 default "1" if !SMP 945 default "1" if !SMP
932 default "8192" if MAXSMP 946 default "8192" if MAXSMP
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 6293a8768a91..672441c008c7 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -400,6 +400,7 @@ config UNWINDER_FRAME_POINTER
400config UNWINDER_GUESS 400config UNWINDER_GUESS
401 bool "Guess unwinder" 401 bool "Guess unwinder"
402 depends on EXPERT 402 depends on EXPERT
403 depends on !STACKDEPOT
403 ---help--- 404 ---help---
404 This option enables the "guess" unwinder for unwinding kernel stack 405 This option enables the "guess" unwinder for unwinding kernel stack
405 traces. It scans the stack and reports every kernel text address it 406 traces. It scans the stack and reports every kernel text address it
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 3e73bc255e4e..fad55160dcb9 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -230,6 +230,14 @@ KBUILD_CFLAGS += -Wno-sign-compare
230# 230#
231KBUILD_CFLAGS += -fno-asynchronous-unwind-tables 231KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
232 232
233# Avoid indirect branches in kernel to deal with Spectre
234ifdef CONFIG_RETPOLINE
235 RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
236 ifneq ($(RETPOLINE_CFLAGS),)
237 KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
238 endif
239endif
240
233archscripts: scripts_basic 241archscripts: scripts_basic
234 $(Q)$(MAKE) $(build)=arch/x86/tools relocs 242 $(Q)$(MAKE) $(build)=arch/x86/tools relocs
235 243
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 1e9c322e973a..f25e1530e064 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -80,6 +80,7 @@ vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/kaslr.o
80ifdef CONFIG_X86_64 80ifdef CONFIG_X86_64
81 vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/pagetable.o 81 vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/pagetable.o
82 vmlinux-objs-y += $(obj)/mem_encrypt.o 82 vmlinux-objs-y += $(obj)/mem_encrypt.o
83 vmlinux-objs-y += $(obj)/pgtable_64.o
83endif 84endif
84 85
85$(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone 86$(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 20919b4f3133..fc313e29fe2c 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -305,10 +305,18 @@ ENTRY(startup_64)
305 leaq boot_stack_end(%rbx), %rsp 305 leaq boot_stack_end(%rbx), %rsp
306 306
307#ifdef CONFIG_X86_5LEVEL 307#ifdef CONFIG_X86_5LEVEL
308 /* Check if 5-level paging has already enabled */ 308 /*
309 movq %cr4, %rax 309 * Check if we need to enable 5-level paging.
310 testl $X86_CR4_LA57, %eax 310 * RSI holds real mode data and need to be preserved across
311 jnz lvl5 311 * a function call.
312 */
313 pushq %rsi
314 call l5_paging_required
315 popq %rsi
316
317 /* If l5_paging_required() returned zero, we're done here. */
318 cmpq $0, %rax
319 je lvl5
312 320
313 /* 321 /*
314 * At this point we are in long mode with 4-level paging enabled, 322 * At this point we are in long mode with 4-level paging enabled,
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index b50c42455e25..98761a1576ce 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -169,6 +169,16 @@ void __puthex(unsigned long value)
169 } 169 }
170} 170}
171 171
172static bool l5_supported(void)
173{
174 /* Check if leaf 7 is supported. */
175 if (native_cpuid_eax(0) < 7)
176 return 0;
177
178 /* Check if la57 is supported. */
179 return native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31));
180}
181
172#if CONFIG_X86_NEED_RELOCS 182#if CONFIG_X86_NEED_RELOCS
173static void handle_relocations(void *output, unsigned long output_len, 183static void handle_relocations(void *output, unsigned long output_len,
174 unsigned long virt_addr) 184 unsigned long virt_addr)
@@ -362,6 +372,12 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
362 console_init(); 372 console_init();
363 debug_putstr("early console in extract_kernel\n"); 373 debug_putstr("early console in extract_kernel\n");
364 374
375 if (IS_ENABLED(CONFIG_X86_5LEVEL) && !l5_supported()) {
376 error("This linux kernel as configured requires 5-level paging\n"
377 "This CPU does not support the required 'cr4.la57' feature\n"
378 "Unable to boot - please use a kernel appropriate for your CPU\n");
379 }
380
365 free_mem_ptr = heap; /* Heap */ 381 free_mem_ptr = heap; /* Heap */
366 free_mem_end_ptr = heap + BOOT_HEAP_SIZE; 382 free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
367 383
diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c
index d5364ca2e3f9..b5e5e02f8cde 100644
--- a/arch/x86/boot/compressed/pagetable.c
+++ b/arch/x86/boot/compressed/pagetable.c
@@ -23,6 +23,9 @@
23 */ 23 */
24#undef CONFIG_AMD_MEM_ENCRYPT 24#undef CONFIG_AMD_MEM_ENCRYPT
25 25
26/* No PAGE_TABLE_ISOLATION support needed either: */
27#undef CONFIG_PAGE_TABLE_ISOLATION
28
26#include "misc.h" 29#include "misc.h"
27 30
28/* These actually do the work of building the kernel identity maps. */ 31/* These actually do the work of building the kernel identity maps. */
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
new file mode 100644
index 000000000000..b4469a37e9a1
--- /dev/null
+++ b/arch/x86/boot/compressed/pgtable_64.c
@@ -0,0 +1,28 @@
1#include <asm/processor.h>
2
3/*
4 * __force_order is used by special_insns.h asm code to force instruction
5 * serialization.
6 *
7 * It is not referenced from the code, but GCC < 5 with -fPIE would fail
8 * due to an undefined symbol. Define it to make these ancient GCCs work.
9 */
10unsigned long __force_order;
11
12int l5_paging_required(void)
13{
14 /* Check if leaf 7 is supported. */
15
16 if (native_cpuid_eax(0) < 7)
17 return 0;
18
19 /* Check if la57 is supported. */
20 if (!(native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31))))
21 return 0;
22
23 /* Check if 5-level paging has already been enabled. */
24 if (native_read_cr4() & X86_CR4_LA57)
25 return 0;
26
27 return 1;
28}
diff --git a/arch/x86/boot/genimage.sh b/arch/x86/boot/genimage.sh
index 49f4970f693b..6a10d52a4145 100644
--- a/arch/x86/boot/genimage.sh
+++ b/arch/x86/boot/genimage.sh
@@ -44,9 +44,9 @@ FDINITRD=$6
44 44
45# Make sure the files actually exist 45# Make sure the files actually exist
46verify "$FBZIMAGE" 46verify "$FBZIMAGE"
47verify "$MTOOLSRC"
48 47
49genbzdisk() { 48genbzdisk() {
49 verify "$MTOOLSRC"
50 mformat a: 50 mformat a:
51 syslinux $FIMAGE 51 syslinux $FIMAGE
52 echo "$KCMDLINE" | mcopy - a:syslinux.cfg 52 echo "$KCMDLINE" | mcopy - a:syslinux.cfg
@@ -57,6 +57,7 @@ genbzdisk() {
57} 57}
58 58
59genfdimage144() { 59genfdimage144() {
60 verify "$MTOOLSRC"
60 dd if=/dev/zero of=$FIMAGE bs=1024 count=1440 2> /dev/null 61 dd if=/dev/zero of=$FIMAGE bs=1024 count=1440 2> /dev/null
61 mformat v: 62 mformat v:
62 syslinux $FIMAGE 63 syslinux $FIMAGE
@@ -68,6 +69,7 @@ genfdimage144() {
68} 69}
69 70
70genfdimage288() { 71genfdimage288() {
72 verify "$MTOOLSRC"
71 dd if=/dev/zero of=$FIMAGE bs=1024 count=2880 2> /dev/null 73 dd if=/dev/zero of=$FIMAGE bs=1024 count=2880 2> /dev/null
72 mformat w: 74 mformat w:
73 syslinux $FIMAGE 75 syslinux $FIMAGE
@@ -78,39 +80,43 @@ genfdimage288() {
78 mcopy $FBZIMAGE w:linux 80 mcopy $FBZIMAGE w:linux
79} 81}
80 82
81genisoimage() { 83geniso() {
82 tmp_dir=`dirname $FIMAGE`/isoimage 84 tmp_dir=`dirname $FIMAGE`/isoimage
83 rm -rf $tmp_dir 85 rm -rf $tmp_dir
84 mkdir $tmp_dir 86 mkdir $tmp_dir
85 for i in lib lib64 share end ; do 87 for i in lib lib64 share ; do
86 for j in syslinux ISOLINUX ; do 88 for j in syslinux ISOLINUX ; do
87 if [ -f /usr/$i/$j/isolinux.bin ] ; then 89 if [ -f /usr/$i/$j/isolinux.bin ] ; then
88 isolinux=/usr/$i/$j/isolinux.bin 90 isolinux=/usr/$i/$j/isolinux.bin
89 cp $isolinux $tmp_dir
90 fi 91 fi
91 done 92 done
92 for j in syslinux syslinux/modules/bios ; do 93 for j in syslinux syslinux/modules/bios ; do
93 if [ -f /usr/$i/$j/ldlinux.c32 ]; then 94 if [ -f /usr/$i/$j/ldlinux.c32 ]; then
94 ldlinux=/usr/$i/$j/ldlinux.c32 95 ldlinux=/usr/$i/$j/ldlinux.c32
95 cp $ldlinux $tmp_dir
96 fi 96 fi
97 done 97 done
98 if [ -n "$isolinux" -a -n "$ldlinux" ] ; then 98 if [ -n "$isolinux" -a -n "$ldlinux" ] ; then
99 break 99 break
100 fi 100 fi
101 if [ $i = end -a -z "$isolinux" ] ; then
102 echo 'Need an isolinux.bin file, please install syslinux/isolinux.'
103 exit 1
104 fi
105 done 101 done
102 if [ -z "$isolinux" ] ; then
103 echo 'Need an isolinux.bin file, please install syslinux/isolinux.'
104 exit 1
105 fi
106 if [ -z "$ldlinux" ] ; then
107 echo 'Need an ldlinux.c32 file, please install syslinux/isolinux.'
108 exit 1
109 fi
110 cp $isolinux $tmp_dir
111 cp $ldlinux $tmp_dir
106 cp $FBZIMAGE $tmp_dir/linux 112 cp $FBZIMAGE $tmp_dir/linux
107 echo "$KCMDLINE" > $tmp_dir/isolinux.cfg 113 echo "$KCMDLINE" > $tmp_dir/isolinux.cfg
108 if [ -f "$FDINITRD" ] ; then 114 if [ -f "$FDINITRD" ] ; then
109 cp "$FDINITRD" $tmp_dir/initrd.img 115 cp "$FDINITRD" $tmp_dir/initrd.img
110 fi 116 fi
111 mkisofs -J -r -input-charset=utf-8 -quiet -o $FIMAGE -b isolinux.bin \ 117 genisoimage -J -r -input-charset=utf-8 -quiet -o $FIMAGE \
112 -c boot.cat -no-emul-boot -boot-load-size 4 -boot-info-table \ 118 -b isolinux.bin -c boot.cat -no-emul-boot -boot-load-size 4 \
113 $tmp_dir 119 -boot-info-table $tmp_dir
114 isohybrid $FIMAGE 2>/dev/null || true 120 isohybrid $FIMAGE 2>/dev/null || true
115 rm -rf $tmp_dir 121 rm -rf $tmp_dir
116} 122}
@@ -119,6 +125,6 @@ case $1 in
119 bzdisk) genbzdisk;; 125 bzdisk) genbzdisk;;
120 fdimage144) genfdimage144;; 126 fdimage144) genfdimage144;;
121 fdimage288) genfdimage288;; 127 fdimage288) genfdimage288;;
122 isoimage) genisoimage;; 128 isoimage) geniso;;
123 *) echo 'Unknown image format'; exit 1; 129 *) echo 'Unknown image format'; exit 1;
124esac 130esac
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 16627fec80b2..3d09e3aca18d 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -32,6 +32,7 @@
32#include <linux/linkage.h> 32#include <linux/linkage.h>
33#include <asm/inst.h> 33#include <asm/inst.h>
34#include <asm/frame.h> 34#include <asm/frame.h>
35#include <asm/nospec-branch.h>
35 36
36/* 37/*
37 * The following macros are used to move an (un)aligned 16 byte value to/from 38 * The following macros are used to move an (un)aligned 16 byte value to/from
@@ -2884,7 +2885,7 @@ ENTRY(aesni_xts_crypt8)
2884 pxor INC, STATE4 2885 pxor INC, STATE4
2885 movdqu IV, 0x30(OUTP) 2886 movdqu IV, 0x30(OUTP)
2886 2887
2887 call *%r11 2888 CALL_NOSPEC %r11
2888 2889
2889 movdqu 0x00(OUTP), INC 2890 movdqu 0x00(OUTP), INC
2890 pxor INC, STATE1 2891 pxor INC, STATE1
@@ -2929,7 +2930,7 @@ ENTRY(aesni_xts_crypt8)
2929 _aesni_gf128mul_x_ble() 2930 _aesni_gf128mul_x_ble()
2930 movups IV, (IVP) 2931 movups IV, (IVP)
2931 2932
2932 call *%r11 2933 CALL_NOSPEC %r11
2933 2934
2934 movdqu 0x40(OUTP), INC 2935 movdqu 0x40(OUTP), INC
2935 pxor INC, STATE1 2936 pxor INC, STATE1
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
index f7c495e2863c..a14af6eb09cb 100644
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -17,6 +17,7 @@
17 17
18#include <linux/linkage.h> 18#include <linux/linkage.h>
19#include <asm/frame.h> 19#include <asm/frame.h>
20#include <asm/nospec-branch.h>
20 21
21#define CAMELLIA_TABLE_BYTE_LEN 272 22#define CAMELLIA_TABLE_BYTE_LEN 272
22 23
@@ -1227,7 +1228,7 @@ camellia_xts_crypt_16way:
1227 vpxor 14 * 16(%rax), %xmm15, %xmm14; 1228 vpxor 14 * 16(%rax), %xmm15, %xmm14;
1228 vpxor 15 * 16(%rax), %xmm15, %xmm15; 1229 vpxor 15 * 16(%rax), %xmm15, %xmm15;
1229 1230
1230 call *%r9; 1231 CALL_NOSPEC %r9;
1231 1232
1232 addq $(16 * 16), %rsp; 1233 addq $(16 * 16), %rsp;
1233 1234
diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
index eee5b3982cfd..b66bbfa62f50 100644
--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
@@ -12,6 +12,7 @@
12 12
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <asm/frame.h> 14#include <asm/frame.h>
15#include <asm/nospec-branch.h>
15 16
16#define CAMELLIA_TABLE_BYTE_LEN 272 17#define CAMELLIA_TABLE_BYTE_LEN 272
17 18
@@ -1343,7 +1344,7 @@ camellia_xts_crypt_32way:
1343 vpxor 14 * 32(%rax), %ymm15, %ymm14; 1344 vpxor 14 * 32(%rax), %ymm15, %ymm14;
1344 vpxor 15 * 32(%rax), %ymm15, %ymm15; 1345 vpxor 15 * 32(%rax), %ymm15, %ymm15;
1345 1346
1346 call *%r9; 1347 CALL_NOSPEC %r9;
1347 1348
1348 addq $(16 * 32), %rsp; 1349 addq $(16 * 32), %rsp;
1349 1350
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index 7a7de27c6f41..d9b734d0c8cc 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -45,6 +45,7 @@
45 45
46#include <asm/inst.h> 46#include <asm/inst.h>
47#include <linux/linkage.h> 47#include <linux/linkage.h>
48#include <asm/nospec-branch.h>
48 49
49## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction 50## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
50 51
@@ -172,7 +173,7 @@ continue_block:
172 movzxw (bufp, %rax, 2), len 173 movzxw (bufp, %rax, 2), len
173 lea crc_array(%rip), bufp 174 lea crc_array(%rip), bufp
174 lea (bufp, len, 1), bufp 175 lea (bufp, len, 1), bufp
175 jmp *bufp 176 JMP_NOSPEC bufp
176 177
177 ################################################################ 178 ################################################################
178 ## 2a) PROCESS FULL BLOCKS: 179 ## 2a) PROCESS FULL BLOCKS:
diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c
index 399a29d067d6..cb91a64a99e7 100644
--- a/arch/x86/crypto/salsa20_glue.c
+++ b/arch/x86/crypto/salsa20_glue.c
@@ -59,13 +59,6 @@ static int encrypt(struct blkcipher_desc *desc,
59 59
60 salsa20_ivsetup(ctx, walk.iv); 60 salsa20_ivsetup(ctx, walk.iv);
61 61
62 if (likely(walk.nbytes == nbytes))
63 {
64 salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
65 walk.dst.virt.addr, nbytes);
66 return blkcipher_walk_done(desc, &walk, 0);
67 }
68
69 while (walk.nbytes >= 64) { 62 while (walk.nbytes >= 64) {
70 salsa20_encrypt_bytes(ctx, walk.src.virt.addr, 63 salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
71 walk.dst.virt.addr, 64 walk.dst.virt.addr,
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 3fd8bc560fae..3f48f695d5e6 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -1,6 +1,11 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2#include <linux/jump_label.h> 2#include <linux/jump_label.h>
3#include <asm/unwind_hints.h> 3#include <asm/unwind_hints.h>
4#include <asm/cpufeatures.h>
5#include <asm/page_types.h>
6#include <asm/percpu.h>
7#include <asm/asm-offsets.h>
8#include <asm/processor-flags.h>
4 9
5/* 10/*
6 11
@@ -187,6 +192,148 @@ For 32-bit we have the following conventions - kernel is built with
187#endif 192#endif
188.endm 193.endm
189 194
195#ifdef CONFIG_PAGE_TABLE_ISOLATION
196
197/*
198 * PAGE_TABLE_ISOLATION PGDs are 8k. Flip bit 12 to switch between the two
199 * halves:
200 */
201#define PTI_USER_PGTABLE_BIT PAGE_SHIFT
202#define PTI_USER_PGTABLE_MASK (1 << PTI_USER_PGTABLE_BIT)
203#define PTI_USER_PCID_BIT X86_CR3_PTI_PCID_USER_BIT
204#define PTI_USER_PCID_MASK (1 << PTI_USER_PCID_BIT)
205#define PTI_USER_PGTABLE_AND_PCID_MASK (PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK)
206
207.macro SET_NOFLUSH_BIT reg:req
208 bts $X86_CR3_PCID_NOFLUSH_BIT, \reg
209.endm
210
211.macro ADJUST_KERNEL_CR3 reg:req
212 ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
213 /* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
214 andq $(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg
215.endm
216
217.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
218 ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
219 mov %cr3, \scratch_reg
220 ADJUST_KERNEL_CR3 \scratch_reg
221 mov \scratch_reg, %cr3
222.Lend_\@:
223.endm
224
225#define THIS_CPU_user_pcid_flush_mask \
226 PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask
227
228.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
229 ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
230 mov %cr3, \scratch_reg
231
232 ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID
233
234 /*
235 * Test if the ASID needs a flush.
236 */
237 movq \scratch_reg, \scratch_reg2
238 andq $(0x7FF), \scratch_reg /* mask ASID */
239 bt \scratch_reg, THIS_CPU_user_pcid_flush_mask
240 jnc .Lnoflush_\@
241
242 /* Flush needed, clear the bit */
243 btr \scratch_reg, THIS_CPU_user_pcid_flush_mask
244 movq \scratch_reg2, \scratch_reg
245 jmp .Lwrcr3_pcid_\@
246
247.Lnoflush_\@:
248 movq \scratch_reg2, \scratch_reg
249 SET_NOFLUSH_BIT \scratch_reg
250
251.Lwrcr3_pcid_\@:
252 /* Flip the ASID to the user version */
253 orq $(PTI_USER_PCID_MASK), \scratch_reg
254
255.Lwrcr3_\@:
256 /* Flip the PGD to the user version */
257 orq $(PTI_USER_PGTABLE_MASK), \scratch_reg
258 mov \scratch_reg, %cr3
259.Lend_\@:
260.endm
261
262.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
263 pushq %rax
264 SWITCH_TO_USER_CR3_NOSTACK scratch_reg=\scratch_reg scratch_reg2=%rax
265 popq %rax
266.endm
267
268.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
269 ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
270 movq %cr3, \scratch_reg
271 movq \scratch_reg, \save_reg
272 /*
273 * Test the user pagetable bit. If set, then the user page tables
274 * are active. If clear CR3 already has the kernel page table
275 * active.
276 */
277 bt $PTI_USER_PGTABLE_BIT, \scratch_reg
278 jnc .Ldone_\@
279
280 ADJUST_KERNEL_CR3 \scratch_reg
281 movq \scratch_reg, %cr3
282
283.Ldone_\@:
284.endm
285
286.macro RESTORE_CR3 scratch_reg:req save_reg:req
287 ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
288
289 ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID
290
291 /*
292 * KERNEL pages can always resume with NOFLUSH as we do
293 * explicit flushes.
294 */
295 bt $PTI_USER_PGTABLE_BIT, \save_reg
296 jnc .Lnoflush_\@
297
298 /*
299 * Check if there's a pending flush for the user ASID we're
300 * about to set.
301 */
302 movq \save_reg, \scratch_reg
303 andq $(0x7FF), \scratch_reg
304 bt \scratch_reg, THIS_CPU_user_pcid_flush_mask
305 jnc .Lnoflush_\@
306
307 btr \scratch_reg, THIS_CPU_user_pcid_flush_mask
308 jmp .Lwrcr3_\@
309
310.Lnoflush_\@:
311 SET_NOFLUSH_BIT \save_reg
312
313.Lwrcr3_\@:
314 /*
315 * The CR3 write could be avoided when not changing its value,
316 * but would require a CR3 read *and* a scratch register.
317 */
318 movq \save_reg, %cr3
319.Lend_\@:
320.endm
321
322#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */
323
324.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
325.endm
326.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
327.endm
328.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
329.endm
330.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
331.endm
332.macro RESTORE_CR3 scratch_reg:req save_reg:req
333.endm
334
335#endif
336
190#endif /* CONFIG_X86_64 */ 337#endif /* CONFIG_X86_64 */
191 338
192/* 339/*
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 4838037f97f6..60c4c342316c 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -44,6 +44,7 @@
44#include <asm/asm.h> 44#include <asm/asm.h>
45#include <asm/smap.h> 45#include <asm/smap.h>
46#include <asm/frame.h> 46#include <asm/frame.h>
47#include <asm/nospec-branch.h>
47 48
48 .section .entry.text, "ax" 49 .section .entry.text, "ax"
49 50
@@ -243,6 +244,17 @@ ENTRY(__switch_to_asm)
243 movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset 244 movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
244#endif 245#endif
245 246
247#ifdef CONFIG_RETPOLINE
248 /*
249 * When switching from a shallower to a deeper call stack
250 * the RSB may either underflow or use entries populated
251 * with userspace addresses. On CPUs where those concerns
252 * exist, overwrite the RSB with entries which capture
253 * speculative execution to prevent attack.
254 */
255 FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
256#endif
257
246 /* restore callee-saved registers */ 258 /* restore callee-saved registers */
247 popl %esi 259 popl %esi
248 popl %edi 260 popl %edi
@@ -290,7 +302,7 @@ ENTRY(ret_from_fork)
290 302
291 /* kernel thread */ 303 /* kernel thread */
2921: movl %edi, %eax 3041: movl %edi, %eax
293 call *%ebx 305 CALL_NOSPEC %ebx
294 /* 306 /*
295 * A kernel thread is allowed to return here after successfully 307 * A kernel thread is allowed to return here after successfully
296 * calling do_execve(). Exit to userspace to complete the execve() 308 * calling do_execve(). Exit to userspace to complete the execve()
@@ -919,7 +931,7 @@ common_exception:
919 movl %ecx, %es 931 movl %ecx, %es
920 TRACE_IRQS_OFF 932 TRACE_IRQS_OFF
921 movl %esp, %eax # pt_regs pointer 933 movl %esp, %eax # pt_regs pointer
922 call *%edi 934 CALL_NOSPEC %edi
923 jmp ret_from_exception 935 jmp ret_from_exception
924END(common_exception) 936END(common_exception)
925 937
@@ -941,9 +953,10 @@ ENTRY(debug)
941 movl %esp, %eax # pt_regs pointer 953 movl %esp, %eax # pt_regs pointer
942 954
943 /* Are we currently on the SYSENTER stack? */ 955 /* Are we currently on the SYSENTER stack? */
944 PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx) 956 movl PER_CPU_VAR(cpu_entry_area), %ecx
945 subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */ 957 addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
946 cmpl $SIZEOF_SYSENTER_stack, %ecx 958 subl %eax, %ecx /* ecx = (end of entry_stack) - esp */
959 cmpl $SIZEOF_entry_stack, %ecx
947 jb .Ldebug_from_sysenter_stack 960 jb .Ldebug_from_sysenter_stack
948 961
949 TRACE_IRQS_OFF 962 TRACE_IRQS_OFF
@@ -984,9 +997,10 @@ ENTRY(nmi)
984 movl %esp, %eax # pt_regs pointer 997 movl %esp, %eax # pt_regs pointer
985 998
986 /* Are we currently on the SYSENTER stack? */ 999 /* Are we currently on the SYSENTER stack? */
987 PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx) 1000 movl PER_CPU_VAR(cpu_entry_area), %ecx
988 subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */ 1001 addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
989 cmpl $SIZEOF_SYSENTER_stack, %ecx 1002 subl %eax, %ecx /* ecx = (end of entry_stack) - esp */
1003 cmpl $SIZEOF_entry_stack, %ecx
990 jb .Lnmi_from_sysenter_stack 1004 jb .Lnmi_from_sysenter_stack
991 1005
992 /* Not on SYSENTER stack. */ 1006 /* Not on SYSENTER stack. */
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index f81d50d7ceac..ff6f8022612c 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -23,7 +23,6 @@
23#include <asm/segment.h> 23#include <asm/segment.h>
24#include <asm/cache.h> 24#include <asm/cache.h>
25#include <asm/errno.h> 25#include <asm/errno.h>
26#include "calling.h"
27#include <asm/asm-offsets.h> 26#include <asm/asm-offsets.h>
28#include <asm/msr.h> 27#include <asm/msr.h>
29#include <asm/unistd.h> 28#include <asm/unistd.h>
@@ -38,8 +37,11 @@
38#include <asm/pgtable_types.h> 37#include <asm/pgtable_types.h>
39#include <asm/export.h> 38#include <asm/export.h>
40#include <asm/frame.h> 39#include <asm/frame.h>
40#include <asm/nospec-branch.h>
41#include <linux/err.h> 41#include <linux/err.h>
42 42
43#include "calling.h"
44
43.code64 45.code64
44.section .entry.text, "ax" 46.section .entry.text, "ax"
45 47
@@ -140,6 +142,67 @@ END(native_usergs_sysret64)
140 * with them due to bugs in both AMD and Intel CPUs. 142 * with them due to bugs in both AMD and Intel CPUs.
141 */ 143 */
142 144
145 .pushsection .entry_trampoline, "ax"
146
147/*
148 * The code in here gets remapped into cpu_entry_area's trampoline. This means
149 * that the assembler and linker have the wrong idea as to where this code
150 * lives (and, in fact, it's mapped more than once, so it's not even at a
151 * fixed address). So we can't reference any symbols outside the entry
152 * trampoline and expect it to work.
153 *
154 * Instead, we carefully abuse %rip-relative addressing.
155 * _entry_trampoline(%rip) refers to the start of the remapped) entry
156 * trampoline. We can thus find cpu_entry_area with this macro:
157 */
158
159#define CPU_ENTRY_AREA \
160 _entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip)
161
162/* The top word of the SYSENTER stack is hot and is usable as scratch space. */
163#define RSP_SCRATCH CPU_ENTRY_AREA_entry_stack + \
164 SIZEOF_entry_stack - 8 + CPU_ENTRY_AREA
165
166ENTRY(entry_SYSCALL_64_trampoline)
167 UNWIND_HINT_EMPTY
168 swapgs
169
170 /* Stash the user RSP. */
171 movq %rsp, RSP_SCRATCH
172
173 /* Note: using %rsp as a scratch reg. */
174 SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
175
176 /* Load the top of the task stack into RSP */
177 movq CPU_ENTRY_AREA_tss + TSS_sp1 + CPU_ENTRY_AREA, %rsp
178
179 /* Start building the simulated IRET frame. */
180 pushq $__USER_DS /* pt_regs->ss */
181 pushq RSP_SCRATCH /* pt_regs->sp */
182 pushq %r11 /* pt_regs->flags */
183 pushq $__USER_CS /* pt_regs->cs */
184 pushq %rcx /* pt_regs->ip */
185
186 /*
187 * x86 lacks a near absolute jump, and we can't jump to the real
188 * entry text with a relative jump. We could push the target
189 * address and then use retq, but this destroys the pipeline on
190 * many CPUs (wasting over 20 cycles on Sandy Bridge). Instead,
191 * spill RDI and restore it in a second-stage trampoline.
192 */
193 pushq %rdi
194 movq $entry_SYSCALL_64_stage2, %rdi
195 JMP_NOSPEC %rdi
196END(entry_SYSCALL_64_trampoline)
197
198 .popsection
199
200ENTRY(entry_SYSCALL_64_stage2)
201 UNWIND_HINT_EMPTY
202 popq %rdi
203 jmp entry_SYSCALL_64_after_hwframe
204END(entry_SYSCALL_64_stage2)
205
143ENTRY(entry_SYSCALL_64) 206ENTRY(entry_SYSCALL_64)
144 UNWIND_HINT_EMPTY 207 UNWIND_HINT_EMPTY
145 /* 208 /*
@@ -149,6 +212,10 @@ ENTRY(entry_SYSCALL_64)
149 */ 212 */
150 213
151 swapgs 214 swapgs
215 /*
216 * This path is not taken when PAGE_TABLE_ISOLATION is disabled so it
217 * is not required to switch CR3.
218 */
152 movq %rsp, PER_CPU_VAR(rsp_scratch) 219 movq %rsp, PER_CPU_VAR(rsp_scratch)
153 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp 220 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
154 221
@@ -204,7 +271,12 @@ entry_SYSCALL_64_fastpath:
204 * It might end up jumping to the slow path. If it jumps, RAX 271 * It might end up jumping to the slow path. If it jumps, RAX
205 * and all argument registers are clobbered. 272 * and all argument registers are clobbered.
206 */ 273 */
274#ifdef CONFIG_RETPOLINE
275 movq sys_call_table(, %rax, 8), %rax
276 call __x86_indirect_thunk_rax
277#else
207 call *sys_call_table(, %rax, 8) 278 call *sys_call_table(, %rax, 8)
279#endif
208.Lentry_SYSCALL_64_after_fastpath_call: 280.Lentry_SYSCALL_64_after_fastpath_call:
209 281
210 movq %rax, RAX(%rsp) 282 movq %rax, RAX(%rsp)
@@ -330,8 +402,25 @@ syscall_return_via_sysret:
330 popq %rsi /* skip rcx */ 402 popq %rsi /* skip rcx */
331 popq %rdx 403 popq %rdx
332 popq %rsi 404 popq %rsi
405
406 /*
407 * Now all regs are restored except RSP and RDI.
408 * Save old stack pointer and switch to trampoline stack.
409 */
410 movq %rsp, %rdi
411 movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
412
413 pushq RSP-RDI(%rdi) /* RSP */
414 pushq (%rdi) /* RDI */
415
416 /*
417 * We are on the trampoline stack. All regs except RDI are live.
418 * We can do future final exit work right here.
419 */
420 SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
421
333 popq %rdi 422 popq %rdi
334 movq RSP-ORIG_RAX(%rsp), %rsp 423 popq %rsp
335 USERGS_SYSRET64 424 USERGS_SYSRET64
336END(entry_SYSCALL_64) 425END(entry_SYSCALL_64)
337 426
@@ -359,7 +448,7 @@ ENTRY(stub_ptregs_64)
359 jmp entry_SYSCALL64_slow_path 448 jmp entry_SYSCALL64_slow_path
360 449
3611: 4501:
362 jmp *%rax /* Called from C */ 451 JMP_NOSPEC %rax /* Called from C */
363END(stub_ptregs_64) 452END(stub_ptregs_64)
364 453
365.macro ptregs_stub func 454.macro ptregs_stub func
@@ -402,6 +491,17 @@ ENTRY(__switch_to_asm)
402 movq %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset 491 movq %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
403#endif 492#endif
404 493
494#ifdef CONFIG_RETPOLINE
495 /*
496 * When switching from a shallower to a deeper call stack
497 * the RSB may either underflow or use entries populated
498 * with userspace addresses. On CPUs where those concerns
499 * exist, overwrite the RSB with entries which capture
500 * speculative execution to prevent attack.
501 */
502 FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
503#endif
504
405 /* restore callee-saved registers */ 505 /* restore callee-saved registers */
406 popq %r15 506 popq %r15
407 popq %r14 507 popq %r14
@@ -438,7 +538,7 @@ ENTRY(ret_from_fork)
4381: 5381:
439 /* kernel thread */ 539 /* kernel thread */
440 movq %r12, %rdi 540 movq %r12, %rdi
441 call *%rbx 541 CALL_NOSPEC %rbx
442 /* 542 /*
443 * A kernel thread is allowed to return here after successfully 543 * A kernel thread is allowed to return here after successfully
444 * calling do_execve(). Exit to userspace to complete the execve() 544 * calling do_execve(). Exit to userspace to complete the execve()
@@ -466,12 +566,13 @@ END(irq_entries_start)
466 566
467.macro DEBUG_ENTRY_ASSERT_IRQS_OFF 567.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
468#ifdef CONFIG_DEBUG_ENTRY 568#ifdef CONFIG_DEBUG_ENTRY
469 pushfq 569 pushq %rax
470 testl $X86_EFLAGS_IF, (%rsp) 570 SAVE_FLAGS(CLBR_RAX)
571 testl $X86_EFLAGS_IF, %eax
471 jz .Lokay_\@ 572 jz .Lokay_\@
472 ud2 573 ud2
473.Lokay_\@: 574.Lokay_\@:
474 addq $8, %rsp 575 popq %rax
475#endif 576#endif
476.endm 577.endm
477 578
@@ -563,6 +664,13 @@ END(irq_entries_start)
563/* 0(%rsp): ~(interrupt number) */ 664/* 0(%rsp): ~(interrupt number) */
564 .macro interrupt func 665 .macro interrupt func
565 cld 666 cld
667
668 testb $3, CS-ORIG_RAX(%rsp)
669 jz 1f
670 SWAPGS
671 call switch_to_thread_stack
6721:
673
566 ALLOC_PT_GPREGS_ON_STACK 674 ALLOC_PT_GPREGS_ON_STACK
567 SAVE_C_REGS 675 SAVE_C_REGS
568 SAVE_EXTRA_REGS 676 SAVE_EXTRA_REGS
@@ -572,12 +680,8 @@ END(irq_entries_start)
572 jz 1f 680 jz 1f
573 681
574 /* 682 /*
575 * IRQ from user mode. Switch to kernel gsbase and inform context 683 * IRQ from user mode.
576 * tracking that we're in kernel mode. 684 *
577 */
578 SWAPGS
579
580 /*
581 * We need to tell lockdep that IRQs are off. We can't do this until 685 * We need to tell lockdep that IRQs are off. We can't do this until
582 * we fix gsbase, and we should do it before enter_from_user_mode 686 * we fix gsbase, and we should do it before enter_from_user_mode
583 * (which can take locks). Since TRACE_IRQS_OFF idempotent, 687 * (which can take locks). Since TRACE_IRQS_OFF idempotent,
@@ -630,10 +734,43 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
630 ud2 734 ud2
6311: 7351:
632#endif 736#endif
633 SWAPGS
634 POP_EXTRA_REGS 737 POP_EXTRA_REGS
635 POP_C_REGS 738 popq %r11
636 addq $8, %rsp /* skip regs->orig_ax */ 739 popq %r10
740 popq %r9
741 popq %r8
742 popq %rax
743 popq %rcx
744 popq %rdx
745 popq %rsi
746
747 /*
748 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
749 * Save old stack pointer and switch to trampoline stack.
750 */
751 movq %rsp, %rdi
752 movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
753
754 /* Copy the IRET frame to the trampoline stack. */
755 pushq 6*8(%rdi) /* SS */
756 pushq 5*8(%rdi) /* RSP */
757 pushq 4*8(%rdi) /* EFLAGS */
758 pushq 3*8(%rdi) /* CS */
759 pushq 2*8(%rdi) /* RIP */
760
761 /* Push user RDI on the trampoline stack. */
762 pushq (%rdi)
763
764 /*
765 * We are on the trampoline stack. All regs except RDI are live.
766 * We can do future final exit work right here.
767 */
768
769 SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
770
771 /* Restore RDI. */
772 popq %rdi
773 SWAPGS
637 INTERRUPT_RETURN 774 INTERRUPT_RETURN
638 775
639 776
@@ -713,7 +850,9 @@ native_irq_return_ldt:
713 */ 850 */
714 851
715 pushq %rdi /* Stash user RDI */ 852 pushq %rdi /* Stash user RDI */
716 SWAPGS 853 SWAPGS /* to kernel GS */
854 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi /* to kernel CR3 */
855
717 movq PER_CPU_VAR(espfix_waddr), %rdi 856 movq PER_CPU_VAR(espfix_waddr), %rdi
718 movq %rax, (0*8)(%rdi) /* user RAX */ 857 movq %rax, (0*8)(%rdi) /* user RAX */
719 movq (1*8)(%rsp), %rax /* user RIP */ 858 movq (1*8)(%rsp), %rax /* user RIP */
@@ -729,7 +868,6 @@ native_irq_return_ldt:
729 /* Now RAX == RSP. */ 868 /* Now RAX == RSP. */
730 869
731 andl $0xffff0000, %eax /* RAX = (RSP & 0xffff0000) */ 870 andl $0xffff0000, %eax /* RAX = (RSP & 0xffff0000) */
732 popq %rdi /* Restore user RDI */
733 871
734 /* 872 /*
735 * espfix_stack[31:16] == 0. The page tables are set up such that 873 * espfix_stack[31:16] == 0. The page tables are set up such that
@@ -740,7 +878,11 @@ native_irq_return_ldt:
740 * still points to an RO alias of the ESPFIX stack. 878 * still points to an RO alias of the ESPFIX stack.
741 */ 879 */
742 orq PER_CPU_VAR(espfix_stack), %rax 880 orq PER_CPU_VAR(espfix_stack), %rax
743 SWAPGS 881
882 SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
883 SWAPGS /* to user GS */
884 popq %rdi /* Restore user RDI */
885
744 movq %rax, %rsp 886 movq %rax, %rsp
745 UNWIND_HINT_IRET_REGS offset=8 887 UNWIND_HINT_IRET_REGS offset=8
746 888
@@ -829,7 +971,35 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
829/* 971/*
830 * Exception entry points. 972 * Exception entry points.
831 */ 973 */
832#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8) 974#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)
975
976/*
977 * Switch to the thread stack. This is called with the IRET frame and
978 * orig_ax on the stack. (That is, RDI..R12 are not on the stack and
979 * space has not been allocated for them.)
980 */
981ENTRY(switch_to_thread_stack)
982 UNWIND_HINT_FUNC
983
984 pushq %rdi
985 /* Need to switch before accessing the thread stack. */
986 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
987 movq %rsp, %rdi
988 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
989 UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI
990
991 pushq 7*8(%rdi) /* regs->ss */
992 pushq 6*8(%rdi) /* regs->rsp */
993 pushq 5*8(%rdi) /* regs->eflags */
994 pushq 4*8(%rdi) /* regs->cs */
995 pushq 3*8(%rdi) /* regs->ip */
996 pushq 2*8(%rdi) /* regs->orig_ax */
997 pushq 8(%rdi) /* return address */
998 UNWIND_HINT_FUNC
999
1000 movq (%rdi), %rdi
1001 ret
1002END(switch_to_thread_stack)
833 1003
834.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 1004.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
835ENTRY(\sym) 1005ENTRY(\sym)
@@ -848,11 +1018,12 @@ ENTRY(\sym)
848 1018
849 ALLOC_PT_GPREGS_ON_STACK 1019 ALLOC_PT_GPREGS_ON_STACK
850 1020
851 .if \paranoid 1021 .if \paranoid < 2
852 .if \paranoid == 1
853 testb $3, CS(%rsp) /* If coming from userspace, switch stacks */ 1022 testb $3, CS(%rsp) /* If coming from userspace, switch stacks */
854 jnz 1f 1023 jnz .Lfrom_usermode_switch_stack_\@
855 .endif 1024 .endif
1025
1026 .if \paranoid
856 call paranoid_entry 1027 call paranoid_entry
857 .else 1028 .else
858 call error_entry 1029 call error_entry
@@ -894,20 +1065,15 @@ ENTRY(\sym)
894 jmp error_exit 1065 jmp error_exit
895 .endif 1066 .endif
896 1067
897 .if \paranoid == 1 1068 .if \paranoid < 2
898 /* 1069 /*
899 * Paranoid entry from userspace. Switch stacks and treat it 1070 * Entry from userspace. Switch stacks and treat it
900 * as a normal entry. This means that paranoid handlers 1071 * as a normal entry. This means that paranoid handlers
901 * run in real process context if user_mode(regs). 1072 * run in real process context if user_mode(regs).
902 */ 1073 */
9031: 1074.Lfrom_usermode_switch_stack_\@:
904 call error_entry 1075 call error_entry
905 1076
906
907 movq %rsp, %rdi /* pt_regs pointer */
908 call sync_regs
909 movq %rax, %rsp /* switch stack */
910
911 movq %rsp, %rdi /* pt_regs pointer */ 1077 movq %rsp, %rdi /* pt_regs pointer */
912 1078
913 .if \has_error_code 1079 .if \has_error_code
@@ -1098,7 +1264,7 @@ idtentry async_page_fault do_async_page_fault has_error_code=1
1098#endif 1264#endif
1099 1265
1100#ifdef CONFIG_X86_MCE 1266#ifdef CONFIG_X86_MCE
1101idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(%rip) 1267idtentry machine_check do_mce has_error_code=0 paranoid=1
1102#endif 1268#endif
1103 1269
1104/* 1270/*
@@ -1119,7 +1285,11 @@ ENTRY(paranoid_entry)
1119 js 1f /* negative -> in kernel */ 1285 js 1f /* negative -> in kernel */
1120 SWAPGS 1286 SWAPGS
1121 xorl %ebx, %ebx 1287 xorl %ebx, %ebx
11221: ret 1288
12891:
1290 SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
1291
1292 ret
1123END(paranoid_entry) 1293END(paranoid_entry)
1124 1294
1125/* 1295/*
@@ -1141,6 +1311,7 @@ ENTRY(paranoid_exit)
1141 testl %ebx, %ebx /* swapgs needed? */ 1311 testl %ebx, %ebx /* swapgs needed? */
1142 jnz .Lparanoid_exit_no_swapgs 1312 jnz .Lparanoid_exit_no_swapgs
1143 TRACE_IRQS_IRETQ 1313 TRACE_IRQS_IRETQ
1314 RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
1144 SWAPGS_UNSAFE_STACK 1315 SWAPGS_UNSAFE_STACK
1145 jmp .Lparanoid_exit_restore 1316 jmp .Lparanoid_exit_restore
1146.Lparanoid_exit_no_swapgs: 1317.Lparanoid_exit_no_swapgs:
@@ -1168,8 +1339,18 @@ ENTRY(error_entry)
1168 * from user mode due to an IRET fault. 1339 * from user mode due to an IRET fault.
1169 */ 1340 */
1170 SWAPGS 1341 SWAPGS
1342 /* We have user CR3. Change to kernel CR3. */
1343 SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
1171 1344
1172.Lerror_entry_from_usermode_after_swapgs: 1345.Lerror_entry_from_usermode_after_swapgs:
1346 /* Put us onto the real thread stack. */
1347 popq %r12 /* save return addr in %12 */
1348 movq %rsp, %rdi /* arg0 = pt_regs pointer */
1349 call sync_regs
1350 movq %rax, %rsp /* switch stack */
1351 ENCODE_FRAME_POINTER
1352 pushq %r12
1353
1173 /* 1354 /*
1174 * We need to tell lockdep that IRQs are off. We can't do this until 1355 * We need to tell lockdep that IRQs are off. We can't do this until
1175 * we fix gsbase, and we should do it before enter_from_user_mode 1356 * we fix gsbase, and we should do it before enter_from_user_mode
@@ -1206,6 +1387,7 @@ ENTRY(error_entry)
1206 * .Lgs_change's error handler with kernel gsbase. 1387 * .Lgs_change's error handler with kernel gsbase.
1207 */ 1388 */
1208 SWAPGS 1389 SWAPGS
1390 SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
1209 jmp .Lerror_entry_done 1391 jmp .Lerror_entry_done
1210 1392
1211.Lbstep_iret: 1393.Lbstep_iret:
@@ -1215,10 +1397,11 @@ ENTRY(error_entry)
1215 1397
1216.Lerror_bad_iret: 1398.Lerror_bad_iret:
1217 /* 1399 /*
1218 * We came from an IRET to user mode, so we have user gsbase. 1400 * We came from an IRET to user mode, so we have user
1219 * Switch to kernel gsbase: 1401 * gsbase and CR3. Switch to kernel gsbase and CR3:
1220 */ 1402 */
1221 SWAPGS 1403 SWAPGS
1404 SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
1222 1405
1223 /* 1406 /*
1224 * Pretend that the exception came from user mode: set up pt_regs 1407 * Pretend that the exception came from user mode: set up pt_regs
@@ -1250,6 +1433,10 @@ END(error_exit)
1250/* 1433/*
1251 * Runs on exception stack. Xen PV does not go through this path at all, 1434 * Runs on exception stack. Xen PV does not go through this path at all,
1252 * so we can use real assembly here. 1435 * so we can use real assembly here.
1436 *
1437 * Registers:
1438 * %r14: Used to save/restore the CR3 of the interrupted context
1439 * when PAGE_TABLE_ISOLATION is in use. Do not clobber.
1253 */ 1440 */
1254ENTRY(nmi) 1441ENTRY(nmi)
1255 UNWIND_HINT_IRET_REGS 1442 UNWIND_HINT_IRET_REGS
@@ -1313,6 +1500,7 @@ ENTRY(nmi)
1313 1500
1314 swapgs 1501 swapgs
1315 cld 1502 cld
1503 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
1316 movq %rsp, %rdx 1504 movq %rsp, %rdx
1317 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp 1505 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
1318 UNWIND_HINT_IRET_REGS base=%rdx offset=8 1506 UNWIND_HINT_IRET_REGS base=%rdx offset=8
@@ -1565,6 +1753,8 @@ end_repeat_nmi:
1565 movq $-1, %rsi 1753 movq $-1, %rsi
1566 call do_nmi 1754 call do_nmi
1567 1755
1756 RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
1757
1568 testl %ebx, %ebx /* swapgs needed? */ 1758 testl %ebx, %ebx /* swapgs needed? */
1569 jnz nmi_restore 1759 jnz nmi_restore
1570nmi_swapgs: 1760nmi_swapgs:
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 568e130d932c..98d5358e4041 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -48,7 +48,11 @@
48 */ 48 */
49ENTRY(entry_SYSENTER_compat) 49ENTRY(entry_SYSENTER_compat)
50 /* Interrupts are off on entry. */ 50 /* Interrupts are off on entry. */
51 SWAPGS_UNSAFE_STACK 51 SWAPGS
52
53 /* We are about to clobber %rsp anyway, clobbering here is OK */
54 SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
55
52 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp 56 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
53 57
54 /* 58 /*
@@ -186,8 +190,13 @@ ENTRY(entry_SYSCALL_compat)
186 /* Interrupts are off on entry. */ 190 /* Interrupts are off on entry. */
187 swapgs 191 swapgs
188 192
189 /* Stash user ESP and switch to the kernel stack. */ 193 /* Stash user ESP */
190 movl %esp, %r8d 194 movl %esp, %r8d
195
196 /* Use %rsp as scratch reg. User ESP is stashed in r8 */
197 SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
198
199 /* Switch to the kernel stack */
191 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp 200 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
192 201
193 /* Construct struct pt_regs on stack */ 202 /* Construct struct pt_regs on stack */
@@ -256,10 +265,22 @@ sysret32_from_system_call:
256 * when the system call started, which is already known to user 265 * when the system call started, which is already known to user
257 * code. We zero R8-R10 to avoid info leaks. 266 * code. We zero R8-R10 to avoid info leaks.
258 */ 267 */
268 movq RSP-ORIG_RAX(%rsp), %rsp
269
270 /*
271 * The original userspace %rsp (RSP-ORIG_RAX(%rsp)) is stored
272 * on the process stack which is not mapped to userspace and
273 * not readable after we SWITCH_TO_USER_CR3. Delay the CR3
274 * switch until after after the last reference to the process
275 * stack.
276 *
277 * %r8/%r9 are zeroed before the sysret, thus safe to clobber.
278 */
279 SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%r8 scratch_reg2=%r9
280
259 xorq %r8, %r8 281 xorq %r8, %r8
260 xorq %r9, %r9 282 xorq %r9, %r9
261 xorq %r10, %r10 283 xorq %r10, %r10
262 movq RSP-ORIG_RAX(%rsp), %rsp
263 swapgs 284 swapgs
264 sysretl 285 sysretl
265END(entry_SYSCALL_compat) 286END(entry_SYSCALL_compat)
@@ -306,8 +327,11 @@ ENTRY(entry_INT80_compat)
306 */ 327 */
307 movl %eax, %eax 328 movl %eax, %eax
308 329
309 /* Construct struct pt_regs on stack (iret frame is already on stack) */
310 pushq %rax /* pt_regs->orig_ax */ 330 pushq %rax /* pt_regs->orig_ax */
331
332 /* switch to thread stack expects orig_ax to be pushed */
333 call switch_to_thread_stack
334
311 pushq %rdi /* pt_regs->di */ 335 pushq %rdi /* pt_regs->di */
312 pushq %rsi /* pt_regs->si */ 336 pushq %rsi /* pt_regs->si */
313 pushq %rdx /* pt_regs->dx */ 337 pushq %rdx /* pt_regs->dx */
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 11b13c4b43d5..f19856d95c60 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -324,5 +324,5 @@ notrace time_t __vdso_time(time_t *t)
324 *t = result; 324 *t = result;
325 return result; 325 return result;
326} 326}
327int time(time_t *t) 327time_t time(time_t *t)
328 __attribute__((weak, alias("__vdso_time"))); 328 __attribute__((weak, alias("__vdso_time")));
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index f279ba2643dc..577fa8adb785 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -37,6 +37,7 @@
37#include <asm/unistd.h> 37#include <asm/unistd.h>
38#include <asm/fixmap.h> 38#include <asm/fixmap.h>
39#include <asm/traps.h> 39#include <asm/traps.h>
40#include <asm/paravirt.h>
40 41
41#define CREATE_TRACE_POINTS 42#define CREATE_TRACE_POINTS
42#include "vsyscall_trace.h" 43#include "vsyscall_trace.h"
@@ -138,6 +139,10 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
138 139
139 WARN_ON_ONCE(address != regs->ip); 140 WARN_ON_ONCE(address != regs->ip);
140 141
142 /* This should be unreachable in NATIVE mode. */
143 if (WARN_ON(vsyscall_mode == NATIVE))
144 return false;
145
141 if (vsyscall_mode == NONE) { 146 if (vsyscall_mode == NONE) {
142 warn_bad_vsyscall(KERN_INFO, regs, 147 warn_bad_vsyscall(KERN_INFO, regs,
143 "vsyscall attempted with vsyscall=none"); 148 "vsyscall attempted with vsyscall=none");
@@ -329,16 +334,47 @@ int in_gate_area_no_mm(unsigned long addr)
329 return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR; 334 return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR;
330} 335}
331 336
337/*
338 * The VSYSCALL page is the only user-accessible page in the kernel address
339 * range. Normally, the kernel page tables can have _PAGE_USER clear, but
340 * the tables covering VSYSCALL_ADDR need _PAGE_USER set if vsyscalls
341 * are enabled.
342 *
343 * Some day we may create a "minimal" vsyscall mode in which we emulate
344 * vsyscalls but leave the page not present. If so, we skip calling
345 * this.
346 */
347void __init set_vsyscall_pgtable_user_bits(pgd_t *root)
348{
349 pgd_t *pgd;
350 p4d_t *p4d;
351 pud_t *pud;
352 pmd_t *pmd;
353
354 pgd = pgd_offset_pgd(root, VSYSCALL_ADDR);
355 set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
356 p4d = p4d_offset(pgd, VSYSCALL_ADDR);
357#if CONFIG_PGTABLE_LEVELS >= 5
358 p4d->p4d |= _PAGE_USER;
359#endif
360 pud = pud_offset(p4d, VSYSCALL_ADDR);
361 set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));
362 pmd = pmd_offset(pud, VSYSCALL_ADDR);
363 set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER));
364}
365
332void __init map_vsyscall(void) 366void __init map_vsyscall(void)
333{ 367{
334 extern char __vsyscall_page; 368 extern char __vsyscall_page;
335 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page); 369 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
336 370
337 if (vsyscall_mode != NONE) 371 if (vsyscall_mode != NONE) {
338 __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, 372 __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
339 vsyscall_mode == NATIVE 373 vsyscall_mode == NATIVE
340 ? PAGE_KERNEL_VSYSCALL 374 ? PAGE_KERNEL_VSYSCALL
341 : PAGE_KERNEL_VVAR); 375 : PAGE_KERNEL_VVAR);
376 set_vsyscall_pgtable_user_bits(swapper_pg_dir);
377 }
342 378
343 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) != 379 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
344 (unsigned long)VSYSCALL_ADDR); 380 (unsigned long)VSYSCALL_ADDR);
diff --git a/arch/x86/events/amd/power.c b/arch/x86/events/amd/power.c
index a6eee5ac4f58..2aefacf5c5b2 100644
--- a/arch/x86/events/amd/power.c
+++ b/arch/x86/events/amd/power.c
@@ -277,7 +277,7 @@ static int __init amd_power_pmu_init(void)
277 int ret; 277 int ret;
278 278
279 if (!x86_match_cpu(cpu_match)) 279 if (!x86_match_cpu(cpu_match))
280 return 0; 280 return -ENODEV;
281 281
282 if (!boot_cpu_has(X86_FEATURE_ACC_POWER)) 282 if (!boot_cpu_has(X86_FEATURE_ACC_POWER))
283 return -ENODEV; 283 return -ENODEV;
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 141e07b06216..24ffa1e88cf9 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -582,6 +582,24 @@ static __init int bts_init(void)
582 if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts) 582 if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
583 return -ENODEV; 583 return -ENODEV;
584 584
585 if (boot_cpu_has(X86_FEATURE_PTI)) {
586 /*
587 * BTS hardware writes through a virtual memory map we must
588 * either use the kernel physical map, or the user mapping of
589 * the AUX buffer.
590 *
591 * However, since this driver supports per-CPU and per-task inherit
592 * we cannot use the user mapping since it will not be availble
593 * if we're not running the owning process.
594 *
595 * With PTI we can't use the kernal map either, because its not
596 * there when we run userspace.
597 *
598 * For now, disable this driver when using PTI.
599 */
600 return -ENODEV;
601 }
602
585 bts_pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE | 603 bts_pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE |
586 PERF_PMU_CAP_EXCLUSIVE; 604 PERF_PMU_CAP_EXCLUSIVE;
587 bts_pmu.task_ctx_nr = perf_sw_context; 605 bts_pmu.task_ctx_nr = perf_sw_context;
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 09c26a4f139c..731153a4681e 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3847,6 +3847,8 @@ static struct attribute *intel_pmu_attrs[] = {
3847 3847
3848__init int intel_pmu_init(void) 3848__init int intel_pmu_init(void)
3849{ 3849{
3850 struct attribute **extra_attr = NULL;
3851 struct attribute **to_free = NULL;
3850 union cpuid10_edx edx; 3852 union cpuid10_edx edx;
3851 union cpuid10_eax eax; 3853 union cpuid10_eax eax;
3852 union cpuid10_ebx ebx; 3854 union cpuid10_ebx ebx;
@@ -3854,7 +3856,6 @@ __init int intel_pmu_init(void)
3854 unsigned int unused; 3856 unsigned int unused;
3855 struct extra_reg *er; 3857 struct extra_reg *er;
3856 int version, i; 3858 int version, i;
3857 struct attribute **extra_attr = NULL;
3858 char *name; 3859 char *name;
3859 3860
3860 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { 3861 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
@@ -4294,6 +4295,7 @@ __init int intel_pmu_init(void)
4294 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 4295 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
4295 hsw_format_attr : nhm_format_attr; 4296 hsw_format_attr : nhm_format_attr;
4296 extra_attr = merge_attr(extra_attr, skl_format_attr); 4297 extra_attr = merge_attr(extra_attr, skl_format_attr);
4298 to_free = extra_attr;
4297 x86_pmu.cpu_events = get_hsw_events_attrs(); 4299 x86_pmu.cpu_events = get_hsw_events_attrs();
4298 intel_pmu_pebs_data_source_skl( 4300 intel_pmu_pebs_data_source_skl(
4299 boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X); 4301 boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
@@ -4401,6 +4403,7 @@ __init int intel_pmu_init(void)
4401 pr_cont("full-width counters, "); 4403 pr_cont("full-width counters, ");
4402 } 4404 }
4403 4405
4406 kfree(to_free);
4404 return 0; 4407 return 0;
4405} 4408}
4406 4409
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 3674a4b6f8bd..18c25ab28557 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -3,16 +3,19 @@
3#include <linux/types.h> 3#include <linux/types.h>
4#include <linux/slab.h> 4#include <linux/slab.h>
5 5
6#include <asm/cpu_entry_area.h>
6#include <asm/perf_event.h> 7#include <asm/perf_event.h>
8#include <asm/tlbflush.h>
7#include <asm/insn.h> 9#include <asm/insn.h>
8 10
9#include "../perf_event.h" 11#include "../perf_event.h"
10 12
13/* Waste a full page so it can be mapped into the cpu_entry_area */
14DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);
15
11/* The size of a BTS record in bytes: */ 16/* The size of a BTS record in bytes: */
12#define BTS_RECORD_SIZE 24 17#define BTS_RECORD_SIZE 24
13 18
14#define BTS_BUFFER_SIZE (PAGE_SIZE << 4)
15#define PEBS_BUFFER_SIZE (PAGE_SIZE << 4)
16#define PEBS_FIXUP_SIZE PAGE_SIZE 19#define PEBS_FIXUP_SIZE PAGE_SIZE
17 20
18/* 21/*
@@ -279,17 +282,67 @@ void fini_debug_store_on_cpu(int cpu)
279 282
280static DEFINE_PER_CPU(void *, insn_buffer); 283static DEFINE_PER_CPU(void *, insn_buffer);
281 284
282static int alloc_pebs_buffer(int cpu) 285static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
283{ 286{
284 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; 287 unsigned long start = (unsigned long)cea;
288 phys_addr_t pa;
289 size_t msz = 0;
290
291 pa = virt_to_phys(addr);
292
293 preempt_disable();
294 for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
295 cea_set_pte(cea, pa, prot);
296
297 /*
298 * This is a cross-CPU update of the cpu_entry_area, we must shoot down
299 * all TLB entries for it.
300 */
301 flush_tlb_kernel_range(start, start + size);
302 preempt_enable();
303}
304
305static void ds_clear_cea(void *cea, size_t size)
306{
307 unsigned long start = (unsigned long)cea;
308 size_t msz = 0;
309
310 preempt_disable();
311 for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
312 cea_set_pte(cea, 0, PAGE_NONE);
313
314 flush_tlb_kernel_range(start, start + size);
315 preempt_enable();
316}
317
318static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
319{
320 unsigned int order = get_order(size);
285 int node = cpu_to_node(cpu); 321 int node = cpu_to_node(cpu);
286 int max; 322 struct page *page;
287 void *buffer, *ibuffer; 323
324 page = __alloc_pages_node(node, flags | __GFP_ZERO, order);
325 return page ? page_address(page) : NULL;
326}
327
328static void dsfree_pages(const void *buffer, size_t size)
329{
330 if (buffer)
331 free_pages((unsigned long)buffer, get_order(size));
332}
333
334static int alloc_pebs_buffer(int cpu)
335{
336 struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
337 struct debug_store *ds = hwev->ds;
338 size_t bsiz = x86_pmu.pebs_buffer_size;
339 int max, node = cpu_to_node(cpu);
340 void *buffer, *ibuffer, *cea;
288 341
289 if (!x86_pmu.pebs) 342 if (!x86_pmu.pebs)
290 return 0; 343 return 0;
291 344
292 buffer = kzalloc_node(x86_pmu.pebs_buffer_size, GFP_KERNEL, node); 345 buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu);
293 if (unlikely(!buffer)) 346 if (unlikely(!buffer))
294 return -ENOMEM; 347 return -ENOMEM;
295 348
@@ -300,99 +353,94 @@ static int alloc_pebs_buffer(int cpu)
300 if (x86_pmu.intel_cap.pebs_format < 2) { 353 if (x86_pmu.intel_cap.pebs_format < 2) {
301 ibuffer = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node); 354 ibuffer = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
302 if (!ibuffer) { 355 if (!ibuffer) {
303 kfree(buffer); 356 dsfree_pages(buffer, bsiz);
304 return -ENOMEM; 357 return -ENOMEM;
305 } 358 }
306 per_cpu(insn_buffer, cpu) = ibuffer; 359 per_cpu(insn_buffer, cpu) = ibuffer;
307 } 360 }
308 361 hwev->ds_pebs_vaddr = buffer;
309 max = x86_pmu.pebs_buffer_size / x86_pmu.pebs_record_size; 362 /* Update the cpu entry area mapping */
310 363 cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
311 ds->pebs_buffer_base = (u64)(unsigned long)buffer; 364 ds->pebs_buffer_base = (unsigned long) cea;
365 ds_update_cea(cea, buffer, bsiz, PAGE_KERNEL);
312 ds->pebs_index = ds->pebs_buffer_base; 366 ds->pebs_index = ds->pebs_buffer_base;
313 ds->pebs_absolute_maximum = ds->pebs_buffer_base + 367 max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size);
314 max * x86_pmu.pebs_record_size; 368 ds->pebs_absolute_maximum = ds->pebs_buffer_base + max;
315
316 return 0; 369 return 0;
317} 370}
318 371
319static void release_pebs_buffer(int cpu) 372static void release_pebs_buffer(int cpu)
320{ 373{
321 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; 374 struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
375 void *cea;
322 376
323 if (!ds || !x86_pmu.pebs) 377 if (!x86_pmu.pebs)
324 return; 378 return;
325 379
326 kfree(per_cpu(insn_buffer, cpu)); 380 kfree(per_cpu(insn_buffer, cpu));
327 per_cpu(insn_buffer, cpu) = NULL; 381 per_cpu(insn_buffer, cpu) = NULL;
328 382
329 kfree((void *)(unsigned long)ds->pebs_buffer_base); 383 /* Clear the fixmap */
330 ds->pebs_buffer_base = 0; 384 cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
385 ds_clear_cea(cea, x86_pmu.pebs_buffer_size);
386 dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size);
387 hwev->ds_pebs_vaddr = NULL;
331} 388}
332 389
333static int alloc_bts_buffer(int cpu) 390static int alloc_bts_buffer(int cpu)
334{ 391{
335 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; 392 struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
336 int node = cpu_to_node(cpu); 393 struct debug_store *ds = hwev->ds;
337 int max, thresh; 394 void *buffer, *cea;
338 void *buffer; 395 int max;
339 396
340 if (!x86_pmu.bts) 397 if (!x86_pmu.bts)
341 return 0; 398 return 0;
342 399
343 buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node); 400 buffer = dsalloc_pages(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, cpu);
344 if (unlikely(!buffer)) { 401 if (unlikely(!buffer)) {
345 WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__); 402 WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
346 return -ENOMEM; 403 return -ENOMEM;
347 } 404 }
348 405 hwev->ds_bts_vaddr = buffer;
349 max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE; 406 /* Update the fixmap */
350 thresh = max / 16; 407 cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
351 408 ds->bts_buffer_base = (unsigned long) cea;
352 ds->bts_buffer_base = (u64)(unsigned long)buffer; 409 ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL);
353 ds->bts_index = ds->bts_buffer_base; 410 ds->bts_index = ds->bts_buffer_base;
354 ds->bts_absolute_maximum = ds->bts_buffer_base + 411 max = BTS_RECORD_SIZE * (BTS_BUFFER_SIZE / BTS_RECORD_SIZE);
355 max * BTS_RECORD_SIZE; 412 ds->bts_absolute_maximum = ds->bts_buffer_base + max;
356 ds->bts_interrupt_threshold = ds->bts_absolute_maximum - 413 ds->bts_interrupt_threshold = ds->bts_absolute_maximum - (max / 16);
357 thresh * BTS_RECORD_SIZE;
358
359 return 0; 414 return 0;
360} 415}
361 416
362static void release_bts_buffer(int cpu) 417static void release_bts_buffer(int cpu)
363{ 418{
364 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; 419 struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
420 void *cea;
365 421
366 if (!ds || !x86_pmu.bts) 422 if (!x86_pmu.bts)
367 return; 423 return;
368 424
369 kfree((void *)(unsigned long)ds->bts_buffer_base); 425 /* Clear the fixmap */
370 ds->bts_buffer_base = 0; 426 cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
427 ds_clear_cea(cea, BTS_BUFFER_SIZE);
428 dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE);
429 hwev->ds_bts_vaddr = NULL;
371} 430}
372 431
373static int alloc_ds_buffer(int cpu) 432static int alloc_ds_buffer(int cpu)
374{ 433{
375 int node = cpu_to_node(cpu); 434 struct debug_store *ds = &get_cpu_entry_area(cpu)->cpu_debug_store;
376 struct debug_store *ds;
377
378 ds = kzalloc_node(sizeof(*ds), GFP_KERNEL, node);
379 if (unlikely(!ds))
380 return -ENOMEM;
381 435
436 memset(ds, 0, sizeof(*ds));
382 per_cpu(cpu_hw_events, cpu).ds = ds; 437 per_cpu(cpu_hw_events, cpu).ds = ds;
383
384 return 0; 438 return 0;
385} 439}
386 440
387static void release_ds_buffer(int cpu) 441static void release_ds_buffer(int cpu)
388{ 442{
389 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
390
391 if (!ds)
392 return;
393
394 per_cpu(cpu_hw_events, cpu).ds = NULL; 443 per_cpu(cpu_hw_events, cpu).ds = NULL;
395 kfree(ds);
396} 444}
397 445
398void release_ds_buffers(void) 446void release_ds_buffers(void)
@@ -402,16 +450,22 @@ void release_ds_buffers(void)
402 if (!x86_pmu.bts && !x86_pmu.pebs) 450 if (!x86_pmu.bts && !x86_pmu.pebs)
403 return; 451 return;
404 452
405 get_online_cpus(); 453 for_each_possible_cpu(cpu)
406 for_each_online_cpu(cpu) 454 release_ds_buffer(cpu);
455
456 for_each_possible_cpu(cpu) {
457 /*
458 * Again, ignore errors from offline CPUs, they will no longer
459 * observe cpu_hw_events.ds and not program the DS_AREA when
460 * they come up.
461 */
407 fini_debug_store_on_cpu(cpu); 462 fini_debug_store_on_cpu(cpu);
463 }
408 464
409 for_each_possible_cpu(cpu) { 465 for_each_possible_cpu(cpu) {
410 release_pebs_buffer(cpu); 466 release_pebs_buffer(cpu);
411 release_bts_buffer(cpu); 467 release_bts_buffer(cpu);
412 release_ds_buffer(cpu);
413 } 468 }
414 put_online_cpus();
415} 469}
416 470
417void reserve_ds_buffers(void) 471void reserve_ds_buffers(void)
@@ -431,8 +485,6 @@ void reserve_ds_buffers(void)
431 if (!x86_pmu.pebs) 485 if (!x86_pmu.pebs)
432 pebs_err = 1; 486 pebs_err = 1;
433 487
434 get_online_cpus();
435
436 for_each_possible_cpu(cpu) { 488 for_each_possible_cpu(cpu) {
437 if (alloc_ds_buffer(cpu)) { 489 if (alloc_ds_buffer(cpu)) {
438 bts_err = 1; 490 bts_err = 1;
@@ -469,11 +521,14 @@ void reserve_ds_buffers(void)
469 if (x86_pmu.pebs && !pebs_err) 521 if (x86_pmu.pebs && !pebs_err)
470 x86_pmu.pebs_active = 1; 522 x86_pmu.pebs_active = 1;
471 523
472 for_each_online_cpu(cpu) 524 for_each_possible_cpu(cpu) {
525 /*
526 * Ignores wrmsr_on_cpu() errors for offline CPUs they
527 * will get this call through intel_pmu_cpu_starting().
528 */
473 init_debug_store_on_cpu(cpu); 529 init_debug_store_on_cpu(cpu);
530 }
474 } 531 }
475
476 put_online_cpus();
477} 532}
478 533
479/* 534/*
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 005908ee9333..a2efb490f743 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -755,14 +755,14 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
755 X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, snbep_rapl_init), 755 X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, snbep_rapl_init),
756 756
757 X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_rapl_init), 757 X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_rapl_init),
758 X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X, hsw_rapl_init), 758 X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X, hsx_rapl_init),
759 X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT, hsw_rapl_init), 759 X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT, hsw_rapl_init),
760 X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_rapl_init), 760 X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_rapl_init),
761 761
762 X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, hsw_rapl_init), 762 X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, hsw_rapl_init),
763 X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, hsw_rapl_init), 763 X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, hsw_rapl_init),
764 X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, hsx_rapl_init), 764 X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, hsx_rapl_init),
765 X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsw_rapl_init), 765 X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsx_rapl_init),
766 766
767 X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init), 767 X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
768 X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_rapl_init), 768 X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_rapl_init),
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index f7aaadf9331f..8e4ea143ed96 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -14,6 +14,8 @@
14 14
15#include <linux/perf_event.h> 15#include <linux/perf_event.h>
16 16
17#include <asm/intel_ds.h>
18
17/* To enable MSR tracing please use the generic trace points. */ 19/* To enable MSR tracing please use the generic trace points. */
18 20
19/* 21/*
@@ -77,8 +79,6 @@ struct amd_nb {
77 struct event_constraint event_constraints[X86_PMC_IDX_MAX]; 79 struct event_constraint event_constraints[X86_PMC_IDX_MAX];
78}; 80};
79 81
80/* The maximal number of PEBS events: */
81#define MAX_PEBS_EVENTS 8
82#define PEBS_COUNTER_MASK ((1ULL << MAX_PEBS_EVENTS) - 1) 82#define PEBS_COUNTER_MASK ((1ULL << MAX_PEBS_EVENTS) - 1)
83 83
84/* 84/*
@@ -95,23 +95,6 @@ struct amd_nb {
95 PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \ 95 PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
96 PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER) 96 PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER)
97 97
98/*
99 * A debug store configuration.
100 *
101 * We only support architectures that use 64bit fields.
102 */
103struct debug_store {
104 u64 bts_buffer_base;
105 u64 bts_index;
106 u64 bts_absolute_maximum;
107 u64 bts_interrupt_threshold;
108 u64 pebs_buffer_base;
109 u64 pebs_index;
110 u64 pebs_absolute_maximum;
111 u64 pebs_interrupt_threshold;
112 u64 pebs_event_reset[MAX_PEBS_EVENTS];
113};
114
115#define PEBS_REGS \ 98#define PEBS_REGS \
116 (PERF_REG_X86_AX | \ 99 (PERF_REG_X86_AX | \
117 PERF_REG_X86_BX | \ 100 PERF_REG_X86_BX | \
@@ -216,6 +199,8 @@ struct cpu_hw_events {
216 * Intel DebugStore bits 199 * Intel DebugStore bits
217 */ 200 */
218 struct debug_store *ds; 201 struct debug_store *ds;
202 void *ds_pebs_vaddr;
203 void *ds_bts_vaddr;
219 u64 pebs_enabled; 204 u64 pebs_enabled;
220 int n_pebs; 205 int n_pebs;
221 int n_large_pebs; 206 int n_large_pebs;
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index dbfd0854651f..cf5961ca8677 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -140,7 +140,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
140 ".popsection\n" \ 140 ".popsection\n" \
141 ".pushsection .altinstr_replacement, \"ax\"\n" \ 141 ".pushsection .altinstr_replacement, \"ax\"\n" \
142 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \ 142 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
143 ".popsection" 143 ".popsection\n"
144 144
145#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\ 145#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
146 OLDINSTR_2(oldinstr, 1, 2) \ 146 OLDINSTR_2(oldinstr, 1, 2) \
@@ -151,7 +151,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
151 ".pushsection .altinstr_replacement, \"ax\"\n" \ 151 ".pushsection .altinstr_replacement, \"ax\"\n" \
152 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \ 152 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
153 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ 153 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
154 ".popsection" 154 ".popsection\n"
155 155
156/* 156/*
157 * Alternative instructions for different CPU types or capabilities. 157 * Alternative instructions for different CPU types or capabilities.
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index a9e57f08bfa6..98722773391d 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -136,6 +136,7 @@ extern void disconnect_bsp_APIC(int virt_wire_setup);
136extern void disable_local_APIC(void); 136extern void disable_local_APIC(void);
137extern void lapic_shutdown(void); 137extern void lapic_shutdown(void);
138extern void sync_Arb_IDs(void); 138extern void sync_Arb_IDs(void);
139extern void init_bsp_APIC(void);
139extern void apic_intr_mode_init(void); 140extern void apic_intr_mode_init(void);
140extern void setup_local_APIC(void); 141extern void setup_local_APIC(void);
141extern void init_apic_mappings(void); 142extern void init_apic_mappings(void);
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
index ff700d81e91e..1908214b9125 100644
--- a/arch/x86/include/asm/asm-prototypes.h
+++ b/arch/x86/include/asm/asm-prototypes.h
@@ -11,7 +11,31 @@
11#include <asm/pgtable.h> 11#include <asm/pgtable.h>
12#include <asm/special_insns.h> 12#include <asm/special_insns.h>
13#include <asm/preempt.h> 13#include <asm/preempt.h>
14#include <asm/asm.h>
14 15
15#ifndef CONFIG_X86_CMPXCHG64 16#ifndef CONFIG_X86_CMPXCHG64
16extern void cmpxchg8b_emu(void); 17extern void cmpxchg8b_emu(void);
17#endif 18#endif
19
20#ifdef CONFIG_RETPOLINE
21#ifdef CONFIG_X86_32
22#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_e ## reg(void);
23#else
24#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_r ## reg(void);
25INDIRECT_THUNK(8)
26INDIRECT_THUNK(9)
27INDIRECT_THUNK(10)
28INDIRECT_THUNK(11)
29INDIRECT_THUNK(12)
30INDIRECT_THUNK(13)
31INDIRECT_THUNK(14)
32INDIRECT_THUNK(15)
33#endif
34INDIRECT_THUNK(ax)
35INDIRECT_THUNK(bx)
36INDIRECT_THUNK(cx)
37INDIRECT_THUNK(dx)
38INDIRECT_THUNK(si)
39INDIRECT_THUNK(di)
40INDIRECT_THUNK(bp)
41#endif /* CONFIG_RETPOLINE */
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 219faaec51df..386a6900e206 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -136,6 +136,7 @@
136#endif 136#endif
137 137
138#ifndef __ASSEMBLY__ 138#ifndef __ASSEMBLY__
139#ifndef __BPF__
139/* 140/*
140 * This output constraint should be used for any inline asm which has a "call" 141 * This output constraint should be used for any inline asm which has a "call"
141 * instruction. Otherwise the asm may be inserted before the frame pointer 142 * instruction. Otherwise the asm may be inserted before the frame pointer
@@ -145,5 +146,6 @@
145register unsigned long current_stack_pointer asm(_ASM_SP); 146register unsigned long current_stack_pointer asm(_ASM_SP);
146#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer) 147#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
147#endif 148#endif
149#endif
148 150
149#endif /* _ASM_X86_ASM_H */ 151#endif /* _ASM_X86_ASM_H */
diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
new file mode 100644
index 000000000000..4a7884b8dca5
--- /dev/null
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -0,0 +1,81 @@
1// SPDX-License-Identifier: GPL-2.0
2
3#ifndef _ASM_X86_CPU_ENTRY_AREA_H
4#define _ASM_X86_CPU_ENTRY_AREA_H
5
6#include <linux/percpu-defs.h>
7#include <asm/processor.h>
8#include <asm/intel_ds.h>
9
10/*
11 * cpu_entry_area is a percpu region that contains things needed by the CPU
12 * and early entry/exit code. Real types aren't used for all fields here
13 * to avoid circular header dependencies.
14 *
15 * Every field is a virtual alias of some other allocated backing store.
16 * There is no direct allocation of a struct cpu_entry_area.
17 */
18struct cpu_entry_area {
19 char gdt[PAGE_SIZE];
20
21 /*
22 * The GDT is just below entry_stack and thus serves (on x86_64) as
23 * a a read-only guard page.
24 */
25 struct entry_stack_page entry_stack_page;
26
27 /*
28 * On x86_64, the TSS is mapped RO. On x86_32, it's mapped RW because
29 * we need task switches to work, and task switches write to the TSS.
30 */
31 struct tss_struct tss;
32
33 char entry_trampoline[PAGE_SIZE];
34
35#ifdef CONFIG_X86_64
36 /*
37 * Exception stacks used for IST entries.
38 *
39 * In the future, this should have a separate slot for each stack
40 * with guard pages between them.
41 */
42 char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
43#endif
44#ifdef CONFIG_CPU_SUP_INTEL
45 /*
46 * Per CPU debug store for Intel performance monitoring. Wastes a
47 * full page at the moment.
48 */
49 struct debug_store cpu_debug_store;
50 /*
51 * The actual PEBS/BTS buffers must be mapped to user space
52 * Reserve enough fixmap PTEs.
53 */
54 struct debug_store_buffers cpu_debug_buffers;
55#endif
56};
57
58#define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area))
59#define CPU_ENTRY_AREA_TOT_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS)
60
61DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
62
63extern void setup_cpu_entry_areas(void);
64extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
65
66#define CPU_ENTRY_AREA_RO_IDT CPU_ENTRY_AREA_BASE
67#define CPU_ENTRY_AREA_PER_CPU (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)
68
69#define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT)
70
71#define CPU_ENTRY_AREA_MAP_SIZE \
72 (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE)
73
74extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
75
76static inline struct entry_stack *cpu_entry_stack(int cpu)
77{
78 return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
79}
80
81#endif
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index bf6a76202a77..ea9a7dde62e5 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -135,6 +135,8 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
135 set_bit(bit, (unsigned long *)cpu_caps_set); \ 135 set_bit(bit, (unsigned long *)cpu_caps_set); \
136} while (0) 136} while (0)
137 137
138#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
139
138#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_X86_FAST_FEATURE_TESTS) 140#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_X86_FAST_FEATURE_TESTS)
139/* 141/*
140 * Static testing of CPU features. Used the same as boot_cpu_has(). 142 * Static testing of CPU features. Used the same as boot_cpu_has().
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index c0b0e9e8aa66..25b9375c1484 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -197,17 +197,20 @@
197#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */ 197#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */
198#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */ 198#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
199#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */ 199#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
200#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
200 201
201#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ 202#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
202#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ 203#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
203#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ 204#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
204 205#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
206#define X86_FEATURE_RETPOLINE ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */
207#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */
205#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ 208#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
206#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
207#define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */ 209#define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */
208#define X86_FEATURE_AVX512_4FMAPS ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */ 210#define X86_FEATURE_AVX512_4FMAPS ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */
209 211
210#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ 212#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
213#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */
211 214
212/* Virtualization flags: Linux defined, word 8 */ 215/* Virtualization flags: Linux defined, word 8 */
213#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ 216#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
@@ -242,6 +245,7 @@
242#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */ 245#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
243#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ 246#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
244#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ 247#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
248#define X86_FEATURE_INTEL_PT ( 9*32+25) /* Intel Processor Trace */
245#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ 249#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
246#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ 250#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
247#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ 251#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
@@ -266,6 +270,7 @@
266/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ 270/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
267#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ 271#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
268#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ 272#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
273#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
269 274
270/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ 275/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
271#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ 276#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
@@ -339,5 +344,8 @@
339#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ 344#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
340#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ 345#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
341#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ 346#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
347#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
348#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
349#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
342 350
343#endif /* _ASM_X86_CPUFEATURES_H */ 351#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 4011cb03ef08..13c5ee878a47 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -7,6 +7,7 @@
7#include <asm/mmu.h> 7#include <asm/mmu.h>
8#include <asm/fixmap.h> 8#include <asm/fixmap.h>
9#include <asm/irq_vectors.h> 9#include <asm/irq_vectors.h>
10#include <asm/cpu_entry_area.h>
10 11
11#include <linux/smp.h> 12#include <linux/smp.h>
12#include <linux/percpu.h> 13#include <linux/percpu.h>
@@ -20,6 +21,8 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
20 21
21 desc->type = (info->read_exec_only ^ 1) << 1; 22 desc->type = (info->read_exec_only ^ 1) << 1;
22 desc->type |= info->contents << 2; 23 desc->type |= info->contents << 2;
24 /* Set the ACCESS bit so it can be mapped RO */
25 desc->type |= 1;
23 26
24 desc->s = 1; 27 desc->s = 1;
25 desc->dpl = 0x3; 28 desc->dpl = 0x3;
@@ -60,17 +63,10 @@ static inline struct desc_struct *get_current_gdt_rw(void)
60 return this_cpu_ptr(&gdt_page)->gdt; 63 return this_cpu_ptr(&gdt_page)->gdt;
61} 64}
62 65
63/* Get the fixmap index for a specific processor */
64static inline unsigned int get_cpu_gdt_ro_index(int cpu)
65{
66 return FIX_GDT_REMAP_BEGIN + cpu;
67}
68
69/* Provide the fixmap address of the remapped GDT */ 66/* Provide the fixmap address of the remapped GDT */
70static inline struct desc_struct *get_cpu_gdt_ro(int cpu) 67static inline struct desc_struct *get_cpu_gdt_ro(int cpu)
71{ 68{
72 unsigned int idx = get_cpu_gdt_ro_index(cpu); 69 return (struct desc_struct *)&get_cpu_entry_area(cpu)->gdt;
73 return (struct desc_struct *)__fix_to_virt(idx);
74} 70}
75 71
76/* Provide the current read-only GDT */ 72/* Provide the current read-only GDT */
@@ -185,7 +181,7 @@ static inline void set_tssldt_descriptor(void *d, unsigned long addr,
185#endif 181#endif
186} 182}
187 183
188static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr) 184static inline void __set_tss_desc(unsigned cpu, unsigned int entry, struct x86_hw_tss *addr)
189{ 185{
190 struct desc_struct *d = get_cpu_gdt_rw(cpu); 186 struct desc_struct *d = get_cpu_gdt_rw(cpu);
191 tss_desc tss; 187 tss_desc tss;
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index 14d6d5007314..b027633e7300 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -50,6 +50,12 @@
50# define DISABLE_LA57 (1<<(X86_FEATURE_LA57 & 31)) 50# define DISABLE_LA57 (1<<(X86_FEATURE_LA57 & 31))
51#endif 51#endif
52 52
53#ifdef CONFIG_PAGE_TABLE_ISOLATION
54# define DISABLE_PTI 0
55#else
56# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
57#endif
58
53/* 59/*
54 * Make sure to add features to the correct mask 60 * Make sure to add features to the correct mask
55 */ 61 */
@@ -60,7 +66,7 @@
60#define DISABLED_MASK4 (DISABLE_PCID) 66#define DISABLED_MASK4 (DISABLE_PCID)
61#define DISABLED_MASK5 0 67#define DISABLED_MASK5 0
62#define DISABLED_MASK6 0 68#define DISABLED_MASK6 0
63#define DISABLED_MASK7 0 69#define DISABLED_MASK7 (DISABLE_PTI)
64#define DISABLED_MASK8 0 70#define DISABLED_MASK8 0
65#define DISABLED_MASK9 (DISABLE_MPX) 71#define DISABLED_MASK9 (DISABLE_MPX)
66#define DISABLED_MASK10 0 72#define DISABLED_MASK10 0
diff --git a/arch/x86/include/asm/espfix.h b/arch/x86/include/asm/espfix.h
index 0211029076ea..6777480d8a42 100644
--- a/arch/x86/include/asm/espfix.h
+++ b/arch/x86/include/asm/espfix.h
@@ -2,7 +2,7 @@
2#ifndef _ASM_X86_ESPFIX_H 2#ifndef _ASM_X86_ESPFIX_H
3#define _ASM_X86_ESPFIX_H 3#define _ASM_X86_ESPFIX_H
4 4
5#ifdef CONFIG_X86_64 5#ifdef CONFIG_X86_ESPFIX64
6 6
7#include <asm/percpu.h> 7#include <asm/percpu.h>
8 8
@@ -11,7 +11,8 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);
11 11
12extern void init_espfix_bsp(void); 12extern void init_espfix_bsp(void);
13extern void init_espfix_ap(int cpu); 13extern void init_espfix_ap(int cpu);
14 14#else
15#endif /* CONFIG_X86_64 */ 15static inline void init_espfix_ap(int cpu) { }
16#endif
16 17
17#endif /* _ASM_X86_ESPFIX_H */ 18#endif /* _ASM_X86_ESPFIX_H */
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index b0c505fe9a95..64c4a30e0d39 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -44,7 +44,6 @@ extern unsigned long __FIXADDR_TOP;
44 PAGE_SIZE) 44 PAGE_SIZE)
45#endif 45#endif
46 46
47
48/* 47/*
49 * Here we define all the compile-time 'special' virtual 48 * Here we define all the compile-time 'special' virtual
50 * addresses. The point is to have a constant address at 49 * addresses. The point is to have a constant address at
@@ -84,7 +83,6 @@ enum fixed_addresses {
84 FIX_IO_APIC_BASE_0, 83 FIX_IO_APIC_BASE_0,
85 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, 84 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
86#endif 85#endif
87 FIX_RO_IDT, /* Virtual mapping for read-only IDT */
88#ifdef CONFIG_X86_32 86#ifdef CONFIG_X86_32
89 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ 87 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
90 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, 88 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
@@ -100,9 +98,6 @@ enum fixed_addresses {
100#ifdef CONFIG_X86_INTEL_MID 98#ifdef CONFIG_X86_INTEL_MID
101 FIX_LNW_VRTC, 99 FIX_LNW_VRTC,
102#endif 100#endif
103 /* Fixmap entries to remap the GDTs, one per processor. */
104 FIX_GDT_REMAP_BEGIN,
105 FIX_GDT_REMAP_END = FIX_GDT_REMAP_BEGIN + NR_CPUS - 1,
106 101
107#ifdef CONFIG_ACPI_APEI_GHES 102#ifdef CONFIG_ACPI_APEI_GHES
108 /* Used for GHES mapping from assorted contexts */ 103 /* Used for GHES mapping from assorted contexts */
@@ -143,7 +138,7 @@ enum fixed_addresses {
143extern void reserve_top_address(unsigned long reserve); 138extern void reserve_top_address(unsigned long reserve);
144 139
145#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) 140#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
146#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) 141#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
147 142
148extern int fixmaps_set; 143extern int fixmaps_set;
149 144
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index 1b0a5abcd8ae..96aa6b9884dc 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -20,16 +20,7 @@
20#ifndef _ASM_X86_HYPERVISOR_H 20#ifndef _ASM_X86_HYPERVISOR_H
21#define _ASM_X86_HYPERVISOR_H 21#define _ASM_X86_HYPERVISOR_H
22 22
23#ifdef CONFIG_HYPERVISOR_GUEST 23/* x86 hypervisor types */
24
25#include <asm/kvm_para.h>
26#include <asm/x86_init.h>
27#include <asm/xen/hypervisor.h>
28
29/*
30 * x86 hypervisor information
31 */
32
33enum x86_hypervisor_type { 24enum x86_hypervisor_type {
34 X86_HYPER_NATIVE = 0, 25 X86_HYPER_NATIVE = 0,
35 X86_HYPER_VMWARE, 26 X86_HYPER_VMWARE,
@@ -39,6 +30,12 @@ enum x86_hypervisor_type {
39 X86_HYPER_KVM, 30 X86_HYPER_KVM,
40}; 31};
41 32
33#ifdef CONFIG_HYPERVISOR_GUEST
34
35#include <asm/kvm_para.h>
36#include <asm/x86_init.h>
37#include <asm/xen/hypervisor.h>
38
42struct hypervisor_x86 { 39struct hypervisor_x86 {
43 /* Hypervisor name */ 40 /* Hypervisor name */
44 const char *name; 41 const char *name;
@@ -58,7 +55,15 @@ struct hypervisor_x86 {
58 55
59extern enum x86_hypervisor_type x86_hyper_type; 56extern enum x86_hypervisor_type x86_hyper_type;
60extern void init_hypervisor_platform(void); 57extern void init_hypervisor_platform(void);
58static inline bool hypervisor_is_type(enum x86_hypervisor_type type)
59{
60 return x86_hyper_type == type;
61}
61#else 62#else
62static inline void init_hypervisor_platform(void) { } 63static inline void init_hypervisor_platform(void) { }
64static inline bool hypervisor_is_type(enum x86_hypervisor_type type)
65{
66 return type == X86_HYPER_NATIVE;
67}
63#endif /* CONFIG_HYPERVISOR_GUEST */ 68#endif /* CONFIG_HYPERVISOR_GUEST */
64#endif /* _ASM_X86_HYPERVISOR_H */ 69#endif /* _ASM_X86_HYPERVISOR_H */
diff --git a/arch/x86/include/asm/intel_ds.h b/arch/x86/include/asm/intel_ds.h
new file mode 100644
index 000000000000..62a9f4966b42
--- /dev/null
+++ b/arch/x86/include/asm/intel_ds.h
@@ -0,0 +1,36 @@
1#ifndef _ASM_INTEL_DS_H
2#define _ASM_INTEL_DS_H
3
4#include <linux/percpu-defs.h>
5
6#define BTS_BUFFER_SIZE (PAGE_SIZE << 4)
7#define PEBS_BUFFER_SIZE (PAGE_SIZE << 4)
8
9/* The maximal number of PEBS events: */
10#define MAX_PEBS_EVENTS 8
11
12/*
13 * A debug store configuration.
14 *
15 * We only support architectures that use 64bit fields.
16 */
17struct debug_store {
18 u64 bts_buffer_base;
19 u64 bts_index;
20 u64 bts_absolute_maximum;
21 u64 bts_interrupt_threshold;
22 u64 pebs_buffer_base;
23 u64 pebs_index;
24 u64 pebs_absolute_maximum;
25 u64 pebs_interrupt_threshold;
26 u64 pebs_event_reset[MAX_PEBS_EVENTS];
27} __aligned(PAGE_SIZE);
28
29DECLARE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);
30
31struct debug_store_buffers {
32 char bts_buffer[BTS_BUFFER_SIZE];
33 char pebs_buffer[PEBS_BUFFER_SIZE];
34};
35
36#endif
diff --git a/arch/x86/include/asm/invpcid.h b/arch/x86/include/asm/invpcid.h
new file mode 100644
index 000000000000..989cfa86de85
--- /dev/null
+++ b/arch/x86/include/asm/invpcid.h
@@ -0,0 +1,53 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_X86_INVPCID
3#define _ASM_X86_INVPCID
4
5static inline void __invpcid(unsigned long pcid, unsigned long addr,
6 unsigned long type)
7{
8 struct { u64 d[2]; } desc = { { pcid, addr } };
9
10 /*
11 * The memory clobber is because the whole point is to invalidate
12 * stale TLB entries and, especially if we're flushing global
13 * mappings, we don't want the compiler to reorder any subsequent
14 * memory accesses before the TLB flush.
15 *
16 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
17 * invpcid (%rcx), %rax in long mode.
18 */
19 asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
20 : : "m" (desc), "a" (type), "c" (&desc) : "memory");
21}
22
23#define INVPCID_TYPE_INDIV_ADDR 0
24#define INVPCID_TYPE_SINGLE_CTXT 1
25#define INVPCID_TYPE_ALL_INCL_GLOBAL 2
26#define INVPCID_TYPE_ALL_NON_GLOBAL 3
27
28/* Flush all mappings for a given pcid and addr, not including globals. */
29static inline void invpcid_flush_one(unsigned long pcid,
30 unsigned long addr)
31{
32 __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
33}
34
35/* Flush all mappings for a given PCID, not including globals. */
36static inline void invpcid_flush_single_context(unsigned long pcid)
37{
38 __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
39}
40
41/* Flush all mappings, including globals, for all PCIDs. */
42static inline void invpcid_flush_all(void)
43{
44 __invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
45}
46
47/* Flush all mappings for all PCIDs except globals. */
48static inline void invpcid_flush_all_nonglobals(void)
49{
50 __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
51}
52
53#endif /* _ASM_X86_INVPCID */
diff --git a/arch/x86/include/asm/irqdomain.h b/arch/x86/include/asm/irqdomain.h
index 139feef467f7..c066ffae222b 100644
--- a/arch/x86/include/asm/irqdomain.h
+++ b/arch/x86/include/asm/irqdomain.h
@@ -44,7 +44,7 @@ extern int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
44extern void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq, 44extern void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
45 unsigned int nr_irqs); 45 unsigned int nr_irqs);
46extern int mp_irqdomain_activate(struct irq_domain *domain, 46extern int mp_irqdomain_activate(struct irq_domain *domain,
47 struct irq_data *irq_data, bool early); 47 struct irq_data *irq_data, bool reserve);
48extern void mp_irqdomain_deactivate(struct irq_domain *domain, 48extern void mp_irqdomain_deactivate(struct irq_domain *domain,
49 struct irq_data *irq_data); 49 struct irq_data *irq_data);
50extern int mp_irqdomain_ioapic_idx(struct irq_domain *domain); 50extern int mp_irqdomain_ioapic_idx(struct irq_domain *domain);
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index c8ef23f2c28f..89f08955fff7 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -142,6 +142,9 @@ static inline notrace unsigned long arch_local_irq_save(void)
142 swapgs; \ 142 swapgs; \
143 sysretl 143 sysretl
144 144
145#ifdef CONFIG_DEBUG_ENTRY
146#define SAVE_FLAGS(x) pushfq; popq %rax
147#endif
145#else 148#else
146#define INTERRUPT_RETURN iret 149#define INTERRUPT_RETURN iret
147#define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit 150#define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index f86a8caa561e..395c9631e000 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -26,6 +26,7 @@ extern void die(const char *, struct pt_regs *,long);
26extern int __must_check __die(const char *, struct pt_regs *, long); 26extern int __must_check __die(const char *, struct pt_regs *, long);
27extern void show_stack_regs(struct pt_regs *regs); 27extern void show_stack_regs(struct pt_regs *regs);
28extern void __show_regs(struct pt_regs *regs, int all); 28extern void __show_regs(struct pt_regs *regs, int all);
29extern void show_iret_regs(struct pt_regs *regs);
29extern unsigned long oops_begin(void); 30extern unsigned long oops_begin(void);
30extern void oops_end(unsigned long, struct pt_regs *, int signr); 31extern void oops_end(unsigned long, struct pt_regs *, int signr);
31 32
diff --git a/arch/x86/include/asm/kmemcheck.h b/arch/x86/include/asm/kmemcheck.h
deleted file mode 100644
index ea32a7d3cf1b..000000000000
--- a/arch/x86/include/asm/kmemcheck.h
+++ /dev/null
@@ -1 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 034caa1a084e..b24b1c8b3979 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -214,8 +214,6 @@ struct x86_emulate_ops {
214 void (*halt)(struct x86_emulate_ctxt *ctxt); 214 void (*halt)(struct x86_emulate_ctxt *ctxt);
215 void (*wbinvd)(struct x86_emulate_ctxt *ctxt); 215 void (*wbinvd)(struct x86_emulate_ctxt *ctxt);
216 int (*fix_hypercall)(struct x86_emulate_ctxt *ctxt); 216 int (*fix_hypercall)(struct x86_emulate_ctxt *ctxt);
217 void (*get_fpu)(struct x86_emulate_ctxt *ctxt); /* disables preempt */
218 void (*put_fpu)(struct x86_emulate_ctxt *ctxt); /* reenables preempt */
219 int (*intercept)(struct x86_emulate_ctxt *ctxt, 217 int (*intercept)(struct x86_emulate_ctxt *ctxt,
220 struct x86_instruction_info *info, 218 struct x86_instruction_info *info,
221 enum x86_intercept_stage stage); 219 enum x86_intercept_stage stage);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 977de5fb968b..516798431328 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -536,7 +536,20 @@ struct kvm_vcpu_arch {
536 struct kvm_mmu_memory_cache mmu_page_cache; 536 struct kvm_mmu_memory_cache mmu_page_cache;
537 struct kvm_mmu_memory_cache mmu_page_header_cache; 537 struct kvm_mmu_memory_cache mmu_page_header_cache;
538 538
539 /*
540 * QEMU userspace and the guest each have their own FPU state.
541 * In vcpu_run, we switch between the user and guest FPU contexts.
542 * While running a VCPU, the VCPU thread will have the guest FPU
543 * context.
544 *
545 * Note that while the PKRU state lives inside the fpu registers,
546 * it is switched out separately at VMENTER and VMEXIT time. The
547 * "guest_fpu" state here contains the guest FPU context, with the
548 * host PRKU bits.
549 */
550 struct fpu user_fpu;
539 struct fpu guest_fpu; 551 struct fpu guest_fpu;
552
540 u64 xcr0; 553 u64 xcr0;
541 u64 guest_supported_xcr0; 554 u64 guest_supported_xcr0;
542 u32 guest_xstate_size; 555 u32 guest_xstate_size;
@@ -1435,4 +1448,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
1435#define put_smstate(type, buf, offset, val) \ 1448#define put_smstate(type, buf, offset, val) \
1436 *(type *)((buf) + (offset) - 0x7e00) = val 1449 *(type *)((buf) + (offset) - 0x7e00) = val
1437 1450
1451void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
1452 unsigned long start, unsigned long end);
1453
1438#endif /* _ASM_X86_KVM_HOST_H */ 1454#endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index c9459a4c3c68..22c5f3e6f820 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -39,7 +39,7 @@ void __init sme_unmap_bootdata(char *real_mode_data);
39 39
40void __init sme_early_init(void); 40void __init sme_early_init(void);
41 41
42void __init sme_encrypt_kernel(void); 42void __init sme_encrypt_kernel(struct boot_params *bp);
43void __init sme_enable(struct boot_params *bp); 43void __init sme_enable(struct boot_params *bp);
44 44
45int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size); 45int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
@@ -67,7 +67,7 @@ static inline void __init sme_unmap_bootdata(char *real_mode_data) { }
67 67
68static inline void __init sme_early_init(void) { } 68static inline void __init sme_early_init(void) { }
69 69
70static inline void __init sme_encrypt_kernel(void) { } 70static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
71static inline void __init sme_enable(struct boot_params *bp) { } 71static inline void __init sme_enable(struct boot_params *bp) { }
72 72
73static inline bool sme_active(void) { return false; } 73static inline bool sme_active(void) { return false; }
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 9ea26f167497..5ff3e8af2c20 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -3,6 +3,7 @@
3#define _ASM_X86_MMU_H 3#define _ASM_X86_MMU_H
4 4
5#include <linux/spinlock.h> 5#include <linux/spinlock.h>
6#include <linux/rwsem.h>
6#include <linux/mutex.h> 7#include <linux/mutex.h>
7#include <linux/atomic.h> 8#include <linux/atomic.h>
8 9
@@ -27,7 +28,8 @@ typedef struct {
27 atomic64_t tlb_gen; 28 atomic64_t tlb_gen;
28 29
29#ifdef CONFIG_MODIFY_LDT_SYSCALL 30#ifdef CONFIG_MODIFY_LDT_SYSCALL
30 struct ldt_struct *ldt; 31 struct rw_semaphore ldt_usr_sem;
32 struct ldt_struct *ldt;
31#endif 33#endif
32 34
33#ifdef CONFIG_X86_64 35#ifdef CONFIG_X86_64
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 6d16d15d09a0..c931b88982a0 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -50,22 +50,53 @@ struct ldt_struct {
50 * call gates. On native, we could merge the ldt_struct and LDT 50 * call gates. On native, we could merge the ldt_struct and LDT
51 * allocations, but it's not worth trying to optimize. 51 * allocations, but it's not worth trying to optimize.
52 */ 52 */
53 struct desc_struct *entries; 53 struct desc_struct *entries;
54 unsigned int nr_entries; 54 unsigned int nr_entries;
55
56 /*
57 * If PTI is in use, then the entries array is not mapped while we're
58 * in user mode. The whole array will be aliased at the addressed
59 * given by ldt_slot_va(slot). We use two slots so that we can allocate
60 * and map, and enable a new LDT without invalidating the mapping
61 * of an older, still-in-use LDT.
62 *
63 * slot will be -1 if this LDT doesn't have an alias mapping.
64 */
65 int slot;
55}; 66};
56 67
68/* This is a multiple of PAGE_SIZE. */
69#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)
70
71static inline void *ldt_slot_va(int slot)
72{
73#ifdef CONFIG_X86_64
74 return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
75#else
76 BUG();
77#endif
78}
79
57/* 80/*
58 * Used for LDT copy/destruction. 81 * Used for LDT copy/destruction.
59 */ 82 */
60int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm); 83static inline void init_new_context_ldt(struct mm_struct *mm)
84{
85 mm->context.ldt = NULL;
86 init_rwsem(&mm->context.ldt_usr_sem);
87}
88int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
61void destroy_context_ldt(struct mm_struct *mm); 89void destroy_context_ldt(struct mm_struct *mm);
90void ldt_arch_exit_mmap(struct mm_struct *mm);
62#else /* CONFIG_MODIFY_LDT_SYSCALL */ 91#else /* CONFIG_MODIFY_LDT_SYSCALL */
63static inline int init_new_context_ldt(struct task_struct *tsk, 92static inline void init_new_context_ldt(struct mm_struct *mm) { }
64 struct mm_struct *mm) 93static inline int ldt_dup_context(struct mm_struct *oldmm,
94 struct mm_struct *mm)
65{ 95{
66 return 0; 96 return 0;
67} 97}
68static inline void destroy_context_ldt(struct mm_struct *mm) {} 98static inline void destroy_context_ldt(struct mm_struct *mm) { }
99static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
69#endif 100#endif
70 101
71static inline void load_mm_ldt(struct mm_struct *mm) 102static inline void load_mm_ldt(struct mm_struct *mm)
@@ -90,10 +121,31 @@ static inline void load_mm_ldt(struct mm_struct *mm)
90 * that we can see. 121 * that we can see.
91 */ 122 */
92 123
93 if (unlikely(ldt)) 124 if (unlikely(ldt)) {
94 set_ldt(ldt->entries, ldt->nr_entries); 125 if (static_cpu_has(X86_FEATURE_PTI)) {
95 else 126 if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
127 /*
128 * Whoops -- either the new LDT isn't mapped
129 * (if slot == -1) or is mapped into a bogus
130 * slot (if slot > 1).
131 */
132 clear_LDT();
133 return;
134 }
135
136 /*
137 * If page table isolation is enabled, ldt->entries
138 * will not be mapped in the userspace pagetables.
139 * Tell the CPU to access the LDT through the alias
140 * at ldt_slot_va(ldt->slot).
141 */
142 set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries);
143 } else {
144 set_ldt(ldt->entries, ldt->nr_entries);
145 }
146 } else {
96 clear_LDT(); 147 clear_LDT();
148 }
97#else 149#else
98 clear_LDT(); 150 clear_LDT();
99#endif 151#endif
@@ -132,18 +184,21 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
132static inline int init_new_context(struct task_struct *tsk, 184static inline int init_new_context(struct task_struct *tsk,
133 struct mm_struct *mm) 185 struct mm_struct *mm)
134{ 186{
187 mutex_init(&mm->context.lock);
188
135 mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id); 189 mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
136 atomic64_set(&mm->context.tlb_gen, 0); 190 atomic64_set(&mm->context.tlb_gen, 0);
137 191
138 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS 192#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
139 if (cpu_feature_enabled(X86_FEATURE_OSPKE)) { 193 if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
140 /* pkey 0 is the default and always allocated */ 194 /* pkey 0 is the default and always allocated */
141 mm->context.pkey_allocation_map = 0x1; 195 mm->context.pkey_allocation_map = 0x1;
142 /* -1 means unallocated or invalid */ 196 /* -1 means unallocated or invalid */
143 mm->context.execute_only_pkey = -1; 197 mm->context.execute_only_pkey = -1;
144 } 198 }
145 #endif 199#endif
146 return init_new_context_ldt(tsk, mm); 200 init_new_context_ldt(mm);
201 return 0;
147} 202}
148static inline void destroy_context(struct mm_struct *mm) 203static inline void destroy_context(struct mm_struct *mm)
149{ 204{
@@ -176,15 +231,16 @@ do { \
176} while (0) 231} while (0)
177#endif 232#endif
178 233
179static inline void arch_dup_mmap(struct mm_struct *oldmm, 234static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
180 struct mm_struct *mm)
181{ 235{
182 paravirt_arch_dup_mmap(oldmm, mm); 236 paravirt_arch_dup_mmap(oldmm, mm);
237 return ldt_dup_context(oldmm, mm);
183} 238}
184 239
185static inline void arch_exit_mmap(struct mm_struct *mm) 240static inline void arch_exit_mmap(struct mm_struct *mm)
186{ 241{
187 paravirt_arch_exit_mmap(mm); 242 paravirt_arch_exit_mmap(mm);
243 ldt_arch_exit_mmap(mm);
188} 244}
189 245
190#ifdef CONFIG_X86_64 246#ifdef CONFIG_X86_64
@@ -282,33 +338,6 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
282} 338}
283 339
284/* 340/*
285 * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID
286 * bits. This serves two purposes. It prevents a nasty situation in
287 * which PCID-unaware code saves CR3, loads some other value (with PCID
288 * == 0), and then restores CR3, thus corrupting the TLB for ASID 0 if
289 * the saved ASID was nonzero. It also means that any bugs involving
290 * loading a PCID-enabled CR3 with CR4.PCIDE off will trigger
291 * deterministically.
292 */
293
294static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid)
295{
296 if (static_cpu_has(X86_FEATURE_PCID)) {
297 VM_WARN_ON_ONCE(asid > 4094);
298 return __sme_pa(mm->pgd) | (asid + 1);
299 } else {
300 VM_WARN_ON_ONCE(asid != 0);
301 return __sme_pa(mm->pgd);
302 }
303}
304
305static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid)
306{
307 VM_WARN_ON_ONCE(asid > 4094);
308 return __sme_pa(mm->pgd) | (asid + 1) | CR3_NOFLUSH;
309}
310
311/*
312 * This can be used from process context to figure out what the value of 341 * This can be used from process context to figure out what the value of
313 * CR3 is without needing to do a (slow) __read_cr3(). 342 * CR3 is without needing to do a (slow) __read_cr3().
314 * 343 *
@@ -317,7 +346,7 @@ static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid)
317 */ 346 */
318static inline unsigned long __get_current_cr3_fast(void) 347static inline unsigned long __get_current_cr3_fast(void)
319{ 348{
320 unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm), 349 unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
321 this_cpu_read(cpu_tlbstate.loaded_mm_asid)); 350 this_cpu_read(cpu_tlbstate.loaded_mm_asid));
322 351
323 /* For now, be very restrictive about when this can be called. */ 352 /* For now, be very restrictive about when this can be called. */
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 5400add2885b..8bf450b13d9f 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -7,6 +7,7 @@
7#include <linux/nmi.h> 7#include <linux/nmi.h>
8#include <asm/io.h> 8#include <asm/io.h>
9#include <asm/hyperv.h> 9#include <asm/hyperv.h>
10#include <asm/nospec-branch.h>
10 11
11/* 12/*
12 * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent 13 * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
@@ -186,10 +187,11 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
186 return U64_MAX; 187 return U64_MAX;
187 188
188 __asm__ __volatile__("mov %4, %%r8\n" 189 __asm__ __volatile__("mov %4, %%r8\n"
189 "call *%5" 190 CALL_NOSPEC
190 : "=a" (hv_status), ASM_CALL_CONSTRAINT, 191 : "=a" (hv_status), ASM_CALL_CONSTRAINT,
191 "+c" (control), "+d" (input_address) 192 "+c" (control), "+d" (input_address)
192 : "r" (output_address), "m" (hv_hypercall_pg) 193 : "r" (output_address),
194 THUNK_TARGET(hv_hypercall_pg)
193 : "cc", "memory", "r8", "r9", "r10", "r11"); 195 : "cc", "memory", "r8", "r9", "r10", "r11");
194#else 196#else
195 u32 input_address_hi = upper_32_bits(input_address); 197 u32 input_address_hi = upper_32_bits(input_address);
@@ -200,13 +202,13 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
200 if (!hv_hypercall_pg) 202 if (!hv_hypercall_pg)
201 return U64_MAX; 203 return U64_MAX;
202 204
203 __asm__ __volatile__("call *%7" 205 __asm__ __volatile__(CALL_NOSPEC
204 : "=A" (hv_status), 206 : "=A" (hv_status),
205 "+c" (input_address_lo), ASM_CALL_CONSTRAINT 207 "+c" (input_address_lo), ASM_CALL_CONSTRAINT
206 : "A" (control), 208 : "A" (control),
207 "b" (input_address_hi), 209 "b" (input_address_hi),
208 "D"(output_address_hi), "S"(output_address_lo), 210 "D"(output_address_hi), "S"(output_address_lo),
209 "m" (hv_hypercall_pg) 211 THUNK_TARGET(hv_hypercall_pg)
210 : "cc", "memory"); 212 : "cc", "memory");
211#endif /* !x86_64 */ 213#endif /* !x86_64 */
212 return hv_status; 214 return hv_status;
@@ -227,10 +229,10 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
227 229
228#ifdef CONFIG_X86_64 230#ifdef CONFIG_X86_64
229 { 231 {
230 __asm__ __volatile__("call *%4" 232 __asm__ __volatile__(CALL_NOSPEC
231 : "=a" (hv_status), ASM_CALL_CONSTRAINT, 233 : "=a" (hv_status), ASM_CALL_CONSTRAINT,
232 "+c" (control), "+d" (input1) 234 "+c" (control), "+d" (input1)
233 : "m" (hv_hypercall_pg) 235 : THUNK_TARGET(hv_hypercall_pg)
234 : "cc", "r8", "r9", "r10", "r11"); 236 : "cc", "r8", "r9", "r10", "r11");
235 } 237 }
236#else 238#else
@@ -238,13 +240,13 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
238 u32 input1_hi = upper_32_bits(input1); 240 u32 input1_hi = upper_32_bits(input1);
239 u32 input1_lo = lower_32_bits(input1); 241 u32 input1_lo = lower_32_bits(input1);
240 242
241 __asm__ __volatile__ ("call *%5" 243 __asm__ __volatile__ (CALL_NOSPEC
242 : "=A"(hv_status), 244 : "=A"(hv_status),
243 "+c"(input1_lo), 245 "+c"(input1_lo),
244 ASM_CALL_CONSTRAINT 246 ASM_CALL_CONSTRAINT
245 : "A" (control), 247 : "A" (control),
246 "b" (input1_hi), 248 "b" (input1_hi),
247 "m" (hv_hypercall_pg) 249 THUNK_TARGET(hv_hypercall_pg)
248 : "cc", "edi", "esi"); 250 : "cc", "edi", "esi");
249 } 251 }
250#endif 252#endif
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 34c4922bbc3f..e7b983a35506 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -355,6 +355,9 @@
355#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL 355#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
356#define FAM10H_MMIO_CONF_BASE_SHIFT 20 356#define FAM10H_MMIO_CONF_BASE_SHIFT 20
357#define MSR_FAM10H_NODE_ID 0xc001100c 357#define MSR_FAM10H_NODE_ID 0xc001100c
358#define MSR_F10H_DECFG 0xc0011029
359#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1
360#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
358 361
359/* K8 MSRs */ 362/* K8 MSRs */
360#define MSR_K8_TOP_MEM1 0xc001001a 363#define MSR_K8_TOP_MEM1 0xc001001a
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
new file mode 100644
index 000000000000..4ad41087ce0e
--- /dev/null
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -0,0 +1,222 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#ifndef __NOSPEC_BRANCH_H__
4#define __NOSPEC_BRANCH_H__
5
6#include <asm/alternative.h>
7#include <asm/alternative-asm.h>
8#include <asm/cpufeatures.h>
9
10/*
11 * Fill the CPU return stack buffer.
12 *
13 * Each entry in the RSB, if used for a speculative 'ret', contains an
14 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
15 *
16 * This is required in various cases for retpoline and IBRS-based
17 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
18 * eliminate potentially bogus entries from the RSB, and sometimes
19 * purely to ensure that it doesn't get empty, which on some CPUs would
20 * allow predictions from other (unwanted!) sources to be used.
21 *
22 * We define a CPP macro such that it can be used from both .S files and
23 * inline assembly. It's possible to do a .macro and then include that
24 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
25 */
26
27#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
28#define RSB_FILL_LOOPS 16 /* To avoid underflow */
29
30/*
31 * Google experimented with loop-unrolling and this turned out to be
32 * the optimal version — two calls, each with their own speculation
33 * trap should their return address end up getting used, in a loop.
34 */
35#define __FILL_RETURN_BUFFER(reg, nr, sp) \
36 mov $(nr/2), reg; \
37771: \
38 call 772f; \
39773: /* speculation trap */ \
40 pause; \
41 lfence; \
42 jmp 773b; \
43772: \
44 call 774f; \
45775: /* speculation trap */ \
46 pause; \
47 lfence; \
48 jmp 775b; \
49774: \
50 dec reg; \
51 jnz 771b; \
52 add $(BITS_PER_LONG/8) * nr, sp;
53
54#ifdef __ASSEMBLY__
55
56/*
57 * This should be used immediately before a retpoline alternative. It tells
58 * objtool where the retpolines are so that it can make sense of the control
59 * flow by just reading the original instruction(s) and ignoring the
60 * alternatives.
61 */
62.macro ANNOTATE_NOSPEC_ALTERNATIVE
63 .Lannotate_\@:
64 .pushsection .discard.nospec
65 .long .Lannotate_\@ - .
66 .popsection
67.endm
68
69/*
70 * These are the bare retpoline primitives for indirect jmp and call.
71 * Do not use these directly; they only exist to make the ALTERNATIVE
72 * invocation below less ugly.
73 */
74.macro RETPOLINE_JMP reg:req
75 call .Ldo_rop_\@
76.Lspec_trap_\@:
77 pause
78 lfence
79 jmp .Lspec_trap_\@
80.Ldo_rop_\@:
81 mov \reg, (%_ASM_SP)
82 ret
83.endm
84
85/*
86 * This is a wrapper around RETPOLINE_JMP so the called function in reg
87 * returns to the instruction after the macro.
88 */
89.macro RETPOLINE_CALL reg:req
90 jmp .Ldo_call_\@
91.Ldo_retpoline_jmp_\@:
92 RETPOLINE_JMP \reg
93.Ldo_call_\@:
94 call .Ldo_retpoline_jmp_\@
95.endm
96
97/*
98 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
99 * indirect jmp/call which may be susceptible to the Spectre variant 2
100 * attack.
101 */
102.macro JMP_NOSPEC reg:req
103#ifdef CONFIG_RETPOLINE
104 ANNOTATE_NOSPEC_ALTERNATIVE
105 ALTERNATIVE_2 __stringify(jmp *\reg), \
106 __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \
107 __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
108#else
109 jmp *\reg
110#endif
111.endm
112
113.macro CALL_NOSPEC reg:req
114#ifdef CONFIG_RETPOLINE
115 ANNOTATE_NOSPEC_ALTERNATIVE
116 ALTERNATIVE_2 __stringify(call *\reg), \
117 __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
118 __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD
119#else
120 call *\reg
121#endif
122.endm
123
124 /*
125 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
126 * monstrosity above, manually.
127 */
128.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
129#ifdef CONFIG_RETPOLINE
130 ANNOTATE_NOSPEC_ALTERNATIVE
131 ALTERNATIVE "jmp .Lskip_rsb_\@", \
132 __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \
133 \ftr
134.Lskip_rsb_\@:
135#endif
136.endm
137
138#else /* __ASSEMBLY__ */
139
140#define ANNOTATE_NOSPEC_ALTERNATIVE \
141 "999:\n\t" \
142 ".pushsection .discard.nospec\n\t" \
143 ".long 999b - .\n\t" \
144 ".popsection\n\t"
145
146#if defined(CONFIG_X86_64) && defined(RETPOLINE)
147
148/*
149 * Since the inline asm uses the %V modifier which is only in newer GCC,
150 * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE.
151 */
152# define CALL_NOSPEC \
153 ANNOTATE_NOSPEC_ALTERNATIVE \
154 ALTERNATIVE( \
155 "call *%[thunk_target]\n", \
156 "call __x86_indirect_thunk_%V[thunk_target]\n", \
157 X86_FEATURE_RETPOLINE)
158# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
159
160#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
161/*
162 * For i386 we use the original ret-equivalent retpoline, because
163 * otherwise we'll run out of registers. We don't care about CET
164 * here, anyway.
165 */
166# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n", \
167 " jmp 904f;\n" \
168 " .align 16\n" \
169 "901: call 903f;\n" \
170 "902: pause;\n" \
171 " lfence;\n" \
172 " jmp 902b;\n" \
173 " .align 16\n" \
174 "903: addl $4, %%esp;\n" \
175 " pushl %[thunk_target];\n" \
176 " ret;\n" \
177 " .align 16\n" \
178 "904: call 901b;\n", \
179 X86_FEATURE_RETPOLINE)
180
181# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
182#else /* No retpoline for C / inline asm */
183# define CALL_NOSPEC "call *%[thunk_target]\n"
184# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
185#endif
186
187/* The Spectre V2 mitigation variants */
188enum spectre_v2_mitigation {
189 SPECTRE_V2_NONE,
190 SPECTRE_V2_RETPOLINE_MINIMAL,
191 SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
192 SPECTRE_V2_RETPOLINE_GENERIC,
193 SPECTRE_V2_RETPOLINE_AMD,
194 SPECTRE_V2_IBRS,
195};
196
197extern char __indirect_thunk_start[];
198extern char __indirect_thunk_end[];
199
200/*
201 * On VMEXIT we must ensure that no RSB predictions learned in the guest
202 * can be followed in the host, by overwriting the RSB completely. Both
203 * retpoline and IBRS mitigations for Spectre v2 need this; only on future
204 * CPUs with IBRS_ATT *might* it be avoided.
205 */
206static inline void vmexit_fill_RSB(void)
207{
208#ifdef CONFIG_RETPOLINE
209 unsigned long loops;
210
211 asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
212 ALTERNATIVE("jmp 910f",
213 __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
214 X86_FEATURE_RETPOLINE)
215 "910:"
216 : "=r" (loops), ASM_CALL_CONSTRAINT
217 : : "memory" );
218#endif
219}
220
221#endif /* __ASSEMBLY__ */
222#endif /* __NOSPEC_BRANCH_H__ */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 283efcaac8af..892df375b615 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -927,6 +927,15 @@ extern void default_banner(void);
927 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \ 927 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
928 CLBR_NONE, \ 928 CLBR_NONE, \
929 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64)) 929 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
930
931#ifdef CONFIG_DEBUG_ENTRY
932#define SAVE_FLAGS(clobbers) \
933 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_save_fl), clobbers, \
934 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
935 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_save_fl); \
936 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
937#endif
938
930#endif /* CONFIG_X86_32 */ 939#endif /* CONFIG_X86_32 */
931 940
932#endif /* __ASSEMBLY__ */ 941#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 7a5d6695abd3..eb66fa9cd0fc 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -38,6 +38,7 @@ do { \
38#define PCI_NOASSIGN_ROMS 0x80000 38#define PCI_NOASSIGN_ROMS 0x80000
39#define PCI_ROOT_NO_CRS 0x100000 39#define PCI_ROOT_NO_CRS 0x100000
40#define PCI_NOASSIGN_BARS 0x200000 40#define PCI_NOASSIGN_BARS 0x200000
41#define PCI_BIG_ROOT_WINDOW 0x400000
41 42
42extern unsigned int pci_probe; 43extern unsigned int pci_probe;
43extern unsigned long pirq_table_addr; 44extern unsigned long pirq_table_addr;
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index 4b5e1eafada7..aff42e1da6ee 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -30,6 +30,17 @@ static inline void paravirt_release_p4d(unsigned long pfn) {}
30 */ 30 */
31extern gfp_t __userpte_alloc_gfp; 31extern gfp_t __userpte_alloc_gfp;
32 32
33#ifdef CONFIG_PAGE_TABLE_ISOLATION
34/*
35 * Instead of one PGD, we acquire two PGDs. Being order-1, it is
36 * both 8k in size and 8k-aligned. That lets us just flip bit 12
37 * in a pointer to swap between the two 4k halves.
38 */
39#define PGD_ALLOCATION_ORDER 1
40#else
41#define PGD_ALLOCATION_ORDER 0
42#endif
43
33/* 44/*
34 * Allocate and free page tables. 45 * Allocate and free page tables.
35 */ 46 */
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 95e2dfd75521..e42b8943cb1a 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -28,6 +28,7 @@ extern pgd_t early_top_pgt[PTRS_PER_PGD];
28int __init __early_make_pgtable(unsigned long address, pmdval_t pmd); 28int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
29 29
30void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd); 30void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
31void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user);
31void ptdump_walk_pgd_level_checkwx(void); 32void ptdump_walk_pgd_level_checkwx(void);
32 33
33#ifdef CONFIG_DEBUG_WX 34#ifdef CONFIG_DEBUG_WX
@@ -841,7 +842,12 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
841 842
842static inline int p4d_bad(p4d_t p4d) 843static inline int p4d_bad(p4d_t p4d)
843{ 844{
844 return (p4d_flags(p4d) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0; 845 unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;
846
847 if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
848 ignore_flags |= _PAGE_NX;
849
850 return (p4d_flags(p4d) & ~ignore_flags) != 0;
845} 851}
846#endif /* CONFIG_PGTABLE_LEVELS > 3 */ 852#endif /* CONFIG_PGTABLE_LEVELS > 3 */
847 853
@@ -875,7 +881,12 @@ static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
875 881
876static inline int pgd_bad(pgd_t pgd) 882static inline int pgd_bad(pgd_t pgd)
877{ 883{
878 return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE; 884 unsigned long ignore_flags = _PAGE_USER;
885
886 if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
887 ignore_flags |= _PAGE_NX;
888
889 return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
879} 890}
880 891
881static inline int pgd_none(pgd_t pgd) 892static inline int pgd_none(pgd_t pgd)
@@ -904,7 +915,11 @@ static inline int pgd_none(pgd_t pgd)
904 * pgd_offset() returns a (pgd_t *) 915 * pgd_offset() returns a (pgd_t *)
905 * pgd_index() is used get the offset into the pgd page's array of pgd_t's; 916 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
906 */ 917 */
907#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address))) 918#define pgd_offset_pgd(pgd, address) (pgd + pgd_index((address)))
919/*
920 * a shortcut to get a pgd_t in a given mm
921 */
922#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address))
908/* 923/*
909 * a shortcut which implies the use of the kernel's pgd, instead 924 * a shortcut which implies the use of the kernel's pgd, instead
910 * of a process's 925 * of a process's
@@ -1106,7 +1121,14 @@ static inline int pud_write(pud_t pud)
1106 */ 1121 */
1107static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) 1122static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
1108{ 1123{
1109 memcpy(dst, src, count * sizeof(pgd_t)); 1124 memcpy(dst, src, count * sizeof(pgd_t));
1125#ifdef CONFIG_PAGE_TABLE_ISOLATION
1126 if (!static_cpu_has(X86_FEATURE_PTI))
1127 return;
1128 /* Clone the user space pgd as well */
1129 memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
1130 count * sizeof(pgd_t));
1131#endif
1110} 1132}
1111 1133
1112#define PTE_SHIFT ilog2(PTRS_PER_PTE) 1134#define PTE_SHIFT ilog2(PTRS_PER_PTE)
diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
index f2ca9b28fd68..ce245b0cdfca 100644
--- a/arch/x86/include/asm/pgtable_32_types.h
+++ b/arch/x86/include/asm/pgtable_32_types.h
@@ -38,13 +38,22 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
38#define LAST_PKMAP 1024 38#define LAST_PKMAP 1024
39#endif 39#endif
40 40
41#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \ 41/*
42 & PMD_MASK) 42 * Define this here and validate with BUILD_BUG_ON() in pgtable_32.c
43 * to avoid include recursion hell
44 */
45#define CPU_ENTRY_AREA_PAGES (NR_CPUS * 40)
46
47#define CPU_ENTRY_AREA_BASE \
48 ((FIXADDR_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) & PMD_MASK)
49
50#define PKMAP_BASE \
51 ((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)
43 52
44#ifdef CONFIG_HIGHMEM 53#ifdef CONFIG_HIGHMEM
45# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE) 54# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE)
46#else 55#else
47# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) 56# define VMALLOC_END (CPU_ENTRY_AREA_BASE - 2 * PAGE_SIZE)
48#endif 57#endif
49 58
50#define MODULES_VADDR VMALLOC_START 59#define MODULES_VADDR VMALLOC_START
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index e9f05331e732..81462e9a34f6 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -131,9 +131,97 @@ static inline pud_t native_pudp_get_and_clear(pud_t *xp)
131#endif 131#endif
132} 132}
133 133
134#ifdef CONFIG_PAGE_TABLE_ISOLATION
135/*
136 * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
137 * (8k-aligned and 8k in size). The kernel one is at the beginning 4k and
138 * the user one is in the last 4k. To switch between them, you
139 * just need to flip the 12th bit in their addresses.
140 */
141#define PTI_PGTABLE_SWITCH_BIT PAGE_SHIFT
142
143/*
144 * This generates better code than the inline assembly in
145 * __set_bit().
146 */
147static inline void *ptr_set_bit(void *ptr, int bit)
148{
149 unsigned long __ptr = (unsigned long)ptr;
150
151 __ptr |= BIT(bit);
152 return (void *)__ptr;
153}
154static inline void *ptr_clear_bit(void *ptr, int bit)
155{
156 unsigned long __ptr = (unsigned long)ptr;
157
158 __ptr &= ~BIT(bit);
159 return (void *)__ptr;
160}
161
162static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
163{
164 return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
165}
166
167static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
168{
169 return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
170}
171
172static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
173{
174 return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
175}
176
177static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
178{
179 return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
180}
181#endif /* CONFIG_PAGE_TABLE_ISOLATION */
182
183/*
184 * Page table pages are page-aligned. The lower half of the top
185 * level is used for userspace and the top half for the kernel.
186 *
187 * Returns true for parts of the PGD that map userspace and
188 * false for the parts that map the kernel.
189 */
190static inline bool pgdp_maps_userspace(void *__ptr)
191{
192 unsigned long ptr = (unsigned long)__ptr;
193
194 return (ptr & ~PAGE_MASK) < (PAGE_SIZE / 2);
195}
196
197#ifdef CONFIG_PAGE_TABLE_ISOLATION
198pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd);
199
200/*
201 * Take a PGD location (pgdp) and a pgd value that needs to be set there.
202 * Populates the user and returns the resulting PGD that must be set in
203 * the kernel copy of the page tables.
204 */
205static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
206{
207 if (!static_cpu_has(X86_FEATURE_PTI))
208 return pgd;
209 return __pti_set_user_pgd(pgdp, pgd);
210}
211#else
212static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
213{
214 return pgd;
215}
216#endif
217
134static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d) 218static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
135{ 219{
220#if defined(CONFIG_PAGE_TABLE_ISOLATION) && !defined(CONFIG_X86_5LEVEL)
221 p4dp->pgd = pti_set_user_pgd(&p4dp->pgd, p4d.pgd);
222#else
136 *p4dp = p4d; 223 *p4dp = p4d;
224#endif
137} 225}
138 226
139static inline void native_p4d_clear(p4d_t *p4d) 227static inline void native_p4d_clear(p4d_t *p4d)
@@ -147,7 +235,11 @@ static inline void native_p4d_clear(p4d_t *p4d)
147 235
148static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) 236static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
149{ 237{
238#ifdef CONFIG_PAGE_TABLE_ISOLATION
239 *pgdp = pti_set_user_pgd(pgdp, pgd);
240#else
150 *pgdp = pgd; 241 *pgdp = pgd;
242#endif
151} 243}
152 244
153static inline void native_pgd_clear(pgd_t *pgd) 245static inline void native_pgd_clear(pgd_t *pgd)
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 6d5f45dcd4a1..6b8f73dcbc2c 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -75,33 +75,52 @@ typedef struct { pteval_t pte; } pte_t;
75#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) 75#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
76#define PGDIR_MASK (~(PGDIR_SIZE - 1)) 76#define PGDIR_MASK (~(PGDIR_SIZE - 1))
77 77
78/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */ 78/*
79#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) 79 * See Documentation/x86/x86_64/mm.txt for a description of the memory map.
80 *
81 * Be very careful vs. KASLR when changing anything here. The KASLR address
82 * range must not overlap with anything except the KASAN shadow area, which
83 * is correct as KASAN disables KASLR.
84 */
85#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
86
80#ifdef CONFIG_X86_5LEVEL 87#ifdef CONFIG_X86_5LEVEL
81#define VMALLOC_SIZE_TB _AC(16384, UL) 88# define VMALLOC_SIZE_TB _AC(12800, UL)
82#define __VMALLOC_BASE _AC(0xff92000000000000, UL) 89# define __VMALLOC_BASE _AC(0xffa0000000000000, UL)
83#define __VMEMMAP_BASE _AC(0xffd4000000000000, UL) 90# define __VMEMMAP_BASE _AC(0xffd4000000000000, UL)
91# define LDT_PGD_ENTRY _AC(-112, UL)
92# define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
84#else 93#else
85#define VMALLOC_SIZE_TB _AC(32, UL) 94# define VMALLOC_SIZE_TB _AC(32, UL)
86#define __VMALLOC_BASE _AC(0xffffc90000000000, UL) 95# define __VMALLOC_BASE _AC(0xffffc90000000000, UL)
87#define __VMEMMAP_BASE _AC(0xffffea0000000000, UL) 96# define __VMEMMAP_BASE _AC(0xffffea0000000000, UL)
97# define LDT_PGD_ENTRY _AC(-3, UL)
98# define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
88#endif 99#endif
100
89#ifdef CONFIG_RANDOMIZE_MEMORY 101#ifdef CONFIG_RANDOMIZE_MEMORY
90#define VMALLOC_START vmalloc_base 102# define VMALLOC_START vmalloc_base
91#define VMEMMAP_START vmemmap_base 103# define VMEMMAP_START vmemmap_base
92#else 104#else
93#define VMALLOC_START __VMALLOC_BASE 105# define VMALLOC_START __VMALLOC_BASE
94#define VMEMMAP_START __VMEMMAP_BASE 106# define VMEMMAP_START __VMEMMAP_BASE
95#endif /* CONFIG_RANDOMIZE_MEMORY */ 107#endif /* CONFIG_RANDOMIZE_MEMORY */
96#define VMALLOC_END (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL)) 108
97#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) 109#define VMALLOC_END (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL))
110
111#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
98/* The module sections ends with the start of the fixmap */ 112/* The module sections ends with the start of the fixmap */
99#define MODULES_END __fix_to_virt(__end_of_fixed_addresses + 1) 113#define MODULES_END _AC(0xffffffffff000000, UL)
100#define MODULES_LEN (MODULES_END - MODULES_VADDR) 114#define MODULES_LEN (MODULES_END - MODULES_VADDR)
101#define ESPFIX_PGD_ENTRY _AC(-2, UL) 115
102#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT) 116#define ESPFIX_PGD_ENTRY _AC(-2, UL)
103#define EFI_VA_START ( -4 * (_AC(1, UL) << 30)) 117#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT)
104#define EFI_VA_END (-68 * (_AC(1, UL) << 30)) 118
119#define CPU_ENTRY_AREA_PGD _AC(-4, UL)
120#define CPU_ENTRY_AREA_BASE (CPU_ENTRY_AREA_PGD << P4D_SHIFT)
121
122#define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
123#define EFI_VA_END (-68 * (_AC(1, UL) << 30))
105 124
106#define EARLY_DYNAMIC_PAGE_TABLES 64 125#define EARLY_DYNAMIC_PAGE_TABLES 64
107 126
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index 43212a43ee69..625a52a5594f 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -38,6 +38,11 @@
38#define CR3_ADDR_MASK __sme_clr(0x7FFFFFFFFFFFF000ull) 38#define CR3_ADDR_MASK __sme_clr(0x7FFFFFFFFFFFF000ull)
39#define CR3_PCID_MASK 0xFFFull 39#define CR3_PCID_MASK 0xFFFull
40#define CR3_NOFLUSH BIT_ULL(63) 40#define CR3_NOFLUSH BIT_ULL(63)
41
42#ifdef CONFIG_PAGE_TABLE_ISOLATION
43# define X86_CR3_PTI_PCID_USER_BIT 11
44#endif
45
41#else 46#else
42/* 47/*
43 * CR3_ADDR_MASK needs at least bits 31:5 set on PAE systems, and we save 48 * CR3_ADDR_MASK needs at least bits 31:5 set on PAE systems, and we save
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index cc16fa882e3e..d3a67fba200a 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -163,9 +163,9 @@ enum cpuid_regs_idx {
163extern struct cpuinfo_x86 boot_cpu_data; 163extern struct cpuinfo_x86 boot_cpu_data;
164extern struct cpuinfo_x86 new_cpu_data; 164extern struct cpuinfo_x86 new_cpu_data;
165 165
166extern struct tss_struct doublefault_tss; 166extern struct x86_hw_tss doublefault_tss;
167extern __u32 cpu_caps_cleared[NCAPINTS]; 167extern __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
168extern __u32 cpu_caps_set[NCAPINTS]; 168extern __u32 cpu_caps_set[NCAPINTS + NBUGINTS];
169 169
170#ifdef CONFIG_SMP 170#ifdef CONFIG_SMP
171DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info); 171DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
@@ -253,6 +253,11 @@ static inline void load_cr3(pgd_t *pgdir)
253 write_cr3(__sme_pa(pgdir)); 253 write_cr3(__sme_pa(pgdir));
254} 254}
255 255
256/*
257 * Note that while the legacy 'TSS' name comes from 'Task State Segment',
258 * on modern x86 CPUs the TSS also holds information important to 64-bit mode,
259 * unrelated to the task-switch mechanism:
260 */
256#ifdef CONFIG_X86_32 261#ifdef CONFIG_X86_32
257/* This is the TSS defined by the hardware. */ 262/* This is the TSS defined by the hardware. */
258struct x86_hw_tss { 263struct x86_hw_tss {
@@ -305,7 +310,13 @@ struct x86_hw_tss {
305struct x86_hw_tss { 310struct x86_hw_tss {
306 u32 reserved1; 311 u32 reserved1;
307 u64 sp0; 312 u64 sp0;
313
314 /*
315 * We store cpu_current_top_of_stack in sp1 so it's always accessible.
316 * Linux does not use ring 1, so sp1 is not otherwise needed.
317 */
308 u64 sp1; 318 u64 sp1;
319
309 u64 sp2; 320 u64 sp2;
310 u64 reserved2; 321 u64 reserved2;
311 u64 ist[7]; 322 u64 ist[7];
@@ -323,12 +334,22 @@ struct x86_hw_tss {
323#define IO_BITMAP_BITS 65536 334#define IO_BITMAP_BITS 65536
324#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) 335#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
325#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) 336#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
326#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap) 337#define IO_BITMAP_OFFSET (offsetof(struct tss_struct, io_bitmap) - offsetof(struct tss_struct, x86_tss))
327#define INVALID_IO_BITMAP_OFFSET 0x8000 338#define INVALID_IO_BITMAP_OFFSET 0x8000
328 339
340struct entry_stack {
341 unsigned long words[64];
342};
343
344struct entry_stack_page {
345 struct entry_stack stack;
346} __aligned(PAGE_SIZE);
347
329struct tss_struct { 348struct tss_struct {
330 /* 349 /*
331 * The hardware state: 350 * The fixed hardware portion. This must not cross a page boundary
351 * at risk of violating the SDM's advice and potentially triggering
352 * errata.
332 */ 353 */
333 struct x86_hw_tss x86_tss; 354 struct x86_hw_tss x86_tss;
334 355
@@ -339,18 +360,9 @@ struct tss_struct {
339 * be within the limit. 360 * be within the limit.
340 */ 361 */
341 unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; 362 unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
363} __aligned(PAGE_SIZE);
342 364
343#ifdef CONFIG_X86_32 365DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);
344 /*
345 * Space for the temporary SYSENTER stack.
346 */
347 unsigned long SYSENTER_stack_canary;
348 unsigned long SYSENTER_stack[64];
349#endif
350
351} ____cacheline_aligned;
352
353DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
354 366
355/* 367/*
356 * sizeof(unsigned long) coming from an extra "long" at the end 368 * sizeof(unsigned long) coming from an extra "long" at the end
@@ -364,6 +376,9 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
364 376
365#ifdef CONFIG_X86_32 377#ifdef CONFIG_X86_32
366DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack); 378DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
379#else
380/* The RO copy can't be accessed with this_cpu_xyz(), so use the RW copy. */
381#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1
367#endif 382#endif
368 383
369/* 384/*
@@ -523,7 +538,7 @@ static inline void native_set_iopl_mask(unsigned mask)
523static inline void 538static inline void
524native_load_sp0(unsigned long sp0) 539native_load_sp0(unsigned long sp0)
525{ 540{
526 this_cpu_write(cpu_tss.x86_tss.sp0, sp0); 541 this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
527} 542}
528 543
529static inline void native_swapgs(void) 544static inline void native_swapgs(void)
@@ -535,12 +550,12 @@ static inline void native_swapgs(void)
535 550
536static inline unsigned long current_top_of_stack(void) 551static inline unsigned long current_top_of_stack(void)
537{ 552{
538#ifdef CONFIG_X86_64 553 /*
539 return this_cpu_read_stable(cpu_tss.x86_tss.sp0); 554 * We can't read directly from tss.sp0: sp0 on x86_32 is special in
540#else 555 * and around vm86 mode and sp0 on x86_64 is special because of the
541 /* sp0 on x86_32 is special in and around vm86 mode. */ 556 * entry trampoline.
557 */
542 return this_cpu_read_stable(cpu_current_top_of_stack); 558 return this_cpu_read_stable(cpu_current_top_of_stack);
543#endif
544} 559}
545 560
546static inline bool on_thread_stack(void) 561static inline bool on_thread_stack(void)
@@ -837,13 +852,22 @@ static inline void spin_lock_prefetch(const void *x)
837 852
838#else 853#else
839/* 854/*
840 * User space process size. 47bits minus one guard page. The guard 855 * User space process size. This is the first address outside the user range.
841 * page is necessary on Intel CPUs: if a SYSCALL instruction is at 856 * There are a few constraints that determine this:
842 * the highest possible canonical userspace address, then that 857 *
843 * syscall will enter the kernel with a non-canonical return 858 * On Intel CPUs, if a SYSCALL instruction is at the highest canonical
844 * address, and SYSRET will explode dangerously. We avoid this 859 * address, then that syscall will enter the kernel with a
845 * particular problem by preventing anything from being mapped 860 * non-canonical return address, and SYSRET will explode dangerously.
846 * at the maximum canonical address. 861 * We avoid this particular problem by preventing anything executable
862 * from being mapped at the maximum canonical address.
863 *
864 * On AMD CPUs in the Ryzen family, there's a nasty bug in which the
865 * CPUs malfunction if they execute code from the highest canonical page.
866 * They'll speculate right off the end of the canonical space, and
867 * bad things happen. This is worked around in the same way as the
868 * Intel problem.
869 *
870 * With page table isolation enabled, we map the LDT in ... [stay tuned]
847 */ 871 */
848#define TASK_SIZE_MAX ((1UL << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE) 872#define TASK_SIZE_MAX ((1UL << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE)
849 873
diff --git a/arch/x86/include/asm/pti.h b/arch/x86/include/asm/pti.h
new file mode 100644
index 000000000000..0b5ef05b2d2d
--- /dev/null
+++ b/arch/x86/include/asm/pti.h
@@ -0,0 +1,14 @@
1// SPDX-License-Identifier: GPL-2.0
2#ifndef _ASM_X86_PTI_H
3#define _ASM_X86_PTI_H
4#ifndef __ASSEMBLY__
5
6#ifdef CONFIG_PAGE_TABLE_ISOLATION
7extern void pti_init(void);
8extern void pti_check_boottime_disable(void);
9#else
10static inline void pti_check_boottime_disable(void) { }
11#endif
12
13#endif /* __ASSEMBLY__ */
14#endif /* _ASM_X86_PTI_H */
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index b20f9d623f9c..8f09012b92e7 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -236,11 +236,23 @@
236 */ 236 */
237#define EARLY_IDT_HANDLER_SIZE 9 237#define EARLY_IDT_HANDLER_SIZE 9
238 238
239/*
240 * xen_early_idt_handler_array is for Xen pv guests: for each entry in
241 * early_idt_handler_array it contains a prequel in the form of
242 * pop %rcx; pop %r11; jmp early_idt_handler_array[i]; summing up to
243 * max 8 bytes.
244 */
245#define XEN_EARLY_IDT_HANDLER_SIZE 8
246
239#ifndef __ASSEMBLY__ 247#ifndef __ASSEMBLY__
240 248
241extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE]; 249extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
242extern void early_ignore_irq(void); 250extern void early_ignore_irq(void);
243 251
252#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV)
253extern const char xen_early_idt_handler_array[NUM_EXCEPTION_VECTORS][XEN_EARLY_IDT_HANDLER_SIZE];
254#endif
255
244/* 256/*
245 * Load a segment. Fall back on loading the zero segment if something goes 257 * Load a segment. Fall back on loading the zero segment if something goes
246 * wrong. This variant assumes that loading zero fully clears the segment. 258 * wrong. This variant assumes that loading zero fully clears the segment.
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index 8da111b3c342..f73706878772 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -16,6 +16,7 @@ enum stack_type {
16 STACK_TYPE_TASK, 16 STACK_TYPE_TASK,
17 STACK_TYPE_IRQ, 17 STACK_TYPE_IRQ,
18 STACK_TYPE_SOFTIRQ, 18 STACK_TYPE_SOFTIRQ,
19 STACK_TYPE_ENTRY,
19 STACK_TYPE_EXCEPTION, 20 STACK_TYPE_EXCEPTION,
20 STACK_TYPE_EXCEPTION_LAST = STACK_TYPE_EXCEPTION + N_EXCEPTION_STACKS-1, 21 STACK_TYPE_EXCEPTION_LAST = STACK_TYPE_EXCEPTION + N_EXCEPTION_STACKS-1,
21}; 22};
@@ -28,6 +29,8 @@ struct stack_info {
28bool in_task_stack(unsigned long *stack, struct task_struct *task, 29bool in_task_stack(unsigned long *stack, struct task_struct *task,
29 struct stack_info *info); 30 struct stack_info *info);
30 31
32bool in_entry_stack(unsigned long *stack, struct stack_info *info);
33
31int get_stack_info(unsigned long *stack, struct task_struct *task, 34int get_stack_info(unsigned long *stack, struct task_struct *task,
32 struct stack_info *info, unsigned long *visit_mask); 35 struct stack_info *info, unsigned long *visit_mask);
33 36
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index 982c325dad33..8be6afb58471 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -12,7 +12,13 @@
12 12
13/* image of the saved processor state */ 13/* image of the saved processor state */
14struct saved_context { 14struct saved_context {
15 u16 es, fs, gs, ss; 15 /*
16 * On x86_32, all segment registers, with the possible exception of
17 * gs, are saved at kernel entry in pt_regs.
18 */
19#ifdef CONFIG_X86_32_LAZY_GS
20 u16 gs;
21#endif
16 unsigned long cr0, cr2, cr3, cr4; 22 unsigned long cr0, cr2, cr3, cr4;
17 u64 misc_enable; 23 u64 misc_enable;
18 bool misc_enable_saved; 24 bool misc_enable_saved;
diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
index 7306e911faee..a7af9f53c0cb 100644
--- a/arch/x86/include/asm/suspend_64.h
+++ b/arch/x86/include/asm/suspend_64.h
@@ -20,8 +20,20 @@
20 */ 20 */
21struct saved_context { 21struct saved_context {
22 struct pt_regs regs; 22 struct pt_regs regs;
23 u16 ds, es, fs, gs, ss; 23
24 unsigned long gs_base, gs_kernel_base, fs_base; 24 /*
25 * User CS and SS are saved in current_pt_regs(). The rest of the
26 * segment selectors need to be saved and restored here.
27 */
28 u16 ds, es, fs, gs;
29
30 /*
31 * Usermode FSBASE and GSBASE may not match the fs and gs selectors,
32 * so we save them separately. We save the kernelmode GSBASE to
33 * restore percpu access after resume.
34 */
35 unsigned long kernelmode_gs_base, usermode_gs_base, fs_base;
36
25 unsigned long cr0, cr2, cr3, cr4, cr8; 37 unsigned long cr0, cr2, cr3, cr4, cr8;
26 u64 misc_enable; 38 u64 misc_enable;
27 bool misc_enable_saved; 39 bool misc_enable_saved;
@@ -30,8 +42,7 @@ struct saved_context {
30 u16 gdt_pad; /* Unused */ 42 u16 gdt_pad; /* Unused */
31 struct desc_ptr gdt_desc; 43 struct desc_ptr gdt_desc;
32 u16 idt_pad; 44 u16 idt_pad;
33 u16 idt_limit; 45 struct desc_ptr idt;
34 unsigned long idt_base;
35 u16 ldt; 46 u16 ldt;
36 u16 tss; 47 u16 tss;
37 unsigned long tr; 48 unsigned long tr;
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 8c6bd6863db9..eb5f7999a893 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -16,8 +16,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
16 struct tss_struct *tss); 16 struct tss_struct *tss);
17 17
18/* This runs runs on the previous thread's stack. */ 18/* This runs runs on the previous thread's stack. */
19static inline void prepare_switch_to(struct task_struct *prev, 19static inline void prepare_switch_to(struct task_struct *next)
20 struct task_struct *next)
21{ 20{
22#ifdef CONFIG_VMAP_STACK 21#ifdef CONFIG_VMAP_STACK
23 /* 22 /*
@@ -70,7 +69,7 @@ struct fork_frame {
70 69
71#define switch_to(prev, next, last) \ 70#define switch_to(prev, next, last) \
72do { \ 71do { \
73 prepare_switch_to(prev, next); \ 72 prepare_switch_to(next); \
74 \ 73 \
75 ((last) = __switch_to_asm((prev), (next))); \ 74 ((last) = __switch_to_asm((prev), (next))); \
76} while (0) 75} while (0)
@@ -79,10 +78,10 @@ do { \
79static inline void refresh_sysenter_cs(struct thread_struct *thread) 78static inline void refresh_sysenter_cs(struct thread_struct *thread)
80{ 79{
81 /* Only happens when SEP is enabled, no need to test "SEP"arately: */ 80 /* Only happens when SEP is enabled, no need to test "SEP"arately: */
82 if (unlikely(this_cpu_read(cpu_tss.x86_tss.ss1) == thread->sysenter_cs)) 81 if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs))
83 return; 82 return;
84 83
85 this_cpu_write(cpu_tss.x86_tss.ss1, thread->sysenter_cs); 84 this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
86 wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); 85 wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
87} 86}
88#endif 87#endif
@@ -90,10 +89,12 @@ static inline void refresh_sysenter_cs(struct thread_struct *thread)
90/* This is used when switching tasks or entering/exiting vm86 mode. */ 89/* This is used when switching tasks or entering/exiting vm86 mode. */
91static inline void update_sp0(struct task_struct *task) 90static inline void update_sp0(struct task_struct *task)
92{ 91{
92 /* On x86_64, sp0 always points to the entry trampoline stack, which is constant: */
93#ifdef CONFIG_X86_32 93#ifdef CONFIG_X86_32
94 load_sp0(task->thread.sp0); 94 load_sp0(task->thread.sp0);
95#else 95#else
96 load_sp0(task_top_of_stack(task)); 96 if (static_cpu_has(X86_FEATURE_XENPV))
97 load_sp0(task_top_of_stack(task));
97#endif 98#endif
98} 99}
99 100
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 70f425947dc5..00223333821a 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -207,7 +207,7 @@ static inline int arch_within_stack_frames(const void * const stack,
207#else /* !__ASSEMBLY__ */ 207#else /* !__ASSEMBLY__ */
208 208
209#ifdef CONFIG_X86_64 209#ifdef CONFIG_X86_64
210# define cpu_current_top_of_stack (cpu_tss + TSS_sp0) 210# define cpu_current_top_of_stack (cpu_tss_rw + TSS_sp1)
211#endif 211#endif
212 212
213#endif 213#endif
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 509046cfa5ce..d33e4a26dc7e 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -9,70 +9,130 @@
9#include <asm/cpufeature.h> 9#include <asm/cpufeature.h>
10#include <asm/special_insns.h> 10#include <asm/special_insns.h>
11#include <asm/smp.h> 11#include <asm/smp.h>
12#include <asm/invpcid.h>
13#include <asm/pti.h>
14#include <asm/processor-flags.h>
12 15
13static inline void __invpcid(unsigned long pcid, unsigned long addr, 16/*
14 unsigned long type) 17 * The x86 feature is called PCID (Process Context IDentifier). It is similar
15{ 18 * to what is traditionally called ASID on the RISC processors.
16 struct { u64 d[2]; } desc = { { pcid, addr } }; 19 *
20 * We don't use the traditional ASID implementation, where each process/mm gets
21 * its own ASID and flush/restart when we run out of ASID space.
22 *
23 * Instead we have a small per-cpu array of ASIDs and cache the last few mm's
24 * that came by on this CPU, allowing cheaper switch_mm between processes on
25 * this CPU.
26 *
27 * We end up with different spaces for different things. To avoid confusion we
28 * use different names for each of them:
29 *
30 * ASID - [0, TLB_NR_DYN_ASIDS-1]
31 * the canonical identifier for an mm
32 *
33 * kPCID - [1, TLB_NR_DYN_ASIDS]
34 * the value we write into the PCID part of CR3; corresponds to the
35 * ASID+1, because PCID 0 is special.
36 *
37 * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
38 * for KPTI each mm has two address spaces and thus needs two
39 * PCID values, but we can still do with a single ASID denomination
40 * for each mm. Corresponds to kPCID + 2048.
41 *
42 */
17 43
18 /* 44/* There are 12 bits of space for ASIDS in CR3 */
19 * The memory clobber is because the whole point is to invalidate 45#define CR3_HW_ASID_BITS 12
20 * stale TLB entries and, especially if we're flushing global
21 * mappings, we don't want the compiler to reorder any subsequent
22 * memory accesses before the TLB flush.
23 *
24 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
25 * invpcid (%rcx), %rax in long mode.
26 */
27 asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
28 : : "m" (desc), "a" (type), "c" (&desc) : "memory");
29}
30 46
31#define INVPCID_TYPE_INDIV_ADDR 0 47/*
32#define INVPCID_TYPE_SINGLE_CTXT 1 48 * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
33#define INVPCID_TYPE_ALL_INCL_GLOBAL 2 49 * user/kernel switches
34#define INVPCID_TYPE_ALL_NON_GLOBAL 3 50 */
51#ifdef CONFIG_PAGE_TABLE_ISOLATION
52# define PTI_CONSUMED_PCID_BITS 1
53#else
54# define PTI_CONSUMED_PCID_BITS 0
55#endif
35 56
36/* Flush all mappings for a given pcid and addr, not including globals. */ 57#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)
37static inline void invpcid_flush_one(unsigned long pcid, 58
38 unsigned long addr) 59/*
39{ 60 * ASIDs are zero-based: 0->MAX_AVAIL_ASID are valid. -1 below to account
40 __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR); 61 * for them being zero-based. Another -1 is because PCID 0 is reserved for
41} 62 * use by non-PCID-aware users.
63 */
64#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2)
42 65
43/* Flush all mappings for a given PCID, not including globals. */ 66/*
44static inline void invpcid_flush_single_context(unsigned long pcid) 67 * 6 because 6 should be plenty and struct tlb_state will fit in two cache
68 * lines.
69 */
70#define TLB_NR_DYN_ASIDS 6
71
72/*
73 * Given @asid, compute kPCID
74 */
75static inline u16 kern_pcid(u16 asid)
45{ 76{
46 __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT); 77 VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
78
79#ifdef CONFIG_PAGE_TABLE_ISOLATION
80 /*
81 * Make sure that the dynamic ASID space does not confict with the
82 * bit we are using to switch between user and kernel ASIDs.
83 */
84 BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));
85
86 /*
87 * The ASID being passed in here should have respected the
88 * MAX_ASID_AVAILABLE and thus never have the switch bit set.
89 */
90 VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
91#endif
92 /*
93 * The dynamically-assigned ASIDs that get passed in are small
94 * (<TLB_NR_DYN_ASIDS). They never have the high switch bit set,
95 * so do not bother to clear it.
96 *
97 * If PCID is on, ASID-aware code paths put the ASID+1 into the
98 * PCID bits. This serves two purposes. It prevents a nasty
99 * situation in which PCID-unaware code saves CR3, loads some other
100 * value (with PCID == 0), and then restores CR3, thus corrupting
101 * the TLB for ASID 0 if the saved ASID was nonzero. It also means
102 * that any bugs involving loading a PCID-enabled CR3 with
103 * CR4.PCIDE off will trigger deterministically.
104 */
105 return asid + 1;
47} 106}
48 107
49/* Flush all mappings, including globals, for all PCIDs. */ 108/*
50static inline void invpcid_flush_all(void) 109 * Given @asid, compute uPCID
110 */
111static inline u16 user_pcid(u16 asid)
51{ 112{
52 __invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL); 113 u16 ret = kern_pcid(asid);
114#ifdef CONFIG_PAGE_TABLE_ISOLATION
115 ret |= 1 << X86_CR3_PTI_PCID_USER_BIT;
116#endif
117 return ret;
53} 118}
54 119
55/* Flush all mappings for all PCIDs except globals. */ 120struct pgd_t;
56static inline void invpcid_flush_all_nonglobals(void) 121static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
57{ 122{
58 __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL); 123 if (static_cpu_has(X86_FEATURE_PCID)) {
124 return __sme_pa(pgd) | kern_pcid(asid);
125 } else {
126 VM_WARN_ON_ONCE(asid != 0);
127 return __sme_pa(pgd);
128 }
59} 129}
60 130
61static inline u64 inc_mm_tlb_gen(struct mm_struct *mm) 131static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
62{ 132{
63 u64 new_tlb_gen; 133 VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
64 134 VM_WARN_ON_ONCE(!this_cpu_has(X86_FEATURE_PCID));
65 /* 135 return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
66 * Bump the generation count. This also serves as a full barrier
67 * that synchronizes with switch_mm(): callers are required to order
68 * their read of mm_cpumask after their writes to the paging
69 * structures.
70 */
71 smp_mb__before_atomic();
72 new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen);
73 smp_mb__after_atomic();
74
75 return new_tlb_gen;
76} 136}
77 137
78#ifdef CONFIG_PARAVIRT 138#ifdef CONFIG_PARAVIRT
@@ -99,12 +159,6 @@ static inline bool tlb_defer_switch_to_init_mm(void)
99 return !static_cpu_has(X86_FEATURE_PCID); 159 return !static_cpu_has(X86_FEATURE_PCID);
100} 160}
101 161
102/*
103 * 6 because 6 should be plenty and struct tlb_state will fit in
104 * two cache lines.
105 */
106#define TLB_NR_DYN_ASIDS 6
107
108struct tlb_context { 162struct tlb_context {
109 u64 ctx_id; 163 u64 ctx_id;
110 u64 tlb_gen; 164 u64 tlb_gen;
@@ -139,6 +193,24 @@ struct tlb_state {
139 bool is_lazy; 193 bool is_lazy;
140 194
141 /* 195 /*
196 * If set we changed the page tables in such a way that we
197 * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
198 * This tells us to go invalidate all the non-loaded ctxs[]
199 * on the next context switch.
200 *
201 * The current ctx was kept up-to-date as it ran and does not
202 * need to be invalidated.
203 */
204 bool invalidate_other;
205
206 /*
207 * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
208 * the corresponding user PCID needs a flush next time we
209 * switch to it; see SWITCH_TO_USER_CR3.
210 */
211 unsigned short user_pcid_flush_mask;
212
213 /*
142 * Access to this CR4 shadow and to H/W CR4 is protected by 214 * Access to this CR4 shadow and to H/W CR4 is protected by
143 * disabling interrupts when modifying either one. 215 * disabling interrupts when modifying either one.
144 */ 216 */
@@ -173,40 +245,43 @@ static inline void cr4_init_shadow(void)
173 this_cpu_write(cpu_tlbstate.cr4, __read_cr4()); 245 this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
174} 246}
175 247
248static inline void __cr4_set(unsigned long cr4)
249{
250 lockdep_assert_irqs_disabled();
251 this_cpu_write(cpu_tlbstate.cr4, cr4);
252 __write_cr4(cr4);
253}
254
176/* Set in this cpu's CR4. */ 255/* Set in this cpu's CR4. */
177static inline void cr4_set_bits(unsigned long mask) 256static inline void cr4_set_bits(unsigned long mask)
178{ 257{
179 unsigned long cr4; 258 unsigned long cr4, flags;
180 259
260 local_irq_save(flags);
181 cr4 = this_cpu_read(cpu_tlbstate.cr4); 261 cr4 = this_cpu_read(cpu_tlbstate.cr4);
182 if ((cr4 | mask) != cr4) { 262 if ((cr4 | mask) != cr4)
183 cr4 |= mask; 263 __cr4_set(cr4 | mask);
184 this_cpu_write(cpu_tlbstate.cr4, cr4); 264 local_irq_restore(flags);
185 __write_cr4(cr4);
186 }
187} 265}
188 266
189/* Clear in this cpu's CR4. */ 267/* Clear in this cpu's CR4. */
190static inline void cr4_clear_bits(unsigned long mask) 268static inline void cr4_clear_bits(unsigned long mask)
191{ 269{
192 unsigned long cr4; 270 unsigned long cr4, flags;
193 271
272 local_irq_save(flags);
194 cr4 = this_cpu_read(cpu_tlbstate.cr4); 273 cr4 = this_cpu_read(cpu_tlbstate.cr4);
195 if ((cr4 & ~mask) != cr4) { 274 if ((cr4 & ~mask) != cr4)
196 cr4 &= ~mask; 275 __cr4_set(cr4 & ~mask);
197 this_cpu_write(cpu_tlbstate.cr4, cr4); 276 local_irq_restore(flags);
198 __write_cr4(cr4);
199 }
200} 277}
201 278
202static inline void cr4_toggle_bits(unsigned long mask) 279static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
203{ 280{
204 unsigned long cr4; 281 unsigned long cr4;
205 282
206 cr4 = this_cpu_read(cpu_tlbstate.cr4); 283 cr4 = this_cpu_read(cpu_tlbstate.cr4);
207 cr4 ^= mask; 284 __cr4_set(cr4 ^ mask);
208 this_cpu_write(cpu_tlbstate.cr4, cr4);
209 __write_cr4(cr4);
210} 285}
211 286
212/* Read the CR4 shadow. */ 287/* Read the CR4 shadow. */
@@ -216,6 +291,14 @@ static inline unsigned long cr4_read_shadow(void)
216} 291}
217 292
218/* 293/*
294 * Mark all other ASIDs as invalid, preserves the current.
295 */
296static inline void invalidate_other_asid(void)
297{
298 this_cpu_write(cpu_tlbstate.invalidate_other, true);
299}
300
301/*
219 * Save some of cr4 feature set we're using (e.g. Pentium 4MB 302 * Save some of cr4 feature set we're using (e.g. Pentium 4MB
220 * enable and PPro Global page enable), so that any CPU's that boot 303 * enable and PPro Global page enable), so that any CPU's that boot
221 * up after us can get the correct flags. This should only be used 304 * up after us can get the correct flags. This should only be used
@@ -234,37 +317,63 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
234 317
235extern void initialize_tlbstate_and_flush(void); 318extern void initialize_tlbstate_and_flush(void);
236 319
237static inline void __native_flush_tlb(void) 320/*
321 * Given an ASID, flush the corresponding user ASID. We can delay this
322 * until the next time we switch to it.
323 *
324 * See SWITCH_TO_USER_CR3.
325 */
326static inline void invalidate_user_asid(u16 asid)
238{ 327{
328 /* There is no user ASID if address space separation is off */
329 if (!IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
330 return;
331
239 /* 332 /*
240 * If current->mm == NULL then we borrow a mm which may change during a 333 * We only have a single ASID if PCID is off and the CR3
241 * task switch and therefore we must not be preempted while we write CR3 334 * write will have flushed it.
242 * back:
243 */ 335 */
244 preempt_disable(); 336 if (!cpu_feature_enabled(X86_FEATURE_PCID))
245 native_write_cr3(__native_read_cr3()); 337 return;
246 preempt_enable(); 338
339 if (!static_cpu_has(X86_FEATURE_PTI))
340 return;
341
342 __set_bit(kern_pcid(asid),
343 (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
247} 344}
248 345
249static inline void __native_flush_tlb_global_irq_disabled(void) 346/*
347 * flush the entire current user mapping
348 */
349static inline void __native_flush_tlb(void)
250{ 350{
251 unsigned long cr4; 351 /*
352 * Preemption or interrupts must be disabled to protect the access
353 * to the per CPU variable and to prevent being preempted between
354 * read_cr3() and write_cr3().
355 */
356 WARN_ON_ONCE(preemptible());
252 357
253 cr4 = this_cpu_read(cpu_tlbstate.cr4); 358 invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
254 /* clear PGE */ 359
255 native_write_cr4(cr4 & ~X86_CR4_PGE); 360 /* If current->mm == NULL then the read_cr3() "borrows" an mm */
256 /* write old PGE again and flush TLBs */ 361 native_write_cr3(__native_read_cr3());
257 native_write_cr4(cr4);
258} 362}
259 363
364/*
365 * flush everything
366 */
260static inline void __native_flush_tlb_global(void) 367static inline void __native_flush_tlb_global(void)
261{ 368{
262 unsigned long flags; 369 unsigned long cr4, flags;
263 370
264 if (static_cpu_has(X86_FEATURE_INVPCID)) { 371 if (static_cpu_has(X86_FEATURE_INVPCID)) {
265 /* 372 /*
266 * Using INVPCID is considerably faster than a pair of writes 373 * Using INVPCID is considerably faster than a pair of writes
267 * to CR4 sandwiched inside an IRQ flag save/restore. 374 * to CR4 sandwiched inside an IRQ flag save/restore.
375 *
376 * Note, this works with CR4.PCIDE=0 or 1.
268 */ 377 */
269 invpcid_flush_all(); 378 invpcid_flush_all();
270 return; 379 return;
@@ -277,36 +386,69 @@ static inline void __native_flush_tlb_global(void)
277 */ 386 */
278 raw_local_irq_save(flags); 387 raw_local_irq_save(flags);
279 388
280 __native_flush_tlb_global_irq_disabled(); 389 cr4 = this_cpu_read(cpu_tlbstate.cr4);
390 /* toggle PGE */
391 native_write_cr4(cr4 ^ X86_CR4_PGE);
392 /* write old PGE again and flush TLBs */
393 native_write_cr4(cr4);
281 394
282 raw_local_irq_restore(flags); 395 raw_local_irq_restore(flags);
283} 396}
284 397
398/*
399 * flush one page in the user mapping
400 */
285static inline void __native_flush_tlb_single(unsigned long addr) 401static inline void __native_flush_tlb_single(unsigned long addr)
286{ 402{
403 u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
404
287 asm volatile("invlpg (%0)" ::"r" (addr) : "memory"); 405 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
406
407 if (!static_cpu_has(X86_FEATURE_PTI))
408 return;
409
410 /*
411 * Some platforms #GP if we call invpcid(type=1/2) before CR4.PCIDE=1.
412 * Just use invalidate_user_asid() in case we are called early.
413 */
414 if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE))
415 invalidate_user_asid(loaded_mm_asid);
416 else
417 invpcid_flush_one(user_pcid(loaded_mm_asid), addr);
288} 418}
289 419
420/*
421 * flush everything
422 */
290static inline void __flush_tlb_all(void) 423static inline void __flush_tlb_all(void)
291{ 424{
292 if (boot_cpu_has(X86_FEATURE_PGE)) 425 if (boot_cpu_has(X86_FEATURE_PGE)) {
293 __flush_tlb_global(); 426 __flush_tlb_global();
294 else 427 } else {
428 /*
429 * !PGE -> !PCID (setup_pcid()), thus every flush is total.
430 */
295 __flush_tlb(); 431 __flush_tlb();
296 432 }
297 /*
298 * Note: if we somehow had PCID but not PGE, then this wouldn't work --
299 * we'd end up flushing kernel translations for the current ASID but
300 * we might fail to flush kernel translations for other cached ASIDs.
301 *
302 * To avoid this issue, we force PCID off if PGE is off.
303 */
304} 433}
305 434
435/*
436 * flush one page in the kernel mapping
437 */
306static inline void __flush_tlb_one(unsigned long addr) 438static inline void __flush_tlb_one(unsigned long addr)
307{ 439{
308 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); 440 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
309 __flush_tlb_single(addr); 441 __flush_tlb_single(addr);
442
443 if (!static_cpu_has(X86_FEATURE_PTI))
444 return;
445
446 /*
447 * __flush_tlb_single() will have cleared the TLB entry for this ASID,
448 * but since kernel space is replicated across all, we must also
449 * invalidate all others.
450 */
451 invalidate_other_asid();
310} 452}
311 453
312#define TLB_FLUSH_ALL -1UL 454#define TLB_FLUSH_ALL -1UL
@@ -367,6 +509,17 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
367void native_flush_tlb_others(const struct cpumask *cpumask, 509void native_flush_tlb_others(const struct cpumask *cpumask,
368 const struct flush_tlb_info *info); 510 const struct flush_tlb_info *info);
369 511
512static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
513{
514 /*
515 * Bump the generation count. This also serves as a full barrier
516 * that synchronizes with switch_mm(): callers are required to order
517 * their read of mm_cpumask after their writes to the paging
518 * structures.
519 */
520 return atomic64_inc_return(&mm->context.tlb_gen);
521}
522
370static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch, 523static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
371 struct mm_struct *mm) 524 struct mm_struct *mm)
372{ 525{
diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
index 84b9ec0c1bc0..22647a642e98 100644
--- a/arch/x86/include/asm/trace/irq_vectors.h
+++ b/arch/x86/include/asm/trace/irq_vectors.h
@@ -283,34 +283,34 @@ TRACE_EVENT(vector_alloc_managed,
283DECLARE_EVENT_CLASS(vector_activate, 283DECLARE_EVENT_CLASS(vector_activate,
284 284
285 TP_PROTO(unsigned int irq, bool is_managed, bool can_reserve, 285 TP_PROTO(unsigned int irq, bool is_managed, bool can_reserve,
286 bool early), 286 bool reserve),
287 287
288 TP_ARGS(irq, is_managed, can_reserve, early), 288 TP_ARGS(irq, is_managed, can_reserve, reserve),
289 289
290 TP_STRUCT__entry( 290 TP_STRUCT__entry(
291 __field( unsigned int, irq ) 291 __field( unsigned int, irq )
292 __field( bool, is_managed ) 292 __field( bool, is_managed )
293 __field( bool, can_reserve ) 293 __field( bool, can_reserve )
294 __field( bool, early ) 294 __field( bool, reserve )
295 ), 295 ),
296 296
297 TP_fast_assign( 297 TP_fast_assign(
298 __entry->irq = irq; 298 __entry->irq = irq;
299 __entry->is_managed = is_managed; 299 __entry->is_managed = is_managed;
300 __entry->can_reserve = can_reserve; 300 __entry->can_reserve = can_reserve;
301 __entry->early = early; 301 __entry->reserve = reserve;
302 ), 302 ),
303 303
304 TP_printk("irq=%u is_managed=%d can_reserve=%d early=%d", 304 TP_printk("irq=%u is_managed=%d can_reserve=%d reserve=%d",
305 __entry->irq, __entry->is_managed, __entry->can_reserve, 305 __entry->irq, __entry->is_managed, __entry->can_reserve,
306 __entry->early) 306 __entry->reserve)
307); 307);
308 308
309#define DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(name) \ 309#define DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(name) \
310DEFINE_EVENT_FN(vector_activate, name, \ 310DEFINE_EVENT_FN(vector_activate, name, \
311 TP_PROTO(unsigned int irq, bool is_managed, \ 311 TP_PROTO(unsigned int irq, bool is_managed, \
312 bool can_reserve, bool early), \ 312 bool can_reserve, bool reserve), \
313 TP_ARGS(irq, is_managed, can_reserve, early), NULL, NULL); \ 313 TP_ARGS(irq, is_managed, can_reserve, reserve), NULL, NULL); \
314 314
315DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_activate); 315DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_activate);
316DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_deactivate); 316DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_deactivate);
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 1fadd310ff68..3de69330e6c5 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -75,7 +75,6 @@ dotraplinkage void do_segment_not_present(struct pt_regs *, long);
75dotraplinkage void do_stack_segment(struct pt_regs *, long); 75dotraplinkage void do_stack_segment(struct pt_regs *, long);
76#ifdef CONFIG_X86_64 76#ifdef CONFIG_X86_64
77dotraplinkage void do_double_fault(struct pt_regs *, long); 77dotraplinkage void do_double_fault(struct pt_regs *, long);
78asmlinkage struct pt_regs *sync_regs(struct pt_regs *);
79#endif 78#endif
80dotraplinkage void do_general_protection(struct pt_regs *, long); 79dotraplinkage void do_general_protection(struct pt_regs *, long);
81dotraplinkage void do_page_fault(struct pt_regs *, unsigned long); 80dotraplinkage void do_page_fault(struct pt_regs *, unsigned long);
@@ -89,6 +88,7 @@ dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long);
89#ifdef CONFIG_X86_32 88#ifdef CONFIG_X86_32
90dotraplinkage void do_iret_error(struct pt_regs *, long); 89dotraplinkage void do_iret_error(struct pt_regs *, long);
91#endif 90#endif
91dotraplinkage void do_mce(struct pt_regs *, long);
92 92
93static inline int get_si_code(unsigned long condition) 93static inline int get_si_code(unsigned long condition)
94{ 94{
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
index e9cc6fe1fc6f..1f86e1b0a5cd 100644
--- a/arch/x86/include/asm/unwind.h
+++ b/arch/x86/include/asm/unwind.h
@@ -7,6 +7,9 @@
7#include <asm/ptrace.h> 7#include <asm/ptrace.h>
8#include <asm/stacktrace.h> 8#include <asm/stacktrace.h>
9 9
10#define IRET_FRAME_OFFSET (offsetof(struct pt_regs, ip))
11#define IRET_FRAME_SIZE (sizeof(struct pt_regs) - IRET_FRAME_OFFSET)
12
10struct unwind_state { 13struct unwind_state {
11 struct stack_info stack_info; 14 struct stack_info stack_info;
12 unsigned long stack_mask; 15 unsigned long stack_mask;
@@ -52,15 +55,28 @@ void unwind_start(struct unwind_state *state, struct task_struct *task,
52} 55}
53 56
54#if defined(CONFIG_UNWINDER_ORC) || defined(CONFIG_UNWINDER_FRAME_POINTER) 57#if defined(CONFIG_UNWINDER_ORC) || defined(CONFIG_UNWINDER_FRAME_POINTER)
55static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state) 58/*
59 * If 'partial' returns true, only the iret frame registers are valid.
60 */
61static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state,
62 bool *partial)
56{ 63{
57 if (unwind_done(state)) 64 if (unwind_done(state))
58 return NULL; 65 return NULL;
59 66
67 if (partial) {
68#ifdef CONFIG_UNWINDER_ORC
69 *partial = !state->full_regs;
70#else
71 *partial = false;
72#endif
73 }
74
60 return state->regs; 75 return state->regs;
61} 76}
62#else 77#else
63static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state) 78static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state,
79 bool *partial)
64{ 80{
65 return NULL; 81 return NULL;
66} 82}
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index d9a7c659009c..b986b2ca688a 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -7,6 +7,7 @@
7 7
8#ifdef CONFIG_X86_VSYSCALL_EMULATION 8#ifdef CONFIG_X86_VSYSCALL_EMULATION
9extern void map_vsyscall(void); 9extern void map_vsyscall(void);
10extern void set_vsyscall_pgtable_user_bits(pgd_t *root);
10 11
11/* 12/*
12 * Called on instruction fetch fault in vsyscall page. 13 * Called on instruction fetch fault in vsyscall page.
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 7cb282e9e587..bfd882617613 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -44,6 +44,7 @@
44#include <asm/page.h> 44#include <asm/page.h>
45#include <asm/pgtable.h> 45#include <asm/pgtable.h>
46#include <asm/smap.h> 46#include <asm/smap.h>
47#include <asm/nospec-branch.h>
47 48
48#include <xen/interface/xen.h> 49#include <xen/interface/xen.h>
49#include <xen/interface/sched.h> 50#include <xen/interface/sched.h>
@@ -217,9 +218,9 @@ privcmd_call(unsigned call,
217 __HYPERCALL_5ARG(a1, a2, a3, a4, a5); 218 __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
218 219
219 stac(); 220 stac();
220 asm volatile("call *%[call]" 221 asm volatile(CALL_NOSPEC
221 : __HYPERCALL_5PARAM 222 : __HYPERCALL_5PARAM
222 : [call] "a" (&hypercall_page[call]) 223 : [thunk_target] "a" (&hypercall_page[call])
223 : __HYPERCALL_CLOBBER5); 224 : __HYPERCALL_CLOBBER5);
224 clac(); 225 clac();
225 226
diff --git a/arch/x86/include/uapi/asm/Kbuild b/arch/x86/include/uapi/asm/Kbuild
index da1489cb64dc..1e901e421f2d 100644
--- a/arch/x86/include/uapi/asm/Kbuild
+++ b/arch/x86/include/uapi/asm/Kbuild
@@ -1,6 +1,7 @@
1# UAPI Header export list 1# UAPI Header export list
2include include/uapi/asm-generic/Kbuild.asm 2include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += bpf_perf_event.h
4generated-y += unistd_32.h 5generated-y += unistd_32.h
5generated-y += unistd_64.h 6generated-y += unistd_64.h
6generated-y += unistd_x32.h 7generated-y += unistd_x32.h
diff --git a/arch/x86/include/uapi/asm/processor-flags.h b/arch/x86/include/uapi/asm/processor-flags.h
index 7e1e730396ae..bcba3c643e63 100644
--- a/arch/x86/include/uapi/asm/processor-flags.h
+++ b/arch/x86/include/uapi/asm/processor-flags.h
@@ -78,7 +78,12 @@
78#define X86_CR3_PWT _BITUL(X86_CR3_PWT_BIT) 78#define X86_CR3_PWT _BITUL(X86_CR3_PWT_BIT)
79#define X86_CR3_PCD_BIT 4 /* Page Cache Disable */ 79#define X86_CR3_PCD_BIT 4 /* Page Cache Disable */
80#define X86_CR3_PCD _BITUL(X86_CR3_PCD_BIT) 80#define X86_CR3_PCD _BITUL(X86_CR3_PCD_BIT)
81#define X86_CR3_PCID_MASK _AC(0x00000fff,UL) /* PCID Mask */ 81
82#define X86_CR3_PCID_BITS 12
83#define X86_CR3_PCID_MASK (_AC((1UL << X86_CR3_PCID_BITS) - 1, UL))
84
85#define X86_CR3_PCID_NOFLUSH_BIT 63 /* Preserve old PCID */
86#define X86_CR3_PCID_NOFLUSH _BITULL(X86_CR3_PCID_NOFLUSH_BIT)
82 87
83/* 88/*
84 * Intel CPU features in CR4 89 * Intel CPU features in CR4
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 81bb565f4497..7e2baf7304ae 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -29,10 +29,13 @@ KASAN_SANITIZE_stacktrace.o := n
29KASAN_SANITIZE_paravirt.o := n 29KASAN_SANITIZE_paravirt.o := n
30 30
31OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y 31OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y
32OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o := y
33OBJECT_FILES_NON_STANDARD_test_nx.o := y 32OBJECT_FILES_NON_STANDARD_test_nx.o := y
34OBJECT_FILES_NON_STANDARD_paravirt_patch_$(BITS).o := y 33OBJECT_FILES_NON_STANDARD_paravirt_patch_$(BITS).o := y
35 34
35ifdef CONFIG_FRAME_POINTER
36OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o := y
37endif
38
36# If instrumentation of this dir is enabled, boot hangs during first second. 39# If instrumentation of this dir is enabled, boot hangs during first second.
37# Probably could be more selective here, but note that files related to irqs, 40# Probably could be more selective here, but note that files related to irqs,
38# boot, dumpstack/stacktrace, etc are either non-interesting or can lead to 41# boot, dumpstack/stacktrace, etc are either non-interesting or can lead to
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index dbaf14d69ebd..4817d743c263 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -344,9 +344,12 @@ done:
344static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr) 344static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
345{ 345{
346 unsigned long flags; 346 unsigned long flags;
347 int i;
347 348
348 if (instr[0] != 0x90) 349 for (i = 0; i < a->padlen; i++) {
349 return; 350 if (instr[i] != 0x90)
351 return;
352 }
350 353
351 local_irq_save(flags); 354 local_irq_save(flags);
352 add_nops(instr + (a->instrlen - a->padlen), a->padlen); 355 add_nops(instr + (a->instrlen - a->padlen), a->padlen);
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 6e272f3ea984..25ddf02598d2 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1286,6 +1286,55 @@ static int __init apic_intr_mode_select(void)
1286 return APIC_SYMMETRIC_IO; 1286 return APIC_SYMMETRIC_IO;
1287} 1287}
1288 1288
1289/*
1290 * An initial setup of the virtual wire mode.
1291 */
1292void __init init_bsp_APIC(void)
1293{
1294 unsigned int value;
1295
1296 /*
1297 * Don't do the setup now if we have a SMP BIOS as the
1298 * through-I/O-APIC virtual wire mode might be active.
1299 */
1300 if (smp_found_config || !boot_cpu_has(X86_FEATURE_APIC))
1301 return;
1302
1303 /*
1304 * Do not trust the local APIC being empty at bootup.
1305 */
1306 clear_local_APIC();
1307
1308 /*
1309 * Enable APIC.
1310 */
1311 value = apic_read(APIC_SPIV);
1312 value &= ~APIC_VECTOR_MASK;
1313 value |= APIC_SPIV_APIC_ENABLED;
1314
1315#ifdef CONFIG_X86_32
1316 /* This bit is reserved on P4/Xeon and should be cleared */
1317 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
1318 (boot_cpu_data.x86 == 15))
1319 value &= ~APIC_SPIV_FOCUS_DISABLED;
1320 else
1321#endif
1322 value |= APIC_SPIV_FOCUS_DISABLED;
1323 value |= SPURIOUS_APIC_VECTOR;
1324 apic_write(APIC_SPIV, value);
1325
1326 /*
1327 * Set up the virtual wire mode.
1328 */
1329 apic_write(APIC_LVT0, APIC_DM_EXTINT);
1330 value = APIC_DM_NMI;
1331 if (!lapic_is_integrated()) /* 82489DX */
1332 value |= APIC_LVT_LEVEL_TRIGGER;
1333 if (apic_extnmi == APIC_EXTNMI_NONE)
1334 value |= APIC_LVT_MASKED;
1335 apic_write(APIC_LVT1, value);
1336}
1337
1289/* Init the interrupt delivery mode for the BSP */ 1338/* Init the interrupt delivery mode for the BSP */
1290void __init apic_intr_mode_init(void) 1339void __init apic_intr_mode_init(void)
1291{ 1340{
@@ -2626,11 +2675,13 @@ static int __init apic_set_verbosity(char *arg)
2626 apic_verbosity = APIC_DEBUG; 2675 apic_verbosity = APIC_DEBUG;
2627 else if (strcmp("verbose", arg) == 0) 2676 else if (strcmp("verbose", arg) == 0)
2628 apic_verbosity = APIC_VERBOSE; 2677 apic_verbosity = APIC_VERBOSE;
2678#ifdef CONFIG_X86_64
2629 else { 2679 else {
2630 pr_warning("APIC Verbosity level %s not recognised" 2680 pr_warning("APIC Verbosity level %s not recognised"
2631 " use apic=verbose or apic=debug\n", arg); 2681 " use apic=verbose or apic=debug\n", arg);
2632 return -EINVAL; 2682 return -EINVAL;
2633 } 2683 }
2684#endif
2634 2685
2635 return 0; 2686 return 0;
2636} 2687}
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index aa85690e9b64..25a87028cb3f 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -151,7 +151,7 @@ static struct apic apic_flat __ro_after_init = {
151 .apic_id_valid = default_apic_id_valid, 151 .apic_id_valid = default_apic_id_valid,
152 .apic_id_registered = flat_apic_id_registered, 152 .apic_id_registered = flat_apic_id_registered,
153 153
154 .irq_delivery_mode = dest_LowestPrio, 154 .irq_delivery_mode = dest_Fixed,
155 .irq_dest_mode = 1, /* logical */ 155 .irq_dest_mode = 1, /* logical */
156 156
157 .disable_esr = 0, 157 .disable_esr = 0,
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index 7b659c4480c9..5078b5ce63a7 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -110,7 +110,7 @@ struct apic apic_noop __ro_after_init = {
110 .apic_id_valid = default_apic_id_valid, 110 .apic_id_valid = default_apic_id_valid,
111 .apic_id_registered = noop_apic_id_registered, 111 .apic_id_registered = noop_apic_id_registered,
112 112
113 .irq_delivery_mode = dest_LowestPrio, 113 .irq_delivery_mode = dest_Fixed,
114 /* logical delivery broadcast to all CPUs: */ 114 /* logical delivery broadcast to all CPUs: */
115 .irq_dest_mode = 1, 115 .irq_dest_mode = 1,
116 116
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 201579dc5242..8a7963421460 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2988,7 +2988,7 @@ void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
2988} 2988}
2989 2989
2990int mp_irqdomain_activate(struct irq_domain *domain, 2990int mp_irqdomain_activate(struct irq_domain *domain,
2991 struct irq_data *irq_data, bool early) 2991 struct irq_data *irq_data, bool reserve)
2992{ 2992{
2993 unsigned long flags; 2993 unsigned long flags;
2994 2994
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
index 9b18be764422..ce503c99f5c4 100644
--- a/arch/x86/kernel/apic/msi.c
+++ b/arch/x86/kernel/apic/msi.c
@@ -39,17 +39,13 @@ static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
39 ((apic->irq_dest_mode == 0) ? 39 ((apic->irq_dest_mode == 0) ?
40 MSI_ADDR_DEST_MODE_PHYSICAL : 40 MSI_ADDR_DEST_MODE_PHYSICAL :
41 MSI_ADDR_DEST_MODE_LOGICAL) | 41 MSI_ADDR_DEST_MODE_LOGICAL) |
42 ((apic->irq_delivery_mode != dest_LowestPrio) ? 42 MSI_ADDR_REDIRECTION_CPU |
43 MSI_ADDR_REDIRECTION_CPU :
44 MSI_ADDR_REDIRECTION_LOWPRI) |
45 MSI_ADDR_DEST_ID(cfg->dest_apicid); 43 MSI_ADDR_DEST_ID(cfg->dest_apicid);
46 44
47 msg->data = 45 msg->data =
48 MSI_DATA_TRIGGER_EDGE | 46 MSI_DATA_TRIGGER_EDGE |
49 MSI_DATA_LEVEL_ASSERT | 47 MSI_DATA_LEVEL_ASSERT |
50 ((apic->irq_delivery_mode != dest_LowestPrio) ? 48 MSI_DATA_DELIVERY_FIXED |
51 MSI_DATA_DELIVERY_FIXED :
52 MSI_DATA_DELIVERY_LOWPRI) |
53 MSI_DATA_VECTOR(cfg->vector); 49 MSI_DATA_VECTOR(cfg->vector);
54} 50}
55 51
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index fa22017de806..02e8acb134f8 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -105,7 +105,7 @@ static struct apic apic_default __ro_after_init = {
105 .apic_id_valid = default_apic_id_valid, 105 .apic_id_valid = default_apic_id_valid,
106 .apic_id_registered = default_apic_id_registered, 106 .apic_id_registered = default_apic_id_registered,
107 107
108 .irq_delivery_mode = dest_LowestPrio, 108 .irq_delivery_mode = dest_Fixed,
109 /* logical delivery broadcast to all CPUs: */ 109 /* logical delivery broadcast to all CPUs: */
110 .irq_dest_mode = 1, 110 .irq_dest_mode = 1,
111 111
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 6a823a25eaff..3cc471beb50b 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -184,6 +184,7 @@ static void reserve_irq_vector_locked(struct irq_data *irqd)
184 irq_matrix_reserve(vector_matrix); 184 irq_matrix_reserve(vector_matrix);
185 apicd->can_reserve = true; 185 apicd->can_reserve = true;
186 apicd->has_reserved = true; 186 apicd->has_reserved = true;
187 irqd_set_can_reserve(irqd);
187 trace_vector_reserve(irqd->irq, 0); 188 trace_vector_reserve(irqd->irq, 0);
188 vector_assign_managed_shutdown(irqd); 189 vector_assign_managed_shutdown(irqd);
189} 190}
@@ -368,8 +369,18 @@ static int activate_reserved(struct irq_data *irqd)
368 int ret; 369 int ret;
369 370
370 ret = assign_irq_vector_any_locked(irqd); 371 ret = assign_irq_vector_any_locked(irqd);
371 if (!ret) 372 if (!ret) {
372 apicd->has_reserved = false; 373 apicd->has_reserved = false;
374 /*
375 * Core might have disabled reservation mode after
376 * allocating the irq descriptor. Ideally this should
377 * happen before allocation time, but that would require
378 * completely convoluted ways of transporting that
379 * information.
380 */
381 if (!irqd_can_reserve(irqd))
382 apicd->can_reserve = false;
383 }
373 return ret; 384 return ret;
374} 385}
375 386
@@ -398,21 +409,21 @@ static int activate_managed(struct irq_data *irqd)
398} 409}
399 410
400static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd, 411static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
401 bool early) 412 bool reserve)
402{ 413{
403 struct apic_chip_data *apicd = apic_chip_data(irqd); 414 struct apic_chip_data *apicd = apic_chip_data(irqd);
404 unsigned long flags; 415 unsigned long flags;
405 int ret = 0; 416 int ret = 0;
406 417
407 trace_vector_activate(irqd->irq, apicd->is_managed, 418 trace_vector_activate(irqd->irq, apicd->is_managed,
408 apicd->can_reserve, early); 419 apicd->can_reserve, reserve);
409 420
410 /* Nothing to do for fixed assigned vectors */ 421 /* Nothing to do for fixed assigned vectors */
411 if (!apicd->can_reserve && !apicd->is_managed) 422 if (!apicd->can_reserve && !apicd->is_managed)
412 return 0; 423 return 0;
413 424
414 raw_spin_lock_irqsave(&vector_lock, flags); 425 raw_spin_lock_irqsave(&vector_lock, flags);
415 if (early || irqd_is_managed_and_shutdown(irqd)) 426 if (reserve || irqd_is_managed_and_shutdown(irqd))
416 vector_assign_managed_shutdown(irqd); 427 vector_assign_managed_shutdown(irqd);
417 else if (apicd->is_managed) 428 else if (apicd->is_managed)
418 ret = activate_managed(irqd); 429 ret = activate_managed(irqd);
@@ -478,6 +489,7 @@ static bool vector_configure_legacy(unsigned int virq, struct irq_data *irqd,
478 } else { 489 } else {
479 /* Release the vector */ 490 /* Release the vector */
480 apicd->can_reserve = true; 491 apicd->can_reserve = true;
492 irqd_set_can_reserve(irqd);
481 clear_irq_vector(irqd); 493 clear_irq_vector(irqd);
482 realloc = true; 494 realloc = true;
483 } 495 }
@@ -530,20 +542,23 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
530 542
531 err = assign_irq_vector_policy(irqd, info); 543 err = assign_irq_vector_policy(irqd, info);
532 trace_vector_setup(virq + i, false, err); 544 trace_vector_setup(virq + i, false, err);
533 if (err) 545 if (err) {
546 irqd->chip_data = NULL;
547 free_apic_chip_data(apicd);
534 goto error; 548 goto error;
549 }
535 } 550 }
536 551
537 return 0; 552 return 0;
538 553
539error: 554error:
540 x86_vector_free_irqs(domain, virq, i + 1); 555 x86_vector_free_irqs(domain, virq, i);
541 return err; 556 return err;
542} 557}
543 558
544#ifdef CONFIG_GENERIC_IRQ_DEBUGFS 559#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
545void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d, 560static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
546 struct irq_data *irqd, int ind) 561 struct irq_data *irqd, int ind)
547{ 562{
548 unsigned int cpu, vector, prev_cpu, prev_vector; 563 unsigned int cpu, vector, prev_cpu, prev_vector;
549 struct apic_chip_data *apicd; 564 struct apic_chip_data *apicd;
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 622f13ca8a94..8b04234e010b 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -184,7 +184,7 @@ static struct apic apic_x2apic_cluster __ro_after_init = {
184 .apic_id_valid = x2apic_apic_id_valid, 184 .apic_id_valid = x2apic_apic_id_valid,
185 .apic_id_registered = x2apic_apic_id_registered, 185 .apic_id_registered = x2apic_apic_id_registered,
186 186
187 .irq_delivery_mode = dest_LowestPrio, 187 .irq_delivery_mode = dest_Fixed,
188 .irq_dest_mode = 1, /* logical */ 188 .irq_dest_mode = 1, /* logical */
189 189
190 .disable_esr = 0, 190 .disable_esr = 0,
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 8ea78275480d..76417a9aab73 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -17,6 +17,7 @@
17#include <asm/sigframe.h> 17#include <asm/sigframe.h>
18#include <asm/bootparam.h> 18#include <asm/bootparam.h>
19#include <asm/suspend.h> 19#include <asm/suspend.h>
20#include <asm/tlbflush.h>
20 21
21#ifdef CONFIG_XEN 22#ifdef CONFIG_XEN
22#include <xen/interface/xen.h> 23#include <xen/interface/xen.h>
@@ -93,4 +94,13 @@ void common(void) {
93 94
94 BLANK(); 95 BLANK();
95 DEFINE(PTREGS_SIZE, sizeof(struct pt_regs)); 96 DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
97
98 /* TLB state for the entry code */
99 OFFSET(TLB_STATE_user_pcid_flush_mask, tlb_state, user_pcid_flush_mask);
100
101 /* Layout info for cpu_entry_area */
102 OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss);
103 OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline);
104 OFFSET(CPU_ENTRY_AREA_entry_stack, cpu_entry_area, entry_stack_page);
105 DEFINE(SIZEOF_entry_stack, sizeof(struct entry_stack));
96} 106}
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index dedf428b20b6..fa1261eefa16 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -47,13 +47,8 @@ void foo(void)
47 BLANK(); 47 BLANK();
48 48
49 /* Offset from the sysenter stack to tss.sp0 */ 49 /* Offset from the sysenter stack to tss.sp0 */
50 DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) - 50 DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) -
51 offsetofend(struct tss_struct, SYSENTER_stack)); 51 offsetofend(struct cpu_entry_area, entry_stack_page.stack));
52
53 /* Offset from cpu_tss to SYSENTER_stack */
54 OFFSET(CPU_TSS_SYSENTER_stack, tss_struct, SYSENTER_stack);
55 /* Size of SYSENTER_stack */
56 DEFINE(SIZEOF_SYSENTER_stack, sizeof(((struct tss_struct *)0)->SYSENTER_stack));
57 52
58#ifdef CONFIG_CC_STACKPROTECTOR 53#ifdef CONFIG_CC_STACKPROTECTOR
59 BLANK(); 54 BLANK();
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 630212fa9b9d..bf51e51d808d 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -23,6 +23,9 @@ int main(void)
23#ifdef CONFIG_PARAVIRT 23#ifdef CONFIG_PARAVIRT
24 OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64); 24 OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64);
25 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs); 25 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
26#ifdef CONFIG_DEBUG_ENTRY
27 OFFSET(PV_IRQ_save_fl, pv_irq_ops, save_fl);
28#endif
26 BLANK(); 29 BLANK();
27#endif 30#endif
28 31
@@ -63,6 +66,7 @@ int main(void)
63 66
64 OFFSET(TSS_ist, tss_struct, x86_tss.ist); 67 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
65 OFFSET(TSS_sp0, tss_struct, x86_tss.sp0); 68 OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
69 OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
66 BLANK(); 70 BLANK();
67 71
68#ifdef CONFIG_CC_STACKPROTECTOR 72#ifdef CONFIG_CC_STACKPROTECTOR
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index d58184b7cd44..ea831c858195 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -804,8 +804,11 @@ static void init_amd(struct cpuinfo_x86 *c)
804 case 0x17: init_amd_zn(c); break; 804 case 0x17: init_amd_zn(c); break;
805 } 805 }
806 806
807 /* Enable workaround for FXSAVE leak */ 807 /*
808 if (c->x86 >= 6) 808 * Enable workaround for FXSAVE leak on CPUs
809 * without a XSaveErPtr feature
810 */
811 if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
809 set_cpu_bug(c, X86_BUG_FXSAVE_LEAK); 812 set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);
810 813
811 cpu_detect_cache_sizes(c); 814 cpu_detect_cache_sizes(c);
@@ -826,8 +829,32 @@ static void init_amd(struct cpuinfo_x86 *c)
826 set_cpu_cap(c, X86_FEATURE_K8); 829 set_cpu_cap(c, X86_FEATURE_K8);
827 830
828 if (cpu_has(c, X86_FEATURE_XMM2)) { 831 if (cpu_has(c, X86_FEATURE_XMM2)) {
829 /* MFENCE stops RDTSC speculation */ 832 unsigned long long val;
830 set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); 833 int ret;
834
835 /*
836 * A serializing LFENCE has less overhead than MFENCE, so
837 * use it for execution serialization. On families which
838 * don't have that MSR, LFENCE is already serializing.
839 * msr_set_bit() uses the safe accessors, too, even if the MSR
840 * is not present.
841 */
842 msr_set_bit(MSR_F10H_DECFG,
843 MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
844
845 /*
846 * Verify that the MSR write was successful (could be running
847 * under a hypervisor) and only then assume that LFENCE is
848 * serializing.
849 */
850 ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
851 if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
852 /* A serializing LFENCE stops RDTSC speculation */
853 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
854 } else {
855 /* MFENCE stops RDTSC speculation */
856 set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
857 }
831 } 858 }
832 859
833 /* 860 /*
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index ba0b2424c9b0..390b3dc3d438 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -10,6 +10,10 @@
10 */ 10 */
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/utsname.h> 12#include <linux/utsname.h>
13#include <linux/cpu.h>
14
15#include <asm/nospec-branch.h>
16#include <asm/cmdline.h>
13#include <asm/bugs.h> 17#include <asm/bugs.h>
14#include <asm/processor.h> 18#include <asm/processor.h>
15#include <asm/processor-flags.h> 19#include <asm/processor-flags.h>
@@ -19,6 +23,9 @@
19#include <asm/alternative.h> 23#include <asm/alternative.h>
20#include <asm/pgtable.h> 24#include <asm/pgtable.h>
21#include <asm/set_memory.h> 25#include <asm/set_memory.h>
26#include <asm/intel-family.h>
27
28static void __init spectre_v2_select_mitigation(void);
22 29
23void __init check_bugs(void) 30void __init check_bugs(void)
24{ 31{
@@ -29,6 +36,9 @@ void __init check_bugs(void)
29 print_cpu_info(&boot_cpu_data); 36 print_cpu_info(&boot_cpu_data);
30 } 37 }
31 38
39 /* Select the proper spectre mitigation before patching alternatives */
40 spectre_v2_select_mitigation();
41
32#ifdef CONFIG_X86_32 42#ifdef CONFIG_X86_32
33 /* 43 /*
34 * Check whether we are able to run this kernel safely on SMP. 44 * Check whether we are able to run this kernel safely on SMP.
@@ -60,3 +70,214 @@ void __init check_bugs(void)
60 set_memory_4k((unsigned long)__va(0), 1); 70 set_memory_4k((unsigned long)__va(0), 1);
61#endif 71#endif
62} 72}
73
74/* The kernel command line selection */
75enum spectre_v2_mitigation_cmd {
76 SPECTRE_V2_CMD_NONE,
77 SPECTRE_V2_CMD_AUTO,
78 SPECTRE_V2_CMD_FORCE,
79 SPECTRE_V2_CMD_RETPOLINE,
80 SPECTRE_V2_CMD_RETPOLINE_GENERIC,
81 SPECTRE_V2_CMD_RETPOLINE_AMD,
82};
83
84static const char *spectre_v2_strings[] = {
85 [SPECTRE_V2_NONE] = "Vulnerable",
86 [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline",
87 [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline",
88 [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
89 [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
90};
91
92#undef pr_fmt
93#define pr_fmt(fmt) "Spectre V2 mitigation: " fmt
94
95static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
96
97static void __init spec2_print_if_insecure(const char *reason)
98{
99 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
100 pr_info("%s\n", reason);
101}
102
103static void __init spec2_print_if_secure(const char *reason)
104{
105 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
106 pr_info("%s\n", reason);
107}
108
109static inline bool retp_compiler(void)
110{
111 return __is_defined(RETPOLINE);
112}
113
114static inline bool match_option(const char *arg, int arglen, const char *opt)
115{
116 int len = strlen(opt);
117
118 return len == arglen && !strncmp(arg, opt, len);
119}
120
121static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
122{
123 char arg[20];
124 int ret;
125
126 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
127 sizeof(arg));
128 if (ret > 0) {
129 if (match_option(arg, ret, "off")) {
130 goto disable;
131 } else if (match_option(arg, ret, "on")) {
132 spec2_print_if_secure("force enabled on command line.");
133 return SPECTRE_V2_CMD_FORCE;
134 } else if (match_option(arg, ret, "retpoline")) {
135 spec2_print_if_insecure("retpoline selected on command line.");
136 return SPECTRE_V2_CMD_RETPOLINE;
137 } else if (match_option(arg, ret, "retpoline,amd")) {
138 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
139 pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
140 return SPECTRE_V2_CMD_AUTO;
141 }
142 spec2_print_if_insecure("AMD retpoline selected on command line.");
143 return SPECTRE_V2_CMD_RETPOLINE_AMD;
144 } else if (match_option(arg, ret, "retpoline,generic")) {
145 spec2_print_if_insecure("generic retpoline selected on command line.");
146 return SPECTRE_V2_CMD_RETPOLINE_GENERIC;
147 } else if (match_option(arg, ret, "auto")) {
148 return SPECTRE_V2_CMD_AUTO;
149 }
150 }
151
152 if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
153 return SPECTRE_V2_CMD_AUTO;
154disable:
155 spec2_print_if_insecure("disabled on command line.");
156 return SPECTRE_V2_CMD_NONE;
157}
158
159/* Check for Skylake-like CPUs (for RSB handling) */
160static bool __init is_skylake_era(void)
161{
162 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
163 boot_cpu_data.x86 == 6) {
164 switch (boot_cpu_data.x86_model) {
165 case INTEL_FAM6_SKYLAKE_MOBILE:
166 case INTEL_FAM6_SKYLAKE_DESKTOP:
167 case INTEL_FAM6_SKYLAKE_X:
168 case INTEL_FAM6_KABYLAKE_MOBILE:
169 case INTEL_FAM6_KABYLAKE_DESKTOP:
170 return true;
171 }
172 }
173 return false;
174}
175
176static void __init spectre_v2_select_mitigation(void)
177{
178 enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
179 enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
180
181 /*
182 * If the CPU is not affected and the command line mode is NONE or AUTO
183 * then nothing to do.
184 */
185 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
186 (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
187 return;
188
189 switch (cmd) {
190 case SPECTRE_V2_CMD_NONE:
191 return;
192
193 case SPECTRE_V2_CMD_FORCE:
194 /* FALLTRHU */
195 case SPECTRE_V2_CMD_AUTO:
196 goto retpoline_auto;
197
198 case SPECTRE_V2_CMD_RETPOLINE_AMD:
199 if (IS_ENABLED(CONFIG_RETPOLINE))
200 goto retpoline_amd;
201 break;
202 case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
203 if (IS_ENABLED(CONFIG_RETPOLINE))
204 goto retpoline_generic;
205 break;
206 case SPECTRE_V2_CMD_RETPOLINE:
207 if (IS_ENABLED(CONFIG_RETPOLINE))
208 goto retpoline_auto;
209 break;
210 }
211 pr_err("kernel not compiled with retpoline; no mitigation available!");
212 return;
213
214retpoline_auto:
215 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
216 retpoline_amd:
217 if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
218 pr_err("LFENCE not serializing. Switching to generic retpoline\n");
219 goto retpoline_generic;
220 }
221 mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
222 SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
223 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
224 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
225 } else {
226 retpoline_generic:
227 mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
228 SPECTRE_V2_RETPOLINE_MINIMAL;
229 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
230 }
231
232 spectre_v2_enabled = mode;
233 pr_info("%s\n", spectre_v2_strings[mode]);
234
235 /*
236 * If neither SMEP or KPTI are available, there is a risk of
237 * hitting userspace addresses in the RSB after a context switch
238 * from a shallow call stack to a deeper one. To prevent this fill
239 * the entire RSB, even when using IBRS.
240 *
241 * Skylake era CPUs have a separate issue with *underflow* of the
242 * RSB, when they will predict 'ret' targets from the generic BTB.
243 * The proper mitigation for this is IBRS. If IBRS is not supported
244 * or deactivated in favour of retpolines the RSB fill on context
245 * switch is required.
246 */
247 if ((!boot_cpu_has(X86_FEATURE_PTI) &&
248 !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
249 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
250 pr_info("Filling RSB on context switch\n");
251 }
252}
253
254#undef pr_fmt
255
256#ifdef CONFIG_SYSFS
257ssize_t cpu_show_meltdown(struct device *dev,
258 struct device_attribute *attr, char *buf)
259{
260 if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
261 return sprintf(buf, "Not affected\n");
262 if (boot_cpu_has(X86_FEATURE_PTI))
263 return sprintf(buf, "Mitigation: PTI\n");
264 return sprintf(buf, "Vulnerable\n");
265}
266
267ssize_t cpu_show_spectre_v1(struct device *dev,
268 struct device_attribute *attr, char *buf)
269{
270 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
271 return sprintf(buf, "Not affected\n");
272 return sprintf(buf, "Vulnerable\n");
273}
274
275ssize_t cpu_show_spectre_v2(struct device *dev,
276 struct device_attribute *attr, char *buf)
277{
278 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
279 return sprintf(buf, "Not affected\n");
280
281 return sprintf(buf, "%s\n", spectre_v2_strings[spectre_v2_enabled]);
282}
283#endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index fa998ca8aa5a..ef29ad001991 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -476,8 +476,8 @@ static const char *table_lookup_model(struct cpuinfo_x86 *c)
476 return NULL; /* Not found */ 476 return NULL; /* Not found */
477} 477}
478 478
479__u32 cpu_caps_cleared[NCAPINTS]; 479__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
480__u32 cpu_caps_set[NCAPINTS]; 480__u32 cpu_caps_set[NCAPINTS + NBUGINTS];
481 481
482void load_percpu_segment(int cpu) 482void load_percpu_segment(int cpu)
483{ 483{
@@ -490,28 +490,23 @@ void load_percpu_segment(int cpu)
490 load_stack_canary_segment(); 490 load_stack_canary_segment();
491} 491}
492 492
493/* Setup the fixmap mapping only once per-processor */ 493#ifdef CONFIG_X86_32
494static inline void setup_fixmap_gdt(int cpu) 494/* The 32-bit entry code needs to find cpu_entry_area. */
495{ 495DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
496#ifdef CONFIG_X86_64
497 /* On 64-bit systems, we use a read-only fixmap GDT. */
498 pgprot_t prot = PAGE_KERNEL_RO;
499#else
500 /*
501 * On native 32-bit systems, the GDT cannot be read-only because
502 * our double fault handler uses a task gate, and entering through
503 * a task gate needs to change an available TSS to busy. If the GDT
504 * is read-only, that will triple fault.
505 *
506 * On Xen PV, the GDT must be read-only because the hypervisor requires
507 * it.
508 */
509 pgprot_t prot = boot_cpu_has(X86_FEATURE_XENPV) ?
510 PAGE_KERNEL_RO : PAGE_KERNEL;
511#endif 496#endif
512 497
513 __set_fixmap(get_cpu_gdt_ro_index(cpu), get_cpu_gdt_paddr(cpu), prot); 498#ifdef CONFIG_X86_64
514} 499/*
500 * Special IST stacks which the CPU switches to when it calls
501 * an IST-marked descriptor entry. Up to 7 stacks (hardware
502 * limit), all of them are 4K, except the debug stack which
503 * is 8K.
504 */
505static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
506 [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
507 [DEBUG_STACK - 1] = DEBUG_STKSZ
508};
509#endif
515 510
516/* Load the original GDT from the per-cpu structure */ 511/* Load the original GDT from the per-cpu structure */
517void load_direct_gdt(int cpu) 512void load_direct_gdt(int cpu)
@@ -747,7 +742,7 @@ static void apply_forced_caps(struct cpuinfo_x86 *c)
747{ 742{
748 int i; 743 int i;
749 744
750 for (i = 0; i < NCAPINTS; i++) { 745 for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
751 c->x86_capability[i] &= ~cpu_caps_cleared[i]; 746 c->x86_capability[i] &= ~cpu_caps_cleared[i];
752 c->x86_capability[i] |= cpu_caps_set[i]; 747 c->x86_capability[i] |= cpu_caps_set[i];
753 } 748 }
@@ -927,6 +922,13 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
927 } 922 }
928 923
929 setup_force_cpu_cap(X86_FEATURE_ALWAYS); 924 setup_force_cpu_cap(X86_FEATURE_ALWAYS);
925
926 if (c->x86_vendor != X86_VENDOR_AMD)
927 setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
928
929 setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
930 setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
931
930 fpu__init_system(c); 932 fpu__init_system(c);
931 933
932#ifdef CONFIG_X86_32 934#ifdef CONFIG_X86_32
@@ -1250,7 +1252,7 @@ void enable_sep_cpu(void)
1250 return; 1252 return;
1251 1253
1252 cpu = get_cpu(); 1254 cpu = get_cpu();
1253 tss = &per_cpu(cpu_tss, cpu); 1255 tss = &per_cpu(cpu_tss_rw, cpu);
1254 1256
1255 /* 1257 /*
1256 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field -- 1258 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
@@ -1259,11 +1261,7 @@ void enable_sep_cpu(void)
1259 1261
1260 tss->x86_tss.ss1 = __KERNEL_CS; 1262 tss->x86_tss.ss1 = __KERNEL_CS;
1261 wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0); 1263 wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
1262 1264 wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
1263 wrmsr(MSR_IA32_SYSENTER_ESP,
1264 (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
1265 0);
1266
1267 wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0); 1265 wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
1268 1266
1269 put_cpu(); 1267 put_cpu();
@@ -1357,25 +1355,22 @@ DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
1357DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; 1355DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
1358EXPORT_PER_CPU_SYMBOL(__preempt_count); 1356EXPORT_PER_CPU_SYMBOL(__preempt_count);
1359 1357
1360/*
1361 * Special IST stacks which the CPU switches to when it calls
1362 * an IST-marked descriptor entry. Up to 7 stacks (hardware
1363 * limit), all of them are 4K, except the debug stack which
1364 * is 8K.
1365 */
1366static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
1367 [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
1368 [DEBUG_STACK - 1] = DEBUG_STKSZ
1369};
1370
1371static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
1372 [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
1373
1374/* May not be marked __init: used by software suspend */ 1358/* May not be marked __init: used by software suspend */
1375void syscall_init(void) 1359void syscall_init(void)
1376{ 1360{
1361 extern char _entry_trampoline[];
1362 extern char entry_SYSCALL_64_trampoline[];
1363
1364 int cpu = smp_processor_id();
1365 unsigned long SYSCALL64_entry_trampoline =
1366 (unsigned long)get_cpu_entry_area(cpu)->entry_trampoline +
1367 (entry_SYSCALL_64_trampoline - _entry_trampoline);
1368
1377 wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS); 1369 wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
1378 wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64); 1370 if (static_cpu_has(X86_FEATURE_PTI))
1371 wrmsrl(MSR_LSTAR, SYSCALL64_entry_trampoline);
1372 else
1373 wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
1379 1374
1380#ifdef CONFIG_IA32_EMULATION 1375#ifdef CONFIG_IA32_EMULATION
1381 wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat); 1376 wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
@@ -1386,7 +1381,7 @@ void syscall_init(void)
1386 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit). 1381 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
1387 */ 1382 */
1388 wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); 1383 wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
1389 wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); 1384 wrmsrl_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1));
1390 wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat); 1385 wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
1391#else 1386#else
1392 wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret); 1387 wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
@@ -1530,7 +1525,7 @@ void cpu_init(void)
1530 if (cpu) 1525 if (cpu)
1531 load_ucode_ap(); 1526 load_ucode_ap();
1532 1527
1533 t = &per_cpu(cpu_tss, cpu); 1528 t = &per_cpu(cpu_tss_rw, cpu);
1534 oist = &per_cpu(orig_ist, cpu); 1529 oist = &per_cpu(orig_ist, cpu);
1535 1530
1536#ifdef CONFIG_NUMA 1531#ifdef CONFIG_NUMA
@@ -1569,7 +1564,7 @@ void cpu_init(void)
1569 * set up and load the per-CPU TSS 1564 * set up and load the per-CPU TSS
1570 */ 1565 */
1571 if (!oist->ist[0]) { 1566 if (!oist->ist[0]) {
1572 char *estacks = per_cpu(exception_stacks, cpu); 1567 char *estacks = get_cpu_entry_area(cpu)->exception_stacks;
1573 1568
1574 for (v = 0; v < N_EXCEPTION_STACKS; v++) { 1569 for (v = 0; v < N_EXCEPTION_STACKS; v++) {
1575 estacks += exception_stack_sizes[v]; 1570 estacks += exception_stack_sizes[v];
@@ -1580,7 +1575,7 @@ void cpu_init(void)
1580 } 1575 }
1581 } 1576 }
1582 1577
1583 t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); 1578 t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
1584 1579
1585 /* 1580 /*
1586 * <= is required because the CPU will access up to 1581 * <= is required because the CPU will access up to
@@ -1596,11 +1591,12 @@ void cpu_init(void)
1596 enter_lazy_tlb(&init_mm, me); 1591 enter_lazy_tlb(&init_mm, me);
1597 1592
1598 /* 1593 /*
1599 * Initialize the TSS. Don't bother initializing sp0, as the initial 1594 * Initialize the TSS. sp0 points to the entry trampoline stack
1600 * task never enters user mode. 1595 * regardless of what task is running.
1601 */ 1596 */
1602 set_tss_desc(cpu, t); 1597 set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
1603 load_TR_desc(); 1598 load_TR_desc();
1599 load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
1604 1600
1605 load_mm_ldt(&init_mm); 1601 load_mm_ldt(&init_mm);
1606 1602
@@ -1612,7 +1608,6 @@ void cpu_init(void)
1612 if (is_uv_system()) 1608 if (is_uv_system())
1613 uv_cpu_init(); 1609 uv_cpu_init();
1614 1610
1615 setup_fixmap_gdt(cpu);
1616 load_fixmap_gdt(cpu); 1611 load_fixmap_gdt(cpu);
1617} 1612}
1618 1613
@@ -1622,7 +1617,7 @@ void cpu_init(void)
1622{ 1617{
1623 int cpu = smp_processor_id(); 1618 int cpu = smp_processor_id();
1624 struct task_struct *curr = current; 1619 struct task_struct *curr = current;
1625 struct tss_struct *t = &per_cpu(cpu_tss, cpu); 1620 struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu);
1626 1621
1627 wait_for_master_cpu(cpu); 1622 wait_for_master_cpu(cpu);
1628 1623
@@ -1657,12 +1652,12 @@ void cpu_init(void)
1657 * Initialize the TSS. Don't bother initializing sp0, as the initial 1652 * Initialize the TSS. Don't bother initializing sp0, as the initial
1658 * task never enters user mode. 1653 * task never enters user mode.
1659 */ 1654 */
1660 set_tss_desc(cpu, t); 1655 set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
1661 load_TR_desc(); 1656 load_TR_desc();
1662 1657
1663 load_mm_ldt(&init_mm); 1658 load_mm_ldt(&init_mm);
1664 1659
1665 t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); 1660 t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
1666 1661
1667#ifdef CONFIG_DOUBLEFAULT 1662#ifdef CONFIG_DOUBLEFAULT
1668 /* Set up doublefault TSS pointer in the GDT */ 1663 /* Set up doublefault TSS pointer in the GDT */
@@ -1674,7 +1669,6 @@ void cpu_init(void)
1674 1669
1675 fpu__init_cpu(); 1670 fpu__init_cpu();
1676 1671
1677 setup_fixmap_gdt(cpu);
1678 load_fixmap_gdt(cpu); 1672 load_fixmap_gdt(cpu);
1679} 1673}
1680#endif 1674#endif
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 88dcf8479013..99442370de40 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -525,10 +525,6 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
525 */ 525 */
526 if (static_branch_unlikely(&rdt_mon_enable_key)) 526 if (static_branch_unlikely(&rdt_mon_enable_key))
527 rmdir_mondata_subdir_allrdtgrp(r, d->id); 527 rmdir_mondata_subdir_allrdtgrp(r, d->id);
528 kfree(d->ctrl_val);
529 kfree(d->rmid_busy_llc);
530 kfree(d->mbm_total);
531 kfree(d->mbm_local);
532 list_del(&d->list); 528 list_del(&d->list);
533 if (is_mbm_enabled()) 529 if (is_mbm_enabled())
534 cancel_delayed_work(&d->mbm_over); 530 cancel_delayed_work(&d->mbm_over);
@@ -545,6 +541,10 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
545 cancel_delayed_work(&d->cqm_limbo); 541 cancel_delayed_work(&d->cqm_limbo);
546 } 542 }
547 543
544 kfree(d->ctrl_val);
545 kfree(d->rmid_busy_llc);
546 kfree(d->mbm_total);
547 kfree(d->mbm_local);
548 kfree(d); 548 kfree(d);
549 return; 549 return;
550 } 550 }
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index b1d616d08eee..868e412b4f0c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1785,6 +1785,11 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
1785void (*machine_check_vector)(struct pt_regs *, long error_code) = 1785void (*machine_check_vector)(struct pt_regs *, long error_code) =
1786 unexpected_machine_check; 1786 unexpected_machine_check;
1787 1787
1788dotraplinkage void do_mce(struct pt_regs *regs, long error_code)
1789{
1790 machine_check_vector(regs, error_code);
1791}
1792
1788/* 1793/*
1789 * Called for each booted CPU to set up machine checks. 1794 * Called for each booted CPU to set up machine checks.
1790 * Must be called with preempt off: 1795 * Must be called with preempt off:
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index c6daec4bdba5..330b8462d426 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -470,6 +470,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
470#define F14H_MPB_MAX_SIZE 1824 470#define F14H_MPB_MAX_SIZE 1824
471#define F15H_MPB_MAX_SIZE 4096 471#define F15H_MPB_MAX_SIZE 4096
472#define F16H_MPB_MAX_SIZE 3458 472#define F16H_MPB_MAX_SIZE 3458
473#define F17H_MPB_MAX_SIZE 3200
473 474
474 switch (family) { 475 switch (family) {
475 case 0x14: 476 case 0x14:
@@ -481,6 +482,9 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
481 case 0x16: 482 case 0x16:
482 max_size = F16H_MPB_MAX_SIZE; 483 max_size = F16H_MPB_MAX_SIZE;
483 break; 484 break;
485 case 0x17:
486 max_size = F17H_MPB_MAX_SIZE;
487 break;
484 default: 488 default:
485 max_size = F1XH_MPB_MAX_SIZE; 489 max_size = F1XH_MPB_MAX_SIZE;
486 break; 490 break;
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index c4fa4a85d4cb..e4fc595cd6ea 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -239,7 +239,7 @@ static int __init save_microcode_in_initrd(void)
239 break; 239 break;
240 case X86_VENDOR_AMD: 240 case X86_VENDOR_AMD:
241 if (c->x86 >= 0x10) 241 if (c->x86 >= 0x10)
242 return save_microcode_in_initrd_amd(cpuid_eax(1)); 242 ret = save_microcode_in_initrd_amd(cpuid_eax(1));
243 break; 243 break;
244 default: 244 default:
245 break; 245 break;
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 7dbcb7adf797..f7c55b0e753a 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -45,6 +45,9 @@ static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
45/* Current microcode patch used in early patching on the APs. */ 45/* Current microcode patch used in early patching on the APs. */
46static struct microcode_intel *intel_ucode_patch; 46static struct microcode_intel *intel_ucode_patch;
47 47
48/* last level cache size per core */
49static int llc_size_per_core;
50
48static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1, 51static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
49 unsigned int s2, unsigned int p2) 52 unsigned int s2, unsigned int p2)
50{ 53{
@@ -565,15 +568,6 @@ static void print_ucode(struct ucode_cpu_info *uci)
565} 568}
566#else 569#else
567 570
568/*
569 * Flush global tlb. We only do this in x86_64 where paging has been enabled
570 * already and PGE should be enabled as well.
571 */
572static inline void flush_tlb_early(void)
573{
574 __native_flush_tlb_global_irq_disabled();
575}
576
577static inline void print_ucode(struct ucode_cpu_info *uci) 571static inline void print_ucode(struct ucode_cpu_info *uci)
578{ 572{
579 struct microcode_intel *mc; 573 struct microcode_intel *mc;
@@ -602,10 +596,6 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
602 if (rev != mc->hdr.rev) 596 if (rev != mc->hdr.rev)
603 return -1; 597 return -1;
604 598
605#ifdef CONFIG_X86_64
606 /* Flush global tlb. This is precaution. */
607 flush_tlb_early();
608#endif
609 uci->cpu_sig.rev = rev; 599 uci->cpu_sig.rev = rev;
610 600
611 if (early) 601 if (early)
@@ -923,8 +913,19 @@ static bool is_blacklisted(unsigned int cpu)
923{ 913{
924 struct cpuinfo_x86 *c = &cpu_data(cpu); 914 struct cpuinfo_x86 *c = &cpu_data(cpu);
925 915
926 if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X) { 916 /*
927 pr_err_once("late loading on model 79 is disabled.\n"); 917 * Late loading on model 79 with microcode revision less than 0x0b000021
918 * and LLC size per core bigger than 2.5MB may result in a system hang.
919 * This behavior is documented in item BDF90, #334165 (Intel Xeon
920 * Processor E7-8800/4800 v4 Product Family).
921 */
922 if (c->x86 == 6 &&
923 c->x86_model == INTEL_FAM6_BROADWELL_X &&
924 c->x86_mask == 0x01 &&
925 llc_size_per_core > 2621440 &&
926 c->microcode < 0x0b000021) {
927 pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
928 pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
928 return true; 929 return true;
929 } 930 }
930 931
@@ -979,6 +980,15 @@ static struct microcode_ops microcode_intel_ops = {
979 .apply_microcode = apply_microcode_intel, 980 .apply_microcode = apply_microcode_intel,
980}; 981};
981 982
983static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
984{
985 u64 llc_size = c->x86_cache_size * 1024;
986
987 do_div(llc_size, c->x86_max_cores);
988
989 return (int)llc_size;
990}
991
982struct microcode_ops * __init init_intel_microcode(void) 992struct microcode_ops * __init init_intel_microcode(void)
983{ 993{
984 struct cpuinfo_x86 *c = &boot_cpu_data; 994 struct cpuinfo_x86 *c = &boot_cpu_data;
@@ -989,5 +999,7 @@ struct microcode_ops * __init init_intel_microcode(void)
989 return NULL; 999 return NULL;
990 } 1000 }
991 1001
1002 llc_size_per_core = calc_llc_size_per_core(c);
1003
992 return &microcode_intel_ops; 1004 return &microcode_intel_ops;
993} 1005}
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 05459ad3db46..d0e69769abfd 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -21,7 +21,6 @@ struct cpuid_bit {
21static const struct cpuid_bit cpuid_bits[] = { 21static const struct cpuid_bit cpuid_bits[] = {
22 { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 }, 22 { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 },
23 { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 }, 23 { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
24 { X86_FEATURE_INTEL_PT, CPUID_EBX, 25, 0x00000007, 0 },
25 { X86_FEATURE_AVX512_4VNNIW, CPUID_EDX, 2, 0x00000007, 0 }, 24 { X86_FEATURE_AVX512_4VNNIW, CPUID_EDX, 2, 0x00000007, 0 },
26 { X86_FEATURE_AVX512_4FMAPS, CPUID_EDX, 3, 0x00000007, 0 }, 25 { X86_FEATURE_AVX512_4FMAPS, CPUID_EDX, 3, 0x00000007, 0 },
27 { X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 }, 26 { X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 },
diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
index 0e662c55ae90..0b8cedb20d6d 100644
--- a/arch/x86/kernel/doublefault.c
+++ b/arch/x86/kernel/doublefault.c
@@ -50,25 +50,23 @@ static void doublefault_fn(void)
50 cpu_relax(); 50 cpu_relax();
51} 51}
52 52
53struct tss_struct doublefault_tss __cacheline_aligned = { 53struct x86_hw_tss doublefault_tss __cacheline_aligned = {
54 .x86_tss = { 54 .sp0 = STACK_START,
55 .sp0 = STACK_START, 55 .ss0 = __KERNEL_DS,
56 .ss0 = __KERNEL_DS, 56 .ldt = 0,
57 .ldt = 0, 57 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
58 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, 58
59 59 .ip = (unsigned long) doublefault_fn,
60 .ip = (unsigned long) doublefault_fn, 60 /* 0x2 bit is always set */
61 /* 0x2 bit is always set */ 61 .flags = X86_EFLAGS_SF | 0x2,
62 .flags = X86_EFLAGS_SF | 0x2, 62 .sp = STACK_START,
63 .sp = STACK_START, 63 .es = __USER_DS,
64 .es = __USER_DS, 64 .cs = __KERNEL_CS,
65 .cs = __KERNEL_CS, 65 .ss = __KERNEL_DS,
66 .ss = __KERNEL_DS, 66 .ds = __USER_DS,
67 .ds = __USER_DS, 67 .fs = __KERNEL_PERCPU,
68 .fs = __KERNEL_PERCPU, 68
69 69 .__cr3 = __pa_nodebug(swapper_pg_dir),
70 .__cr3 = __pa_nodebug(swapper_pg_dir),
71 }
72}; 70};
73 71
74/* dummy for do_double_fault() call */ 72/* dummy for do_double_fault() call */
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index f13b4c00a5de..afbecff161d1 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -18,6 +18,7 @@
18#include <linux/nmi.h> 18#include <linux/nmi.h>
19#include <linux/sysfs.h> 19#include <linux/sysfs.h>
20 20
21#include <asm/cpu_entry_area.h>
21#include <asm/stacktrace.h> 22#include <asm/stacktrace.h>
22#include <asm/unwind.h> 23#include <asm/unwind.h>
23 24
@@ -43,6 +44,24 @@ bool in_task_stack(unsigned long *stack, struct task_struct *task,
43 return true; 44 return true;
44} 45}
45 46
47bool in_entry_stack(unsigned long *stack, struct stack_info *info)
48{
49 struct entry_stack *ss = cpu_entry_stack(smp_processor_id());
50
51 void *begin = ss;
52 void *end = ss + 1;
53
54 if ((void *)stack < begin || (void *)stack >= end)
55 return false;
56
57 info->type = STACK_TYPE_ENTRY;
58 info->begin = begin;
59 info->end = end;
60 info->next_sp = NULL;
61
62 return true;
63}
64
46static void printk_stack_address(unsigned long address, int reliable, 65static void printk_stack_address(unsigned long address, int reliable,
47 char *log_lvl) 66 char *log_lvl)
48{ 67{
@@ -50,6 +69,39 @@ static void printk_stack_address(unsigned long address, int reliable,
50 printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address); 69 printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
51} 70}
52 71
72void show_iret_regs(struct pt_regs *regs)
73{
74 printk(KERN_DEFAULT "RIP: %04x:%pS\n", (int)regs->cs, (void *)regs->ip);
75 printk(KERN_DEFAULT "RSP: %04x:%016lx EFLAGS: %08lx", (int)regs->ss,
76 regs->sp, regs->flags);
77}
78
79static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
80 bool partial)
81{
82 /*
83 * These on_stack() checks aren't strictly necessary: the unwind code
84 * has already validated the 'regs' pointer. The checks are done for
85 * ordering reasons: if the registers are on the next stack, we don't
86 * want to print them out yet. Otherwise they'll be shown as part of
87 * the wrong stack. Later, when show_trace_log_lvl() switches to the
88 * next stack, this function will be called again with the same regs so
89 * they can be printed in the right context.
90 */
91 if (!partial && on_stack(info, regs, sizeof(*regs))) {
92 __show_regs(regs, 0);
93
94 } else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
95 IRET_FRAME_SIZE)) {
96 /*
97 * When an interrupt or exception occurs in entry code, the
98 * full pt_regs might not have been saved yet. In that case
99 * just print the iret frame.
100 */
101 show_iret_regs(regs);
102 }
103}
104
53void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, 105void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
54 unsigned long *stack, char *log_lvl) 106 unsigned long *stack, char *log_lvl)
55{ 107{
@@ -57,11 +109,13 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
57 struct stack_info stack_info = {0}; 109 struct stack_info stack_info = {0};
58 unsigned long visit_mask = 0; 110 unsigned long visit_mask = 0;
59 int graph_idx = 0; 111 int graph_idx = 0;
112 bool partial;
60 113
61 printk("%sCall Trace:\n", log_lvl); 114 printk("%sCall Trace:\n", log_lvl);
62 115
63 unwind_start(&state, task, regs, stack); 116 unwind_start(&state, task, regs, stack);
64 stack = stack ? : get_stack_pointer(task, regs); 117 stack = stack ? : get_stack_pointer(task, regs);
118 regs = unwind_get_entry_regs(&state, &partial);
65 119
66 /* 120 /*
67 * Iterate through the stacks, starting with the current stack pointer. 121 * Iterate through the stacks, starting with the current stack pointer.
@@ -71,31 +125,35 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
71 * - task stack 125 * - task stack
72 * - interrupt stack 126 * - interrupt stack
73 * - HW exception stacks (double fault, nmi, debug, mce) 127 * - HW exception stacks (double fault, nmi, debug, mce)
128 * - entry stack
74 * 129 *
75 * x86-32 can have up to three stacks: 130 * x86-32 can have up to four stacks:
76 * - task stack 131 * - task stack
77 * - softirq stack 132 * - softirq stack
78 * - hardirq stack 133 * - hardirq stack
134 * - entry stack
79 */ 135 */
80 for (regs = NULL; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) { 136 for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
81 const char *stack_name; 137 const char *stack_name;
82 138
83 /* 139 if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
84 * If we overflowed the task stack into a guard page, jump back 140 /*
85 * to the bottom of the usable stack. 141 * We weren't on a valid stack. It's possible that
86 */ 142 * we overflowed a valid stack into a guard page.
87 if (task_stack_page(task) - (void *)stack < PAGE_SIZE) 143 * See if the next page up is valid so that we can
88 stack = task_stack_page(task); 144 * generate some kind of backtrace if this happens.
89 145 */
90 if (get_stack_info(stack, task, &stack_info, &visit_mask)) 146 stack = (unsigned long *)PAGE_ALIGN((unsigned long)stack);
91 break; 147 if (get_stack_info(stack, task, &stack_info, &visit_mask))
148 break;
149 }
92 150
93 stack_name = stack_type_name(stack_info.type); 151 stack_name = stack_type_name(stack_info.type);
94 if (stack_name) 152 if (stack_name)
95 printk("%s <%s>\n", log_lvl, stack_name); 153 printk("%s <%s>\n", log_lvl, stack_name);
96 154
97 if (regs && on_stack(&stack_info, regs, sizeof(*regs))) 155 if (regs)
98 __show_regs(regs, 0); 156 show_regs_if_on_stack(&stack_info, regs, partial);
99 157
100 /* 158 /*
101 * Scan the stack, printing any text addresses we find. At the 159 * Scan the stack, printing any text addresses we find. At the
@@ -119,7 +177,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
119 177
120 /* 178 /*
121 * Don't print regs->ip again if it was already printed 179 * Don't print regs->ip again if it was already printed
122 * by __show_regs() below. 180 * by show_regs_if_on_stack().
123 */ 181 */
124 if (regs && stack == &regs->ip) 182 if (regs && stack == &regs->ip)
125 goto next; 183 goto next;
@@ -154,9 +212,9 @@ next:
154 unwind_next_frame(&state); 212 unwind_next_frame(&state);
155 213
156 /* if the frame has entry regs, print them */ 214 /* if the frame has entry regs, print them */
157 regs = unwind_get_entry_regs(&state); 215 regs = unwind_get_entry_regs(&state, &partial);
158 if (regs && on_stack(&stack_info, regs, sizeof(*regs))) 216 if (regs)
159 __show_regs(regs, 0); 217 show_regs_if_on_stack(&stack_info, regs, partial);
160 } 218 }
161 219
162 if (stack_name) 220 if (stack_name)
@@ -252,11 +310,13 @@ int __die(const char *str, struct pt_regs *regs, long err)
252 unsigned long sp; 310 unsigned long sp;
253#endif 311#endif
254 printk(KERN_DEFAULT 312 printk(KERN_DEFAULT
255 "%s: %04lx [#%d]%s%s%s%s\n", str, err & 0xffff, ++die_counter, 313 "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter,
256 IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "", 314 IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
257 IS_ENABLED(CONFIG_SMP) ? " SMP" : "", 315 IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
258 debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "", 316 debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
259 IS_ENABLED(CONFIG_KASAN) ? " KASAN" : ""); 317 IS_ENABLED(CONFIG_KASAN) ? " KASAN" : "",
318 IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION) ?
319 (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : "");
260 320
261 if (notify_die(DIE_OOPS, str, regs, err, 321 if (notify_die(DIE_OOPS, str, regs, err,
262 current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP) 322 current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index daefae83a3aa..04170f63e3a1 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -26,6 +26,9 @@ const char *stack_type_name(enum stack_type type)
26 if (type == STACK_TYPE_SOFTIRQ) 26 if (type == STACK_TYPE_SOFTIRQ)
27 return "SOFTIRQ"; 27 return "SOFTIRQ";
28 28
29 if (type == STACK_TYPE_ENTRY)
30 return "ENTRY_TRAMPOLINE";
31
29 return NULL; 32 return NULL;
30} 33}
31 34
@@ -93,6 +96,9 @@ int get_stack_info(unsigned long *stack, struct task_struct *task,
93 if (task != current) 96 if (task != current)
94 goto unknown; 97 goto unknown;
95 98
99 if (in_entry_stack(stack, info))
100 goto recursion_check;
101
96 if (in_hardirq_stack(stack, info)) 102 if (in_hardirq_stack(stack, info))
97 goto recursion_check; 103 goto recursion_check;
98 104
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 88ce2ffdb110..563e28d14f2c 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -37,6 +37,15 @@ const char *stack_type_name(enum stack_type type)
37 if (type == STACK_TYPE_IRQ) 37 if (type == STACK_TYPE_IRQ)
38 return "IRQ"; 38 return "IRQ";
39 39
40 if (type == STACK_TYPE_ENTRY) {
41 /*
42 * On 64-bit, we have a generic entry stack that we
43 * use for all the kernel entry points, including
44 * SYSENTER.
45 */
46 return "ENTRY_TRAMPOLINE";
47 }
48
40 if (type >= STACK_TYPE_EXCEPTION && type <= STACK_TYPE_EXCEPTION_LAST) 49 if (type >= STACK_TYPE_EXCEPTION && type <= STACK_TYPE_EXCEPTION_LAST)
41 return exception_stack_names[type - STACK_TYPE_EXCEPTION]; 50 return exception_stack_names[type - STACK_TYPE_EXCEPTION];
42 51
@@ -115,6 +124,9 @@ int get_stack_info(unsigned long *stack, struct task_struct *task,
115 if (in_irq_stack(stack, info)) 124 if (in_irq_stack(stack, info))
116 goto recursion_check; 125 goto recursion_check;
117 126
127 if (in_entry_stack(stack, info))
128 goto recursion_check;
129
118 goto unknown; 130 goto unknown;
119 131
120recursion_check: 132recursion_check:
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index b6c6468e10bc..4c8440de3355 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -8,6 +8,7 @@
8#include <asm/segment.h> 8#include <asm/segment.h>
9#include <asm/export.h> 9#include <asm/export.h>
10#include <asm/ftrace.h> 10#include <asm/ftrace.h>
11#include <asm/nospec-branch.h>
11 12
12#ifdef CC_USING_FENTRY 13#ifdef CC_USING_FENTRY
13# define function_hook __fentry__ 14# define function_hook __fentry__
@@ -197,7 +198,8 @@ ftrace_stub:
197 movl 0x4(%ebp), %edx 198 movl 0x4(%ebp), %edx
198 subl $MCOUNT_INSN_SIZE, %eax 199 subl $MCOUNT_INSN_SIZE, %eax
199 200
200 call *ftrace_trace_function 201 movl ftrace_trace_function, %ecx
202 CALL_NOSPEC %ecx
201 203
202 popl %edx 204 popl %edx
203 popl %ecx 205 popl %ecx
@@ -241,5 +243,5 @@ return_to_handler:
241 movl %eax, %ecx 243 movl %eax, %ecx
242 popl %edx 244 popl %edx
243 popl %eax 245 popl %eax
244 jmp *%ecx 246 JMP_NOSPEC %ecx
245#endif 247#endif
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index c832291d948a..91b2cff4b79a 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -7,7 +7,8 @@
7#include <asm/ptrace.h> 7#include <asm/ptrace.h>
8#include <asm/ftrace.h> 8#include <asm/ftrace.h>
9#include <asm/export.h> 9#include <asm/export.h>
10 10#include <asm/nospec-branch.h>
11#include <asm/unwind_hints.h>
11 12
12 .code64 13 .code64
13 .section .entry.text, "ax" 14 .section .entry.text, "ax"
@@ -20,7 +21,6 @@ EXPORT_SYMBOL(__fentry__)
20EXPORT_SYMBOL(mcount) 21EXPORT_SYMBOL(mcount)
21#endif 22#endif
22 23
23/* All cases save the original rbp (8 bytes) */
24#ifdef CONFIG_FRAME_POINTER 24#ifdef CONFIG_FRAME_POINTER
25# ifdef CC_USING_FENTRY 25# ifdef CC_USING_FENTRY
26/* Save parent and function stack frames (rip and rbp) */ 26/* Save parent and function stack frames (rip and rbp) */
@@ -31,7 +31,7 @@ EXPORT_SYMBOL(mcount)
31# endif 31# endif
32#else 32#else
33/* No need to save a stack frame */ 33/* No need to save a stack frame */
34# define MCOUNT_FRAME_SIZE 8 34# define MCOUNT_FRAME_SIZE 0
35#endif /* CONFIG_FRAME_POINTER */ 35#endif /* CONFIG_FRAME_POINTER */
36 36
37/* Size of stack used to save mcount regs in save_mcount_regs */ 37/* Size of stack used to save mcount regs in save_mcount_regs */
@@ -64,10 +64,10 @@ EXPORT_SYMBOL(mcount)
64 */ 64 */
65.macro save_mcount_regs added=0 65.macro save_mcount_regs added=0
66 66
67 /* Always save the original rbp */ 67#ifdef CONFIG_FRAME_POINTER
68 /* Save the original rbp */
68 pushq %rbp 69 pushq %rbp
69 70
70#ifdef CONFIG_FRAME_POINTER
71 /* 71 /*
72 * Stack traces will stop at the ftrace trampoline if the frame pointer 72 * Stack traces will stop at the ftrace trampoline if the frame pointer
73 * is not set up properly. If fentry is used, we need to save a frame 73 * is not set up properly. If fentry is used, we need to save a frame
@@ -105,7 +105,11 @@ EXPORT_SYMBOL(mcount)
105 * Save the original RBP. Even though the mcount ABI does not 105 * Save the original RBP. Even though the mcount ABI does not
106 * require this, it helps out callers. 106 * require this, it helps out callers.
107 */ 107 */
108#ifdef CONFIG_FRAME_POINTER
108 movq MCOUNT_REG_SIZE-8(%rsp), %rdx 109 movq MCOUNT_REG_SIZE-8(%rsp), %rdx
110#else
111 movq %rbp, %rdx
112#endif
109 movq %rdx, RBP(%rsp) 113 movq %rdx, RBP(%rsp)
110 114
111 /* Copy the parent address into %rsi (second parameter) */ 115 /* Copy the parent address into %rsi (second parameter) */
@@ -148,7 +152,7 @@ EXPORT_SYMBOL(mcount)
148 152
149ENTRY(function_hook) 153ENTRY(function_hook)
150 retq 154 retq
151END(function_hook) 155ENDPROC(function_hook)
152 156
153ENTRY(ftrace_caller) 157ENTRY(ftrace_caller)
154 /* save_mcount_regs fills in first two parameters */ 158 /* save_mcount_regs fills in first two parameters */
@@ -184,7 +188,7 @@ GLOBAL(ftrace_graph_call)
184/* This is weak to keep gas from relaxing the jumps */ 188/* This is weak to keep gas from relaxing the jumps */
185WEAK(ftrace_stub) 189WEAK(ftrace_stub)
186 retq 190 retq
187END(ftrace_caller) 191ENDPROC(ftrace_caller)
188 192
189ENTRY(ftrace_regs_caller) 193ENTRY(ftrace_regs_caller)
190 /* Save the current flags before any operations that can change them */ 194 /* Save the current flags before any operations that can change them */
@@ -255,7 +259,7 @@ GLOBAL(ftrace_regs_caller_end)
255 259
256 jmp ftrace_epilogue 260 jmp ftrace_epilogue
257 261
258END(ftrace_regs_caller) 262ENDPROC(ftrace_regs_caller)
259 263
260 264
261#else /* ! CONFIG_DYNAMIC_FTRACE */ 265#else /* ! CONFIG_DYNAMIC_FTRACE */
@@ -286,12 +290,12 @@ trace:
286 * ip and parent ip are used and the list function is called when 290 * ip and parent ip are used and the list function is called when
287 * function tracing is enabled. 291 * function tracing is enabled.
288 */ 292 */
289 call *ftrace_trace_function 293 movq ftrace_trace_function, %r8
290 294 CALL_NOSPEC %r8
291 restore_mcount_regs 295 restore_mcount_regs
292 296
293 jmp fgraph_trace 297 jmp fgraph_trace
294END(function_hook) 298ENDPROC(function_hook)
295#endif /* CONFIG_DYNAMIC_FTRACE */ 299#endif /* CONFIG_DYNAMIC_FTRACE */
296 300
297#ifdef CONFIG_FUNCTION_GRAPH_TRACER 301#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -313,9 +317,10 @@ ENTRY(ftrace_graph_caller)
313 restore_mcount_regs 317 restore_mcount_regs
314 318
315 retq 319 retq
316END(ftrace_graph_caller) 320ENDPROC(ftrace_graph_caller)
317 321
318GLOBAL(return_to_handler) 322ENTRY(return_to_handler)
323 UNWIND_HINT_EMPTY
319 subq $24, %rsp 324 subq $24, %rsp
320 325
321 /* Save the return values */ 326 /* Save the return values */
@@ -329,5 +334,6 @@ GLOBAL(return_to_handler)
329 movq 8(%rsp), %rdx 334 movq 8(%rsp), %rdx
330 movq (%rsp), %rax 335 movq (%rsp), %rax
331 addq $24, %rsp 336 addq $24, %rsp
332 jmp *%rdi 337 JMP_NOSPEC %rdi
338END(return_to_handler)
333#endif 339#endif
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 6a5d757b9cfd..7ba5d819ebe3 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -157,8 +157,8 @@ unsigned long __head __startup_64(unsigned long physaddr,
157 p = fixup_pointer(&phys_base, physaddr); 157 p = fixup_pointer(&phys_base, physaddr);
158 *p += load_delta - sme_get_me_mask(); 158 *p += load_delta - sme_get_me_mask();
159 159
160 /* Encrypt the kernel (if SME is active) */ 160 /* Encrypt the kernel and related (if SME is active) */
161 sme_encrypt_kernel(); 161 sme_encrypt_kernel(bp);
162 162
163 /* 163 /*
164 * Return the SME encryption mask (if SME is active) to be used as a 164 * Return the SME encryption mask (if SME is active) to be used as a
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 7dca675fe78d..04a625f0fcda 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -341,6 +341,27 @@ GLOBAL(early_recursion_flag)
341 .balign PAGE_SIZE; \ 341 .balign PAGE_SIZE; \
342GLOBAL(name) 342GLOBAL(name)
343 343
344#ifdef CONFIG_PAGE_TABLE_ISOLATION
345/*
346 * Each PGD needs to be 8k long and 8k aligned. We do not
347 * ever go out to userspace with these, so we do not
348 * strictly *need* the second page, but this allows us to
349 * have a single set_pgd() implementation that does not
350 * need to worry about whether it has 4k or 8k to work
351 * with.
352 *
353 * This ensures PGDs are 8k long:
354 */
355#define PTI_USER_PGD_FILL 512
356/* This ensures they are 8k-aligned: */
357#define NEXT_PGD_PAGE(name) \
358 .balign 2 * PAGE_SIZE; \
359GLOBAL(name)
360#else
361#define NEXT_PGD_PAGE(name) NEXT_PAGE(name)
362#define PTI_USER_PGD_FILL 0
363#endif
364
344/* Automate the creation of 1 to 1 mapping pmd entries */ 365/* Automate the creation of 1 to 1 mapping pmd entries */
345#define PMDS(START, PERM, COUNT) \ 366#define PMDS(START, PERM, COUNT) \
346 i = 0 ; \ 367 i = 0 ; \
@@ -350,13 +371,14 @@ GLOBAL(name)
350 .endr 371 .endr
351 372
352 __INITDATA 373 __INITDATA
353NEXT_PAGE(early_top_pgt) 374NEXT_PGD_PAGE(early_top_pgt)
354 .fill 511,8,0 375 .fill 511,8,0
355#ifdef CONFIG_X86_5LEVEL 376#ifdef CONFIG_X86_5LEVEL
356 .quad level4_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC 377 .quad level4_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
357#else 378#else
358 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC 379 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
359#endif 380#endif
381 .fill PTI_USER_PGD_FILL,8,0
360 382
361NEXT_PAGE(early_dynamic_pgts) 383NEXT_PAGE(early_dynamic_pgts)
362 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0 384 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
@@ -364,13 +386,14 @@ NEXT_PAGE(early_dynamic_pgts)
364 .data 386 .data
365 387
366#if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH) 388#if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH)
367NEXT_PAGE(init_top_pgt) 389NEXT_PGD_PAGE(init_top_pgt)
368 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC 390 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
369 .org init_top_pgt + PGD_PAGE_OFFSET*8, 0 391 .org init_top_pgt + PGD_PAGE_OFFSET*8, 0
370 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC 392 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
371 .org init_top_pgt + PGD_START_KERNEL*8, 0 393 .org init_top_pgt + PGD_START_KERNEL*8, 0
372 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ 394 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
373 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC 395 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
396 .fill PTI_USER_PGD_FILL,8,0
374 397
375NEXT_PAGE(level3_ident_pgt) 398NEXT_PAGE(level3_ident_pgt)
376 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC 399 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
@@ -381,8 +404,9 @@ NEXT_PAGE(level2_ident_pgt)
381 */ 404 */
382 PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) 405 PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
383#else 406#else
384NEXT_PAGE(init_top_pgt) 407NEXT_PGD_PAGE(init_top_pgt)
385 .fill 512,8,0 408 .fill 512,8,0
409 .fill PTI_USER_PGD_FILL,8,0
386#endif 410#endif
387 411
388#ifdef CONFIG_X86_5LEVEL 412#ifdef CONFIG_X86_5LEVEL
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index d985cef3984f..56d99be3706a 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -56,7 +56,7 @@ struct idt_data {
56 * Early traps running on the DEFAULT_STACK because the other interrupt 56 * Early traps running on the DEFAULT_STACK because the other interrupt
57 * stacks work only after cpu_init(). 57 * stacks work only after cpu_init().
58 */ 58 */
59static const __initdata struct idt_data early_idts[] = { 59static const __initconst struct idt_data early_idts[] = {
60 INTG(X86_TRAP_DB, debug), 60 INTG(X86_TRAP_DB, debug),
61 SYSG(X86_TRAP_BP, int3), 61 SYSG(X86_TRAP_BP, int3),
62#ifdef CONFIG_X86_32 62#ifdef CONFIG_X86_32
@@ -70,7 +70,7 @@ static const __initdata struct idt_data early_idts[] = {
70 * the traps which use them are reinitialized with IST after cpu_init() has 70 * the traps which use them are reinitialized with IST after cpu_init() has
71 * set up TSS. 71 * set up TSS.
72 */ 72 */
73static const __initdata struct idt_data def_idts[] = { 73static const __initconst struct idt_data def_idts[] = {
74 INTG(X86_TRAP_DE, divide_error), 74 INTG(X86_TRAP_DE, divide_error),
75 INTG(X86_TRAP_NMI, nmi), 75 INTG(X86_TRAP_NMI, nmi),
76 INTG(X86_TRAP_BR, bounds), 76 INTG(X86_TRAP_BR, bounds),
@@ -108,7 +108,7 @@ static const __initdata struct idt_data def_idts[] = {
108/* 108/*
109 * The APIC and SMP idt entries 109 * The APIC and SMP idt entries
110 */ 110 */
111static const __initdata struct idt_data apic_idts[] = { 111static const __initconst struct idt_data apic_idts[] = {
112#ifdef CONFIG_SMP 112#ifdef CONFIG_SMP
113 INTG(RESCHEDULE_VECTOR, reschedule_interrupt), 113 INTG(RESCHEDULE_VECTOR, reschedule_interrupt),
114 INTG(CALL_FUNCTION_VECTOR, call_function_interrupt), 114 INTG(CALL_FUNCTION_VECTOR, call_function_interrupt),
@@ -150,7 +150,7 @@ static const __initdata struct idt_data apic_idts[] = {
150 * Early traps running on the DEFAULT_STACK because the other interrupt 150 * Early traps running on the DEFAULT_STACK because the other interrupt
151 * stacks work only after cpu_init(). 151 * stacks work only after cpu_init().
152 */ 152 */
153static const __initdata struct idt_data early_pf_idts[] = { 153static const __initconst struct idt_data early_pf_idts[] = {
154 INTG(X86_TRAP_PF, page_fault), 154 INTG(X86_TRAP_PF, page_fault),
155}; 155};
156 156
@@ -158,7 +158,7 @@ static const __initdata struct idt_data early_pf_idts[] = {
158 * Override for the debug_idt. Same as the default, but with interrupt 158 * Override for the debug_idt. Same as the default, but with interrupt
159 * stack set to DEFAULT_STACK (0). Required for NMI trap handling. 159 * stack set to DEFAULT_STACK (0). Required for NMI trap handling.
160 */ 160 */
161static const __initdata struct idt_data dbg_idts[] = { 161static const __initconst struct idt_data dbg_idts[] = {
162 INTG(X86_TRAP_DB, debug), 162 INTG(X86_TRAP_DB, debug),
163 INTG(X86_TRAP_BP, int3), 163 INTG(X86_TRAP_BP, int3),
164}; 164};
@@ -180,7 +180,7 @@ gate_desc debug_idt_table[IDT_ENTRIES] __page_aligned_bss;
180 * The exceptions which use Interrupt stacks. They are setup after 180 * The exceptions which use Interrupt stacks. They are setup after
181 * cpu_init() when the TSS has been initialized. 181 * cpu_init() when the TSS has been initialized.
182 */ 182 */
183static const __initdata struct idt_data ist_idts[] = { 183static const __initconst struct idt_data ist_idts[] = {
184 ISTG(X86_TRAP_DB, debug, DEBUG_STACK), 184 ISTG(X86_TRAP_DB, debug, DEBUG_STACK),
185 ISTG(X86_TRAP_NMI, nmi, NMI_STACK), 185 ISTG(X86_TRAP_NMI, nmi, NMI_STACK),
186 SISTG(X86_TRAP_BP, int3, DEBUG_STACK), 186 SISTG(X86_TRAP_BP, int3, DEBUG_STACK),
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index 3feb648781c4..2f723301eb58 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -67,7 +67,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
67 * because the ->io_bitmap_max value must match the bitmap 67 * because the ->io_bitmap_max value must match the bitmap
68 * contents: 68 * contents:
69 */ 69 */
70 tss = &per_cpu(cpu_tss, get_cpu()); 70 tss = &per_cpu(cpu_tss_rw, get_cpu());
71 71
72 if (turn_on) 72 if (turn_on)
73 bitmap_clear(t->io_bitmap_ptr, from, num); 73 bitmap_clear(t->io_bitmap_ptr, from, num);
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 49cfd9fe7589..68e1867cca80 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -219,18 +219,6 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
219 /* high bit used in ret_from_ code */ 219 /* high bit used in ret_from_ code */
220 unsigned vector = ~regs->orig_ax; 220 unsigned vector = ~regs->orig_ax;
221 221
222 /*
223 * NB: Unlike exception entries, IRQ entries do not reliably
224 * handle context tracking in the low-level entry code. This is
225 * because syscall entries execute briefly with IRQs on before
226 * updating context tracking state, so we can take an IRQ from
227 * kernel mode with CONTEXT_USER. The low-level entry code only
228 * updates the context if we came from user mode, so we won't
229 * switch to CONTEXT_KERNEL. We'll fix that once the syscall
230 * code is cleaned up enough that we can cleanly defer enabling
231 * IRQs.
232 */
233
234 entering_irq(); 222 entering_irq();
235 223
236 /* entering_irq() tells RCU that we're not quiescent. Check it. */ 224 /* entering_irq() tells RCU that we're not quiescent. Check it. */
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index a83b3346a0e1..c1bdbd3d3232 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -20,6 +20,7 @@
20#include <linux/mm.h> 20#include <linux/mm.h>
21 21
22#include <asm/apic.h> 22#include <asm/apic.h>
23#include <asm/nospec-branch.h>
23 24
24#ifdef CONFIG_DEBUG_STACKOVERFLOW 25#ifdef CONFIG_DEBUG_STACKOVERFLOW
25 26
@@ -55,11 +56,11 @@ DEFINE_PER_CPU(struct irq_stack *, softirq_stack);
55static void call_on_stack(void *func, void *stack) 56static void call_on_stack(void *func, void *stack)
56{ 57{
57 asm volatile("xchgl %%ebx,%%esp \n" 58 asm volatile("xchgl %%ebx,%%esp \n"
58 "call *%%edi \n" 59 CALL_NOSPEC
59 "movl %%ebx,%%esp \n" 60 "movl %%ebx,%%esp \n"
60 : "=b" (stack) 61 : "=b" (stack)
61 : "0" (stack), 62 : "0" (stack),
62 "D"(func) 63 [thunk_target] "D"(func)
63 : "memory", "cc", "edx", "ecx", "eax"); 64 : "memory", "cc", "edx", "ecx", "eax");
64} 65}
65 66
@@ -95,11 +96,11 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
95 call_on_stack(print_stack_overflow, isp); 96 call_on_stack(print_stack_overflow, isp);
96 97
97 asm volatile("xchgl %%ebx,%%esp \n" 98 asm volatile("xchgl %%ebx,%%esp \n"
98 "call *%%edi \n" 99 CALL_NOSPEC
99 "movl %%ebx,%%esp \n" 100 "movl %%ebx,%%esp \n"
100 : "=a" (arg1), "=b" (isp) 101 : "=a" (arg1), "=b" (isp)
101 : "0" (desc), "1" (isp), 102 : "0" (desc), "1" (isp),
102 "D" (desc->handle_irq) 103 [thunk_target] "D" (desc->handle_irq)
103 : "memory", "cc", "ecx"); 104 : "memory", "cc", "ecx");
104 return 1; 105 return 1;
105} 106}
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 020efbf5786b..d86e344f5b3d 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -57,10 +57,10 @@ static inline void stack_overflow_check(struct pt_regs *regs)
57 if (regs->sp >= estack_top && regs->sp <= estack_bottom) 57 if (regs->sp >= estack_top && regs->sp <= estack_bottom)
58 return; 58 return;
59 59
60 WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx)\n", 60 WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx,ip:%pF)\n",
61 current->comm, curbase, regs->sp, 61 current->comm, curbase, regs->sp,
62 irq_stack_top, irq_stack_bottom, 62 irq_stack_top, irq_stack_bottom,
63 estack_top, estack_bottom); 63 estack_top, estack_bottom, (void *)regs->ip);
64 64
65 if (sysctl_panic_on_stackoverflow) 65 if (sysctl_panic_on_stackoverflow)
66 panic("low stack detected by irq handler - check messages\n"); 66 panic("low stack detected by irq handler - check messages\n");
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 8da3e909e967..a539410c4ea9 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -61,6 +61,9 @@ void __init init_ISA_irqs(void)
61 struct irq_chip *chip = legacy_pic->chip; 61 struct irq_chip *chip = legacy_pic->chip;
62 int i; 62 int i;
63 63
64#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
65 init_bsp_APIC();
66#endif
64 legacy_pic->init(0); 67 legacy_pic->init(0);
65 68
66 for (i = 0; i < nr_legacy_irqs(); i++) 69 for (i = 0; i < nr_legacy_irqs(); i++)
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index e941136e24d8..203d398802a3 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -40,6 +40,7 @@
40#include <asm/debugreg.h> 40#include <asm/debugreg.h>
41#include <asm/set_memory.h> 41#include <asm/set_memory.h>
42#include <asm/sections.h> 42#include <asm/sections.h>
43#include <asm/nospec-branch.h>
43 44
44#include "common.h" 45#include "common.h"
45 46
@@ -203,7 +204,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
203} 204}
204 205
205/* Check whether insn is indirect jump */ 206/* Check whether insn is indirect jump */
206static int insn_is_indirect_jump(struct insn *insn) 207static int __insn_is_indirect_jump(struct insn *insn)
207{ 208{
208 return ((insn->opcode.bytes[0] == 0xff && 209 return ((insn->opcode.bytes[0] == 0xff &&
209 (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ 210 (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
@@ -237,6 +238,26 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
237 return (start <= target && target <= start + len); 238 return (start <= target && target <= start + len);
238} 239}
239 240
241static int insn_is_indirect_jump(struct insn *insn)
242{
243 int ret = __insn_is_indirect_jump(insn);
244
245#ifdef CONFIG_RETPOLINE
246 /*
247 * Jump to x86_indirect_thunk_* is treated as an indirect jump.
248 * Note that even with CONFIG_RETPOLINE=y, the kernel compiled with
249 * older gcc may use indirect jump. So we add this check instead of
250 * replace indirect-jump check.
251 */
252 if (!ret)
253 ret = insn_jump_into_range(insn,
254 (unsigned long)__indirect_thunk_start,
255 (unsigned long)__indirect_thunk_end -
256 (unsigned long)__indirect_thunk_start);
257#endif
258 return ret;
259}
260
240/* Decode whole function to ensure any instructions don't jump into target */ 261/* Decode whole function to ensure any instructions don't jump into target */
241static int can_optimize(unsigned long paddr) 262static int can_optimize(unsigned long paddr)
242{ 263{
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 1c1eae961340..26d713ecad34 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -5,6 +5,11 @@
5 * Copyright (C) 2002 Andi Kleen 5 * Copyright (C) 2002 Andi Kleen
6 * 6 *
7 * This handles calls from both 32bit and 64bit mode. 7 * This handles calls from both 32bit and 64bit mode.
8 *
9 * Lock order:
10 * contex.ldt_usr_sem
11 * mmap_sem
12 * context.lock
8 */ 13 */
9 14
10#include <linux/errno.h> 15#include <linux/errno.h>
@@ -19,6 +24,7 @@
19#include <linux/uaccess.h> 24#include <linux/uaccess.h>
20 25
21#include <asm/ldt.h> 26#include <asm/ldt.h>
27#include <asm/tlb.h>
22#include <asm/desc.h> 28#include <asm/desc.h>
23#include <asm/mmu_context.h> 29#include <asm/mmu_context.h>
24#include <asm/syscalls.h> 30#include <asm/syscalls.h>
@@ -42,17 +48,15 @@ static void refresh_ldt_segments(void)
42#endif 48#endif
43} 49}
44 50
45/* context.lock is held for us, so we don't need any locking. */ 51/* context.lock is held by the task which issued the smp function call */
46static void flush_ldt(void *__mm) 52static void flush_ldt(void *__mm)
47{ 53{
48 struct mm_struct *mm = __mm; 54 struct mm_struct *mm = __mm;
49 mm_context_t *pc;
50 55
51 if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm) 56 if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
52 return; 57 return;
53 58
54 pc = &mm->context; 59 load_mm_ldt(mm);
55 set_ldt(pc->ldt->entries, pc->ldt->nr_entries);
56 60
57 refresh_ldt_segments(); 61 refresh_ldt_segments();
58} 62}
@@ -89,25 +93,143 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
89 return NULL; 93 return NULL;
90 } 94 }
91 95
96 /* The new LDT isn't aliased for PTI yet. */
97 new_ldt->slot = -1;
98
92 new_ldt->nr_entries = num_entries; 99 new_ldt->nr_entries = num_entries;
93 return new_ldt; 100 return new_ldt;
94} 101}
95 102
103/*
104 * If PTI is enabled, this maps the LDT into the kernelmode and
105 * usermode tables for the given mm.
106 *
107 * There is no corresponding unmap function. Even if the LDT is freed, we
108 * leave the PTEs around until the slot is reused or the mm is destroyed.
109 * This is harmless: the LDT is always in ordinary memory, and no one will
110 * access the freed slot.
111 *
112 * If we wanted to unmap freed LDTs, we'd also need to do a flush to make
113 * it useful, and the flush would slow down modify_ldt().
114 */
115static int
116map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
117{
118#ifdef CONFIG_PAGE_TABLE_ISOLATION
119 bool is_vmalloc, had_top_level_entry;
120 unsigned long va;
121 spinlock_t *ptl;
122 pgd_t *pgd;
123 int i;
124
125 if (!static_cpu_has(X86_FEATURE_PTI))
126 return 0;
127
128 /*
129 * Any given ldt_struct should have map_ldt_struct() called at most
130 * once.
131 */
132 WARN_ON(ldt->slot != -1);
133
134 /*
135 * Did we already have the top level entry allocated? We can't
136 * use pgd_none() for this because it doens't do anything on
137 * 4-level page table kernels.
138 */
139 pgd = pgd_offset(mm, LDT_BASE_ADDR);
140 had_top_level_entry = (pgd->pgd != 0);
141
142 is_vmalloc = is_vmalloc_addr(ldt->entries);
143
144 for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) {
145 unsigned long offset = i << PAGE_SHIFT;
146 const void *src = (char *)ldt->entries + offset;
147 unsigned long pfn;
148 pte_t pte, *ptep;
149
150 va = (unsigned long)ldt_slot_va(slot) + offset;
151 pfn = is_vmalloc ? vmalloc_to_pfn(src) :
152 page_to_pfn(virt_to_page(src));
153 /*
154 * Treat the PTI LDT range as a *userspace* range.
155 * get_locked_pte() will allocate all needed pagetables
156 * and account for them in this mm.
157 */
158 ptep = get_locked_pte(mm, va, &ptl);
159 if (!ptep)
160 return -ENOMEM;
161 /*
162 * Map it RO so the easy to find address is not a primary
163 * target via some kernel interface which misses a
164 * permission check.
165 */
166 pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL));
167 set_pte_at(mm, va, ptep, pte);
168 pte_unmap_unlock(ptep, ptl);
169 }
170
171 if (mm->context.ldt) {
172 /*
173 * We already had an LDT. The top-level entry should already
174 * have been allocated and synchronized with the usermode
175 * tables.
176 */
177 WARN_ON(!had_top_level_entry);
178 if (static_cpu_has(X86_FEATURE_PTI))
179 WARN_ON(!kernel_to_user_pgdp(pgd)->pgd);
180 } else {
181 /*
182 * This is the first time we're mapping an LDT for this process.
183 * Sync the pgd to the usermode tables.
184 */
185 WARN_ON(had_top_level_entry);
186 if (static_cpu_has(X86_FEATURE_PTI)) {
187 WARN_ON(kernel_to_user_pgdp(pgd)->pgd);
188 set_pgd(kernel_to_user_pgdp(pgd), *pgd);
189 }
190 }
191
192 va = (unsigned long)ldt_slot_va(slot);
193 flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0);
194
195 ldt->slot = slot;
196#endif
197 return 0;
198}
199
200static void free_ldt_pgtables(struct mm_struct *mm)
201{
202#ifdef CONFIG_PAGE_TABLE_ISOLATION
203 struct mmu_gather tlb;
204 unsigned long start = LDT_BASE_ADDR;
205 unsigned long end = start + (1UL << PGDIR_SHIFT);
206
207 if (!static_cpu_has(X86_FEATURE_PTI))
208 return;
209
210 tlb_gather_mmu(&tlb, mm, start, end);
211 free_pgd_range(&tlb, start, end, start, end);
212 tlb_finish_mmu(&tlb, start, end);
213#endif
214}
215
96/* After calling this, the LDT is immutable. */ 216/* After calling this, the LDT is immutable. */
97static void finalize_ldt_struct(struct ldt_struct *ldt) 217static void finalize_ldt_struct(struct ldt_struct *ldt)
98{ 218{
99 paravirt_alloc_ldt(ldt->entries, ldt->nr_entries); 219 paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
100} 220}
101 221
102/* context.lock is held */ 222static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
103static void install_ldt(struct mm_struct *current_mm,
104 struct ldt_struct *ldt)
105{ 223{
224 mutex_lock(&mm->context.lock);
225
106 /* Synchronizes with READ_ONCE in load_mm_ldt. */ 226 /* Synchronizes with READ_ONCE in load_mm_ldt. */
107 smp_store_release(&current_mm->context.ldt, ldt); 227 smp_store_release(&mm->context.ldt, ldt);
108 228
109 /* Activate the LDT for all CPUs using current_mm. */ 229 /* Activate the LDT for all CPUs using currents mm. */
110 on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true); 230 on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
231
232 mutex_unlock(&mm->context.lock);
111} 233}
112 234
113static void free_ldt_struct(struct ldt_struct *ldt) 235static void free_ldt_struct(struct ldt_struct *ldt)
@@ -124,27 +246,20 @@ static void free_ldt_struct(struct ldt_struct *ldt)
124} 246}
125 247
126/* 248/*
127 * we do not have to muck with descriptors here, that is 249 * Called on fork from arch_dup_mmap(). Just copy the current LDT state,
128 * done in switch_mm() as needed. 250 * the new task is not running, so nothing can be installed.
129 */ 251 */
130int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm) 252int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
131{ 253{
132 struct ldt_struct *new_ldt; 254 struct ldt_struct *new_ldt;
133 struct mm_struct *old_mm;
134 int retval = 0; 255 int retval = 0;
135 256
136 mutex_init(&mm->context.lock); 257 if (!old_mm)
137 old_mm = current->mm;
138 if (!old_mm) {
139 mm->context.ldt = NULL;
140 return 0; 258 return 0;
141 }
142 259
143 mutex_lock(&old_mm->context.lock); 260 mutex_lock(&old_mm->context.lock);
144 if (!old_mm->context.ldt) { 261 if (!old_mm->context.ldt)
145 mm->context.ldt = NULL;
146 goto out_unlock; 262 goto out_unlock;
147 }
148 263
149 new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries); 264 new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
150 if (!new_ldt) { 265 if (!new_ldt) {
@@ -156,6 +271,12 @@ int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm)
156 new_ldt->nr_entries * LDT_ENTRY_SIZE); 271 new_ldt->nr_entries * LDT_ENTRY_SIZE);
157 finalize_ldt_struct(new_ldt); 272 finalize_ldt_struct(new_ldt);
158 273
274 retval = map_ldt_struct(mm, new_ldt, 0);
275 if (retval) {
276 free_ldt_pgtables(mm);
277 free_ldt_struct(new_ldt);
278 goto out_unlock;
279 }
159 mm->context.ldt = new_ldt; 280 mm->context.ldt = new_ldt;
160 281
161out_unlock: 282out_unlock:
@@ -174,13 +295,18 @@ void destroy_context_ldt(struct mm_struct *mm)
174 mm->context.ldt = NULL; 295 mm->context.ldt = NULL;
175} 296}
176 297
298void ldt_arch_exit_mmap(struct mm_struct *mm)
299{
300 free_ldt_pgtables(mm);
301}
302
177static int read_ldt(void __user *ptr, unsigned long bytecount) 303static int read_ldt(void __user *ptr, unsigned long bytecount)
178{ 304{
179 struct mm_struct *mm = current->mm; 305 struct mm_struct *mm = current->mm;
180 unsigned long entries_size; 306 unsigned long entries_size;
181 int retval; 307 int retval;
182 308
183 mutex_lock(&mm->context.lock); 309 down_read(&mm->context.ldt_usr_sem);
184 310
185 if (!mm->context.ldt) { 311 if (!mm->context.ldt) {
186 retval = 0; 312 retval = 0;
@@ -209,7 +335,7 @@ static int read_ldt(void __user *ptr, unsigned long bytecount)
209 retval = bytecount; 335 retval = bytecount;
210 336
211out_unlock: 337out_unlock:
212 mutex_unlock(&mm->context.lock); 338 up_read(&mm->context.ldt_usr_sem);
213 return retval; 339 return retval;
214} 340}
215 341
@@ -269,7 +395,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
269 ldt.avl = 0; 395 ldt.avl = 0;
270 } 396 }
271 397
272 mutex_lock(&mm->context.lock); 398 if (down_write_killable(&mm->context.ldt_usr_sem))
399 return -EINTR;
273 400
274 old_ldt = mm->context.ldt; 401 old_ldt = mm->context.ldt;
275 old_nr_entries = old_ldt ? old_ldt->nr_entries : 0; 402 old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
@@ -286,12 +413,31 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
286 new_ldt->entries[ldt_info.entry_number] = ldt; 413 new_ldt->entries[ldt_info.entry_number] = ldt;
287 finalize_ldt_struct(new_ldt); 414 finalize_ldt_struct(new_ldt);
288 415
416 /*
417 * If we are using PTI, map the new LDT into the userspace pagetables.
418 * If there is already an LDT, use the other slot so that other CPUs
419 * will continue to use the old LDT until install_ldt() switches
420 * them over to the new LDT.
421 */
422 error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
423 if (error) {
424 /*
425 * This only can fail for the first LDT setup. If an LDT is
426 * already installed then the PTE page is already
427 * populated. Mop up a half populated page table.
428 */
429 if (!WARN_ON_ONCE(old_ldt))
430 free_ldt_pgtables(mm);
431 free_ldt_struct(new_ldt);
432 goto out_unlock;
433 }
434
289 install_ldt(mm, new_ldt); 435 install_ldt(mm, new_ldt);
290 free_ldt_struct(old_ldt); 436 free_ldt_struct(old_ldt);
291 error = 0; 437 error = 0;
292 438
293out_unlock: 439out_unlock:
294 mutex_unlock(&mm->context.lock); 440 up_write(&mm->context.ldt_usr_sem);
295out: 441out:
296 return error; 442 return error;
297} 443}
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 00bc751c861c..edfede768688 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -48,8 +48,6 @@ static void load_segments(void)
48 "\tmovl $"STR(__KERNEL_DS)",%%eax\n" 48 "\tmovl $"STR(__KERNEL_DS)",%%eax\n"
49 "\tmovl %%eax,%%ds\n" 49 "\tmovl %%eax,%%ds\n"
50 "\tmovl %%eax,%%es\n" 50 "\tmovl %%eax,%%es\n"
51 "\tmovl %%eax,%%fs\n"
52 "\tmovl %%eax,%%gs\n"
53 "\tmovl %%eax,%%ss\n" 51 "\tmovl %%eax,%%ss\n"
54 : : : "eax", "memory"); 52 : : : "eax", "memory");
55#undef STR 53#undef STR
@@ -232,8 +230,8 @@ void machine_kexec(struct kimage *image)
232 * The gdt & idt are now invalid. 230 * The gdt & idt are now invalid.
233 * If you want to load them you must set up your own idt & gdt. 231 * If you want to load them you must set up your own idt & gdt.
234 */ 232 */
235 set_gdt(phys_to_virt(0), 0);
236 idt_invalidate(phys_to_virt(0)); 233 idt_invalidate(phys_to_virt(0));
234 set_gdt(phys_to_virt(0), 0);
237 235
238 /* now call it */ 236 /* now call it */
239 image->start = relocate_kernel_ptr((unsigned long)image->head, 237 image->start = relocate_kernel_ptr((unsigned long)image->head,
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index ac0be8283325..9edadabf04f6 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -10,7 +10,6 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
10DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax"); 10DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
11DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax"); 11DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
12DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3"); 12DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
13DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
14DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd"); 13DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
15 14
16DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq"); 15DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
@@ -60,7 +59,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
60 PATCH_SITE(pv_mmu_ops, read_cr2); 59 PATCH_SITE(pv_mmu_ops, read_cr2);
61 PATCH_SITE(pv_mmu_ops, read_cr3); 60 PATCH_SITE(pv_mmu_ops, read_cr3);
62 PATCH_SITE(pv_mmu_ops, write_cr3); 61 PATCH_SITE(pv_mmu_ops, write_cr3);
63 PATCH_SITE(pv_mmu_ops, flush_tlb_single);
64 PATCH_SITE(pv_cpu_ops, wbinvd); 62 PATCH_SITE(pv_cpu_ops, wbinvd);
65#if defined(CONFIG_PARAVIRT_SPINLOCKS) 63#if defined(CONFIG_PARAVIRT_SPINLOCKS)
66 case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock): 64 case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 97fb3e5737f5..cb368c2a22ab 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -47,7 +47,7 @@
47 * section. Since TSS's are completely CPU-local, we want them 47 * section. Since TSS's are completely CPU-local, we want them
48 * on exact cacheline boundaries, to eliminate cacheline ping-pong. 48 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
49 */ 49 */
50__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = { 50__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
51 .x86_tss = { 51 .x86_tss = {
52 /* 52 /*
53 * .sp0 is only used when entering ring 0 from a lower 53 * .sp0 is only used when entering ring 0 from a lower
@@ -56,6 +56,16 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
56 * Poison it. 56 * Poison it.
57 */ 57 */
58 .sp0 = (1UL << (BITS_PER_LONG-1)) + 1, 58 .sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
59
60#ifdef CONFIG_X86_64
61 /*
62 * .sp1 is cpu_current_top_of_stack. The init task never
63 * runs user code, but cpu_current_top_of_stack should still
64 * be well defined before the first context switch.
65 */
66 .sp1 = TOP_OF_INIT_STACK,
67#endif
68
59#ifdef CONFIG_X86_32 69#ifdef CONFIG_X86_32
60 .ss0 = __KERNEL_DS, 70 .ss0 = __KERNEL_DS,
61 .ss1 = __KERNEL_CS, 71 .ss1 = __KERNEL_CS,
@@ -71,11 +81,8 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
71 */ 81 */
72 .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, 82 .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },
73#endif 83#endif
74#ifdef CONFIG_X86_32
75 .SYSENTER_stack_canary = STACK_END_MAGIC,
76#endif
77}; 84};
78EXPORT_PER_CPU_SYMBOL(cpu_tss); 85EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);
79 86
80DEFINE_PER_CPU(bool, __tss_limit_invalid); 87DEFINE_PER_CPU(bool, __tss_limit_invalid);
81EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid); 88EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
@@ -104,7 +111,7 @@ void exit_thread(struct task_struct *tsk)
104 struct fpu *fpu = &t->fpu; 111 struct fpu *fpu = &t->fpu;
105 112
106 if (bp) { 113 if (bp) {
107 struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu()); 114 struct tss_struct *tss = &per_cpu(cpu_tss_rw, get_cpu());
108 115
109 t->io_bitmap_ptr = NULL; 116 t->io_bitmap_ptr = NULL;
110 clear_thread_flag(TIF_IO_BITMAP); 117 clear_thread_flag(TIF_IO_BITMAP);
@@ -299,7 +306,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
299 } 306 }
300 307
301 if ((tifp ^ tifn) & _TIF_NOTSC) 308 if ((tifp ^ tifn) & _TIF_NOTSC)
302 cr4_toggle_bits(X86_CR4_TSD); 309 cr4_toggle_bits_irqsoff(X86_CR4_TSD);
303 310
304 if ((tifp ^ tifn) & _TIF_NOCPUID) 311 if ((tifp ^ tifn) & _TIF_NOCPUID)
305 set_cpuid_faulting(!!(tifn & _TIF_NOCPUID)); 312 set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
@@ -373,19 +380,24 @@ void stop_this_cpu(void *dummy)
373 disable_local_APIC(); 380 disable_local_APIC();
374 mcheck_cpu_clear(this_cpu_ptr(&cpu_info)); 381 mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
375 382
383 /*
384 * Use wbinvd on processors that support SME. This provides support
385 * for performing a successful kexec when going from SME inactive
386 * to SME active (or vice-versa). The cache must be cleared so that
387 * if there are entries with the same physical address, both with and
388 * without the encryption bit, they don't race each other when flushed
389 * and potentially end up with the wrong entry being committed to
390 * memory.
391 */
392 if (boot_cpu_has(X86_FEATURE_SME))
393 native_wbinvd();
376 for (;;) { 394 for (;;) {
377 /* 395 /*
378 * Use wbinvd followed by hlt to stop the processor. This 396 * Use native_halt() so that memory contents don't change
379 * provides support for kexec on a processor that supports 397 * (stack usage and variables) after possibly issuing the
380 * SME. With kexec, going from SME inactive to SME active 398 * native_wbinvd() above.
381 * requires clearing cache entries so that addresses without
382 * the encryption bit set don't corrupt the same physical
383 * address that has the encryption bit set when caches are
384 * flushed. To achieve this a wbinvd is performed followed by
385 * a hlt. Even if the processor is not in the kexec/SME
386 * scenario this only adds a wbinvd to a halting processor.
387 */ 399 */
388 asm volatile("wbinvd; hlt" : : : "memory"); 400 native_halt();
389 } 401 }
390} 402}
391 403
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 45bf0c5f93e1..5224c6099184 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -234,7 +234,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
234 struct fpu *prev_fpu = &prev->fpu; 234 struct fpu *prev_fpu = &prev->fpu;
235 struct fpu *next_fpu = &next->fpu; 235 struct fpu *next_fpu = &next->fpu;
236 int cpu = smp_processor_id(); 236 int cpu = smp_processor_id();
237 struct tss_struct *tss = &per_cpu(cpu_tss, cpu); 237 struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
238 238
239 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ 239 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
240 240
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index eeeb34f85c25..c75466232016 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -69,9 +69,8 @@ void __show_regs(struct pt_regs *regs, int all)
69 unsigned int fsindex, gsindex; 69 unsigned int fsindex, gsindex;
70 unsigned int ds, cs, es; 70 unsigned int ds, cs, es;
71 71
72 printk(KERN_DEFAULT "RIP: %04lx:%pS\n", regs->cs, (void *)regs->ip); 72 show_iret_regs(regs);
73 printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx", regs->ss, 73
74 regs->sp, regs->flags);
75 if (regs->orig_ax != -1) 74 if (regs->orig_ax != -1)
76 pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax); 75 pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
77 else 76 else
@@ -88,6 +87,9 @@ void __show_regs(struct pt_regs *regs, int all)
88 printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n", 87 printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
89 regs->r13, regs->r14, regs->r15); 88 regs->r13, regs->r14, regs->r15);
90 89
90 if (!all)
91 return;
92
91 asm("movl %%ds,%0" : "=r" (ds)); 93 asm("movl %%ds,%0" : "=r" (ds));
92 asm("movl %%cs,%0" : "=r" (cs)); 94 asm("movl %%cs,%0" : "=r" (cs));
93 asm("movl %%es,%0" : "=r" (es)); 95 asm("movl %%es,%0" : "=r" (es));
@@ -98,9 +100,6 @@ void __show_regs(struct pt_regs *regs, int all)
98 rdmsrl(MSR_GS_BASE, gs); 100 rdmsrl(MSR_GS_BASE, gs);
99 rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); 101 rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
100 102
101 if (!all)
102 return;
103
104 cr0 = read_cr0(); 103 cr0 = read_cr0();
105 cr2 = read_cr2(); 104 cr2 = read_cr2();
106 cr3 = __read_cr3(); 105 cr3 = __read_cr3();
@@ -400,7 +399,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
400 struct fpu *prev_fpu = &prev->fpu; 399 struct fpu *prev_fpu = &prev->fpu;
401 struct fpu *next_fpu = &next->fpu; 400 struct fpu *next_fpu = &next->fpu;
402 int cpu = smp_processor_id(); 401 int cpu = smp_processor_id();
403 struct tss_struct *tss = &per_cpu(cpu_tss, cpu); 402 struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
404 403
405 WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) && 404 WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
406 this_cpu_read(irq_count) != -1); 405 this_cpu_read(irq_count) != -1);
@@ -462,6 +461,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
462 * Switch the PDA and FPU contexts. 461 * Switch the PDA and FPU contexts.
463 */ 462 */
464 this_cpu_write(current_task, next_p); 463 this_cpu_write(current_task, next_p);
464 this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
465 465
466 /* Reload sp0. */ 466 /* Reload sp0. */
467 update_sp0(next_p); 467 update_sp0(next_p);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 8af2e8d0c0a1..68d7ab81c62f 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -364,16 +364,6 @@ static void __init reserve_initrd(void)
364 !ramdisk_image || !ramdisk_size) 364 !ramdisk_image || !ramdisk_size)
365 return; /* No initrd provided by bootloader */ 365 return; /* No initrd provided by bootloader */
366 366
367 /*
368 * If SME is active, this memory will be marked encrypted by the
369 * kernel when it is accessed (including relocation). However, the
370 * ramdisk image was loaded decrypted by the bootloader, so make
371 * sure that it is encrypted before accessing it. For SEV the
372 * ramdisk will already be encrypted, so only do this for SME.
373 */
374 if (sme_active())
375 sme_early_encrypt(ramdisk_image, ramdisk_end - ramdisk_image);
376
377 initrd_start = 0; 367 initrd_start = 0;
378 368
379 mapped_size = memblock_mem_size(max_pfn_mapped); 369 mapped_size = memblock_mem_size(max_pfn_mapped);
@@ -906,9 +896,6 @@ void __init setup_arch(char **cmdline_p)
906 set_bit(EFI_BOOT, &efi.flags); 896 set_bit(EFI_BOOT, &efi.flags);
907 set_bit(EFI_64BIT, &efi.flags); 897 set_bit(EFI_64BIT, &efi.flags);
908 } 898 }
909
910 if (efi_enabled(EFI_BOOT))
911 efi_memblock_x86_reserve_range();
912#endif 899#endif
913 900
914 x86_init.oem.arch_setup(); 901 x86_init.oem.arch_setup();
@@ -962,6 +949,8 @@ void __init setup_arch(char **cmdline_p)
962 949
963 parse_early_param(); 950 parse_early_param();
964 951
952 if (efi_enabled(EFI_BOOT))
953 efi_memblock_x86_reserve_range();
965#ifdef CONFIG_MEMORY_HOTPLUG 954#ifdef CONFIG_MEMORY_HOTPLUG
966 /* 955 /*
967 * Memory used by the kernel cannot be hot-removed because Linux 956 * Memory used by the kernel cannot be hot-removed because Linux
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 3d01df7d7cf6..ed556d50d7ed 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -106,7 +106,7 @@ EXPORT_SYMBOL(__max_logical_packages);
106static unsigned int logical_packages __read_mostly; 106static unsigned int logical_packages __read_mostly;
107 107
108/* Maximum number of SMT threads on any online core */ 108/* Maximum number of SMT threads on any online core */
109int __max_smt_threads __read_mostly; 109int __read_mostly __max_smt_threads = 1;
110 110
111/* Flag to indicate if a complete sched domain rebuild is required */ 111/* Flag to indicate if a complete sched domain rebuild is required */
112bool x86_topology_update; 112bool x86_topology_update;
@@ -126,14 +126,10 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
126 spin_lock_irqsave(&rtc_lock, flags); 126 spin_lock_irqsave(&rtc_lock, flags);
127 CMOS_WRITE(0xa, 0xf); 127 CMOS_WRITE(0xa, 0xf);
128 spin_unlock_irqrestore(&rtc_lock, flags); 128 spin_unlock_irqrestore(&rtc_lock, flags);
129 local_flush_tlb();
130 pr_debug("1.\n");
131 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) = 129 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
132 start_eip >> 4; 130 start_eip >> 4;
133 pr_debug("2.\n");
134 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 131 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
135 start_eip & 0xf; 132 start_eip & 0xf;
136 pr_debug("3.\n");
137} 133}
138 134
139static inline void smpboot_restore_warm_reset_vector(void) 135static inline void smpboot_restore_warm_reset_vector(void)
@@ -141,11 +137,6 @@ static inline void smpboot_restore_warm_reset_vector(void)
141 unsigned long flags; 137 unsigned long flags;
142 138
143 /* 139 /*
144 * Install writable page 0 entry to set BIOS data area.
145 */
146 local_flush_tlb();
147
148 /*
149 * Paranoid: Set warm reset code and vector here back 140 * Paranoid: Set warm reset code and vector here back
150 * to default values. 141 * to default values.
151 */ 142 */
@@ -237,7 +228,7 @@ static void notrace start_secondary(void *unused)
237 load_cr3(swapper_pg_dir); 228 load_cr3(swapper_pg_dir);
238 __flush_tlb_all(); 229 __flush_tlb_all();
239#endif 230#endif
240 231 load_current_idt();
241 cpu_init(); 232 cpu_init();
242 x86_cpuinit.early_percpu_clock_init(); 233 x86_cpuinit.early_percpu_clock_init();
243 preempt_disable(); 234 preempt_disable();
@@ -932,12 +923,8 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
932 initial_code = (unsigned long)start_secondary; 923 initial_code = (unsigned long)start_secondary;
933 initial_stack = idle->thread.sp; 924 initial_stack = idle->thread.sp;
934 925
935 /* 926 /* Enable the espfix hack for this CPU */
936 * Enable the espfix hack for this CPU
937 */
938#ifdef CONFIG_X86_ESPFIX64
939 init_espfix_ap(cpu); 927 init_espfix_ap(cpu);
940#endif
941 928
942 /* So we see what's up */ 929 /* So we see what's up */
943 announce_cpu(cpu, apicid); 930 announce_cpu(cpu, apicid);
@@ -1304,7 +1291,7 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
1304 * Today neither Intel nor AMD support heterogeneous systems so 1291 * Today neither Intel nor AMD support heterogeneous systems so
1305 * extrapolate the boot cpu's data to all packages. 1292 * extrapolate the boot cpu's data to all packages.
1306 */ 1293 */
1307 ncpus = cpu_data(0).booted_cores * smp_num_siblings; 1294 ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
1308 __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus); 1295 __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus);
1309 pr_info("Max logical packages: %u\n", __max_logical_packages); 1296 pr_info("Max logical packages: %u\n", __max_logical_packages);
1310 1297
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 77835bc021c7..093f2ea5dd56 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -102,7 +102,7 @@ __save_stack_trace_reliable(struct stack_trace *trace,
102 for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state); 102 for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state);
103 unwind_next_frame(&state)) { 103 unwind_next_frame(&state)) {
104 104
105 regs = unwind_get_entry_regs(&state); 105 regs = unwind_get_entry_regs(&state, NULL);
106 if (regs) { 106 if (regs) {
107 /* 107 /*
108 * Kernel mode registers on the stack indicate an 108 * Kernel mode registers on the stack indicate an
@@ -164,8 +164,12 @@ int save_stack_trace_tsk_reliable(struct task_struct *tsk,
164{ 164{
165 int ret; 165 int ret;
166 166
167 /*
168 * If the task doesn't have a stack (e.g., a zombie), the stack is
169 * "reliably" empty.
170 */
167 if (!try_get_task_stack(tsk)) 171 if (!try_get_task_stack(tsk))
168 return -EINVAL; 172 return 0;
169 173
170 ret = __save_stack_trace_reliable(trace, tsk); 174 ret = __save_stack_trace_reliable(trace, tsk);
171 175
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index a4eb27918ceb..a2486f444073 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -138,6 +138,17 @@ static int map_tboot_page(unsigned long vaddr, unsigned long pfn,
138 return -1; 138 return -1;
139 set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot)); 139 set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot));
140 pte_unmap(pte); 140 pte_unmap(pte);
141
142 /*
143 * PTI poisons low addresses in the kernel page tables in the
144 * name of making them unusable for userspace. To execute
145 * code at such a low address, the poison must be cleared.
146 *
147 * Note: 'pgd' actually gets set in p4d_alloc() _or_
148 * pud_alloc() depending on 4/5-level paging.
149 */
150 pgd->pgd &= ~_PAGE_NX;
151
141 return 0; 152 return 0;
142} 153}
143 154
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index 9a9c9b076955..a5b802a12212 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -93,17 +93,10 @@ static void set_tls_desc(struct task_struct *p, int idx,
93 cpu = get_cpu(); 93 cpu = get_cpu();
94 94
95 while (n-- > 0) { 95 while (n-- > 0) {
96 if (LDT_empty(info) || LDT_zero(info)) { 96 if (LDT_empty(info) || LDT_zero(info))
97 memset(desc, 0, sizeof(*desc)); 97 memset(desc, 0, sizeof(*desc));
98 } else { 98 else
99 fill_ldt(desc, info); 99 fill_ldt(desc, info);
100
101 /*
102 * Always set the accessed bit so that the CPU
103 * doesn't try to write to the (read-only) GDT.
104 */
105 desc->type |= 1;
106 }
107 ++info; 100 ++info;
108 ++desc; 101 ++desc;
109 } 102 }
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 989514c94a55..446c9ef8cfc3 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -51,6 +51,7 @@
51#include <asm/traps.h> 51#include <asm/traps.h>
52#include <asm/desc.h> 52#include <asm/desc.h>
53#include <asm/fpu/internal.h> 53#include <asm/fpu/internal.h>
54#include <asm/cpu_entry_area.h>
54#include <asm/mce.h> 55#include <asm/mce.h>
55#include <asm/fixmap.h> 56#include <asm/fixmap.h>
56#include <asm/mach_traps.h> 57#include <asm/mach_traps.h>
@@ -348,23 +349,42 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
348 349
349 /* 350 /*
350 * If IRET takes a non-IST fault on the espfix64 stack, then we 351 * If IRET takes a non-IST fault on the espfix64 stack, then we
351 * end up promoting it to a doublefault. In that case, modify 352 * end up promoting it to a doublefault. In that case, take
352 * the stack to make it look like we just entered the #GP 353 * advantage of the fact that we're not using the normal (TSS.sp0)
353 * handler from user space, similar to bad_iret. 354 * stack right now. We can write a fake #GP(0) frame at TSS.sp0
355 * and then modify our own IRET frame so that, when we return,
356 * we land directly at the #GP(0) vector with the stack already
357 * set up according to its expectations.
358 *
359 * The net result is that our #GP handler will think that we
360 * entered from usermode with the bad user context.
354 * 361 *
355 * No need for ist_enter here because we don't use RCU. 362 * No need for ist_enter here because we don't use RCU.
356 */ 363 */
357 if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY && 364 if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
358 regs->cs == __KERNEL_CS && 365 regs->cs == __KERNEL_CS &&
359 regs->ip == (unsigned long)native_irq_return_iret) 366 regs->ip == (unsigned long)native_irq_return_iret)
360 { 367 {
361 struct pt_regs *normal_regs = task_pt_regs(current); 368 struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
362 369
363 /* Fake a #GP(0) from userspace. */ 370 /*
364 memmove(&normal_regs->ip, (void *)regs->sp, 5*8); 371 * regs->sp points to the failing IRET frame on the
365 normal_regs->orig_ax = 0; /* Missing (lost) #GP error code */ 372 * ESPFIX64 stack. Copy it to the entry stack. This fills
373 * in gpregs->ss through gpregs->ip.
374 *
375 */
376 memmove(&gpregs->ip, (void *)regs->sp, 5*8);
377 gpregs->orig_ax = 0; /* Missing (lost) #GP error code */
378
379 /*
380 * Adjust our frame so that we return straight to the #GP
381 * vector with the expected RSP value. This is safe because
382 * we won't enable interrupts or schedule before we invoke
383 * general_protection, so nothing will clobber the stack
384 * frame we just set up.
385 */
366 regs->ip = (unsigned long)general_protection; 386 regs->ip = (unsigned long)general_protection;
367 regs->sp = (unsigned long)&normal_regs->orig_ax; 387 regs->sp = (unsigned long)&gpregs->orig_ax;
368 388
369 return; 389 return;
370 } 390 }
@@ -389,7 +409,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
389 * 409 *
390 * Processors update CR2 whenever a page fault is detected. If a 410 * Processors update CR2 whenever a page fault is detected. If a
391 * second page fault occurs while an earlier page fault is being 411 * second page fault occurs while an earlier page fault is being
392 * deliv- ered, the faulting linear address of the second fault will 412 * delivered, the faulting linear address of the second fault will
393 * overwrite the contents of CR2 (replacing the previous 413 * overwrite the contents of CR2 (replacing the previous
394 * address). These updates to CR2 occur even if the page fault 414 * address). These updates to CR2 occur even if the page fault
395 * results in a double fault or occurs during the delivery of a 415 * results in a double fault or occurs during the delivery of a
@@ -605,14 +625,15 @@ NOKPROBE_SYMBOL(do_int3);
605 625
606#ifdef CONFIG_X86_64 626#ifdef CONFIG_X86_64
607/* 627/*
608 * Help handler running on IST stack to switch off the IST stack if the 628 * Help handler running on a per-cpu (IST or entry trampoline) stack
609 * interrupted code was in user mode. The actual stack switch is done in 629 * to switch to the normal thread stack if the interrupted code was in
610 * entry_64.S 630 * user mode. The actual stack switch is done in entry_64.S
611 */ 631 */
612asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs) 632asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
613{ 633{
614 struct pt_regs *regs = task_pt_regs(current); 634 struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1;
615 *regs = *eregs; 635 if (regs != eregs)
636 *regs = *eregs;
616 return regs; 637 return regs;
617} 638}
618NOKPROBE_SYMBOL(sync_regs); 639NOKPROBE_SYMBOL(sync_regs);
@@ -628,13 +649,13 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
628 /* 649 /*
629 * This is called from entry_64.S early in handling a fault 650 * This is called from entry_64.S early in handling a fault
630 * caused by a bad iret to user mode. To handle the fault 651 * caused by a bad iret to user mode. To handle the fault
631 * correctly, we want move our stack frame to task_pt_regs 652 * correctly, we want to move our stack frame to where it would
632 * and we want to pretend that the exception came from the 653 * be had we entered directly on the entry stack (rather than
633 * iret target. 654 * just below the IRET frame) and we want to pretend that the
655 * exception came from the IRET target.
634 */ 656 */
635 struct bad_iret_stack *new_stack = 657 struct bad_iret_stack *new_stack =
636 container_of(task_pt_regs(current), 658 (struct bad_iret_stack *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
637 struct bad_iret_stack, regs);
638 659
639 /* Copy the IRET target to the new stack. */ 660 /* Copy the IRET target to the new stack. */
640 memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8); 661 memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
@@ -795,14 +816,6 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
795 debug_stack_usage_dec(); 816 debug_stack_usage_dec();
796 817
797exit: 818exit:
798#if defined(CONFIG_X86_32)
799 /*
800 * This is the most likely code path that involves non-trivial use
801 * of the SYSENTER stack. Check that we haven't overrun it.
802 */
803 WARN(this_cpu_read(cpu_tss.SYSENTER_stack_canary) != STACK_END_MAGIC,
804 "Overran or corrupted SYSENTER stack\n");
805#endif
806 ist_exit(regs); 819 ist_exit(regs);
807} 820}
808NOKPROBE_SYMBOL(do_debug); 821NOKPROBE_SYMBOL(do_debug);
@@ -929,6 +942,9 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
929 942
930void __init trap_init(void) 943void __init trap_init(void)
931{ 944{
945 /* Init cpu_entry_area before IST entries are set up */
946 setup_cpu_entry_areas();
947
932 idt_setup_traps(); 948 idt_setup_traps();
933 949
934 /* 950 /*
@@ -936,8 +952,9 @@ void __init trap_init(void)
936 * "sidt" instruction will not leak the location of the kernel, and 952 * "sidt" instruction will not leak the location of the kernel, and
937 * to defend the IDT against arbitrary memory write vulnerabilities. 953 * to defend the IDT against arbitrary memory write vulnerabilities.
938 * It will be reloaded in cpu_init() */ 954 * It will be reloaded in cpu_init() */
939 __set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO); 955 cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table),
940 idt_descr.address = fix_to_virt(FIX_RO_IDT); 956 PAGE_KERNEL_RO);
957 idt_descr.address = CPU_ENTRY_AREA_RO_IDT;
941 958
942 /* 959 /*
943 * Should be a barrier for any external CPU state: 960 * Should be a barrier for any external CPU state:
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 8ea117f8142e..e169e85db434 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -602,7 +602,6 @@ unsigned long native_calibrate_tsc(void)
602 case INTEL_FAM6_KABYLAKE_DESKTOP: 602 case INTEL_FAM6_KABYLAKE_DESKTOP:
603 crystal_khz = 24000; /* 24.0 MHz */ 603 crystal_khz = 24000; /* 24.0 MHz */
604 break; 604 break;
605 case INTEL_FAM6_SKYLAKE_X:
606 case INTEL_FAM6_ATOM_DENVERTON: 605 case INTEL_FAM6_ATOM_DENVERTON:
607 crystal_khz = 25000; /* 25.0 MHz */ 606 crystal_khz = 25000; /* 25.0 MHz */
608 break; 607 break;
@@ -612,6 +611,8 @@ unsigned long native_calibrate_tsc(void)
612 } 611 }
613 } 612 }
614 613
614 if (crystal_khz == 0)
615 return 0;
615 /* 616 /*
616 * TSC frequency determined by CPUID is a "hardware reported" 617 * TSC frequency determined by CPUID is a "hardware reported"
617 * frequency and is the most accurate one so far we have. This 618 * frequency and is the most accurate one so far we have. This
@@ -1315,6 +1316,12 @@ void __init tsc_init(void)
1315 (unsigned long)cpu_khz / 1000, 1316 (unsigned long)cpu_khz / 1000,
1316 (unsigned long)cpu_khz % 1000); 1317 (unsigned long)cpu_khz % 1000);
1317 1318
1319 if (cpu_khz != tsc_khz) {
1320 pr_info("Detected %lu.%03lu MHz TSC",
1321 (unsigned long)tsc_khz / 1000,
1322 (unsigned long)tsc_khz % 1000);
1323 }
1324
1318 /* Sanitize TSC ADJUST before cyc2ns gets initialized */ 1325 /* Sanitize TSC ADJUST before cyc2ns gets initialized */
1319 tsc_store_and_check_tsc_adjust(true); 1326 tsc_store_and_check_tsc_adjust(true);
1320 1327
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index a3f973b2c97a..1f9188f5357c 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -74,8 +74,50 @@ static struct orc_entry *orc_module_find(unsigned long ip)
74} 74}
75#endif 75#endif
76 76
77#ifdef CONFIG_DYNAMIC_FTRACE
78static struct orc_entry *orc_find(unsigned long ip);
79
80/*
81 * Ftrace dynamic trampolines do not have orc entries of their own.
82 * But they are copies of the ftrace entries that are static and
83 * defined in ftrace_*.S, which do have orc entries.
84 *
85 * If the unwinder comes across a ftrace trampoline, then find the
86 * ftrace function that was used to create it, and use that ftrace
87 * function's orc entry, as the placement of the return code in
88 * the stack will be identical.
89 */
90static struct orc_entry *orc_ftrace_find(unsigned long ip)
91{
92 struct ftrace_ops *ops;
93 unsigned long caller;
94
95 ops = ftrace_ops_trampoline(ip);
96 if (!ops)
97 return NULL;
98
99 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
100 caller = (unsigned long)ftrace_regs_call;
101 else
102 caller = (unsigned long)ftrace_call;
103
104 /* Prevent unlikely recursion */
105 if (ip == caller)
106 return NULL;
107
108 return orc_find(caller);
109}
110#else
111static struct orc_entry *orc_ftrace_find(unsigned long ip)
112{
113 return NULL;
114}
115#endif
116
77static struct orc_entry *orc_find(unsigned long ip) 117static struct orc_entry *orc_find(unsigned long ip)
78{ 118{
119 static struct orc_entry *orc;
120
79 if (!orc_init) 121 if (!orc_init)
80 return NULL; 122 return NULL;
81 123
@@ -111,7 +153,11 @@ static struct orc_entry *orc_find(unsigned long ip)
111 __stop_orc_unwind_ip - __start_orc_unwind_ip, ip); 153 __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);
112 154
113 /* Module lookup: */ 155 /* Module lookup: */
114 return orc_module_find(ip); 156 orc = orc_module_find(ip);
157 if (orc)
158 return orc;
159
160 return orc_ftrace_find(ip);
115} 161}
116 162
117static void orc_sort_swap(void *_a, void *_b, int size) 163static void orc_sort_swap(void *_a, void *_b, int size)
@@ -253,22 +299,15 @@ unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
253 return NULL; 299 return NULL;
254} 300}
255 301
256static bool stack_access_ok(struct unwind_state *state, unsigned long addr, 302static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
257 size_t len) 303 size_t len)
258{ 304{
259 struct stack_info *info = &state->stack_info; 305 struct stack_info *info = &state->stack_info;
306 void *addr = (void *)_addr;
260 307
261 /* 308 if (!on_stack(info, addr, len) &&
262 * If the address isn't on the current stack, switch to the next one. 309 (get_stack_info(addr, state->task, info, &state->stack_mask)))
263 * 310 return false;
264 * We may have to traverse multiple stacks to deal with the possibility
265 * that info->next_sp could point to an empty stack and the address
266 * could be on a subsequent stack.
267 */
268 while (!on_stack(info, (void *)addr, len))
269 if (get_stack_info(info->next_sp, state->task, info,
270 &state->stack_mask))
271 return false;
272 311
273 return true; 312 return true;
274} 313}
@@ -283,42 +322,32 @@ static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
283 return true; 322 return true;
284} 323}
285 324
286#define REGS_SIZE (sizeof(struct pt_regs))
287#define SP_OFFSET (offsetof(struct pt_regs, sp))
288#define IRET_REGS_SIZE (REGS_SIZE - offsetof(struct pt_regs, ip))
289#define IRET_SP_OFFSET (SP_OFFSET - offsetof(struct pt_regs, ip))
290
291static bool deref_stack_regs(struct unwind_state *state, unsigned long addr, 325static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
292 unsigned long *ip, unsigned long *sp, bool full) 326 unsigned long *ip, unsigned long *sp)
293{ 327{
294 size_t regs_size = full ? REGS_SIZE : IRET_REGS_SIZE; 328 struct pt_regs *regs = (struct pt_regs *)addr;
295 size_t sp_offset = full ? SP_OFFSET : IRET_SP_OFFSET;
296 struct pt_regs *regs = (struct pt_regs *)(addr + regs_size - REGS_SIZE);
297
298 if (IS_ENABLED(CONFIG_X86_64)) {
299 if (!stack_access_ok(state, addr, regs_size))
300 return false;
301 329
302 *ip = regs->ip; 330 /* x86-32 support will be more complicated due to the &regs->sp hack */
303 *sp = regs->sp; 331 BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_32));
304 332
305 return true; 333 if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
306 }
307
308 if (!stack_access_ok(state, addr, sp_offset))
309 return false; 334 return false;
310 335
311 *ip = regs->ip; 336 *ip = regs->ip;
337 *sp = regs->sp;
338 return true;
339}
312 340
313 if (user_mode(regs)) { 341static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr,
314 if (!stack_access_ok(state, addr + sp_offset, 342 unsigned long *ip, unsigned long *sp)
315 REGS_SIZE - SP_OFFSET)) 343{
316 return false; 344 struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET;
317 345
318 *sp = regs->sp; 346 if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
319 } else 347 return false;
320 *sp = (unsigned long)&regs->sp;
321 348
349 *ip = regs->ip;
350 *sp = regs->sp;
322 return true; 351 return true;
323} 352}
324 353
@@ -327,7 +356,6 @@ bool unwind_next_frame(struct unwind_state *state)
327 unsigned long ip_p, sp, orig_ip, prev_sp = state->sp; 356 unsigned long ip_p, sp, orig_ip, prev_sp = state->sp;
328 enum stack_type prev_type = state->stack_info.type; 357 enum stack_type prev_type = state->stack_info.type;
329 struct orc_entry *orc; 358 struct orc_entry *orc;
330 struct pt_regs *ptregs;
331 bool indirect = false; 359 bool indirect = false;
332 360
333 if (unwind_done(state)) 361 if (unwind_done(state))
@@ -435,7 +463,7 @@ bool unwind_next_frame(struct unwind_state *state)
435 break; 463 break;
436 464
437 case ORC_TYPE_REGS: 465 case ORC_TYPE_REGS:
438 if (!deref_stack_regs(state, sp, &state->ip, &state->sp, true)) { 466 if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
439 orc_warn("can't dereference registers at %p for ip %pB\n", 467 orc_warn("can't dereference registers at %p for ip %pB\n",
440 (void *)sp, (void *)orig_ip); 468 (void *)sp, (void *)orig_ip);
441 goto done; 469 goto done;
@@ -447,20 +475,14 @@ bool unwind_next_frame(struct unwind_state *state)
447 break; 475 break;
448 476
449 case ORC_TYPE_REGS_IRET: 477 case ORC_TYPE_REGS_IRET:
450 if (!deref_stack_regs(state, sp, &state->ip, &state->sp, false)) { 478 if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
451 orc_warn("can't dereference iret registers at %p for ip %pB\n", 479 orc_warn("can't dereference iret registers at %p for ip %pB\n",
452 (void *)sp, (void *)orig_ip); 480 (void *)sp, (void *)orig_ip);
453 goto done; 481 goto done;
454 } 482 }
455 483
456 ptregs = container_of((void *)sp, struct pt_regs, ip); 484 state->regs = (void *)sp - IRET_FRAME_OFFSET;
457 if ((unsigned long)ptregs >= prev_sp && 485 state->full_regs = false;
458 on_stack(&state->stack_info, ptregs, REGS_SIZE)) {
459 state->regs = ptregs;
460 state->full_regs = false;
461 } else
462 state->regs = NULL;
463
464 state->signal = true; 486 state->signal = true;
465 break; 487 break;
466 488
@@ -553,8 +575,18 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
553 } 575 }
554 576
555 if (get_stack_info((unsigned long *)state->sp, state->task, 577 if (get_stack_info((unsigned long *)state->sp, state->task,
556 &state->stack_info, &state->stack_mask)) 578 &state->stack_info, &state->stack_mask)) {
557 return; 579 /*
580 * We weren't on a valid stack. It's possible that
581 * we overflowed a valid stack into a guard page.
582 * See if the next page up is valid so that we can
583 * generate some kind of backtrace if this happens.
584 */
585 void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
586 if (get_stack_info(next_page, state->task, &state->stack_info,
587 &state->stack_mask))
588 return;
589 }
558 590
559 /* 591 /*
560 * The caller can provide the address of the first frame directly 592 * The caller can provide the address of the first frame directly
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index a4009fb9be87..9b138a06c1a4 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -61,11 +61,17 @@ jiffies_64 = jiffies;
61 . = ALIGN(HPAGE_SIZE); \ 61 . = ALIGN(HPAGE_SIZE); \
62 __end_rodata_hpage_align = .; 62 __end_rodata_hpage_align = .;
63 63
64#define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE);
65#define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE);
66
64#else 67#else
65 68
66#define X64_ALIGN_RODATA_BEGIN 69#define X64_ALIGN_RODATA_BEGIN
67#define X64_ALIGN_RODATA_END 70#define X64_ALIGN_RODATA_END
68 71
72#define ALIGN_ENTRY_TEXT_BEGIN
73#define ALIGN_ENTRY_TEXT_END
74
69#endif 75#endif
70 76
71PHDRS { 77PHDRS {
@@ -102,11 +108,28 @@ SECTIONS
102 CPUIDLE_TEXT 108 CPUIDLE_TEXT
103 LOCK_TEXT 109 LOCK_TEXT
104 KPROBES_TEXT 110 KPROBES_TEXT
111 ALIGN_ENTRY_TEXT_BEGIN
105 ENTRY_TEXT 112 ENTRY_TEXT
106 IRQENTRY_TEXT 113 IRQENTRY_TEXT
114 ALIGN_ENTRY_TEXT_END
107 SOFTIRQENTRY_TEXT 115 SOFTIRQENTRY_TEXT
108 *(.fixup) 116 *(.fixup)
109 *(.gnu.warning) 117 *(.gnu.warning)
118
119#ifdef CONFIG_X86_64
120 . = ALIGN(PAGE_SIZE);
121 _entry_trampoline = .;
122 *(.entry_trampoline)
123 . = ALIGN(PAGE_SIZE);
124 ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
125#endif
126
127#ifdef CONFIG_RETPOLINE
128 __indirect_thunk_start = .;
129 *(.text.__x86.indirect_thunk)
130 __indirect_thunk_end = .;
131#endif
132
110 /* End of text section */ 133 /* End of text section */
111 _etext = .; 134 _etext = .;
112 } :text = 0x9090 135 } :text = 0x9090
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index e7d04d0c8008..b514b2b2845a 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1046,7 +1046,6 @@ static void fetch_register_operand(struct operand *op)
1046 1046
1047static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg) 1047static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
1048{ 1048{
1049 ctxt->ops->get_fpu(ctxt);
1050 switch (reg) { 1049 switch (reg) {
1051 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break; 1050 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
1052 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break; 1051 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
@@ -1068,13 +1067,11 @@ static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
1068#endif 1067#endif
1069 default: BUG(); 1068 default: BUG();
1070 } 1069 }
1071 ctxt->ops->put_fpu(ctxt);
1072} 1070}
1073 1071
1074static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, 1072static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
1075 int reg) 1073 int reg)
1076{ 1074{
1077 ctxt->ops->get_fpu(ctxt);
1078 switch (reg) { 1075 switch (reg) {
1079 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break; 1076 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
1080 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break; 1077 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
@@ -1096,12 +1093,10 @@ static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
1096#endif 1093#endif
1097 default: BUG(); 1094 default: BUG();
1098 } 1095 }
1099 ctxt->ops->put_fpu(ctxt);
1100} 1096}
1101 1097
1102static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg) 1098static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1103{ 1099{
1104 ctxt->ops->get_fpu(ctxt);
1105 switch (reg) { 1100 switch (reg) {
1106 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break; 1101 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1107 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break; 1102 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
@@ -1113,12 +1108,10 @@ static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1113 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break; 1108 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1114 default: BUG(); 1109 default: BUG();
1115 } 1110 }
1116 ctxt->ops->put_fpu(ctxt);
1117} 1111}
1118 1112
1119static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg) 1113static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1120{ 1114{
1121 ctxt->ops->get_fpu(ctxt);
1122 switch (reg) { 1115 switch (reg) {
1123 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break; 1116 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1124 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break; 1117 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
@@ -1130,7 +1123,6 @@ static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1130 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break; 1123 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1131 default: BUG(); 1124 default: BUG();
1132 } 1125 }
1133 ctxt->ops->put_fpu(ctxt);
1134} 1126}
1135 1127
1136static int em_fninit(struct x86_emulate_ctxt *ctxt) 1128static int em_fninit(struct x86_emulate_ctxt *ctxt)
@@ -1138,9 +1130,7 @@ static int em_fninit(struct x86_emulate_ctxt *ctxt)
1138 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) 1130 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1139 return emulate_nm(ctxt); 1131 return emulate_nm(ctxt);
1140 1132
1141 ctxt->ops->get_fpu(ctxt);
1142 asm volatile("fninit"); 1133 asm volatile("fninit");
1143 ctxt->ops->put_fpu(ctxt);
1144 return X86EMUL_CONTINUE; 1134 return X86EMUL_CONTINUE;
1145} 1135}
1146 1136
@@ -1151,9 +1141,7 @@ static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1151 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) 1141 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1152 return emulate_nm(ctxt); 1142 return emulate_nm(ctxt);
1153 1143
1154 ctxt->ops->get_fpu(ctxt);
1155 asm volatile("fnstcw %0": "+m"(fcw)); 1144 asm volatile("fnstcw %0": "+m"(fcw));
1156 ctxt->ops->put_fpu(ctxt);
1157 1145
1158 ctxt->dst.val = fcw; 1146 ctxt->dst.val = fcw;
1159 1147
@@ -1167,9 +1155,7 @@ static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1167 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) 1155 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1168 return emulate_nm(ctxt); 1156 return emulate_nm(ctxt);
1169 1157
1170 ctxt->ops->get_fpu(ctxt);
1171 asm volatile("fnstsw %0": "+m"(fsw)); 1158 asm volatile("fnstsw %0": "+m"(fsw));
1172 ctxt->ops->put_fpu(ctxt);
1173 1159
1174 ctxt->dst.val = fsw; 1160 ctxt->dst.val = fsw;
1175 1161
@@ -2404,9 +2390,21 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
2404} 2390}
2405 2391
2406static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt, 2392static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2407 u64 cr0, u64 cr4) 2393 u64 cr0, u64 cr3, u64 cr4)
2408{ 2394{
2409 int bad; 2395 int bad;
2396 u64 pcid;
2397
2398 /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
2399 pcid = 0;
2400 if (cr4 & X86_CR4_PCIDE) {
2401 pcid = cr3 & 0xfff;
2402 cr3 &= ~0xfff;
2403 }
2404
2405 bad = ctxt->ops->set_cr(ctxt, 3, cr3);
2406 if (bad)
2407 return X86EMUL_UNHANDLEABLE;
2410 2408
2411 /* 2409 /*
2412 * First enable PAE, long mode needs it before CR0.PG = 1 is set. 2410 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
@@ -2425,6 +2423,12 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2425 bad = ctxt->ops->set_cr(ctxt, 4, cr4); 2423 bad = ctxt->ops->set_cr(ctxt, 4, cr4);
2426 if (bad) 2424 if (bad)
2427 return X86EMUL_UNHANDLEABLE; 2425 return X86EMUL_UNHANDLEABLE;
2426 if (pcid) {
2427 bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
2428 if (bad)
2429 return X86EMUL_UNHANDLEABLE;
2430 }
2431
2428 } 2432 }
2429 2433
2430 return X86EMUL_CONTINUE; 2434 return X86EMUL_CONTINUE;
@@ -2435,11 +2439,11 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
2435 struct desc_struct desc; 2439 struct desc_struct desc;
2436 struct desc_ptr dt; 2440 struct desc_ptr dt;
2437 u16 selector; 2441 u16 selector;
2438 u32 val, cr0, cr4; 2442 u32 val, cr0, cr3, cr4;
2439 int i; 2443 int i;
2440 2444
2441 cr0 = GET_SMSTATE(u32, smbase, 0x7ffc); 2445 cr0 = GET_SMSTATE(u32, smbase, 0x7ffc);
2442 ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8)); 2446 cr3 = GET_SMSTATE(u32, smbase, 0x7ff8);
2443 ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED; 2447 ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
2444 ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0); 2448 ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0);
2445 2449
@@ -2481,14 +2485,14 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
2481 2485
2482 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8)); 2486 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
2483 2487
2484 return rsm_enter_protected_mode(ctxt, cr0, cr4); 2488 return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2485} 2489}
2486 2490
2487static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) 2491static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
2488{ 2492{
2489 struct desc_struct desc; 2493 struct desc_struct desc;
2490 struct desc_ptr dt; 2494 struct desc_ptr dt;
2491 u64 val, cr0, cr4; 2495 u64 val, cr0, cr3, cr4;
2492 u32 base3; 2496 u32 base3;
2493 u16 selector; 2497 u16 selector;
2494 int i, r; 2498 int i, r;
@@ -2505,7 +2509,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
2505 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1); 2509 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2506 2510
2507 cr0 = GET_SMSTATE(u64, smbase, 0x7f58); 2511 cr0 = GET_SMSTATE(u64, smbase, 0x7f58);
2508 ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u64, smbase, 0x7f50)); 2512 cr3 = GET_SMSTATE(u64, smbase, 0x7f50);
2509 cr4 = GET_SMSTATE(u64, smbase, 0x7f48); 2513 cr4 = GET_SMSTATE(u64, smbase, 0x7f48);
2510 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00)); 2514 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
2511 val = GET_SMSTATE(u64, smbase, 0x7ed0); 2515 val = GET_SMSTATE(u64, smbase, 0x7ed0);
@@ -2533,7 +2537,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
2533 dt.address = GET_SMSTATE(u64, smbase, 0x7e68); 2537 dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
2534 ctxt->ops->set_gdt(ctxt, &dt); 2538 ctxt->ops->set_gdt(ctxt, &dt);
2535 2539
2536 r = rsm_enter_protected_mode(ctxt, cr0, cr4); 2540 r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2537 if (r != X86EMUL_CONTINUE) 2541 if (r != X86EMUL_CONTINUE)
2538 return r; 2542 return r;
2539 2543
@@ -4001,12 +4005,8 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
4001 if (rc != X86EMUL_CONTINUE) 4005 if (rc != X86EMUL_CONTINUE)
4002 return rc; 4006 return rc;
4003 4007
4004 ctxt->ops->get_fpu(ctxt);
4005
4006 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state)); 4008 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4007 4009
4008 ctxt->ops->put_fpu(ctxt);
4009
4010 if (rc != X86EMUL_CONTINUE) 4010 if (rc != X86EMUL_CONTINUE)
4011 return rc; 4011 return rc;
4012 4012
@@ -4049,8 +4049,6 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4049 if (rc != X86EMUL_CONTINUE) 4049 if (rc != X86EMUL_CONTINUE)
4050 return rc; 4050 return rc;
4051 4051
4052 ctxt->ops->get_fpu(ctxt);
4053
4054 if (size < __fxstate_size(16)) { 4052 if (size < __fxstate_size(16)) {
4055 rc = fxregs_fixup(&fx_state, size); 4053 rc = fxregs_fixup(&fx_state, size);
4056 if (rc != X86EMUL_CONTINUE) 4054 if (rc != X86EMUL_CONTINUE)
@@ -4066,8 +4064,6 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4066 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state)); 4064 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4067 4065
4068out: 4066out:
4069 ctxt->ops->put_fpu(ctxt);
4070
4071 return rc; 4067 return rc;
4072} 4068}
4073 4069
@@ -5317,9 +5313,7 @@ static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5317{ 5313{
5318 int rc; 5314 int rc;
5319 5315
5320 ctxt->ops->get_fpu(ctxt);
5321 rc = asm_safe("fwait"); 5316 rc = asm_safe("fwait");
5322 ctxt->ops->put_fpu(ctxt);
5323 5317
5324 if (unlikely(rc != X86EMUL_CONTINUE)) 5318 if (unlikely(rc != X86EMUL_CONTINUE))
5325 return emulate_exception(ctxt, MF_VECTOR, 0, false); 5319 return emulate_exception(ctxt, MF_VECTOR, 0, false);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e5e66e5c6640..2b8eb4da4d08 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3395,7 +3395,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
3395 spin_lock(&vcpu->kvm->mmu_lock); 3395 spin_lock(&vcpu->kvm->mmu_lock);
3396 if(make_mmu_pages_available(vcpu) < 0) { 3396 if(make_mmu_pages_available(vcpu) < 0) {
3397 spin_unlock(&vcpu->kvm->mmu_lock); 3397 spin_unlock(&vcpu->kvm->mmu_lock);
3398 return 1; 3398 return -ENOSPC;
3399 } 3399 }
3400 sp = kvm_mmu_get_page(vcpu, 0, 0, 3400 sp = kvm_mmu_get_page(vcpu, 0, 0,
3401 vcpu->arch.mmu.shadow_root_level, 1, ACC_ALL); 3401 vcpu->arch.mmu.shadow_root_level, 1, ACC_ALL);
@@ -3410,7 +3410,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
3410 spin_lock(&vcpu->kvm->mmu_lock); 3410 spin_lock(&vcpu->kvm->mmu_lock);
3411 if (make_mmu_pages_available(vcpu) < 0) { 3411 if (make_mmu_pages_available(vcpu) < 0) {
3412 spin_unlock(&vcpu->kvm->mmu_lock); 3412 spin_unlock(&vcpu->kvm->mmu_lock);
3413 return 1; 3413 return -ENOSPC;
3414 } 3414 }
3415 sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT), 3415 sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
3416 i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL); 3416 i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL);
@@ -3450,7 +3450,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3450 spin_lock(&vcpu->kvm->mmu_lock); 3450 spin_lock(&vcpu->kvm->mmu_lock);
3451 if (make_mmu_pages_available(vcpu) < 0) { 3451 if (make_mmu_pages_available(vcpu) < 0) {
3452 spin_unlock(&vcpu->kvm->mmu_lock); 3452 spin_unlock(&vcpu->kvm->mmu_lock);
3453 return 1; 3453 return -ENOSPC;
3454 } 3454 }
3455 sp = kvm_mmu_get_page(vcpu, root_gfn, 0, 3455 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
3456 vcpu->arch.mmu.shadow_root_level, 0, ACC_ALL); 3456 vcpu->arch.mmu.shadow_root_level, 0, ACC_ALL);
@@ -3487,7 +3487,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3487 spin_lock(&vcpu->kvm->mmu_lock); 3487 spin_lock(&vcpu->kvm->mmu_lock);
3488 if (make_mmu_pages_available(vcpu) < 0) { 3488 if (make_mmu_pages_available(vcpu) < 0) {
3489 spin_unlock(&vcpu->kvm->mmu_lock); 3489 spin_unlock(&vcpu->kvm->mmu_lock);
3490 return 1; 3490 return -ENOSPC;
3491 } 3491 }
3492 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL, 3492 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL,
3493 0, ACC_ALL); 3493 0, ACC_ALL);
@@ -3781,7 +3781,8 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
3781bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu) 3781bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
3782{ 3782{
3783 if (unlikely(!lapic_in_kernel(vcpu) || 3783 if (unlikely(!lapic_in_kernel(vcpu) ||
3784 kvm_event_needs_reinjection(vcpu))) 3784 kvm_event_needs_reinjection(vcpu) ||
3785 vcpu->arch.exception.pending))
3785 return false; 3786 return false;
3786 3787
3787 if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu)) 3788 if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu))
@@ -5465,30 +5466,34 @@ static void mmu_destroy_caches(void)
5465 5466
5466int kvm_mmu_module_init(void) 5467int kvm_mmu_module_init(void)
5467{ 5468{
5469 int ret = -ENOMEM;
5470
5468 kvm_mmu_clear_all_pte_masks(); 5471 kvm_mmu_clear_all_pte_masks();
5469 5472
5470 pte_list_desc_cache = kmem_cache_create("pte_list_desc", 5473 pte_list_desc_cache = kmem_cache_create("pte_list_desc",
5471 sizeof(struct pte_list_desc), 5474 sizeof(struct pte_list_desc),
5472 0, SLAB_ACCOUNT, NULL); 5475 0, SLAB_ACCOUNT, NULL);
5473 if (!pte_list_desc_cache) 5476 if (!pte_list_desc_cache)
5474 goto nomem; 5477 goto out;
5475 5478
5476 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", 5479 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
5477 sizeof(struct kvm_mmu_page), 5480 sizeof(struct kvm_mmu_page),
5478 0, SLAB_ACCOUNT, NULL); 5481 0, SLAB_ACCOUNT, NULL);
5479 if (!mmu_page_header_cache) 5482 if (!mmu_page_header_cache)
5480 goto nomem; 5483 goto out;
5481 5484
5482 if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL)) 5485 if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
5483 goto nomem; 5486 goto out;
5484 5487
5485 register_shrinker(&mmu_shrinker); 5488 ret = register_shrinker(&mmu_shrinker);
5489 if (ret)
5490 goto out;
5486 5491
5487 return 0; 5492 return 0;
5488 5493
5489nomem: 5494out:
5490 mmu_destroy_caches(); 5495 mmu_destroy_caches();
5491 return -ENOMEM; 5496 return ret;
5492} 5497}
5493 5498
5494/* 5499/*
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index eb714f1cdf7e..f40d0da1f1d3 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -45,6 +45,7 @@
45#include <asm/debugreg.h> 45#include <asm/debugreg.h>
46#include <asm/kvm_para.h> 46#include <asm/kvm_para.h>
47#include <asm/irq_remapping.h> 47#include <asm/irq_remapping.h>
48#include <asm/nospec-branch.h>
48 49
49#include <asm/virtext.h> 50#include <asm/virtext.h>
50#include "trace.h" 51#include "trace.h"
@@ -361,7 +362,6 @@ static void recalc_intercepts(struct vcpu_svm *svm)
361{ 362{
362 struct vmcb_control_area *c, *h; 363 struct vmcb_control_area *c, *h;
363 struct nested_state *g; 364 struct nested_state *g;
364 u32 h_intercept_exceptions;
365 365
366 mark_dirty(svm->vmcb, VMCB_INTERCEPTS); 366 mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
367 367
@@ -372,14 +372,9 @@ static void recalc_intercepts(struct vcpu_svm *svm)
372 h = &svm->nested.hsave->control; 372 h = &svm->nested.hsave->control;
373 g = &svm->nested; 373 g = &svm->nested;
374 374
375 /* No need to intercept #UD if L1 doesn't intercept it */
376 h_intercept_exceptions =
377 h->intercept_exceptions & ~(1U << UD_VECTOR);
378
379 c->intercept_cr = h->intercept_cr | g->intercept_cr; 375 c->intercept_cr = h->intercept_cr | g->intercept_cr;
380 c->intercept_dr = h->intercept_dr | g->intercept_dr; 376 c->intercept_dr = h->intercept_dr | g->intercept_dr;
381 c->intercept_exceptions = 377 c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
382 h_intercept_exceptions | g->intercept_exceptions;
383 c->intercept = h->intercept | g->intercept; 378 c->intercept = h->intercept | g->intercept;
384} 379}
385 380
@@ -2202,7 +2197,6 @@ static int ud_interception(struct vcpu_svm *svm)
2202{ 2197{
2203 int er; 2198 int er;
2204 2199
2205 WARN_ON_ONCE(is_guest_mode(&svm->vcpu));
2206 er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD); 2200 er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
2207 if (er == EMULATE_USER_EXIT) 2201 if (er == EMULATE_USER_EXIT)
2208 return 0; 2202 return 0;
@@ -4986,6 +4980,25 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
4986 "mov %%r14, %c[r14](%[svm]) \n\t" 4980 "mov %%r14, %c[r14](%[svm]) \n\t"
4987 "mov %%r15, %c[r15](%[svm]) \n\t" 4981 "mov %%r15, %c[r15](%[svm]) \n\t"
4988#endif 4982#endif
4983 /*
4984 * Clear host registers marked as clobbered to prevent
4985 * speculative use.
4986 */
4987 "xor %%" _ASM_BX ", %%" _ASM_BX " \n\t"
4988 "xor %%" _ASM_CX ", %%" _ASM_CX " \n\t"
4989 "xor %%" _ASM_DX ", %%" _ASM_DX " \n\t"
4990 "xor %%" _ASM_SI ", %%" _ASM_SI " \n\t"
4991 "xor %%" _ASM_DI ", %%" _ASM_DI " \n\t"
4992#ifdef CONFIG_X86_64
4993 "xor %%r8, %%r8 \n\t"
4994 "xor %%r9, %%r9 \n\t"
4995 "xor %%r10, %%r10 \n\t"
4996 "xor %%r11, %%r11 \n\t"
4997 "xor %%r12, %%r12 \n\t"
4998 "xor %%r13, %%r13 \n\t"
4999 "xor %%r14, %%r14 \n\t"
5000 "xor %%r15, %%r15 \n\t"
5001#endif
4989 "pop %%" _ASM_BP 5002 "pop %%" _ASM_BP
4990 : 5003 :
4991 : [svm]"a"(svm), 5004 : [svm]"a"(svm),
@@ -5015,6 +5028,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
5015#endif 5028#endif
5016 ); 5029 );
5017 5030
5031 /* Eliminate branch target predictions from guest mode */
5032 vmexit_fill_RSB();
5033
5018#ifdef CONFIG_X86_64 5034#ifdef CONFIG_X86_64
5019 wrmsrl(MSR_GS_BASE, svm->host.gs_base); 5035 wrmsrl(MSR_GS_BASE, svm->host.gs_base);
5020#else 5036#else
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4704aaf6d19e..c829d89e2e63 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -50,6 +50,7 @@
50#include <asm/apic.h> 50#include <asm/apic.h>
51#include <asm/irq_remapping.h> 51#include <asm/irq_remapping.h>
52#include <asm/mmu_context.h> 52#include <asm/mmu_context.h>
53#include <asm/nospec-branch.h>
53 54
54#include "trace.h" 55#include "trace.h"
55#include "pmu.h" 56#include "pmu.h"
@@ -899,8 +900,16 @@ static inline short vmcs_field_to_offset(unsigned long field)
899{ 900{
900 BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX); 901 BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX);
901 902
902 if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) || 903 if (field >= ARRAY_SIZE(vmcs_field_to_offset_table))
903 vmcs_field_to_offset_table[field] == 0) 904 return -ENOENT;
905
906 /*
907 * FIXME: Mitigation for CVE-2017-5753. To be replaced with a
908 * generic mechanism.
909 */
910 asm("lfence");
911
912 if (vmcs_field_to_offset_table[field] == 0)
904 return -ENOENT; 913 return -ENOENT;
905 914
906 return vmcs_field_to_offset_table[field]; 915 return vmcs_field_to_offset_table[field];
@@ -1887,7 +1896,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
1887{ 1896{
1888 u32 eb; 1897 u32 eb;
1889 1898
1890 eb = (1u << PF_VECTOR) | (1u << MC_VECTOR) | 1899 eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
1891 (1u << DB_VECTOR) | (1u << AC_VECTOR); 1900 (1u << DB_VECTOR) | (1u << AC_VECTOR);
1892 if ((vcpu->guest_debug & 1901 if ((vcpu->guest_debug &
1893 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) == 1902 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
@@ -1905,8 +1914,6 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
1905 */ 1914 */
1906 if (is_guest_mode(vcpu)) 1915 if (is_guest_mode(vcpu))
1907 eb |= get_vmcs12(vcpu)->exception_bitmap; 1916 eb |= get_vmcs12(vcpu)->exception_bitmap;
1908 else
1909 eb |= 1u << UD_VECTOR;
1910 1917
1911 vmcs_write32(EXCEPTION_BITMAP, eb); 1918 vmcs_write32(EXCEPTION_BITMAP, eb);
1912} 1919}
@@ -2302,7 +2309,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2302 * processors. See 22.2.4. 2309 * processors. See 22.2.4.
2303 */ 2310 */
2304 vmcs_writel(HOST_TR_BASE, 2311 vmcs_writel(HOST_TR_BASE,
2305 (unsigned long)this_cpu_ptr(&cpu_tss)); 2312 (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
2306 vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt); /* 22.2.4 */ 2313 vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt); /* 22.2.4 */
2307 2314
2308 /* 2315 /*
@@ -5917,7 +5924,6 @@ static int handle_exception(struct kvm_vcpu *vcpu)
5917 return 1; /* already handled by vmx_vcpu_run() */ 5924 return 1; /* already handled by vmx_vcpu_run() */
5918 5925
5919 if (is_invalid_opcode(intr_info)) { 5926 if (is_invalid_opcode(intr_info)) {
5920 WARN_ON_ONCE(is_guest_mode(vcpu));
5921 er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); 5927 er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
5922 if (er == EMULATE_USER_EXIT) 5928 if (er == EMULATE_USER_EXIT)
5923 return 0; 5929 return 0;
@@ -6751,16 +6757,10 @@ static __init int hardware_setup(void)
6751 goto out; 6757 goto out;
6752 } 6758 }
6753 6759
6754 vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
6755 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); 6760 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
6756 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); 6761 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
6757 6762
6758 /*
6759 * Allow direct access to the PC debug port (it is often used for I/O
6760 * delays, but the vmexits simply slow things down).
6761 */
6762 memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE); 6763 memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
6763 clear_bit(0x80, vmx_io_bitmap_a);
6764 6764
6765 memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE); 6765 memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
6766 6766
@@ -9421,6 +9421,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
9421 /* Save guest registers, load host registers, keep flags */ 9421 /* Save guest registers, load host registers, keep flags */
9422 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t" 9422 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
9423 "pop %0 \n\t" 9423 "pop %0 \n\t"
9424 "setbe %c[fail](%0)\n\t"
9424 "mov %%" _ASM_AX ", %c[rax](%0) \n\t" 9425 "mov %%" _ASM_AX ", %c[rax](%0) \n\t"
9425 "mov %%" _ASM_BX ", %c[rbx](%0) \n\t" 9426 "mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
9426 __ASM_SIZE(pop) " %c[rcx](%0) \n\t" 9427 __ASM_SIZE(pop) " %c[rcx](%0) \n\t"
@@ -9437,12 +9438,23 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
9437 "mov %%r13, %c[r13](%0) \n\t" 9438 "mov %%r13, %c[r13](%0) \n\t"
9438 "mov %%r14, %c[r14](%0) \n\t" 9439 "mov %%r14, %c[r14](%0) \n\t"
9439 "mov %%r15, %c[r15](%0) \n\t" 9440 "mov %%r15, %c[r15](%0) \n\t"
9441 "xor %%r8d, %%r8d \n\t"
9442 "xor %%r9d, %%r9d \n\t"
9443 "xor %%r10d, %%r10d \n\t"
9444 "xor %%r11d, %%r11d \n\t"
9445 "xor %%r12d, %%r12d \n\t"
9446 "xor %%r13d, %%r13d \n\t"
9447 "xor %%r14d, %%r14d \n\t"
9448 "xor %%r15d, %%r15d \n\t"
9440#endif 9449#endif
9441 "mov %%cr2, %%" _ASM_AX " \n\t" 9450 "mov %%cr2, %%" _ASM_AX " \n\t"
9442 "mov %%" _ASM_AX ", %c[cr2](%0) \n\t" 9451 "mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
9443 9452
9453 "xor %%eax, %%eax \n\t"
9454 "xor %%ebx, %%ebx \n\t"
9455 "xor %%esi, %%esi \n\t"
9456 "xor %%edi, %%edi \n\t"
9444 "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t" 9457 "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
9445 "setbe %c[fail](%0) \n\t"
9446 ".pushsection .rodata \n\t" 9458 ".pushsection .rodata \n\t"
9447 ".global vmx_return \n\t" 9459 ".global vmx_return \n\t"
9448 "vmx_return: " _ASM_PTR " 2b \n\t" 9460 "vmx_return: " _ASM_PTR " 2b \n\t"
@@ -9479,6 +9491,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
9479#endif 9491#endif
9480 ); 9492 );
9481 9493
9494 /* Eliminate branch target predictions from guest mode */
9495 vmexit_fill_RSB();
9496
9482 /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ 9497 /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
9483 if (debugctlmsr) 9498 if (debugctlmsr)
9484 update_debugctlmsr(debugctlmsr); 9499 update_debugctlmsr(debugctlmsr);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index eee8e7faf1af..c53298dfbf50 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2937,7 +2937,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2937 srcu_read_unlock(&vcpu->kvm->srcu, idx); 2937 srcu_read_unlock(&vcpu->kvm->srcu, idx);
2938 pagefault_enable(); 2938 pagefault_enable();
2939 kvm_x86_ops->vcpu_put(vcpu); 2939 kvm_x86_ops->vcpu_put(vcpu);
2940 kvm_put_guest_fpu(vcpu);
2941 vcpu->arch.last_host_tsc = rdtsc(); 2940 vcpu->arch.last_host_tsc = rdtsc();
2942} 2941}
2943 2942
@@ -4385,7 +4384,7 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
4385 addr, n, v)) 4384 addr, n, v))
4386 && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v)) 4385 && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
4387 break; 4386 break;
4388 trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v); 4387 trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v);
4389 handled += n; 4388 handled += n;
4390 addr += n; 4389 addr += n;
4391 len -= n; 4390 len -= n;
@@ -4644,7 +4643,7 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
4644{ 4643{
4645 if (vcpu->mmio_read_completed) { 4644 if (vcpu->mmio_read_completed) {
4646 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, 4645 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
4647 vcpu->mmio_fragments[0].gpa, *(u64 *)val); 4646 vcpu->mmio_fragments[0].gpa, val);
4648 vcpu->mmio_read_completed = 0; 4647 vcpu->mmio_read_completed = 0;
4649 return 1; 4648 return 1;
4650 } 4649 }
@@ -4666,14 +4665,14 @@ static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
4666 4665
4667static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) 4666static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
4668{ 4667{
4669 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val); 4668 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val);
4670 return vcpu_mmio_write(vcpu, gpa, bytes, val); 4669 return vcpu_mmio_write(vcpu, gpa, bytes, val);
4671} 4670}
4672 4671
4673static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, 4672static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
4674 void *val, int bytes) 4673 void *val, int bytes)
4675{ 4674{
4676 trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0); 4675 trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL);
4677 return X86EMUL_IO_NEEDED; 4676 return X86EMUL_IO_NEEDED;
4678} 4677}
4679 4678
@@ -5252,17 +5251,6 @@ static void emulator_halt(struct x86_emulate_ctxt *ctxt)
5252 emul_to_vcpu(ctxt)->arch.halt_request = 1; 5251 emul_to_vcpu(ctxt)->arch.halt_request = 1;
5253} 5252}
5254 5253
5255static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
5256{
5257 preempt_disable();
5258 kvm_load_guest_fpu(emul_to_vcpu(ctxt));
5259}
5260
5261static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
5262{
5263 preempt_enable();
5264}
5265
5266static int emulator_intercept(struct x86_emulate_ctxt *ctxt, 5254static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
5267 struct x86_instruction_info *info, 5255 struct x86_instruction_info *info,
5268 enum x86_intercept_stage stage) 5256 enum x86_intercept_stage stage)
@@ -5340,8 +5328,6 @@ static const struct x86_emulate_ops emulate_ops = {
5340 .halt = emulator_halt, 5328 .halt = emulator_halt,
5341 .wbinvd = emulator_wbinvd, 5329 .wbinvd = emulator_wbinvd,
5342 .fix_hypercall = emulator_fix_hypercall, 5330 .fix_hypercall = emulator_fix_hypercall,
5343 .get_fpu = emulator_get_fpu,
5344 .put_fpu = emulator_put_fpu,
5345 .intercept = emulator_intercept, 5331 .intercept = emulator_intercept,
5346 .get_cpuid = emulator_get_cpuid, 5332 .get_cpuid = emulator_get_cpuid,
5347 .set_nmi_mask = emulator_set_nmi_mask, 5333 .set_nmi_mask = emulator_set_nmi_mask,
@@ -6778,6 +6764,20 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
6778 kvm_x86_ops->tlb_flush(vcpu); 6764 kvm_x86_ops->tlb_flush(vcpu);
6779} 6765}
6780 6766
6767void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
6768 unsigned long start, unsigned long end)
6769{
6770 unsigned long apic_address;
6771
6772 /*
6773 * The physical address of apic access page is stored in the VMCS.
6774 * Update it when it becomes invalid.
6775 */
6776 apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
6777 if (start <= apic_address && apic_address < end)
6778 kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
6779}
6780
6781void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) 6781void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
6782{ 6782{
6783 struct page *page = NULL; 6783 struct page *page = NULL;
@@ -6952,7 +6952,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
6952 preempt_disable(); 6952 preempt_disable();
6953 6953
6954 kvm_x86_ops->prepare_guest_switch(vcpu); 6954 kvm_x86_ops->prepare_guest_switch(vcpu);
6955 kvm_load_guest_fpu(vcpu);
6956 6955
6957 /* 6956 /*
6958 * Disable IRQs before setting IN_GUEST_MODE. Posted interrupt 6957 * Disable IRQs before setting IN_GUEST_MODE. Posted interrupt
@@ -7265,13 +7264,12 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
7265 7264
7266int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 7265int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
7267{ 7266{
7268 struct fpu *fpu = &current->thread.fpu;
7269 int r; 7267 int r;
7270 7268
7271 fpu__initialize(fpu);
7272
7273 kvm_sigset_activate(vcpu); 7269 kvm_sigset_activate(vcpu);
7274 7270
7271 kvm_load_guest_fpu(vcpu);
7272
7275 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { 7273 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
7276 if (kvm_run->immediate_exit) { 7274 if (kvm_run->immediate_exit) {
7277 r = -EINTR; 7275 r = -EINTR;
@@ -7312,6 +7310,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
7312 r = vcpu_run(vcpu); 7310 r = vcpu_run(vcpu);
7313 7311
7314out: 7312out:
7313 kvm_put_guest_fpu(vcpu);
7315 post_kvm_run_save(vcpu); 7314 post_kvm_run_save(vcpu);
7316 kvm_sigset_deactivate(vcpu); 7315 kvm_sigset_deactivate(vcpu);
7317 7316
@@ -7381,7 +7380,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
7381#endif 7380#endif
7382 7381
7383 kvm_rip_write(vcpu, regs->rip); 7382 kvm_rip_write(vcpu, regs->rip);
7384 kvm_set_rflags(vcpu, regs->rflags); 7383 kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED);
7385 7384
7386 vcpu->arch.exception.pending = false; 7385 vcpu->arch.exception.pending = false;
7387 7386
@@ -7495,6 +7494,29 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
7495} 7494}
7496EXPORT_SYMBOL_GPL(kvm_task_switch); 7495EXPORT_SYMBOL_GPL(kvm_task_switch);
7497 7496
7497int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
7498{
7499 if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
7500 /*
7501 * When EFER.LME and CR0.PG are set, the processor is in
7502 * 64-bit mode (though maybe in a 32-bit code segment).
7503 * CR4.PAE and EFER.LMA must be set.
7504 */
7505 if (!(sregs->cr4 & X86_CR4_PAE)
7506 || !(sregs->efer & EFER_LMA))
7507 return -EINVAL;
7508 } else {
7509 /*
7510 * Not in 64-bit mode: EFER.LMA is clear and the code
7511 * segment cannot be 64-bit.
7512 */
7513 if (sregs->efer & EFER_LMA || sregs->cs.l)
7514 return -EINVAL;
7515 }
7516
7517 return 0;
7518}
7519
7498int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 7520int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
7499 struct kvm_sregs *sregs) 7521 struct kvm_sregs *sregs)
7500{ 7522{
@@ -7507,6 +7529,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
7507 (sregs->cr4 & X86_CR4_OSXSAVE)) 7529 (sregs->cr4 & X86_CR4_OSXSAVE))
7508 return -EINVAL; 7530 return -EINVAL;
7509 7531
7532 if (kvm_valid_sregs(vcpu, sregs))
7533 return -EINVAL;
7534
7510 apic_base_msr.data = sregs->apic_base; 7535 apic_base_msr.data = sregs->apic_base;
7511 apic_base_msr.host_initiated = true; 7536 apic_base_msr.host_initiated = true;
7512 if (kvm_set_apic_base(vcpu, &apic_base_msr)) 7537 if (kvm_set_apic_base(vcpu, &apic_base_msr))
@@ -7704,32 +7729,25 @@ static void fx_init(struct kvm_vcpu *vcpu)
7704 vcpu->arch.cr0 |= X86_CR0_ET; 7729 vcpu->arch.cr0 |= X86_CR0_ET;
7705} 7730}
7706 7731
7732/* Swap (qemu) user FPU context for the guest FPU context. */
7707void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) 7733void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
7708{ 7734{
7709 if (vcpu->guest_fpu_loaded) 7735 preempt_disable();
7710 return; 7736 copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
7711
7712 /*
7713 * Restore all possible states in the guest,
7714 * and assume host would use all available bits.
7715 * Guest xcr0 would be loaded later.
7716 */
7717 vcpu->guest_fpu_loaded = 1;
7718 __kernel_fpu_begin();
7719 /* PKRU is separately restored in kvm_x86_ops->run. */ 7737 /* PKRU is separately restored in kvm_x86_ops->run. */
7720 __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state, 7738 __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
7721 ~XFEATURE_MASK_PKRU); 7739 ~XFEATURE_MASK_PKRU);
7740 preempt_enable();
7722 trace_kvm_fpu(1); 7741 trace_kvm_fpu(1);
7723} 7742}
7724 7743
7744/* When vcpu_run ends, restore user space FPU context. */
7725void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) 7745void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
7726{ 7746{
7727 if (!vcpu->guest_fpu_loaded) 7747 preempt_disable();
7728 return;
7729
7730 vcpu->guest_fpu_loaded = 0;
7731 copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu); 7748 copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
7732 __kernel_fpu_end(); 7749 copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
7750 preempt_enable();
7733 ++vcpu->stat.fpu_reload; 7751 ++vcpu->stat.fpu_reload;
7734 trace_kvm_fpu(0); 7752 trace_kvm_fpu(0);
7735} 7753}
@@ -7846,7 +7864,8 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
7846 * To avoid have the INIT path from kvm_apic_has_events() that be 7864 * To avoid have the INIT path from kvm_apic_has_events() that be
7847 * called with loaded FPU and does not let userspace fix the state. 7865 * called with loaded FPU and does not let userspace fix the state.
7848 */ 7866 */
7849 kvm_put_guest_fpu(vcpu); 7867 if (init_event)
7868 kvm_put_guest_fpu(vcpu);
7850 mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu.state.xsave, 7869 mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu.state.xsave,
7851 XFEATURE_MASK_BNDREGS); 7870 XFEATURE_MASK_BNDREGS);
7852 if (mpx_state_buffer) 7871 if (mpx_state_buffer)
@@ -7855,6 +7874,8 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
7855 XFEATURE_MASK_BNDCSR); 7874 XFEATURE_MASK_BNDCSR);
7856 if (mpx_state_buffer) 7875 if (mpx_state_buffer)
7857 memset(mpx_state_buffer, 0, sizeof(struct mpx_bndcsr)); 7876 memset(mpx_state_buffer, 0, sizeof(struct mpx_bndcsr));
7877 if (init_event)
7878 kvm_load_guest_fpu(vcpu);
7858 } 7879 }
7859 7880
7860 if (!init_event) { 7881 if (!init_event) {
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 7b181b61170e..f23934bbaf4e 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -26,6 +26,7 @@ lib-y += memcpy_$(BITS).o
26lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o 26lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
27lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o 27lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
28lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o 28lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
29lib-$(CONFIG_RETPOLINE) += retpoline.o
29 30
30obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o 31obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
31 32
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index 4d34bb548b41..46e71a74e612 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -29,7 +29,8 @@
29#include <asm/errno.h> 29#include <asm/errno.h>
30#include <asm/asm.h> 30#include <asm/asm.h>
31#include <asm/export.h> 31#include <asm/export.h>
32 32#include <asm/nospec-branch.h>
33
33/* 34/*
34 * computes a partial checksum, e.g. for TCP/UDP fragments 35 * computes a partial checksum, e.g. for TCP/UDP fragments
35 */ 36 */
@@ -156,7 +157,7 @@ ENTRY(csum_partial)
156 negl %ebx 157 negl %ebx
157 lea 45f(%ebx,%ebx,2), %ebx 158 lea 45f(%ebx,%ebx,2), %ebx
158 testl %esi, %esi 159 testl %esi, %esi
159 jmp *%ebx 160 JMP_NOSPEC %ebx
160 161
161 # Handle 2-byte-aligned regions 162 # Handle 2-byte-aligned regions
16220: addw (%esi), %ax 16320: addw (%esi), %ax
@@ -439,7 +440,7 @@ ENTRY(csum_partial_copy_generic)
439 andl $-32,%edx 440 andl $-32,%edx
440 lea 3f(%ebx,%ebx), %ebx 441 lea 3f(%ebx,%ebx), %ebx
441 testl %esi, %esi 442 testl %esi, %esi
442 jmp *%ebx 443 JMP_NOSPEC %ebx
4431: addl $64,%esi 4441: addl $64,%esi
444 addl $64,%edi 445 addl $64,%edi
445 SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl) 446 SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl)
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index 553f8fd23cc4..4846eff7e4c8 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -107,10 +107,10 @@ static void delay_mwaitx(unsigned long __loops)
107 delay = min_t(u64, MWAITX_MAX_LOOPS, loops); 107 delay = min_t(u64, MWAITX_MAX_LOOPS, loops);
108 108
109 /* 109 /*
110 * Use cpu_tss as a cacheline-aligned, seldomly 110 * Use cpu_tss_rw as a cacheline-aligned, seldomly
111 * accessed per-cpu variable as the monitor target. 111 * accessed per-cpu variable as the monitor target.
112 */ 112 */
113 __monitorx(raw_cpu_ptr(&cpu_tss), 0, 0); 113 __monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
114 114
115 /* 115 /*
116 * AMD, like Intel, supports the EAX hint and EAX=0xf 116 * AMD, like Intel, supports the EAX hint and EAX=0xf
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
new file mode 100644
index 000000000000..c909961e678a
--- /dev/null
+++ b/arch/x86/lib/retpoline.S
@@ -0,0 +1,48 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#include <linux/stringify.h>
4#include <linux/linkage.h>
5#include <asm/dwarf2.h>
6#include <asm/cpufeatures.h>
7#include <asm/alternative-asm.h>
8#include <asm/export.h>
9#include <asm/nospec-branch.h>
10
11.macro THUNK reg
12 .section .text.__x86.indirect_thunk
13
14ENTRY(__x86_indirect_thunk_\reg)
15 CFI_STARTPROC
16 JMP_NOSPEC %\reg
17 CFI_ENDPROC
18ENDPROC(__x86_indirect_thunk_\reg)
19.endm
20
21/*
22 * Despite being an assembler file we can't just use .irp here
23 * because __KSYM_DEPS__ only uses the C preprocessor and would
24 * only see one instance of "__x86_indirect_thunk_\reg" rather
25 * than one per register with the correct names. So we do it
26 * the simple and nasty way...
27 */
28#define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)
29#define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
30#define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg)
31
32GENERATE_THUNK(_ASM_AX)
33GENERATE_THUNK(_ASM_BX)
34GENERATE_THUNK(_ASM_CX)
35GENERATE_THUNK(_ASM_DX)
36GENERATE_THUNK(_ASM_SI)
37GENERATE_THUNK(_ASM_DI)
38GENERATE_THUNK(_ASM_BP)
39#ifdef CONFIG_64BIT
40GENERATE_THUNK(r8)
41GENERATE_THUNK(r9)
42GENERATE_THUNK(r10)
43GENERATE_THUNK(r11)
44GENERATE_THUNK(r12)
45GENERATE_THUNK(r13)
46GENERATE_THUNK(r14)
47GENERATE_THUNK(r15)
48#endif
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index c4d55919fac1..e0b85930dd77 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
607fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1) 607fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
608fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1) 608fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
609fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) 609fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
610ff: 610ff: UD0
611EndTable 611EndTable
612 612
613Table: 3-byte opcode 1 (0x0f 0x38) 613Table: 3-byte opcode 1 (0x0f 0x38)
@@ -717,7 +717,7 @@ AVXcode: 2
7177e: vpermt2d/q Vx,Hx,Wx (66),(ev) 7177e: vpermt2d/q Vx,Hx,Wx (66),(ev)
7187f: vpermt2ps/d Vx,Hx,Wx (66),(ev) 7187f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
71980: INVEPT Gy,Mdq (66) 71980: INVEPT Gy,Mdq (66)
72081: INVPID Gy,Mdq (66) 72081: INVVPID Gy,Mdq (66)
72182: INVPCID Gy,Mdq (66) 72182: INVPCID Gy,Mdq (66)
72283: vpmultishiftqb Vx,Hx,Wx (66),(ev) 72283: vpmultishiftqb Vx,Hx,Wx (66),(ev)
72388: vexpandps/d Vpd,Wpd (66),(ev) 72388: vexpandps/d Vpd,Wpd (66),(ev)
@@ -970,6 +970,15 @@ GrpTable: Grp9
970EndTable 970EndTable
971 971
972GrpTable: Grp10 972GrpTable: Grp10
973# all are UD1
9740: UD1
9751: UD1
9762: UD1
9773: UD1
9784: UD1
9795: UD1
9806: UD1
9817: UD1
973EndTable 982EndTable
974 983
975# Grp11A and Grp11B are expressed as Grp11 in Intel SDM 984# Grp11A and Grp11B are expressed as Grp11 in Intel SDM
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 8e13b8cc6bed..27e9e90a8d35 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -10,7 +10,7 @@ CFLAGS_REMOVE_mem_encrypt.o = -pg
10endif 10endif
11 11
12obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ 12obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
13 pat.o pgtable.o physaddr.o setup_nx.o tlb.o 13 pat.o pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o
14 14
15# Make sure __phys_addr has no stackprotector 15# Make sure __phys_addr has no stackprotector
16nostackp := $(call cc-option, -fno-stack-protector) 16nostackp := $(call cc-option, -fno-stack-protector)
@@ -41,9 +41,10 @@ obj-$(CONFIG_AMD_NUMA) += amdtopology.o
41obj-$(CONFIG_ACPI_NUMA) += srat.o 41obj-$(CONFIG_ACPI_NUMA) += srat.o
42obj-$(CONFIG_NUMA_EMU) += numa_emulation.o 42obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
43 43
44obj-$(CONFIG_X86_INTEL_MPX) += mpx.o 44obj-$(CONFIG_X86_INTEL_MPX) += mpx.o
45obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o 45obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o
46obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o 46obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o
47obj-$(CONFIG_PAGE_TABLE_ISOLATION) += pti.o
47 48
48obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt.o 49obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt.o
49obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o 50obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
new file mode 100644
index 000000000000..b9283cc27622
--- /dev/null
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -0,0 +1,166 @@
1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/spinlock.h>
4#include <linux/percpu.h>
5
6#include <asm/cpu_entry_area.h>
7#include <asm/pgtable.h>
8#include <asm/fixmap.h>
9#include <asm/desc.h>
10
11static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);
12
13#ifdef CONFIG_X86_64
14static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
15 [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
16#endif
17
18struct cpu_entry_area *get_cpu_entry_area(int cpu)
19{
20 unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
21 BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
22
23 return (struct cpu_entry_area *) va;
24}
25EXPORT_SYMBOL(get_cpu_entry_area);
26
27void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
28{
29 unsigned long va = (unsigned long) cea_vaddr;
30
31 set_pte_vaddr(va, pfn_pte(pa >> PAGE_SHIFT, flags));
32}
33
34static void __init
35cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
36{
37 for ( ; pages; pages--, cea_vaddr+= PAGE_SIZE, ptr += PAGE_SIZE)
38 cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
39}
40
41static void percpu_setup_debug_store(int cpu)
42{
43#ifdef CONFIG_CPU_SUP_INTEL
44 int npages;
45 void *cea;
46
47 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
48 return;
49
50 cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
51 npages = sizeof(struct debug_store) / PAGE_SIZE;
52 BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
53 cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
54 PAGE_KERNEL);
55
56 cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
57 /*
58 * Force the population of PMDs for not yet allocated per cpu
59 * memory like debug store buffers.
60 */
61 npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
62 for (; npages; npages--, cea += PAGE_SIZE)
63 cea_set_pte(cea, 0, PAGE_NONE);
64#endif
65}
66
67/* Setup the fixmap mappings only once per-processor */
68static void __init setup_cpu_entry_area(int cpu)
69{
70#ifdef CONFIG_X86_64
71 extern char _entry_trampoline[];
72
73 /* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
74 pgprot_t gdt_prot = PAGE_KERNEL_RO;
75 pgprot_t tss_prot = PAGE_KERNEL_RO;
76#else
77 /*
78 * On native 32-bit systems, the GDT cannot be read-only because
79 * our double fault handler uses a task gate, and entering through
80 * a task gate needs to change an available TSS to busy. If the
81 * GDT is read-only, that will triple fault. The TSS cannot be
82 * read-only because the CPU writes to it on task switches.
83 *
84 * On Xen PV, the GDT must be read-only because the hypervisor
85 * requires it.
86 */
87 pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
88 PAGE_KERNEL_RO : PAGE_KERNEL;
89 pgprot_t tss_prot = PAGE_KERNEL;
90#endif
91
92 cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu),
93 gdt_prot);
94
95 cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page,
96 per_cpu_ptr(&entry_stack_storage, cpu), 1,
97 PAGE_KERNEL);
98
99 /*
100 * The Intel SDM says (Volume 3, 7.2.1):
101 *
102 * Avoid placing a page boundary in the part of the TSS that the
103 * processor reads during a task switch (the first 104 bytes). The
104 * processor may not correctly perform address translations if a
105 * boundary occurs in this area. During a task switch, the processor
106 * reads and writes into the first 104 bytes of each TSS (using
107 * contiguous physical addresses beginning with the physical address
108 * of the first byte of the TSS). So, after TSS access begins, if
109 * part of the 104 bytes is not physically contiguous, the processor
110 * will access incorrect information without generating a page-fault
111 * exception.
112 *
113 * There are also a lot of errata involving the TSS spanning a page
114 * boundary. Assert that we're not doing that.
115 */
116 BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
117 offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
118 BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
119 cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss,
120 &per_cpu(cpu_tss_rw, cpu),
121 sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);
122
123#ifdef CONFIG_X86_32
124 per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
125#endif
126
127#ifdef CONFIG_X86_64
128 BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
129 BUILD_BUG_ON(sizeof(exception_stacks) !=
130 sizeof(((struct cpu_entry_area *)0)->exception_stacks));
131 cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks,
132 &per_cpu(exception_stacks, cpu),
133 sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);
134
135 cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
136 __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
137#endif
138 percpu_setup_debug_store(cpu);
139}
140
141static __init void setup_cpu_entry_area_ptes(void)
142{
143#ifdef CONFIG_X86_32
144 unsigned long start, end;
145
146 BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
147 BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
148
149 start = CPU_ENTRY_AREA_BASE;
150 end = start + CPU_ENTRY_AREA_MAP_SIZE;
151
152 /* Careful here: start + PMD_SIZE might wrap around */
153 for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
154 populate_extra_pte(start);
155#endif
156}
157
158void __init setup_cpu_entry_areas(void)
159{
160 unsigned int cpu;
161
162 setup_cpu_entry_area_ptes();
163
164 for_each_possible_cpu(cpu)
165 setup_cpu_entry_area(cpu);
166}
diff --git a/arch/x86/mm/debug_pagetables.c b/arch/x86/mm/debug_pagetables.c
index bfcffdf6c577..421f2664ffa0 100644
--- a/arch/x86/mm/debug_pagetables.c
+++ b/arch/x86/mm/debug_pagetables.c
@@ -5,7 +5,7 @@
5 5
6static int ptdump_show(struct seq_file *m, void *v) 6static int ptdump_show(struct seq_file *m, void *v)
7{ 7{
8 ptdump_walk_pgd_level(m, NULL); 8 ptdump_walk_pgd_level_debugfs(m, NULL, false);
9 return 0; 9 return 0;
10} 10}
11 11
@@ -22,21 +22,89 @@ static const struct file_operations ptdump_fops = {
22 .release = single_release, 22 .release = single_release,
23}; 23};
24 24
25static struct dentry *pe; 25static int ptdump_show_curknl(struct seq_file *m, void *v)
26{
27 if (current->mm->pgd) {
28 down_read(&current->mm->mmap_sem);
29 ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, false);
30 up_read(&current->mm->mmap_sem);
31 }
32 return 0;
33}
34
35static int ptdump_open_curknl(struct inode *inode, struct file *filp)
36{
37 return single_open(filp, ptdump_show_curknl, NULL);
38}
39
40static const struct file_operations ptdump_curknl_fops = {
41 .owner = THIS_MODULE,
42 .open = ptdump_open_curknl,
43 .read = seq_read,
44 .llseek = seq_lseek,
45 .release = single_release,
46};
47
48#ifdef CONFIG_PAGE_TABLE_ISOLATION
49static struct dentry *pe_curusr;
50
51static int ptdump_show_curusr(struct seq_file *m, void *v)
52{
53 if (current->mm->pgd) {
54 down_read(&current->mm->mmap_sem);
55 ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, true);
56 up_read(&current->mm->mmap_sem);
57 }
58 return 0;
59}
60
61static int ptdump_open_curusr(struct inode *inode, struct file *filp)
62{
63 return single_open(filp, ptdump_show_curusr, NULL);
64}
65
66static const struct file_operations ptdump_curusr_fops = {
67 .owner = THIS_MODULE,
68 .open = ptdump_open_curusr,
69 .read = seq_read,
70 .llseek = seq_lseek,
71 .release = single_release,
72};
73#endif
74
75static struct dentry *dir, *pe_knl, *pe_curknl;
26 76
27static int __init pt_dump_debug_init(void) 77static int __init pt_dump_debug_init(void)
28{ 78{
29 pe = debugfs_create_file("kernel_page_tables", S_IRUSR, NULL, NULL, 79 dir = debugfs_create_dir("page_tables", NULL);
30 &ptdump_fops); 80 if (!dir)
31 if (!pe)
32 return -ENOMEM; 81 return -ENOMEM;
33 82
83 pe_knl = debugfs_create_file("kernel", 0400, dir, NULL,
84 &ptdump_fops);
85 if (!pe_knl)
86 goto err;
87
88 pe_curknl = debugfs_create_file("current_kernel", 0400,
89 dir, NULL, &ptdump_curknl_fops);
90 if (!pe_curknl)
91 goto err;
92
93#ifdef CONFIG_PAGE_TABLE_ISOLATION
94 pe_curusr = debugfs_create_file("current_user", 0400,
95 dir, NULL, &ptdump_curusr_fops);
96 if (!pe_curusr)
97 goto err;
98#endif
34 return 0; 99 return 0;
100err:
101 debugfs_remove_recursive(dir);
102 return -ENOMEM;
35} 103}
36 104
37static void __exit pt_dump_debug_exit(void) 105static void __exit pt_dump_debug_exit(void)
38{ 106{
39 debugfs_remove_recursive(pe); 107 debugfs_remove_recursive(dir);
40} 108}
41 109
42module_init(pt_dump_debug_init); 110module_init(pt_dump_debug_init);
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 5e3ac6fe6c9e..2a4849e92831 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -44,68 +44,97 @@ struct addr_marker {
44 unsigned long max_lines; 44 unsigned long max_lines;
45}; 45};
46 46
47/* indices for address_markers; keep sync'd w/ address_markers below */ 47/* Address space markers hints */
48
49#ifdef CONFIG_X86_64
50
48enum address_markers_idx { 51enum address_markers_idx {
49 USER_SPACE_NR = 0, 52 USER_SPACE_NR = 0,
50#ifdef CONFIG_X86_64
51 KERNEL_SPACE_NR, 53 KERNEL_SPACE_NR,
52 LOW_KERNEL_NR, 54 LOW_KERNEL_NR,
55#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL)
56 LDT_NR,
57#endif
53 VMALLOC_START_NR, 58 VMALLOC_START_NR,
54 VMEMMAP_START_NR, 59 VMEMMAP_START_NR,
55#ifdef CONFIG_KASAN 60#ifdef CONFIG_KASAN
56 KASAN_SHADOW_START_NR, 61 KASAN_SHADOW_START_NR,
57 KASAN_SHADOW_END_NR, 62 KASAN_SHADOW_END_NR,
58#endif 63#endif
59# ifdef CONFIG_X86_ESPFIX64 64 CPU_ENTRY_AREA_NR,
65#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL)
66 LDT_NR,
67#endif
68#ifdef CONFIG_X86_ESPFIX64
60 ESPFIX_START_NR, 69 ESPFIX_START_NR,
61# endif 70#endif
71#ifdef CONFIG_EFI
72 EFI_END_NR,
73#endif
62 HIGH_KERNEL_NR, 74 HIGH_KERNEL_NR,
63 MODULES_VADDR_NR, 75 MODULES_VADDR_NR,
64 MODULES_END_NR, 76 MODULES_END_NR,
65#else 77 FIXADDR_START_NR,
78 END_OF_SPACE_NR,
79};
80
81static struct addr_marker address_markers[] = {
82 [USER_SPACE_NR] = { 0, "User Space" },
83 [KERNEL_SPACE_NR] = { (1UL << 63), "Kernel Space" },
84 [LOW_KERNEL_NR] = { 0UL, "Low Kernel Mapping" },
85 [VMALLOC_START_NR] = { 0UL, "vmalloc() Area" },
86 [VMEMMAP_START_NR] = { 0UL, "Vmemmap" },
87#ifdef CONFIG_KASAN
88 [KASAN_SHADOW_START_NR] = { KASAN_SHADOW_START, "KASAN shadow" },
89 [KASAN_SHADOW_END_NR] = { KASAN_SHADOW_END, "KASAN shadow end" },
90#endif
91#ifdef CONFIG_MODIFY_LDT_SYSCALL
92 [LDT_NR] = { LDT_BASE_ADDR, "LDT remap" },
93#endif
94 [CPU_ENTRY_AREA_NR] = { CPU_ENTRY_AREA_BASE,"CPU entry Area" },
95#ifdef CONFIG_X86_ESPFIX64
96 [ESPFIX_START_NR] = { ESPFIX_BASE_ADDR, "ESPfix Area", 16 },
97#endif
98#ifdef CONFIG_EFI
99 [EFI_END_NR] = { EFI_VA_END, "EFI Runtime Services" },
100#endif
101 [HIGH_KERNEL_NR] = { __START_KERNEL_map, "High Kernel Mapping" },
102 [MODULES_VADDR_NR] = { MODULES_VADDR, "Modules" },
103 [MODULES_END_NR] = { MODULES_END, "End Modules" },
104 [FIXADDR_START_NR] = { FIXADDR_START, "Fixmap Area" },
105 [END_OF_SPACE_NR] = { -1, NULL }
106};
107
108#else /* CONFIG_X86_64 */
109
110enum address_markers_idx {
111 USER_SPACE_NR = 0,
66 KERNEL_SPACE_NR, 112 KERNEL_SPACE_NR,
67 VMALLOC_START_NR, 113 VMALLOC_START_NR,
68 VMALLOC_END_NR, 114 VMALLOC_END_NR,
69# ifdef CONFIG_HIGHMEM 115#ifdef CONFIG_HIGHMEM
70 PKMAP_BASE_NR, 116 PKMAP_BASE_NR,
71# endif
72 FIXADDR_START_NR,
73#endif 117#endif
118 CPU_ENTRY_AREA_NR,
119 FIXADDR_START_NR,
120 END_OF_SPACE_NR,
74}; 121};
75 122
76/* Address space markers hints */
77static struct addr_marker address_markers[] = { 123static struct addr_marker address_markers[] = {
78 { 0, "User Space" }, 124 [USER_SPACE_NR] = { 0, "User Space" },
79#ifdef CONFIG_X86_64 125 [KERNEL_SPACE_NR] = { PAGE_OFFSET, "Kernel Mapping" },
80 { 0x8000000000000000UL, "Kernel Space" }, 126 [VMALLOC_START_NR] = { 0UL, "vmalloc() Area" },
81 { 0/* PAGE_OFFSET */, "Low Kernel Mapping" }, 127 [VMALLOC_END_NR] = { 0UL, "vmalloc() End" },
82 { 0/* VMALLOC_START */, "vmalloc() Area" }, 128#ifdef CONFIG_HIGHMEM
83 { 0/* VMEMMAP_START */, "Vmemmap" }, 129 [PKMAP_BASE_NR] = { 0UL, "Persistent kmap() Area" },
84#ifdef CONFIG_KASAN
85 { KASAN_SHADOW_START, "KASAN shadow" },
86 { KASAN_SHADOW_END, "KASAN shadow end" },
87#endif 130#endif
88# ifdef CONFIG_X86_ESPFIX64 131 [CPU_ENTRY_AREA_NR] = { 0UL, "CPU entry area" },
89 { ESPFIX_BASE_ADDR, "ESPfix Area", 16 }, 132 [FIXADDR_START_NR] = { 0UL, "Fixmap area" },
90# endif 133 [END_OF_SPACE_NR] = { -1, NULL }
91# ifdef CONFIG_EFI
92 { EFI_VA_END, "EFI Runtime Services" },
93# endif
94 { __START_KERNEL_map, "High Kernel Mapping" },
95 { MODULES_VADDR, "Modules" },
96 { MODULES_END, "End Modules" },
97#else
98 { PAGE_OFFSET, "Kernel Mapping" },
99 { 0/* VMALLOC_START */, "vmalloc() Area" },
100 { 0/*VMALLOC_END*/, "vmalloc() End" },
101# ifdef CONFIG_HIGHMEM
102 { 0/*PKMAP_BASE*/, "Persistent kmap() Area" },
103# endif
104 { 0/*FIXADDR_START*/, "Fixmap Area" },
105#endif
106 { -1, NULL } /* End of list */
107}; 134};
108 135
136#endif /* !CONFIG_X86_64 */
137
109/* Multipliers for offsets within the PTEs */ 138/* Multipliers for offsets within the PTEs */
110#define PTE_LEVEL_MULT (PAGE_SIZE) 139#define PTE_LEVEL_MULT (PAGE_SIZE)
111#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT) 140#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
@@ -140,7 +169,7 @@ static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
140 static const char * const level_name[] = 169 static const char * const level_name[] =
141 { "cr3", "pgd", "p4d", "pud", "pmd", "pte" }; 170 { "cr3", "pgd", "p4d", "pud", "pmd", "pte" };
142 171
143 if (!pgprot_val(prot)) { 172 if (!(pr & _PAGE_PRESENT)) {
144 /* Not present */ 173 /* Not present */
145 pt_dump_cont_printf(m, dmsg, " "); 174 pt_dump_cont_printf(m, dmsg, " ");
146 } else { 175 } else {
@@ -447,7 +476,7 @@ static inline bool is_hypervisor_range(int idx)
447} 476}
448 477
449static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd, 478static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
450 bool checkwx) 479 bool checkwx, bool dmesg)
451{ 480{
452#ifdef CONFIG_X86_64 481#ifdef CONFIG_X86_64
453 pgd_t *start = (pgd_t *) &init_top_pgt; 482 pgd_t *start = (pgd_t *) &init_top_pgt;
@@ -460,7 +489,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
460 489
461 if (pgd) { 490 if (pgd) {
462 start = pgd; 491 start = pgd;
463 st.to_dmesg = true; 492 st.to_dmesg = dmesg;
464 } 493 }
465 494
466 st.check_wx = checkwx; 495 st.check_wx = checkwx;
@@ -498,13 +527,37 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
498 527
499void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd) 528void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
500{ 529{
501 ptdump_walk_pgd_level_core(m, pgd, false); 530 ptdump_walk_pgd_level_core(m, pgd, false, true);
531}
532
533void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
534{
535#ifdef CONFIG_PAGE_TABLE_ISOLATION
536 if (user && static_cpu_has(X86_FEATURE_PTI))
537 pgd = kernel_to_user_pgdp(pgd);
538#endif
539 ptdump_walk_pgd_level_core(m, pgd, false, false);
540}
541EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs);
542
543static void ptdump_walk_user_pgd_level_checkwx(void)
544{
545#ifdef CONFIG_PAGE_TABLE_ISOLATION
546 pgd_t *pgd = (pgd_t *) &init_top_pgt;
547
548 if (!static_cpu_has(X86_FEATURE_PTI))
549 return;
550
551 pr_info("x86/mm: Checking user space page tables\n");
552 pgd = kernel_to_user_pgdp(pgd);
553 ptdump_walk_pgd_level_core(NULL, pgd, true, false);
554#endif
502} 555}
503EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level);
504 556
505void ptdump_walk_pgd_level_checkwx(void) 557void ptdump_walk_pgd_level_checkwx(void)
506{ 558{
507 ptdump_walk_pgd_level_core(NULL, NULL, true); 559 ptdump_walk_pgd_level_core(NULL, NULL, true, false);
560 ptdump_walk_user_pgd_level_checkwx();
508} 561}
509 562
510static int __init pt_dump_init(void) 563static int __init pt_dump_init(void)
@@ -525,8 +578,8 @@ static int __init pt_dump_init(void)
525 address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE; 578 address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
526# endif 579# endif
527 address_markers[FIXADDR_START_NR].start_address = FIXADDR_START; 580 address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
581 address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
528#endif 582#endif
529
530 return 0; 583 return 0;
531} 584}
532__initcall(pt_dump_init); 585__initcall(pt_dump_init);
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 3321b446b66c..9fe656c42aa5 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -1,6 +1,7 @@
1#include <linux/extable.h> 1#include <linux/extable.h>
2#include <linux/uaccess.h> 2#include <linux/uaccess.h>
3#include <linux/sched/debug.h> 3#include <linux/sched/debug.h>
4#include <xen/xen.h>
4 5
5#include <asm/fpu/internal.h> 6#include <asm/fpu/internal.h>
6#include <asm/traps.h> 7#include <asm/traps.h>
@@ -82,7 +83,7 @@ bool ex_handler_refcount(const struct exception_table_entry *fixup,
82 83
83 return true; 84 return true;
84} 85}
85EXPORT_SYMBOL_GPL(ex_handler_refcount); 86EXPORT_SYMBOL(ex_handler_refcount);
86 87
87/* 88/*
88 * Handler for when we fail to restore a task's FPU state. We should never get 89 * Handler for when we fail to restore a task's FPU state. We should never get
@@ -212,8 +213,9 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
212 * Old CPUs leave the high bits of CS on the stack 213 * Old CPUs leave the high bits of CS on the stack
213 * undefined. I'm not sure which CPUs do this, but at least 214 * undefined. I'm not sure which CPUs do this, but at least
214 * the 486 DX works this way. 215 * the 486 DX works this way.
216 * Xen pv domains are not using the default __KERNEL_CS.
215 */ 217 */
216 if (regs->cs != __KERNEL_CS) 218 if (!xen_pv_domain() && regs->cs != __KERNEL_CS)
217 goto fail; 219 goto fail;
218 220
219 /* 221 /*
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 78ca9a8ee454..800de815519c 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -172,14 +172,15 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
172 * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really 172 * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
173 * faulted on a pte with its pkey=4. 173 * faulted on a pte with its pkey=4.
174 */ 174 */
175static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey) 175static void fill_sig_info_pkey(int si_signo, int si_code, siginfo_t *info,
176 u32 *pkey)
176{ 177{
177 /* This is effectively an #ifdef */ 178 /* This is effectively an #ifdef */
178 if (!boot_cpu_has(X86_FEATURE_OSPKE)) 179 if (!boot_cpu_has(X86_FEATURE_OSPKE))
179 return; 180 return;
180 181
181 /* Fault not from Protection Keys: nothing to do */ 182 /* Fault not from Protection Keys: nothing to do */
182 if (si_code != SEGV_PKUERR) 183 if ((si_code != SEGV_PKUERR) || (si_signo != SIGSEGV))
183 return; 184 return;
184 /* 185 /*
185 * force_sig_info_fault() is called from a number of 186 * force_sig_info_fault() is called from a number of
@@ -218,7 +219,7 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
218 lsb = PAGE_SHIFT; 219 lsb = PAGE_SHIFT;
219 info.si_addr_lsb = lsb; 220 info.si_addr_lsb = lsb;
220 221
221 fill_sig_info_pkey(si_code, &info, pkey); 222 fill_sig_info_pkey(si_signo, si_code, &info, pkey);
222 223
223 force_sig_info(si_signo, &info, tsk); 224 force_sig_info(si_signo, &info, tsk);
224} 225}
@@ -438,18 +439,13 @@ static noinline int vmalloc_fault(unsigned long address)
438 if (pgd_none(*pgd_ref)) 439 if (pgd_none(*pgd_ref))
439 return -1; 440 return -1;
440 441
441 if (pgd_none(*pgd)) { 442 if (CONFIG_PGTABLE_LEVELS > 4) {
442 set_pgd(pgd, *pgd_ref); 443 if (pgd_none(*pgd)) {
443 arch_flush_lazy_mmu_mode(); 444 set_pgd(pgd, *pgd_ref);
444 } else if (CONFIG_PGTABLE_LEVELS > 4) { 445 arch_flush_lazy_mmu_mode();
445 /* 446 } else {
446 * With folded p4d, pgd_none() is always false, so the pgd may 447 BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
447 * point to an empty page table entry and pgd_page_vaddr() 448 }
448 * will return garbage.
449 *
450 * We will do the correct sanity check on the p4d level.
451 */
452 BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
453 } 449 }
454 450
455 /* With 4-level paging, copying happens on the p4d level. */ 451 /* With 4-level paging, copying happens on the p4d level. */
@@ -458,7 +454,7 @@ static noinline int vmalloc_fault(unsigned long address)
458 if (p4d_none(*p4d_ref)) 454 if (p4d_none(*p4d_ref))
459 return -1; 455 return -1;
460 456
461 if (p4d_none(*p4d)) { 457 if (p4d_none(*p4d) && CONFIG_PGTABLE_LEVELS == 4) {
462 set_p4d(p4d, *p4d_ref); 458 set_p4d(p4d, *p4d_ref);
463 arch_flush_lazy_mmu_mode(); 459 arch_flush_lazy_mmu_mode();
464 } else { 460 } else {
@@ -469,6 +465,7 @@ static noinline int vmalloc_fault(unsigned long address)
469 * Below here mismatches are bugs because these lower tables 465 * Below here mismatches are bugs because these lower tables
470 * are shared: 466 * are shared:
471 */ 467 */
468 BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4);
472 469
473 pud = pud_offset(p4d, address); 470 pud = pud_offset(p4d, address);
474 pud_ref = pud_offset(p4d_ref, address); 471 pud_ref = pud_offset(p4d_ref, address);
@@ -701,7 +698,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
701 else 698 else
702 printk(KERN_CONT "paging request"); 699 printk(KERN_CONT "paging request");
703 700
704 printk(KERN_CONT " at %p\n", (void *) address); 701 printk(KERN_CONT " at %px\n", (void *) address);
705 printk(KERN_ALERT "IP: %pS\n", (void *)regs->ip); 702 printk(KERN_ALERT "IP: %pS\n", (void *)regs->ip);
706 703
707 dump_pagetable(address); 704 dump_pagetable(address);
@@ -860,7 +857,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
860 if (!printk_ratelimit()) 857 if (!printk_ratelimit())
861 return; 858 return;
862 859
863 printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx", 860 printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
864 task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, 861 task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
865 tsk->comm, task_pid_nr(tsk), address, 862 tsk->comm, task_pid_nr(tsk), address,
866 (void *)regs->ip, (void *)regs->sp, error_code); 863 (void *)regs->ip, (void *)regs->sp, error_code);
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 6fdf91ef130a..82f5252c723a 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -20,6 +20,7 @@
20#include <asm/kaslr.h> 20#include <asm/kaslr.h>
21#include <asm/hypervisor.h> 21#include <asm/hypervisor.h>
22#include <asm/cpufeature.h> 22#include <asm/cpufeature.h>
23#include <asm/pti.h>
23 24
24/* 25/*
25 * We need to define the tracepoints somewhere, and tlb.c 26 * We need to define the tracepoints somewhere, and tlb.c
@@ -160,6 +161,12 @@ struct map_range {
160 161
161static int page_size_mask; 162static int page_size_mask;
162 163
164static void enable_global_pages(void)
165{
166 if (!static_cpu_has(X86_FEATURE_PTI))
167 __supported_pte_mask |= _PAGE_GLOBAL;
168}
169
163static void __init probe_page_size_mask(void) 170static void __init probe_page_size_mask(void)
164{ 171{
165 /* 172 /*
@@ -177,11 +184,11 @@ static void __init probe_page_size_mask(void)
177 cr4_set_bits_and_update_boot(X86_CR4_PSE); 184 cr4_set_bits_and_update_boot(X86_CR4_PSE);
178 185
179 /* Enable PGE if available */ 186 /* Enable PGE if available */
187 __supported_pte_mask &= ~_PAGE_GLOBAL;
180 if (boot_cpu_has(X86_FEATURE_PGE)) { 188 if (boot_cpu_has(X86_FEATURE_PGE)) {
181 cr4_set_bits_and_update_boot(X86_CR4_PGE); 189 cr4_set_bits_and_update_boot(X86_CR4_PGE);
182 __supported_pte_mask |= _PAGE_GLOBAL; 190 enable_global_pages();
183 } else 191 }
184 __supported_pte_mask &= ~_PAGE_GLOBAL;
185 192
186 /* Enable 1 GB linear kernel mappings if available: */ 193 /* Enable 1 GB linear kernel mappings if available: */
187 if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) { 194 if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
@@ -194,34 +201,44 @@ static void __init probe_page_size_mask(void)
194 201
195static void setup_pcid(void) 202static void setup_pcid(void)
196{ 203{
197#ifdef CONFIG_X86_64 204 if (!IS_ENABLED(CONFIG_X86_64))
198 if (boot_cpu_has(X86_FEATURE_PCID)) { 205 return;
199 if (boot_cpu_has(X86_FEATURE_PGE)) { 206
200 /* 207 if (!boot_cpu_has(X86_FEATURE_PCID))
201 * This can't be cr4_set_bits_and_update_boot() -- 208 return;
202 * the trampoline code can't handle CR4.PCIDE and 209
203 * it wouldn't do any good anyway. Despite the name, 210 if (boot_cpu_has(X86_FEATURE_PGE)) {
204 * cr4_set_bits_and_update_boot() doesn't actually 211 /*
205 * cause the bits in question to remain set all the 212 * This can't be cr4_set_bits_and_update_boot() -- the
206 * way through the secondary boot asm. 213 * trampoline code can't handle CR4.PCIDE and it wouldn't
207 * 214 * do any good anyway. Despite the name,
208 * Instead, we brute-force it and set CR4.PCIDE 215 * cr4_set_bits_and_update_boot() doesn't actually cause
209 * manually in start_secondary(). 216 * the bits in question to remain set all the way through
210 */ 217 * the secondary boot asm.
211 cr4_set_bits(X86_CR4_PCIDE); 218 *
212 } else { 219 * Instead, we brute-force it and set CR4.PCIDE manually in
213 /* 220 * start_secondary().
214 * flush_tlb_all(), as currently implemented, won't 221 */
215 * work if PCID is on but PGE is not. Since that 222 cr4_set_bits(X86_CR4_PCIDE);
216 * combination doesn't exist on real hardware, there's 223
217 * no reason to try to fully support it, but it's 224 /*
218 * polite to avoid corrupting data if we're on 225 * INVPCID's single-context modes (2/3) only work if we set
219 * an improperly configured VM. 226 * X86_CR4_PCIDE, *and* we INVPCID support. It's unusable
220 */ 227 * on systems that have X86_CR4_PCIDE clear, or that have
221 setup_clear_cpu_cap(X86_FEATURE_PCID); 228 * no INVPCID support at all.
222 } 229 */
230 if (boot_cpu_has(X86_FEATURE_INVPCID))
231 setup_force_cpu_cap(X86_FEATURE_INVPCID_SINGLE);
232 } else {
233 /*
234 * flush_tlb_all(), as currently implemented, won't work if
235 * PCID is on but PGE is not. Since that combination
236 * doesn't exist on real hardware, there's no reason to try
237 * to fully support it, but it's polite to avoid corrupting
238 * data if we're on an improperly configured VM.
239 */
240 setup_clear_cpu_cap(X86_FEATURE_PCID);
223 } 241 }
224#endif
225} 242}
226 243
227#ifdef CONFIG_X86_32 244#ifdef CONFIG_X86_32
@@ -622,6 +639,7 @@ void __init init_mem_mapping(void)
622{ 639{
623 unsigned long end; 640 unsigned long end;
624 641
642 pti_check_boottime_disable();
625 probe_page_size_mask(); 643 probe_page_size_mask();
626 setup_pcid(); 644 setup_pcid();
627 645
@@ -845,12 +863,12 @@ void __init zone_sizes_init(void)
845 free_area_init_nodes(max_zone_pfns); 863 free_area_init_nodes(max_zone_pfns);
846} 864}
847 865
848DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { 866__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
849 .loaded_mm = &init_mm, 867 .loaded_mm = &init_mm,
850 .next_asid = 1, 868 .next_asid = 1,
851 .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */ 869 .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
852}; 870};
853EXPORT_SYMBOL_GPL(cpu_tlbstate); 871EXPORT_PER_CPU_SYMBOL(cpu_tlbstate);
854 872
855void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache) 873void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
856{ 874{
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 8a64a6f2848d..135c9a7898c7 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -50,6 +50,7 @@
50#include <asm/setup.h> 50#include <asm/setup.h>
51#include <asm/set_memory.h> 51#include <asm/set_memory.h>
52#include <asm/page_types.h> 52#include <asm/page_types.h>
53#include <asm/cpu_entry_area.h>
53#include <asm/init.h> 54#include <asm/init.h>
54 55
55#include "mm_internal.h" 56#include "mm_internal.h"
@@ -766,6 +767,7 @@ void __init mem_init(void)
766 mem_init_print_info(NULL); 767 mem_init_print_info(NULL);
767 printk(KERN_INFO "virtual kernel memory layout:\n" 768 printk(KERN_INFO "virtual kernel memory layout:\n"
768 " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" 769 " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
770 " cpu_entry : 0x%08lx - 0x%08lx (%4ld kB)\n"
769#ifdef CONFIG_HIGHMEM 771#ifdef CONFIG_HIGHMEM
770 " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" 772 " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
771#endif 773#endif
@@ -777,6 +779,10 @@ void __init mem_init(void)
777 FIXADDR_START, FIXADDR_TOP, 779 FIXADDR_START, FIXADDR_TOP,
778 (FIXADDR_TOP - FIXADDR_START) >> 10, 780 (FIXADDR_TOP - FIXADDR_START) >> 10,
779 781
782 CPU_ENTRY_AREA_BASE,
783 CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE,
784 CPU_ENTRY_AREA_MAP_SIZE >> 10,
785
780#ifdef CONFIG_HIGHMEM 786#ifdef CONFIG_HIGHMEM
781 PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, 787 PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
782 (LAST_PKMAP*PAGE_SIZE) >> 10, 788 (LAST_PKMAP*PAGE_SIZE) >> 10,
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 6e4573b1da34..c45b6ec5357b 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -404,11 +404,11 @@ void iounmap(volatile void __iomem *addr)
404 return; 404 return;
405 } 405 }
406 406
407 mmiotrace_iounmap(addr);
408
407 addr = (volatile void __iomem *) 409 addr = (volatile void __iomem *)
408 (PAGE_MASK & (unsigned long __force)addr); 410 (PAGE_MASK & (unsigned long __force)addr);
409 411
410 mmiotrace_iounmap(addr);
411
412 /* Use the vm area unlocked, assuming the caller 412 /* Use the vm area unlocked, assuming the caller
413 ensures there isn't another iounmap for the same address 413 ensures there isn't another iounmap for the same address
414 in parallel. Reuse of the virtual address is prevented by 414 in parallel. Reuse of the virtual address is prevented by
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 99dfed6dfef8..af6f2f9c6a26 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -15,15 +15,20 @@
15#include <asm/tlbflush.h> 15#include <asm/tlbflush.h>
16#include <asm/sections.h> 16#include <asm/sections.h>
17#include <asm/pgtable.h> 17#include <asm/pgtable.h>
18#include <asm/cpu_entry_area.h>
18 19
19extern struct range pfn_mapped[E820_MAX_ENTRIES]; 20extern struct range pfn_mapped[E820_MAX_ENTRIES];
20 21
21static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE); 22static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
22 23
23static __init void *early_alloc(size_t size, int nid) 24static __init void *early_alloc(size_t size, int nid, bool panic)
24{ 25{
25 return memblock_virt_alloc_try_nid_nopanic(size, size, 26 if (panic)
26 __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); 27 return memblock_virt_alloc_try_nid(size, size,
28 __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
29 else
30 return memblock_virt_alloc_try_nid_nopanic(size, size,
31 __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
27} 32}
28 33
29static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr, 34static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
@@ -37,14 +42,14 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
37 if (boot_cpu_has(X86_FEATURE_PSE) && 42 if (boot_cpu_has(X86_FEATURE_PSE) &&
38 ((end - addr) == PMD_SIZE) && 43 ((end - addr) == PMD_SIZE) &&
39 IS_ALIGNED(addr, PMD_SIZE)) { 44 IS_ALIGNED(addr, PMD_SIZE)) {
40 p = early_alloc(PMD_SIZE, nid); 45 p = early_alloc(PMD_SIZE, nid, false);
41 if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL)) 46 if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
42 return; 47 return;
43 else if (p) 48 else if (p)
44 memblock_free(__pa(p), PMD_SIZE); 49 memblock_free(__pa(p), PMD_SIZE);
45 } 50 }
46 51
47 p = early_alloc(PAGE_SIZE, nid); 52 p = early_alloc(PAGE_SIZE, nid, true);
48 pmd_populate_kernel(&init_mm, pmd, p); 53 pmd_populate_kernel(&init_mm, pmd, p);
49 } 54 }
50 55
@@ -56,7 +61,7 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
56 if (!pte_none(*pte)) 61 if (!pte_none(*pte))
57 continue; 62 continue;
58 63
59 p = early_alloc(PAGE_SIZE, nid); 64 p = early_alloc(PAGE_SIZE, nid, true);
60 entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL); 65 entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
61 set_pte_at(&init_mm, addr, pte, entry); 66 set_pte_at(&init_mm, addr, pte, entry);
62 } while (pte++, addr += PAGE_SIZE, addr != end); 67 } while (pte++, addr += PAGE_SIZE, addr != end);
@@ -74,14 +79,14 @@ static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
74 if (boot_cpu_has(X86_FEATURE_GBPAGES) && 79 if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
75 ((end - addr) == PUD_SIZE) && 80 ((end - addr) == PUD_SIZE) &&
76 IS_ALIGNED(addr, PUD_SIZE)) { 81 IS_ALIGNED(addr, PUD_SIZE)) {
77 p = early_alloc(PUD_SIZE, nid); 82 p = early_alloc(PUD_SIZE, nid, false);
78 if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL)) 83 if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
79 return; 84 return;
80 else if (p) 85 else if (p)
81 memblock_free(__pa(p), PUD_SIZE); 86 memblock_free(__pa(p), PUD_SIZE);
82 } 87 }
83 88
84 p = early_alloc(PAGE_SIZE, nid); 89 p = early_alloc(PAGE_SIZE, nid, true);
85 pud_populate(&init_mm, pud, p); 90 pud_populate(&init_mm, pud, p);
86 } 91 }
87 92
@@ -100,7 +105,7 @@ static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
100 unsigned long next; 105 unsigned long next;
101 106
102 if (p4d_none(*p4d)) { 107 if (p4d_none(*p4d)) {
103 void *p = early_alloc(PAGE_SIZE, nid); 108 void *p = early_alloc(PAGE_SIZE, nid, true);
104 109
105 p4d_populate(&init_mm, p4d, p); 110 p4d_populate(&init_mm, p4d, p);
106 } 111 }
@@ -121,7 +126,7 @@ static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
121 unsigned long next; 126 unsigned long next;
122 127
123 if (pgd_none(*pgd)) { 128 if (pgd_none(*pgd)) {
124 p = early_alloc(PAGE_SIZE, nid); 129 p = early_alloc(PAGE_SIZE, nid, true);
125 pgd_populate(&init_mm, pgd, p); 130 pgd_populate(&init_mm, pgd, p);
126 } 131 }
127 132
@@ -277,6 +282,7 @@ void __init kasan_early_init(void)
277void __init kasan_init(void) 282void __init kasan_init(void)
278{ 283{
279 int i; 284 int i;
285 void *shadow_cpu_entry_begin, *shadow_cpu_entry_end;
280 286
281#ifdef CONFIG_KASAN_INLINE 287#ifdef CONFIG_KASAN_INLINE
282 register_die_notifier(&kasan_die_notifier); 288 register_die_notifier(&kasan_die_notifier);
@@ -321,16 +327,33 @@ void __init kasan_init(void)
321 map_range(&pfn_mapped[i]); 327 map_range(&pfn_mapped[i]);
322 } 328 }
323 329
330 shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
331 shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
332 shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin,
333 PAGE_SIZE);
334
335 shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
336 CPU_ENTRY_AREA_MAP_SIZE);
337 shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
338 shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end,
339 PAGE_SIZE);
340
324 kasan_populate_zero_shadow( 341 kasan_populate_zero_shadow(
325 kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), 342 kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
326 kasan_mem_to_shadow((void *)__START_KERNEL_map)); 343 shadow_cpu_entry_begin);
344
345 kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
346 (unsigned long)shadow_cpu_entry_end, 0);
347
348 kasan_populate_zero_shadow(shadow_cpu_entry_end,
349 kasan_mem_to_shadow((void *)__START_KERNEL_map));
327 350
328 kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext), 351 kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
329 (unsigned long)kasan_mem_to_shadow(_end), 352 (unsigned long)kasan_mem_to_shadow(_end),
330 early_pfn_to_nid(__pa(_stext))); 353 early_pfn_to_nid(__pa(_stext)));
331 354
332 kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), 355 kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
333 (void *)KASAN_SHADOW_END); 356 (void *)KASAN_SHADOW_END);
334 357
335 load_cr3(init_top_pgt); 358 load_cr3(init_top_pgt);
336 __flush_tlb_all(); 359 __flush_tlb_all();
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 879ef930e2c2..aedebd2ebf1e 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -34,25 +34,14 @@
34#define TB_SHIFT 40 34#define TB_SHIFT 40
35 35
36/* 36/*
37 * Virtual address start and end range for randomization. The end changes base 37 * Virtual address start and end range for randomization.
38 * on configuration to have the highest amount of space for randomization.
39 * It increases the possible random position for each randomized region.
40 * 38 *
41 * You need to add an if/def entry if you introduce a new memory region 39 * The end address could depend on more configuration options to make the
42 * compatible with KASLR. Your entry must be in logical order with memory 40 * highest amount of space for randomization available, but that's too hard
43 * layout. For example, ESPFIX is before EFI because its virtual address is 41 * to keep straight and caused issues already.
44 * before. You also need to add a BUILD_BUG_ON() in kernel_randomize_memory() to
45 * ensure that this order is correct and won't be changed.
46 */ 42 */
47static const unsigned long vaddr_start = __PAGE_OFFSET_BASE; 43static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
48 44static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;
49#if defined(CONFIG_X86_ESPFIX64)
50static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
51#elif defined(CONFIG_EFI)
52static const unsigned long vaddr_end = EFI_VA_END;
53#else
54static const unsigned long vaddr_end = __START_KERNEL_map;
55#endif
56 45
57/* Default values */ 46/* Default values */
58unsigned long page_offset_base = __PAGE_OFFSET_BASE; 47unsigned long page_offset_base = __PAGE_OFFSET_BASE;
@@ -101,15 +90,12 @@ void __init kernel_randomize_memory(void)
101 unsigned long remain_entropy; 90 unsigned long remain_entropy;
102 91
103 /* 92 /*
104 * All these BUILD_BUG_ON checks ensures the memory layout is 93 * These BUILD_BUG_ON checks ensure the memory layout is consistent
105 * consistent with the vaddr_start/vaddr_end variables. 94 * with the vaddr_start/vaddr_end variables. These checks are very
95 * limited....
106 */ 96 */
107 BUILD_BUG_ON(vaddr_start >= vaddr_end); 97 BUILD_BUG_ON(vaddr_start >= vaddr_end);
108 BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) && 98 BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE);
109 vaddr_end >= EFI_VA_END);
110 BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
111 IS_ENABLED(CONFIG_EFI)) &&
112 vaddr_end >= __START_KERNEL_map);
113 BUILD_BUG_ON(vaddr_end > __START_KERNEL_map); 99 BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);
114 100
115 if (!kaslr_memory_enabled()) 101 if (!kaslr_memory_enabled())
diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c
deleted file mode 100644
index cec594032515..000000000000
--- a/arch/x86/mm/kmemcheck/error.c
+++ /dev/null
@@ -1 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
diff --git a/arch/x86/mm/kmemcheck/error.h b/arch/x86/mm/kmemcheck/error.h
deleted file mode 100644
index ea32a7d3cf1b..000000000000
--- a/arch/x86/mm/kmemcheck/error.h
+++ /dev/null
@@ -1 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
diff --git a/arch/x86/mm/kmemcheck/opcode.c b/arch/x86/mm/kmemcheck/opcode.c
deleted file mode 100644
index cec594032515..000000000000
--- a/arch/x86/mm/kmemcheck/opcode.c
+++ /dev/null
@@ -1 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
diff --git a/arch/x86/mm/kmemcheck/opcode.h b/arch/x86/mm/kmemcheck/opcode.h
deleted file mode 100644
index ea32a7d3cf1b..000000000000
--- a/arch/x86/mm/kmemcheck/opcode.h
+++ /dev/null
@@ -1 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
diff --git a/arch/x86/mm/kmemcheck/pte.c b/arch/x86/mm/kmemcheck/pte.c
deleted file mode 100644
index cec594032515..000000000000
--- a/arch/x86/mm/kmemcheck/pte.c
+++ /dev/null
@@ -1 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
diff --git a/arch/x86/mm/kmemcheck/pte.h b/arch/x86/mm/kmemcheck/pte.h
deleted file mode 100644
index ea32a7d3cf1b..000000000000
--- a/arch/x86/mm/kmemcheck/pte.h
+++ /dev/null
@@ -1 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
diff --git a/arch/x86/mm/kmemcheck/selftest.c b/arch/x86/mm/kmemcheck/selftest.c
deleted file mode 100644
index cec594032515..000000000000
--- a/arch/x86/mm/kmemcheck/selftest.c
+++ /dev/null
@@ -1 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
diff --git a/arch/x86/mm/kmemcheck/selftest.h b/arch/x86/mm/kmemcheck/selftest.h
deleted file mode 100644
index ea32a7d3cf1b..000000000000
--- a/arch/x86/mm/kmemcheck/selftest.h
+++ /dev/null
@@ -1 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
diff --git a/arch/x86/mm/kmemcheck/shadow.h b/arch/x86/mm/kmemcheck/shadow.h
deleted file mode 100644
index ea32a7d3cf1b..000000000000
--- a/arch/x86/mm/kmemcheck/shadow.h
+++ /dev/null
@@ -1 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index c21c2ed04612..58477ec3d66d 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -435,17 +435,18 @@ int register_kmmio_probe(struct kmmio_probe *p)
435 unsigned long flags; 435 unsigned long flags;
436 int ret = 0; 436 int ret = 0;
437 unsigned long size = 0; 437 unsigned long size = 0;
438 unsigned long addr = p->addr & PAGE_MASK;
438 const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK); 439 const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
439 unsigned int l; 440 unsigned int l;
440 pte_t *pte; 441 pte_t *pte;
441 442
442 spin_lock_irqsave(&kmmio_lock, flags); 443 spin_lock_irqsave(&kmmio_lock, flags);
443 if (get_kmmio_probe(p->addr)) { 444 if (get_kmmio_probe(addr)) {
444 ret = -EEXIST; 445 ret = -EEXIST;
445 goto out; 446 goto out;
446 } 447 }
447 448
448 pte = lookup_address(p->addr, &l); 449 pte = lookup_address(addr, &l);
449 if (!pte) { 450 if (!pte) {
450 ret = -EINVAL; 451 ret = -EINVAL;
451 goto out; 452 goto out;
@@ -454,7 +455,7 @@ int register_kmmio_probe(struct kmmio_probe *p)
454 kmmio_count++; 455 kmmio_count++;
455 list_add_rcu(&p->list, &kmmio_probes); 456 list_add_rcu(&p->list, &kmmio_probes);
456 while (size < size_lim) { 457 while (size < size_lim) {
457 if (add_kmmio_fault_page(p->addr + size)) 458 if (add_kmmio_fault_page(addr + size))
458 pr_err("Unable to set page fault.\n"); 459 pr_err("Unable to set page fault.\n");
459 size += page_level_size(l); 460 size += page_level_size(l);
460 } 461 }
@@ -528,19 +529,20 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
528{ 529{
529 unsigned long flags; 530 unsigned long flags;
530 unsigned long size = 0; 531 unsigned long size = 0;
532 unsigned long addr = p->addr & PAGE_MASK;
531 const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK); 533 const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
532 struct kmmio_fault_page *release_list = NULL; 534 struct kmmio_fault_page *release_list = NULL;
533 struct kmmio_delayed_release *drelease; 535 struct kmmio_delayed_release *drelease;
534 unsigned int l; 536 unsigned int l;
535 pte_t *pte; 537 pte_t *pte;
536 538
537 pte = lookup_address(p->addr, &l); 539 pte = lookup_address(addr, &l);
538 if (!pte) 540 if (!pte)
539 return; 541 return;
540 542
541 spin_lock_irqsave(&kmmio_lock, flags); 543 spin_lock_irqsave(&kmmio_lock, flags);
542 while (size < size_lim) { 544 while (size < size_lim) {
543 release_kmmio_fault_page(p->addr + size, &release_list); 545 release_kmmio_fault_page(addr + size, &release_list);
544 size += page_level_size(l); 546 size += page_level_size(l);
545 } 547 }
546 list_del_rcu(&p->list); 548 list_del_rcu(&p->list);
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index d9a9e9fc75dd..e1d61e8500f9 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -405,13 +405,13 @@ bool sme_active(void)
405{ 405{
406 return sme_me_mask && !sev_enabled; 406 return sme_me_mask && !sev_enabled;
407} 407}
408EXPORT_SYMBOL_GPL(sme_active); 408EXPORT_SYMBOL(sme_active);
409 409
410bool sev_active(void) 410bool sev_active(void)
411{ 411{
412 return sme_me_mask && sev_enabled; 412 return sme_me_mask && sev_enabled;
413} 413}
414EXPORT_SYMBOL_GPL(sev_active); 414EXPORT_SYMBOL(sev_active);
415 415
416static const struct dma_map_ops sev_dma_ops = { 416static const struct dma_map_ops sev_dma_ops = {
417 .alloc = sev_alloc, 417 .alloc = sev_alloc,
@@ -464,37 +464,62 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
464 set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT); 464 set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT);
465} 465}
466 466
467static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start, 467struct sme_populate_pgd_data {
468 unsigned long end) 468 void *pgtable_area;
469 pgd_t *pgd;
470
471 pmdval_t pmd_flags;
472 pteval_t pte_flags;
473 unsigned long paddr;
474
475 unsigned long vaddr;
476 unsigned long vaddr_end;
477};
478
479static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
469{ 480{
470 unsigned long pgd_start, pgd_end, pgd_size; 481 unsigned long pgd_start, pgd_end, pgd_size;
471 pgd_t *pgd_p; 482 pgd_t *pgd_p;
472 483
473 pgd_start = start & PGDIR_MASK; 484 pgd_start = ppd->vaddr & PGDIR_MASK;
474 pgd_end = end & PGDIR_MASK; 485 pgd_end = ppd->vaddr_end & PGDIR_MASK;
475 486
476 pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1); 487 pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t);
477 pgd_size *= sizeof(pgd_t);
478 488
479 pgd_p = pgd_base + pgd_index(start); 489 pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
480 490
481 memset(pgd_p, 0, pgd_size); 491 memset(pgd_p, 0, pgd_size);
482} 492}
483 493
484#define PGD_FLAGS _KERNPG_TABLE_NOENC 494#define PGD_FLAGS _KERNPG_TABLE_NOENC
485#define P4D_FLAGS _KERNPG_TABLE_NOENC 495#define P4D_FLAGS _KERNPG_TABLE_NOENC
486#define PUD_FLAGS _KERNPG_TABLE_NOENC 496#define PUD_FLAGS _KERNPG_TABLE_NOENC
487#define PMD_FLAGS (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL) 497#define PMD_FLAGS _KERNPG_TABLE_NOENC
498
499#define PMD_FLAGS_LARGE (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
500
501#define PMD_FLAGS_DEC PMD_FLAGS_LARGE
502#define PMD_FLAGS_DEC_WP ((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
503 (_PAGE_PAT | _PAGE_PWT))
504
505#define PMD_FLAGS_ENC (PMD_FLAGS_LARGE | _PAGE_ENC)
506
507#define PTE_FLAGS (__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL)
508
509#define PTE_FLAGS_DEC PTE_FLAGS
510#define PTE_FLAGS_DEC_WP ((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
511 (_PAGE_PAT | _PAGE_PWT))
512
513#define PTE_FLAGS_ENC (PTE_FLAGS | _PAGE_ENC)
488 514
489static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area, 515static pmd_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
490 unsigned long vaddr, pmdval_t pmd_val)
491{ 516{
492 pgd_t *pgd_p; 517 pgd_t *pgd_p;
493 p4d_t *p4d_p; 518 p4d_t *p4d_p;
494 pud_t *pud_p; 519 pud_t *pud_p;
495 pmd_t *pmd_p; 520 pmd_t *pmd_p;
496 521
497 pgd_p = pgd_base + pgd_index(vaddr); 522 pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
498 if (native_pgd_val(*pgd_p)) { 523 if (native_pgd_val(*pgd_p)) {
499 if (IS_ENABLED(CONFIG_X86_5LEVEL)) 524 if (IS_ENABLED(CONFIG_X86_5LEVEL))
500 p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK); 525 p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK);
@@ -504,15 +529,15 @@ static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
504 pgd_t pgd; 529 pgd_t pgd;
505 530
506 if (IS_ENABLED(CONFIG_X86_5LEVEL)) { 531 if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
507 p4d_p = pgtable_area; 532 p4d_p = ppd->pgtable_area;
508 memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D); 533 memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
509 pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D; 534 ppd->pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D;
510 535
511 pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS); 536 pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS);
512 } else { 537 } else {
513 pud_p = pgtable_area; 538 pud_p = ppd->pgtable_area;
514 memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD); 539 memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
515 pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD; 540 ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
516 541
517 pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS); 542 pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS);
518 } 543 }
@@ -520,58 +545,160 @@ static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
520 } 545 }
521 546
522 if (IS_ENABLED(CONFIG_X86_5LEVEL)) { 547 if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
523 p4d_p += p4d_index(vaddr); 548 p4d_p += p4d_index(ppd->vaddr);
524 if (native_p4d_val(*p4d_p)) { 549 if (native_p4d_val(*p4d_p)) {
525 pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK); 550 pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK);
526 } else { 551 } else {
527 p4d_t p4d; 552 p4d_t p4d;
528 553
529 pud_p = pgtable_area; 554 pud_p = ppd->pgtable_area;
530 memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD); 555 memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
531 pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD; 556 ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
532 557
533 p4d = native_make_p4d((pudval_t)pud_p + P4D_FLAGS); 558 p4d = native_make_p4d((pudval_t)pud_p + P4D_FLAGS);
534 native_set_p4d(p4d_p, p4d); 559 native_set_p4d(p4d_p, p4d);
535 } 560 }
536 } 561 }
537 562
538 pud_p += pud_index(vaddr); 563 pud_p += pud_index(ppd->vaddr);
539 if (native_pud_val(*pud_p)) { 564 if (native_pud_val(*pud_p)) {
540 if (native_pud_val(*pud_p) & _PAGE_PSE) 565 if (native_pud_val(*pud_p) & _PAGE_PSE)
541 goto out; 566 return NULL;
542 567
543 pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK); 568 pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK);
544 } else { 569 } else {
545 pud_t pud; 570 pud_t pud;
546 571
547 pmd_p = pgtable_area; 572 pmd_p = ppd->pgtable_area;
548 memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD); 573 memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
549 pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD; 574 ppd->pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD;
550 575
551 pud = native_make_pud((pmdval_t)pmd_p + PUD_FLAGS); 576 pud = native_make_pud((pmdval_t)pmd_p + PUD_FLAGS);
552 native_set_pud(pud_p, pud); 577 native_set_pud(pud_p, pud);
553 } 578 }
554 579
555 pmd_p += pmd_index(vaddr); 580 return pmd_p;
581}
582
583static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
584{
585 pmd_t *pmd_p;
586
587 pmd_p = sme_prepare_pgd(ppd);
588 if (!pmd_p)
589 return;
590
591 pmd_p += pmd_index(ppd->vaddr);
556 if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE)) 592 if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE))
557 native_set_pmd(pmd_p, native_make_pmd(pmd_val)); 593 native_set_pmd(pmd_p, native_make_pmd(ppd->paddr | ppd->pmd_flags));
594}
558 595
559out: 596static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
560 return pgtable_area; 597{
598 pmd_t *pmd_p;
599 pte_t *pte_p;
600
601 pmd_p = sme_prepare_pgd(ppd);
602 if (!pmd_p)
603 return;
604
605 pmd_p += pmd_index(ppd->vaddr);
606 if (native_pmd_val(*pmd_p)) {
607 if (native_pmd_val(*pmd_p) & _PAGE_PSE)
608 return;
609
610 pte_p = (pte_t *)(native_pmd_val(*pmd_p) & ~PTE_FLAGS_MASK);
611 } else {
612 pmd_t pmd;
613
614 pte_p = ppd->pgtable_area;
615 memset(pte_p, 0, sizeof(*pte_p) * PTRS_PER_PTE);
616 ppd->pgtable_area += sizeof(*pte_p) * PTRS_PER_PTE;
617
618 pmd = native_make_pmd((pteval_t)pte_p + PMD_FLAGS);
619 native_set_pmd(pmd_p, pmd);
620 }
621
622 pte_p += pte_index(ppd->vaddr);
623 if (!native_pte_val(*pte_p))
624 native_set_pte(pte_p, native_make_pte(ppd->paddr | ppd->pte_flags));
625}
626
627static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
628{
629 while (ppd->vaddr < ppd->vaddr_end) {
630 sme_populate_pgd_large(ppd);
631
632 ppd->vaddr += PMD_PAGE_SIZE;
633 ppd->paddr += PMD_PAGE_SIZE;
634 }
635}
636
637static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
638{
639 while (ppd->vaddr < ppd->vaddr_end) {
640 sme_populate_pgd(ppd);
641
642 ppd->vaddr += PAGE_SIZE;
643 ppd->paddr += PAGE_SIZE;
644 }
645}
646
647static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
648 pmdval_t pmd_flags, pteval_t pte_flags)
649{
650 unsigned long vaddr_end;
651
652 ppd->pmd_flags = pmd_flags;
653 ppd->pte_flags = pte_flags;
654
655 /* Save original end value since we modify the struct value */
656 vaddr_end = ppd->vaddr_end;
657
658 /* If start is not 2MB aligned, create PTE entries */
659 ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE);
660 __sme_map_range_pte(ppd);
661
662 /* Create PMD entries */
663 ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK;
664 __sme_map_range_pmd(ppd);
665
666 /* If end is not 2MB aligned, create PTE entries */
667 ppd->vaddr_end = vaddr_end;
668 __sme_map_range_pte(ppd);
669}
670
671static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
672{
673 __sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
674}
675
676static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
677{
678 __sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
679}
680
681static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
682{
683 __sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
561} 684}
562 685
563static unsigned long __init sme_pgtable_calc(unsigned long len) 686static unsigned long __init sme_pgtable_calc(unsigned long len)
564{ 687{
565 unsigned long p4d_size, pud_size, pmd_size; 688 unsigned long p4d_size, pud_size, pmd_size, pte_size;
566 unsigned long total; 689 unsigned long total;
567 690
568 /* 691 /*
569 * Perform a relatively simplistic calculation of the pagetable 692 * Perform a relatively simplistic calculation of the pagetable
570 * entries that are needed. That mappings will be covered by 2MB 693 * entries that are needed. Those mappings will be covered mostly
571 * PMD entries so we can conservatively calculate the required 694 * by 2MB PMD entries so we can conservatively calculate the required
572 * number of P4D, PUD and PMD structures needed to perform the 695 * number of P4D, PUD and PMD structures needed to perform the
573 * mappings. Incrementing the count for each covers the case where 696 * mappings. For mappings that are not 2MB aligned, PTE mappings
574 * the addresses cross entries. 697 * would be needed for the start and end portion of the address range
698 * that fall outside of the 2MB alignment. This results in, at most,
699 * two extra pages to hold PTE entries for each range that is mapped.
700 * Incrementing the count for each covers the case where the addresses
701 * cross entries.
575 */ 702 */
576 if (IS_ENABLED(CONFIG_X86_5LEVEL)) { 703 if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
577 p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1; 704 p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1;
@@ -585,8 +712,9 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
585 } 712 }
586 pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1; 713 pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1;
587 pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD; 714 pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD;
715 pte_size = 2 * sizeof(pte_t) * PTRS_PER_PTE;
588 716
589 total = p4d_size + pud_size + pmd_size; 717 total = p4d_size + pud_size + pmd_size + pte_size;
590 718
591 /* 719 /*
592 * Now calculate the added pagetable structures needed to populate 720 * Now calculate the added pagetable structures needed to populate
@@ -610,29 +738,29 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
610 return total; 738 return total;
611} 739}
612 740
613void __init sme_encrypt_kernel(void) 741void __init __nostackprotector sme_encrypt_kernel(struct boot_params *bp)
614{ 742{
615 unsigned long workarea_start, workarea_end, workarea_len; 743 unsigned long workarea_start, workarea_end, workarea_len;
616 unsigned long execute_start, execute_end, execute_len; 744 unsigned long execute_start, execute_end, execute_len;
617 unsigned long kernel_start, kernel_end, kernel_len; 745 unsigned long kernel_start, kernel_end, kernel_len;
746 unsigned long initrd_start, initrd_end, initrd_len;
747 struct sme_populate_pgd_data ppd;
618 unsigned long pgtable_area_len; 748 unsigned long pgtable_area_len;
619 unsigned long paddr, pmd_flags;
620 unsigned long decrypted_base; 749 unsigned long decrypted_base;
621 void *pgtable_area;
622 pgd_t *pgd;
623 750
624 if (!sme_active()) 751 if (!sme_active())
625 return; 752 return;
626 753
627 /* 754 /*
628 * Prepare for encrypting the kernel by building new pagetables with 755 * Prepare for encrypting the kernel and initrd by building new
629 * the necessary attributes needed to encrypt the kernel in place. 756 * pagetables with the necessary attributes needed to encrypt the
757 * kernel in place.
630 * 758 *
631 * One range of virtual addresses will map the memory occupied 759 * One range of virtual addresses will map the memory occupied
632 * by the kernel as encrypted. 760 * by the kernel and initrd as encrypted.
633 * 761 *
634 * Another range of virtual addresses will map the memory occupied 762 * Another range of virtual addresses will map the memory occupied
635 * by the kernel as decrypted and write-protected. 763 * by the kernel and initrd as decrypted and write-protected.
636 * 764 *
637 * The use of write-protect attribute will prevent any of the 765 * The use of write-protect attribute will prevent any of the
638 * memory from being cached. 766 * memory from being cached.
@@ -643,6 +771,20 @@ void __init sme_encrypt_kernel(void)
643 kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE); 771 kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE);
644 kernel_len = kernel_end - kernel_start; 772 kernel_len = kernel_end - kernel_start;
645 773
774 initrd_start = 0;
775 initrd_end = 0;
776 initrd_len = 0;
777#ifdef CONFIG_BLK_DEV_INITRD
778 initrd_len = (unsigned long)bp->hdr.ramdisk_size |
779 ((unsigned long)bp->ext_ramdisk_size << 32);
780 if (initrd_len) {
781 initrd_start = (unsigned long)bp->hdr.ramdisk_image |
782 ((unsigned long)bp->ext_ramdisk_image << 32);
783 initrd_end = PAGE_ALIGN(initrd_start + initrd_len);
784 initrd_len = initrd_end - initrd_start;
785 }
786#endif
787
646 /* Set the encryption workarea to be immediately after the kernel */ 788 /* Set the encryption workarea to be immediately after the kernel */
647 workarea_start = kernel_end; 789 workarea_start = kernel_end;
648 790
@@ -665,16 +807,21 @@ void __init sme_encrypt_kernel(void)
665 */ 807 */
666 pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD; 808 pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
667 pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2; 809 pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;
810 if (initrd_len)
811 pgtable_area_len += sme_pgtable_calc(initrd_len) * 2;
668 812
669 /* PUDs and PMDs needed in the current pagetables for the workarea */ 813 /* PUDs and PMDs needed in the current pagetables for the workarea */
670 pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len); 814 pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);
671 815
672 /* 816 /*
673 * The total workarea includes the executable encryption area and 817 * The total workarea includes the executable encryption area and
674 * the pagetable area. 818 * the pagetable area. The start of the workarea is already 2MB
819 * aligned, align the end of the workarea on a 2MB boundary so that
820 * we don't try to create/allocate PTE entries from the workarea
821 * before it is mapped.
675 */ 822 */
676 workarea_len = execute_len + pgtable_area_len; 823 workarea_len = execute_len + pgtable_area_len;
677 workarea_end = workarea_start + workarea_len; 824 workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE);
678 825
679 /* 826 /*
680 * Set the address to the start of where newly created pagetable 827 * Set the address to the start of where newly created pagetable
@@ -683,45 +830,30 @@ void __init sme_encrypt_kernel(void)
683 * pagetables and when the new encrypted and decrypted kernel 830 * pagetables and when the new encrypted and decrypted kernel
684 * mappings are populated. 831 * mappings are populated.
685 */ 832 */
686 pgtable_area = (void *)execute_end; 833 ppd.pgtable_area = (void *)execute_end;
687 834
688 /* 835 /*
689 * Make sure the current pagetable structure has entries for 836 * Make sure the current pagetable structure has entries for
690 * addressing the workarea. 837 * addressing the workarea.
691 */ 838 */
692 pgd = (pgd_t *)native_read_cr3_pa(); 839 ppd.pgd = (pgd_t *)native_read_cr3_pa();
693 paddr = workarea_start; 840 ppd.paddr = workarea_start;
694 while (paddr < workarea_end) { 841 ppd.vaddr = workarea_start;
695 pgtable_area = sme_populate_pgd(pgd, pgtable_area, 842 ppd.vaddr_end = workarea_end;
696 paddr, 843 sme_map_range_decrypted(&ppd);
697 paddr + PMD_FLAGS);
698
699 paddr += PMD_PAGE_SIZE;
700 }
701 844
702 /* Flush the TLB - no globals so cr3 is enough */ 845 /* Flush the TLB - no globals so cr3 is enough */
703 native_write_cr3(__native_read_cr3()); 846 native_write_cr3(__native_read_cr3());
704 847
705 /* 848 /*
706 * A new pagetable structure is being built to allow for the kernel 849 * A new pagetable structure is being built to allow for the kernel
707 * to be encrypted. It starts with an empty PGD that will then be 850 * and initrd to be encrypted. It starts with an empty PGD that will
708 * populated with new PUDs and PMDs as the encrypted and decrypted 851 * then be populated with new PUDs and PMDs as the encrypted and
709 * kernel mappings are created. 852 * decrypted kernel mappings are created.
710 */ 853 */
711 pgd = pgtable_area; 854 ppd.pgd = ppd.pgtable_area;
712 memset(pgd, 0, sizeof(*pgd) * PTRS_PER_PGD); 855 memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
713 pgtable_area += sizeof(*pgd) * PTRS_PER_PGD; 856 ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD;
714
715 /* Add encrypted kernel (identity) mappings */
716 pmd_flags = PMD_FLAGS | _PAGE_ENC;
717 paddr = kernel_start;
718 while (paddr < kernel_end) {
719 pgtable_area = sme_populate_pgd(pgd, pgtable_area,
720 paddr,
721 paddr + pmd_flags);
722
723 paddr += PMD_PAGE_SIZE;
724 }
725 857
726 /* 858 /*
727 * A different PGD index/entry must be used to get different 859 * A different PGD index/entry must be used to get different
@@ -730,47 +862,79 @@ void __init sme_encrypt_kernel(void)
730 * the base of the mapping. 862 * the base of the mapping.
731 */ 863 */
732 decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1); 864 decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
865 if (initrd_len) {
866 unsigned long check_base;
867
868 check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);
869 decrypted_base = max(decrypted_base, check_base);
870 }
733 decrypted_base <<= PGDIR_SHIFT; 871 decrypted_base <<= PGDIR_SHIFT;
734 872
873 /* Add encrypted kernel (identity) mappings */
874 ppd.paddr = kernel_start;
875 ppd.vaddr = kernel_start;
876 ppd.vaddr_end = kernel_end;
877 sme_map_range_encrypted(&ppd);
878
735 /* Add decrypted, write-protected kernel (non-identity) mappings */ 879 /* Add decrypted, write-protected kernel (non-identity) mappings */
736 pmd_flags = (PMD_FLAGS & ~_PAGE_CACHE_MASK) | (_PAGE_PAT | _PAGE_PWT); 880 ppd.paddr = kernel_start;
737 paddr = kernel_start; 881 ppd.vaddr = kernel_start + decrypted_base;
738 while (paddr < kernel_end) { 882 ppd.vaddr_end = kernel_end + decrypted_base;
739 pgtable_area = sme_populate_pgd(pgd, pgtable_area, 883 sme_map_range_decrypted_wp(&ppd);
740 paddr + decrypted_base, 884
741 paddr + pmd_flags); 885 if (initrd_len) {
742 886 /* Add encrypted initrd (identity) mappings */
743 paddr += PMD_PAGE_SIZE; 887 ppd.paddr = initrd_start;
888 ppd.vaddr = initrd_start;
889 ppd.vaddr_end = initrd_end;
890 sme_map_range_encrypted(&ppd);
891 /*
892 * Add decrypted, write-protected initrd (non-identity) mappings
893 */
894 ppd.paddr = initrd_start;
895 ppd.vaddr = initrd_start + decrypted_base;
896 ppd.vaddr_end = initrd_end + decrypted_base;
897 sme_map_range_decrypted_wp(&ppd);
744 } 898 }
745 899
746 /* Add decrypted workarea mappings to both kernel mappings */ 900 /* Add decrypted workarea mappings to both kernel mappings */
747 paddr = workarea_start; 901 ppd.paddr = workarea_start;
748 while (paddr < workarea_end) { 902 ppd.vaddr = workarea_start;
749 pgtable_area = sme_populate_pgd(pgd, pgtable_area, 903 ppd.vaddr_end = workarea_end;
750 paddr, 904 sme_map_range_decrypted(&ppd);
751 paddr + PMD_FLAGS);
752 905
753 pgtable_area = sme_populate_pgd(pgd, pgtable_area, 906 ppd.paddr = workarea_start;
754 paddr + decrypted_base, 907 ppd.vaddr = workarea_start + decrypted_base;
755 paddr + PMD_FLAGS); 908 ppd.vaddr_end = workarea_end + decrypted_base;
756 909 sme_map_range_decrypted(&ppd);
757 paddr += PMD_PAGE_SIZE;
758 }
759 910
760 /* Perform the encryption */ 911 /* Perform the encryption */
761 sme_encrypt_execute(kernel_start, kernel_start + decrypted_base, 912 sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
762 kernel_len, workarea_start, (unsigned long)pgd); 913 kernel_len, workarea_start, (unsigned long)ppd.pgd);
914
915 if (initrd_len)
916 sme_encrypt_execute(initrd_start, initrd_start + decrypted_base,
917 initrd_len, workarea_start,
918 (unsigned long)ppd.pgd);
763 919
764 /* 920 /*
765 * At this point we are running encrypted. Remove the mappings for 921 * At this point we are running encrypted. Remove the mappings for
766 * the decrypted areas - all that is needed for this is to remove 922 * the decrypted areas - all that is needed for this is to remove
767 * the PGD entry/entries. 923 * the PGD entry/entries.
768 */ 924 */
769 sme_clear_pgd(pgd, kernel_start + decrypted_base, 925 ppd.vaddr = kernel_start + decrypted_base;
770 kernel_end + decrypted_base); 926 ppd.vaddr_end = kernel_end + decrypted_base;
927 sme_clear_pgd(&ppd);
928
929 if (initrd_len) {
930 ppd.vaddr = initrd_start + decrypted_base;
931 ppd.vaddr_end = initrd_end + decrypted_base;
932 sme_clear_pgd(&ppd);
933 }
771 934
772 sme_clear_pgd(pgd, workarea_start + decrypted_base, 935 ppd.vaddr = workarea_start + decrypted_base;
773 workarea_end + decrypted_base); 936 ppd.vaddr_end = workarea_end + decrypted_base;
937 sme_clear_pgd(&ppd);
774 938
775 /* Flush the TLB - no globals so cr3 is enough */ 939 /* Flush the TLB - no globals so cr3 is enough */
776 native_write_cr3(__native_read_cr3()); 940 native_write_cr3(__native_read_cr3());
diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S
index 730e6d541df1..01f682cf77a8 100644
--- a/arch/x86/mm/mem_encrypt_boot.S
+++ b/arch/x86/mm/mem_encrypt_boot.S
@@ -22,9 +22,9 @@ ENTRY(sme_encrypt_execute)
22 22
23 /* 23 /*
24 * Entry parameters: 24 * Entry parameters:
25 * RDI - virtual address for the encrypted kernel mapping 25 * RDI - virtual address for the encrypted mapping
26 * RSI - virtual address for the decrypted kernel mapping 26 * RSI - virtual address for the decrypted mapping
27 * RDX - length of kernel 27 * RDX - length to encrypt
28 * RCX - virtual address of the encryption workarea, including: 28 * RCX - virtual address of the encryption workarea, including:
29 * - stack page (PAGE_SIZE) 29 * - stack page (PAGE_SIZE)
30 * - encryption routine page (PAGE_SIZE) 30 * - encryption routine page (PAGE_SIZE)
@@ -41,9 +41,9 @@ ENTRY(sme_encrypt_execute)
41 addq $PAGE_SIZE, %rax /* Workarea encryption routine */ 41 addq $PAGE_SIZE, %rax /* Workarea encryption routine */
42 42
43 push %r12 43 push %r12
44 movq %rdi, %r10 /* Encrypted kernel */ 44 movq %rdi, %r10 /* Encrypted area */
45 movq %rsi, %r11 /* Decrypted kernel */ 45 movq %rsi, %r11 /* Decrypted area */
46 movq %rdx, %r12 /* Kernel length */ 46 movq %rdx, %r12 /* Area length */
47 47
48 /* Copy encryption routine into the workarea */ 48 /* Copy encryption routine into the workarea */
49 movq %rax, %rdi /* Workarea encryption routine */ 49 movq %rax, %rdi /* Workarea encryption routine */
@@ -52,10 +52,10 @@ ENTRY(sme_encrypt_execute)
52 rep movsb 52 rep movsb
53 53
54 /* Setup registers for call */ 54 /* Setup registers for call */
55 movq %r10, %rdi /* Encrypted kernel */ 55 movq %r10, %rdi /* Encrypted area */
56 movq %r11, %rsi /* Decrypted kernel */ 56 movq %r11, %rsi /* Decrypted area */
57 movq %r8, %rdx /* Pagetables used for encryption */ 57 movq %r8, %rdx /* Pagetables used for encryption */
58 movq %r12, %rcx /* Kernel length */ 58 movq %r12, %rcx /* Area length */
59 movq %rax, %r8 /* Workarea encryption routine */ 59 movq %rax, %r8 /* Workarea encryption routine */
60 addq $PAGE_SIZE, %r8 /* Workarea intermediate copy buffer */ 60 addq $PAGE_SIZE, %r8 /* Workarea intermediate copy buffer */
61 61
@@ -71,7 +71,7 @@ ENDPROC(sme_encrypt_execute)
71 71
72ENTRY(__enc_copy) 72ENTRY(__enc_copy)
73/* 73/*
74 * Routine used to encrypt kernel. 74 * Routine used to encrypt memory in place.
75 * This routine must be run outside of the kernel proper since 75 * This routine must be run outside of the kernel proper since
76 * the kernel will be encrypted during the process. So this 76 * the kernel will be encrypted during the process. So this
77 * routine is defined here and then copied to an area outside 77 * routine is defined here and then copied to an area outside
@@ -79,19 +79,19 @@ ENTRY(__enc_copy)
79 * during execution. 79 * during execution.
80 * 80 *
81 * On entry the registers must be: 81 * On entry the registers must be:
82 * RDI - virtual address for the encrypted kernel mapping 82 * RDI - virtual address for the encrypted mapping
83 * RSI - virtual address for the decrypted kernel mapping 83 * RSI - virtual address for the decrypted mapping
84 * RDX - address of the pagetables to use for encryption 84 * RDX - address of the pagetables to use for encryption
85 * RCX - length of kernel 85 * RCX - length of area
86 * R8 - intermediate copy buffer 86 * R8 - intermediate copy buffer
87 * 87 *
88 * RAX - points to this routine 88 * RAX - points to this routine
89 * 89 *
90 * The kernel will be encrypted by copying from the non-encrypted 90 * The area will be encrypted by copying from the non-encrypted
91 * kernel space to an intermediate buffer and then copying from the 91 * memory space to an intermediate buffer and then copying from the
92 * intermediate buffer back to the encrypted kernel space. The physical 92 * intermediate buffer back to the encrypted memory space. The physical
93 * addresses of the two kernel space mappings are the same which 93 * addresses of the two mappings are the same which results in the area
94 * results in the kernel being encrypted "in place". 94 * being encrypted "in place".
95 */ 95 */
96 /* Enable the new page tables */ 96 /* Enable the new page tables */
97 mov %rdx, %cr3 97 mov %rdx, %cr3
@@ -103,47 +103,55 @@ ENTRY(__enc_copy)
103 orq $X86_CR4_PGE, %rdx 103 orq $X86_CR4_PGE, %rdx
104 mov %rdx, %cr4 104 mov %rdx, %cr4
105 105
106 push %r15
107 push %r12
108
109 movq %rcx, %r9 /* Save area length */
110 movq %rdi, %r10 /* Save encrypted area address */
111 movq %rsi, %r11 /* Save decrypted area address */
112
106 /* Set the PAT register PA5 entry to write-protect */ 113 /* Set the PAT register PA5 entry to write-protect */
107 push %rcx
108 movl $MSR_IA32_CR_PAT, %ecx 114 movl $MSR_IA32_CR_PAT, %ecx
109 rdmsr 115 rdmsr
110 push %rdx /* Save original PAT value */ 116 mov %rdx, %r15 /* Save original PAT value */
111 andl $0xffff00ff, %edx /* Clear PA5 */ 117 andl $0xffff00ff, %edx /* Clear PA5 */
112 orl $0x00000500, %edx /* Set PA5 to WP */ 118 orl $0x00000500, %edx /* Set PA5 to WP */
113 wrmsr 119 wrmsr
114 pop %rdx /* RDX contains original PAT value */
115 pop %rcx
116
117 movq %rcx, %r9 /* Save kernel length */
118 movq %rdi, %r10 /* Save encrypted kernel address */
119 movq %rsi, %r11 /* Save decrypted kernel address */
120 120
121 wbinvd /* Invalidate any cache entries */ 121 wbinvd /* Invalidate any cache entries */
122 122
123 /* Copy/encrypt 2MB at a time */ 123 /* Copy/encrypt up to 2MB at a time */
124 movq $PMD_PAGE_SIZE, %r12
1241: 1251:
125 movq %r11, %rsi /* Source - decrypted kernel */ 126 cmpq %r12, %r9
127 jnb 2f
128 movq %r9, %r12
129
1302:
131 movq %r11, %rsi /* Source - decrypted area */
126 movq %r8, %rdi /* Dest - intermediate copy buffer */ 132 movq %r8, %rdi /* Dest - intermediate copy buffer */
127 movq $PMD_PAGE_SIZE, %rcx /* 2MB length */ 133 movq %r12, %rcx
128 rep movsb 134 rep movsb
129 135
130 movq %r8, %rsi /* Source - intermediate copy buffer */ 136 movq %r8, %rsi /* Source - intermediate copy buffer */
131 movq %r10, %rdi /* Dest - encrypted kernel */ 137 movq %r10, %rdi /* Dest - encrypted area */
132 movq $PMD_PAGE_SIZE, %rcx /* 2MB length */ 138 movq %r12, %rcx
133 rep movsb 139 rep movsb
134 140
135 addq $PMD_PAGE_SIZE, %r11 141 addq %r12, %r11
136 addq $PMD_PAGE_SIZE, %r10 142 addq %r12, %r10
137 subq $PMD_PAGE_SIZE, %r9 /* Kernel length decrement */ 143 subq %r12, %r9 /* Kernel length decrement */
138 jnz 1b /* Kernel length not zero? */ 144 jnz 1b /* Kernel length not zero? */
139 145
140 /* Restore PAT register */ 146 /* Restore PAT register */
141 push %rdx /* Save original PAT value */
142 movl $MSR_IA32_CR_PAT, %ecx 147 movl $MSR_IA32_CR_PAT, %ecx
143 rdmsr 148 rdmsr
144 pop %rdx /* Restore original PAT value */ 149 mov %r15, %rdx /* Restore original PAT value */
145 wrmsr 150 wrmsr
146 151
152 pop %r12
153 pop %r15
154
147 ret 155 ret
148.L__enc_copy_end: 156.L__enc_copy_end:
149ENDPROC(__enc_copy) 157ENDPROC(__enc_copy)
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 96d456a94b03..004abf9ebf12 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -355,14 +355,15 @@ static inline void _pgd_free(pgd_t *pgd)
355 kmem_cache_free(pgd_cache, pgd); 355 kmem_cache_free(pgd_cache, pgd);
356} 356}
357#else 357#else
358
358static inline pgd_t *_pgd_alloc(void) 359static inline pgd_t *_pgd_alloc(void)
359{ 360{
360 return (pgd_t *)__get_free_page(PGALLOC_GFP); 361 return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
361} 362}
362 363
363static inline void _pgd_free(pgd_t *pgd) 364static inline void _pgd_free(pgd_t *pgd)
364{ 365{
365 free_page((unsigned long)pgd); 366 free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
366} 367}
367#endif /* CONFIG_X86_PAE */ 368#endif /* CONFIG_X86_PAE */
368 369
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index 6b9bf023a700..c3c5274410a9 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -10,6 +10,7 @@
10#include <linux/pagemap.h> 10#include <linux/pagemap.h>
11#include <linux/spinlock.h> 11#include <linux/spinlock.h>
12 12
13#include <asm/cpu_entry_area.h>
13#include <asm/pgtable.h> 14#include <asm/pgtable.h>
14#include <asm/pgalloc.h> 15#include <asm/pgalloc.h>
15#include <asm/fixmap.h> 16#include <asm/fixmap.h>
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
new file mode 100644
index 000000000000..ce38f165489b
--- /dev/null
+++ b/arch/x86/mm/pti.c
@@ -0,0 +1,368 @@
1/*
2 * Copyright(c) 2017 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * This code is based in part on work published here:
14 *
15 * https://github.com/IAIK/KAISER
16 *
17 * The original work was written by and and signed off by for the Linux
18 * kernel by:
19 *
20 * Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
21 * Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
22 * Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
23 * Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
24 *
25 * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
26 * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
27 * Andy Lutomirsky <luto@amacapital.net>
28 */
29#include <linux/kernel.h>
30#include <linux/errno.h>
31#include <linux/string.h>
32#include <linux/types.h>
33#include <linux/bug.h>
34#include <linux/init.h>
35#include <linux/spinlock.h>
36#include <linux/mm.h>
37#include <linux/uaccess.h>
38
39#include <asm/cpufeature.h>
40#include <asm/hypervisor.h>
41#include <asm/vsyscall.h>
42#include <asm/cmdline.h>
43#include <asm/pti.h>
44#include <asm/pgtable.h>
45#include <asm/pgalloc.h>
46#include <asm/tlbflush.h>
47#include <asm/desc.h>
48
49#undef pr_fmt
50#define pr_fmt(fmt) "Kernel/User page tables isolation: " fmt
51
52/* Backporting helper */
53#ifndef __GFP_NOTRACK
54#define __GFP_NOTRACK 0
55#endif
56
57static void __init pti_print_if_insecure(const char *reason)
58{
59 if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
60 pr_info("%s\n", reason);
61}
62
63static void __init pti_print_if_secure(const char *reason)
64{
65 if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
66 pr_info("%s\n", reason);
67}
68
69void __init pti_check_boottime_disable(void)
70{
71 char arg[5];
72 int ret;
73
74 if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
75 pti_print_if_insecure("disabled on XEN PV.");
76 return;
77 }
78
79 ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
80 if (ret > 0) {
81 if (ret == 3 && !strncmp(arg, "off", 3)) {
82 pti_print_if_insecure("disabled on command line.");
83 return;
84 }
85 if (ret == 2 && !strncmp(arg, "on", 2)) {
86 pti_print_if_secure("force enabled on command line.");
87 goto enable;
88 }
89 if (ret == 4 && !strncmp(arg, "auto", 4))
90 goto autosel;
91 }
92
93 if (cmdline_find_option_bool(boot_command_line, "nopti")) {
94 pti_print_if_insecure("disabled on command line.");
95 return;
96 }
97
98autosel:
99 if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
100 return;
101enable:
102 setup_force_cpu_cap(X86_FEATURE_PTI);
103}
104
105pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
106{
107 /*
108 * Changes to the high (kernel) portion of the kernelmode page
109 * tables are not automatically propagated to the usermode tables.
110 *
111 * Users should keep in mind that, unlike the kernelmode tables,
112 * there is no vmalloc_fault equivalent for the usermode tables.
113 * Top-level entries added to init_mm's usermode pgd after boot
114 * will not be automatically propagated to other mms.
115 */
116 if (!pgdp_maps_userspace(pgdp))
117 return pgd;
118
119 /*
120 * The user page tables get the full PGD, accessible from
121 * userspace:
122 */
123 kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;
124
125 /*
126 * If this is normal user memory, make it NX in the kernel
127 * pagetables so that, if we somehow screw up and return to
128 * usermode with the kernel CR3 loaded, we'll get a page fault
129 * instead of allowing user code to execute with the wrong CR3.
130 *
131 * As exceptions, we don't set NX if:
132 * - _PAGE_USER is not set. This could be an executable
133 * EFI runtime mapping or something similar, and the kernel
134 * may execute from it
135 * - we don't have NX support
136 * - we're clearing the PGD (i.e. the new pgd is not present).
137 */
138 if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
139 (__supported_pte_mask & _PAGE_NX))
140 pgd.pgd |= _PAGE_NX;
141
142 /* return the copy of the PGD we want the kernel to use: */
143 return pgd;
144}
145
146/*
147 * Walk the user copy of the page tables (optionally) trying to allocate
148 * page table pages on the way down.
149 *
150 * Returns a pointer to a P4D on success, or NULL on failure.
151 */
152static __init p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
153{
154 pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
155 gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
156
157 if (address < PAGE_OFFSET) {
158 WARN_ONCE(1, "attempt to walk user address\n");
159 return NULL;
160 }
161
162 if (pgd_none(*pgd)) {
163 unsigned long new_p4d_page = __get_free_page(gfp);
164 if (!new_p4d_page)
165 return NULL;
166
167 set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
168 }
169 BUILD_BUG_ON(pgd_large(*pgd) != 0);
170
171 return p4d_offset(pgd, address);
172}
173
174/*
175 * Walk the user copy of the page tables (optionally) trying to allocate
176 * page table pages on the way down.
177 *
178 * Returns a pointer to a PMD on success, or NULL on failure.
179 */
180static __init pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
181{
182 gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
183 p4d_t *p4d = pti_user_pagetable_walk_p4d(address);
184 pud_t *pud;
185
186 BUILD_BUG_ON(p4d_large(*p4d) != 0);
187 if (p4d_none(*p4d)) {
188 unsigned long new_pud_page = __get_free_page(gfp);
189 if (!new_pud_page)
190 return NULL;
191
192 set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
193 }
194
195 pud = pud_offset(p4d, address);
196 /* The user page tables do not use large mappings: */
197 if (pud_large(*pud)) {
198 WARN_ON(1);
199 return NULL;
200 }
201 if (pud_none(*pud)) {
202 unsigned long new_pmd_page = __get_free_page(gfp);
203 if (!new_pmd_page)
204 return NULL;
205
206 set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
207 }
208
209 return pmd_offset(pud, address);
210}
211
212#ifdef CONFIG_X86_VSYSCALL_EMULATION
213/*
214 * Walk the shadow copy of the page tables (optionally) trying to allocate
215 * page table pages on the way down. Does not support large pages.
216 *
217 * Note: this is only used when mapping *new* kernel data into the
218 * user/shadow page tables. It is never used for userspace data.
219 *
220 * Returns a pointer to a PTE on success, or NULL on failure.
221 */
222static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
223{
224 gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
225 pmd_t *pmd = pti_user_pagetable_walk_pmd(address);
226 pte_t *pte;
227
228 /* We can't do anything sensible if we hit a large mapping. */
229 if (pmd_large(*pmd)) {
230 WARN_ON(1);
231 return NULL;
232 }
233
234 if (pmd_none(*pmd)) {
235 unsigned long new_pte_page = __get_free_page(gfp);
236 if (!new_pte_page)
237 return NULL;
238
239 set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
240 }
241
242 pte = pte_offset_kernel(pmd, address);
243 if (pte_flags(*pte) & _PAGE_USER) {
244 WARN_ONCE(1, "attempt to walk to user pte\n");
245 return NULL;
246 }
247 return pte;
248}
249
250static void __init pti_setup_vsyscall(void)
251{
252 pte_t *pte, *target_pte;
253 unsigned int level;
254
255 pte = lookup_address(VSYSCALL_ADDR, &level);
256 if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
257 return;
258
259 target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
260 if (WARN_ON(!target_pte))
261 return;
262
263 *target_pte = *pte;
264 set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
265}
266#else
267static void __init pti_setup_vsyscall(void) { }
268#endif
269
270static void __init
271pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
272{
273 unsigned long addr;
274
275 /*
276 * Clone the populated PMDs which cover start to end. These PMD areas
277 * can have holes.
278 */
279 for (addr = start; addr < end; addr += PMD_SIZE) {
280 pmd_t *pmd, *target_pmd;
281 pgd_t *pgd;
282 p4d_t *p4d;
283 pud_t *pud;
284
285 pgd = pgd_offset_k(addr);
286 if (WARN_ON(pgd_none(*pgd)))
287 return;
288 p4d = p4d_offset(pgd, addr);
289 if (WARN_ON(p4d_none(*p4d)))
290 return;
291 pud = pud_offset(p4d, addr);
292 if (pud_none(*pud))
293 continue;
294 pmd = pmd_offset(pud, addr);
295 if (pmd_none(*pmd))
296 continue;
297
298 target_pmd = pti_user_pagetable_walk_pmd(addr);
299 if (WARN_ON(!target_pmd))
300 return;
301
302 /*
303 * Copy the PMD. That is, the kernelmode and usermode
304 * tables will share the last-level page tables of this
305 * address range
306 */
307 *target_pmd = pmd_clear_flags(*pmd, clear);
308 }
309}
310
311/*
312 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
313 * next-level entry on 5-level systems.
314 */
315static void __init pti_clone_p4d(unsigned long addr)
316{
317 p4d_t *kernel_p4d, *user_p4d;
318 pgd_t *kernel_pgd;
319
320 user_p4d = pti_user_pagetable_walk_p4d(addr);
321 kernel_pgd = pgd_offset_k(addr);
322 kernel_p4d = p4d_offset(kernel_pgd, addr);
323 *user_p4d = *kernel_p4d;
324}
325
326/*
327 * Clone the CPU_ENTRY_AREA into the user space visible page table.
328 */
329static void __init pti_clone_user_shared(void)
330{
331 pti_clone_p4d(CPU_ENTRY_AREA_BASE);
332}
333
334/*
335 * Clone the ESPFIX P4D into the user space visinble page table
336 */
337static void __init pti_setup_espfix64(void)
338{
339#ifdef CONFIG_X86_ESPFIX64
340 pti_clone_p4d(ESPFIX_BASE_ADDR);
341#endif
342}
343
344/*
345 * Clone the populated PMDs of the entry and irqentry text and force it RO.
346 */
347static void __init pti_clone_entry_text(void)
348{
349 pti_clone_pmds((unsigned long) __entry_text_start,
350 (unsigned long) __irqentry_text_end,
351 _PAGE_RW | _PAGE_GLOBAL);
352}
353
354/*
355 * Initialize kernel page table isolation
356 */
357void __init pti_init(void)
358{
359 if (!static_cpu_has(X86_FEATURE_PTI))
360 return;
361
362 pr_info("enabled\n");
363
364 pti_clone_user_shared();
365 pti_clone_entry_text();
366 pti_setup_espfix64();
367 pti_setup_vsyscall();
368}
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 3118392cdf75..5bfe61a5e8e3 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -28,6 +28,38 @@
28 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi 28 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
29 */ 29 */
30 30
31/*
32 * We get here when we do something requiring a TLB invalidation
33 * but could not go invalidate all of the contexts. We do the
34 * necessary invalidation by clearing out the 'ctx_id' which
35 * forces a TLB flush when the context is loaded.
36 */
37void clear_asid_other(void)
38{
39 u16 asid;
40
41 /*
42 * This is only expected to be set if we have disabled
43 * kernel _PAGE_GLOBAL pages.
44 */
45 if (!static_cpu_has(X86_FEATURE_PTI)) {
46 WARN_ON_ONCE(1);
47 return;
48 }
49
50 for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
51 /* Do not need to flush the current asid */
52 if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid))
53 continue;
54 /*
55 * Make sure the next time we go to switch to
56 * this asid, we do a flush:
57 */
58 this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0);
59 }
60 this_cpu_write(cpu_tlbstate.invalidate_other, false);
61}
62
31atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1); 63atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
32 64
33 65
@@ -42,6 +74,9 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
42 return; 74 return;
43 } 75 }
44 76
77 if (this_cpu_read(cpu_tlbstate.invalidate_other))
78 clear_asid_other();
79
45 for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) { 80 for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
46 if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) != 81 if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
47 next->context.ctx_id) 82 next->context.ctx_id)
@@ -65,6 +100,25 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
65 *need_flush = true; 100 *need_flush = true;
66} 101}
67 102
103static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, bool need_flush)
104{
105 unsigned long new_mm_cr3;
106
107 if (need_flush) {
108 invalidate_user_asid(new_asid);
109 new_mm_cr3 = build_cr3(pgdir, new_asid);
110 } else {
111 new_mm_cr3 = build_cr3_noflush(pgdir, new_asid);
112 }
113
114 /*
115 * Caution: many callers of this function expect
116 * that load_cr3() is serializing and orders TLB
117 * fills with respect to the mm_cpumask writes.
118 */
119 write_cr3(new_mm_cr3);
120}
121
68void leave_mm(int cpu) 122void leave_mm(int cpu)
69{ 123{
70 struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm); 124 struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
@@ -97,6 +151,34 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
97 local_irq_restore(flags); 151 local_irq_restore(flags);
98} 152}
99 153
154static void sync_current_stack_to_mm(struct mm_struct *mm)
155{
156 unsigned long sp = current_stack_pointer;
157 pgd_t *pgd = pgd_offset(mm, sp);
158
159 if (CONFIG_PGTABLE_LEVELS > 4) {
160 if (unlikely(pgd_none(*pgd))) {
161 pgd_t *pgd_ref = pgd_offset_k(sp);
162
163 set_pgd(pgd, *pgd_ref);
164 }
165 } else {
166 /*
167 * "pgd" is faked. The top level entries are "p4d"s, so sync
168 * the p4d. This compiles to approximately the same code as
169 * the 5-level case.
170 */
171 p4d_t *p4d = p4d_offset(pgd, sp);
172
173 if (unlikely(p4d_none(*p4d))) {
174 pgd_t *pgd_ref = pgd_offset_k(sp);
175 p4d_t *p4d_ref = p4d_offset(pgd_ref, sp);
176
177 set_p4d(p4d, *p4d_ref);
178 }
179 }
180}
181
100void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, 182void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
101 struct task_struct *tsk) 183 struct task_struct *tsk)
102{ 184{
@@ -128,7 +210,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
128 * isn't free. 210 * isn't free.
129 */ 211 */
130#ifdef CONFIG_DEBUG_VM 212#ifdef CONFIG_DEBUG_VM
131 if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev, prev_asid))) { 213 if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev->pgd, prev_asid))) {
132 /* 214 /*
133 * If we were to BUG here, we'd be very likely to kill 215 * If we were to BUG here, we'd be very likely to kill
134 * the system so hard that we don't see the call trace. 216 * the system so hard that we don't see the call trace.
@@ -172,11 +254,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
172 * mapped in the new pgd, we'll double-fault. Forcibly 254 * mapped in the new pgd, we'll double-fault. Forcibly
173 * map it. 255 * map it.
174 */ 256 */
175 unsigned int index = pgd_index(current_stack_pointer); 257 sync_current_stack_to_mm(next);
176 pgd_t *pgd = next->pgd + index;
177
178 if (unlikely(pgd_none(*pgd)))
179 set_pgd(pgd, init_mm.pgd[index]);
180 } 258 }
181 259
182 /* Stop remote flushes for the previous mm */ 260 /* Stop remote flushes for the previous mm */
@@ -195,7 +273,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
195 if (need_flush) { 273 if (need_flush) {
196 this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); 274 this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
197 this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); 275 this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
198 write_cr3(build_cr3(next, new_asid)); 276 load_new_mm_cr3(next->pgd, new_asid, true);
199 277
200 /* 278 /*
201 * NB: This gets called via leave_mm() in the idle path 279 * NB: This gets called via leave_mm() in the idle path
@@ -208,7 +286,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
208 trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); 286 trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
209 } else { 287 } else {
210 /* The new ASID is already up to date. */ 288 /* The new ASID is already up to date. */
211 write_cr3(build_cr3_noflush(next, new_asid)); 289 load_new_mm_cr3(next->pgd, new_asid, false);
212 290
213 /* See above wrt _rcuidle. */ 291 /* See above wrt _rcuidle. */
214 trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0); 292 trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
@@ -288,7 +366,7 @@ void initialize_tlbstate_and_flush(void)
288 !(cr4_read_shadow() & X86_CR4_PCIDE)); 366 !(cr4_read_shadow() & X86_CR4_PCIDE));
289 367
290 /* Force ASID 0 and force a TLB flush. */ 368 /* Force ASID 0 and force a TLB flush. */
291 write_cr3(build_cr3(mm, 0)); 369 write_cr3(build_cr3(mm->pgd, 0));
292 370
293 /* Reinitialize tlbstate. */ 371 /* Reinitialize tlbstate. */
294 this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0); 372 this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
@@ -551,7 +629,7 @@ static void do_kernel_range_flush(void *info)
551 629
552 /* flush range by one by one 'invlpg' */ 630 /* flush range by one by one 'invlpg' */
553 for (addr = f->start; addr < f->end; addr += PAGE_SIZE) 631 for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
554 __flush_tlb_single(addr); 632 __flush_tlb_one(addr);
555} 633}
556 634
557void flush_tlb_kernel_range(unsigned long start, unsigned long end) 635void flush_tlb_kernel_range(unsigned long start, unsigned long end)
diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c
index bb461cfd01ab..526536c81ddc 100644
--- a/arch/x86/pci/broadcom_bus.c
+++ b/arch/x86/pci/broadcom_bus.c
@@ -97,7 +97,7 @@ static int __init broadcom_postcore_init(void)
97 * We should get host bridge information from ACPI unless the BIOS 97 * We should get host bridge information from ACPI unless the BIOS
98 * doesn't support it. 98 * doesn't support it.
99 */ 99 */
100 if (acpi_os_get_root_pointer()) 100 if (!acpi_disabled && acpi_os_get_root_pointer())
101 return 0; 101 return 0;
102#endif 102#endif
103 103
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 7a5350d08cef..563049c483a1 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -594,6 +594,11 @@ char *__init pcibios_setup(char *str)
594 } else if (!strcmp(str, "nocrs")) { 594 } else if (!strcmp(str, "nocrs")) {
595 pci_probe |= PCI_ROOT_NO_CRS; 595 pci_probe |= PCI_ROOT_NO_CRS;
596 return NULL; 596 return NULL;
597#ifdef CONFIG_PHYS_ADDR_T_64BIT
598 } else if (!strcmp(str, "big_root_window")) {
599 pci_probe |= PCI_BIG_ROOT_WINDOW;
600 return NULL;
601#endif
597 } else if (!strcmp(str, "earlydump")) { 602 } else if (!strcmp(str, "earlydump")) {
598 pci_early_dump_regs = 1; 603 pci_early_dump_regs = 1;
599 return NULL; 604 return NULL;
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 1e996df687a3..54ef19e90705 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -662,9 +662,23 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
662 */ 662 */
663static void pci_amd_enable_64bit_bar(struct pci_dev *dev) 663static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
664{ 664{
665 unsigned i; 665 static const char *name = "PCI Bus 0000:00";
666 u32 base, limit, high;
667 struct resource *res, *conflict; 666 struct resource *res, *conflict;
667 u32 base, limit, high;
668 struct pci_dev *other;
669 unsigned i;
670
671 if (!(pci_probe & PCI_BIG_ROOT_WINDOW))
672 return;
673
674 /* Check that we are the only device of that type */
675 other = pci_get_device(dev->vendor, dev->device, NULL);
676 if (other != dev ||
677 (other = pci_get_device(dev->vendor, dev->device, other))) {
678 /* This is a multi-socket system, don't touch it for now */
679 pci_dev_put(other);
680 return;
681 }
668 682
669 for (i = 0; i < 8; i++) { 683 for (i = 0; i < 8; i++) {
670 pci_read_config_dword(dev, AMD_141b_MMIO_BASE(i), &base); 684 pci_read_config_dword(dev, AMD_141b_MMIO_BASE(i), &base);
@@ -689,17 +703,30 @@ static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
689 if (!res) 703 if (!res)
690 return; 704 return;
691 705
692 res->name = "PCI Bus 0000:00"; 706 /*
707 * Allocate a 256GB window directly below the 0xfd00000000 hardware
708 * limit (see AMD Family 15h Models 30h-3Fh BKDG, sec 2.4.6).
709 */
710 res->name = name;
693 res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM | 711 res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM |
694 IORESOURCE_MEM_64 | IORESOURCE_WINDOW; 712 IORESOURCE_MEM_64 | IORESOURCE_WINDOW;
695 res->start = 0x100000000ull; 713 res->start = 0xbd00000000ull;
696 res->end = 0xfd00000000ull - 1; 714 res->end = 0xfd00000000ull - 1;
697 715
698 /* Just grab the free area behind system memory for this */ 716 conflict = request_resource_conflict(&iomem_resource, res);
699 while ((conflict = request_resource_conflict(&iomem_resource, res))) 717 if (conflict) {
700 res->start = conflict->end + 1; 718 kfree(res);
719 if (conflict->name != name)
720 return;
701 721
702 dev_info(&dev->dev, "adding root bus resource %pR\n", res); 722 /* We are resuming from suspend; just reenable the window */
723 res = conflict;
724 } else {
725 dev_info(&dev->dev, "adding root bus resource %pR (tainting kernel)\n",
726 res);
727 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
728 pci_bus_add_resource(dev->bus, res, 0);
729 }
703 730
704 base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) | 731 base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) |
705 AMD_141b_MMIO_BASE_RE_MASK | AMD_141b_MMIO_BASE_WE_MASK; 732 AMD_141b_MMIO_BASE_RE_MASK | AMD_141b_MMIO_BASE_WE_MASK;
@@ -711,13 +738,16 @@ static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
711 pci_write_config_dword(dev, AMD_141b_MMIO_HIGH(i), high); 738 pci_write_config_dword(dev, AMD_141b_MMIO_HIGH(i), high);
712 pci_write_config_dword(dev, AMD_141b_MMIO_LIMIT(i), limit); 739 pci_write_config_dword(dev, AMD_141b_MMIO_LIMIT(i), limit);
713 pci_write_config_dword(dev, AMD_141b_MMIO_BASE(i), base); 740 pci_write_config_dword(dev, AMD_141b_MMIO_BASE(i), base);
714
715 pci_bus_add_resource(dev->bus, res, 0);
716} 741}
717DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar); 742DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
718DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar); 743DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
719DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar); 744DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
720DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar); 745DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
721DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar); 746DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
747DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
748DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
749DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
750DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
751DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
722 752
723#endif 753#endif
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 6a151ce70e86..2dd15e967c3f 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -135,7 +135,9 @@ pgd_t * __init efi_call_phys_prolog(void)
135 pud[j] = *pud_offset(p4d_k, vaddr); 135 pud[j] = *pud_offset(p4d_k, vaddr);
136 } 136 }
137 } 137 }
138 pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX;
138 } 139 }
140
139out: 141out:
140 __flush_tlb_all(); 142 __flush_tlb_all();
141 143
@@ -196,6 +198,9 @@ static pgd_t *efi_pgd;
196 * because we want to avoid inserting EFI region mappings (EFI_VA_END 198 * because we want to avoid inserting EFI region mappings (EFI_VA_END
197 * to EFI_VA_START) into the standard kernel page tables. Everything 199 * to EFI_VA_START) into the standard kernel page tables. Everything
198 * else can be shared, see efi_sync_low_kernel_mappings(). 200 * else can be shared, see efi_sync_low_kernel_mappings().
201 *
202 * We don't want the pgd on the pgd_list and cannot use pgd_alloc() for the
203 * allocation.
199 */ 204 */
200int __init efi_alloc_page_tables(void) 205int __init efi_alloc_page_tables(void)
201{ 206{
@@ -208,7 +213,7 @@ int __init efi_alloc_page_tables(void)
208 return 0; 213 return 0;
209 214
210 gfp_mask = GFP_KERNEL | __GFP_ZERO; 215 gfp_mask = GFP_KERNEL | __GFP_ZERO;
211 efi_pgd = (pgd_t *)__get_free_page(gfp_mask); 216 efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
212 if (!efi_pgd) 217 if (!efi_pgd)
213 return -ENOMEM; 218 return -ENOMEM;
214 219
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 8a99a2e96537..5b513ccffde4 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -592,7 +592,18 @@ static int qrk_capsule_setup_info(struct capsule_info *cap_info, void **pkbuff,
592 /* 592 /*
593 * Update the first page pointer to skip over the CSH header. 593 * Update the first page pointer to skip over the CSH header.
594 */ 594 */
595 cap_info->pages[0] += csh->headersize; 595 cap_info->phys[0] += csh->headersize;
596
597 /*
598 * cap_info->capsule should point at a virtual mapping of the entire
599 * capsule, starting at the capsule header. Our image has the Quark
600 * security header prepended, so we cannot rely on the default vmap()
601 * mapping created by the generic capsule code.
602 * Given that the Quark firmware does not appear to care about the
603 * virtual mapping, let's just point cap_info->capsule at our copy
604 * of the capsule header.
605 */
606 cap_info->capsule = &cap_info->header;
596 607
597 return 1; 608 return 1;
598} 609}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bt.c b/arch/x86/platform/intel-mid/device_libs/platform_bt.c
index dc036e511f48..5a0483e7bf66 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_bt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_bt.c
@@ -60,7 +60,7 @@ static int __init tng_bt_sfi_setup(struct bt_sfi_data *ddata)
60 return 0; 60 return 0;
61} 61}
62 62
63static const struct bt_sfi_data tng_bt_sfi_data __initdata = { 63static struct bt_sfi_data tng_bt_sfi_data __initdata = {
64 .setup = tng_bt_sfi_setup, 64 .setup = tng_bt_sfi_setup,
65}; 65};
66 66
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index f44c0bc95aa2..8538a6723171 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -299,7 +299,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
299 local_flush_tlb(); 299 local_flush_tlb();
300 stat->d_alltlb++; 300 stat->d_alltlb++;
301 } else { 301 } else {
302 __flush_tlb_one(msg->address); 302 __flush_tlb_single(msg->address);
303 stat->d_onetlb++; 303 stat->d_onetlb++;
304 } 304 }
305 stat->d_requestee++; 305 stat->d_requestee++;
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index 5f6fd860820a..e4cb9f4cde8a 100644
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -128,7 +128,7 @@ static void uv_domain_free(struct irq_domain *domain, unsigned int virq,
128 * on the specified blade to allow the sending of MSIs to the specified CPU. 128 * on the specified blade to allow the sending of MSIs to the specified CPU.
129 */ 129 */
130static int uv_domain_activate(struct irq_domain *domain, 130static int uv_domain_activate(struct irq_domain *domain,
131 struct irq_data *irq_data, bool early) 131 struct irq_data *irq_data, bool reserve)
132{ 132{
133 uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data); 133 uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data);
134 return 0; 134 return 0;
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index c34bd8233f7c..5f64f30873e2 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -905,7 +905,7 @@ static inline void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
905/* 905/*
906 * UV NMI handler 906 * UV NMI handler
907 */ 907 */
908int uv_handle_nmi(unsigned int reason, struct pt_regs *regs) 908static int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
909{ 909{
910 struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi; 910 struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;
911 int cpu = smp_processor_id(); 911 int cpu = smp_processor_id();
@@ -1013,7 +1013,7 @@ void uv_nmi_init(void)
1013} 1013}
1014 1014
1015/* Setup HUB NMI info */ 1015/* Setup HUB NMI info */
1016void __init uv_nmi_setup_common(bool hubbed) 1016static void __init uv_nmi_setup_common(bool hubbed)
1017{ 1017{
1018 int size = sizeof(void *) * (1 << NODES_SHIFT); 1018 int size = sizeof(void *) * (1 << NODES_SHIFT);
1019 int cpu; 1019 int cpu;
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 84fcfde53f8f..a7d966964c6f 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -82,12 +82,8 @@ static void __save_processor_state(struct saved_context *ctxt)
82 /* 82 /*
83 * descriptor tables 83 * descriptor tables
84 */ 84 */
85#ifdef CONFIG_X86_32
86 store_idt(&ctxt->idt); 85 store_idt(&ctxt->idt);
87#else 86
88/* CONFIG_X86_64 */
89 store_idt((struct desc_ptr *)&ctxt->idt_limit);
90#endif
91 /* 87 /*
92 * We save it here, but restore it only in the hibernate case. 88 * We save it here, but restore it only in the hibernate case.
93 * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit 89 * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
@@ -103,22 +99,18 @@ static void __save_processor_state(struct saved_context *ctxt)
103 /* 99 /*
104 * segment registers 100 * segment registers
105 */ 101 */
106#ifdef CONFIG_X86_32 102#ifdef CONFIG_X86_32_LAZY_GS
107 savesegment(es, ctxt->es);
108 savesegment(fs, ctxt->fs);
109 savesegment(gs, ctxt->gs); 103 savesegment(gs, ctxt->gs);
110 savesegment(ss, ctxt->ss); 104#endif
111#else 105#ifdef CONFIG_X86_64
112/* CONFIG_X86_64 */ 106 savesegment(gs, ctxt->gs);
113 asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds)); 107 savesegment(fs, ctxt->fs);
114 asm volatile ("movw %%es, %0" : "=m" (ctxt->es)); 108 savesegment(ds, ctxt->ds);
115 asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs)); 109 savesegment(es, ctxt->es);
116 asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
117 asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
118 110
119 rdmsrl(MSR_FS_BASE, ctxt->fs_base); 111 rdmsrl(MSR_FS_BASE, ctxt->fs_base);
120 rdmsrl(MSR_GS_BASE, ctxt->gs_base); 112 rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
121 rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base); 113 rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
122 mtrr_save_fixed_ranges(NULL); 114 mtrr_save_fixed_ranges(NULL);
123 115
124 rdmsrl(MSR_EFER, ctxt->efer); 116 rdmsrl(MSR_EFER, ctxt->efer);
@@ -160,17 +152,19 @@ static void do_fpu_end(void)
160static void fix_processor_context(void) 152static void fix_processor_context(void)
161{ 153{
162 int cpu = smp_processor_id(); 154 int cpu = smp_processor_id();
163 struct tss_struct *t = &per_cpu(cpu_tss, cpu);
164#ifdef CONFIG_X86_64 155#ifdef CONFIG_X86_64
165 struct desc_struct *desc = get_cpu_gdt_rw(cpu); 156 struct desc_struct *desc = get_cpu_gdt_rw(cpu);
166 tss_desc tss; 157 tss_desc tss;
167#endif 158#endif
168 set_tss_desc(cpu, t); /* 159
169 * This just modifies memory; should not be 160 /*
170 * necessary. But... This is necessary, because 161 * We need to reload TR, which requires that we change the
171 * 386 hardware has concept of busy TSS or some 162 * GDT entry to indicate "available" first.
172 * similar stupidity. 163 *
173 */ 164 * XXX: This could probably all be replaced by a call to
165 * force_reload_TR().
166 */
167 set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
174 168
175#ifdef CONFIG_X86_64 169#ifdef CONFIG_X86_64
176 memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc)); 170 memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
@@ -178,6 +172,9 @@ static void fix_processor_context(void)
178 write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS); 172 write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
179 173
180 syscall_init(); /* This sets MSR_*STAR and related */ 174 syscall_init(); /* This sets MSR_*STAR and related */
175#else
176 if (boot_cpu_has(X86_FEATURE_SEP))
177 enable_sep_cpu();
181#endif 178#endif
182 load_TR_desc(); /* This does ltr */ 179 load_TR_desc(); /* This does ltr */
183 load_mm_ldt(current->active_mm); /* This does lldt */ 180 load_mm_ldt(current->active_mm); /* This does lldt */
@@ -190,9 +187,12 @@ static void fix_processor_context(void)
190} 187}
191 188
192/** 189/**
193 * __restore_processor_state - restore the contents of CPU registers saved 190 * __restore_processor_state - restore the contents of CPU registers saved
194 * by __save_processor_state() 191 * by __save_processor_state()
195 * @ctxt - structure to load the registers contents from 192 * @ctxt - structure to load the registers contents from
193 *
194 * The asm code that gets us here will have restored a usable GDT, although
195 * it will be pointing to the wrong alias.
196 */ 196 */
197static void notrace __restore_processor_state(struct saved_context *ctxt) 197static void notrace __restore_processor_state(struct saved_context *ctxt)
198{ 198{
@@ -215,46 +215,52 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
215 write_cr2(ctxt->cr2); 215 write_cr2(ctxt->cr2);
216 write_cr0(ctxt->cr0); 216 write_cr0(ctxt->cr0);
217 217
218 /* Restore the IDT. */
219 load_idt(&ctxt->idt);
220
218 /* 221 /*
219 * now restore the descriptor tables to their proper values 222 * Just in case the asm code got us here with the SS, DS, or ES
220 * ltr is done i fix_processor_context(). 223 * out of sync with the GDT, update them.
221 */ 224 */
222#ifdef CONFIG_X86_32 225 loadsegment(ss, __KERNEL_DS);
223 load_idt(&ctxt->idt); 226 loadsegment(ds, __USER_DS);
227 loadsegment(es, __USER_DS);
228
229 /*
230 * Restore percpu access. Percpu access can happen in exception
231 * handlers or in complicated helpers like load_gs_index().
232 */
233#ifdef CONFIG_X86_64
234 wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
224#else 235#else
225/* CONFIG_X86_64 */ 236 loadsegment(fs, __KERNEL_PERCPU);
226 load_idt((const struct desc_ptr *)&ctxt->idt_limit); 237 loadsegment(gs, __KERNEL_STACK_CANARY);
227#endif 238#endif
228 239
240 /* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
241 fix_processor_context();
242
229 /* 243 /*
230 * segment registers 244 * Now that we have descriptor tables fully restored and working
245 * exception handling, restore the usermode segments.
231 */ 246 */
232#ifdef CONFIG_X86_32 247#ifdef CONFIG_X86_64
248 loadsegment(ds, ctxt->es);
233 loadsegment(es, ctxt->es); 249 loadsegment(es, ctxt->es);
234 loadsegment(fs, ctxt->fs); 250 loadsegment(fs, ctxt->fs);
235 loadsegment(gs, ctxt->gs); 251 load_gs_index(ctxt->gs);
236 loadsegment(ss, ctxt->ss);
237 252
238 /* 253 /*
239 * sysenter MSRs 254 * Restore FSBASE and GSBASE after restoring the selectors, since
255 * restoring the selectors clobbers the bases. Keep in mind
256 * that MSR_KERNEL_GS_BASE is horribly misnamed.
240 */ 257 */
241 if (boot_cpu_has(X86_FEATURE_SEP))
242 enable_sep_cpu();
243#else
244/* CONFIG_X86_64 */
245 asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
246 asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
247 asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
248 load_gs_index(ctxt->gs);
249 asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
250
251 wrmsrl(MSR_FS_BASE, ctxt->fs_base); 258 wrmsrl(MSR_FS_BASE, ctxt->fs_base);
252 wrmsrl(MSR_GS_BASE, ctxt->gs_base); 259 wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
253 wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base); 260#elif defined(CONFIG_X86_32_LAZY_GS)
261 loadsegment(gs, ctxt->gs);
254#endif 262#endif
255 263
256 fix_processor_context();
257
258 do_fpu_end(); 264 do_fpu_end();
259 tsc_verify_tsc_adjust(true); 265 tsc_verify_tsc_adjust(true);
260 x86_platform.restore_sched_clock_state(); 266 x86_platform.restore_sched_clock_state();
diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c
index 6b830d4cb4c8..de58533d3664 100644
--- a/arch/x86/xen/apic.c
+++ b/arch/x86/xen/apic.c
@@ -57,7 +57,7 @@ static u32 xen_apic_read(u32 reg)
57 return 0; 57 return 0;
58 58
59 if (reg == APIC_LVR) 59 if (reg == APIC_LVR)
60 return 0x10; 60 return 0x14;
61#ifdef CONFIG_X86_32 61#ifdef CONFIG_X86_32
62 if (reg == APIC_LDR) 62 if (reg == APIC_LDR)
63 return SET_APIC_LOGICAL_ID(1UL << smp_processor_id()); 63 return SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index d669e9d89001..c9081c6671f0 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1,8 +1,12 @@
1#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
2#include <linux/bootmem.h>
3#endif
1#include <linux/cpu.h> 4#include <linux/cpu.h>
2#include <linux/kexec.h> 5#include <linux/kexec.h>
3 6
4#include <xen/features.h> 7#include <xen/features.h>
5#include <xen/page.h> 8#include <xen/page.h>
9#include <xen/interface/memory.h>
6 10
7#include <asm/xen/hypercall.h> 11#include <asm/xen/hypercall.h>
8#include <asm/xen/hypervisor.h> 12#include <asm/xen/hypervisor.h>
@@ -331,3 +335,80 @@ void xen_arch_unregister_cpu(int num)
331} 335}
332EXPORT_SYMBOL(xen_arch_unregister_cpu); 336EXPORT_SYMBOL(xen_arch_unregister_cpu);
333#endif 337#endif
338
339#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
340void __init arch_xen_balloon_init(struct resource *hostmem_resource)
341{
342 struct xen_memory_map memmap;
343 int rc;
344 unsigned int i, last_guest_ram;
345 phys_addr_t max_addr = PFN_PHYS(max_pfn);
346 struct e820_table *xen_e820_table;
347 const struct e820_entry *entry;
348 struct resource *res;
349
350 if (!xen_initial_domain())
351 return;
352
353 xen_e820_table = kmalloc(sizeof(*xen_e820_table), GFP_KERNEL);
354 if (!xen_e820_table)
355 return;
356
357 memmap.nr_entries = ARRAY_SIZE(xen_e820_table->entries);
358 set_xen_guest_handle(memmap.buffer, xen_e820_table->entries);
359 rc = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap);
360 if (rc) {
361 pr_warn("%s: Can't read host e820 (%d)\n", __func__, rc);
362 goto out;
363 }
364
365 last_guest_ram = 0;
366 for (i = 0; i < memmap.nr_entries; i++) {
367 if (xen_e820_table->entries[i].addr >= max_addr)
368 break;
369 if (xen_e820_table->entries[i].type == E820_TYPE_RAM)
370 last_guest_ram = i;
371 }
372
373 entry = &xen_e820_table->entries[last_guest_ram];
374 if (max_addr >= entry->addr + entry->size)
375 goto out; /* No unallocated host RAM. */
376
377 hostmem_resource->start = max_addr;
378 hostmem_resource->end = entry->addr + entry->size;
379
380 /*
381 * Mark non-RAM regions between the end of dom0 RAM and end of host RAM
382 * as unavailable. The rest of that region can be used for hotplug-based
383 * ballooning.
384 */
385 for (; i < memmap.nr_entries; i++) {
386 entry = &xen_e820_table->entries[i];
387
388 if (entry->type == E820_TYPE_RAM)
389 continue;
390
391 if (entry->addr >= hostmem_resource->end)
392 break;
393
394 res = kzalloc(sizeof(*res), GFP_KERNEL);
395 if (!res)
396 goto out;
397
398 res->name = "Unavailable host RAM";
399 res->start = entry->addr;
400 res->end = (entry->addr + entry->size < hostmem_resource->end) ?
401 entry->addr + entry->size : hostmem_resource->end;
402 rc = insert_resource(hostmem_resource, res);
403 if (rc) {
404 pr_warn("%s: Can't insert [%llx - %llx) (%d)\n",
405 __func__, res->start, res->end, rc);
406 kfree(res);
407 goto out;
408 }
409 }
410
411 out:
412 kfree(xen_e820_table);
413}
414#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 5b2b3f3f6531..c047f42552e1 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -88,6 +88,8 @@
88#include "multicalls.h" 88#include "multicalls.h"
89#include "pmu.h" 89#include "pmu.h"
90 90
91#include "../kernel/cpu/cpu.h" /* get_cpu_cap() */
92
91void *xen_initial_gdt; 93void *xen_initial_gdt;
92 94
93static int xen_cpu_up_prepare_pv(unsigned int cpu); 95static int xen_cpu_up_prepare_pv(unsigned int cpu);
@@ -622,7 +624,7 @@ static struct trap_array_entry trap_array[] = {
622 { simd_coprocessor_error, xen_simd_coprocessor_error, false }, 624 { simd_coprocessor_error, xen_simd_coprocessor_error, false },
623}; 625};
624 626
625static bool get_trap_addr(void **addr, unsigned int ist) 627static bool __ref get_trap_addr(void **addr, unsigned int ist)
626{ 628{
627 unsigned int nr; 629 unsigned int nr;
628 bool ist_okay = false; 630 bool ist_okay = false;
@@ -644,6 +646,14 @@ static bool get_trap_addr(void **addr, unsigned int ist)
644 } 646 }
645 } 647 }
646 648
649 if (nr == ARRAY_SIZE(trap_array) &&
650 *addr >= (void *)early_idt_handler_array[0] &&
651 *addr < (void *)early_idt_handler_array[NUM_EXCEPTION_VECTORS]) {
652 nr = (*addr - (void *)early_idt_handler_array[0]) /
653 EARLY_IDT_HANDLER_SIZE;
654 *addr = (void *)xen_early_idt_handler_array[nr];
655 }
656
647 if (WARN_ON(ist != 0 && !ist_okay)) 657 if (WARN_ON(ist != 0 && !ist_okay))
648 return false; 658 return false;
649 659
@@ -818,7 +828,7 @@ static void xen_load_sp0(unsigned long sp0)
818 mcs = xen_mc_entry(0); 828 mcs = xen_mc_entry(0);
819 MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0); 829 MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0);
820 xen_mc_issue(PARAVIRT_LAZY_CPU); 830 xen_mc_issue(PARAVIRT_LAZY_CPU);
821 this_cpu_write(cpu_tss.x86_tss.sp0, sp0); 831 this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
822} 832}
823 833
824void xen_set_iopl_mask(unsigned mask) 834void xen_set_iopl_mask(unsigned mask)
@@ -1250,6 +1260,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
1250 __userpte_alloc_gfp &= ~__GFP_HIGHMEM; 1260 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
1251 1261
1252 /* Work out if we support NX */ 1262 /* Work out if we support NX */
1263 get_cpu_cap(&boot_cpu_data);
1253 x86_configure_nx(); 1264 x86_configure_nx();
1254 1265
1255 /* Get mfn list */ 1266 /* Get mfn list */
@@ -1262,6 +1273,21 @@ asmlinkage __visible void __init xen_start_kernel(void)
1262 xen_setup_gdt(0); 1273 xen_setup_gdt(0);
1263 1274
1264 xen_init_irq_ops(); 1275 xen_init_irq_ops();
1276
1277 /* Let's presume PV guests always boot on vCPU with id 0. */
1278 per_cpu(xen_vcpu_id, 0) = 0;
1279
1280 /*
1281 * Setup xen_vcpu early because idt_setup_early_handler needs it for
1282 * local_irq_disable(), irqs_disabled().
1283 *
1284 * Don't do the full vcpu_info placement stuff until we have
1285 * the cpu_possible_mask and a non-dummy shared_info.
1286 */
1287 xen_vcpu_info_reset(0);
1288
1289 idt_setup_early_handler();
1290
1265 xen_init_capabilities(); 1291 xen_init_capabilities();
1266 1292
1267#ifdef CONFIG_X86_LOCAL_APIC 1293#ifdef CONFIG_X86_LOCAL_APIC
@@ -1295,18 +1321,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
1295 */ 1321 */
1296 acpi_numa = -1; 1322 acpi_numa = -1;
1297#endif 1323#endif
1298 /* Let's presume PV guests always boot on vCPU with id 0. */
1299 per_cpu(xen_vcpu_id, 0) = 0;
1300
1301 /*
1302 * Setup xen_vcpu early because start_kernel needs it for
1303 * local_irq_disable(), irqs_disabled().
1304 *
1305 * Don't do the full vcpu_info placement stuff until we have
1306 * the cpu_possible_mask and a non-dummy shared_info.
1307 */
1308 xen_vcpu_info_reset(0);
1309
1310 WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv)); 1324 WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv));
1311 1325
1312 local_irq_disable(); 1326 local_irq_disable();
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index fc048ec686e7..d85076223a69 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1325,20 +1325,18 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
1325{ 1325{
1326 struct { 1326 struct {
1327 struct mmuext_op op; 1327 struct mmuext_op op;
1328#ifdef CONFIG_SMP
1329 DECLARE_BITMAP(mask, num_processors);
1330#else
1331 DECLARE_BITMAP(mask, NR_CPUS); 1328 DECLARE_BITMAP(mask, NR_CPUS);
1332#endif
1333 } *args; 1329 } *args;
1334 struct multicall_space mcs; 1330 struct multicall_space mcs;
1331 const size_t mc_entry_size = sizeof(args->op) +
1332 sizeof(args->mask[0]) * BITS_TO_LONGS(num_possible_cpus());
1335 1333
1336 trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end); 1334 trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end);
1337 1335
1338 if (cpumask_empty(cpus)) 1336 if (cpumask_empty(cpus))
1339 return; /* nothing to do */ 1337 return; /* nothing to do */
1340 1338
1341 mcs = xen_mc_entry(sizeof(*args)); 1339 mcs = xen_mc_entry(mc_entry_size);
1342 args = mcs.args; 1340 args = mcs.args;
1343 args->op.arg2.vcpumask = to_cpumask(args->mask); 1341 args->op.arg2.vcpumask = to_cpumask(args->mask);
1344 1342
@@ -1902,6 +1900,18 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1902 /* Graft it onto L4[511][510] */ 1900 /* Graft it onto L4[511][510] */
1903 copy_page(level2_kernel_pgt, l2); 1901 copy_page(level2_kernel_pgt, l2);
1904 1902
1903 /*
1904 * Zap execute permission from the ident map. Due to the sharing of
1905 * L1 entries we need to do this in the L2.
1906 */
1907 if (__supported_pte_mask & _PAGE_NX) {
1908 for (i = 0; i < PTRS_PER_PMD; ++i) {
1909 if (pmd_none(level2_ident_pgt[i]))
1910 continue;
1911 level2_ident_pgt[i] = pmd_set_flags(level2_ident_pgt[i], _PAGE_NX);
1912 }
1913 }
1914
1905 /* Copy the initial P->M table mappings if necessary. */ 1915 /* Copy the initial P->M table mappings if necessary. */
1906 i = pgd_index(xen_start_info->mfn_list); 1916 i = pgd_index(xen_start_info->mfn_list);
1907 if (i && i < pgd_index(__START_KERNEL_map)) 1917 if (i && i < pgd_index(__START_KERNEL_map))
@@ -2261,7 +2271,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
2261 2271
2262 switch (idx) { 2272 switch (idx) {
2263 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN: 2273 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
2264 case FIX_RO_IDT:
2265#ifdef CONFIG_X86_32 2274#ifdef CONFIG_X86_32
2266 case FIX_WP_TEST: 2275 case FIX_WP_TEST:
2267# ifdef CONFIG_HIGHMEM 2276# ifdef CONFIG_HIGHMEM
@@ -2272,7 +2281,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
2272#endif 2281#endif
2273 case FIX_TEXT_POKE0: 2282 case FIX_TEXT_POKE0:
2274 case FIX_TEXT_POKE1: 2283 case FIX_TEXT_POKE1:
2275 case FIX_GDT_REMAP_BEGIN ... FIX_GDT_REMAP_END:
2276 /* All local page mappings */ 2284 /* All local page mappings */
2277 pte = pfn_pte(phys, prot); 2285 pte = pfn_pte(phys, prot);
2278 break; 2286 break;
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index c114ca767b3b..6e0d2086eacb 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -808,7 +808,6 @@ char * __init xen_memory_setup(void)
808 addr = xen_e820_table.entries[0].addr; 808 addr = xen_e820_table.entries[0].addr;
809 size = xen_e820_table.entries[0].size; 809 size = xen_e820_table.entries[0].size;
810 while (i < xen_e820_table.nr_entries) { 810 while (i < xen_e820_table.nr_entries) {
811 bool discard = false;
812 811
813 chunk_size = size; 812 chunk_size = size;
814 type = xen_e820_table.entries[i].type; 813 type = xen_e820_table.entries[i].type;
@@ -824,11 +823,10 @@ char * __init xen_memory_setup(void)
824 xen_add_extra_mem(pfn_s, n_pfns); 823 xen_add_extra_mem(pfn_s, n_pfns);
825 xen_max_p2m_pfn = pfn_s + n_pfns; 824 xen_max_p2m_pfn = pfn_s + n_pfns;
826 } else 825 } else
827 discard = true; 826 type = E820_TYPE_UNUSABLE;
828 } 827 }
829 828
830 if (!discard) 829 xen_align_and_add_e820_region(addr, chunk_size, type);
831 xen_align_and_add_e820_region(addr, chunk_size, type);
832 830
833 addr += chunk_size; 831 addr += chunk_size;
834 size -= chunk_size; 832 size -= chunk_size;
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 8a10c9a9e2b5..417b339e5c8e 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -15,6 +15,7 @@
15 15
16#include <xen/interface/xen.h> 16#include <xen/interface/xen.h>
17 17
18#include <linux/init.h>
18#include <linux/linkage.h> 19#include <linux/linkage.h>
19 20
20.macro xen_pv_trap name 21.macro xen_pv_trap name
@@ -54,6 +55,19 @@ xen_pv_trap entry_INT80_compat
54#endif 55#endif
55xen_pv_trap hypervisor_callback 56xen_pv_trap hypervisor_callback
56 57
58 __INIT
59ENTRY(xen_early_idt_handler_array)
60 i = 0
61 .rept NUM_EXCEPTION_VECTORS
62 pop %rcx
63 pop %r11
64 jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
65 i = i + 1
66 .fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
67 .endr
68END(xen_early_idt_handler_array)
69 __FINIT
70
57hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32 71hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
58/* 72/*
59 * Xen64 iret frame: 73 * Xen64 iret frame:
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 75011b80660f..3b34745d0a52 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -72,7 +72,7 @@ u64 xen_clocksource_read(void);
72void xen_setup_cpu_clockevents(void); 72void xen_setup_cpu_clockevents(void);
73void xen_save_time_memory_area(void); 73void xen_save_time_memory_area(void);
74void xen_restore_time_memory_area(void); 74void xen_restore_time_memory_area(void);
75void __init xen_init_time_ops(void); 75void __ref xen_init_time_ops(void);
76void __init xen_hvm_init_time_ops(void); 76void __init xen_hvm_init_time_ops(void);
77 77
78irqreturn_t xen_debug_interrupt(int irq, void *dev_id); 78irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
diff --git a/arch/xtensa/include/uapi/asm/Kbuild b/arch/xtensa/include/uapi/asm/Kbuild
index a5bcdfb890f1..837d4dd76785 100644
--- a/arch/xtensa/include/uapi/asm/Kbuild
+++ b/arch/xtensa/include/uapi/asm/Kbuild
@@ -2,6 +2,7 @@
2include include/uapi/asm-generic/Kbuild.asm 2include include/uapi/asm-generic/Kbuild.asm
3 3
4generic-y += bitsperlong.h 4generic-y += bitsperlong.h
5generic-y += bpf_perf_event.h
5generic-y += errno.h 6generic-y += errno.h
6generic-y += fcntl.h 7generic-y += fcntl.h
7generic-y += ioctl.h 8generic-y += ioctl.h
diff --git a/block/bio.c b/block/bio.c
index 8bfdea58159b..9ef6cf3addb3 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -599,6 +599,8 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
599 bio->bi_disk = bio_src->bi_disk; 599 bio->bi_disk = bio_src->bi_disk;
600 bio->bi_partno = bio_src->bi_partno; 600 bio->bi_partno = bio_src->bi_partno;
601 bio_set_flag(bio, BIO_CLONED); 601 bio_set_flag(bio, BIO_CLONED);
602 if (bio_flagged(bio_src, BIO_THROTTLED))
603 bio_set_flag(bio, BIO_THROTTLED);
602 bio->bi_opf = bio_src->bi_opf; 604 bio->bi_opf = bio_src->bi_opf;
603 bio->bi_write_hint = bio_src->bi_write_hint; 605 bio->bi_write_hint = bio_src->bi_write_hint;
604 bio->bi_iter = bio_src->bi_iter; 606 bio->bi_iter = bio_src->bi_iter;
diff --git a/block/blk-core.c b/block/blk-core.c
index b8881750a3ac..3ba4326a63b5 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -562,6 +562,13 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
562 } 562 }
563} 563}
564 564
565void blk_drain_queue(struct request_queue *q)
566{
567 spin_lock_irq(q->queue_lock);
568 __blk_drain_queue(q, true);
569 spin_unlock_irq(q->queue_lock);
570}
571
565/** 572/**
566 * blk_queue_bypass_start - enter queue bypass mode 573 * blk_queue_bypass_start - enter queue bypass mode
567 * @q: queue of interest 574 * @q: queue of interest
@@ -689,8 +696,6 @@ void blk_cleanup_queue(struct request_queue *q)
689 */ 696 */
690 blk_freeze_queue(q); 697 blk_freeze_queue(q);
691 spin_lock_irq(lock); 698 spin_lock_irq(lock);
692 if (!q->mq_ops)
693 __blk_drain_queue(q, true);
694 queue_flag_set(QUEUE_FLAG_DEAD, q); 699 queue_flag_set(QUEUE_FLAG_DEAD, q);
695 spin_unlock_irq(lock); 700 spin_unlock_irq(lock);
696 701
diff --git a/block/blk-map.c b/block/blk-map.c
index b21f8e86f120..d3a94719f03f 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -12,22 +12,29 @@
12#include "blk.h" 12#include "blk.h"
13 13
14/* 14/*
15 * Append a bio to a passthrough request. Only works can be merged into 15 * Append a bio to a passthrough request. Only works if the bio can be merged
16 * the request based on the driver constraints. 16 * into the request based on the driver constraints.
17 */ 17 */
18int blk_rq_append_bio(struct request *rq, struct bio *bio) 18int blk_rq_append_bio(struct request *rq, struct bio **bio)
19{ 19{
20 blk_queue_bounce(rq->q, &bio); 20 struct bio *orig_bio = *bio;
21
22 blk_queue_bounce(rq->q, bio);
21 23
22 if (!rq->bio) { 24 if (!rq->bio) {
23 blk_rq_bio_prep(rq->q, rq, bio); 25 blk_rq_bio_prep(rq->q, rq, *bio);
24 } else { 26 } else {
25 if (!ll_back_merge_fn(rq->q, rq, bio)) 27 if (!ll_back_merge_fn(rq->q, rq, *bio)) {
28 if (orig_bio != *bio) {
29 bio_put(*bio);
30 *bio = orig_bio;
31 }
26 return -EINVAL; 32 return -EINVAL;
33 }
27 34
28 rq->biotail->bi_next = bio; 35 rq->biotail->bi_next = *bio;
29 rq->biotail = bio; 36 rq->biotail = *bio;
30 rq->__data_len += bio->bi_iter.bi_size; 37 rq->__data_len += (*bio)->bi_iter.bi_size;
31 } 38 }
32 39
33 return 0; 40 return 0;
@@ -73,14 +80,12 @@ static int __blk_rq_map_user_iov(struct request *rq,
73 * We link the bounce buffer in and could have to traverse it 80 * We link the bounce buffer in and could have to traverse it
74 * later so we have to get a ref to prevent it from being freed 81 * later so we have to get a ref to prevent it from being freed
75 */ 82 */
76 ret = blk_rq_append_bio(rq, bio); 83 ret = blk_rq_append_bio(rq, &bio);
77 bio_get(bio);
78 if (ret) { 84 if (ret) {
79 bio_endio(bio);
80 __blk_rq_unmap_user(orig_bio); 85 __blk_rq_unmap_user(orig_bio);
81 bio_put(bio);
82 return ret; 86 return ret;
83 } 87 }
88 bio_get(bio);
84 89
85 return 0; 90 return 0;
86} 91}
@@ -213,7 +218,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
213 int reading = rq_data_dir(rq) == READ; 218 int reading = rq_data_dir(rq) == READ;
214 unsigned long addr = (unsigned long) kbuf; 219 unsigned long addr = (unsigned long) kbuf;
215 int do_copy = 0; 220 int do_copy = 0;
216 struct bio *bio; 221 struct bio *bio, *orig_bio;
217 int ret; 222 int ret;
218 223
219 if (len > (queue_max_hw_sectors(q) << 9)) 224 if (len > (queue_max_hw_sectors(q) << 9))
@@ -236,10 +241,11 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
236 if (do_copy) 241 if (do_copy)
237 rq->rq_flags |= RQF_COPY_USER; 242 rq->rq_flags |= RQF_COPY_USER;
238 243
239 ret = blk_rq_append_bio(rq, bio); 244 orig_bio = bio;
245 ret = blk_rq_append_bio(rq, &bio);
240 if (unlikely(ret)) { 246 if (unlikely(ret)) {
241 /* request is too big */ 247 /* request is too big */
242 bio_put(bio); 248 bio_put(orig_bio);
243 return ret; 249 return ret;
244 } 250 }
245 251
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 11097477eeab..3d3797327491 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -161,6 +161,8 @@ void blk_freeze_queue(struct request_queue *q)
161 * exported to drivers as the only user for unfreeze is blk_mq. 161 * exported to drivers as the only user for unfreeze is blk_mq.
162 */ 162 */
163 blk_freeze_queue_start(q); 163 blk_freeze_queue_start(q);
164 if (!q->mq_ops)
165 blk_drain_queue(q);
164 blk_mq_freeze_queue_wait(q); 166 blk_mq_freeze_queue_wait(q);
165} 167}
166 168
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 825bc29767e6..d19f416d6101 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -2226,13 +2226,7 @@ again:
2226out_unlock: 2226out_unlock:
2227 spin_unlock_irq(q->queue_lock); 2227 spin_unlock_irq(q->queue_lock);
2228out: 2228out:
2229 /* 2229 bio_set_flag(bio, BIO_THROTTLED);
2230 * As multiple blk-throtls may stack in the same issue path, we
2231 * don't want bios to leave with the flag set. Clear the flag if
2232 * being issued.
2233 */
2234 if (!throttled)
2235 bio_clear_flag(bio, BIO_THROTTLED);
2236 2230
2237#ifdef CONFIG_BLK_DEV_THROTTLING_LOW 2231#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2238 if (throttled || !td->track_bio_latency) 2232 if (throttled || !td->track_bio_latency)
diff --git a/block/blk.h b/block/blk.h
index 3f1446937aec..442098aa9463 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -330,4 +330,6 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
330} 330}
331#endif /* CONFIG_BOUNCE */ 331#endif /* CONFIG_BOUNCE */
332 332
333extern void blk_drain_queue(struct request_queue *q);
334
333#endif /* BLK_INTERNAL_H */ 335#endif /* BLK_INTERNAL_H */
diff --git a/block/bounce.c b/block/bounce.c
index fceb1a96480b..1d05c422c932 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -200,6 +200,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
200 unsigned i = 0; 200 unsigned i = 0;
201 bool bounce = false; 201 bool bounce = false;
202 int sectors = 0; 202 int sectors = 0;
203 bool passthrough = bio_is_passthrough(*bio_orig);
203 204
204 bio_for_each_segment(from, *bio_orig, iter) { 205 bio_for_each_segment(from, *bio_orig, iter) {
205 if (i++ < BIO_MAX_PAGES) 206 if (i++ < BIO_MAX_PAGES)
@@ -210,13 +211,14 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
210 if (!bounce) 211 if (!bounce)
211 return; 212 return;
212 213
213 if (sectors < bio_sectors(*bio_orig)) { 214 if (!passthrough && sectors < bio_sectors(*bio_orig)) {
214 bio = bio_split(*bio_orig, sectors, GFP_NOIO, bounce_bio_split); 215 bio = bio_split(*bio_orig, sectors, GFP_NOIO, bounce_bio_split);
215 bio_chain(bio, *bio_orig); 216 bio_chain(bio, *bio_orig);
216 generic_make_request(*bio_orig); 217 generic_make_request(*bio_orig);
217 *bio_orig = bio; 218 *bio_orig = bio;
218 } 219 }
219 bio = bio_clone_bioset(*bio_orig, GFP_NOIO, bounce_bio_set); 220 bio = bio_clone_bioset(*bio_orig, GFP_NOIO, passthrough ? NULL :
221 bounce_bio_set);
220 222
221 bio_for_each_segment_all(to, bio, i) { 223 bio_for_each_segment_all(to, bio, i) {
222 struct page *page = to->bv_page; 224 struct page *page = to->bv_page;
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index b4df317c2916..f95c60774ce8 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -100,9 +100,13 @@ struct kyber_hctx_data {
100 unsigned int cur_domain; 100 unsigned int cur_domain;
101 unsigned int batching; 101 unsigned int batching;
102 wait_queue_entry_t domain_wait[KYBER_NUM_DOMAINS]; 102 wait_queue_entry_t domain_wait[KYBER_NUM_DOMAINS];
103 struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
103 atomic_t wait_index[KYBER_NUM_DOMAINS]; 104 atomic_t wait_index[KYBER_NUM_DOMAINS];
104}; 105};
105 106
107static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
108 void *key);
109
106static int rq_sched_domain(const struct request *rq) 110static int rq_sched_domain(const struct request *rq)
107{ 111{
108 unsigned int op = rq->cmd_flags; 112 unsigned int op = rq->cmd_flags;
@@ -385,6 +389,9 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
385 389
386 for (i = 0; i < KYBER_NUM_DOMAINS; i++) { 390 for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
387 INIT_LIST_HEAD(&khd->rqs[i]); 391 INIT_LIST_HEAD(&khd->rqs[i]);
392 init_waitqueue_func_entry(&khd->domain_wait[i],
393 kyber_domain_wake);
394 khd->domain_wait[i].private = hctx;
388 INIT_LIST_HEAD(&khd->domain_wait[i].entry); 395 INIT_LIST_HEAD(&khd->domain_wait[i].entry);
389 atomic_set(&khd->wait_index[i], 0); 396 atomic_set(&khd->wait_index[i], 0);
390 } 397 }
@@ -524,35 +531,39 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
524 int nr; 531 int nr;
525 532
526 nr = __sbitmap_queue_get(domain_tokens); 533 nr = __sbitmap_queue_get(domain_tokens);
527 if (nr >= 0)
528 return nr;
529 534
530 /* 535 /*
531 * If we failed to get a domain token, make sure the hardware queue is 536 * If we failed to get a domain token, make sure the hardware queue is
532 * run when one becomes available. Note that this is serialized on 537 * run when one becomes available. Note that this is serialized on
533 * khd->lock, but we still need to be careful about the waker. 538 * khd->lock, but we still need to be careful about the waker.
534 */ 539 */
535 if (list_empty_careful(&wait->entry)) { 540 if (nr < 0 && list_empty_careful(&wait->entry)) {
536 init_waitqueue_func_entry(wait, kyber_domain_wake);
537 wait->private = hctx;
538 ws = sbq_wait_ptr(domain_tokens, 541 ws = sbq_wait_ptr(domain_tokens,
539 &khd->wait_index[sched_domain]); 542 &khd->wait_index[sched_domain]);
543 khd->domain_ws[sched_domain] = ws;
540 add_wait_queue(&ws->wait, wait); 544 add_wait_queue(&ws->wait, wait);
541 545
542 /* 546 /*
543 * Try again in case a token was freed before we got on the wait 547 * Try again in case a token was freed before we got on the wait
544 * queue. The waker may have already removed the entry from the 548 * queue.
545 * wait queue, but list_del_init() is okay with that.
546 */ 549 */
547 nr = __sbitmap_queue_get(domain_tokens); 550 nr = __sbitmap_queue_get(domain_tokens);
548 if (nr >= 0) { 551 }
549 unsigned long flags;
550 552
551 spin_lock_irqsave(&ws->wait.lock, flags); 553 /*
552 list_del_init(&wait->entry); 554 * If we got a token while we were on the wait queue, remove ourselves
553 spin_unlock_irqrestore(&ws->wait.lock, flags); 555 * from the wait queue to ensure that all wake ups make forward
554 } 556 * progress. It's possible that the waker already deleted the entry
557 * between the !list_empty_careful() check and us grabbing the lock, but
558 * list_del_init() is okay with that.
559 */
560 if (nr >= 0 && !list_empty_careful(&wait->entry)) {
561 ws = khd->domain_ws[sched_domain];
562 spin_lock_irq(&ws->wait.lock);
563 list_del_init(&wait->entry);
564 spin_unlock_irq(&ws->wait.lock);
555 } 565 }
566
556 return nr; 567 return nr;
557} 568}
558 569
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 358749c38894..35d4dcea381f 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -664,7 +664,7 @@ void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
664 unsigned int i; 664 unsigned int i;
665 665
666 list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) { 666 list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
667 ctx->rcvused -= rsgl->sg_num_bytes; 667 atomic_sub(rsgl->sg_num_bytes, &ctx->rcvused);
668 af_alg_free_sg(&rsgl->sgl); 668 af_alg_free_sg(&rsgl->sgl);
669 list_del(&rsgl->list); 669 list_del(&rsgl->list);
670 if (rsgl != &areq->first_rsgl) 670 if (rsgl != &areq->first_rsgl)
@@ -672,14 +672,15 @@ void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
672 } 672 }
673 673
674 tsgl = areq->tsgl; 674 tsgl = areq->tsgl;
675 for_each_sg(tsgl, sg, areq->tsgl_entries, i) { 675 if (tsgl) {
676 if (!sg_page(sg)) 676 for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
677 continue; 677 if (!sg_page(sg))
678 put_page(sg_page(sg)); 678 continue;
679 } 679 put_page(sg_page(sg));
680 }
680 681
681 if (areq->tsgl && areq->tsgl_entries)
682 sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl)); 682 sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
683 }
683} 684}
684EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls); 685EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls);
685 686
@@ -1137,12 +1138,6 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
1137 if (!af_alg_readable(sk)) 1138 if (!af_alg_readable(sk))
1138 break; 1139 break;
1139 1140
1140 if (!ctx->used) {
1141 err = af_alg_wait_for_data(sk, flags);
1142 if (err)
1143 return err;
1144 }
1145
1146 seglen = min_t(size_t, (maxsize - len), 1141 seglen = min_t(size_t, (maxsize - len),
1147 msg_data_left(msg)); 1142 msg_data_left(msg));
1148 1143
@@ -1168,7 +1163,7 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
1168 1163
1169 areq->last_rsgl = rsgl; 1164 areq->last_rsgl = rsgl;
1170 len += err; 1165 len += err;
1171 ctx->rcvused += err; 1166 atomic_add(err, &ctx->rcvused);
1172 rsgl->sg_num_bytes = err; 1167 rsgl->sg_num_bytes = err;
1173 iov_iter_advance(&msg->msg_iter, err); 1168 iov_iter_advance(&msg->msg_iter, err);
1174 } 1169 }
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 60d7366ed343..9a636f961572 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -167,6 +167,18 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
167 167
168 spawn->alg = NULL; 168 spawn->alg = NULL;
169 spawns = &inst->alg.cra_users; 169 spawns = &inst->alg.cra_users;
170
171 /*
172 * We may encounter an unregistered instance here, since
173 * an instance's spawns are set up prior to the instance
174 * being registered. An unregistered instance will have
175 * NULL ->cra_users.next, since ->cra_users isn't
176 * properly initialized until registration. But an
177 * unregistered instance cannot have any users, so treat
178 * it the same as ->cra_users being empty.
179 */
180 if (spawns->next == NULL)
181 break;
170 } 182 }
171 } while ((spawns = crypto_more_spawns(alg, &stack, &top, 183 } while ((spawns = crypto_more_spawns(alg, &stack, &top,
172 &secondary_spawns))); 184 &secondary_spawns)));
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 805f485ddf1b..e9885a35ef6e 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -111,6 +111,12 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
111 size_t usedpages = 0; /* [in] RX bufs to be used from user */ 111 size_t usedpages = 0; /* [in] RX bufs to be used from user */
112 size_t processed = 0; /* [in] TX bufs to be consumed */ 112 size_t processed = 0; /* [in] TX bufs to be consumed */
113 113
114 if (!ctx->used) {
115 err = af_alg_wait_for_data(sk, flags);
116 if (err)
117 return err;
118 }
119
114 /* 120 /*
115 * Data length provided by caller via sendmsg/sendpage that has not 121 * Data length provided by caller via sendmsg/sendpage that has not
116 * yet been processed. 122 * yet been processed.
@@ -285,6 +291,10 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
285 /* AIO operation */ 291 /* AIO operation */
286 sock_hold(sk); 292 sock_hold(sk);
287 areq->iocb = msg->msg_iocb; 293 areq->iocb = msg->msg_iocb;
294
295 /* Remember output size that will be generated. */
296 areq->outlen = outlen;
297
288 aead_request_set_callback(&areq->cra_u.aead_req, 298 aead_request_set_callback(&areq->cra_u.aead_req,
289 CRYPTO_TFM_REQ_MAY_BACKLOG, 299 CRYPTO_TFM_REQ_MAY_BACKLOG,
290 af_alg_async_cb, areq); 300 af_alg_async_cb, areq);
@@ -292,12 +302,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
292 crypto_aead_decrypt(&areq->cra_u.aead_req); 302 crypto_aead_decrypt(&areq->cra_u.aead_req);
293 303
294 /* AIO operation in progress */ 304 /* AIO operation in progress */
295 if (err == -EINPROGRESS || err == -EBUSY) { 305 if (err == -EINPROGRESS || err == -EBUSY)
296 /* Remember output size that will be generated. */
297 areq->outlen = outlen;
298
299 return -EIOCBQUEUED; 306 return -EIOCBQUEUED;
300 }
301 307
302 sock_put(sk); 308 sock_put(sk);
303 } else { 309 } else {
@@ -503,6 +509,7 @@ static void aead_release(void *private)
503 struct aead_tfm *tfm = private; 509 struct aead_tfm *tfm = private;
504 510
505 crypto_free_aead(tfm->aead); 511 crypto_free_aead(tfm->aead);
512 crypto_put_default_null_skcipher2();
506 kfree(tfm); 513 kfree(tfm);
507} 514}
508 515
@@ -535,7 +542,6 @@ static void aead_sock_destruct(struct sock *sk)
535 unsigned int ivlen = crypto_aead_ivsize(tfm); 542 unsigned int ivlen = crypto_aead_ivsize(tfm);
536 543
537 af_alg_pull_tsgl(sk, ctx->used, NULL, 0); 544 af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
538 crypto_put_default_null_skcipher2();
539 sock_kzfree_s(sk, ctx->iv, ivlen); 545 sock_kzfree_s(sk, ctx->iv, ivlen);
540 sock_kfree_s(sk, ctx, ctx->len); 546 sock_kfree_s(sk, ctx, ctx->len);
541 af_alg_release_parent(sk); 547 af_alg_release_parent(sk);
@@ -565,7 +571,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
565 INIT_LIST_HEAD(&ctx->tsgl_list); 571 INIT_LIST_HEAD(&ctx->tsgl_list);
566 ctx->len = len; 572 ctx->len = len;
567 ctx->used = 0; 573 ctx->used = 0;
568 ctx->rcvused = 0; 574 atomic_set(&ctx->rcvused, 0);
569 ctx->more = 0; 575 ctx->more = 0;
570 ctx->merge = 0; 576 ctx->merge = 0;
571 ctx->enc = 0; 577 ctx->enc = 0;
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 30cff827dd8f..c5c47b680152 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -72,6 +72,12 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
72 int err = 0; 72 int err = 0;
73 size_t len = 0; 73 size_t len = 0;
74 74
75 if (!ctx->used) {
76 err = af_alg_wait_for_data(sk, flags);
77 if (err)
78 return err;
79 }
80
75 /* Allocate cipher request for current operation. */ 81 /* Allocate cipher request for current operation. */
76 areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) + 82 areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
77 crypto_skcipher_reqsize(tfm)); 83 crypto_skcipher_reqsize(tfm));
@@ -119,6 +125,10 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
119 /* AIO operation */ 125 /* AIO operation */
120 sock_hold(sk); 126 sock_hold(sk);
121 areq->iocb = msg->msg_iocb; 127 areq->iocb = msg->msg_iocb;
128
129 /* Remember output size that will be generated. */
130 areq->outlen = len;
131
122 skcipher_request_set_callback(&areq->cra_u.skcipher_req, 132 skcipher_request_set_callback(&areq->cra_u.skcipher_req,
123 CRYPTO_TFM_REQ_MAY_SLEEP, 133 CRYPTO_TFM_REQ_MAY_SLEEP,
124 af_alg_async_cb, areq); 134 af_alg_async_cb, areq);
@@ -127,12 +137,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
127 crypto_skcipher_decrypt(&areq->cra_u.skcipher_req); 137 crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
128 138
129 /* AIO operation in progress */ 139 /* AIO operation in progress */
130 if (err == -EINPROGRESS || err == -EBUSY) { 140 if (err == -EINPROGRESS || err == -EBUSY)
131 /* Remember output size that will be generated. */
132 areq->outlen = len;
133
134 return -EIOCBQUEUED; 141 return -EIOCBQUEUED;
135 }
136 142
137 sock_put(sk); 143 sock_put(sk);
138 } else { 144 } else {
@@ -384,7 +390,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
384 INIT_LIST_HEAD(&ctx->tsgl_list); 390 INIT_LIST_HEAD(&ctx->tsgl_list);
385 ctx->len = len; 391 ctx->len = len;
386 ctx->used = 0; 392 ctx->used = 0;
387 ctx->rcvused = 0; 393 atomic_set(&ctx->rcvused, 0);
388 ctx->more = 0; 394 ctx->more = 0;
389 ctx->merge = 0; 395 ctx->merge = 0;
390 ctx->enc = 0; 396 ctx->enc = 0;
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index c1ca1e86f5c4..a6dcaa659aa8 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -148,8 +148,10 @@ struct pkcs7_message *pkcs7_parse_message(const void *data, size_t datalen)
148 } 148 }
149 149
150 ret = pkcs7_check_authattrs(ctx->msg); 150 ret = pkcs7_check_authattrs(ctx->msg);
151 if (ret < 0) 151 if (ret < 0) {
152 msg = ERR_PTR(ret);
152 goto out; 153 goto out;
154 }
153 155
154 msg = ctx->msg; 156 msg = ctx->msg;
155 ctx->msg = NULL; 157 ctx->msg = NULL;
diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c
index f6a009d88a33..1f4e25f10049 100644
--- a/crypto/asymmetric_keys/pkcs7_trust.c
+++ b/crypto/asymmetric_keys/pkcs7_trust.c
@@ -69,7 +69,7 @@ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7,
69 /* Self-signed certificates form roots of their own, and if we 69 /* Self-signed certificates form roots of their own, and if we
70 * don't know them, then we can't accept them. 70 * don't know them, then we can't accept them.
71 */ 71 */
72 if (x509->next == x509) { 72 if (x509->signer == x509) {
73 kleave(" = -ENOKEY [unknown self-signed]"); 73 kleave(" = -ENOKEY [unknown self-signed]");
74 return -ENOKEY; 74 return -ENOKEY;
75 } 75 }
diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
index 2d93d9eccb4d..39e6de0c2761 100644
--- a/crypto/asymmetric_keys/pkcs7_verify.c
+++ b/crypto/asymmetric_keys/pkcs7_verify.c
@@ -59,11 +59,8 @@ static int pkcs7_digest(struct pkcs7_message *pkcs7,
59 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 59 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
60 60
61 /* Digest the message [RFC2315 9.3] */ 61 /* Digest the message [RFC2315 9.3] */
62 ret = crypto_shash_init(desc); 62 ret = crypto_shash_digest(desc, pkcs7->data, pkcs7->data_len,
63 if (ret < 0) 63 sig->digest);
64 goto error;
65 ret = crypto_shash_finup(desc, pkcs7->data, pkcs7->data_len,
66 sig->digest);
67 if (ret < 0) 64 if (ret < 0)
68 goto error; 65 goto error;
69 pr_devel("MsgDigest = [%*ph]\n", 8, sig->digest); 66 pr_devel("MsgDigest = [%*ph]\n", 8, sig->digest);
@@ -150,7 +147,7 @@ static int pkcs7_find_key(struct pkcs7_message *pkcs7,
150 pr_devel("Sig %u: Found cert serial match X.509[%u]\n", 147 pr_devel("Sig %u: Found cert serial match X.509[%u]\n",
151 sinfo->index, certix); 148 sinfo->index, certix);
152 149
153 if (x509->pub->pkey_algo != sinfo->sig->pkey_algo) { 150 if (strcmp(x509->pub->pkey_algo, sinfo->sig->pkey_algo) != 0) {
154 pr_warn("Sig %u: X.509 algo and PKCS#7 sig algo don't match\n", 151 pr_warn("Sig %u: X.509 algo and PKCS#7 sig algo don't match\n",
155 sinfo->index); 152 sinfo->index);
156 continue; 153 continue;
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index bc3035ef27a2..de996586762a 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -73,7 +73,7 @@ int public_key_verify_signature(const struct public_key *pkey,
73 char alg_name_buf[CRYPTO_MAX_ALG_NAME]; 73 char alg_name_buf[CRYPTO_MAX_ALG_NAME];
74 void *output; 74 void *output;
75 unsigned int outlen; 75 unsigned int outlen;
76 int ret = -ENOMEM; 76 int ret;
77 77
78 pr_devel("==>%s()\n", __func__); 78 pr_devel("==>%s()\n", __func__);
79 79
@@ -99,6 +99,7 @@ int public_key_verify_signature(const struct public_key *pkey,
99 if (IS_ERR(tfm)) 99 if (IS_ERR(tfm))
100 return PTR_ERR(tfm); 100 return PTR_ERR(tfm);
101 101
102 ret = -ENOMEM;
102 req = akcipher_request_alloc(tfm, GFP_KERNEL); 103 req = akcipher_request_alloc(tfm, GFP_KERNEL);
103 if (!req) 104 if (!req)
104 goto error_free_tfm; 105 goto error_free_tfm;
@@ -127,7 +128,7 @@ int public_key_verify_signature(const struct public_key *pkey,
127 * signature and returns that to us. 128 * signature and returns that to us.
128 */ 129 */
129 ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait); 130 ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
130 if (ret < 0) 131 if (ret)
131 goto out_free_output; 132 goto out_free_output;
132 133
133 /* Do the actual verification step. */ 134 /* Do the actual verification step. */
@@ -142,6 +143,8 @@ error_free_req:
142error_free_tfm: 143error_free_tfm:
143 crypto_free_akcipher(tfm); 144 crypto_free_akcipher(tfm);
144 pr_devel("<==%s() = %d\n", __func__, ret); 145 pr_devel("<==%s() = %d\n", __func__, ret);
146 if (WARN_ON_ONCE(ret > 0))
147 ret = -EINVAL;
145 return ret; 148 return ret;
146} 149}
147EXPORT_SYMBOL_GPL(public_key_verify_signature); 150EXPORT_SYMBOL_GPL(public_key_verify_signature);
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index dd03fead1ca3..ce2df8c9c583 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -409,6 +409,8 @@ int x509_extract_key_data(void *context, size_t hdrlen,
409 ctx->cert->pub->pkey_algo = "rsa"; 409 ctx->cert->pub->pkey_algo = "rsa";
410 410
411 /* Discard the BIT STRING metadata */ 411 /* Discard the BIT STRING metadata */
412 if (vlen < 1 || *(const u8 *)value != 0)
413 return -EBADMSG;
412 ctx->key = value + 1; 414 ctx->key = value + 1;
413 ctx->key_size = vlen - 1; 415 ctx->key_size = vlen - 1;
414 return 0; 416 return 0;
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index c9013582c026..9338b4558cdc 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -79,11 +79,7 @@ int x509_get_sig_params(struct x509_certificate *cert)
79 desc->tfm = tfm; 79 desc->tfm = tfm;
80 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 80 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
81 81
82 ret = crypto_shash_init(desc); 82 ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size, sig->digest);
83 if (ret < 0)
84 goto error_2;
85 might_sleep();
86 ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, sig->digest);
87 if (ret < 0) 83 if (ret < 0)
88 goto error_2; 84 goto error_2;
89 85
@@ -135,7 +131,7 @@ int x509_check_for_self_signed(struct x509_certificate *cert)
135 } 131 }
136 132
137 ret = -EKEYREJECTED; 133 ret = -EKEYREJECTED;
138 if (cert->pub->pkey_algo != cert->sig->pkey_algo) 134 if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0)
139 goto out; 135 goto out;
140 136
141 ret = public_key_verify_signature(cert->pub, cert->sig); 137 ret = public_key_verify_signature(cert->pub, cert->sig);
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index db1bc3147bc4..600afa99941f 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -610,6 +610,11 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
610 algt->mask)); 610 algt->mask));
611 if (IS_ERR(poly)) 611 if (IS_ERR(poly))
612 return PTR_ERR(poly); 612 return PTR_ERR(poly);
613 poly_hash = __crypto_hash_alg_common(poly);
614
615 err = -EINVAL;
616 if (poly_hash->digestsize != POLY1305_DIGEST_SIZE)
617 goto out_put_poly;
613 618
614 err = -ENOMEM; 619 err = -ENOMEM;
615 inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); 620 inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
@@ -618,7 +623,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
618 623
619 ctx = aead_instance_ctx(inst); 624 ctx = aead_instance_ctx(inst);
620 ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize; 625 ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize;
621 poly_hash = __crypto_hash_alg_common(poly);
622 err = crypto_init_ahash_spawn(&ctx->poly, poly_hash, 626 err = crypto_init_ahash_spawn(&ctx->poly, poly_hash,
623 aead_crypto_instance(inst)); 627 aead_crypto_instance(inst));
624 if (err) 628 if (err)
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 92871dc2a63e..e74730224f0a 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -195,11 +195,15 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
195 salg = shash_attr_alg(tb[1], 0, 0); 195 salg = shash_attr_alg(tb[1], 0, 0);
196 if (IS_ERR(salg)) 196 if (IS_ERR(salg))
197 return PTR_ERR(salg); 197 return PTR_ERR(salg);
198 alg = &salg->base;
198 199
200 /* The underlying hash algorithm must be unkeyed */
199 err = -EINVAL; 201 err = -EINVAL;
202 if (crypto_shash_alg_has_setkey(salg))
203 goto out_put_alg;
204
200 ds = salg->digestsize; 205 ds = salg->digestsize;
201 ss = salg->statesize; 206 ss = salg->statesize;
202 alg = &salg->base;
203 if (ds > alg->cra_blocksize || 207 if (ds > alg->cra_blocksize ||
204 ss < alg->cra_blocksize) 208 ss < alg->cra_blocksize)
205 goto out_put_alg; 209 goto out_put_alg;
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index 4e6472658852..eca04d3729b3 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -81,6 +81,7 @@ static int mcryptd_init_queue(struct mcryptd_queue *queue,
81 pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue); 81 pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
82 crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); 82 crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
83 INIT_WORK(&cpu_queue->work, mcryptd_queue_worker); 83 INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
84 spin_lock_init(&cpu_queue->q_lock);
84 } 85 }
85 return 0; 86 return 0;
86} 87}
@@ -104,15 +105,16 @@ static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
104 int cpu, err; 105 int cpu, err;
105 struct mcryptd_cpu_queue *cpu_queue; 106 struct mcryptd_cpu_queue *cpu_queue;
106 107
107 cpu = get_cpu(); 108 cpu_queue = raw_cpu_ptr(queue->cpu_queue);
108 cpu_queue = this_cpu_ptr(queue->cpu_queue); 109 spin_lock(&cpu_queue->q_lock);
109 rctx->tag.cpu = cpu; 110 cpu = smp_processor_id();
111 rctx->tag.cpu = smp_processor_id();
110 112
111 err = crypto_enqueue_request(&cpu_queue->queue, request); 113 err = crypto_enqueue_request(&cpu_queue->queue, request);
112 pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n", 114 pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
113 cpu, cpu_queue, request); 115 cpu, cpu_queue, request);
116 spin_unlock(&cpu_queue->q_lock);
114 queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); 117 queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
115 put_cpu();
116 118
117 return err; 119 return err;
118} 120}
@@ -161,16 +163,11 @@ static void mcryptd_queue_worker(struct work_struct *work)
161 cpu_queue = container_of(work, struct mcryptd_cpu_queue, work); 163 cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
162 i = 0; 164 i = 0;
163 while (i < MCRYPTD_BATCH || single_task_running()) { 165 while (i < MCRYPTD_BATCH || single_task_running()) {
164 /* 166
165 * preempt_disable/enable is used to prevent 167 spin_lock_bh(&cpu_queue->q_lock);
166 * being preempted by mcryptd_enqueue_request()
167 */
168 local_bh_disable();
169 preempt_disable();
170 backlog = crypto_get_backlog(&cpu_queue->queue); 168 backlog = crypto_get_backlog(&cpu_queue->queue);
171 req = crypto_dequeue_request(&cpu_queue->queue); 169 req = crypto_dequeue_request(&cpu_queue->queue);
172 preempt_enable(); 170 spin_unlock_bh(&cpu_queue->q_lock);
173 local_bh_enable();
174 171
175 if (!req) { 172 if (!req) {
176 mcryptd_opportunistic_flush(); 173 mcryptd_opportunistic_flush();
@@ -185,7 +182,7 @@ static void mcryptd_queue_worker(struct work_struct *work)
185 ++i; 182 ++i;
186 } 183 }
187 if (cpu_queue->queue.qlen) 184 if (cpu_queue->queue.qlen)
188 queue_work(kcrypto_wq, &cpu_queue->work); 185 queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
189} 186}
190 187
191void mcryptd_flusher(struct work_struct *__work) 188void mcryptd_flusher(struct work_struct *__work)
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index ee9cfb99fe25..f8ec3d4ba4a8 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -254,6 +254,14 @@ static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
254 crypto_free_aead(ctx->child); 254 crypto_free_aead(ctx->child);
255} 255}
256 256
257static void pcrypt_free(struct aead_instance *inst)
258{
259 struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);
260
261 crypto_drop_aead(&ctx->spawn);
262 kfree(inst);
263}
264
257static int pcrypt_init_instance(struct crypto_instance *inst, 265static int pcrypt_init_instance(struct crypto_instance *inst,
258 struct crypto_alg *alg) 266 struct crypto_alg *alg)
259{ 267{
@@ -319,6 +327,8 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
319 inst->alg.encrypt = pcrypt_aead_encrypt; 327 inst->alg.encrypt = pcrypt_aead_encrypt;
320 inst->alg.decrypt = pcrypt_aead_decrypt; 328 inst->alg.decrypt = pcrypt_aead_decrypt;
321 329
330 inst->free = pcrypt_free;
331
322 err = aead_register_instance(tmpl, inst); 332 err = aead_register_instance(tmpl, inst);
323 if (err) 333 if (err)
324 goto out_drop_aead; 334 goto out_drop_aead;
@@ -349,14 +359,6 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
349 return -EINVAL; 359 return -EINVAL;
350} 360}
351 361
352static void pcrypt_free(struct crypto_instance *inst)
353{
354 struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);
355
356 crypto_drop_aead(&ctx->spawn);
357 kfree(inst);
358}
359
360static int pcrypt_cpumask_change_notify(struct notifier_block *self, 362static int pcrypt_cpumask_change_notify(struct notifier_block *self,
361 unsigned long val, void *data) 363 unsigned long val, void *data)
362{ 364{
@@ -469,7 +471,6 @@ static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
469static struct crypto_template pcrypt_tmpl = { 471static struct crypto_template pcrypt_tmpl = {
470 .name = "pcrypt", 472 .name = "pcrypt",
471 .create = pcrypt_create, 473 .create = pcrypt_create,
472 .free = pcrypt_free,
473 .module = THIS_MODULE, 474 .module = THIS_MODULE,
474}; 475};
475 476
diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c
index 0b66dc824606..cad395d70d78 100644
--- a/crypto/rsa_helper.c
+++ b/crypto/rsa_helper.c
@@ -30,7 +30,7 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
30 return -EINVAL; 30 return -EINVAL;
31 31
32 if (fips_enabled) { 32 if (fips_enabled) {
33 while (!*ptr && n_sz) { 33 while (n_sz && !*ptr) {
34 ptr++; 34 ptr++;
35 n_sz--; 35 n_sz--;
36 } 36 }
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
index f550b5d94630..d7da0eea5622 100644
--- a/crypto/salsa20_generic.c
+++ b/crypto/salsa20_generic.c
@@ -188,13 +188,6 @@ static int encrypt(struct blkcipher_desc *desc,
188 188
189 salsa20_ivsetup(ctx, walk.iv); 189 salsa20_ivsetup(ctx, walk.iv);
190 190
191 if (likely(walk.nbytes == nbytes))
192 {
193 salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
194 walk.src.virt.addr, nbytes);
195 return blkcipher_walk_done(desc, &walk, 0);
196 }
197
198 while (walk.nbytes >= 64) { 191 while (walk.nbytes >= 64) {
199 salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, 192 salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
200 walk.src.virt.addr, 193 walk.src.virt.addr,
diff --git a/crypto/shash.c b/crypto/shash.c
index 325a14da5827..e849d3ee2e27 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -25,11 +25,12 @@
25 25
26static const struct crypto_type crypto_shash_type; 26static const struct crypto_type crypto_shash_type;
27 27
28static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, 28int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
29 unsigned int keylen) 29 unsigned int keylen)
30{ 30{
31 return -ENOSYS; 31 return -ENOSYS;
32} 32}
33EXPORT_SYMBOL_GPL(shash_no_setkey);
33 34
34static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, 35static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
35 unsigned int keylen) 36 unsigned int keylen)
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 778e0ff42bfa..11af5fd6a443 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -449,6 +449,8 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
449 449
450 walk->total = req->cryptlen; 450 walk->total = req->cryptlen;
451 walk->nbytes = 0; 451 walk->nbytes = 0;
452 walk->iv = req->iv;
453 walk->oiv = req->iv;
452 454
453 if (unlikely(!walk->total)) 455 if (unlikely(!walk->total))
454 return 0; 456 return 0;
@@ -456,9 +458,6 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
456 scatterwalk_start(&walk->in, req->src); 458 scatterwalk_start(&walk->in, req->src);
457 scatterwalk_start(&walk->out, req->dst); 459 scatterwalk_start(&walk->out, req->dst);
458 460
459 walk->iv = req->iv;
460 walk->oiv = req->iv;
461
462 walk->flags &= ~SKCIPHER_WALK_SLEEP; 461 walk->flags &= ~SKCIPHER_WALK_SLEEP;
463 walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 462 walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
464 SKCIPHER_WALK_SLEEP : 0; 463 SKCIPHER_WALK_SLEEP : 0;
@@ -510,6 +509,8 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
510 int err; 509 int err;
511 510
512 walk->nbytes = 0; 511 walk->nbytes = 0;
512 walk->iv = req->iv;
513 walk->oiv = req->iv;
513 514
514 if (unlikely(!walk->total)) 515 if (unlikely(!walk->total))
515 return 0; 516 return 0;
@@ -525,9 +526,6 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
525 scatterwalk_done(&walk->in, 0, walk->total); 526 scatterwalk_done(&walk->in, 0, walk->total);
526 scatterwalk_done(&walk->out, 0, walk->total); 527 scatterwalk_done(&walk->out, 0, walk->total);
527 528
528 walk->iv = req->iv;
529 walk->oiv = req->iv;
530
531 if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) 529 if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
532 walk->flags |= SKCIPHER_WALK_SLEEP; 530 walk->flags |= SKCIPHER_WALK_SLEEP;
533 else 531 else
diff --git a/drivers/Makefile b/drivers/Makefile
index 1d034b680431..e06f7f633f73 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -105,6 +105,7 @@ obj-$(CONFIG_TC) += tc/
105obj-$(CONFIG_UWB) += uwb/ 105obj-$(CONFIG_UWB) += uwb/
106obj-$(CONFIG_USB_PHY) += usb/ 106obj-$(CONFIG_USB_PHY) += usb/
107obj-$(CONFIG_USB) += usb/ 107obj-$(CONFIG_USB) += usb/
108obj-$(CONFIG_USB_SUPPORT) += usb/
108obj-$(CONFIG_PCI) += usb/ 109obj-$(CONFIG_PCI) += usb/
109obj-$(CONFIG_USB_GADGET) += usb/ 110obj-$(CONFIG_USB_GADGET) += usb/
110obj-$(CONFIG_OF) += usb/ 111obj-$(CONFIG_OF) += usb/
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 6742f6c68034..9bff853e85f3 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -1007,7 +1007,7 @@ skip:
1007 /* The record may be cleared by others, try read next record */ 1007 /* The record may be cleared by others, try read next record */
1008 if (len == -ENOENT) 1008 if (len == -ENOENT)
1009 goto skip; 1009 goto skip;
1010 else if (len < sizeof(*rcd)) { 1010 else if (len < 0 || len < sizeof(*rcd)) {
1011 rc = -EIO; 1011 rc = -EIO;
1012 goto out; 1012 goto out;
1013 } 1013 }
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 21c28433c590..06ea4749ebd9 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -949,7 +949,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
949 } 949 }
950 950
951 *val = 0; 951 *val = 0;
952 if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) 952 if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
953 vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id); 953 vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
954 else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) 954 else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
955 vaddr = reg_res->sys_mem_vaddr; 955 vaddr = reg_res->sys_mem_vaddr;
@@ -988,7 +988,7 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
988 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); 988 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
989 struct cpc_reg *reg = &reg_res->cpc_entry.reg; 989 struct cpc_reg *reg = &reg_res->cpc_entry.reg;
990 990
991 if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) 991 if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
992 vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id); 992 vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
993 else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) 993 else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
994 vaddr = reg_res->sys_mem_vaddr; 994 vaddr = reg_res->sys_mem_vaddr;
@@ -1035,14 +1035,15 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
1035 *lowest_non_linear_reg, *nominal_reg; 1035 *lowest_non_linear_reg, *nominal_reg;
1036 u64 high, low, nom, min_nonlinear; 1036 u64 high, low, nom, min_nonlinear;
1037 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum); 1037 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1038 struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id]; 1038 struct cppc_pcc_data *pcc_ss_data;
1039 int ret = 0, regs_in_pcc = 0; 1039 int ret = 0, regs_in_pcc = 0;
1040 1040
1041 if (!cpc_desc) { 1041 if (!cpc_desc || pcc_ss_id < 0) {
1042 pr_debug("No CPC descriptor for CPU:%d\n", cpunum); 1042 pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1043 return -ENODEV; 1043 return -ENODEV;
1044 } 1044 }
1045 1045
1046 pcc_ss_data = pcc_data[pcc_ss_id];
1046 highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF]; 1047 highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
1047 lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF]; 1048 lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
1048 lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF]; 1049 lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
@@ -1095,15 +1096,16 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
1095 struct cpc_register_resource *delivered_reg, *reference_reg, 1096 struct cpc_register_resource *delivered_reg, *reference_reg,
1096 *ref_perf_reg, *ctr_wrap_reg; 1097 *ref_perf_reg, *ctr_wrap_reg;
1097 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum); 1098 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1098 struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id]; 1099 struct cppc_pcc_data *pcc_ss_data;
1099 u64 delivered, reference, ref_perf, ctr_wrap_time; 1100 u64 delivered, reference, ref_perf, ctr_wrap_time;
1100 int ret = 0, regs_in_pcc = 0; 1101 int ret = 0, regs_in_pcc = 0;
1101 1102
1102 if (!cpc_desc) { 1103 if (!cpc_desc || pcc_ss_id < 0) {
1103 pr_debug("No CPC descriptor for CPU:%d\n", cpunum); 1104 pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1104 return -ENODEV; 1105 return -ENODEV;
1105 } 1106 }
1106 1107
1108 pcc_ss_data = pcc_data[pcc_ss_id];
1107 delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR]; 1109 delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
1108 reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR]; 1110 reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
1109 ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF]; 1111 ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
@@ -1169,14 +1171,15 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
1169 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu); 1171 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1170 struct cpc_register_resource *desired_reg; 1172 struct cpc_register_resource *desired_reg;
1171 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); 1173 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1172 struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id]; 1174 struct cppc_pcc_data *pcc_ss_data;
1173 int ret = 0; 1175 int ret = 0;
1174 1176
1175 if (!cpc_desc) { 1177 if (!cpc_desc || pcc_ss_id < 0) {
1176 pr_debug("No CPC descriptor for CPU:%d\n", cpu); 1178 pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1177 return -ENODEV; 1179 return -ENODEV;
1178 } 1180 }
1179 1181
1182 pcc_ss_data = pcc_data[pcc_ss_id];
1180 desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; 1183 desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1181 1184
1182 /* 1185 /*
@@ -1301,7 +1304,7 @@ unsigned int cppc_get_transition_latency(int cpu_num)
1301 struct cpc_desc *cpc_desc; 1304 struct cpc_desc *cpc_desc;
1302 struct cpc_register_resource *desired_reg; 1305 struct cpc_register_resource *desired_reg;
1303 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num); 1306 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
1304 struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id]; 1307 struct cppc_pcc_data *pcc_ss_data;
1305 1308
1306 cpc_desc = per_cpu(cpc_desc_ptr, cpu_num); 1309 cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
1307 if (!cpc_desc) 1310 if (!cpc_desc)
@@ -1311,6 +1314,10 @@ unsigned int cppc_get_transition_latency(int cpu_num)
1311 if (!CPC_IN_PCC(desired_reg)) 1314 if (!CPC_IN_PCC(desired_reg))
1312 return CPUFREQ_ETERNAL; 1315 return CPUFREQ_ETERNAL;
1313 1316
1317 if (pcc_ss_id < 0)
1318 return CPUFREQ_ETERNAL;
1319
1320 pcc_ss_data = pcc_data[pcc_ss_id];
1314 if (pcc_ss_data->pcc_mpar) 1321 if (pcc_ss_data->pcc_mpar)
1315 latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar); 1322 latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);
1316 1323
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index e4ffaeec9ec2..a4c8ad98560d 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1138,7 +1138,7 @@ int acpi_subsys_thaw_noirq(struct device *dev)
1138 * skip all of the subsequent "thaw" callbacks for the device. 1138 * skip all of the subsequent "thaw" callbacks for the device.
1139 */ 1139 */
1140 if (dev_pm_smart_suspend_and_suspended(dev)) { 1140 if (dev_pm_smart_suspend_and_suspended(dev)) {
1141 dev->power.direct_complete = true; 1141 dev_pm_skip_next_resume_phases(dev);
1142 return 0; 1142 return 0;
1143 } 1143 }
1144 1144
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index ff2580e7611d..abeb4df4f22e 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1670,6 +1670,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
1670 dev_name(&adev_dimm->dev)); 1670 dev_name(&adev_dimm->dev));
1671 return -ENXIO; 1671 return -ENXIO;
1672 } 1672 }
1673 /*
1674 * Record nfit_mem for the notification path to track back to
1675 * the nfit sysfs attributes for this dimm device object.
1676 */
1677 dev_set_drvdata(&adev_dimm->dev, nfit_mem);
1673 1678
1674 /* 1679 /*
1675 * Until standardization materializes we need to consider 4 1680 * Until standardization materializes we need to consider 4
@@ -1752,9 +1757,11 @@ static void shutdown_dimm_notify(void *data)
1752 sysfs_put(nfit_mem->flags_attr); 1757 sysfs_put(nfit_mem->flags_attr);
1753 nfit_mem->flags_attr = NULL; 1758 nfit_mem->flags_attr = NULL;
1754 } 1759 }
1755 if (adev_dimm) 1760 if (adev_dimm) {
1756 acpi_remove_notify_handler(adev_dimm->handle, 1761 acpi_remove_notify_handler(adev_dimm->handle,
1757 ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify); 1762 ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
1763 dev_set_drvdata(&adev_dimm->dev, NULL);
1764 }
1758 } 1765 }
1759 mutex_unlock(&acpi_desc->init_mutex); 1766 mutex_unlock(&acpi_desc->init_mutex);
1760} 1767}
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index a73596a4f804..a7ecfde66b7b 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -482,7 +482,8 @@ enum binder_deferred_state {
482 * @tsk task_struct for group_leader of process 482 * @tsk task_struct for group_leader of process
483 * (invariant after initialized) 483 * (invariant after initialized)
484 * @files files_struct for process 484 * @files files_struct for process
485 * (invariant after initialized) 485 * (protected by @files_lock)
486 * @files_lock mutex to protect @files
486 * @deferred_work_node: element for binder_deferred_list 487 * @deferred_work_node: element for binder_deferred_list
487 * (protected by binder_deferred_lock) 488 * (protected by binder_deferred_lock)
488 * @deferred_work: bitmap of deferred work to perform 489 * @deferred_work: bitmap of deferred work to perform
@@ -530,6 +531,7 @@ struct binder_proc {
530 int pid; 531 int pid;
531 struct task_struct *tsk; 532 struct task_struct *tsk;
532 struct files_struct *files; 533 struct files_struct *files;
534 struct mutex files_lock;
533 struct hlist_node deferred_work_node; 535 struct hlist_node deferred_work_node;
534 int deferred_work; 536 int deferred_work;
535 bool is_dead; 537 bool is_dead;
@@ -877,20 +879,26 @@ static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
877 879
878static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) 880static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
879{ 881{
880 struct files_struct *files = proc->files;
881 unsigned long rlim_cur; 882 unsigned long rlim_cur;
882 unsigned long irqs; 883 unsigned long irqs;
884 int ret;
883 885
884 if (files == NULL) 886 mutex_lock(&proc->files_lock);
885 return -ESRCH; 887 if (proc->files == NULL) {
886 888 ret = -ESRCH;
887 if (!lock_task_sighand(proc->tsk, &irqs)) 889 goto err;
888 return -EMFILE; 890 }
889 891 if (!lock_task_sighand(proc->tsk, &irqs)) {
892 ret = -EMFILE;
893 goto err;
894 }
890 rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE); 895 rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
891 unlock_task_sighand(proc->tsk, &irqs); 896 unlock_task_sighand(proc->tsk, &irqs);
892 897
893 return __alloc_fd(files, 0, rlim_cur, flags); 898 ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
899err:
900 mutex_unlock(&proc->files_lock);
901 return ret;
894} 902}
895 903
896/* 904/*
@@ -899,8 +907,10 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
899static void task_fd_install( 907static void task_fd_install(
900 struct binder_proc *proc, unsigned int fd, struct file *file) 908 struct binder_proc *proc, unsigned int fd, struct file *file)
901{ 909{
910 mutex_lock(&proc->files_lock);
902 if (proc->files) 911 if (proc->files)
903 __fd_install(proc->files, fd, file); 912 __fd_install(proc->files, fd, file);
913 mutex_unlock(&proc->files_lock);
904} 914}
905 915
906/* 916/*
@@ -910,9 +920,11 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
910{ 920{
911 int retval; 921 int retval;
912 922
913 if (proc->files == NULL) 923 mutex_lock(&proc->files_lock);
914 return -ESRCH; 924 if (proc->files == NULL) {
915 925 retval = -ESRCH;
926 goto err;
927 }
916 retval = __close_fd(proc->files, fd); 928 retval = __close_fd(proc->files, fd);
917 /* can't restart close syscall because file table entry was cleared */ 929 /* can't restart close syscall because file table entry was cleared */
918 if (unlikely(retval == -ERESTARTSYS || 930 if (unlikely(retval == -ERESTARTSYS ||
@@ -920,7 +932,8 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
920 retval == -ERESTARTNOHAND || 932 retval == -ERESTARTNOHAND ||
921 retval == -ERESTART_RESTARTBLOCK)) 933 retval == -ERESTART_RESTARTBLOCK))
922 retval = -EINTR; 934 retval = -EINTR;
923 935err:
936 mutex_unlock(&proc->files_lock);
924 return retval; 937 return retval;
925} 938}
926 939
@@ -1948,6 +1961,26 @@ static void binder_send_failed_reply(struct binder_transaction *t,
1948} 1961}
1949 1962
1950/** 1963/**
1964 * binder_cleanup_transaction() - cleans up undelivered transaction
1965 * @t: transaction that needs to be cleaned up
1966 * @reason: reason the transaction wasn't delivered
1967 * @error_code: error to return to caller (if synchronous call)
1968 */
1969static void binder_cleanup_transaction(struct binder_transaction *t,
1970 const char *reason,
1971 uint32_t error_code)
1972{
1973 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1974 binder_send_failed_reply(t, error_code);
1975 } else {
1976 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1977 "undelivered transaction %d, %s\n",
1978 t->debug_id, reason);
1979 binder_free_transaction(t);
1980 }
1981}
1982
1983/**
1951 * binder_validate_object() - checks for a valid metadata object in a buffer. 1984 * binder_validate_object() - checks for a valid metadata object in a buffer.
1952 * @buffer: binder_buffer that we're parsing. 1985 * @buffer: binder_buffer that we're parsing.
1953 * @offset: offset in the buffer at which to validate an object. 1986 * @offset: offset in the buffer at which to validate an object.
@@ -4015,12 +4048,20 @@ retry:
4015 if (put_user(cmd, (uint32_t __user *)ptr)) { 4048 if (put_user(cmd, (uint32_t __user *)ptr)) {
4016 if (t_from) 4049 if (t_from)
4017 binder_thread_dec_tmpref(t_from); 4050 binder_thread_dec_tmpref(t_from);
4051
4052 binder_cleanup_transaction(t, "put_user failed",
4053 BR_FAILED_REPLY);
4054
4018 return -EFAULT; 4055 return -EFAULT;
4019 } 4056 }
4020 ptr += sizeof(uint32_t); 4057 ptr += sizeof(uint32_t);
4021 if (copy_to_user(ptr, &tr, sizeof(tr))) { 4058 if (copy_to_user(ptr, &tr, sizeof(tr))) {
4022 if (t_from) 4059 if (t_from)
4023 binder_thread_dec_tmpref(t_from); 4060 binder_thread_dec_tmpref(t_from);
4061
4062 binder_cleanup_transaction(t, "copy_to_user failed",
4063 BR_FAILED_REPLY);
4064
4024 return -EFAULT; 4065 return -EFAULT;
4025 } 4066 }
4026 ptr += sizeof(tr); 4067 ptr += sizeof(tr);
@@ -4090,15 +4131,9 @@ static void binder_release_work(struct binder_proc *proc,
4090 struct binder_transaction *t; 4131 struct binder_transaction *t;
4091 4132
4092 t = container_of(w, struct binder_transaction, work); 4133 t = container_of(w, struct binder_transaction, work);
4093 if (t->buffer->target_node && 4134
4094 !(t->flags & TF_ONE_WAY)) { 4135 binder_cleanup_transaction(t, "process died.",
4095 binder_send_failed_reply(t, BR_DEAD_REPLY); 4136 BR_DEAD_REPLY);
4096 } else {
4097 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4098 "undelivered transaction %d\n",
4099 t->debug_id);
4100 binder_free_transaction(t);
4101 }
4102 } break; 4137 } break;
4103 case BINDER_WORK_RETURN_ERROR: { 4138 case BINDER_WORK_RETURN_ERROR: {
4104 struct binder_error *e = container_of( 4139 struct binder_error *e = container_of(
@@ -4605,7 +4640,9 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4605 ret = binder_alloc_mmap_handler(&proc->alloc, vma); 4640 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
4606 if (ret) 4641 if (ret)
4607 return ret; 4642 return ret;
4643 mutex_lock(&proc->files_lock);
4608 proc->files = get_files_struct(current); 4644 proc->files = get_files_struct(current);
4645 mutex_unlock(&proc->files_lock);
4609 return 0; 4646 return 0;
4610 4647
4611err_bad_arg: 4648err_bad_arg:
@@ -4629,6 +4666,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
4629 spin_lock_init(&proc->outer_lock); 4666 spin_lock_init(&proc->outer_lock);
4630 get_task_struct(current->group_leader); 4667 get_task_struct(current->group_leader);
4631 proc->tsk = current->group_leader; 4668 proc->tsk = current->group_leader;
4669 mutex_init(&proc->files_lock);
4632 INIT_LIST_HEAD(&proc->todo); 4670 INIT_LIST_HEAD(&proc->todo);
4633 proc->default_priority = task_nice(current); 4671 proc->default_priority = task_nice(current);
4634 binder_dev = container_of(filp->private_data, struct binder_device, 4672 binder_dev = container_of(filp->private_data, struct binder_device,
@@ -4881,9 +4919,11 @@ static void binder_deferred_func(struct work_struct *work)
4881 4919
4882 files = NULL; 4920 files = NULL;
4883 if (defer & BINDER_DEFERRED_PUT_FILES) { 4921 if (defer & BINDER_DEFERRED_PUT_FILES) {
4922 mutex_lock(&proc->files_lock);
4884 files = proc->files; 4923 files = proc->files;
4885 if (files) 4924 if (files)
4886 proc->files = NULL; 4925 proc->files = NULL;
4926 mutex_unlock(&proc->files_lock);
4887 } 4927 }
4888 4928
4889 if (defer & BINDER_DEFERRED_FLUSH) 4929 if (defer & BINDER_DEFERRED_FLUSH)
diff --git a/drivers/ata/ahci_mtk.c b/drivers/ata/ahci_mtk.c
index 80854f71559a..0ae6971c2a4c 100644
--- a/drivers/ata/ahci_mtk.c
+++ b/drivers/ata/ahci_mtk.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * MeidaTek AHCI SATA driver 2 * MediaTek AHCI SATA driver
3 * 3 *
4 * Copyright (c) 2017 MediaTek Inc. 4 * Copyright (c) 2017 MediaTek Inc.
5 * Author: Ryder Lee <ryder.lee@mediatek.com> 5 * Author: Ryder Lee <ryder.lee@mediatek.com>
@@ -25,7 +25,7 @@
25#include <linux/reset.h> 25#include <linux/reset.h>
26#include "ahci.h" 26#include "ahci.h"
27 27
28#define DRV_NAME "ahci" 28#define DRV_NAME "ahci-mtk"
29 29
30#define SYS_CFG 0x14 30#define SYS_CFG 0x14
31#define SYS_CFG_SATA_MSK GENMASK(31, 30) 31#define SYS_CFG_SATA_MSK GENMASK(31, 30)
@@ -192,5 +192,5 @@ static struct platform_driver mtk_ahci_driver = {
192}; 192};
193module_platform_driver(mtk_ahci_driver); 193module_platform_driver(mtk_ahci_driver);
194 194
195MODULE_DESCRIPTION("MeidaTek SATA AHCI Driver"); 195MODULE_DESCRIPTION("MediaTek SATA AHCI Driver");
196MODULE_LICENSE("GPL v2"); 196MODULE_LICENSE("GPL v2");
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index b6b0bf76dfc7..2685f28160f7 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -35,6 +35,8 @@
35 35
36/* port register default value */ 36/* port register default value */
37#define AHCI_PORT_PHY_1_CFG 0xa003fffe 37#define AHCI_PORT_PHY_1_CFG 0xa003fffe
38#define AHCI_PORT_PHY2_CFG 0x28184d1f
39#define AHCI_PORT_PHY3_CFG 0x0e081509
38#define AHCI_PORT_TRANS_CFG 0x08000029 40#define AHCI_PORT_TRANS_CFG 0x08000029
39#define AHCI_PORT_AXICC_CFG 0x3fffffff 41#define AHCI_PORT_AXICC_CFG 0x3fffffff
40 42
@@ -183,6 +185,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
183 writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2, 185 writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
184 qpriv->ecc_addr); 186 qpriv->ecc_addr);
185 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); 187 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
188 writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
189 writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
186 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); 190 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
187 if (qpriv->is_dmacoherent) 191 if (qpriv->is_dmacoherent)
188 writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); 192 writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -190,6 +194,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
190 194
191 case AHCI_LS2080A: 195 case AHCI_LS2080A:
192 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); 196 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
197 writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
198 writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
193 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); 199 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
194 if (qpriv->is_dmacoherent) 200 if (qpriv->is_dmacoherent)
195 writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); 201 writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -201,6 +207,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
201 writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2, 207 writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
202 qpriv->ecc_addr); 208 qpriv->ecc_addr);
203 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); 209 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
210 writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
211 writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
204 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); 212 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
205 if (qpriv->is_dmacoherent) 213 if (qpriv->is_dmacoherent)
206 writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); 214 writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -212,6 +220,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
212 writel(readl(qpriv->ecc_addr) | ECC_DIS_LS1088A, 220 writel(readl(qpriv->ecc_addr) | ECC_DIS_LS1088A,
213 qpriv->ecc_addr); 221 qpriv->ecc_addr);
214 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); 222 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
223 writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
224 writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
215 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); 225 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
216 if (qpriv->is_dmacoherent) 226 if (qpriv->is_dmacoherent)
217 writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); 227 writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -219,6 +229,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
219 229
220 case AHCI_LS2088A: 230 case AHCI_LS2088A:
221 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); 231 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
232 writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
233 writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
222 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); 234 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
223 if (qpriv->is_dmacoherent) 235 if (qpriv->is_dmacoherent)
224 writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); 236 writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 2a882929de4a..3c09122bf038 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3082,13 +3082,19 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
3082 bit = fls(mask) - 1; 3082 bit = fls(mask) - 1;
3083 mask &= ~(1 << bit); 3083 mask &= ~(1 << bit);
3084 3084
3085 /* Mask off all speeds higher than or equal to the current 3085 /*
3086 * one. Force 1.5Gbps if current SPD is not available. 3086 * Mask off all speeds higher than or equal to the current one. At
3087 * this point, if current SPD is not available and we previously
3088 * recorded the link speed from SStatus, the driver has already
3089 * masked off the highest bit so mask should already be 1 or 0.
3090 * Otherwise, we should not force 1.5Gbps on a link where we have
3091 * not previously recorded speed from SStatus. Just return in this
3092 * case.
3087 */ 3093 */
3088 if (spd > 1) 3094 if (spd > 1)
3089 mask &= (1 << (spd - 1)) - 1; 3095 mask &= (1 << (spd - 1)) - 1;
3090 else 3096 else
3091 mask &= 1; 3097 return -EINVAL;
3092 3098
3093 /* were we already at the bottom? */ 3099 /* were we already at the bottom? */
3094 if (!mask) 3100 if (!mask)
@@ -4443,6 +4449,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4443 * https://bugzilla.kernel.org/show_bug.cgi?id=121671 4449 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
4444 */ 4450 */
4445 { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 }, 4451 { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
4452 { "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 },
4446 4453
4447 /* Devices we expect to fail diagnostics */ 4454 /* Devices we expect to fail diagnostics */
4448 4455
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index ffd8d33c6e0f..6db2e34bd52f 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -82,7 +82,7 @@ static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed
82 * is issued to the device. However, if the controller clock is 133MHz, 82 * is issued to the device. However, if the controller clock is 133MHz,
83 * the following tables must be used. 83 * the following tables must be used.
84 */ 84 */
85static struct pdc2027x_pio_timing { 85static const struct pdc2027x_pio_timing {
86 u8 value0, value1, value2; 86 u8 value0, value1, value2;
87} pdc2027x_pio_timing_tbl[] = { 87} pdc2027x_pio_timing_tbl[] = {
88 { 0xfb, 0x2b, 0xac }, /* PIO mode 0 */ 88 { 0xfb, 0x2b, 0xac }, /* PIO mode 0 */
@@ -92,7 +92,7 @@ static struct pdc2027x_pio_timing {
92 { 0x23, 0x09, 0x25 }, /* PIO mode 4, IORDY on, Prefetch off */ 92 { 0x23, 0x09, 0x25 }, /* PIO mode 4, IORDY on, Prefetch off */
93}; 93};
94 94
95static struct pdc2027x_mdma_timing { 95static const struct pdc2027x_mdma_timing {
96 u8 value0, value1; 96 u8 value0, value1;
97} pdc2027x_mdma_timing_tbl[] = { 97} pdc2027x_mdma_timing_tbl[] = {
98 { 0xdf, 0x5f }, /* MDMA mode 0 */ 98 { 0xdf, 0x5f }, /* MDMA mode 0 */
@@ -100,7 +100,7 @@ static struct pdc2027x_mdma_timing {
100 { 0x69, 0x25 }, /* MDMA mode 2 */ 100 { 0x69, 0x25 }, /* MDMA mode 2 */
101}; 101};
102 102
103static struct pdc2027x_udma_timing { 103static const struct pdc2027x_udma_timing {
104 u8 value0, value1, value2; 104 u8 value0, value1, value2;
105} pdc2027x_udma_timing_tbl[] = { 105} pdc2027x_udma_timing_tbl[] = {
106 { 0x4a, 0x0f, 0xd5 }, /* UDMA mode 0 */ 106 { 0x4a, 0x0f, 0xd5 }, /* UDMA mode 0 */
@@ -649,7 +649,7 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
649 * @host: target ATA host 649 * @host: target ATA host
650 * @board_idx: board identifier 650 * @board_idx: board identifier
651 */ 651 */
652static int pdc_hardware_init(struct ata_host *host, unsigned int board_idx) 652static void pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
653{ 653{
654 long pll_clock; 654 long pll_clock;
655 655
@@ -665,8 +665,6 @@ static int pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
665 665
666 /* Adjust PLL control register */ 666 /* Adjust PLL control register */
667 pdc_adjust_pll(host, pll_clock, board_idx); 667 pdc_adjust_pll(host, pll_clock, board_idx);
668
669 return 0;
670} 668}
671 669
672/** 670/**
@@ -753,8 +751,7 @@ static int pdc2027x_init_one(struct pci_dev *pdev,
753 //pci_enable_intx(pdev); 751 //pci_enable_intx(pdev);
754 752
755 /* initialize adapter */ 753 /* initialize adapter */
756 if (pdc_hardware_init(host, board_idx) != 0) 754 pdc_hardware_init(host, board_idx);
757 return -EIO;
758 755
759 pci_set_master(pdev); 756 pci_set_master(pdev);
760 return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, 757 return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
@@ -778,8 +775,7 @@ static int pdc2027x_reinit_one(struct pci_dev *pdev)
778 else 775 else
779 board_idx = PDC_UDMA_133; 776 board_idx = PDC_UDMA_133;
780 777
781 if (pdc_hardware_init(host, board_idx)) 778 pdc_hardware_init(host, board_idx);
782 return -EIO;
783 779
784 ata_host_resume(host); 780 ata_host_resume(host);
785 return 0; 781 return 0;
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 2f6614c9a229..2415ad9f6dd4 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -91,22 +91,23 @@ config FIRMWARE_IN_KERNEL
91 depends on FW_LOADER 91 depends on FW_LOADER
92 default y 92 default y
93 help 93 help
94 The kernel source tree includes a number of firmware 'blobs' 94 Various drivers in the kernel source tree may require firmware,
95 that are used by various drivers. The recommended way to 95 which is generally available in your distribution's linux-firmware
96 use these is to run "make firmware_install", which, after 96 package.
97 converting ihex files to binary, copies all of the needed 97
98 binary files in firmware/ to /lib/firmware/ on your system so 98 The linux-firmware package should install firmware into
99 that they can be loaded by userspace helpers on request. 99 /lib/firmware/ on your system, so they can be loaded by userspace
100 helpers on request.
100 101
101 Enabling this option will build each required firmware blob 102 Enabling this option will build each required firmware blob
102 into the kernel directly, where request_firmware() will find 103 specified by EXTRA_FIRMWARE into the kernel directly, where
103 them without having to call out to userspace. This may be 104 request_firmware() will find them without having to call out to
104 useful if your root file system requires a device that uses 105 userspace. This may be useful if your root file system requires a
105 such firmware and do not wish to use an initrd. 106 device that uses such firmware and you do not wish to use an
107 initrd.
106 108
107 This single option controls the inclusion of firmware for 109 This single option controls the inclusion of firmware for
108 every driver that uses request_firmware() and ships its 110 every driver that uses request_firmware(), which avoids a
109 firmware in the kernel source tree, which avoids a
110 proliferation of 'Include firmware for xxx device' options. 111 proliferation of 'Include firmware for xxx device' options.
111 112
112 Say 'N' and let firmware be loaded from userspace. 113 Say 'N' and let firmware be loaded from userspace.
@@ -235,6 +236,9 @@ config GENERIC_CPU_DEVICES
235config GENERIC_CPU_AUTOPROBE 236config GENERIC_CPU_AUTOPROBE
236 bool 237 bool
237 238
239config GENERIC_CPU_VULNERABILITIES
240 bool
241
238config SOC_BUS 242config SOC_BUS
239 bool 243 bool
240 select GLOB 244 select GLOB
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index eb3af2739537..07532d83be0b 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -186,6 +186,11 @@ static void cache_associativity(struct cacheinfo *this_leaf)
186 this_leaf->ways_of_associativity = (size / nr_sets) / line_size; 186 this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
187} 187}
188 188
189static bool cache_node_is_unified(struct cacheinfo *this_leaf)
190{
191 return of_property_read_bool(this_leaf->of_node, "cache-unified");
192}
193
189static void cache_of_override_properties(unsigned int cpu) 194static void cache_of_override_properties(unsigned int cpu)
190{ 195{
191 int index; 196 int index;
@@ -194,6 +199,14 @@ static void cache_of_override_properties(unsigned int cpu)
194 199
195 for (index = 0; index < cache_leaves(cpu); index++) { 200 for (index = 0; index < cache_leaves(cpu); index++) {
196 this_leaf = this_cpu_ci->info_list + index; 201 this_leaf = this_cpu_ci->info_list + index;
202 /*
203 * init_cache_level must setup the cache level correctly
204 * overriding the architecturally specified levels, so
205 * if type is NONE at this stage, it should be unified
206 */
207 if (this_leaf->type == CACHE_TYPE_NOCACHE &&
208 cache_node_is_unified(this_leaf))
209 this_leaf->type = CACHE_TYPE_UNIFIED;
197 cache_size(this_leaf); 210 cache_size(this_leaf);
198 cache_get_line_size(this_leaf); 211 cache_get_line_size(this_leaf);
199 cache_nr_sets(this_leaf); 212 cache_nr_sets(this_leaf);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 58a9b608d821..d99038487a0d 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -511,10 +511,58 @@ static void __init cpu_dev_register_generic(void)
511#endif 511#endif
512} 512}
513 513
514#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
515
516ssize_t __weak cpu_show_meltdown(struct device *dev,
517 struct device_attribute *attr, char *buf)
518{
519 return sprintf(buf, "Not affected\n");
520}
521
522ssize_t __weak cpu_show_spectre_v1(struct device *dev,
523 struct device_attribute *attr, char *buf)
524{
525 return sprintf(buf, "Not affected\n");
526}
527
528ssize_t __weak cpu_show_spectre_v2(struct device *dev,
529 struct device_attribute *attr, char *buf)
530{
531 return sprintf(buf, "Not affected\n");
532}
533
534static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
535static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
536static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
537
538static struct attribute *cpu_root_vulnerabilities_attrs[] = {
539 &dev_attr_meltdown.attr,
540 &dev_attr_spectre_v1.attr,
541 &dev_attr_spectre_v2.attr,
542 NULL
543};
544
545static const struct attribute_group cpu_root_vulnerabilities_group = {
546 .name = "vulnerabilities",
547 .attrs = cpu_root_vulnerabilities_attrs,
548};
549
550static void __init cpu_register_vulnerabilities(void)
551{
552 if (sysfs_create_group(&cpu_subsys.dev_root->kobj,
553 &cpu_root_vulnerabilities_group))
554 pr_err("Unable to register CPU vulnerabilities\n");
555}
556
557#else
558static inline void cpu_register_vulnerabilities(void) { }
559#endif
560
514void __init cpu_dev_init(void) 561void __init cpu_dev_init(void)
515{ 562{
516 if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups)) 563 if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
517 panic("Failed to register CPU subsystem"); 564 panic("Failed to register CPU subsystem");
518 565
519 cpu_dev_register_generic(); 566 cpu_dev_register_generic();
567 cpu_register_vulnerabilities();
520} 568}
diff --git a/drivers/base/isa.c b/drivers/base/isa.c
index cd6ccdcf9df0..372d10af2600 100644
--- a/drivers/base/isa.c
+++ b/drivers/base/isa.c
@@ -39,7 +39,7 @@ static int isa_bus_probe(struct device *dev)
39{ 39{
40 struct isa_driver *isa_driver = dev->platform_data; 40 struct isa_driver *isa_driver = dev->platform_data;
41 41
42 if (isa_driver->probe) 42 if (isa_driver && isa_driver->probe)
43 return isa_driver->probe(dev, to_isa_dev(dev)->id); 43 return isa_driver->probe(dev, to_isa_dev(dev)->id);
44 44
45 return 0; 45 return 0;
@@ -49,7 +49,7 @@ static int isa_bus_remove(struct device *dev)
49{ 49{
50 struct isa_driver *isa_driver = dev->platform_data; 50 struct isa_driver *isa_driver = dev->platform_data;
51 51
52 if (isa_driver->remove) 52 if (isa_driver && isa_driver->remove)
53 return isa_driver->remove(dev, to_isa_dev(dev)->id); 53 return isa_driver->remove(dev, to_isa_dev(dev)->id);
54 54
55 return 0; 55 return 0;
@@ -59,7 +59,7 @@ static void isa_bus_shutdown(struct device *dev)
59{ 59{
60 struct isa_driver *isa_driver = dev->platform_data; 60 struct isa_driver *isa_driver = dev->platform_data;
61 61
62 if (isa_driver->shutdown) 62 if (isa_driver && isa_driver->shutdown)
63 isa_driver->shutdown(dev, to_isa_dev(dev)->id); 63 isa_driver->shutdown(dev, to_isa_dev(dev)->id);
64} 64}
65 65
@@ -67,7 +67,7 @@ static int isa_bus_suspend(struct device *dev, pm_message_t state)
67{ 67{
68 struct isa_driver *isa_driver = dev->platform_data; 68 struct isa_driver *isa_driver = dev->platform_data;
69 69
70 if (isa_driver->suspend) 70 if (isa_driver && isa_driver->suspend)
71 return isa_driver->suspend(dev, to_isa_dev(dev)->id, state); 71 return isa_driver->suspend(dev, to_isa_dev(dev)->id, state);
72 72
73 return 0; 73 return 0;
@@ -77,7 +77,7 @@ static int isa_bus_resume(struct device *dev)
77{ 77{
78 struct isa_driver *isa_driver = dev->platform_data; 78 struct isa_driver *isa_driver = dev->platform_data;
79 79
80 if (isa_driver->resume) 80 if (isa_driver && isa_driver->resume)
81 return isa_driver->resume(dev, to_isa_dev(dev)->id); 81 return isa_driver->resume(dev, to_isa_dev(dev)->id);
82 82
83 return 0; 83 return 0;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index db2f04415927..08744b572af6 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -526,6 +526,21 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
526/*------------------------- Resume routines -------------------------*/ 526/*------------------------- Resume routines -------------------------*/
527 527
528/** 528/**
529 * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
530 * @dev: Target device.
531 *
532 * Make the core skip the "early resume" and "resume" phases for @dev.
533 *
534 * This function can be called by middle-layer code during the "noirq" phase of
535 * system resume if necessary, but not by device drivers.
536 */
537void dev_pm_skip_next_resume_phases(struct device *dev)
538{
539 dev->power.is_late_suspended = false;
540 dev->power.is_suspended = false;
541}
542
543/**
529 * device_resume_noirq - Execute a "noirq resume" callback for given device. 544 * device_resume_noirq - Execute a "noirq resume" callback for given device.
530 * @dev: Device to handle. 545 * @dev: Device to handle.
531 * @state: PM transition of the system being carried out. 546 * @state: PM transition of the system being carried out.
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 027d159ac381..6e89b51ea3d9 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -276,7 +276,8 @@ static int rpm_get_suppliers(struct device *dev)
276 continue; 276 continue;
277 277
278 retval = pm_runtime_get_sync(link->supplier); 278 retval = pm_runtime_get_sync(link->supplier);
279 if (retval < 0) { 279 /* Ignore suppliers with disabled runtime PM. */
280 if (retval < 0 && retval != -EACCES) {
280 pm_runtime_put_noidle(link->supplier); 281 pm_runtime_put_noidle(link->supplier);
281 return retval; 282 return retval;
282 } 283 }
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index 02d78f6cecbb..ba8acca036df 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -55,7 +55,7 @@ config BCMA_DRIVER_PCI
55 55
56config BCMA_DRIVER_PCI_HOSTMODE 56config BCMA_DRIVER_PCI_HOSTMODE
57 bool "Driver for PCI core working in hostmode" 57 bool "Driver for PCI core working in hostmode"
58 depends on MIPS && BCMA_DRIVER_PCI 58 depends on MIPS && BCMA_DRIVER_PCI && PCI_DRIVERS_LEGACY
59 help 59 help
60 PCI core hostmode operation (external PCI bus). 60 PCI core hostmode operation (external PCI bus).
61 61
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index bc8e61506968..d5fe720cf149 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1581,9 +1581,8 @@ out:
1581 return err; 1581 return err;
1582} 1582}
1583 1583
1584static void lo_release(struct gendisk *disk, fmode_t mode) 1584static void __lo_release(struct loop_device *lo)
1585{ 1585{
1586 struct loop_device *lo = disk->private_data;
1587 int err; 1586 int err;
1588 1587
1589 if (atomic_dec_return(&lo->lo_refcnt)) 1588 if (atomic_dec_return(&lo->lo_refcnt))
@@ -1610,6 +1609,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
1610 mutex_unlock(&lo->lo_ctl_mutex); 1609 mutex_unlock(&lo->lo_ctl_mutex);
1611} 1610}
1612 1611
1612static void lo_release(struct gendisk *disk, fmode_t mode)
1613{
1614 mutex_lock(&loop_index_mutex);
1615 __lo_release(disk->private_data);
1616 mutex_unlock(&loop_index_mutex);
1617}
1618
1613static const struct block_device_operations lo_fops = { 1619static const struct block_device_operations lo_fops = {
1614 .owner = THIS_MODULE, 1620 .owner = THIS_MODULE,
1615 .open = lo_open, 1621 .open = lo_open,
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index ccb9975a97fa..ad0477ae820f 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -35,13 +35,13 @@ static inline u64 mb_per_tick(int mbps)
35struct nullb_cmd { 35struct nullb_cmd {
36 struct list_head list; 36 struct list_head list;
37 struct llist_node ll_list; 37 struct llist_node ll_list;
38 call_single_data_t csd; 38 struct __call_single_data csd;
39 struct request *rq; 39 struct request *rq;
40 struct bio *bio; 40 struct bio *bio;
41 unsigned int tag; 41 unsigned int tag;
42 blk_status_t error;
42 struct nullb_queue *nq; 43 struct nullb_queue *nq;
43 struct hrtimer timer; 44 struct hrtimer timer;
44 blk_status_t error;
45}; 45};
46 46
47struct nullb_queue { 47struct nullb_queue {
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 38fc5f397fde..cc93522a6d41 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3047,13 +3047,21 @@ static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
3047 mutex_unlock(&rbd_dev->watch_mutex); 3047 mutex_unlock(&rbd_dev->watch_mutex);
3048} 3048}
3049 3049
3050static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
3051{
3052 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3053
3054 strcpy(rbd_dev->lock_cookie, cookie);
3055 rbd_set_owner_cid(rbd_dev, &cid);
3056 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
3057}
3058
3050/* 3059/*
3051 * lock_rwsem must be held for write 3060 * lock_rwsem must be held for write
3052 */ 3061 */
3053static int rbd_lock(struct rbd_device *rbd_dev) 3062static int rbd_lock(struct rbd_device *rbd_dev)
3054{ 3063{
3055 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3064 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3056 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3057 char cookie[32]; 3065 char cookie[32];
3058 int ret; 3066 int ret;
3059 3067
@@ -3068,9 +3076,7 @@ static int rbd_lock(struct rbd_device *rbd_dev)
3068 return ret; 3076 return ret;
3069 3077
3070 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED; 3078 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
3071 strcpy(rbd_dev->lock_cookie, cookie); 3079 __rbd_lock(rbd_dev, cookie);
3072 rbd_set_owner_cid(rbd_dev, &cid);
3073 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
3074 return 0; 3080 return 0;
3075} 3081}
3076 3082
@@ -3856,7 +3862,7 @@ static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
3856 queue_delayed_work(rbd_dev->task_wq, 3862 queue_delayed_work(rbd_dev->task_wq,
3857 &rbd_dev->lock_dwork, 0); 3863 &rbd_dev->lock_dwork, 0);
3858 } else { 3864 } else {
3859 strcpy(rbd_dev->lock_cookie, cookie); 3865 __rbd_lock(rbd_dev, cookie);
3860 } 3866 }
3861} 3867}
3862 3868
@@ -4381,7 +4387,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
4381 segment_size = rbd_obj_bytes(&rbd_dev->header); 4387 segment_size = rbd_obj_bytes(&rbd_dev->header);
4382 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE); 4388 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
4383 q->limits.max_sectors = queue_max_hw_sectors(q); 4389 q->limits.max_sectors = queue_max_hw_sectors(q);
4384 blk_queue_max_segments(q, segment_size / SECTOR_SIZE); 4390 blk_queue_max_segments(q, USHRT_MAX);
4385 blk_queue_max_segment_size(q, segment_size); 4391 blk_queue_max_segment_size(q, segment_size);
4386 blk_queue_io_min(q, segment_size); 4392 blk_queue_io_min(q, segment_size);
4387 blk_queue_io_opt(q, segment_size); 4393 blk_queue_io_opt(q, segment_size);
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 3c29d36702a8..5426c04fe24b 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -1755,14 +1755,17 @@ static int cci_pmu_probe(struct platform_device *pdev)
1755 raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock); 1755 raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
1756 mutex_init(&cci_pmu->reserve_mutex); 1756 mutex_init(&cci_pmu->reserve_mutex);
1757 atomic_set(&cci_pmu->active_events, 0); 1757 atomic_set(&cci_pmu->active_events, 0);
1758 cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus); 1758 cpumask_set_cpu(get_cpu(), &cci_pmu->cpus);
1759 1759
1760 ret = cci_pmu_init(cci_pmu, pdev); 1760 ret = cci_pmu_init(cci_pmu, pdev);
1761 if (ret) 1761 if (ret) {
1762 put_cpu();
1762 return ret; 1763 return ret;
1764 }
1763 1765
1764 cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE, 1766 cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
1765 &cci_pmu->node); 1767 &cci_pmu->node);
1768 put_cpu();
1766 pr_info("ARM %s PMU driver probed", cci_pmu->model->name); 1769 pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
1767 return 0; 1770 return 0;
1768} 1771}
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index 3063f5312397..b52332e52ca5 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -262,7 +262,7 @@ static struct attribute *arm_ccn_pmu_format_attrs[] = {
262 NULL 262 NULL
263}; 263};
264 264
265static struct attribute_group arm_ccn_pmu_format_attr_group = { 265static const struct attribute_group arm_ccn_pmu_format_attr_group = {
266 .name = "format", 266 .name = "format",
267 .attrs = arm_ccn_pmu_format_attrs, 267 .attrs = arm_ccn_pmu_format_attrs,
268}; 268};
@@ -451,7 +451,7 @@ static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = {
451static struct attribute 451static struct attribute
452 *arm_ccn_pmu_events_attrs[ARRAY_SIZE(arm_ccn_pmu_events) + 1]; 452 *arm_ccn_pmu_events_attrs[ARRAY_SIZE(arm_ccn_pmu_events) + 1];
453 453
454static struct attribute_group arm_ccn_pmu_events_attr_group = { 454static const struct attribute_group arm_ccn_pmu_events_attr_group = {
455 .name = "events", 455 .name = "events",
456 .is_visible = arm_ccn_pmu_events_is_visible, 456 .is_visible = arm_ccn_pmu_events_is_visible,
457 .attrs = arm_ccn_pmu_events_attrs, 457 .attrs = arm_ccn_pmu_events_attrs,
@@ -548,7 +548,7 @@ static struct attribute *arm_ccn_pmu_cmp_mask_attrs[] = {
548 NULL 548 NULL
549}; 549};
550 550
551static struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = { 551static const struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = {
552 .name = "cmp_mask", 552 .name = "cmp_mask",
553 .attrs = arm_ccn_pmu_cmp_mask_attrs, 553 .attrs = arm_ccn_pmu_cmp_mask_attrs,
554}; 554};
@@ -569,7 +569,7 @@ static struct attribute *arm_ccn_pmu_cpumask_attrs[] = {
569 NULL, 569 NULL,
570}; 570};
571 571
572static struct attribute_group arm_ccn_pmu_cpumask_attr_group = { 572static const struct attribute_group arm_ccn_pmu_cpumask_attr_group = {
573 .attrs = arm_ccn_pmu_cpumask_attrs, 573 .attrs = arm_ccn_pmu_cpumask_attrs,
574}; 574};
575 575
@@ -1268,10 +1268,12 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
1268 if (ccn->dt.id == 0) { 1268 if (ccn->dt.id == 0) {
1269 name = "ccn"; 1269 name = "ccn";
1270 } else { 1270 } else {
1271 int len = snprintf(NULL, 0, "ccn_%d", ccn->dt.id); 1271 name = devm_kasprintf(ccn->dev, GFP_KERNEL, "ccn_%d",
1272 1272 ccn->dt.id);
1273 name = devm_kzalloc(ccn->dev, len + 1, GFP_KERNEL); 1273 if (!name) {
1274 snprintf(name, len + 1, "ccn_%d", ccn->dt.id); 1274 err = -ENOMEM;
1275 goto error_choose_name;
1276 }
1275 } 1277 }
1276 1278
1277 /* Perf driver registration */ 1279 /* Perf driver registration */
@@ -1298,7 +1300,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
1298 } 1300 }
1299 1301
1300 /* Pick one CPU which we will use to collect data from CCN... */ 1302 /* Pick one CPU which we will use to collect data from CCN... */
1301 cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu); 1303 cpumask_set_cpu(get_cpu(), &ccn->dt.cpu);
1302 1304
1303 /* Also make sure that the overflow interrupt is handled by this CPU */ 1305 /* Also make sure that the overflow interrupt is handled by this CPU */
1304 if (ccn->irq) { 1306 if (ccn->irq) {
@@ -1315,10 +1317,13 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
1315 1317
1316 cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE, 1318 cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
1317 &ccn->dt.node); 1319 &ccn->dt.node);
1320 put_cpu();
1318 return 0; 1321 return 0;
1319 1322
1320error_pmu_register: 1323error_pmu_register:
1321error_set_affinity: 1324error_set_affinity:
1325 put_cpu();
1326error_choose_name:
1322 ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); 1327 ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
1323 for (i = 0; i < ccn->num_xps; i++) 1328 for (i = 0; i < ccn->num_xps; i++)
1324 writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); 1329 writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
@@ -1581,8 +1586,8 @@ static int __init arm_ccn_init(void)
1581 1586
1582static void __exit arm_ccn_exit(void) 1587static void __exit arm_ccn_exit(void)
1583{ 1588{
1584 cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
1585 platform_driver_unregister(&arm_ccn_driver); 1589 platform_driver_unregister(&arm_ccn_driver);
1590 cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
1586} 1591}
1587 1592
1588module_init(arm_ccn_init); 1593module_init(arm_ccn_init);
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 328ca93781cf..1b76d9585902 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -178,6 +178,7 @@ static struct bus_type sunxi_rsb_bus = {
178 .match = sunxi_rsb_device_match, 178 .match = sunxi_rsb_device_match,
179 .probe = sunxi_rsb_device_probe, 179 .probe = sunxi_rsb_device_probe,
180 .remove = sunxi_rsb_device_remove, 180 .remove = sunxi_rsb_device_remove,
181 .uevent = of_device_uevent_modalias,
181}; 182};
182 183
183static void sunxi_rsb_dev_release(struct device *dev) 184static void sunxi_rsb_dev_release(struct device *dev)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 779869ed32b1..71fad747c0c7 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -199,6 +199,9 @@ struct smi_info {
199 /* The timer for this si. */ 199 /* The timer for this si. */
200 struct timer_list si_timer; 200 struct timer_list si_timer;
201 201
202 /* This flag is set, if the timer can be set */
203 bool timer_can_start;
204
202 /* This flag is set, if the timer is running (timer_pending() isn't enough) */ 205 /* This flag is set, if the timer is running (timer_pending() isn't enough) */
203 bool timer_running; 206 bool timer_running;
204 207
@@ -355,6 +358,8 @@ out:
355 358
356static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) 359static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
357{ 360{
361 if (!smi_info->timer_can_start)
362 return;
358 smi_info->last_timeout_jiffies = jiffies; 363 smi_info->last_timeout_jiffies = jiffies;
359 mod_timer(&smi_info->si_timer, new_val); 364 mod_timer(&smi_info->si_timer, new_val);
360 smi_info->timer_running = true; 365 smi_info->timer_running = true;
@@ -374,21 +379,18 @@ static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
374 smi_info->handlers->start_transaction(smi_info->si_sm, msg, size); 379 smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
375} 380}
376 381
377static void start_check_enables(struct smi_info *smi_info, bool start_timer) 382static void start_check_enables(struct smi_info *smi_info)
378{ 383{
379 unsigned char msg[2]; 384 unsigned char msg[2];
380 385
381 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 386 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
382 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; 387 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
383 388
384 if (start_timer) 389 start_new_msg(smi_info, msg, 2);
385 start_new_msg(smi_info, msg, 2);
386 else
387 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
388 smi_info->si_state = SI_CHECKING_ENABLES; 390 smi_info->si_state = SI_CHECKING_ENABLES;
389} 391}
390 392
391static void start_clear_flags(struct smi_info *smi_info, bool start_timer) 393static void start_clear_flags(struct smi_info *smi_info)
392{ 394{
393 unsigned char msg[3]; 395 unsigned char msg[3];
394 396
@@ -397,10 +399,7 @@ static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
397 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; 399 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
398 msg[2] = WDT_PRE_TIMEOUT_INT; 400 msg[2] = WDT_PRE_TIMEOUT_INT;
399 401
400 if (start_timer) 402 start_new_msg(smi_info, msg, 3);
401 start_new_msg(smi_info, msg, 3);
402 else
403 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
404 smi_info->si_state = SI_CLEARING_FLAGS; 403 smi_info->si_state = SI_CLEARING_FLAGS;
405} 404}
406 405
@@ -435,11 +434,11 @@ static void start_getting_events(struct smi_info *smi_info)
435 * Note that we cannot just use disable_irq(), since the interrupt may 434 * Note that we cannot just use disable_irq(), since the interrupt may
436 * be shared. 435 * be shared.
437 */ 436 */
438static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer) 437static inline bool disable_si_irq(struct smi_info *smi_info)
439{ 438{
440 if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) { 439 if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
441 smi_info->interrupt_disabled = true; 440 smi_info->interrupt_disabled = true;
442 start_check_enables(smi_info, start_timer); 441 start_check_enables(smi_info);
443 return true; 442 return true;
444 } 443 }
445 return false; 444 return false;
@@ -449,7 +448,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info)
449{ 448{
450 if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) { 449 if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
451 smi_info->interrupt_disabled = false; 450 smi_info->interrupt_disabled = false;
452 start_check_enables(smi_info, true); 451 start_check_enables(smi_info);
453 return true; 452 return true;
454 } 453 }
455 return false; 454 return false;
@@ -467,7 +466,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
467 466
468 msg = ipmi_alloc_smi_msg(); 467 msg = ipmi_alloc_smi_msg();
469 if (!msg) { 468 if (!msg) {
470 if (!disable_si_irq(smi_info, true)) 469 if (!disable_si_irq(smi_info))
471 smi_info->si_state = SI_NORMAL; 470 smi_info->si_state = SI_NORMAL;
472 } else if (enable_si_irq(smi_info)) { 471 } else if (enable_si_irq(smi_info)) {
473 ipmi_free_smi_msg(msg); 472 ipmi_free_smi_msg(msg);
@@ -483,7 +482,7 @@ retry:
483 /* Watchdog pre-timeout */ 482 /* Watchdog pre-timeout */
484 smi_inc_stat(smi_info, watchdog_pretimeouts); 483 smi_inc_stat(smi_info, watchdog_pretimeouts);
485 484
486 start_clear_flags(smi_info, true); 485 start_clear_flags(smi_info);
487 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; 486 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
488 if (smi_info->intf) 487 if (smi_info->intf)
489 ipmi_smi_watchdog_pretimeout(smi_info->intf); 488 ipmi_smi_watchdog_pretimeout(smi_info->intf);
@@ -866,7 +865,7 @@ restart:
866 * disable and messages disabled. 865 * disable and messages disabled.
867 */ 866 */
868 if (smi_info->supports_event_msg_buff || smi_info->io.irq) { 867 if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
869 start_check_enables(smi_info, true); 868 start_check_enables(smi_info);
870 } else { 869 } else {
871 smi_info->curr_msg = alloc_msg_handle_irq(smi_info); 870 smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
872 if (!smi_info->curr_msg) 871 if (!smi_info->curr_msg)
@@ -1167,6 +1166,7 @@ static int smi_start_processing(void *send_info,
1167 1166
1168 /* Set up the timer that drives the interface. */ 1167 /* Set up the timer that drives the interface. */
1169 timer_setup(&new_smi->si_timer, smi_timeout, 0); 1168 timer_setup(&new_smi->si_timer, smi_timeout, 0);
1169 new_smi->timer_can_start = true;
1170 smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES); 1170 smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
1171 1171
1172 /* Try to claim any interrupts. */ 1172 /* Try to claim any interrupts. */
@@ -1936,10 +1936,12 @@ static void check_for_broken_irqs(struct smi_info *smi_info)
1936 check_set_rcv_irq(smi_info); 1936 check_set_rcv_irq(smi_info);
1937} 1937}
1938 1938
1939static inline void wait_for_timer_and_thread(struct smi_info *smi_info) 1939static inline void stop_timer_and_thread(struct smi_info *smi_info)
1940{ 1940{
1941 if (smi_info->thread != NULL) 1941 if (smi_info->thread != NULL)
1942 kthread_stop(smi_info->thread); 1942 kthread_stop(smi_info->thread);
1943
1944 smi_info->timer_can_start = false;
1943 if (smi_info->timer_running) 1945 if (smi_info->timer_running)
1944 del_timer_sync(&smi_info->si_timer); 1946 del_timer_sync(&smi_info->si_timer);
1945} 1947}
@@ -2152,7 +2154,7 @@ static int try_smi_init(struct smi_info *new_smi)
2152 * Start clearing the flags before we enable interrupts or the 2154 * Start clearing the flags before we enable interrupts or the
2153 * timer to avoid racing with the timer. 2155 * timer to avoid racing with the timer.
2154 */ 2156 */
2155 start_clear_flags(new_smi, false); 2157 start_clear_flags(new_smi);
2156 2158
2157 /* 2159 /*
2158 * IRQ is defined to be set when non-zero. req_events will 2160 * IRQ is defined to be set when non-zero. req_events will
@@ -2238,7 +2240,7 @@ out_err_remove_attrs:
2238 dev_set_drvdata(new_smi->io.dev, NULL); 2240 dev_set_drvdata(new_smi->io.dev, NULL);
2239 2241
2240out_err_stop_timer: 2242out_err_stop_timer:
2241 wait_for_timer_and_thread(new_smi); 2243 stop_timer_and_thread(new_smi);
2242 2244
2243out_err: 2245out_err:
2244 new_smi->interrupt_disabled = true; 2246 new_smi->interrupt_disabled = true;
@@ -2388,7 +2390,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
2388 */ 2390 */
2389 if (to_clean->io.irq_cleanup) 2391 if (to_clean->io.irq_cleanup)
2390 to_clean->io.irq_cleanup(&to_clean->io); 2392 to_clean->io.irq_cleanup(&to_clean->io);
2391 wait_for_timer_and_thread(to_clean); 2393 stop_timer_and_thread(to_clean);
2392 2394
2393 /* 2395 /*
2394 * Timeouts are stopped, now make sure the interrupts are off 2396 * Timeouts are stopped, now make sure the interrupts are off
@@ -2400,7 +2402,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
2400 schedule_timeout_uninterruptible(1); 2402 schedule_timeout_uninterruptible(1);
2401 } 2403 }
2402 if (to_clean->handlers) 2404 if (to_clean->handlers)
2403 disable_si_irq(to_clean, false); 2405 disable_si_irq(to_clean);
2404 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { 2406 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
2405 poll(to_clean); 2407 poll(to_clean);
2406 schedule_timeout_uninterruptible(1); 2408 schedule_timeout_uninterruptible(1);
diff --git a/drivers/char/ipmi/ipmi_si_parisc.c b/drivers/char/ipmi/ipmi_si_parisc.c
index 090b073ab441..6b10f0e18a95 100644
--- a/drivers/char/ipmi/ipmi_si_parisc.c
+++ b/drivers/char/ipmi/ipmi_si_parisc.c
@@ -10,6 +10,8 @@ static int __init ipmi_parisc_probe(struct parisc_device *dev)
10{ 10{
11 struct si_sm_io io; 11 struct si_sm_io io;
12 12
13 memset(&io, 0, sizeof(io));
14
13 io.si_type = SI_KCS; 15 io.si_type = SI_KCS;
14 io.addr_source = SI_DEVICETREE; 16 io.addr_source = SI_DEVICETREE;
15 io.addr_type = IPMI_MEM_ADDR_SPACE; 17 io.addr_type = IPMI_MEM_ADDR_SPACE;
diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c
index 99771f5cad07..27dd11c49d21 100644
--- a/drivers/char/ipmi/ipmi_si_pci.c
+++ b/drivers/char/ipmi/ipmi_si_pci.c
@@ -103,10 +103,13 @@ static int ipmi_pci_probe(struct pci_dev *pdev,
103 io.addr_source_cleanup = ipmi_pci_cleanup; 103 io.addr_source_cleanup = ipmi_pci_cleanup;
104 io.addr_source_data = pdev; 104 io.addr_source_data = pdev;
105 105
106 if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) 106 if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
107 io.addr_type = IPMI_IO_ADDR_SPACE; 107 io.addr_type = IPMI_IO_ADDR_SPACE;
108 else 108 io.io_setup = ipmi_si_port_setup;
109 } else {
109 io.addr_type = IPMI_MEM_ADDR_SPACE; 110 io.addr_type = IPMI_MEM_ADDR_SPACE;
111 io.io_setup = ipmi_si_mem_setup;
112 }
110 io.addr_data = pci_resource_start(pdev, 0); 113 io.addr_data = pci_resource_start(pdev, 0);
111 114
112 io.regspacing = ipmi_pci_probe_regspacing(&io); 115 io.regspacing = ipmi_pci_probe_regspacing(&io);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 647d056df88c..b56c11f51baf 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -220,7 +220,8 @@ static bool clk_core_is_enabled(struct clk_core *core)
220 220
221 ret = core->ops->is_enabled(core->hw); 221 ret = core->ops->is_enabled(core->hw);
222done: 222done:
223 clk_pm_runtime_put(core); 223 if (core->dev)
224 pm_runtime_put(core->dev);
224 225
225 return ret; 226 return ret;
226} 227}
@@ -1564,6 +1565,9 @@ static void clk_change_rate(struct clk_core *core)
1564 best_parent_rate = core->parent->rate; 1565 best_parent_rate = core->parent->rate;
1565 } 1566 }
1566 1567
1568 if (clk_pm_runtime_get(core))
1569 return;
1570
1567 if (core->flags & CLK_SET_RATE_UNGATE) { 1571 if (core->flags & CLK_SET_RATE_UNGATE) {
1568 unsigned long flags; 1572 unsigned long flags;
1569 1573
@@ -1634,6 +1638,8 @@ static void clk_change_rate(struct clk_core *core)
1634 /* handle the new child who might not be in core->children yet */ 1638 /* handle the new child who might not be in core->children yet */
1635 if (core->new_child) 1639 if (core->new_child)
1636 clk_change_rate(core->new_child); 1640 clk_change_rate(core->new_child);
1641
1642 clk_pm_runtime_put(core);
1637} 1643}
1638 1644
1639static int clk_core_set_rate_nolock(struct clk_core *core, 1645static int clk_core_set_rate_nolock(struct clk_core *core,
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index a1a634253d6f..f00d8758ba24 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/clk.h> 17#include <linux/clk.h>
18#include <linux/clk-provider.h> 18#include <linux/clk-provider.h>
19#include <linux/delay.h>
19#include <linux/init.h> 20#include <linux/init.h>
20#include <linux/of.h> 21#include <linux/of.h>
21#include <linux/of_device.h> 22#include <linux/of_device.h>
@@ -83,9 +84,20 @@ static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev,
83 return 0; 84 return 0;
84} 85}
85 86
87static int sun9i_mmc_reset_reset(struct reset_controller_dev *rcdev,
88 unsigned long id)
89{
90 sun9i_mmc_reset_assert(rcdev, id);
91 udelay(10);
92 sun9i_mmc_reset_deassert(rcdev, id);
93
94 return 0;
95}
96
86static const struct reset_control_ops sun9i_mmc_reset_ops = { 97static const struct reset_control_ops sun9i_mmc_reset_ops = {
87 .assert = sun9i_mmc_reset_assert, 98 .assert = sun9i_mmc_reset_assert,
88 .deassert = sun9i_mmc_reset_deassert, 99 .deassert = sun9i_mmc_reset_deassert,
100 .reset = sun9i_mmc_reset_reset,
89}; 101};
90 102
91static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev) 103static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 58d4f4e1ad6a..ca38229b045a 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -22,6 +22,8 @@
22 22
23#include "cpufreq_governor.h" 23#include "cpufreq_governor.h"
24 24
25#define CPUFREQ_DBS_MIN_SAMPLING_INTERVAL (2 * TICK_NSEC / NSEC_PER_USEC)
26
25static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs); 27static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);
26 28
27static DEFINE_MUTEX(gov_dbs_data_mutex); 29static DEFINE_MUTEX(gov_dbs_data_mutex);
@@ -47,11 +49,15 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
47{ 49{
48 struct dbs_data *dbs_data = to_dbs_data(attr_set); 50 struct dbs_data *dbs_data = to_dbs_data(attr_set);
49 struct policy_dbs_info *policy_dbs; 51 struct policy_dbs_info *policy_dbs;
52 unsigned int sampling_interval;
50 int ret; 53 int ret;
51 ret = sscanf(buf, "%u", &dbs_data->sampling_rate); 54
52 if (ret != 1) 55 ret = sscanf(buf, "%u", &sampling_interval);
56 if (ret != 1 || sampling_interval < CPUFREQ_DBS_MIN_SAMPLING_INTERVAL)
53 return -EINVAL; 57 return -EINVAL;
54 58
59 dbs_data->sampling_rate = sampling_interval;
60
55 /* 61 /*
56 * We are operating under dbs_data->mutex and so the list and its 62 * We are operating under dbs_data->mutex and so the list and its
57 * entries can't be freed concurrently. 63 * entries can't be freed concurrently.
@@ -430,7 +436,14 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
430 if (ret) 436 if (ret)
431 goto free_policy_dbs_info; 437 goto free_policy_dbs_info;
432 438
433 dbs_data->sampling_rate = cpufreq_policy_transition_delay_us(policy); 439 /*
440 * The sampling interval should not be less than the transition latency
441 * of the CPU and it also cannot be too small for dbs_update() to work
442 * correctly.
443 */
444 dbs_data->sampling_rate = max_t(unsigned int,
445 CPUFREQ_DBS_MIN_SAMPLING_INTERVAL,
446 cpufreq_policy_transition_delay_us(policy));
434 447
435 if (!have_governor_per_policy()) 448 if (!have_governor_per_policy())
436 gov->gdbs_data = dbs_data; 449 gov->gdbs_data = dbs_data;
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 628fe899cb48..d9b2c2de49c4 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -226,17 +226,18 @@ static void imx6q_opp_check_speed_grading(struct device *dev)
226 val >>= OCOTP_CFG3_SPEED_SHIFT; 226 val >>= OCOTP_CFG3_SPEED_SHIFT;
227 val &= 0x3; 227 val &= 0x3;
228 228
229 if ((val != OCOTP_CFG3_SPEED_1P2GHZ) &&
230 of_machine_is_compatible("fsl,imx6q"))
231 if (dev_pm_opp_disable(dev, 1200000000))
232 dev_warn(dev, "failed to disable 1.2GHz OPP\n");
233 if (val < OCOTP_CFG3_SPEED_996MHZ) 229 if (val < OCOTP_CFG3_SPEED_996MHZ)
234 if (dev_pm_opp_disable(dev, 996000000)) 230 if (dev_pm_opp_disable(dev, 996000000))
235 dev_warn(dev, "failed to disable 996MHz OPP\n"); 231 dev_warn(dev, "failed to disable 996MHz OPP\n");
236 if (of_machine_is_compatible("fsl,imx6q")) { 232
233 if (of_machine_is_compatible("fsl,imx6q") ||
234 of_machine_is_compatible("fsl,imx6qp")) {
237 if (val != OCOTP_CFG3_SPEED_852MHZ) 235 if (val != OCOTP_CFG3_SPEED_852MHZ)
238 if (dev_pm_opp_disable(dev, 852000000)) 236 if (dev_pm_opp_disable(dev, 852000000))
239 dev_warn(dev, "failed to disable 852MHz OPP\n"); 237 dev_warn(dev, "failed to disable 852MHz OPP\n");
238 if (val != OCOTP_CFG3_SPEED_1P2GHZ)
239 if (dev_pm_opp_disable(dev, 1200000000))
240 dev_warn(dev, "failed to disable 1.2GHz OPP\n");
240 } 241 }
241 iounmap(base); 242 iounmap(base);
242put_node: 243put_node:
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 3e104f5aa0c2..b56b3f711d94 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -5,6 +5,7 @@ config CRYPTO_DEV_CHELSIO
5 select CRYPTO_SHA256 5 select CRYPTO_SHA256
6 select CRYPTO_SHA512 6 select CRYPTO_SHA512
7 select CRYPTO_AUTHENC 7 select CRYPTO_AUTHENC
8 select CRYPTO_GF128MUL
8 ---help--- 9 ---help---
9 The Chelsio Crypto Co-processor driver for T6 adapters. 10 The Chelsio Crypto Co-processor driver for T6 adapters.
10 11
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 89ba9e85c0f3..4bcef78a08aa 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -607,6 +607,7 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
607 ndesc = ctx->handle_result(priv, ring, sreq->req, 607 ndesc = ctx->handle_result(priv, ring, sreq->req,
608 &should_complete, &ret); 608 &should_complete, &ret);
609 if (ndesc < 0) { 609 if (ndesc < 0) {
610 kfree(sreq);
610 dev_err(priv->dev, "failed to handle result (%d)", ndesc); 611 dev_err(priv->dev, "failed to handle result (%d)", ndesc);
611 return; 612 return;
612 } 613 }
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 5438552bc6d7..fcc0a606d748 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -14,6 +14,7 @@
14 14
15#include <crypto/aes.h> 15#include <crypto/aes.h>
16#include <crypto/skcipher.h> 16#include <crypto/skcipher.h>
17#include <crypto/internal/skcipher.h>
17 18
18#include "safexcel.h" 19#include "safexcel.h"
19 20
@@ -33,6 +34,10 @@ struct safexcel_cipher_ctx {
33 unsigned int key_len; 34 unsigned int key_len;
34}; 35};
35 36
37struct safexcel_cipher_req {
38 bool needs_inv;
39};
40
36static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, 41static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
37 struct crypto_async_request *async, 42 struct crypto_async_request *async,
38 struct safexcel_command_desc *cdesc, 43 struct safexcel_command_desc *cdesc,
@@ -126,9 +131,9 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
126 return 0; 131 return 0;
127} 132}
128 133
129static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, 134static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
130 struct crypto_async_request *async, 135 struct crypto_async_request *async,
131 bool *should_complete, int *ret) 136 bool *should_complete, int *ret)
132{ 137{
133 struct skcipher_request *req = skcipher_request_cast(async); 138 struct skcipher_request *req = skcipher_request_cast(async);
134 struct safexcel_result_desc *rdesc; 139 struct safexcel_result_desc *rdesc;
@@ -265,7 +270,6 @@ static int safexcel_aes_send(struct crypto_async_request *async,
265 spin_unlock_bh(&priv->ring[ring].egress_lock); 270 spin_unlock_bh(&priv->ring[ring].egress_lock);
266 271
267 request->req = &req->base; 272 request->req = &req->base;
268 ctx->base.handle_result = safexcel_handle_result;
269 273
270 *commands = n_cdesc; 274 *commands = n_cdesc;
271 *results = n_rdesc; 275 *results = n_rdesc;
@@ -341,8 +345,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
341 345
342 ring = safexcel_select_ring(priv); 346 ring = safexcel_select_ring(priv);
343 ctx->base.ring = ring; 347 ctx->base.ring = ring;
344 ctx->base.needs_inv = false;
345 ctx->base.send = safexcel_aes_send;
346 348
347 spin_lock_bh(&priv->ring[ring].queue_lock); 349 spin_lock_bh(&priv->ring[ring].queue_lock);
348 enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async); 350 enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -359,6 +361,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
359 return ndesc; 361 return ndesc;
360} 362}
361 363
364static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
365 struct crypto_async_request *async,
366 bool *should_complete, int *ret)
367{
368 struct skcipher_request *req = skcipher_request_cast(async);
369 struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
370 int err;
371
372 if (sreq->needs_inv) {
373 sreq->needs_inv = false;
374 err = safexcel_handle_inv_result(priv, ring, async,
375 should_complete, ret);
376 } else {
377 err = safexcel_handle_req_result(priv, ring, async,
378 should_complete, ret);
379 }
380
381 return err;
382}
383
362static int safexcel_cipher_send_inv(struct crypto_async_request *async, 384static int safexcel_cipher_send_inv(struct crypto_async_request *async,
363 int ring, struct safexcel_request *request, 385 int ring, struct safexcel_request *request,
364 int *commands, int *results) 386 int *commands, int *results)
@@ -368,8 +390,6 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
368 struct safexcel_crypto_priv *priv = ctx->priv; 390 struct safexcel_crypto_priv *priv = ctx->priv;
369 int ret; 391 int ret;
370 392
371 ctx->base.handle_result = safexcel_handle_inv_result;
372
373 ret = safexcel_invalidate_cache(async, &ctx->base, priv, 393 ret = safexcel_invalidate_cache(async, &ctx->base, priv,
374 ctx->base.ctxr_dma, ring, request); 394 ctx->base.ctxr_dma, ring, request);
375 if (unlikely(ret)) 395 if (unlikely(ret))
@@ -381,28 +401,46 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
381 return 0; 401 return 0;
382} 402}
383 403
404static int safexcel_send(struct crypto_async_request *async,
405 int ring, struct safexcel_request *request,
406 int *commands, int *results)
407{
408 struct skcipher_request *req = skcipher_request_cast(async);
409 struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
410 int ret;
411
412 if (sreq->needs_inv)
413 ret = safexcel_cipher_send_inv(async, ring, request,
414 commands, results);
415 else
416 ret = safexcel_aes_send(async, ring, request,
417 commands, results);
418 return ret;
419}
420
384static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm) 421static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
385{ 422{
386 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); 423 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
387 struct safexcel_crypto_priv *priv = ctx->priv; 424 struct safexcel_crypto_priv *priv = ctx->priv;
388 struct skcipher_request req; 425 SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
426 struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
389 struct safexcel_inv_result result = {}; 427 struct safexcel_inv_result result = {};
390 int ring = ctx->base.ring; 428 int ring = ctx->base.ring;
391 429
392 memset(&req, 0, sizeof(struct skcipher_request)); 430 memset(req, 0, sizeof(struct skcipher_request));
393 431
394 /* create invalidation request */ 432 /* create invalidation request */
395 init_completion(&result.completion); 433 init_completion(&result.completion);
396 skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG, 434 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
397 safexcel_inv_complete, &result); 435 safexcel_inv_complete, &result);
398 436
399 skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm)); 437 skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
400 ctx = crypto_tfm_ctx(req.base.tfm); 438 ctx = crypto_tfm_ctx(req->base.tfm);
401 ctx->base.exit_inv = true; 439 ctx->base.exit_inv = true;
402 ctx->base.send = safexcel_cipher_send_inv; 440 sreq->needs_inv = true;
403 441
404 spin_lock_bh(&priv->ring[ring].queue_lock); 442 spin_lock_bh(&priv->ring[ring].queue_lock);
405 crypto_enqueue_request(&priv->ring[ring].queue, &req.base); 443 crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
406 spin_unlock_bh(&priv->ring[ring].queue_lock); 444 spin_unlock_bh(&priv->ring[ring].queue_lock);
407 445
408 if (!priv->ring[ring].need_dequeue) 446 if (!priv->ring[ring].need_dequeue)
@@ -424,19 +462,21 @@ static int safexcel_aes(struct skcipher_request *req,
424 enum safexcel_cipher_direction dir, u32 mode) 462 enum safexcel_cipher_direction dir, u32 mode)
425{ 463{
426 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 464 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
465 struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
427 struct safexcel_crypto_priv *priv = ctx->priv; 466 struct safexcel_crypto_priv *priv = ctx->priv;
428 int ret, ring; 467 int ret, ring;
429 468
469 sreq->needs_inv = false;
430 ctx->direction = dir; 470 ctx->direction = dir;
431 ctx->mode = mode; 471 ctx->mode = mode;
432 472
433 if (ctx->base.ctxr) { 473 if (ctx->base.ctxr) {
434 if (ctx->base.needs_inv) 474 if (ctx->base.needs_inv) {
435 ctx->base.send = safexcel_cipher_send_inv; 475 sreq->needs_inv = true;
476 ctx->base.needs_inv = false;
477 }
436 } else { 478 } else {
437 ctx->base.ring = safexcel_select_ring(priv); 479 ctx->base.ring = safexcel_select_ring(priv);
438 ctx->base.send = safexcel_aes_send;
439
440 ctx->base.ctxr = dma_pool_zalloc(priv->context_pool, 480 ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
441 EIP197_GFP_FLAGS(req->base), 481 EIP197_GFP_FLAGS(req->base),
442 &ctx->base.ctxr_dma); 482 &ctx->base.ctxr_dma);
@@ -476,6 +516,11 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
476 alg.skcipher.base); 516 alg.skcipher.base);
477 517
478 ctx->priv = tmpl->priv; 518 ctx->priv = tmpl->priv;
519 ctx->base.send = safexcel_send;
520 ctx->base.handle_result = safexcel_handle_result;
521
522 crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
523 sizeof(struct safexcel_cipher_req));
479 524
480 return 0; 525 return 0;
481} 526}
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 74feb6227101..0c5a5820b06e 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -32,9 +32,10 @@ struct safexcel_ahash_req {
32 bool last_req; 32 bool last_req;
33 bool finish; 33 bool finish;
34 bool hmac; 34 bool hmac;
35 bool needs_inv;
35 36
36 u8 state_sz; /* expected sate size, only set once */ 37 u8 state_sz; /* expected sate size, only set once */
37 u32 state[SHA256_DIGEST_SIZE / sizeof(u32)]; 38 u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
38 39
39 u64 len; 40 u64 len;
40 u64 processed; 41 u64 processed;
@@ -119,15 +120,15 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
119 } 120 }
120} 121}
121 122
122static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, 123static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
123 struct crypto_async_request *async, 124 struct crypto_async_request *async,
124 bool *should_complete, int *ret) 125 bool *should_complete, int *ret)
125{ 126{
126 struct safexcel_result_desc *rdesc; 127 struct safexcel_result_desc *rdesc;
127 struct ahash_request *areq = ahash_request_cast(async); 128 struct ahash_request *areq = ahash_request_cast(async);
128 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); 129 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
129 struct safexcel_ahash_req *sreq = ahash_request_ctx(areq); 130 struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
130 int cache_len, result_sz = sreq->state_sz; 131 int cache_len;
131 132
132 *ret = 0; 133 *ret = 0;
133 134
@@ -148,8 +149,8 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
148 spin_unlock_bh(&priv->ring[ring].egress_lock); 149 spin_unlock_bh(&priv->ring[ring].egress_lock);
149 150
150 if (sreq->finish) 151 if (sreq->finish)
151 result_sz = crypto_ahash_digestsize(ahash); 152 memcpy(areq->result, sreq->state,
152 memcpy(sreq->state, areq->result, result_sz); 153 crypto_ahash_digestsize(ahash));
153 154
154 dma_unmap_sg(priv->dev, areq->src, 155 dma_unmap_sg(priv->dev, areq->src,
155 sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE); 156 sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
@@ -165,9 +166,9 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
165 return 1; 166 return 1;
166} 167}
167 168
168static int safexcel_ahash_send(struct crypto_async_request *async, int ring, 169static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
169 struct safexcel_request *request, int *commands, 170 struct safexcel_request *request,
170 int *results) 171 int *commands, int *results)
171{ 172{
172 struct ahash_request *areq = ahash_request_cast(async); 173 struct ahash_request *areq = ahash_request_cast(async);
173 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); 174 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
@@ -273,7 +274,7 @@ send_command:
273 /* Add the token */ 274 /* Add the token */
274 safexcel_hash_token(first_cdesc, len, req->state_sz); 275 safexcel_hash_token(first_cdesc, len, req->state_sz);
275 276
276 ctx->base.result_dma = dma_map_single(priv->dev, areq->result, 277 ctx->base.result_dma = dma_map_single(priv->dev, req->state,
277 req->state_sz, DMA_FROM_DEVICE); 278 req->state_sz, DMA_FROM_DEVICE);
278 if (dma_mapping_error(priv->dev, ctx->base.result_dma)) { 279 if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
279 ret = -EINVAL; 280 ret = -EINVAL;
@@ -292,7 +293,6 @@ send_command:
292 293
293 req->processed += len; 294 req->processed += len;
294 request->req = &areq->base; 295 request->req = &areq->base;
295 ctx->base.handle_result = safexcel_handle_result;
296 296
297 *commands = n_cdesc; 297 *commands = n_cdesc;
298 *results = 1; 298 *results = 1;
@@ -374,8 +374,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
374 374
375 ring = safexcel_select_ring(priv); 375 ring = safexcel_select_ring(priv);
376 ctx->base.ring = ring; 376 ctx->base.ring = ring;
377 ctx->base.needs_inv = false;
378 ctx->base.send = safexcel_ahash_send;
379 377
380 spin_lock_bh(&priv->ring[ring].queue_lock); 378 spin_lock_bh(&priv->ring[ring].queue_lock);
381 enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async); 379 enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -392,6 +390,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
392 return 1; 390 return 1;
393} 391}
394 392
393static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
394 struct crypto_async_request *async,
395 bool *should_complete, int *ret)
396{
397 struct ahash_request *areq = ahash_request_cast(async);
398 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
399 int err;
400
401 if (req->needs_inv) {
402 req->needs_inv = false;
403 err = safexcel_handle_inv_result(priv, ring, async,
404 should_complete, ret);
405 } else {
406 err = safexcel_handle_req_result(priv, ring, async,
407 should_complete, ret);
408 }
409
410 return err;
411}
412
395static int safexcel_ahash_send_inv(struct crypto_async_request *async, 413static int safexcel_ahash_send_inv(struct crypto_async_request *async,
396 int ring, struct safexcel_request *request, 414 int ring, struct safexcel_request *request,
397 int *commands, int *results) 415 int *commands, int *results)
@@ -400,7 +418,6 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
400 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); 418 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
401 int ret; 419 int ret;
402 420
403 ctx->base.handle_result = safexcel_handle_inv_result;
404 ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv, 421 ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv,
405 ctx->base.ctxr_dma, ring, request); 422 ctx->base.ctxr_dma, ring, request);
406 if (unlikely(ret)) 423 if (unlikely(ret))
@@ -412,28 +429,46 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
412 return 0; 429 return 0;
413} 430}
414 431
432static int safexcel_ahash_send(struct crypto_async_request *async,
433 int ring, struct safexcel_request *request,
434 int *commands, int *results)
435{
436 struct ahash_request *areq = ahash_request_cast(async);
437 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
438 int ret;
439
440 if (req->needs_inv)
441 ret = safexcel_ahash_send_inv(async, ring, request,
442 commands, results);
443 else
444 ret = safexcel_ahash_send_req(async, ring, request,
445 commands, results);
446 return ret;
447}
448
415static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm) 449static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
416{ 450{
417 struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); 451 struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
418 struct safexcel_crypto_priv *priv = ctx->priv; 452 struct safexcel_crypto_priv *priv = ctx->priv;
419 struct ahash_request req; 453 AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
454 struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
420 struct safexcel_inv_result result = {}; 455 struct safexcel_inv_result result = {};
421 int ring = ctx->base.ring; 456 int ring = ctx->base.ring;
422 457
423 memset(&req, 0, sizeof(struct ahash_request)); 458 memset(req, 0, sizeof(struct ahash_request));
424 459
425 /* create invalidation request */ 460 /* create invalidation request */
426 init_completion(&result.completion); 461 init_completion(&result.completion);
427 ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG, 462 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
428 safexcel_inv_complete, &result); 463 safexcel_inv_complete, &result);
429 464
430 ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm)); 465 ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
431 ctx = crypto_tfm_ctx(req.base.tfm); 466 ctx = crypto_tfm_ctx(req->base.tfm);
432 ctx->base.exit_inv = true; 467 ctx->base.exit_inv = true;
433 ctx->base.send = safexcel_ahash_send_inv; 468 rctx->needs_inv = true;
434 469
435 spin_lock_bh(&priv->ring[ring].queue_lock); 470 spin_lock_bh(&priv->ring[ring].queue_lock);
436 crypto_enqueue_request(&priv->ring[ring].queue, &req.base); 471 crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
437 spin_unlock_bh(&priv->ring[ring].queue_lock); 472 spin_unlock_bh(&priv->ring[ring].queue_lock);
438 473
439 if (!priv->ring[ring].need_dequeue) 474 if (!priv->ring[ring].need_dequeue)
@@ -481,14 +516,16 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
481 struct safexcel_crypto_priv *priv = ctx->priv; 516 struct safexcel_crypto_priv *priv = ctx->priv;
482 int ret, ring; 517 int ret, ring;
483 518
484 ctx->base.send = safexcel_ahash_send; 519 req->needs_inv = false;
485 520
486 if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) 521 if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
487 ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq); 522 ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
488 523
489 if (ctx->base.ctxr) { 524 if (ctx->base.ctxr) {
490 if (ctx->base.needs_inv) 525 if (ctx->base.needs_inv) {
491 ctx->base.send = safexcel_ahash_send_inv; 526 ctx->base.needs_inv = false;
527 req->needs_inv = true;
528 }
492 } else { 529 } else {
493 ctx->base.ring = safexcel_select_ring(priv); 530 ctx->base.ring = safexcel_select_ring(priv);
494 ctx->base.ctxr = dma_pool_zalloc(priv->context_pool, 531 ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
@@ -622,6 +659,8 @@ static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
622 struct safexcel_alg_template, alg.ahash); 659 struct safexcel_alg_template, alg.ahash);
623 660
624 ctx->priv = tmpl->priv; 661 ctx->priv = tmpl->priv;
662 ctx->base.send = safexcel_ahash_send;
663 ctx->base.handle_result = safexcel_handle_result;
625 664
626 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 665 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
627 sizeof(struct safexcel_ahash_req)); 666 sizeof(struct safexcel_ahash_req));
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 48de52cf2ecc..662e709812cc 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1625,6 +1625,7 @@ static int queue_cache_init(void)
1625 CWQ_ENTRY_SIZE, 0, NULL); 1625 CWQ_ENTRY_SIZE, 0, NULL);
1626 if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) { 1626 if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
1627 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); 1627 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1628 queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
1628 return -ENOMEM; 1629 return -ENOMEM;
1629 } 1630 }
1630 return 0; 1631 return 0;
@@ -1634,6 +1635,8 @@ static void queue_cache_destroy(void)
1634{ 1635{
1635 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); 1636 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1636 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]); 1637 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
1638 queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
1639 queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
1637} 1640}
1638 1641
1639static long spu_queue_register_workfn(void *arg) 1642static long spu_queue_register_workfn(void *arg)
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index fbab271b3bf9..a861b5b4d443 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -708,7 +708,7 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
708 unsigned long flags) 708 unsigned long flags)
709{ 709{
710 struct at_dma_chan *atchan = to_at_dma_chan(chan); 710 struct at_dma_chan *atchan = to_at_dma_chan(chan);
711 struct data_chunk *first = xt->sgl; 711 struct data_chunk *first;
712 struct at_desc *desc = NULL; 712 struct at_desc *desc = NULL;
713 size_t xfer_count; 713 size_t xfer_count;
714 unsigned int dwidth; 714 unsigned int dwidth;
@@ -720,6 +720,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
720 if (unlikely(!xt || xt->numf != 1 || !xt->frame_size)) 720 if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
721 return NULL; 721 return NULL;
722 722
723 first = xt->sgl;
724
723 dev_info(chan2dev(chan), 725 dev_info(chan2dev(chan),
724 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n", 726 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
725 __func__, &xt->src_start, &xt->dst_start, xt->numf, 727 __func__, &xt->src_start, &xt->dst_start, xt->numf,
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index d50273fed715..afd5e10f8927 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -555,7 +555,7 @@ static int jz4740_dma_probe(struct platform_device *pdev)
555 555
556 ret = dma_async_device_register(dd); 556 ret = dma_async_device_register(dd);
557 if (ret) 557 if (ret)
558 return ret; 558 goto err_clk;
559 559
560 irq = platform_get_irq(pdev, 0); 560 irq = platform_get_irq(pdev, 0);
561 ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev); 561 ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev);
@@ -568,6 +568,8 @@ static int jz4740_dma_probe(struct platform_device *pdev)
568 568
569err_unregister: 569err_unregister:
570 dma_async_device_unregister(dd); 570 dma_async_device_unregister(dd);
571err_clk:
572 clk_disable_unprepare(dmadev->clk);
571 return ret; 573 return ret;
572} 574}
573 575
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 47edc7fbf91f..ec5f9d2bc820 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -155,6 +155,12 @@ MODULE_PARM_DESC(run, "Run the test (default: false)");
155#define PATTERN_COUNT_MASK 0x1f 155#define PATTERN_COUNT_MASK 0x1f
156#define PATTERN_MEMSET_IDX 0x01 156#define PATTERN_MEMSET_IDX 0x01
157 157
158/* poor man's completion - we want to use wait_event_freezable() on it */
159struct dmatest_done {
160 bool done;
161 wait_queue_head_t *wait;
162};
163
158struct dmatest_thread { 164struct dmatest_thread {
159 struct list_head node; 165 struct list_head node;
160 struct dmatest_info *info; 166 struct dmatest_info *info;
@@ -165,6 +171,8 @@ struct dmatest_thread {
165 u8 **dsts; 171 u8 **dsts;
166 u8 **udsts; 172 u8 **udsts;
167 enum dma_transaction_type type; 173 enum dma_transaction_type type;
174 wait_queue_head_t done_wait;
175 struct dmatest_done test_done;
168 bool done; 176 bool done;
169}; 177};
170 178
@@ -342,18 +350,25 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
342 return error_count; 350 return error_count;
343} 351}
344 352
345/* poor man's completion - we want to use wait_event_freezable() on it */
346struct dmatest_done {
347 bool done;
348 wait_queue_head_t *wait;
349};
350 353
351static void dmatest_callback(void *arg) 354static void dmatest_callback(void *arg)
352{ 355{
353 struct dmatest_done *done = arg; 356 struct dmatest_done *done = arg;
354 357 struct dmatest_thread *thread =
355 done->done = true; 358 container_of(arg, struct dmatest_thread, done_wait);
356 wake_up_all(done->wait); 359 if (!thread->done) {
360 done->done = true;
361 wake_up_all(done->wait);
362 } else {
363 /*
364 * If thread->done, it means that this callback occurred
365 * after the parent thread has cleaned up. This can
366 * happen in the case that driver doesn't implement
367 * the terminate_all() functionality and a dma operation
368 * did not occur within the timeout period
369 */
370 WARN(1, "dmatest: Kernel memory may be corrupted!!\n");
371 }
357} 372}
358 373
359static unsigned int min_odd(unsigned int x, unsigned int y) 374static unsigned int min_odd(unsigned int x, unsigned int y)
@@ -424,9 +439,8 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
424 */ 439 */
425static int dmatest_func(void *data) 440static int dmatest_func(void *data)
426{ 441{
427 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
428 struct dmatest_thread *thread = data; 442 struct dmatest_thread *thread = data;
429 struct dmatest_done done = { .wait = &done_wait }; 443 struct dmatest_done *done = &thread->test_done;
430 struct dmatest_info *info; 444 struct dmatest_info *info;
431 struct dmatest_params *params; 445 struct dmatest_params *params;
432 struct dma_chan *chan; 446 struct dma_chan *chan;
@@ -673,9 +687,9 @@ static int dmatest_func(void *data)
673 continue; 687 continue;
674 } 688 }
675 689
676 done.done = false; 690 done->done = false;
677 tx->callback = dmatest_callback; 691 tx->callback = dmatest_callback;
678 tx->callback_param = &done; 692 tx->callback_param = done;
679 cookie = tx->tx_submit(tx); 693 cookie = tx->tx_submit(tx);
680 694
681 if (dma_submit_error(cookie)) { 695 if (dma_submit_error(cookie)) {
@@ -688,21 +702,12 @@ static int dmatest_func(void *data)
688 } 702 }
689 dma_async_issue_pending(chan); 703 dma_async_issue_pending(chan);
690 704
691 wait_event_freezable_timeout(done_wait, done.done, 705 wait_event_freezable_timeout(thread->done_wait, done->done,
692 msecs_to_jiffies(params->timeout)); 706 msecs_to_jiffies(params->timeout));
693 707
694 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); 708 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
695 709
696 if (!done.done) { 710 if (!done->done) {
697 /*
698 * We're leaving the timed out dma operation with
699 * dangling pointer to done_wait. To make this
700 * correct, we'll need to allocate wait_done for
701 * each test iteration and perform "who's gonna
702 * free it this time?" dancing. For now, just
703 * leave it dangling.
704 */
705 WARN(1, "dmatest: Kernel stack may be corrupted!!\n");
706 dmaengine_unmap_put(um); 711 dmaengine_unmap_put(um);
707 result("test timed out", total_tests, src_off, dst_off, 712 result("test timed out", total_tests, src_off, dst_off,
708 len, 0); 713 len, 0);
@@ -789,7 +794,7 @@ err_thread_type:
789 dmatest_KBs(runtime, total_len), ret); 794 dmatest_KBs(runtime, total_len), ret);
790 795
791 /* terminate all transfers on specified channels */ 796 /* terminate all transfers on specified channels */
792 if (ret) 797 if (ret || failed_tests)
793 dmaengine_terminate_all(chan); 798 dmaengine_terminate_all(chan);
794 799
795 thread->done = true; 800 thread->done = true;
@@ -849,6 +854,8 @@ static int dmatest_add_threads(struct dmatest_info *info,
849 thread->info = info; 854 thread->info = info;
850 thread->chan = dtc->chan; 855 thread->chan = dtc->chan;
851 thread->type = type; 856 thread->type = type;
857 thread->test_done.wait = &thread->done_wait;
858 init_waitqueue_head(&thread->done_wait);
852 smp_wmb(); 859 smp_wmb();
853 thread->task = kthread_create(dmatest_func, thread, "%s-%s%u", 860 thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
854 dma_chan_name(chan), op, i); 861 dma_chan_name(chan), op, i);
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 6775f2c74e25..c7568869284e 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -863,11 +863,11 @@ static void fsl_edma_irq_exit(
863 } 863 }
864} 864}
865 865
866static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma) 866static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
867{ 867{
868 int i; 868 int i;
869 869
870 for (i = 0; i < DMAMUX_NR; i++) 870 for (i = 0; i < nr_clocks; i++)
871 clk_disable_unprepare(fsl_edma->muxclk[i]); 871 clk_disable_unprepare(fsl_edma->muxclk[i]);
872} 872}
873 873
@@ -904,25 +904,25 @@ static int fsl_edma_probe(struct platform_device *pdev)
904 904
905 res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i); 905 res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
906 fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res); 906 fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
907 if (IS_ERR(fsl_edma->muxbase[i])) 907 if (IS_ERR(fsl_edma->muxbase[i])) {
908 /* on error: disable all previously enabled clks */
909 fsl_disable_clocks(fsl_edma, i);
908 return PTR_ERR(fsl_edma->muxbase[i]); 910 return PTR_ERR(fsl_edma->muxbase[i]);
911 }
909 912
910 sprintf(clkname, "dmamux%d", i); 913 sprintf(clkname, "dmamux%d", i);
911 fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname); 914 fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
912 if (IS_ERR(fsl_edma->muxclk[i])) { 915 if (IS_ERR(fsl_edma->muxclk[i])) {
913 dev_err(&pdev->dev, "Missing DMAMUX block clock.\n"); 916 dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
917 /* on error: disable all previously enabled clks */
918 fsl_disable_clocks(fsl_edma, i);
914 return PTR_ERR(fsl_edma->muxclk[i]); 919 return PTR_ERR(fsl_edma->muxclk[i]);
915 } 920 }
916 921
917 ret = clk_prepare_enable(fsl_edma->muxclk[i]); 922 ret = clk_prepare_enable(fsl_edma->muxclk[i]);
918 if (ret) { 923 if (ret)
919 /* disable only clks which were enabled on error */ 924 /* on error: disable all previously enabled clks */
920 for (; i >= 0; i--) 925 fsl_disable_clocks(fsl_edma, i);
921 clk_disable_unprepare(fsl_edma->muxclk[i]);
922
923 dev_err(&pdev->dev, "DMAMUX clk block failed.\n");
924 return ret;
925 }
926 926
927 } 927 }
928 928
@@ -976,7 +976,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
976 if (ret) { 976 if (ret) {
977 dev_err(&pdev->dev, 977 dev_err(&pdev->dev,
978 "Can't register Freescale eDMA engine. (%d)\n", ret); 978 "Can't register Freescale eDMA engine. (%d)\n", ret);
979 fsl_disable_clocks(fsl_edma); 979 fsl_disable_clocks(fsl_edma, DMAMUX_NR);
980 return ret; 980 return ret;
981 } 981 }
982 982
@@ -985,7 +985,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
985 dev_err(&pdev->dev, 985 dev_err(&pdev->dev,
986 "Can't register Freescale eDMA of_dma. (%d)\n", ret); 986 "Can't register Freescale eDMA of_dma. (%d)\n", ret);
987 dma_async_device_unregister(&fsl_edma->dma_dev); 987 dma_async_device_unregister(&fsl_edma->dma_dev);
988 fsl_disable_clocks(fsl_edma); 988 fsl_disable_clocks(fsl_edma, DMAMUX_NR);
989 return ret; 989 return ret;
990 } 990 }
991 991
@@ -1015,7 +1015,7 @@ static int fsl_edma_remove(struct platform_device *pdev)
1015 fsl_edma_cleanup_vchan(&fsl_edma->dma_dev); 1015 fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
1016 of_dma_controller_free(np); 1016 of_dma_controller_free(np);
1017 dma_async_device_unregister(&fsl_edma->dma_dev); 1017 dma_async_device_unregister(&fsl_edma->dma_dev);
1018 fsl_disable_clocks(fsl_edma); 1018 fsl_disable_clocks(fsl_edma, DMAMUX_NR);
1019 1019
1020 return 0; 1020 return 0;
1021} 1021}
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 2f31d3d0caa6..7792a9186f9c 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -390,7 +390,7 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
390 if (memcmp(src, dest, IOAT_TEST_SIZE)) { 390 if (memcmp(src, dest, IOAT_TEST_SIZE)) {
391 dev_err(dev, "Self-test copy failed compare, disabling\n"); 391 dev_err(dev, "Self-test copy failed compare, disabling\n");
392 err = -ENODEV; 392 err = -ENODEV;
393 goto free_resources; 393 goto unmap_dma;
394 } 394 }
395 395
396unmap_dma: 396unmap_dma:
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index dfb373c8ba2a..7da9f1b83ebe 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -28,7 +28,6 @@
28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29 29
30#include <linux/bitmap.h> 30#include <linux/bitmap.h>
31#include <linux/bitfield.h>
32#include <linux/device.h> 31#include <linux/device.h>
33#include <linux/err.h> 32#include <linux/err.h>
34#include <linux/export.h> 33#include <linux/export.h>
@@ -73,13 +72,21 @@
73 72
74#define MAX_DVFS_DOMAINS 8 73#define MAX_DVFS_DOMAINS 8
75#define MAX_DVFS_OPPS 16 74#define MAX_DVFS_OPPS 16
76 75#define DVFS_LATENCY(hdr) (le32_to_cpu(hdr) >> 16)
77#define PROTO_REV_MAJOR_MASK GENMASK(31, 16) 76#define DVFS_OPP_COUNT(hdr) ((le32_to_cpu(hdr) >> 8) & 0xff)
78#define PROTO_REV_MINOR_MASK GENMASK(15, 0) 77
79 78#define PROTOCOL_REV_MINOR_BITS 16
80#define FW_REV_MAJOR_MASK GENMASK(31, 24) 79#define PROTOCOL_REV_MINOR_MASK ((1U << PROTOCOL_REV_MINOR_BITS) - 1)
81#define FW_REV_MINOR_MASK GENMASK(23, 16) 80#define PROTOCOL_REV_MAJOR(x) ((x) >> PROTOCOL_REV_MINOR_BITS)
82#define FW_REV_PATCH_MASK GENMASK(15, 0) 81#define PROTOCOL_REV_MINOR(x) ((x) & PROTOCOL_REV_MINOR_MASK)
82
83#define FW_REV_MAJOR_BITS 24
84#define FW_REV_MINOR_BITS 16
85#define FW_REV_PATCH_MASK ((1U << FW_REV_MINOR_BITS) - 1)
86#define FW_REV_MINOR_MASK ((1U << FW_REV_MAJOR_BITS) - 1)
87#define FW_REV_MAJOR(x) ((x) >> FW_REV_MAJOR_BITS)
88#define FW_REV_MINOR(x) (((x) & FW_REV_MINOR_MASK) >> FW_REV_MINOR_BITS)
89#define FW_REV_PATCH(x) ((x) & FW_REV_PATCH_MASK)
83 90
84#define MAX_RX_TIMEOUT (msecs_to_jiffies(30)) 91#define MAX_RX_TIMEOUT (msecs_to_jiffies(30))
85 92
@@ -304,6 +311,10 @@ struct clk_get_info {
304 u8 name[20]; 311 u8 name[20];
305} __packed; 312} __packed;
306 313
314struct clk_get_value {
315 __le32 rate;
316} __packed;
317
307struct clk_set_value { 318struct clk_set_value {
308 __le16 id; 319 __le16 id;
309 __le16 reserved; 320 __le16 reserved;
@@ -317,9 +328,7 @@ struct legacy_clk_set_value {
317} __packed; 328} __packed;
318 329
319struct dvfs_info { 330struct dvfs_info {
320 u8 domain; 331 __le32 header;
321 u8 opp_count;
322 __le16 latency;
323 struct { 332 struct {
324 __le32 freq; 333 __le32 freq;
325 __le32 m_volt; 334 __le32 m_volt;
@@ -342,6 +351,11 @@ struct _scpi_sensor_info {
342 char name[20]; 351 char name[20];
343}; 352};
344 353
354struct sensor_value {
355 __le32 lo_val;
356 __le32 hi_val;
357} __packed;
358
345struct dev_pstate_set { 359struct dev_pstate_set {
346 __le16 dev_id; 360 __le16 dev_id;
347 u8 pstate; 361 u8 pstate;
@@ -405,20 +419,19 @@ static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd)
405 unsigned int len; 419 unsigned int len;
406 420
407 if (scpi_info->is_legacy) { 421 if (scpi_info->is_legacy) {
408 struct legacy_scpi_shared_mem __iomem *mem = 422 struct legacy_scpi_shared_mem *mem = ch->rx_payload;
409 ch->rx_payload;
410 423
411 /* RX Length is not replied by the legacy Firmware */ 424 /* RX Length is not replied by the legacy Firmware */
412 len = match->rx_len; 425 len = match->rx_len;
413 426
414 match->status = ioread32(&mem->status); 427 match->status = le32_to_cpu(mem->status);
415 memcpy_fromio(match->rx_buf, mem->payload, len); 428 memcpy_fromio(match->rx_buf, mem->payload, len);
416 } else { 429 } else {
417 struct scpi_shared_mem __iomem *mem = ch->rx_payload; 430 struct scpi_shared_mem *mem = ch->rx_payload;
418 431
419 len = min(match->rx_len, CMD_SIZE(cmd)); 432 len = min(match->rx_len, CMD_SIZE(cmd));
420 433
421 match->status = ioread32(&mem->status); 434 match->status = le32_to_cpu(mem->status);
422 memcpy_fromio(match->rx_buf, mem->payload, len); 435 memcpy_fromio(match->rx_buf, mem->payload, len);
423 } 436 }
424 437
@@ -432,11 +445,11 @@ static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd)
432static void scpi_handle_remote_msg(struct mbox_client *c, void *msg) 445static void scpi_handle_remote_msg(struct mbox_client *c, void *msg)
433{ 446{
434 struct scpi_chan *ch = container_of(c, struct scpi_chan, cl); 447 struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
435 struct scpi_shared_mem __iomem *mem = ch->rx_payload; 448 struct scpi_shared_mem *mem = ch->rx_payload;
436 u32 cmd = 0; 449 u32 cmd = 0;
437 450
438 if (!scpi_info->is_legacy) 451 if (!scpi_info->is_legacy)
439 cmd = ioread32(&mem->command); 452 cmd = le32_to_cpu(mem->command);
440 453
441 scpi_process_cmd(ch, cmd); 454 scpi_process_cmd(ch, cmd);
442} 455}
@@ -446,7 +459,7 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg)
446 unsigned long flags; 459 unsigned long flags;
447 struct scpi_xfer *t = msg; 460 struct scpi_xfer *t = msg;
448 struct scpi_chan *ch = container_of(c, struct scpi_chan, cl); 461 struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
449 struct scpi_shared_mem __iomem *mem = ch->tx_payload; 462 struct scpi_shared_mem *mem = (struct scpi_shared_mem *)ch->tx_payload;
450 463
451 if (t->tx_buf) { 464 if (t->tx_buf) {
452 if (scpi_info->is_legacy) 465 if (scpi_info->is_legacy)
@@ -465,7 +478,7 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg)
465 } 478 }
466 479
467 if (!scpi_info->is_legacy) 480 if (!scpi_info->is_legacy)
468 iowrite32(t->cmd, &mem->command); 481 mem->command = cpu_to_le32(t->cmd);
469} 482}
470 483
471static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch) 484static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch)
@@ -570,13 +583,13 @@ scpi_clk_get_range(u16 clk_id, unsigned long *min, unsigned long *max)
570static unsigned long scpi_clk_get_val(u16 clk_id) 583static unsigned long scpi_clk_get_val(u16 clk_id)
571{ 584{
572 int ret; 585 int ret;
573 __le32 rate; 586 struct clk_get_value clk;
574 __le16 le_clk_id = cpu_to_le16(clk_id); 587 __le16 le_clk_id = cpu_to_le16(clk_id);
575 588
576 ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id, 589 ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
577 sizeof(le_clk_id), &rate, sizeof(rate)); 590 sizeof(le_clk_id), &clk, sizeof(clk));
578 591
579 return ret ? ret : le32_to_cpu(rate); 592 return ret ? ret : le32_to_cpu(clk.rate);
580} 593}
581 594
582static int scpi_clk_set_val(u16 clk_id, unsigned long rate) 595static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
@@ -632,34 +645,34 @@ static int opp_cmp_func(const void *opp1, const void *opp2)
632 645
633static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain) 646static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
634{ 647{
635 if (domain >= MAX_DVFS_DOMAINS)
636 return ERR_PTR(-EINVAL);
637
638 return scpi_info->dvfs[domain] ?: ERR_PTR(-EINVAL);
639}
640
641static int scpi_dvfs_populate_info(struct device *dev, u8 domain)
642{
643 struct scpi_dvfs_info *info; 648 struct scpi_dvfs_info *info;
644 struct scpi_opp *opp; 649 struct scpi_opp *opp;
645 struct dvfs_info buf; 650 struct dvfs_info buf;
646 int ret, i; 651 int ret, i;
647 652
653 if (domain >= MAX_DVFS_DOMAINS)
654 return ERR_PTR(-EINVAL);
655
656 if (scpi_info->dvfs[domain]) /* data already populated */
657 return scpi_info->dvfs[domain];
658
648 ret = scpi_send_message(CMD_GET_DVFS_INFO, &domain, sizeof(domain), 659 ret = scpi_send_message(CMD_GET_DVFS_INFO, &domain, sizeof(domain),
649 &buf, sizeof(buf)); 660 &buf, sizeof(buf));
650 if (ret) 661 if (ret)
651 return ret; 662 return ERR_PTR(ret);
652 663
653 info = devm_kmalloc(dev, sizeof(*info), GFP_KERNEL); 664 info = kmalloc(sizeof(*info), GFP_KERNEL);
654 if (!info) 665 if (!info)
655 return -ENOMEM; 666 return ERR_PTR(-ENOMEM);
656 667
657 info->count = buf.opp_count; 668 info->count = DVFS_OPP_COUNT(buf.header);
658 info->latency = le16_to_cpu(buf.latency) * 1000; /* uS to nS */ 669 info->latency = DVFS_LATENCY(buf.header) * 1000; /* uS to nS */
659 670
660 info->opps = devm_kcalloc(dev, info->count, sizeof(*opp), GFP_KERNEL); 671 info->opps = kcalloc(info->count, sizeof(*opp), GFP_KERNEL);
661 if (!info->opps) 672 if (!info->opps) {
662 return -ENOMEM; 673 kfree(info);
674 return ERR_PTR(-ENOMEM);
675 }
663 676
664 for (i = 0, opp = info->opps; i < info->count; i++, opp++) { 677 for (i = 0, opp = info->opps; i < info->count; i++, opp++) {
665 opp->freq = le32_to_cpu(buf.opps[i].freq); 678 opp->freq = le32_to_cpu(buf.opps[i].freq);
@@ -669,15 +682,7 @@ static int scpi_dvfs_populate_info(struct device *dev, u8 domain)
669 sort(info->opps, info->count, sizeof(*opp), opp_cmp_func, NULL); 682 sort(info->opps, info->count, sizeof(*opp), opp_cmp_func, NULL);
670 683
671 scpi_info->dvfs[domain] = info; 684 scpi_info->dvfs[domain] = info;
672 return 0; 685 return info;
673}
674
675static void scpi_dvfs_populate(struct device *dev)
676{
677 int domain;
678
679 for (domain = 0; domain < MAX_DVFS_DOMAINS; domain++)
680 scpi_dvfs_populate_info(dev, domain);
681} 686}
682 687
683static int scpi_dev_domain_id(struct device *dev) 688static int scpi_dev_domain_id(struct device *dev)
@@ -708,6 +713,9 @@ static int scpi_dvfs_get_transition_latency(struct device *dev)
708 if (IS_ERR(info)) 713 if (IS_ERR(info))
709 return PTR_ERR(info); 714 return PTR_ERR(info);
710 715
716 if (!info->latency)
717 return 0;
718
711 return info->latency; 719 return info->latency;
712} 720}
713 721
@@ -768,19 +776,20 @@ static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info)
768static int scpi_sensor_get_value(u16 sensor, u64 *val) 776static int scpi_sensor_get_value(u16 sensor, u64 *val)
769{ 777{
770 __le16 id = cpu_to_le16(sensor); 778 __le16 id = cpu_to_le16(sensor);
771 __le64 value; 779 struct sensor_value buf;
772 int ret; 780 int ret;
773 781
774 ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id), 782 ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id),
775 &value, sizeof(value)); 783 &buf, sizeof(buf));
776 if (ret) 784 if (ret)
777 return ret; 785 return ret;
778 786
779 if (scpi_info->is_legacy) 787 if (scpi_info->is_legacy)
780 /* only 32-bits supported, upper 32 bits can be junk */ 788 /* only 32-bits supported, hi_val can be junk */
781 *val = le32_to_cpup((__le32 *)&value); 789 *val = le32_to_cpu(buf.lo_val);
782 else 790 else
783 *val = le64_to_cpu(value); 791 *val = (u64)le32_to_cpu(buf.hi_val) << 32 |
792 le32_to_cpu(buf.lo_val);
784 793
785 return 0; 794 return 0;
786} 795}
@@ -853,19 +862,23 @@ static int scpi_init_versions(struct scpi_drvinfo *info)
853static ssize_t protocol_version_show(struct device *dev, 862static ssize_t protocol_version_show(struct device *dev,
854 struct device_attribute *attr, char *buf) 863 struct device_attribute *attr, char *buf)
855{ 864{
856 return sprintf(buf, "%lu.%lu\n", 865 struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);
857 FIELD_GET(PROTO_REV_MAJOR_MASK, scpi_info->protocol_version), 866
858 FIELD_GET(PROTO_REV_MINOR_MASK, scpi_info->protocol_version)); 867 return sprintf(buf, "%d.%d\n",
868 PROTOCOL_REV_MAJOR(scpi_info->protocol_version),
869 PROTOCOL_REV_MINOR(scpi_info->protocol_version));
859} 870}
860static DEVICE_ATTR_RO(protocol_version); 871static DEVICE_ATTR_RO(protocol_version);
861 872
862static ssize_t firmware_version_show(struct device *dev, 873static ssize_t firmware_version_show(struct device *dev,
863 struct device_attribute *attr, char *buf) 874 struct device_attribute *attr, char *buf)
864{ 875{
865 return sprintf(buf, "%lu.%lu.%lu\n", 876 struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);
866 FIELD_GET(FW_REV_MAJOR_MASK, scpi_info->firmware_version), 877
867 FIELD_GET(FW_REV_MINOR_MASK, scpi_info->firmware_version), 878 return sprintf(buf, "%d.%d.%d\n",
868 FIELD_GET(FW_REV_PATCH_MASK, scpi_info->firmware_version)); 879 FW_REV_MAJOR(scpi_info->firmware_version),
880 FW_REV_MINOR(scpi_info->firmware_version),
881 FW_REV_PATCH(scpi_info->firmware_version));
869} 882}
870static DEVICE_ATTR_RO(firmware_version); 883static DEVICE_ATTR_RO(firmware_version);
871 884
@@ -876,13 +889,39 @@ static struct attribute *versions_attrs[] = {
876}; 889};
877ATTRIBUTE_GROUPS(versions); 890ATTRIBUTE_GROUPS(versions);
878 891
879static void scpi_free_channels(void *data) 892static void
893scpi_free_channels(struct device *dev, struct scpi_chan *pchan, int count)
880{ 894{
881 struct scpi_drvinfo *info = data;
882 int i; 895 int i;
883 896
884 for (i = 0; i < info->num_chans; i++) 897 for (i = 0; i < count && pchan->chan; i++, pchan++) {
885 mbox_free_channel(info->channels[i].chan); 898 mbox_free_channel(pchan->chan);
899 devm_kfree(dev, pchan->xfers);
900 devm_iounmap(dev, pchan->rx_payload);
901 }
902}
903
904static int scpi_remove(struct platform_device *pdev)
905{
906 int i;
907 struct device *dev = &pdev->dev;
908 struct scpi_drvinfo *info = platform_get_drvdata(pdev);
909
910 scpi_info = NULL; /* stop exporting SCPI ops through get_scpi_ops */
911
912 of_platform_depopulate(dev);
913 sysfs_remove_groups(&dev->kobj, versions_groups);
914 scpi_free_channels(dev, info->channels, info->num_chans);
915 platform_set_drvdata(pdev, NULL);
916
917 for (i = 0; i < MAX_DVFS_DOMAINS && info->dvfs[i]; i++) {
918 kfree(info->dvfs[i]->opps);
919 kfree(info->dvfs[i]);
920 }
921 devm_kfree(dev, info->channels);
922 devm_kfree(dev, info);
923
924 return 0;
886} 925}
887 926
888#define MAX_SCPI_XFERS 10 927#define MAX_SCPI_XFERS 10
@@ -913,6 +952,7 @@ static int scpi_probe(struct platform_device *pdev)
913{ 952{
914 int count, idx, ret; 953 int count, idx, ret;
915 struct resource res; 954 struct resource res;
955 struct scpi_chan *scpi_chan;
916 struct device *dev = &pdev->dev; 956 struct device *dev = &pdev->dev;
917 struct device_node *np = dev->of_node; 957 struct device_node *np = dev->of_node;
918 958
@@ -929,19 +969,13 @@ static int scpi_probe(struct platform_device *pdev)
929 return -ENODEV; 969 return -ENODEV;
930 } 970 }
931 971
932 scpi_info->channels = devm_kcalloc(dev, count, sizeof(struct scpi_chan), 972 scpi_chan = devm_kcalloc(dev, count, sizeof(*scpi_chan), GFP_KERNEL);
933 GFP_KERNEL); 973 if (!scpi_chan)
934 if (!scpi_info->channels)
935 return -ENOMEM; 974 return -ENOMEM;
936 975
937 ret = devm_add_action(dev, scpi_free_channels, scpi_info); 976 for (idx = 0; idx < count; idx++) {
938 if (ret)
939 return ret;
940
941 for (; scpi_info->num_chans < count; scpi_info->num_chans++) {
942 resource_size_t size; 977 resource_size_t size;
943 int idx = scpi_info->num_chans; 978 struct scpi_chan *pchan = scpi_chan + idx;
944 struct scpi_chan *pchan = scpi_info->channels + idx;
945 struct mbox_client *cl = &pchan->cl; 979 struct mbox_client *cl = &pchan->cl;
946 struct device_node *shmem = of_parse_phandle(np, "shmem", idx); 980 struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
947 981
@@ -949,14 +983,15 @@ static int scpi_probe(struct platform_device *pdev)
949 of_node_put(shmem); 983 of_node_put(shmem);
950 if (ret) { 984 if (ret) {
951 dev_err(dev, "failed to get SCPI payload mem resource\n"); 985 dev_err(dev, "failed to get SCPI payload mem resource\n");
952 return ret; 986 goto err;
953 } 987 }
954 988
955 size = resource_size(&res); 989 size = resource_size(&res);
956 pchan->rx_payload = devm_ioremap(dev, res.start, size); 990 pchan->rx_payload = devm_ioremap(dev, res.start, size);
957 if (!pchan->rx_payload) { 991 if (!pchan->rx_payload) {
958 dev_err(dev, "failed to ioremap SCPI payload\n"); 992 dev_err(dev, "failed to ioremap SCPI payload\n");
959 return -EADDRNOTAVAIL; 993 ret = -EADDRNOTAVAIL;
994 goto err;
960 } 995 }
961 pchan->tx_payload = pchan->rx_payload + (size >> 1); 996 pchan->tx_payload = pchan->rx_payload + (size >> 1);
962 997
@@ -982,11 +1017,17 @@ static int scpi_probe(struct platform_device *pdev)
982 dev_err(dev, "failed to get channel%d err %d\n", 1017 dev_err(dev, "failed to get channel%d err %d\n",
983 idx, ret); 1018 idx, ret);
984 } 1019 }
1020err:
1021 scpi_free_channels(dev, scpi_chan, idx);
1022 scpi_info = NULL;
985 return ret; 1023 return ret;
986 } 1024 }
987 1025
1026 scpi_info->channels = scpi_chan;
1027 scpi_info->num_chans = count;
988 scpi_info->commands = scpi_std_commands; 1028 scpi_info->commands = scpi_std_commands;
989 scpi_info->scpi_ops = &scpi_ops; 1029
1030 platform_set_drvdata(pdev, scpi_info);
990 1031
991 if (scpi_info->is_legacy) { 1032 if (scpi_info->is_legacy) {
992 /* Replace with legacy variants */ 1033 /* Replace with legacy variants */
@@ -1002,23 +1043,23 @@ static int scpi_probe(struct platform_device *pdev)
1002 ret = scpi_init_versions(scpi_info); 1043 ret = scpi_init_versions(scpi_info);
1003 if (ret) { 1044 if (ret) {
1004 dev_err(dev, "incorrect or no SCP firmware found\n"); 1045 dev_err(dev, "incorrect or no SCP firmware found\n");
1046 scpi_remove(pdev);
1005 return ret; 1047 return ret;
1006 } 1048 }
1007 1049
1008 scpi_dvfs_populate(dev); 1050 _dev_info(dev, "SCP Protocol %d.%d Firmware %d.%d.%d version\n",
1009 1051 PROTOCOL_REV_MAJOR(scpi_info->protocol_version),
1010 _dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n", 1052 PROTOCOL_REV_MINOR(scpi_info->protocol_version),
1011 FIELD_GET(PROTO_REV_MAJOR_MASK, scpi_info->protocol_version), 1053 FW_REV_MAJOR(scpi_info->firmware_version),
1012 FIELD_GET(PROTO_REV_MINOR_MASK, scpi_info->protocol_version), 1054 FW_REV_MINOR(scpi_info->firmware_version),
1013 FIELD_GET(FW_REV_MAJOR_MASK, scpi_info->firmware_version), 1055 FW_REV_PATCH(scpi_info->firmware_version));
1014 FIELD_GET(FW_REV_MINOR_MASK, scpi_info->firmware_version), 1056 scpi_info->scpi_ops = &scpi_ops;
1015 FIELD_GET(FW_REV_PATCH_MASK, scpi_info->firmware_version));
1016 1057
1017 ret = devm_device_add_groups(dev, versions_groups); 1058 ret = sysfs_create_groups(&dev->kobj, versions_groups);
1018 if (ret) 1059 if (ret)
1019 dev_err(dev, "unable to create sysfs version group\n"); 1060 dev_err(dev, "unable to create sysfs version group\n");
1020 1061
1021 return devm_of_platform_populate(dev); 1062 return of_platform_populate(dev->of_node, NULL, NULL, dev);
1022} 1063}
1023 1064
1024static const struct of_device_id scpi_of_match[] = { 1065static const struct of_device_id scpi_of_match[] = {
@@ -1035,6 +1076,7 @@ static struct platform_driver scpi_driver = {
1035 .of_match_table = scpi_of_match, 1076 .of_match_table = scpi_of_match,
1036 }, 1077 },
1037 .probe = scpi_probe, 1078 .probe = scpi_probe,
1079 .remove = scpi_remove,
1038}; 1080};
1039module_platform_driver(scpi_driver); 1081module_platform_driver(scpi_driver);
1040 1082
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index ec8ac5c4dd84..055e2e8f985a 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -20,10 +20,6 @@
20 20
21#define NO_FURTHER_WRITE_ACTION -1 21#define NO_FURTHER_WRITE_ACTION -1
22 22
23#ifndef phys_to_page
24#define phys_to_page(x) pfn_to_page((x) >> PAGE_SHIFT)
25#endif
26
27/** 23/**
28 * efi_free_all_buff_pages - free all previous allocated buffer pages 24 * efi_free_all_buff_pages - free all previous allocated buffer pages
29 * @cap_info: pointer to current instance of capsule_info structure 25 * @cap_info: pointer to current instance of capsule_info structure
@@ -35,7 +31,7 @@
35static void efi_free_all_buff_pages(struct capsule_info *cap_info) 31static void efi_free_all_buff_pages(struct capsule_info *cap_info)
36{ 32{
37 while (cap_info->index > 0) 33 while (cap_info->index > 0)
38 __free_page(phys_to_page(cap_info->pages[--cap_info->index])); 34 __free_page(cap_info->pages[--cap_info->index]);
39 35
40 cap_info->index = NO_FURTHER_WRITE_ACTION; 36 cap_info->index = NO_FURTHER_WRITE_ACTION;
41} 37}
@@ -71,6 +67,14 @@ int __efi_capsule_setup_info(struct capsule_info *cap_info)
71 67
72 cap_info->pages = temp_page; 68 cap_info->pages = temp_page;
73 69
70 temp_page = krealloc(cap_info->phys,
71 pages_needed * sizeof(phys_addr_t *),
72 GFP_KERNEL | __GFP_ZERO);
73 if (!temp_page)
74 return -ENOMEM;
75
76 cap_info->phys = temp_page;
77
74 return 0; 78 return 0;
75} 79}
76 80
@@ -105,9 +109,24 @@ int __weak efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
105 **/ 109 **/
106static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info) 110static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info)
107{ 111{
112 bool do_vunmap = false;
108 int ret; 113 int ret;
109 114
110 ret = efi_capsule_update(&cap_info->header, cap_info->pages); 115 /*
116 * cap_info->capsule may have been assigned already by a quirk
117 * handler, so only overwrite it if it is NULL
118 */
119 if (!cap_info->capsule) {
120 cap_info->capsule = vmap(cap_info->pages, cap_info->index,
121 VM_MAP, PAGE_KERNEL);
122 if (!cap_info->capsule)
123 return -ENOMEM;
124 do_vunmap = true;
125 }
126
127 ret = efi_capsule_update(cap_info->capsule, cap_info->phys);
128 if (do_vunmap)
129 vunmap(cap_info->capsule);
111 if (ret) { 130 if (ret) {
112 pr_err("capsule update failed\n"); 131 pr_err("capsule update failed\n");
113 return ret; 132 return ret;
@@ -165,10 +184,12 @@ static ssize_t efi_capsule_write(struct file *file, const char __user *buff,
165 goto failed; 184 goto failed;
166 } 185 }
167 186
168 cap_info->pages[cap_info->index++] = page_to_phys(page); 187 cap_info->pages[cap_info->index] = page;
188 cap_info->phys[cap_info->index] = page_to_phys(page);
169 cap_info->page_bytes_remain = PAGE_SIZE; 189 cap_info->page_bytes_remain = PAGE_SIZE;
190 cap_info->index++;
170 } else { 191 } else {
171 page = phys_to_page(cap_info->pages[cap_info->index - 1]); 192 page = cap_info->pages[cap_info->index - 1];
172 } 193 }
173 194
174 kbuff = kmap(page); 195 kbuff = kmap(page);
@@ -252,6 +273,7 @@ static int efi_capsule_release(struct inode *inode, struct file *file)
252 struct capsule_info *cap_info = file->private_data; 273 struct capsule_info *cap_info = file->private_data;
253 274
254 kfree(cap_info->pages); 275 kfree(cap_info->pages);
276 kfree(cap_info->phys);
255 kfree(file->private_data); 277 kfree(file->private_data);
256 file->private_data = NULL; 278 file->private_data = NULL;
257 return 0; 279 return 0;
@@ -281,6 +303,13 @@ static int efi_capsule_open(struct inode *inode, struct file *file)
281 return -ENOMEM; 303 return -ENOMEM;
282 } 304 }
283 305
306 cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL);
307 if (!cap_info->phys) {
308 kfree(cap_info->pages);
309 kfree(cap_info);
310 return -ENOMEM;
311 }
312
284 file->private_data = cap_info; 313 file->private_data = cap_info;
285 314
286 return 0; 315 return 0;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index f70febf680c3..557a47829d03 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -109,6 +109,8 @@ struct kobject *efi_kobj;
109/* 109/*
110 * Let's not leave out systab information that snuck into 110 * Let's not leave out systab information that snuck into
111 * the efivars driver 111 * the efivars driver
112 * Note, do not add more fields in systab sysfs file as it breaks sysfs
113 * one value per file rule!
112 */ 114 */
113static ssize_t systab_show(struct kobject *kobj, 115static ssize_t systab_show(struct kobject *kobj,
114 struct kobj_attribute *attr, char *buf) 116 struct kobj_attribute *attr, char *buf)
@@ -143,8 +145,7 @@ static ssize_t systab_show(struct kobject *kobj,
143 return str - buf; 145 return str - buf;
144} 146}
145 147
146static struct kobj_attribute efi_attr_systab = 148static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
147 __ATTR(systab, 0400, systab_show, NULL);
148 149
149#define EFI_FIELD(var) efi.var 150#define EFI_FIELD(var) efi.var
150 151
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index bd7ed3c1148a..c47e0c6ec00f 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -106,7 +106,7 @@ static const struct sysfs_ops esre_attr_ops = {
106}; 106};
107 107
108/* Generic ESRT Entry ("ESRE") support. */ 108/* Generic ESRT Entry ("ESRE") support. */
109static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf) 109static ssize_t fw_class_show(struct esre_entry *entry, char *buf)
110{ 110{
111 char *str = buf; 111 char *str = buf;
112 112
@@ -117,18 +117,16 @@ static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf)
117 return str - buf; 117 return str - buf;
118} 118}
119 119
120static struct esre_attribute esre_fw_class = __ATTR(fw_class, 0400, 120static struct esre_attribute esre_fw_class = __ATTR_RO_MODE(fw_class, 0400);
121 esre_fw_class_show, NULL);
122 121
123#define esre_attr_decl(name, size, fmt) \ 122#define esre_attr_decl(name, size, fmt) \
124static ssize_t esre_##name##_show(struct esre_entry *entry, char *buf) \ 123static ssize_t name##_show(struct esre_entry *entry, char *buf) \
125{ \ 124{ \
126 return sprintf(buf, fmt "\n", \ 125 return sprintf(buf, fmt "\n", \
127 le##size##_to_cpu(entry->esre.esre1->name)); \ 126 le##size##_to_cpu(entry->esre.esre1->name)); \
128} \ 127} \
129\ 128\
130static struct esre_attribute esre_##name = __ATTR(name, 0400, \ 129static struct esre_attribute esre_##name = __ATTR_RO_MODE(name, 0400)
131 esre_##name##_show, NULL)
132 130
133esre_attr_decl(fw_type, 32, "%u"); 131esre_attr_decl(fw_type, 32, "%u");
134esre_attr_decl(fw_version, 32, "%u"); 132esre_attr_decl(fw_version, 32, "%u");
@@ -193,14 +191,13 @@ static int esre_create_sysfs_entry(void *esre, int entry_num)
193 191
194/* support for displaying ESRT fields at the top level */ 192/* support for displaying ESRT fields at the top level */
195#define esrt_attr_decl(name, size, fmt) \ 193#define esrt_attr_decl(name, size, fmt) \
196static ssize_t esrt_##name##_show(struct kobject *kobj, \ 194static ssize_t name##_show(struct kobject *kobj, \
197 struct kobj_attribute *attr, char *buf)\ 195 struct kobj_attribute *attr, char *buf)\
198{ \ 196{ \
199 return sprintf(buf, fmt "\n", le##size##_to_cpu(esrt->name)); \ 197 return sprintf(buf, fmt "\n", le##size##_to_cpu(esrt->name)); \
200} \ 198} \
201\ 199\
202static struct kobj_attribute esrt_##name = __ATTR(name, 0400, \ 200static struct kobj_attribute esrt_##name = __ATTR_RO_MODE(name, 0400)
203 esrt_##name##_show, NULL)
204 201
205esrt_attr_decl(fw_resource_count, 32, "%u"); 202esrt_attr_decl(fw_resource_count, 32, "%u");
206esrt_attr_decl(fw_resource_count_max, 32, "%u"); 203esrt_attr_decl(fw_resource_count_max, 32, "%u");
@@ -431,7 +428,7 @@ err_remove_group:
431err_remove_esrt: 428err_remove_esrt:
432 kobject_put(esrt_kobj); 429 kobject_put(esrt_kobj);
433err: 430err:
434 kfree(esrt); 431 memunmap(esrt);
435 esrt = NULL; 432 esrt = NULL;
436 return error; 433 return error;
437} 434}
diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
index 8e64b77aeac9..f377609ff141 100644
--- a/drivers/firmware/efi/runtime-map.c
+++ b/drivers/firmware/efi/runtime-map.c
@@ -63,11 +63,11 @@ static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr,
63 return map_attr->show(entry, buf); 63 return map_attr->show(entry, buf);
64} 64}
65 65
66static struct map_attribute map_type_attr = __ATTR_RO(type); 66static struct map_attribute map_type_attr = __ATTR_RO_MODE(type, 0400);
67static struct map_attribute map_phys_addr_attr = __ATTR_RO(phys_addr); 67static struct map_attribute map_phys_addr_attr = __ATTR_RO_MODE(phys_addr, 0400);
68static struct map_attribute map_virt_addr_attr = __ATTR_RO(virt_addr); 68static struct map_attribute map_virt_addr_attr = __ATTR_RO_MODE(virt_addr, 0400);
69static struct map_attribute map_num_pages_attr = __ATTR_RO(num_pages); 69static struct map_attribute map_num_pages_attr = __ATTR_RO_MODE(num_pages, 0400);
70static struct map_attribute map_attribute_attr = __ATTR_RO(attribute); 70static struct map_attribute map_attribute_attr = __ATTR_RO_MODE(attribute, 0400);
71 71
72/* 72/*
73 * These are default attributes that are added for every memmap entry. 73 * These are default attributes that are added for every memmap entry.
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 35e553b3b190..e4b40f2b4627 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -295,38 +295,60 @@ static int vpd_probe(struct platform_device *pdev)
295 if (ret) 295 if (ret)
296 return ret; 296 return ret;
297 297
298 return vpd_sections_init(entry.cbmem_addr); 298 vpd_kobj = kobject_create_and_add("vpd", firmware_kobj);
299 if (!vpd_kobj)
300 return -ENOMEM;
301
302 ret = vpd_sections_init(entry.cbmem_addr);
303 if (ret) {
304 kobject_put(vpd_kobj);
305 return ret;
306 }
307
308 return 0;
309}
310
311static int vpd_remove(struct platform_device *pdev)
312{
313 vpd_section_destroy(&ro_vpd);
314 vpd_section_destroy(&rw_vpd);
315
316 kobject_put(vpd_kobj);
317
318 return 0;
299} 319}
300 320
301static struct platform_driver vpd_driver = { 321static struct platform_driver vpd_driver = {
302 .probe = vpd_probe, 322 .probe = vpd_probe,
323 .remove = vpd_remove,
303 .driver = { 324 .driver = {
304 .name = "vpd", 325 .name = "vpd",
305 }, 326 },
306}; 327};
307 328
329static struct platform_device *vpd_pdev;
330
308static int __init vpd_platform_init(void) 331static int __init vpd_platform_init(void)
309{ 332{
310 struct platform_device *pdev; 333 int ret;
311
312 pdev = platform_device_register_simple("vpd", -1, NULL, 0);
313 if (IS_ERR(pdev))
314 return PTR_ERR(pdev);
315 334
316 vpd_kobj = kobject_create_and_add("vpd", firmware_kobj); 335 ret = platform_driver_register(&vpd_driver);
317 if (!vpd_kobj) 336 if (ret)
318 return -ENOMEM; 337 return ret;
319 338
320 platform_driver_register(&vpd_driver); 339 vpd_pdev = platform_device_register_simple("vpd", -1, NULL, 0);
340 if (IS_ERR(vpd_pdev)) {
341 platform_driver_unregister(&vpd_driver);
342 return PTR_ERR(vpd_pdev);
343 }
321 344
322 return 0; 345 return 0;
323} 346}
324 347
325static void __exit vpd_platform_exit(void) 348static void __exit vpd_platform_exit(void)
326{ 349{
327 vpd_section_destroy(&ro_vpd); 350 platform_device_unregister(vpd_pdev);
328 vpd_section_destroy(&rw_vpd); 351 platform_driver_unregister(&vpd_driver);
329 kobject_put(vpd_kobj);
330} 352}
331 353
332module_init(vpd_platform_init); 354module_init(vpd_platform_init);
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 5cfe39f7a45f..deb483064f53 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -582,9 +582,10 @@ static int fw_cfg_sysfs_remove(struct platform_device *pdev)
582{ 582{
583 pr_debug("fw_cfg: unloading.\n"); 583 pr_debug("fw_cfg: unloading.\n");
584 fw_cfg_sysfs_cache_cleanup(); 584 fw_cfg_sysfs_cache_cleanup();
585 sysfs_remove_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
586 fw_cfg_io_cleanup();
585 fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset); 587 fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
586 fw_cfg_kobj_cleanup(fw_cfg_sel_ko); 588 fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
587 fw_cfg_io_cleanup();
588 return 0; 589 return 0;
589} 590}
590 591
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index 6b535ec858cc..15a1f4b348c4 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -23,6 +23,7 @@
23struct gen_74x164_chip { 23struct gen_74x164_chip {
24 struct gpio_chip gpio_chip; 24 struct gpio_chip gpio_chip;
25 struct mutex lock; 25 struct mutex lock;
26 struct gpio_desc *gpiod_oe;
26 u32 registers; 27 u32 registers;
27 /* 28 /*
28 * Since the registers are chained, every byte sent will make 29 * Since the registers are chained, every byte sent will make
@@ -31,8 +32,7 @@ struct gen_74x164_chip {
31 * register at the end of the transfer. So, to have a logical 32 * register at the end of the transfer. So, to have a logical
32 * numbering, store the bytes in reverse order. 33 * numbering, store the bytes in reverse order.
33 */ 34 */
34 u8 buffer[0]; 35 u8 buffer[];
35 struct gpio_desc *gpiod_oe;
36}; 36};
37 37
38static int __gen_74x164_write_config(struct gen_74x164_chip *chip) 38static int __gen_74x164_write_config(struct gen_74x164_chip *chip)
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index dfcf56ee3c61..76861a00bb92 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -522,6 +522,7 @@ static struct of_device_id const bcm_kona_gpio_of_match[] = {
522 * category than their parents, so it won't report false recursion. 522 * category than their parents, so it won't report false recursion.
523 */ 523 */
524static struct lock_class_key gpio_lock_class; 524static struct lock_class_key gpio_lock_class;
525static struct lock_class_key gpio_request_class;
525 526
526static int bcm_kona_gpio_irq_map(struct irq_domain *d, unsigned int irq, 527static int bcm_kona_gpio_irq_map(struct irq_domain *d, unsigned int irq,
527 irq_hw_number_t hwirq) 528 irq_hw_number_t hwirq)
@@ -531,7 +532,7 @@ static int bcm_kona_gpio_irq_map(struct irq_domain *d, unsigned int irq,
531 ret = irq_set_chip_data(irq, d->host_data); 532 ret = irq_set_chip_data(irq, d->host_data);
532 if (ret < 0) 533 if (ret < 0)
533 return ret; 534 return ret;
534 irq_set_lockdep_class(irq, &gpio_lock_class); 535 irq_set_lockdep_class(irq, &gpio_lock_class, &gpio_request_class);
535 irq_set_chip_and_handler(irq, &bcm_gpio_irq_chip, handle_simple_irq); 536 irq_set_chip_and_handler(irq, &bcm_gpio_irq_chip, handle_simple_irq);
536 irq_set_noprobe(irq); 537 irq_set_noprobe(irq);
537 538
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 545d43a587b7..bb4f8cf18bd9 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -327,6 +327,7 @@ static struct brcmstb_gpio_bank *brcmstb_gpio_hwirq_to_bank(
327 * category than their parents, so it won't report false recursion. 327 * category than their parents, so it won't report false recursion.
328 */ 328 */
329static struct lock_class_key brcmstb_gpio_irq_lock_class; 329static struct lock_class_key brcmstb_gpio_irq_lock_class;
330static struct lock_class_key brcmstb_gpio_irq_request_class;
330 331
331 332
332static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq, 333static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq,
@@ -346,7 +347,8 @@ static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq,
346 ret = irq_set_chip_data(irq, &bank->gc); 347 ret = irq_set_chip_data(irq, &bank->gc);
347 if (ret < 0) 348 if (ret < 0)
348 return ret; 349 return ret;
349 irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class); 350 irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class,
351 &brcmstb_gpio_irq_request_class);
350 irq_set_chip_and_handler(irq, &priv->irq_chip, handle_level_irq); 352 irq_set_chip_and_handler(irq, &priv->irq_chip, handle_level_irq);
351 irq_set_noprobe(irq); 353 irq_set_noprobe(irq);
352 return 0; 354 return 0;
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index f75d8443ecaf..e4b3d7db68c9 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -383,7 +383,7 @@ static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger)
383 u32 mask; 383 u32 mask;
384 384
385 d = (struct davinci_gpio_controller *)irq_data_get_irq_handler_data(data); 385 d = (struct davinci_gpio_controller *)irq_data_get_irq_handler_data(data);
386 g = (struct davinci_gpio_regs __iomem *)d->regs; 386 g = (struct davinci_gpio_regs __iomem *)d->regs[0];
387 mask = __gpio_mask(data->irq - d->base_irq); 387 mask = __gpio_mask(data->irq - d->base_irq);
388 388
389 if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) 389 if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index f9042bcc27a4..7b14d6280e44 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -152,14 +152,13 @@ static int bgpio_get_set_multiple(struct gpio_chip *gc, unsigned long *mask,
152{ 152{
153 unsigned long get_mask = 0; 153 unsigned long get_mask = 0;
154 unsigned long set_mask = 0; 154 unsigned long set_mask = 0;
155 int bit = 0;
156 155
157 while ((bit = find_next_bit(mask, gc->ngpio, bit)) != gc->ngpio) { 156 /* Make sure we first clear any bits that are zero when we read the register */
158 if (gc->bgpio_dir & BIT(bit)) 157 *bits &= ~*mask;
159 set_mask |= BIT(bit); 158
160 else 159 /* Exploit the fact that we know which directions are set */
161 get_mask |= BIT(bit); 160 set_mask = *mask & gc->bgpio_dir;
162 } 161 get_mask = *mask & ~gc->bgpio_dir;
163 162
164 if (set_mask) 163 if (set_mask)
165 *bits |= gc->read_reg(gc->reg_set) & set_mask; 164 *bits |= gc->read_reg(gc->reg_set) & set_mask;
@@ -176,13 +175,13 @@ static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
176 175
177/* 176/*
178 * This only works if the bits in the GPIO register are in native endianness. 177 * This only works if the bits in the GPIO register are in native endianness.
179 * It is dirt simple and fast in this case. (Also the most common case.)
180 */ 178 */
181static int bgpio_get_multiple(struct gpio_chip *gc, unsigned long *mask, 179static int bgpio_get_multiple(struct gpio_chip *gc, unsigned long *mask,
182 unsigned long *bits) 180 unsigned long *bits)
183{ 181{
184 182 /* Make sure we first clear any bits that are zero when we read the register */
185 *bits = gc->read_reg(gc->reg_dat) & *mask; 183 *bits &= ~*mask;
184 *bits |= gc->read_reg(gc->reg_dat) & *mask;
186 return 0; 185 return 0;
187} 186}
188 187
@@ -196,9 +195,12 @@ static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
196 unsigned long val; 195 unsigned long val;
197 int bit; 196 int bit;
198 197
198 /* Make sure we first clear any bits that are zero when we read the register */
199 *bits &= ~*mask;
200
199 /* Create a mirrored mask */ 201 /* Create a mirrored mask */
200 bit = 0; 202 bit = -1;
201 while ((bit = find_next_bit(mask, gc->ngpio, bit)) != gc->ngpio) 203 while ((bit = find_next_bit(mask, gc->ngpio, bit + 1)) < gc->ngpio)
202 readmask |= bgpio_line2mask(gc, bit); 204 readmask |= bgpio_line2mask(gc, bit);
203 205
204 /* Read the register */ 206 /* Read the register */
@@ -208,8 +210,8 @@ static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
208 * Mirror the result into the "bits" result, this will give line 0 210 * Mirror the result into the "bits" result, this will give line 0
209 * in bit 0 ... line 31 in bit 31 for a 32bit register. 211 * in bit 0 ... line 31 in bit 31 for a 32bit register.
210 */ 212 */
211 bit = 0; 213 bit = -1;
212 while ((bit = find_next_bit(&val, gc->ngpio, bit)) != gc->ngpio) 214 while ((bit = find_next_bit(&val, gc->ngpio, bit + 1)) < gc->ngpio)
213 *bits |= bgpio_line2mask(gc, bit); 215 *bits |= bgpio_line2mask(gc, bit);
214 216
215 return 0; 217 return 0;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index babb7bd2ba59..a0a5f9730aa7 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -947,7 +947,7 @@ static const struct of_device_id pca953x_dt_ids[] = {
947 { .compatible = "ti,tca6416", .data = OF_953X(16, PCA_INT), }, 947 { .compatible = "ti,tca6416", .data = OF_953X(16, PCA_INT), },
948 { .compatible = "ti,tca6424", .data = OF_953X(24, PCA_INT), }, 948 { .compatible = "ti,tca6424", .data = OF_953X(24, PCA_INT), },
949 949
950 { .compatible = "onsemi,pca9654", .data = OF_953X( 8, PCA_INT), }, 950 { .compatible = "onnn,pca9654", .data = OF_953X( 8, PCA_INT), },
951 951
952 { .compatible = "exar,xra1202", .data = OF_953X( 8, 0), }, 952 { .compatible = "exar,xra1202", .data = OF_953X( 8, 0), },
953 { } 953 { }
diff --git a/drivers/gpio/gpio-reg.c b/drivers/gpio/gpio-reg.c
index 23e771dba4c1..e85903eddc68 100644
--- a/drivers/gpio/gpio-reg.c
+++ b/drivers/gpio/gpio-reg.c
@@ -103,8 +103,8 @@ static int gpio_reg_to_irq(struct gpio_chip *gc, unsigned offset)
103 struct gpio_reg *r = to_gpio_reg(gc); 103 struct gpio_reg *r = to_gpio_reg(gc);
104 int irq = r->irqs[offset]; 104 int irq = r->irqs[offset];
105 105
106 if (irq >= 0 && r->irq.domain) 106 if (irq >= 0 && r->irqdomain)
107 irq = irq_find_mapping(r->irq.domain, irq); 107 irq = irq_find_mapping(r->irqdomain, irq);
108 108
109 return irq; 109 return irq;
110} 110}
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 8db47f671708..02fa8fe2292a 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -565,6 +565,7 @@ static const struct dev_pm_ops tegra_gpio_pm_ops = {
565 * than their parents, so it won't report false recursion. 565 * than their parents, so it won't report false recursion.
566 */ 566 */
567static struct lock_class_key gpio_lock_class; 567static struct lock_class_key gpio_lock_class;
568static struct lock_class_key gpio_request_class;
568 569
569static int tegra_gpio_probe(struct platform_device *pdev) 570static int tegra_gpio_probe(struct platform_device *pdev)
570{ 571{
@@ -670,7 +671,8 @@ static int tegra_gpio_probe(struct platform_device *pdev)
670 671
671 bank = &tgi->bank_info[GPIO_BANK(gpio)]; 672 bank = &tgi->bank_info[GPIO_BANK(gpio)];
672 673
673 irq_set_lockdep_class(irq, &gpio_lock_class); 674 irq_set_lockdep_class(irq, &gpio_lock_class,
675 &gpio_request_class);
674 irq_set_chip_data(irq, bank); 676 irq_set_chip_data(irq, bank);
675 irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq); 677 irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq);
676 } 678 }
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index 2313af82fad3..acd59113e08b 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -139,7 +139,7 @@ static int xgene_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio)
139 139
140static int xgene_gpio_sb_domain_activate(struct irq_domain *d, 140static int xgene_gpio_sb_domain_activate(struct irq_domain *d,
141 struct irq_data *irq_data, 141 struct irq_data *irq_data,
142 bool early) 142 bool reserve)
143{ 143{
144 struct xgene_gpio_sb *priv = d->host_data; 144 struct xgene_gpio_sb *priv = d->host_data;
145 u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq); 145 u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index eb4528c87c0b..d6f3d9ee1350 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1074,7 +1074,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
1074 } 1074 }
1075 1075
1076 if (!chip->names) 1076 if (!chip->names)
1077 devprop_gpiochip_set_names(chip); 1077 devprop_gpiochip_set_names(chip, dev_fwnode(chip->parent));
1078 1078
1079 acpi_gpiochip_request_regions(acpi_gpio); 1079 acpi_gpiochip_request_regions(acpi_gpio);
1080 acpi_gpiochip_scan_gpios(acpi_gpio); 1080 acpi_gpiochip_scan_gpios(acpi_gpio);
diff --git a/drivers/gpio/gpiolib-devprop.c b/drivers/gpio/gpiolib-devprop.c
index 27f383bda7d9..f748aa3e77f7 100644
--- a/drivers/gpio/gpiolib-devprop.c
+++ b/drivers/gpio/gpiolib-devprop.c
@@ -19,30 +19,27 @@
19/** 19/**
20 * devprop_gpiochip_set_names - Set GPIO line names using device properties 20 * devprop_gpiochip_set_names - Set GPIO line names using device properties
21 * @chip: GPIO chip whose lines should be named, if possible 21 * @chip: GPIO chip whose lines should be named, if possible
22 * @fwnode: Property Node containing the gpio-line-names property
22 * 23 *
23 * Looks for device property "gpio-line-names" and if it exists assigns 24 * Looks for device property "gpio-line-names" and if it exists assigns
24 * GPIO line names for the chip. The memory allocated for the assigned 25 * GPIO line names for the chip. The memory allocated for the assigned
25 * names belong to the underlying firmware node and should not be released 26 * names belong to the underlying firmware node and should not be released
26 * by the caller. 27 * by the caller.
27 */ 28 */
28void devprop_gpiochip_set_names(struct gpio_chip *chip) 29void devprop_gpiochip_set_names(struct gpio_chip *chip,
30 const struct fwnode_handle *fwnode)
29{ 31{
30 struct gpio_device *gdev = chip->gpiodev; 32 struct gpio_device *gdev = chip->gpiodev;
31 const char **names; 33 const char **names;
32 int ret, i; 34 int ret, i;
33 35
34 if (!chip->parent) { 36 ret = fwnode_property_read_string_array(fwnode, "gpio-line-names",
35 dev_warn(&gdev->dev, "GPIO chip parent is NULL\n");
36 return;
37 }
38
39 ret = device_property_read_string_array(chip->parent, "gpio-line-names",
40 NULL, 0); 37 NULL, 0);
41 if (ret < 0) 38 if (ret < 0)
42 return; 39 return;
43 40
44 if (ret != gdev->ngpio) { 41 if (ret != gdev->ngpio) {
45 dev_warn(chip->parent, 42 dev_warn(&gdev->dev,
46 "names %d do not match number of GPIOs %d\n", ret, 43 "names %d do not match number of GPIOs %d\n", ret,
47 gdev->ngpio); 44 gdev->ngpio);
48 return; 45 return;
@@ -52,10 +49,10 @@ void devprop_gpiochip_set_names(struct gpio_chip *chip)
52 if (!names) 49 if (!names)
53 return; 50 return;
54 51
55 ret = device_property_read_string_array(chip->parent, "gpio-line-names", 52 ret = fwnode_property_read_string_array(fwnode, "gpio-line-names",
56 names, gdev->ngpio); 53 names, gdev->ngpio);
57 if (ret < 0) { 54 if (ret < 0) {
58 dev_warn(chip->parent, "failed to read GPIO line names\n"); 55 dev_warn(&gdev->dev, "failed to read GPIO line names\n");
59 kfree(names); 56 kfree(names);
60 return; 57 return;
61 } 58 }
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index e0d59e61b52f..72a0695d2ac3 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -493,7 +493,8 @@ int of_gpiochip_add(struct gpio_chip *chip)
493 493
494 /* If the chip defines names itself, these take precedence */ 494 /* If the chip defines names itself, these take precedence */
495 if (!chip->names) 495 if (!chip->names)
496 devprop_gpiochip_set_names(chip); 496 devprop_gpiochip_set_names(chip,
497 of_fwnode_handle(chip->of_node));
497 498
498 of_node_get(chip->of_node); 499 of_node_get(chip->of_node);
499 500
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index aad84a6306c4..14532d9576e4 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -73,7 +73,8 @@ LIST_HEAD(gpio_devices);
73 73
74static void gpiochip_free_hogs(struct gpio_chip *chip); 74static void gpiochip_free_hogs(struct gpio_chip *chip);
75static int gpiochip_add_irqchip(struct gpio_chip *gpiochip, 75static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
76 struct lock_class_key *key); 76 struct lock_class_key *lock_key,
77 struct lock_class_key *request_key);
77static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip); 78static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
78static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip); 79static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip);
79static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip); 80static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip);
@@ -1100,7 +1101,8 @@ static void gpiochip_setup_devs(void)
1100} 1101}
1101 1102
1102int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, 1103int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
1103 struct lock_class_key *key) 1104 struct lock_class_key *lock_key,
1105 struct lock_class_key *request_key)
1104{ 1106{
1105 unsigned long flags; 1107 unsigned long flags;
1106 int status = 0; 1108 int status = 0;
@@ -1246,7 +1248,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
1246 if (status) 1248 if (status)
1247 goto err_remove_from_list; 1249 goto err_remove_from_list;
1248 1250
1249 status = gpiochip_add_irqchip(chip, key); 1251 status = gpiochip_add_irqchip(chip, lock_key, request_key);
1250 if (status) 1252 if (status)
1251 goto err_remove_chip; 1253 goto err_remove_chip;
1252 1254
@@ -1632,7 +1634,7 @@ int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
1632 * This lock class tells lockdep that GPIO irqs are in a different 1634 * This lock class tells lockdep that GPIO irqs are in a different
1633 * category than their parents, so it won't report false recursion. 1635 * category than their parents, so it won't report false recursion.
1634 */ 1636 */
1635 irq_set_lockdep_class(irq, chip->irq.lock_key); 1637 irq_set_lockdep_class(irq, chip->irq.lock_key, chip->irq.request_key);
1636 irq_set_chip_and_handler(irq, chip->irq.chip, chip->irq.handler); 1638 irq_set_chip_and_handler(irq, chip->irq.chip, chip->irq.handler);
1637 /* Chips that use nested thread handlers have them marked */ 1639 /* Chips that use nested thread handlers have them marked */
1638 if (chip->irq.threaded) 1640 if (chip->irq.threaded)
@@ -1712,10 +1714,12 @@ static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
1712/** 1714/**
1713 * gpiochip_add_irqchip() - adds an IRQ chip to a GPIO chip 1715 * gpiochip_add_irqchip() - adds an IRQ chip to a GPIO chip
1714 * @gpiochip: the GPIO chip to add the IRQ chip to 1716 * @gpiochip: the GPIO chip to add the IRQ chip to
1715 * @lock_key: lockdep class 1717 * @lock_key: lockdep class for IRQ lock
1718 * @request_key: lockdep class for IRQ request
1716 */ 1719 */
1717static int gpiochip_add_irqchip(struct gpio_chip *gpiochip, 1720static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
1718 struct lock_class_key *lock_key) 1721 struct lock_class_key *lock_key,
1722 struct lock_class_key *request_key)
1719{ 1723{
1720 struct irq_chip *irqchip = gpiochip->irq.chip; 1724 struct irq_chip *irqchip = gpiochip->irq.chip;
1721 const struct irq_domain_ops *ops; 1725 const struct irq_domain_ops *ops;
@@ -1753,6 +1757,7 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
1753 gpiochip->to_irq = gpiochip_to_irq; 1757 gpiochip->to_irq = gpiochip_to_irq;
1754 gpiochip->irq.default_type = type; 1758 gpiochip->irq.default_type = type;
1755 gpiochip->irq.lock_key = lock_key; 1759 gpiochip->irq.lock_key = lock_key;
1760 gpiochip->irq.request_key = request_key;
1756 1761
1757 if (gpiochip->irq.domain_ops) 1762 if (gpiochip->irq.domain_ops)
1758 ops = gpiochip->irq.domain_ops; 1763 ops = gpiochip->irq.domain_ops;
@@ -1850,7 +1855,8 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
1850 * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE 1855 * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
1851 * to have the core avoid setting up any default type in the hardware. 1856 * to have the core avoid setting up any default type in the hardware.
1852 * @threaded: whether this irqchip uses a nested thread handler 1857 * @threaded: whether this irqchip uses a nested thread handler
1853 * @lock_key: lockdep class 1858 * @lock_key: lockdep class for IRQ lock
1859 * @request_key: lockdep class for IRQ request
1854 * 1860 *
1855 * This function closely associates a certain irqchip with a certain 1861 * This function closely associates a certain irqchip with a certain
1856 * gpiochip, providing an irq domain to translate the local IRQs to 1862 * gpiochip, providing an irq domain to translate the local IRQs to
@@ -1872,7 +1878,8 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
1872 irq_flow_handler_t handler, 1878 irq_flow_handler_t handler,
1873 unsigned int type, 1879 unsigned int type,
1874 bool threaded, 1880 bool threaded,
1875 struct lock_class_key *lock_key) 1881 struct lock_class_key *lock_key,
1882 struct lock_class_key *request_key)
1876{ 1883{
1877 struct device_node *of_node; 1884 struct device_node *of_node;
1878 1885
@@ -1913,6 +1920,7 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
1913 gpiochip->irq.default_type = type; 1920 gpiochip->irq.default_type = type;
1914 gpiochip->to_irq = gpiochip_to_irq; 1921 gpiochip->to_irq = gpiochip_to_irq;
1915 gpiochip->irq.lock_key = lock_key; 1922 gpiochip->irq.lock_key = lock_key;
1923 gpiochip->irq.request_key = request_key;
1916 gpiochip->irq.domain = irq_domain_add_simple(of_node, 1924 gpiochip->irq.domain = irq_domain_add_simple(of_node,
1917 gpiochip->ngpio, first_irq, 1925 gpiochip->ngpio, first_irq,
1918 &gpiochip_domain_ops, gpiochip); 1926 &gpiochip_domain_ops, gpiochip);
@@ -1940,7 +1948,8 @@ EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
1940#else /* CONFIG_GPIOLIB_IRQCHIP */ 1948#else /* CONFIG_GPIOLIB_IRQCHIP */
1941 1949
1942static inline int gpiochip_add_irqchip(struct gpio_chip *gpiochip, 1950static inline int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
1943 struct lock_class_key *key) 1951 struct lock_class_key *lock_key,
1952 struct lock_class_key *request_key)
1944{ 1953{
1945 return 0; 1954 return 0;
1946} 1955}
@@ -2884,6 +2893,27 @@ void gpiod_set_raw_value(struct gpio_desc *desc, int value)
2884EXPORT_SYMBOL_GPL(gpiod_set_raw_value); 2893EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
2885 2894
2886/** 2895/**
2896 * gpiod_set_value_nocheck() - set a GPIO line value without checking
2897 * @desc: the descriptor to set the value on
2898 * @value: value to set
2899 *
2900 * This sets the value of a GPIO line backing a descriptor, applying
2901 * different semantic quirks like active low and open drain/source
2902 * handling.
2903 */
2904static void gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
2905{
2906 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
2907 value = !value;
2908 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
2909 gpio_set_open_drain_value_commit(desc, value);
2910 else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
2911 gpio_set_open_source_value_commit(desc, value);
2912 else
2913 gpiod_set_raw_value_commit(desc, value);
2914}
2915
2916/**
2887 * gpiod_set_value() - assign a gpio's value 2917 * gpiod_set_value() - assign a gpio's value
2888 * @desc: gpio whose value will be assigned 2918 * @desc: gpio whose value will be assigned
2889 * @value: value to assign 2919 * @value: value to assign
@@ -2897,16 +2927,8 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
2897void gpiod_set_value(struct gpio_desc *desc, int value) 2927void gpiod_set_value(struct gpio_desc *desc, int value)
2898{ 2928{
2899 VALIDATE_DESC_VOID(desc); 2929 VALIDATE_DESC_VOID(desc);
2900 /* Should be using gpiod_set_value_cansleep() */
2901 WARN_ON(desc->gdev->chip->can_sleep); 2930 WARN_ON(desc->gdev->chip->can_sleep);
2902 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) 2931 gpiod_set_value_nocheck(desc, value);
2903 value = !value;
2904 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
2905 gpio_set_open_drain_value_commit(desc, value);
2906 else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
2907 gpio_set_open_source_value_commit(desc, value);
2908 else
2909 gpiod_set_raw_value_commit(desc, value);
2910} 2932}
2911EXPORT_SYMBOL_GPL(gpiod_set_value); 2933EXPORT_SYMBOL_GPL(gpiod_set_value);
2912 2934
@@ -3234,9 +3256,7 @@ void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
3234{ 3256{
3235 might_sleep_if(extra_checks); 3257 might_sleep_if(extra_checks);
3236 VALIDATE_DESC_VOID(desc); 3258 VALIDATE_DESC_VOID(desc);
3237 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) 3259 gpiod_set_value_nocheck(desc, value);
3238 value = !value;
3239 gpiod_set_raw_value_commit(desc, value);
3240} 3260}
3241EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep); 3261EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep);
3242 3262
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index af48322839c3..6c44d1652139 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -228,7 +228,8 @@ static inline int gpio_chip_hwgpio(const struct gpio_desc *desc)
228 return desc - &desc->gdev->descs[0]; 228 return desc - &desc->gdev->descs[0];
229} 229}
230 230
231void devprop_gpiochip_set_names(struct gpio_chip *chip); 231void devprop_gpiochip_set_names(struct gpio_chip *chip,
232 const struct fwnode_handle *fwnode);
232 233
233/* With descriptor prefix */ 234/* With descriptor prefix */
234 235
diff --git a/drivers/gpu/drm/amd/acp/Makefile b/drivers/gpu/drm/amd/acp/Makefile
index 8a08e81ee90d..d4176a3fb706 100644
--- a/drivers/gpu/drm/amd/acp/Makefile
+++ b/drivers/gpu/drm/amd/acp/Makefile
@@ -1,4 +1,25 @@
1# 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
22#
2# Makefile for the ACP, which is a sub-component 23# Makefile for the ACP, which is a sub-component
3# of AMDSOC/AMDGPU drm driver. 24# of AMDSOC/AMDGPU drm driver.
4# It provides the HW control for ACP related functionalities. 25# It provides the HW control for ACP related functionalities.
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 78d609123420..90202cf4cd1e 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -1,4 +1,24 @@
1# SPDX-License-Identifier: GPL-2.0 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
2# 22#
3# Makefile for the drm device driver. This driver provides support for the 23# Makefile for the drm device driver. This driver provides support for the
4# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 24# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 6c570d4e4516..f8edf5483f11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1,4 +1,6 @@
1/* 1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
2 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
3 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
4 * to deal in the Software without restriction, including without limitation 6 * to deal in the Software without restriction, including without limitation
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index f337c316ec2c..06525f2c36c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -1,4 +1,26 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
2#if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) 24#if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
3#define _AMDGPU_TRACE_H_ 25#define _AMDGPU_TRACE_H_
4 26
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index da43813d67a4..5aeb5f8816f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2467,7 +2467,7 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
2467 PACKET3_MAP_QUEUES_PIPE(ring->pipe) | 2467 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
2468 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) | 2468 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
2469 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */ 2469 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
2470 PACKET3_MAP_QUEUES_ALLOC_FORMAT(1) | /* alloc format: all_on_one_pipe */ 2470 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
2471 PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */ 2471 PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
2472 PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */ 2472 PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
2473 amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index)); 2473 amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index 7bb0bc0ca3d6..342c2d937b17 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -1,4 +1,24 @@
1# SPDX-License-Identifier: GPL-2.0 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
2# 22#
3# Makefile for Heterogenous System Architecture support for AMD GPU devices 23# Makefile for Heterogenous System Architecture support for AMD GPU devices
4# 24#
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index 8ba37dd9cf7f..c27c81cdeed3 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -1,4 +1,25 @@
1# 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
22#
2# Makefile for the DAL (Display Abstract Layer), which is a sub-component 23# Makefile for the DAL (Display Abstract Layer), which is a sub-component
3# of the AMDGPU drm driver. 24# of the AMDGPU drm driver.
4# It provides the HW control for display related functionalities. 25# It provides the HW control for display related functionalities.
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index 4699e47aa76b..2b72009844f8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -1,4 +1,25 @@
1# 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
22#
2# Makefile for the 'dm' sub-component of DAL. 23# Makefile for the 'dm' sub-component of DAL.
3# It provides the control and status of dm blocks. 24# It provides the control and status of dm blocks.
4 25
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index f71fe6d2ddda..bb5fa895fb64 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2336,7 +2336,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2336 const struct dm_connector_state *dm_state) 2336 const struct dm_connector_state *dm_state)
2337{ 2337{
2338 struct drm_display_mode *preferred_mode = NULL; 2338 struct drm_display_mode *preferred_mode = NULL;
2339 const struct drm_connector *drm_connector; 2339 struct drm_connector *drm_connector;
2340 struct dc_stream_state *stream = NULL; 2340 struct dc_stream_state *stream = NULL;
2341 struct drm_display_mode mode = *drm_mode; 2341 struct drm_display_mode mode = *drm_mode;
2342 bool native_mode_found = false; 2342 bool native_mode_found = false;
@@ -2355,11 +2355,13 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2355 2355
2356 if (!aconnector->dc_sink) { 2356 if (!aconnector->dc_sink) {
2357 /* 2357 /*
2358 * Exclude MST from creating fake_sink 2358 * Create dc_sink when necessary to MST
2359 * TODO: need to enable MST into fake_sink feature 2359 * Don't apply fake_sink to MST
2360 */ 2360 */
2361 if (aconnector->mst_port) 2361 if (aconnector->mst_port) {
2362 goto stream_create_fail; 2362 dm_dp_mst_dc_sink_create(drm_connector);
2363 goto mst_dc_sink_create_done;
2364 }
2363 2365
2364 if (create_fake_sink(aconnector)) 2366 if (create_fake_sink(aconnector))
2365 goto stream_create_fail; 2367 goto stream_create_fail;
@@ -2410,6 +2412,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2410stream_create_fail: 2412stream_create_fail:
2411dm_state_null: 2413dm_state_null:
2412drm_connector_null: 2414drm_connector_null:
2415mst_dc_sink_create_done:
2413 return stream; 2416 return stream;
2414} 2417}
2415 2418
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 117521c6a6ed..0230250a1164 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -189,6 +189,8 @@ struct amdgpu_dm_connector {
189 struct mutex hpd_lock; 189 struct mutex hpd_lock;
190 190
191 bool fake_enable; 191 bool fake_enable;
192
193 bool mst_connected;
192}; 194};
193 195
194#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base) 196#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index f8efb98b1fa7..638c2c2b5cd7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -185,6 +185,42 @@ static int dm_connector_update_modes(struct drm_connector *connector,
185 return ret; 185 return ret;
186} 186}
187 187
188void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
189{
190 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
191 struct edid *edid;
192 struct dc_sink *dc_sink;
193 struct dc_sink_init_data init_params = {
194 .link = aconnector->dc_link,
195 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
196
197 edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
198
199 if (!edid) {
200 drm_mode_connector_update_edid_property(
201 &aconnector->base,
202 NULL);
203 return;
204 }
205
206 aconnector->edid = edid;
207
208 dc_sink = dc_link_add_remote_sink(
209 aconnector->dc_link,
210 (uint8_t *)aconnector->edid,
211 (aconnector->edid->extensions + 1) * EDID_LENGTH,
212 &init_params);
213
214 dc_sink->priv = aconnector;
215 aconnector->dc_sink = dc_sink;
216
217 amdgpu_dm_add_sink_to_freesync_module(
218 connector, aconnector->edid);
219
220 drm_mode_connector_update_edid_property(
221 &aconnector->base, aconnector->edid);
222}
223
188static int dm_dp_mst_get_modes(struct drm_connector *connector) 224static int dm_dp_mst_get_modes(struct drm_connector *connector)
189{ 225{
190 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 226 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -311,6 +347,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
311 drm_mode_connector_set_path_property(connector, pathprop); 347 drm_mode_connector_set_path_property(connector, pathprop);
312 348
313 drm_connector_list_iter_end(&conn_iter); 349 drm_connector_list_iter_end(&conn_iter);
350 aconnector->mst_connected = true;
314 return &aconnector->base; 351 return &aconnector->base;
315 } 352 }
316 } 353 }
@@ -363,6 +400,8 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
363 */ 400 */
364 amdgpu_dm_connector_funcs_reset(connector); 401 amdgpu_dm_connector_funcs_reset(connector);
365 402
403 aconnector->mst_connected = true;
404
366 DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n", 405 DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
367 aconnector, connector->base.id, aconnector->mst_port); 406 aconnector, connector->base.id, aconnector->mst_port);
368 407
@@ -394,6 +433,8 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
394 drm_mode_connector_update_edid_property( 433 drm_mode_connector_update_edid_property(
395 &aconnector->base, 434 &aconnector->base,
396 NULL); 435 NULL);
436
437 aconnector->mst_connected = false;
397} 438}
398 439
399static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) 440static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
@@ -404,10 +445,18 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
404 drm_kms_helper_hotplug_event(dev); 445 drm_kms_helper_hotplug_event(dev);
405} 446}
406 447
448static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
449{
450 mutex_lock(&connector->dev->mode_config.mutex);
451 drm_mode_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
452 mutex_unlock(&connector->dev->mode_config.mutex);
453}
454
407static void dm_dp_mst_register_connector(struct drm_connector *connector) 455static void dm_dp_mst_register_connector(struct drm_connector *connector)
408{ 456{
409 struct drm_device *dev = connector->dev; 457 struct drm_device *dev = connector->dev;
410 struct amdgpu_device *adev = dev->dev_private; 458 struct amdgpu_device *adev = dev->dev_private;
459 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
411 460
412 if (adev->mode_info.rfbdev) 461 if (adev->mode_info.rfbdev)
413 drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector); 462 drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
@@ -416,6 +465,8 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector)
416 465
417 drm_connector_register(connector); 466 drm_connector_register(connector);
418 467
468 if (aconnector->mst_connected)
469 dm_dp_mst_link_status_reset(connector);
419} 470}
420 471
421static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { 472static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 2da851b40042..8cf51da26657 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -31,5 +31,6 @@ struct amdgpu_dm_connector;
31 31
32void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, 32void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
33 struct amdgpu_dm_connector *aconnector); 33 struct amdgpu_dm_connector *aconnector);
34void dm_dp_mst_dc_sink_create(struct drm_connector *connector);
34 35
35#endif 36#endif
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 4f83e3011743..aed538a4d1ba 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -1,4 +1,25 @@
1# 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
22#
2# Makefile for Display Core (dc) component. 23# Makefile for Display Core (dc) component.
3# 24#
4 25
diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile
index 43c5ccdeeb72..6af8c8a9ad80 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile
@@ -1,4 +1,25 @@
1# 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
22#
2# Makefile for the 'utils' sub-component of DAL. 23# Makefile for the 'utils' sub-component of DAL.
3# It provides the general basic services required by other DAL 24# It provides the general basic services required by other DAL
4# subcomponents. 25# subcomponents.
diff --git a/drivers/gpu/drm/amd/display/dc/bios/Makefile b/drivers/gpu/drm/amd/display/dc/bios/Makefile
index 6ec815dce9cc..239e86bbec5a 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/bios/Makefile
@@ -1,4 +1,25 @@
1# 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
22#
2# Makefile for the 'bios' sub-component of DAL. 23# Makefile for the 'bios' sub-component of DAL.
3# It provides the parsing and executing controls for atom bios image. 24# It provides the parsing and executing controls for atom bios image.
4 25
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
index 41ef35995b02..7959e382ed28 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -1,4 +1,25 @@
1# 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
22#
2# Makefile for the 'calcs' sub-component of DAL. 23# Makefile for the 'calcs' sub-component of DAL.
3# It calculates Bandwidth and Watermarks values for HW programming 24# It calculates Bandwidth and Watermarks values for HW programming
4# 25#
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 3dce35e66b09..b142629a1058 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -900,6 +900,15 @@ bool dcn_validate_bandwidth(
900 v->override_vta_ps[input_idx] = pipe->plane_res.scl_data.taps.v_taps; 900 v->override_vta_ps[input_idx] = pipe->plane_res.scl_data.taps.v_taps;
901 v->override_hta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.h_taps_c; 901 v->override_hta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.h_taps_c;
902 v->override_vta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.v_taps_c; 902 v->override_vta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.v_taps_c;
903 /*
904 * Spreadsheet doesn't handle taps_c is one properly,
905 * need to force Chroma to always be scaled to pass
906 * bandwidth validation.
907 */
908 if (v->override_hta_pschroma[input_idx] == 1)
909 v->override_hta_pschroma[input_idx] = 2;
910 if (v->override_vta_pschroma[input_idx] == 1)
911 v->override_vta_pschroma[input_idx] = 2;
903 v->source_scan[input_idx] = (pipe->plane_state->rotation % 2) ? dcn_bw_vert : dcn_bw_hor; 912 v->source_scan[input_idx] = (pipe->plane_state->rotation % 2) ? dcn_bw_vert : dcn_bw_hor;
904 } 913 }
905 if (v->is_line_buffer_bpp_fixed == dcn_bw_yes) 914 if (v->is_line_buffer_bpp_fixed == dcn_bw_yes)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 6acee5426e4b..43c7a7fddb83 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -1,4 +1,26 @@
1/* 1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23/*
2 * dc_debug.c 24 * dc_debug.c
3 * 25 *
4 * Created on: Nov 3, 2016 26 * Created on: Nov 3, 2016
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index e27ed4a45265..42a111b9505d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1801,7 +1801,7 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
1801 link->link_enc->funcs->disable_output(link->link_enc, signal, link); 1801 link->link_enc->funcs->disable_output(link->link_enc, signal, link);
1802} 1802}
1803 1803
1804bool dp_active_dongle_validate_timing( 1804static bool dp_active_dongle_validate_timing(
1805 const struct dc_crtc_timing *timing, 1805 const struct dc_crtc_timing *timing,
1806 const struct dc_dongle_caps *dongle_caps) 1806 const struct dc_dongle_caps *dongle_caps)
1807{ 1807{
@@ -1833,6 +1833,8 @@ bool dp_active_dongle_validate_timing(
1833 /* Check Color Depth and Pixel Clock */ 1833 /* Check Color Depth and Pixel Clock */
1834 if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) 1834 if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
1835 required_pix_clk /= 2; 1835 required_pix_clk /= 2;
1836 else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
1837 required_pix_clk = required_pix_clk * 2 / 3;
1836 1838
1837 switch (timing->display_color_depth) { 1839 switch (timing->display_color_depth) {
1838 case COLOR_DEPTH_666: 1840 case COLOR_DEPTH_666:
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index b7422d3b71ef..928895809867 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1,5 +1,5 @@
1/* 1/*
2* Copyright 2012-15 Advanced Micro Devices, Inc. 2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 0d84b2a1ccfd..90e81f7ba919 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -1,4 +1,26 @@
1/* 1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23/*
2 * dc_helper.c 24 * dc_helper.c
3 * 25 *
4 * Created on: Aug 30, 2016 26 * Created on: Aug 30, 2016
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index 8abec0bed379..11401fd8e535 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -1,4 +1,25 @@
1# 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
22#
2# Makefile for common 'dce' logic 23# Makefile for common 'dce' logic
3# HW object file under this folder follow similar pattern for HW programming 24# HW object file under this folder follow similar pattern for HW programming
4# - register offset and/or shift + mask stored in the dec_hw struct 25# - register offset and/or shift + mask stored in the dec_hw struct
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/Makefile b/drivers/gpu/drm/amd/display/dc/dce100/Makefile
index ea40870624b3..a822d4e2a169 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce100/Makefile
@@ -1,4 +1,25 @@
1# 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
22#
2# Makefile for the 'controller' sub-component of DAL. 23# Makefile for the 'controller' sub-component of DAL.
3# It provides the control and status of HW CRTC block. 24# It provides the control and status of HW CRTC block.
4 25
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 90911258bdb3..3ea43e2a9450 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -1,5 +1,5 @@
1/* 1/*
2* Copyright 2012-15 Advanced Micro Devices, Inc. 2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h
index de8fdf438f9b..2f366d66635d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h
@@ -1,4 +1,27 @@
1/* 1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 *
23 */
24/*
2 * dce100_resource.h 25 * dce100_resource.h
3 * 26 *
4 * Created on: 2016-01-20 27 * Created on: 2016-01-20
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/Makefile b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
index 98d956e2f218..d564c0eb8b04 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
@@ -1,4 +1,25 @@
1# 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
22#
2# Makefile for the 'controller' sub-component of DAL. 23# Makefile for the 'controller' sub-component of DAL.
3# It provides the control and status of HW CRTC block. 24# It provides the control and status of HW CRTC block.
4 25
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 07ff8d2faf3f..d844fadcd56f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2866,16 +2866,19 @@ static void dce110_apply_ctx_for_surface(
2866 int num_planes, 2866 int num_planes,
2867 struct dc_state *context) 2867 struct dc_state *context)
2868{ 2868{
2869 int i, be_idx; 2869 int i;
2870 2870
2871 if (num_planes == 0) 2871 if (num_planes == 0)
2872 return; 2872 return;
2873 2873
2874 be_idx = -1;
2875 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2874 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2876 if (stream == context->res_ctx.pipe_ctx[i].stream) { 2875 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2877 be_idx = context->res_ctx.pipe_ctx[i].stream_res.tg->inst; 2876 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
2878 break; 2877
2878 if (stream == pipe_ctx->stream) {
2879 if (!pipe_ctx->top_pipe &&
2880 (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
2881 dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
2879 } 2882 }
2880 } 2883 }
2881 2884
@@ -2895,9 +2898,22 @@ static void dce110_apply_ctx_for_surface(
2895 context->stream_count); 2898 context->stream_count);
2896 2899
2897 dce110_program_front_end_for_pipe(dc, pipe_ctx); 2900 dce110_program_front_end_for_pipe(dc, pipe_ctx);
2901
2902 dc->hwss.update_plane_addr(dc, pipe_ctx);
2903
2898 program_surface_visibility(dc, pipe_ctx); 2904 program_surface_visibility(dc, pipe_ctx);
2899 2905
2900 } 2906 }
2907
2908 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2909 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2910 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
2911
2912 if ((stream == pipe_ctx->stream) &&
2913 (!pipe_ctx->top_pipe) &&
2914 (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
2915 dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
2916 }
2901} 2917}
2902 2918
2903static void dce110_power_down_fe(struct dc *dc, int fe_idx) 2919static void dce110_power_down_fe(struct dc *dc, int fe_idx)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 61adb8174ce0..42df17f9aa8d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -1,5 +1,5 @@
1/* 1/*
2* Copyright 2012-15 Advanced Micro Devices, Inc. 2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
index 07d9303d5477..59b4cd329715 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
@@ -1,3 +1,26 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
1#include "dm_services.h" 24#include "dm_services.h"
2 25
3/* include DCE11 register header files */ 26/* include DCE11 register header files */
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/Makefile b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
index 265ac4310d85..8e090446d511 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
@@ -1,4 +1,25 @@
1# 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
22#
2# Makefile for the 'controller' sub-component of DAL. 23# Makefile for the 'controller' sub-component of DAL.
3# It provides the control and status of HW CRTC block. 24# It provides the control and status of HW CRTC block.
4 25
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/Makefile b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
index 1779b963525c..37db1f8d45ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
@@ -1,4 +1,25 @@
1# 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
22#
2# Makefile for the 'controller' sub-component of DAL. 23# Makefile for the 'controller' sub-component of DAL.
3# It provides the control and status of HW CRTC block. 24# It provides the control and status of HW CRTC block.
4 25
@@ -8,4 +29,4 @@ dce120_hw_sequencer.o
8 29
9AMD_DAL_DCE120 = $(addprefix $(AMDDALPATH)/dc/dce120/,$(DCE120)) 30AMD_DAL_DCE120 = $(addprefix $(AMDDALPATH)/dc/dce120/,$(DCE120))
10 31
11AMD_DISPLAY_FILES += $(AMD_DAL_DCE120) \ No newline at end of file 32AMD_DISPLAY_FILES += $(AMD_DAL_DCE120)
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/Makefile b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
index c1105895e5fa..bc388aa4b2f5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
@@ -1,4 +1,25 @@
1# 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
22#
2# Makefile for the 'controller' sub-component of DAL. 23# Makefile for the 'controller' sub-component of DAL.
3# It provides the control and status of HW CRTC block. 24# It provides the control and status of HW CRTC block.
4 25
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index ebeb88283a14..f565a6042970 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -1,4 +1,25 @@
1# 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
22#
2# Makefile for DCN. 23# Makefile for DCN.
3 24
4DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \ 25DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 74e7c82bdc76..a9d55d0dd69e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -159,11 +159,10 @@ bool dpp_get_optimal_number_of_taps(
159 scl_data->taps.h_taps = 1; 159 scl_data->taps.h_taps = 1;
160 if (IDENTITY_RATIO(scl_data->ratios.vert)) 160 if (IDENTITY_RATIO(scl_data->ratios.vert))
161 scl_data->taps.v_taps = 1; 161 scl_data->taps.v_taps = 1;
162 /* 162 if (IDENTITY_RATIO(scl_data->ratios.horz_c))
163 * Spreadsheet doesn't handle taps_c is one properly, 163 scl_data->taps.h_taps_c = 1;
164 * need to force Chroma to always be scaled to pass 164 if (IDENTITY_RATIO(scl_data->ratios.vert_c))
165 * bandwidth validation. 165 scl_data->taps.v_taps_c = 1;
166 */
167 } 166 }
168 167
169 return true; 168 return true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index a9782b1aba47..34daf895f848 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -1360,7 +1360,7 @@ void dpp1_cm_set_output_csc_adjustment(
1360 1360
1361void dpp1_cm_set_output_csc_default( 1361void dpp1_cm_set_output_csc_default(
1362 struct dpp *dpp_base, 1362 struct dpp *dpp_base,
1363 const struct default_adjustment *default_adjust); 1363 enum dc_color_space colorspace);
1364 1364
1365void dpp1_cm_set_gamut_remap( 1365void dpp1_cm_set_gamut_remap(
1366 struct dpp *dpp, 1366 struct dpp *dpp,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index 40627c244bf5..ed1216b53465 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -225,14 +225,13 @@ void dpp1_cm_set_gamut_remap(
225 225
226void dpp1_cm_set_output_csc_default( 226void dpp1_cm_set_output_csc_default(
227 struct dpp *dpp_base, 227 struct dpp *dpp_base,
228 const struct default_adjustment *default_adjust) 228 enum dc_color_space colorspace)
229{ 229{
230 230
231 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 231 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
232 uint32_t ocsc_mode = 0; 232 uint32_t ocsc_mode = 0;
233 233
234 if (default_adjust != NULL) { 234 switch (colorspace) {
235 switch (default_adjust->out_color_space) {
236 case COLOR_SPACE_SRGB: 235 case COLOR_SPACE_SRGB:
237 case COLOR_SPACE_2020_RGB_FULLRANGE: 236 case COLOR_SPACE_2020_RGB_FULLRANGE:
238 ocsc_mode = 0; 237 ocsc_mode = 0;
@@ -253,7 +252,6 @@ void dpp1_cm_set_output_csc_default(
253 case COLOR_SPACE_UNKNOWN: 252 case COLOR_SPACE_UNKNOWN:
254 default: 253 default:
255 break; 254 break;
256 }
257 } 255 }
258 256
259 REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode); 257 REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 961ad5c3b454..05dc01e54531 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2097,6 +2097,8 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
2097 tbl_entry.color_space = color_space; 2097 tbl_entry.color_space = color_space;
2098 //tbl_entry.regval = matrix; 2098 //tbl_entry.regval = matrix;
2099 pipe_ctx->plane_res.dpp->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.dpp, &tbl_entry); 2099 pipe_ctx->plane_res.dpp->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.dpp, &tbl_entry);
2100 } else {
2101 pipe_ctx->plane_res.dpp->funcs->opp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
2100 } 2102 }
2101} 2103}
2102static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx) 2104static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 87bab8e8139f..3488af2b5786 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -1,4 +1,25 @@
1# 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
22#
2# Makefile for the 'utils' sub-component of DAL. 23# Makefile for the 'utils' sub-component of DAL.
3# It provides the general basic services required by other DAL 24# It provides the general basic services required by other DAL
4# subcomponents. 25# subcomponents.
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
index 70d01a9e9676..562ee189d780 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
@@ -1,4 +1,25 @@
1# 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
22#
2# Makefile for the 'gpio' sub-component of DAL. 23# Makefile for the 'gpio' sub-component of DAL.
3# It provides the control and status of HW GPIO pins. 24# It provides the control and status of HW GPIO pins.
4 25
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/Makefile b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
index 55603400acd9..352885cb4d07 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
@@ -1,4 +1,25 @@
1# 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
22#
2# Makefile for the 'i2c' sub-component of DAL. 23# Makefile for the 'i2c' sub-component of DAL.
3# It provides the control and status of HW i2c engine of the adapter. 24# It provides the control and status of HW i2c engine of the adapter.
4 25
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 83a68460edcd..9420dfb94d39 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -64,7 +64,7 @@ struct dpp_funcs {
64 64
65 void (*opp_set_csc_default)( 65 void (*opp_set_csc_default)(
66 struct dpp *dpp, 66 struct dpp *dpp,
67 const struct default_adjustment *default_adjust); 67 enum dc_color_space colorspace);
68 68
69 void (*opp_set_csc_adjustment)( 69 void (*opp_set_csc_adjustment)(
70 struct dpp *dpp, 70 struct dpp *dpp,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index 3d33bcda7059..498b7f05c5ca 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -1,4 +1,26 @@
1/* 1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23/*
2 * link_encoder.h 24 * link_encoder.h
3 * 25 *
4 * Created on: Oct 6, 2015 26 * Created on: Oct 6, 2015
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 3050afe8e8a9..b5db1692393c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -1,4 +1,26 @@
1/* 1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23/*
2 * stream_encoder.h 24 * stream_encoder.h
3 * 25 *
4 */ 26 */
diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile
index c7e93f7223bd..498515aad4a5 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile
@@ -1,4 +1,25 @@
1# 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
22#
2# Makefile for the 'audio' sub-component of DAL. 23# Makefile for the 'audio' sub-component of DAL.
3# It provides the control and status of HW adapter resources, 24# It provides the control and status of HW adapter resources,
4# that are global for the ASIC and sharable between pipes. 25# that are global for the ASIC and sharable between pipes.
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/Makefile b/drivers/gpu/drm/amd/display/dc/virtual/Makefile
index fc0b7318d9cc..07326d244d50 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/virtual/Makefile
@@ -1,4 +1,25 @@
1# 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
22#
2# Makefile for the virtual sub-component of DAL. 23# Makefile for the virtual sub-component of DAL.
3# It provides the control and status of HW CRTC block. 24# It provides the control and status of HW CRTC block.
4 25
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/Makefile b/drivers/gpu/drm/amd/display/modules/freesync/Makefile
index db8e0ff6d7a9..fb9a499780e8 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/Makefile
+++ b/drivers/gpu/drm/amd/display/modules/freesync/Makefile
@@ -1,4 +1,25 @@
1# 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
22#
2# Makefile for the 'freesync' sub-module of DAL. 23# Makefile for the 'freesync' sub-module of DAL.
3# 24#
4 25
diff --git a/drivers/gpu/drm/amd/lib/Makefile b/drivers/gpu/drm/amd/lib/Makefile
index 87cd7009e80f..690243001e1a 100644
--- a/drivers/gpu/drm/amd/lib/Makefile
+++ b/drivers/gpu/drm/amd/lib/Makefile
@@ -1,4 +1,25 @@
1# 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
22#
2# Makefile for AMD library routines, which are used by AMD driver 23# Makefile for AMD library routines, which are used by AMD driver
3# components. 24# components.
4# 25#
diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile
index 8c55c6e254d9..231785a9e24c 100644
--- a/drivers/gpu/drm/amd/powerplay/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/Makefile
@@ -1,4 +1,24 @@
1# SPDX-License-Identifier: GPL-2.0 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
2 22
3subdir-ccflags-y += \ 23subdir-ccflags-y += \
4 -I$(FULL_AMD_PATH)/powerplay/inc/ \ 24 -I$(FULL_AMD_PATH)/powerplay/inc/ \
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index 824fb6fe54ae..a212c27f2e17 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -1,4 +1,24 @@
1# SPDX-License-Identifier: GPL-2.0 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
2# 22#
3# Makefile for the 'hw manager' sub-component of powerplay. 23# Makefile for the 'hw manager' sub-component of powerplay.
4# It provides the hardware management services for the driver. 24# It provides the hardware management services for the driver.
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c
index 67fae834bc67..8de384bf9a8f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c
@@ -1,4 +1,26 @@
1// SPDX-License-Identifier: GPL-2.0 1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
2#include "pp_overdriver.h" 24#include "pp_overdriver.h"
3#include <linux/errno.h> 25#include <linux/errno.h>
4 26
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu72.h b/drivers/gpu/drm/amd/powerplay/inc/smu72.h
index 08cd70c75d8b..9ad1cefff79f 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu72.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu72.h
@@ -1,4 +1,26 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
2#ifndef SMU72_H 24#ifndef SMU72_H
3#define SMU72_H 25#define SMU72_H
4 26
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h
index b2edbc0c3c4d..2aefbb85f620 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h
@@ -1,4 +1,26 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
2#ifndef SMU72_DISCRETE_H 24#ifndef SMU72_DISCRETE_H
3#define SMU72_DISCRETE_H 25#define SMU72_DISCRETE_H
4 26
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index 30d3089d7dba..98e701e4f553 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -1,4 +1,24 @@
1# SPDX-License-Identifier: GPL-2.0 1#
2# Copyright 2017 Advanced Micro Devices, Inc.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# the rights to use, copy, modify, merge, publish, distribute, sublicense,
8# and/or sell copies of the Software, and to permit persons to whom the
9# Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice shall be included in
12# all copies or substantial portions of the Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20# OTHER DEALINGS IN THE SOFTWARE.
21#
2# 22#
3# Makefile for the 'smu manager' sub-component of powerplay. 23# Makefile for the 'smu manager' sub-component of powerplay.
4# It provides the smu management services for the driver. 24# It provides the smu management services for the driver.
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index 283a0dc25e84..07129e6c31a9 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -1,4 +1,26 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
2#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) 24#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
3#define _GPU_SCHED_TRACE_H_ 25#define _GPU_SCHED_TRACE_H_
4 26
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 2e065facdce7..a0f4d2a2a481 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -168,16 +168,23 @@ static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
168void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb, 168void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
169 int x, int y) 169 int x, int y)
170{ 170{
171 const struct drm_format_info *format = fb->format;
172 unsigned int num_planes = format->num_planes;
171 u32 addr = drm_fb_obj(fb)->dev_addr; 173 u32 addr = drm_fb_obj(fb)->dev_addr;
172 int num_planes = fb->format->num_planes;
173 int i; 174 int i;
174 175
175 if (num_planes > 3) 176 if (num_planes > 3)
176 num_planes = 3; 177 num_planes = 3;
177 178
178 for (i = 0; i < num_planes; i++) 179 addrs[0] = addr + fb->offsets[0] + y * fb->pitches[0] +
180 x * format->cpp[0];
181
182 y /= format->vsub;
183 x /= format->hsub;
184
185 for (i = 1; i < num_planes; i++)
179 addrs[i] = addr + fb->offsets[i] + y * fb->pitches[i] + 186 addrs[i] = addr + fb->offsets[i] + y * fb->pitches[i] +
180 x * fb->format->cpp[i]; 187 x * format->cpp[i];
181 for (; i < 3; i++) 188 for (; i < 3; i++)
182 addrs[i] = 0; 189 addrs[i] = 0;
183} 190}
@@ -744,15 +751,14 @@ void armada_drm_crtc_plane_disable(struct armada_crtc *dcrtc,
744 if (plane->fb) 751 if (plane->fb)
745 drm_framebuffer_put(plane->fb); 752 drm_framebuffer_put(plane->fb);
746 753
747 /* Power down the Y/U/V FIFOs */
748 sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
749
750 /* Power down most RAMs and FIFOs if this is the primary plane */ 754 /* Power down most RAMs and FIFOs if this is the primary plane */
751 if (plane->type == DRM_PLANE_TYPE_PRIMARY) { 755 if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
752 sram_para1 |= CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 | 756 sram_para1 = CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
753 CFG_PDWN32x32 | CFG_PDWN64x66; 757 CFG_PDWN32x32 | CFG_PDWN64x66;
754 dma_ctrl0_mask = CFG_GRA_ENA; 758 dma_ctrl0_mask = CFG_GRA_ENA;
755 } else { 759 } else {
760 /* Power down the Y/U/V FIFOs */
761 sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
756 dma_ctrl0_mask = CFG_DMA_ENA; 762 dma_ctrl0_mask = CFG_DMA_ENA;
757 } 763 }
758 764
@@ -1225,17 +1231,13 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
1225 1231
1226 ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc", 1232 ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
1227 dcrtc); 1233 dcrtc);
1228 if (ret < 0) { 1234 if (ret < 0)
1229 kfree(dcrtc); 1235 goto err_crtc;
1230 return ret;
1231 }
1232 1236
1233 if (dcrtc->variant->init) { 1237 if (dcrtc->variant->init) {
1234 ret = dcrtc->variant->init(dcrtc, dev); 1238 ret = dcrtc->variant->init(dcrtc, dev);
1235 if (ret) { 1239 if (ret)
1236 kfree(dcrtc); 1240 goto err_crtc;
1237 return ret;
1238 }
1239 } 1241 }
1240 1242
1241 /* Ensure AXI pipeline is enabled */ 1243 /* Ensure AXI pipeline is enabled */
@@ -1246,13 +1248,15 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
1246 dcrtc->crtc.port = port; 1248 dcrtc->crtc.port = port;
1247 1249
1248 primary = kzalloc(sizeof(*primary), GFP_KERNEL); 1250 primary = kzalloc(sizeof(*primary), GFP_KERNEL);
1249 if (!primary) 1251 if (!primary) {
1250 return -ENOMEM; 1252 ret = -ENOMEM;
1253 goto err_crtc;
1254 }
1251 1255
1252 ret = armada_drm_plane_init(primary); 1256 ret = armada_drm_plane_init(primary);
1253 if (ret) { 1257 if (ret) {
1254 kfree(primary); 1258 kfree(primary);
1255 return ret; 1259 goto err_crtc;
1256 } 1260 }
1257 1261
1258 ret = drm_universal_plane_init(drm, &primary->base, 0, 1262 ret = drm_universal_plane_init(drm, &primary->base, 0,
@@ -1263,7 +1267,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
1263 DRM_PLANE_TYPE_PRIMARY, NULL); 1267 DRM_PLANE_TYPE_PRIMARY, NULL);
1264 if (ret) { 1268 if (ret) {
1265 kfree(primary); 1269 kfree(primary);
1266 return ret; 1270 goto err_crtc;
1267 } 1271 }
1268 1272
1269 ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL, 1273 ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL,
@@ -1282,6 +1286,9 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
1282 1286
1283err_crtc_init: 1287err_crtc_init:
1284 primary->base.funcs->destroy(&primary->base); 1288 primary->base.funcs->destroy(&primary->base);
1289err_crtc:
1290 kfree(dcrtc);
1291
1285 return ret; 1292 return ret;
1286} 1293}
1287 1294
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
index bab11f483575..bfd3514fbe9b 100644
--- a/drivers/gpu/drm/armada/armada_crtc.h
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -42,6 +42,8 @@ struct armada_plane_work {
42}; 42};
43 43
44struct armada_plane_state { 44struct armada_plane_state {
45 u16 src_x;
46 u16 src_y;
45 u32 src_hw; 47 u32 src_hw;
46 u32 dst_hw; 48 u32 dst_hw;
47 u32 dst_yx; 49 u32 dst_yx;
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index b411b608821a..aba947696178 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -99,6 +99,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
99{ 99{
100 struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane); 100 struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
101 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); 101 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
102 const struct drm_format_info *format;
102 struct drm_rect src = { 103 struct drm_rect src = {
103 .x1 = src_x, 104 .x1 = src_x,
104 .y1 = src_y, 105 .y1 = src_y,
@@ -117,7 +118,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
117 }; 118 };
118 uint32_t val, ctrl0; 119 uint32_t val, ctrl0;
119 unsigned idx = 0; 120 unsigned idx = 0;
120 bool visible; 121 bool visible, fb_changed;
121 int ret; 122 int ret;
122 123
123 trace_armada_ovl_plane_update(plane, crtc, fb, 124 trace_armada_ovl_plane_update(plane, crtc, fb,
@@ -138,6 +139,18 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
138 if (!visible) 139 if (!visible)
139 ctrl0 &= ~CFG_DMA_ENA; 140 ctrl0 &= ~CFG_DMA_ENA;
140 141
142 /*
143 * Shifting a YUV packed format image by one pixel causes the U/V
144 * planes to swap. Compensate for it by also toggling the UV swap.
145 */
146 format = fb->format;
147 if (format->num_planes == 1 && src.x1 >> 16 & (format->hsub - 1))
148 ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
149
150 fb_changed = plane->fb != fb ||
151 dplane->base.state.src_x != src.x1 >> 16 ||
152 dplane->base.state.src_y != src.y1 >> 16;
153
141 if (!dcrtc->plane) { 154 if (!dcrtc->plane) {
142 dcrtc->plane = plane; 155 dcrtc->plane = plane;
143 armada_ovl_update_attr(&dplane->prop, dcrtc); 156 armada_ovl_update_attr(&dplane->prop, dcrtc);
@@ -145,7 +158,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
145 158
146 /* FIXME: overlay on an interlaced display */ 159 /* FIXME: overlay on an interlaced display */
147 /* Just updating the position/size? */ 160 /* Just updating the position/size? */
148 if (plane->fb == fb && dplane->base.state.ctrl0 == ctrl0) { 161 if (!fb_changed && dplane->base.state.ctrl0 == ctrl0) {
149 val = (drm_rect_height(&src) & 0xffff0000) | 162 val = (drm_rect_height(&src) & 0xffff0000) |
150 drm_rect_width(&src) >> 16; 163 drm_rect_width(&src) >> 16;
151 dplane->base.state.src_hw = val; 164 dplane->base.state.src_hw = val;
@@ -169,9 +182,8 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
169 if (armada_drm_plane_work_wait(&dplane->base, HZ / 25) == 0) 182 if (armada_drm_plane_work_wait(&dplane->base, HZ / 25) == 0)
170 armada_drm_plane_work_cancel(dcrtc, &dplane->base); 183 armada_drm_plane_work_cancel(dcrtc, &dplane->base);
171 184
172 if (plane->fb != fb) { 185 if (fb_changed) {
173 u32 addrs[3], pixel_format; 186 u32 addrs[3];
174 int num_planes, hsub;
175 187
176 /* 188 /*
177 * Take a reference on the new framebuffer - we want to 189 * Take a reference on the new framebuffer - we want to
@@ -182,23 +194,11 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
182 if (plane->fb) 194 if (plane->fb)
183 armada_ovl_retire_fb(dplane, plane->fb); 195 armada_ovl_retire_fb(dplane, plane->fb);
184 196
185 src_y = src.y1 >> 16; 197 dplane->base.state.src_y = src_y = src.y1 >> 16;
186 src_x = src.x1 >> 16; 198 dplane->base.state.src_x = src_x = src.x1 >> 16;
187 199
188 armada_drm_plane_calc_addrs(addrs, fb, src_x, src_y); 200 armada_drm_plane_calc_addrs(addrs, fb, src_x, src_y);
189 201
190 pixel_format = fb->format->format;
191 hsub = drm_format_horz_chroma_subsampling(pixel_format);
192 num_planes = fb->format->num_planes;
193
194 /*
195 * Annoyingly, shifting a YUYV-format image by one pixel
196 * causes the U/V planes to toggle. Toggle the UV swap.
197 * (Unfortunately, this causes momentary colour flickering.)
198 */
199 if (src_x & (hsub - 1) && num_planes == 1)
200 ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
201
202 armada_reg_queue_set(dplane->vbl.regs, idx, addrs[0], 202 armada_reg_queue_set(dplane->vbl.regs, idx, addrs[0],
203 LCD_SPU_DMA_START_ADDR_Y0); 203 LCD_SPU_DMA_START_ADDR_Y0);
204 armada_reg_queue_set(dplane->vbl.regs, idx, addrs[1], 204 armada_reg_queue_set(dplane->vbl.regs, idx, addrs[1],
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 5dd3f1cd074a..a8905049b9da 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -946,7 +946,9 @@ static int analogix_dp_get_modes(struct drm_connector *connector)
946 return 0; 946 return 0;
947 } 947 }
948 948
949 pm_runtime_get_sync(dp->dev);
949 edid = drm_get_edid(connector, &dp->aux.ddc); 950 edid = drm_get_edid(connector, &dp->aux.ddc);
951 pm_runtime_put(dp->dev);
950 if (edid) { 952 if (edid) {
951 drm_mode_connector_update_edid_property(&dp->connector, 953 drm_mode_connector_update_edid_property(&dp->connector,
952 edid); 954 edid);
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 25f4b2e9a44f..9ae236036e32 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -152,6 +152,25 @@ static void drm_connector_free(struct kref *kref)
152 connector->funcs->destroy(connector); 152 connector->funcs->destroy(connector);
153} 153}
154 154
155void drm_connector_free_work_fn(struct work_struct *work)
156{
157 struct drm_connector *connector, *n;
158 struct drm_device *dev =
159 container_of(work, struct drm_device, mode_config.connector_free_work);
160 struct drm_mode_config *config = &dev->mode_config;
161 unsigned long flags;
162 struct llist_node *freed;
163
164 spin_lock_irqsave(&config->connector_list_lock, flags);
165 freed = llist_del_all(&config->connector_free_list);
166 spin_unlock_irqrestore(&config->connector_list_lock, flags);
167
168 llist_for_each_entry_safe(connector, n, freed, free_node) {
169 drm_mode_object_unregister(dev, &connector->base);
170 connector->funcs->destroy(connector);
171 }
172}
173
155/** 174/**
156 * drm_connector_init - Init a preallocated connector 175 * drm_connector_init - Init a preallocated connector
157 * @dev: DRM device 176 * @dev: DRM device
@@ -529,6 +548,25 @@ void drm_connector_list_iter_begin(struct drm_device *dev,
529} 548}
530EXPORT_SYMBOL(drm_connector_list_iter_begin); 549EXPORT_SYMBOL(drm_connector_list_iter_begin);
531 550
551/*
552 * Extra-safe connector put function that works in any context. Should only be
553 * used from the connector_iter functions, where we never really expect to
554 * actually release the connector when dropping our final reference.
555 */
556static void
557__drm_connector_put_safe(struct drm_connector *conn)
558{
559 struct drm_mode_config *config = &conn->dev->mode_config;
560
561 lockdep_assert_held(&config->connector_list_lock);
562
563 if (!refcount_dec_and_test(&conn->base.refcount.refcount))
564 return;
565
566 llist_add(&conn->free_node, &config->connector_free_list);
567 schedule_work(&config->connector_free_work);
568}
569
532/** 570/**
533 * drm_connector_list_iter_next - return next connector 571 * drm_connector_list_iter_next - return next connector
534 * @iter: connectr_list iterator 572 * @iter: connectr_list iterator
@@ -558,10 +596,10 @@ drm_connector_list_iter_next(struct drm_connector_list_iter *iter)
558 596
559 /* loop until it's not a zombie connector */ 597 /* loop until it's not a zombie connector */
560 } while (!kref_get_unless_zero(&iter->conn->base.refcount)); 598 } while (!kref_get_unless_zero(&iter->conn->base.refcount));
561 spin_unlock_irqrestore(&config->connector_list_lock, flags);
562 599
563 if (old_conn) 600 if (old_conn)
564 drm_connector_put(old_conn); 601 __drm_connector_put_safe(old_conn);
602 spin_unlock_irqrestore(&config->connector_list_lock, flags);
565 603
566 return iter->conn; 604 return iter->conn;
567} 605}
@@ -578,9 +616,15 @@ EXPORT_SYMBOL(drm_connector_list_iter_next);
578 */ 616 */
579void drm_connector_list_iter_end(struct drm_connector_list_iter *iter) 617void drm_connector_list_iter_end(struct drm_connector_list_iter *iter)
580{ 618{
619 struct drm_mode_config *config = &iter->dev->mode_config;
620 unsigned long flags;
621
581 iter->dev = NULL; 622 iter->dev = NULL;
582 if (iter->conn) 623 if (iter->conn) {
583 drm_connector_put(iter->conn); 624 spin_lock_irqsave(&config->connector_list_lock, flags);
625 __drm_connector_put_safe(iter->conn);
626 spin_unlock_irqrestore(&config->connector_list_lock, flags);
627 }
584 lock_release(&connector_list_iter_dep_map, 0, _RET_IP_); 628 lock_release(&connector_list_iter_dep_map, 0, _RET_IP_);
585} 629}
586EXPORT_SYMBOL(drm_connector_list_iter_end); 630EXPORT_SYMBOL(drm_connector_list_iter_end);
@@ -1207,6 +1251,19 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
1207 if (edid) 1251 if (edid)
1208 size = EDID_LENGTH * (1 + edid->extensions); 1252 size = EDID_LENGTH * (1 + edid->extensions);
1209 1253
1254 /* Set the display info, using edid if available, otherwise
1255 * reseting the values to defaults. This duplicates the work
1256 * done in drm_add_edid_modes, but that function is not
1257 * consistently called before this one in all drivers and the
1258 * computation is cheap enough that it seems better to
1259 * duplicate it rather than attempt to ensure some arbitrary
1260 * ordering of calls.
1261 */
1262 if (edid)
1263 drm_add_display_info(connector, edid);
1264 else
1265 drm_reset_display_info(connector);
1266
1210 drm_object_property_set_value(&connector->base, 1267 drm_object_property_set_value(&connector->base,
1211 dev->mode_config.non_desktop_property, 1268 dev->mode_config.non_desktop_property,
1212 connector->display_info.non_desktop); 1269 connector->display_info.non_desktop);
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 9ebb8841778c..af00f42ba269 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -142,6 +142,7 @@ int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
142 uint64_t value); 142 uint64_t value);
143int drm_connector_create_standard_properties(struct drm_device *dev); 143int drm_connector_create_standard_properties(struct drm_device *dev);
144const char *drm_get_connector_force_name(enum drm_connector_force force); 144const char *drm_get_connector_force_name(enum drm_connector_force force);
145void drm_connector_free_work_fn(struct work_struct *work);
145 146
146/* IOCTL */ 147/* IOCTL */
147int drm_mode_connector_property_set_ioctl(struct drm_device *dev, 148int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 5dfe14763871..cb487148359a 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1731,7 +1731,7 @@ EXPORT_SYMBOL(drm_edid_duplicate);
1731 * 1731 *
1732 * Returns true if @vendor is in @edid, false otherwise 1732 * Returns true if @vendor is in @edid, false otherwise
1733 */ 1733 */
1734static bool edid_vendor(struct edid *edid, const char *vendor) 1734static bool edid_vendor(const struct edid *edid, const char *vendor)
1735{ 1735{
1736 char edid_vendor[3]; 1736 char edid_vendor[3];
1737 1737
@@ -1749,7 +1749,7 @@ static bool edid_vendor(struct edid *edid, const char *vendor)
1749 * 1749 *
1750 * This tells subsequent routines what fixes they need to apply. 1750 * This tells subsequent routines what fixes they need to apply.
1751 */ 1751 */
1752static u32 edid_get_quirks(struct edid *edid) 1752static u32 edid_get_quirks(const struct edid *edid)
1753{ 1753{
1754 const struct edid_quirk *quirk; 1754 const struct edid_quirk *quirk;
1755 int i; 1755 int i;
@@ -2813,7 +2813,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
2813/* 2813/*
2814 * Search EDID for CEA extension block. 2814 * Search EDID for CEA extension block.
2815 */ 2815 */
2816static u8 *drm_find_edid_extension(struct edid *edid, int ext_id) 2816static u8 *drm_find_edid_extension(const struct edid *edid, int ext_id)
2817{ 2817{
2818 u8 *edid_ext = NULL; 2818 u8 *edid_ext = NULL;
2819 int i; 2819 int i;
@@ -2835,12 +2835,12 @@ static u8 *drm_find_edid_extension(struct edid *edid, int ext_id)
2835 return edid_ext; 2835 return edid_ext;
2836} 2836}
2837 2837
2838static u8 *drm_find_cea_extension(struct edid *edid) 2838static u8 *drm_find_cea_extension(const struct edid *edid)
2839{ 2839{
2840 return drm_find_edid_extension(edid, CEA_EXT); 2840 return drm_find_edid_extension(edid, CEA_EXT);
2841} 2841}
2842 2842
2843static u8 *drm_find_displayid_extension(struct edid *edid) 2843static u8 *drm_find_displayid_extension(const struct edid *edid)
2844{ 2844{
2845 return drm_find_edid_extension(edid, DISPLAYID_EXT); 2845 return drm_find_edid_extension(edid, DISPLAYID_EXT);
2846} 2846}
@@ -4363,7 +4363,7 @@ drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db)
4363} 4363}
4364 4364
4365static void drm_parse_cea_ext(struct drm_connector *connector, 4365static void drm_parse_cea_ext(struct drm_connector *connector,
4366 struct edid *edid) 4366 const struct edid *edid)
4367{ 4367{
4368 struct drm_display_info *info = &connector->display_info; 4368 struct drm_display_info *info = &connector->display_info;
4369 const u8 *edid_ext; 4369 const u8 *edid_ext;
@@ -4397,11 +4397,33 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
4397 } 4397 }
4398} 4398}
4399 4399
4400static void drm_add_display_info(struct drm_connector *connector, 4400/* A connector has no EDID information, so we've got no EDID to compute quirks from. Reset
4401 struct edid *edid, u32 quirks) 4401 * all of the values which would have been set from EDID
4402 */
4403void
4404drm_reset_display_info(struct drm_connector *connector)
4402{ 4405{
4403 struct drm_display_info *info = &connector->display_info; 4406 struct drm_display_info *info = &connector->display_info;
4404 4407
4408 info->width_mm = 0;
4409 info->height_mm = 0;
4410
4411 info->bpc = 0;
4412 info->color_formats = 0;
4413 info->cea_rev = 0;
4414 info->max_tmds_clock = 0;
4415 info->dvi_dual = false;
4416
4417 info->non_desktop = 0;
4418}
4419EXPORT_SYMBOL_GPL(drm_reset_display_info);
4420
4421u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid)
4422{
4423 struct drm_display_info *info = &connector->display_info;
4424
4425 u32 quirks = edid_get_quirks(edid);
4426
4405 info->width_mm = edid->width_cm * 10; 4427 info->width_mm = edid->width_cm * 10;
4406 info->height_mm = edid->height_cm * 10; 4428 info->height_mm = edid->height_cm * 10;
4407 4429
@@ -4414,11 +4436,13 @@ static void drm_add_display_info(struct drm_connector *connector,
4414 4436
4415 info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP); 4437 info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
4416 4438
4439 DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop);
4440
4417 if (edid->revision < 3) 4441 if (edid->revision < 3)
4418 return; 4442 return quirks;
4419 4443
4420 if (!(edid->input & DRM_EDID_INPUT_DIGITAL)) 4444 if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
4421 return; 4445 return quirks;
4422 4446
4423 drm_parse_cea_ext(connector, edid); 4447 drm_parse_cea_ext(connector, edid);
4424 4448
@@ -4438,7 +4462,7 @@ static void drm_add_display_info(struct drm_connector *connector,
4438 4462
4439 /* Only defined for 1.4 with digital displays */ 4463 /* Only defined for 1.4 with digital displays */
4440 if (edid->revision < 4) 4464 if (edid->revision < 4)
4441 return; 4465 return quirks;
4442 4466
4443 switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) { 4467 switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
4444 case DRM_EDID_DIGITAL_DEPTH_6: 4468 case DRM_EDID_DIGITAL_DEPTH_6:
@@ -4473,7 +4497,9 @@ static void drm_add_display_info(struct drm_connector *connector,
4473 info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; 4497 info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
4474 if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422) 4498 if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
4475 info->color_formats |= DRM_COLOR_FORMAT_YCRCB422; 4499 info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
4500 return quirks;
4476} 4501}
4502EXPORT_SYMBOL_GPL(drm_add_display_info);
4477 4503
4478static int validate_displayid(u8 *displayid, int length, int idx) 4504static int validate_displayid(u8 *displayid, int length, int idx)
4479{ 4505{
@@ -4627,14 +4653,12 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
4627 return 0; 4653 return 0;
4628 } 4654 }
4629 4655
4630 quirks = edid_get_quirks(edid);
4631
4632 /* 4656 /*
4633 * CEA-861-F adds ycbcr capability map block, for HDMI 2.0 sinks. 4657 * CEA-861-F adds ycbcr capability map block, for HDMI 2.0 sinks.
4634 * To avoid multiple parsing of same block, lets parse that map 4658 * To avoid multiple parsing of same block, lets parse that map
4635 * from sink info, before parsing CEA modes. 4659 * from sink info, before parsing CEA modes.
4636 */ 4660 */
4637 drm_add_display_info(connector, edid, quirks); 4661 quirks = drm_add_display_info(connector, edid);
4638 4662
4639 /* 4663 /*
4640 * EDID spec says modes should be preferred in this order: 4664 * EDID spec says modes should be preferred in this order:
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index d1eb56a1eff4..1402c0e71b03 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -220,17 +220,6 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
220 220
221 mutex_lock(&dev->mode_config.idr_mutex); 221 mutex_lock(&dev->mode_config.idr_mutex);
222 222
223 /* Insert the new lessee into the tree */
224 id = idr_alloc(&(drm_lease_owner(lessor)->lessee_idr), lessee, 1, 0, GFP_KERNEL);
225 if (id < 0) {
226 error = id;
227 goto out_lessee;
228 }
229
230 lessee->lessee_id = id;
231 lessee->lessor = drm_master_get(lessor);
232 list_add_tail(&lessee->lessee_list, &lessor->lessees);
233
234 idr_for_each_entry(leases, entry, object) { 223 idr_for_each_entry(leases, entry, object) {
235 error = 0; 224 error = 0;
236 if (!idr_find(&dev->mode_config.crtc_idr, object)) 225 if (!idr_find(&dev->mode_config.crtc_idr, object))
@@ -246,6 +235,17 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
246 } 235 }
247 } 236 }
248 237
238 /* Insert the new lessee into the tree */
239 id = idr_alloc(&(drm_lease_owner(lessor)->lessee_idr), lessee, 1, 0, GFP_KERNEL);
240 if (id < 0) {
241 error = id;
242 goto out_lessee;
243 }
244
245 lessee->lessee_id = id;
246 lessee->lessor = drm_master_get(lessor);
247 list_add_tail(&lessee->lessee_list, &lessor->lessees);
248
249 /* Move the leases over */ 249 /* Move the leases over */
250 lessee->leases = *leases; 250 lessee->leases = *leases;
251 DRM_DEBUG_LEASE("new lessee %d %p, lessor %d %p\n", lessee->lessee_id, lessee, lessor->lessee_id, lessor); 251 DRM_DEBUG_LEASE("new lessee %d %p, lessor %d %p\n", lessee->lessee_id, lessee, lessor->lessee_id, lessor);
@@ -254,10 +254,10 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
254 return lessee; 254 return lessee;
255 255
256out_lessee: 256out_lessee:
257 drm_master_put(&lessee);
258
259 mutex_unlock(&dev->mode_config.idr_mutex); 257 mutex_unlock(&dev->mode_config.idr_mutex);
260 258
259 drm_master_put(&lessee);
260
261 return ERR_PTR(error); 261 return ERR_PTR(error);
262} 262}
263 263
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 61a1c8ea74bc..c3c79ee6119e 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -575,21 +575,23 @@ EXPORT_SYMBOL(drm_mm_remove_node);
575 */ 575 */
576void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) 576void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
577{ 577{
578 struct drm_mm *mm = old->mm;
579
578 DRM_MM_BUG_ON(!old->allocated); 580 DRM_MM_BUG_ON(!old->allocated);
579 581
580 *new = *old; 582 *new = *old;
581 583
582 list_replace(&old->node_list, &new->node_list); 584 list_replace(&old->node_list, &new->node_list);
583 rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree.rb_root); 585 rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
584 586
585 if (drm_mm_hole_follows(old)) { 587 if (drm_mm_hole_follows(old)) {
586 list_replace(&old->hole_stack, &new->hole_stack); 588 list_replace(&old->hole_stack, &new->hole_stack);
587 rb_replace_node(&old->rb_hole_size, 589 rb_replace_node(&old->rb_hole_size,
588 &new->rb_hole_size, 590 &new->rb_hole_size,
589 &old->mm->holes_size); 591 &mm->holes_size);
590 rb_replace_node(&old->rb_hole_addr, 592 rb_replace_node(&old->rb_hole_addr,
591 &new->rb_hole_addr, 593 &new->rb_hole_addr,
592 &old->mm->holes_addr); 594 &mm->holes_addr);
593 } 595 }
594 596
595 old->allocated = false; 597 old->allocated = false;
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index cda8bfab6d3b..256de7313612 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -382,6 +382,9 @@ void drm_mode_config_init(struct drm_device *dev)
382 ida_init(&dev->mode_config.connector_ida); 382 ida_init(&dev->mode_config.connector_ida);
383 spin_lock_init(&dev->mode_config.connector_list_lock); 383 spin_lock_init(&dev->mode_config.connector_list_lock);
384 384
385 init_llist_head(&dev->mode_config.connector_free_list);
386 INIT_WORK(&dev->mode_config.connector_free_work, drm_connector_free_work_fn);
387
385 drm_mode_create_standard_properties(dev); 388 drm_mode_create_standard_properties(dev);
386 389
387 /* Just to be sure */ 390 /* Just to be sure */
@@ -431,6 +434,8 @@ void drm_mode_config_cleanup(struct drm_device *dev)
431 drm_connector_put(connector); 434 drm_connector_put(connector);
432 } 435 }
433 drm_connector_list_iter_end(&conn_iter); 436 drm_connector_list_iter_end(&conn_iter);
437 /* connector_iter drops references in a work item. */
438 flush_work(&dev->mode_config.connector_free_work);
434 if (WARN_ON(!list_empty(&dev->mode_config.connector_list))) { 439 if (WARN_ON(!list_empty(&dev->mode_config.connector_list))) {
435 drm_connector_list_iter_begin(dev, &conn_iter); 440 drm_connector_list_iter_begin(dev, &conn_iter);
436 drm_for_each_connector_iter(connector, &conn_iter) 441 drm_for_each_connector_iter(connector, &conn_iter)
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 37a93cdffb4a..2c90519576a3 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -558,11 +558,10 @@ int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format)
558} 558}
559 559
560/* 560/*
561 * setplane_internal - setplane handler for internal callers 561 * __setplane_internal - setplane handler for internal callers
562 * 562 *
563 * Note that we assume an extra reference has already been taken on fb. If the 563 * This function will take a reference on the new fb for the plane
564 * update fails, this reference will be dropped before return; if it succeeds, 564 * on success.
565 * the previous framebuffer (if any) will be unreferenced instead.
566 * 565 *
567 * src_{x,y,w,h} are provided in 16.16 fixed point format 566 * src_{x,y,w,h} are provided in 16.16 fixed point format
568 */ 567 */
@@ -630,14 +629,12 @@ static int __setplane_internal(struct drm_plane *plane,
630 if (!ret) { 629 if (!ret) {
631 plane->crtc = crtc; 630 plane->crtc = crtc;
632 plane->fb = fb; 631 plane->fb = fb;
633 fb = NULL; 632 drm_framebuffer_get(plane->fb);
634 } else { 633 } else {
635 plane->old_fb = NULL; 634 plane->old_fb = NULL;
636 } 635 }
637 636
638out: 637out:
639 if (fb)
640 drm_framebuffer_put(fb);
641 if (plane->old_fb) 638 if (plane->old_fb)
642 drm_framebuffer_put(plane->old_fb); 639 drm_framebuffer_put(plane->old_fb);
643 plane->old_fb = NULL; 640 plane->old_fb = NULL;
@@ -685,6 +682,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
685 struct drm_plane *plane; 682 struct drm_plane *plane;
686 struct drm_crtc *crtc = NULL; 683 struct drm_crtc *crtc = NULL;
687 struct drm_framebuffer *fb = NULL; 684 struct drm_framebuffer *fb = NULL;
685 int ret;
688 686
689 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 687 if (!drm_core_check_feature(dev, DRIVER_MODESET))
690 return -EINVAL; 688 return -EINVAL;
@@ -717,15 +715,16 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
717 } 715 }
718 } 716 }
719 717
720 /* 718 ret = setplane_internal(plane, crtc, fb,
721 * setplane_internal will take care of deref'ing either the old or new 719 plane_req->crtc_x, plane_req->crtc_y,
722 * framebuffer depending on success. 720 plane_req->crtc_w, plane_req->crtc_h,
723 */ 721 plane_req->src_x, plane_req->src_y,
724 return setplane_internal(plane, crtc, fb, 722 plane_req->src_w, plane_req->src_h);
725 plane_req->crtc_x, plane_req->crtc_y, 723
726 plane_req->crtc_w, plane_req->crtc_h, 724 if (fb)
727 plane_req->src_x, plane_req->src_y, 725 drm_framebuffer_put(fb);
728 plane_req->src_w, plane_req->src_h); 726
727 return ret;
729} 728}
730 729
731static int drm_mode_cursor_universal(struct drm_crtc *crtc, 730static int drm_mode_cursor_universal(struct drm_crtc *crtc,
@@ -788,13 +787,12 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
788 src_h = fb->height << 16; 787 src_h = fb->height << 16;
789 } 788 }
790 789
791 /*
792 * setplane_internal will take care of deref'ing either the old or new
793 * framebuffer depending on success.
794 */
795 ret = __setplane_internal(crtc->cursor, crtc, fb, 790 ret = __setplane_internal(crtc->cursor, crtc, fb,
796 crtc_x, crtc_y, crtc_w, crtc_h, 791 crtc_x, crtc_y, crtc_w, crtc_h,
797 0, 0, src_w, src_h, ctx); 792 0, 0, src_w, src_h, ctx);
793
794 if (fb)
795 drm_framebuffer_put(fb);
798 796
799 /* Update successful; save new cursor position, if necessary */ 797 /* Update successful; save new cursor position, if necessary */
800 if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) { 798 if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index f776fc1cc543..cb4d09c70fd4 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -369,40 +369,26 @@ static const struct file_operations drm_syncobj_file_fops = {
369 .release = drm_syncobj_file_release, 369 .release = drm_syncobj_file_release,
370}; 370};
371 371
372static int drm_syncobj_alloc_file(struct drm_syncobj *syncobj)
373{
374 struct file *file = anon_inode_getfile("syncobj_file",
375 &drm_syncobj_file_fops,
376 syncobj, 0);
377 if (IS_ERR(file))
378 return PTR_ERR(file);
379
380 drm_syncobj_get(syncobj);
381 if (cmpxchg(&syncobj->file, NULL, file)) {
382 /* lost the race */
383 fput(file);
384 }
385
386 return 0;
387}
388
389int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd) 372int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
390{ 373{
391 int ret; 374 struct file *file;
392 int fd; 375 int fd;
393 376
394 fd = get_unused_fd_flags(O_CLOEXEC); 377 fd = get_unused_fd_flags(O_CLOEXEC);
395 if (fd < 0) 378 if (fd < 0)
396 return fd; 379 return fd;
397 380
398 if (!syncobj->file) { 381 file = anon_inode_getfile("syncobj_file",
399 ret = drm_syncobj_alloc_file(syncobj); 382 &drm_syncobj_file_fops,
400 if (ret) { 383 syncobj, 0);
401 put_unused_fd(fd); 384 if (IS_ERR(file)) {
402 return ret; 385 put_unused_fd(fd);
403 } 386 return PTR_ERR(file);
404 } 387 }
405 fd_install(fd, syncobj->file); 388
389 drm_syncobj_get(syncobj);
390 fd_install(fd, file);
391
406 *p_fd = fd; 392 *p_fd = fd;
407 return 0; 393 return 0;
408} 394}
@@ -422,31 +408,24 @@ static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
422 return ret; 408 return ret;
423} 409}
424 410
425static struct drm_syncobj *drm_syncobj_fdget(int fd)
426{
427 struct file *file = fget(fd);
428
429 if (!file)
430 return NULL;
431 if (file->f_op != &drm_syncobj_file_fops)
432 goto err;
433
434 return file->private_data;
435err:
436 fput(file);
437 return NULL;
438};
439
440static int drm_syncobj_fd_to_handle(struct drm_file *file_private, 411static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
441 int fd, u32 *handle) 412 int fd, u32 *handle)
442{ 413{
443 struct drm_syncobj *syncobj = drm_syncobj_fdget(fd); 414 struct drm_syncobj *syncobj;
415 struct file *file;
444 int ret; 416 int ret;
445 417
446 if (!syncobj) 418 file = fget(fd);
419 if (!file)
447 return -EINVAL; 420 return -EINVAL;
448 421
422 if (file->f_op != &drm_syncobj_file_fops) {
423 fput(file);
424 return -EINVAL;
425 }
426
449 /* take a reference to put in the idr */ 427 /* take a reference to put in the idr */
428 syncobj = file->private_data;
450 drm_syncobj_get(syncobj); 429 drm_syncobj_get(syncobj);
451 430
452 idr_preload(GFP_KERNEL); 431 idr_preload(GFP_KERNEL);
@@ -455,12 +434,14 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
455 spin_unlock(&file_private->syncobj_table_lock); 434 spin_unlock(&file_private->syncobj_table_lock);
456 idr_preload_end(); 435 idr_preload_end();
457 436
458 if (ret < 0) { 437 if (ret > 0) {
459 fput(syncobj->file); 438 *handle = ret;
460 return ret; 439 ret = 0;
461 } 440 } else
462 *handle = ret; 441 drm_syncobj_put(syncobj);
463 return 0; 442
443 fput(file);
444 return ret;
464} 445}
465 446
466static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private, 447static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 82b72425a42f..27e423b87266 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -37,8 +37,6 @@
37#define DRIVER_MAJOR 1 37#define DRIVER_MAJOR 1
38#define DRIVER_MINOR 0 38#define DRIVER_MINOR 0
39 39
40static struct device *exynos_drm_get_dma_device(void);
41
42int exynos_atomic_check(struct drm_device *dev, 40int exynos_atomic_check(struct drm_device *dev,
43 struct drm_atomic_state *state) 41 struct drm_atomic_state *state)
44{ 42{
@@ -148,7 +146,7 @@ static struct drm_driver exynos_drm_driver = {
148 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 146 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
149 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 147 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
150 .gem_prime_export = drm_gem_prime_export, 148 .gem_prime_export = drm_gem_prime_export,
151 .gem_prime_import = drm_gem_prime_import, 149 .gem_prime_import = exynos_drm_gem_prime_import,
152 .gem_prime_get_sg_table = exynos_drm_gem_prime_get_sg_table, 150 .gem_prime_get_sg_table = exynos_drm_gem_prime_get_sg_table,
153 .gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table, 151 .gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
154 .gem_prime_vmap = exynos_drm_gem_prime_vmap, 152 .gem_prime_vmap = exynos_drm_gem_prime_vmap,
@@ -301,6 +299,27 @@ static struct component_match *exynos_drm_match_add(struct device *dev)
301 return match ?: ERR_PTR(-ENODEV); 299 return match ?: ERR_PTR(-ENODEV);
302} 300}
303 301
302static struct device *exynos_drm_get_dma_device(void)
303{
304 int i;
305
306 for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
307 struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
308 struct device *dev;
309
310 if (!info->driver || !(info->flags & DRM_DMA_DEVICE))
311 continue;
312
313 while ((dev = bus_find_device(&platform_bus_type, NULL,
314 &info->driver->driver,
315 (void *)platform_bus_type.match))) {
316 put_device(dev);
317 return dev;
318 }
319 }
320 return NULL;
321}
322
304static int exynos_drm_bind(struct device *dev) 323static int exynos_drm_bind(struct device *dev)
305{ 324{
306 struct exynos_drm_private *private; 325 struct exynos_drm_private *private;
@@ -469,27 +488,6 @@ static struct platform_driver exynos_drm_platform_driver = {
469 }, 488 },
470}; 489};
471 490
472static struct device *exynos_drm_get_dma_device(void)
473{
474 int i;
475
476 for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
477 struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
478 struct device *dev;
479
480 if (!info->driver || !(info->flags & DRM_DMA_DEVICE))
481 continue;
482
483 while ((dev = bus_find_device(&platform_bus_type, NULL,
484 &info->driver->driver,
485 (void *)platform_bus_type.match))) {
486 put_device(dev);
487 return dev;
488 }
489 }
490 return NULL;
491}
492
493static void exynos_drm_unregister_devices(void) 491static void exynos_drm_unregister_devices(void)
494{ 492{
495 int i; 493 int i;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index c6847fa708fa..589d465a7f88 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -194,11 +194,6 @@ struct drm_exynos_file_private {
194/* 194/*
195 * Exynos drm private structure. 195 * Exynos drm private structure.
196 * 196 *
197 * @da_start: start address to device address space.
198 * with iommu, device address space starts from this address
199 * otherwise default one.
200 * @da_space_size: size of device address space.
201 * if 0 then default value is used for it.
202 * @pending: the crtcs that have pending updates to finish 197 * @pending: the crtcs that have pending updates to finish
203 * @lock: protect access to @pending 198 * @lock: protect access to @pending
204 * @wait: wait an atomic commit to finish 199 * @wait: wait an atomic commit to finish
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 077de014d610..11cc01b47bc0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -247,6 +247,15 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
247 if (IS_ERR(exynos_gem)) 247 if (IS_ERR(exynos_gem))
248 return exynos_gem; 248 return exynos_gem;
249 249
250 if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
251 /*
252 * when no IOMMU is available, all allocated buffers are
253 * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
254 */
255 flags &= ~EXYNOS_BO_NONCONTIG;
256 DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
257 }
258
250 /* set memory type and cache attribute from user side. */ 259 /* set memory type and cache attribute from user side. */
251 exynos_gem->flags = flags; 260 exynos_gem->flags = flags;
252 261
@@ -506,6 +515,12 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
506} 515}
507 516
508/* low-level interface prime helpers */ 517/* low-level interface prime helpers */
518struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
519 struct dma_buf *dma_buf)
520{
521 return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
522}
523
509struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj) 524struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
510{ 525{
511 struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj); 526 struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index e86d1a9518c3..5a4c7de80f65 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -117,6 +117,8 @@ int exynos_drm_gem_fault(struct vm_fault *vmf);
117int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); 117int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
118 118
119/* low-level interface prime helpers */ 119/* low-level interface prime helpers */
120struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
121 struct dma_buf *dma_buf);
120struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj); 122struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj);
121struct drm_gem_object * 123struct drm_gem_object *
122exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, 124exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index ab19545d59a1..4ce2e6bd0680 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -208,6 +208,20 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
208 return 0; 208 return 0;
209} 209}
210 210
211static int emulate_pci_rom_bar_write(struct intel_vgpu *vgpu,
212 unsigned int offset, void *p_data, unsigned int bytes)
213{
214 u32 *pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);
215 u32 new = *(u32 *)(p_data);
216
217 if ((new & PCI_ROM_ADDRESS_MASK) == PCI_ROM_ADDRESS_MASK)
218 /* We don't have rom, return size of 0. */
219 *pval = 0;
220 else
221 vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
222 return 0;
223}
224
211static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, 225static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
212 void *p_data, unsigned int bytes) 226 void *p_data, unsigned int bytes)
213{ 227{
@@ -300,6 +314,11 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
300 } 314 }
301 315
302 switch (rounddown(offset, 4)) { 316 switch (rounddown(offset, 4)) {
317 case PCI_ROM_ADDRESS:
318 if (WARN_ON(!IS_ALIGNED(offset, 4)))
319 return -EINVAL;
320 return emulate_pci_rom_bar_write(vgpu, offset, p_data, bytes);
321
303 case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5: 322 case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
304 if (WARN_ON(!IS_ALIGNED(offset, 4))) 323 if (WARN_ON(!IS_ALIGNED(offset, 4)))
305 return -EINVAL; 324 return -EINVAL;
@@ -375,6 +394,8 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
375 pci_resource_len(gvt->dev_priv->drm.pdev, 0); 394 pci_resource_len(gvt->dev_priv->drm.pdev, 0);
376 vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size = 395 vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
377 pci_resource_len(gvt->dev_priv->drm.pdev, 2); 396 pci_resource_len(gvt->dev_priv->drm.pdev, 2);
397
398 memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
378} 399}
379 400
380/** 401/**
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 85d4c57870fb..49af94627c8a 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2777,12 +2777,12 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2777} 2777}
2778 2778
2779static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt, 2779static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
2780 unsigned int opcode, int rings) 2780 unsigned int opcode, unsigned long rings)
2781{ 2781{
2782 struct cmd_info *info = NULL; 2782 struct cmd_info *info = NULL;
2783 unsigned int ring; 2783 unsigned int ring;
2784 2784
2785 for_each_set_bit(ring, (unsigned long *)&rings, I915_NUM_ENGINES) { 2785 for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
2786 info = find_cmd_entry(gvt, opcode, ring); 2786 info = find_cmd_entry(gvt, opcode, ring);
2787 if (info) 2787 if (info)
2788 break; 2788 break;
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 355120865efd..309f3fa6794a 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -266,6 +266,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
266 /* Clear host CRT status, so guest couldn't detect this host CRT. */ 266 /* Clear host CRT status, so guest couldn't detect this host CRT. */
267 if (IS_BROADWELL(dev_priv)) 267 if (IS_BROADWELL(dev_priv))
268 vgpu_vreg(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK; 268 vgpu_vreg(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
269
270 vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
269} 271}
270 272
271static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num) 273static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
@@ -282,7 +284,6 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
282static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, 284static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
283 int type, unsigned int resolution) 285 int type, unsigned int resolution)
284{ 286{
285 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
286 struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); 287 struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
287 288
288 if (WARN_ON(resolution >= GVT_EDID_NUM)) 289 if (WARN_ON(resolution >= GVT_EDID_NUM))
@@ -308,7 +309,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
308 port->type = type; 309 port->type = type;
309 310
310 emulate_monitor_status_change(vgpu); 311 emulate_monitor_status_change(vgpu);
311 vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; 312
312 return 0; 313 return 0;
313} 314}
314 315
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 8e331142badb..64d67ff9bf08 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1359,12 +1359,15 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
1359 return ret; 1359 return ret;
1360 } else { 1360 } else {
1361 if (!test_bit(index, spt->post_shadow_bitmap)) { 1361 if (!test_bit(index, spt->post_shadow_bitmap)) {
1362 int type = spt->shadow_page.type;
1363
1362 ppgtt_get_shadow_entry(spt, &se, index); 1364 ppgtt_get_shadow_entry(spt, &se, index);
1363 ret = ppgtt_handle_guest_entry_removal(gpt, &se, index); 1365 ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
1364 if (ret) 1366 if (ret)
1365 return ret; 1367 return ret;
1368 ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
1369 ppgtt_set_shadow_entry(spt, &se, index);
1366 } 1370 }
1367
1368 ppgtt_set_post_shadow(spt, index); 1371 ppgtt_set_post_shadow(spt, index);
1369 } 1372 }
1370 1373
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 44cd5ff5e97d..1f840f6b81bb 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -137,17 +137,26 @@ static int new_mmio_info(struct intel_gvt *gvt,
137 return 0; 137 return 0;
138} 138}
139 139
140static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg) 140/**
141 * intel_gvt_render_mmio_to_ring_id - convert a mmio offset into ring id
142 * @gvt: a GVT device
143 * @offset: register offset
144 *
145 * Returns:
146 * Ring ID on success, negative error code if failed.
147 */
148int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
149 unsigned int offset)
141{ 150{
142 enum intel_engine_id id; 151 enum intel_engine_id id;
143 struct intel_engine_cs *engine; 152 struct intel_engine_cs *engine;
144 153
145 reg &= ~GENMASK(11, 0); 154 offset &= ~GENMASK(11, 0);
146 for_each_engine(engine, gvt->dev_priv, id) { 155 for_each_engine(engine, gvt->dev_priv, id) {
147 if (engine->mmio_base == reg) 156 if (engine->mmio_base == offset)
148 return id; 157 return id;
149 } 158 }
150 return -1; 159 return -ENODEV;
151} 160}
152 161
153#define offset_to_fence_num(offset) \ 162#define offset_to_fence_num(offset) \
@@ -1398,18 +1407,36 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
1398static int mmio_read_from_hw(struct intel_vgpu *vgpu, 1407static int mmio_read_from_hw(struct intel_vgpu *vgpu,
1399 unsigned int offset, void *p_data, unsigned int bytes) 1408 unsigned int offset, void *p_data, unsigned int bytes)
1400{ 1409{
1401 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 1410 struct intel_gvt *gvt = vgpu->gvt;
1411 struct drm_i915_private *dev_priv = gvt->dev_priv;
1412 int ring_id;
1413 u32 ring_base;
1414
1415 ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);
1416 /**
1417 * Read HW reg in following case
1418 * a. the offset isn't a ring mmio
1419 * b. the offset's ring is running on hw.
1420 * c. the offset is ring time stamp mmio
1421 */
1422 if (ring_id >= 0)
1423 ring_base = dev_priv->engine[ring_id]->mmio_base;
1424
1425 if (ring_id < 0 || vgpu == gvt->scheduler.engine_owner[ring_id] ||
1426 offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) ||
1427 offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base))) {
1428 mmio_hw_access_pre(dev_priv);
1429 vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
1430 mmio_hw_access_post(dev_priv);
1431 }
1402 1432
1403 mmio_hw_access_pre(dev_priv);
1404 vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
1405 mmio_hw_access_post(dev_priv);
1406 return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); 1433 return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
1407} 1434}
1408 1435
1409static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 1436static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1410 void *p_data, unsigned int bytes) 1437 void *p_data, unsigned int bytes)
1411{ 1438{
1412 int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset); 1439 int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
1413 struct intel_vgpu_execlist *execlist; 1440 struct intel_vgpu_execlist *execlist;
1414 u32 data = *(u32 *)p_data; 1441 u32 data = *(u32 *)p_data;
1415 int ret = 0; 1442 int ret = 0;
@@ -1436,7 +1463,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1436 void *p_data, unsigned int bytes) 1463 void *p_data, unsigned int bytes)
1437{ 1464{
1438 u32 data = *(u32 *)p_data; 1465 u32 data = *(u32 *)p_data;
1439 int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset); 1466 int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
1440 bool enable_execlist; 1467 bool enable_execlist;
1441 1468
1442 write_vreg(vgpu, offset, p_data, bytes); 1469 write_vreg(vgpu, offset, p_data, bytes);
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 32cd64ddad26..dbc04ad2c7a1 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -65,6 +65,8 @@ struct intel_gvt_mmio_info {
65 struct hlist_node node; 65 struct hlist_node node;
66}; 66};
67 67
68int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
69 unsigned int reg);
68unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt); 70unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
69bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device); 71bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
70 72
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 3ac1dc97a7a0..69f8f0d155b9 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -131,6 +131,20 @@ static inline bool is_gvt_request(struct drm_i915_gem_request *req)
131 return i915_gem_context_force_single_submission(req->ctx); 131 return i915_gem_context_force_single_submission(req->ctx);
132} 132}
133 133
134static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
135{
136 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
137 u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
138 i915_reg_t reg;
139
140 reg = RING_INSTDONE(ring_base);
141 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
142 reg = RING_ACTHD(ring_base);
143 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
144 reg = RING_ACTHD_UDW(ring_base);
145 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
146}
147
134static int shadow_context_status_change(struct notifier_block *nb, 148static int shadow_context_status_change(struct notifier_block *nb,
135 unsigned long action, void *data) 149 unsigned long action, void *data)
136{ 150{
@@ -175,9 +189,12 @@ static int shadow_context_status_change(struct notifier_block *nb,
175 atomic_set(&workload->shadow_ctx_active, 1); 189 atomic_set(&workload->shadow_ctx_active, 1);
176 break; 190 break;
177 case INTEL_CONTEXT_SCHEDULE_OUT: 191 case INTEL_CONTEXT_SCHEDULE_OUT:
178 case INTEL_CONTEXT_SCHEDULE_PREEMPTED: 192 save_ring_hw_state(workload->vgpu, ring_id);
179 atomic_set(&workload->shadow_ctx_active, 0); 193 atomic_set(&workload->shadow_ctx_active, 0);
180 break; 194 break;
195 case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
196 save_ring_hw_state(workload->vgpu, ring_id);
197 break;
181 default: 198 default:
182 WARN_ON(1); 199 WARN_ON(1);
183 return NOTIFY_OK; 200 return NOTIFY_OK;
@@ -740,6 +757,9 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
740 if (IS_ERR(vgpu->shadow_ctx)) 757 if (IS_ERR(vgpu->shadow_ctx))
741 return PTR_ERR(vgpu->shadow_ctx); 758 return PTR_ERR(vgpu->shadow_ctx);
742 759
760 if (INTEL_INFO(vgpu->gvt->dev_priv)->has_logical_ring_preemption)
761 vgpu->shadow_ctx->priority = INT_MAX;
762
743 vgpu->shadow_ctx->engine[RCS].initialised = true; 763 vgpu->shadow_ctx->engine[RCS].initialised = true;
744 764
745 bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES); 765 bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 54b5d4c582b6..e143004e66d5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2368,6 +2368,9 @@ struct drm_i915_private {
2368 */ 2368 */
2369 struct workqueue_struct *wq; 2369 struct workqueue_struct *wq;
2370 2370
2371 /* ordered wq for modesets */
2372 struct workqueue_struct *modeset_wq;
2373
2371 /* Display functions */ 2374 /* Display functions */
2372 struct drm_i915_display_funcs display; 2375 struct drm_i915_display_funcs display;
2373 2376
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3a140eedfc83..5cfba89ed586 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -330,17 +330,10 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
330 * must wait for all rendering to complete to the object (as unbinding 330 * must wait for all rendering to complete to the object (as unbinding
331 * must anyway), and retire the requests. 331 * must anyway), and retire the requests.
332 */ 332 */
333 ret = i915_gem_object_wait(obj, 333 ret = i915_gem_object_set_to_cpu_domain(obj, false);
334 I915_WAIT_INTERRUPTIBLE |
335 I915_WAIT_LOCKED |
336 I915_WAIT_ALL,
337 MAX_SCHEDULE_TIMEOUT,
338 NULL);
339 if (ret) 334 if (ret)
340 return ret; 335 return ret;
341 336
342 i915_gem_retire_requests(to_i915(obj->base.dev));
343
344 while ((vma = list_first_entry_or_null(&obj->vma_list, 337 while ((vma = list_first_entry_or_null(&obj->vma_list,
345 struct i915_vma, 338 struct i915_vma,
346 obj_link))) { 339 obj_link))) {
@@ -474,7 +467,7 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
474 struct drm_i915_gem_request *rq; 467 struct drm_i915_gem_request *rq;
475 struct intel_engine_cs *engine; 468 struct intel_engine_cs *engine;
476 469
477 if (!dma_fence_is_i915(fence)) 470 if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
478 return; 471 return;
479 472
480 rq = to_request(fence); 473 rq = to_request(fence);
@@ -4712,17 +4705,19 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
4712 * state. Fortunately, the kernel_context is disposable and we do 4705 * state. Fortunately, the kernel_context is disposable and we do
4713 * not rely on its state. 4706 * not rely on its state.
4714 */ 4707 */
4715 ret = i915_gem_switch_to_kernel_context(dev_priv); 4708 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
4716 if (ret) 4709 ret = i915_gem_switch_to_kernel_context(dev_priv);
4717 goto err_unlock; 4710 if (ret)
4711 goto err_unlock;
4718 4712
4719 ret = i915_gem_wait_for_idle(dev_priv, 4713 ret = i915_gem_wait_for_idle(dev_priv,
4720 I915_WAIT_INTERRUPTIBLE | 4714 I915_WAIT_INTERRUPTIBLE |
4721 I915_WAIT_LOCKED); 4715 I915_WAIT_LOCKED);
4722 if (ret && ret != -EIO) 4716 if (ret && ret != -EIO)
4723 goto err_unlock; 4717 goto err_unlock;
4724 4718
4725 assert_kernel_context_is_current(dev_priv); 4719 assert_kernel_context_is_current(dev_priv);
4720 }
4726 i915_gem_contexts_lost(dev_priv); 4721 i915_gem_contexts_lost(dev_priv);
4727 mutex_unlock(&dev->struct_mutex); 4722 mutex_unlock(&dev->struct_mutex);
4728 4723
@@ -4946,8 +4941,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
4946{ 4941{
4947 int ret; 4942 int ret;
4948 4943
4949 mutex_lock(&dev_priv->drm.struct_mutex);
4950
4951 /* 4944 /*
4952 * We need to fallback to 4K pages since gvt gtt handling doesn't 4945 * We need to fallback to 4K pages since gvt gtt handling doesn't
4953 * support huge page entries - we will need to check either hypervisor 4946 * support huge page entries - we will need to check either hypervisor
@@ -4967,18 +4960,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
4967 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup; 4960 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
4968 } 4961 }
4969 4962
4963 ret = i915_gem_init_userptr(dev_priv);
4964 if (ret)
4965 return ret;
4966
4970 /* This is just a security blanket to placate dragons. 4967 /* This is just a security blanket to placate dragons.
4971 * On some systems, we very sporadically observe that the first TLBs 4968 * On some systems, we very sporadically observe that the first TLBs
4972 * used by the CS may be stale, despite us poking the TLB reset. If 4969 * used by the CS may be stale, despite us poking the TLB reset. If
4973 * we hold the forcewake during initialisation these problems 4970 * we hold the forcewake during initialisation these problems
4974 * just magically go away. 4971 * just magically go away.
4975 */ 4972 */
4973 mutex_lock(&dev_priv->drm.struct_mutex);
4976 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4974 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4977 4975
4978 ret = i915_gem_init_userptr(dev_priv);
4979 if (ret)
4980 goto out_unlock;
4981
4982 ret = i915_gem_init_ggtt(dev_priv); 4976 ret = i915_gem_init_ggtt(dev_priv);
4983 if (ret) 4977 if (ret)
4984 goto out_unlock; 4978 goto out_unlock;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 68a58cce6ab1..7923dfd9963c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2951,9 +2951,6 @@ enum i915_power_well_id {
2951#define ILK_DPFC_CHICKEN _MMIO(0x43224) 2951#define ILK_DPFC_CHICKEN _MMIO(0x43224)
2952#define ILK_DPFC_DISABLE_DUMMY0 (1<<8) 2952#define ILK_DPFC_DISABLE_DUMMY0 (1<<8)
2953#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1<<23) 2953#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1<<23)
2954#define GLK_SKIP_SEG_EN (1<<12)
2955#define GLK_SKIP_SEG_COUNT_MASK (3<<10)
2956#define GLK_SKIP_SEG_COUNT(x) ((x)<<10)
2957#define ILK_FBC_RT_BASE _MMIO(0x2128) 2954#define ILK_FBC_RT_BASE _MMIO(0x2128)
2958#define ILK_FBC_RT_VALID (1<<0) 2955#define ILK_FBC_RT_VALID (1<<0)
2959#define SNB_FBC_FRONT_BUFFER (1<<1) 2956#define SNB_FBC_FRONT_BUFFER (1<<1)
@@ -6980,6 +6977,7 @@ enum {
6980#define RESET_PCH_HANDSHAKE_ENABLE (1<<4) 6977#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
6981 6978
6982#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430) 6979#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
6980#define SKL_SELECT_ALTERNATE_DC_EXIT (1<<30)
6983#define MASK_WAKEMEM (1<<13) 6981#define MASK_WAKEMEM (1<<13)
6984 6982
6985#define SKL_DFSM _MMIO(0x51000) 6983#define SKL_DFSM _MMIO(0x51000)
@@ -7029,6 +7027,8 @@ enum {
7029#define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308) 7027#define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308)
7030#define DISABLE_PIXEL_MASK_CAMMING (1<<14) 7028#define DISABLE_PIXEL_MASK_CAMMING (1<<14)
7031 7029
7030#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c)
7031
7032#define GEN7_L3SQCREG1 _MMIO(0xB010) 7032#define GEN7_L3SQCREG1 _MMIO(0xB010)
7033#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 7033#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
7034 7034
@@ -8525,6 +8525,7 @@ enum skl_power_gate {
8525#define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22) 8525#define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22)
8526#define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22) 8526#define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22)
8527#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20) 8527#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20)
8528#define CDCLK_DIVMUX_CD_OVERRIDE (1<<19)
8528#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3) 8529#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
8529#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16) 8530#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16)
8530#define CDCLK_FREQ_DECIMAL_MASK (0x7ff) 8531#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index e8ca67a129d2..ac236b88c99c 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -367,6 +367,7 @@ struct i915_sw_dma_fence_cb {
367 struct dma_fence *dma; 367 struct dma_fence *dma;
368 struct timer_list timer; 368 struct timer_list timer;
369 struct irq_work work; 369 struct irq_work work;
370 struct rcu_head rcu;
370}; 371};
371 372
372static void timer_i915_sw_fence_wake(struct timer_list *t) 373static void timer_i915_sw_fence_wake(struct timer_list *t)
@@ -406,7 +407,7 @@ static void irq_i915_sw_fence_work(struct irq_work *wrk)
406 del_timer_sync(&cb->timer); 407 del_timer_sync(&cb->timer);
407 dma_fence_put(cb->dma); 408 dma_fence_put(cb->dma);
408 409
409 kfree(cb); 410 kfree_rcu(cb, rcu);
410} 411}
411 412
412int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, 413int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 5f8b9f1f40f1..bcbc7abe6693 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -186,7 +186,7 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
186 struct intel_wait *wait, *n, *first; 186 struct intel_wait *wait, *n, *first;
187 187
188 if (!b->irq_armed) 188 if (!b->irq_armed)
189 return; 189 goto wakeup_signaler;
190 190
191 /* We only disarm the irq when we are idle (all requests completed), 191 /* We only disarm the irq when we are idle (all requests completed),
192 * so if the bottom-half remains asleep, it missed the request 192 * so if the bottom-half remains asleep, it missed the request
@@ -208,6 +208,14 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
208 b->waiters = RB_ROOT; 208 b->waiters = RB_ROOT;
209 209
210 spin_unlock_irq(&b->rb_lock); 210 spin_unlock_irq(&b->rb_lock);
211
212 /*
213 * The signaling thread may be asleep holding a reference to a request,
214 * that had its signaling cancelled prior to being preempted. We need
215 * to kick the signaler, just in case, to release any such reference.
216 */
217wakeup_signaler:
218 wake_up_process(b->signaler);
211} 219}
212 220
213static bool use_fake_irq(const struct intel_breadcrumbs *b) 221static bool use_fake_irq(const struct intel_breadcrumbs *b)
@@ -651,23 +659,15 @@ static int intel_breadcrumbs_signaler(void *arg)
651 } 659 }
652 660
653 if (unlikely(do_schedule)) { 661 if (unlikely(do_schedule)) {
654 DEFINE_WAIT(exec);
655
656 if (kthread_should_park()) 662 if (kthread_should_park())
657 kthread_parkme(); 663 kthread_parkme();
658 664
659 if (kthread_should_stop()) { 665 if (unlikely(kthread_should_stop())) {
660 GEM_BUG_ON(request); 666 i915_gem_request_put(request);
661 break; 667 break;
662 } 668 }
663 669
664 if (request)
665 add_wait_queue(&request->execute, &exec);
666
667 schedule(); 670 schedule();
668
669 if (request)
670 remove_wait_queue(&request->execute, &exec);
671 } 671 }
672 i915_gem_request_put(request); 672 i915_gem_request_put(request);
673 } while (1); 673 } while (1);
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index b2a6d62b71c0..60cf4e58389a 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -860,16 +860,10 @@ static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv,
860 860
861static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco) 861static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
862{ 862{
863 int min_cdclk = skl_calc_cdclk(0, vco);
864 u32 val; 863 u32 val;
865 864
866 WARN_ON(vco != 8100000 && vco != 8640000); 865 WARN_ON(vco != 8100000 && vco != 8640000);
867 866
868 /* select the minimum CDCLK before enabling DPLL 0 */
869 val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
870 I915_WRITE(CDCLK_CTL, val);
871 POSTING_READ(CDCLK_CTL);
872
873 /* 867 /*
874 * We always enable DPLL0 with the lowest link rate possible, but still 868 * We always enable DPLL0 with the lowest link rate possible, but still
875 * taking into account the VCO required to operate the eDP panel at the 869 * taking into account the VCO required to operate the eDP panel at the
@@ -923,7 +917,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
923{ 917{
924 int cdclk = cdclk_state->cdclk; 918 int cdclk = cdclk_state->cdclk;
925 int vco = cdclk_state->vco; 919 int vco = cdclk_state->vco;
926 u32 freq_select, pcu_ack; 920 u32 freq_select, pcu_ack, cdclk_ctl;
927 int ret; 921 int ret;
928 922
929 WARN_ON((cdclk == 24000) != (vco == 0)); 923 WARN_ON((cdclk == 24000) != (vco == 0));
@@ -940,7 +934,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
940 return; 934 return;
941 } 935 }
942 936
943 /* set CDCLK_CTL */ 937 /* Choose frequency for this cdclk */
944 switch (cdclk) { 938 switch (cdclk) {
945 case 450000: 939 case 450000:
946 case 432000: 940 case 432000:
@@ -968,10 +962,33 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
968 dev_priv->cdclk.hw.vco != vco) 962 dev_priv->cdclk.hw.vco != vco)
969 skl_dpll0_disable(dev_priv); 963 skl_dpll0_disable(dev_priv);
970 964
965 cdclk_ctl = I915_READ(CDCLK_CTL);
966
967 if (dev_priv->cdclk.hw.vco != vco) {
968 /* Wa Display #1183: skl,kbl,cfl */
969 cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
970 cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
971 I915_WRITE(CDCLK_CTL, cdclk_ctl);
972 }
973
974 /* Wa Display #1183: skl,kbl,cfl */
975 cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE;
976 I915_WRITE(CDCLK_CTL, cdclk_ctl);
977 POSTING_READ(CDCLK_CTL);
978
971 if (dev_priv->cdclk.hw.vco != vco) 979 if (dev_priv->cdclk.hw.vco != vco)
972 skl_dpll0_enable(dev_priv, vco); 980 skl_dpll0_enable(dev_priv, vco);
973 981
974 I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk)); 982 /* Wa Display #1183: skl,kbl,cfl */
983 cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
984 I915_WRITE(CDCLK_CTL, cdclk_ctl);
985
986 cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
987 I915_WRITE(CDCLK_CTL, cdclk_ctl);
988
989 /* Wa Display #1183: skl,kbl,cfl */
990 cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE;
991 I915_WRITE(CDCLK_CTL, cdclk_ctl);
975 POSTING_READ(CDCLK_CTL); 992 POSTING_READ(CDCLK_CTL);
976 993
977 /* inform PCU of the change */ 994 /* inform PCU of the change */
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 933c18fd4258..58a3755544b2 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -2128,9 +2128,12 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
2128 if (WARN_ON(!pll)) 2128 if (WARN_ON(!pll))
2129 return; 2129 return;
2130 2130
2131 mutex_lock(&dev_priv->dpll_lock);
2132
2131 if (IS_CANNONLAKE(dev_priv)) { 2133 if (IS_CANNONLAKE(dev_priv)) {
2132 /* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */ 2134 /* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
2133 val = I915_READ(DPCLKA_CFGCR0); 2135 val = I915_READ(DPCLKA_CFGCR0);
2136 val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
2134 val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->id, port); 2137 val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->id, port);
2135 I915_WRITE(DPCLKA_CFGCR0, val); 2138 I915_WRITE(DPCLKA_CFGCR0, val);
2136 2139
@@ -2156,6 +2159,8 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
2156 } else if (INTEL_INFO(dev_priv)->gen < 9) { 2159 } else if (INTEL_INFO(dev_priv)->gen < 9) {
2157 I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll)); 2160 I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
2158 } 2161 }
2162
2163 mutex_unlock(&dev_priv->dpll_lock);
2159} 2164}
2160 2165
2161static void intel_ddi_clk_disable(struct intel_encoder *encoder) 2166static void intel_ddi_clk_disable(struct intel_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 878acc432a4b..50f8443641b8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1000,7 +1000,8 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1000 return crtc->config->cpu_transcoder; 1000 return crtc->config->cpu_transcoder;
1001} 1001}
1002 1002
1003static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe) 1003static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
1004 enum pipe pipe)
1004{ 1005{
1005 i915_reg_t reg = PIPEDSL(pipe); 1006 i915_reg_t reg = PIPEDSL(pipe);
1006 u32 line1, line2; 1007 u32 line1, line2;
@@ -1015,7 +1016,28 @@ static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe)
1015 msleep(5); 1016 msleep(5);
1016 line2 = I915_READ(reg) & line_mask; 1017 line2 = I915_READ(reg) & line_mask;
1017 1018
1018 return line1 == line2; 1019 return line1 != line2;
1020}
1021
1022static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
1023{
1024 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1025 enum pipe pipe = crtc->pipe;
1026
1027 /* Wait for the display line to settle/start moving */
1028 if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
1029 DRM_ERROR("pipe %c scanline %s wait timed out\n",
1030 pipe_name(pipe), onoff(state));
1031}
1032
1033static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
1034{
1035 wait_for_pipe_scanline_moving(crtc, false);
1036}
1037
1038static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
1039{
1040 wait_for_pipe_scanline_moving(crtc, true);
1019} 1041}
1020 1042
1021/* 1043/*
@@ -1038,7 +1060,6 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
1038{ 1060{
1039 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1061 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1040 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 1062 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1041 enum pipe pipe = crtc->pipe;
1042 1063
1043 if (INTEL_GEN(dev_priv) >= 4) { 1064 if (INTEL_GEN(dev_priv) >= 4) {
1044 i915_reg_t reg = PIPECONF(cpu_transcoder); 1065 i915_reg_t reg = PIPECONF(cpu_transcoder);
@@ -1049,9 +1070,7 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
1049 100)) 1070 100))
1050 WARN(1, "pipe_off wait timed out\n"); 1071 WARN(1, "pipe_off wait timed out\n");
1051 } else { 1072 } else {
1052 /* Wait for the display line to settle */ 1073 intel_wait_for_pipe_scanline_stopped(crtc);
1053 if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
1054 WARN(1, "pipe_off wait timed out\n");
1055 } 1074 }
1056} 1075}
1057 1076
@@ -1192,23 +1211,6 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
1192 pipe_name(pipe)); 1211 pipe_name(pipe));
1193} 1212}
1194 1213
1195static void assert_cursor(struct drm_i915_private *dev_priv,
1196 enum pipe pipe, bool state)
1197{
1198 bool cur_state;
1199
1200 if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
1201 cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
1202 else
1203 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1204
1205 I915_STATE_WARN(cur_state != state,
1206 "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1207 pipe_name(pipe), onoff(state), onoff(cur_state));
1208}
1209#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1210#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1211
1212void assert_pipe(struct drm_i915_private *dev_priv, 1214void assert_pipe(struct drm_i915_private *dev_priv,
1213 enum pipe pipe, bool state) 1215 enum pipe pipe, bool state)
1214{ 1216{
@@ -1236,77 +1238,25 @@ void assert_pipe(struct drm_i915_private *dev_priv,
1236 pipe_name(pipe), onoff(state), onoff(cur_state)); 1238 pipe_name(pipe), onoff(state), onoff(cur_state));
1237} 1239}
1238 1240
1239static void assert_plane(struct drm_i915_private *dev_priv, 1241static void assert_plane(struct intel_plane *plane, bool state)
1240 enum plane plane, bool state)
1241{ 1242{
1242 u32 val; 1243 bool cur_state = plane->get_hw_state(plane);
1243 bool cur_state;
1244 1244
1245 val = I915_READ(DSPCNTR(plane));
1246 cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1247 I915_STATE_WARN(cur_state != state, 1245 I915_STATE_WARN(cur_state != state,
1248 "plane %c assertion failure (expected %s, current %s)\n", 1246 "%s assertion failure (expected %s, current %s)\n",
1249 plane_name(plane), onoff(state), onoff(cur_state)); 1247 plane->base.name, onoff(state), onoff(cur_state));
1250} 1248}
1251 1249
1252#define assert_plane_enabled(d, p) assert_plane(d, p, true) 1250#define assert_plane_enabled(p) assert_plane(p, true)
1253#define assert_plane_disabled(d, p) assert_plane(d, p, false) 1251#define assert_plane_disabled(p) assert_plane(p, false)
1254 1252
1255static void assert_planes_disabled(struct drm_i915_private *dev_priv, 1253static void assert_planes_disabled(struct intel_crtc *crtc)
1256 enum pipe pipe)
1257{ 1254{
1258 int i; 1255 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1259 1256 struct intel_plane *plane;
1260 /* Primary planes are fixed to pipes on gen4+ */
1261 if (INTEL_GEN(dev_priv) >= 4) {
1262 u32 val = I915_READ(DSPCNTR(pipe));
1263 I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
1264 "plane %c assertion failure, should be disabled but not\n",
1265 plane_name(pipe));
1266 return;
1267 }
1268
1269 /* Need to check both planes against the pipe */
1270 for_each_pipe(dev_priv, i) {
1271 u32 val = I915_READ(DSPCNTR(i));
1272 enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1273 DISPPLANE_SEL_PIPE_SHIFT;
1274 I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1275 "plane %c assertion failure, should be off on pipe %c but is still active\n",
1276 plane_name(i), pipe_name(pipe));
1277 }
1278}
1279
1280static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1281 enum pipe pipe)
1282{
1283 int sprite;
1284 1257
1285 if (INTEL_GEN(dev_priv) >= 9) { 1258 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
1286 for_each_sprite(dev_priv, pipe, sprite) { 1259 assert_plane_disabled(plane);
1287 u32 val = I915_READ(PLANE_CTL(pipe, sprite));
1288 I915_STATE_WARN(val & PLANE_CTL_ENABLE,
1289 "plane %d assertion failure, should be off on pipe %c but is still active\n",
1290 sprite, pipe_name(pipe));
1291 }
1292 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1293 for_each_sprite(dev_priv, pipe, sprite) {
1294 u32 val = I915_READ(SPCNTR(pipe, PLANE_SPRITE0 + sprite));
1295 I915_STATE_WARN(val & SP_ENABLE,
1296 "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1297 sprite_name(pipe, sprite), pipe_name(pipe));
1298 }
1299 } else if (INTEL_GEN(dev_priv) >= 7) {
1300 u32 val = I915_READ(SPRCTL(pipe));
1301 I915_STATE_WARN(val & SPRITE_ENABLE,
1302 "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1303 plane_name(pipe), pipe_name(pipe));
1304 } else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
1305 u32 val = I915_READ(DVSCNTR(pipe));
1306 I915_STATE_WARN(val & DVS_ENABLE,
1307 "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1308 plane_name(pipe), pipe_name(pipe));
1309 }
1310} 1260}
1311 1261
1312static void assert_vblank_disabled(struct drm_crtc *crtc) 1262static void assert_vblank_disabled(struct drm_crtc *crtc)
@@ -1899,9 +1849,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
1899 1849
1900 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe)); 1850 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
1901 1851
1902 assert_planes_disabled(dev_priv, pipe); 1852 assert_planes_disabled(crtc);
1903 assert_cursor_disabled(dev_priv, pipe);
1904 assert_sprites_disabled(dev_priv, pipe);
1905 1853
1906 /* 1854 /*
1907 * A pipe without a PLL won't actually be able to drive bits from 1855 * A pipe without a PLL won't actually be able to drive bits from
@@ -1936,15 +1884,14 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
1936 POSTING_READ(reg); 1884 POSTING_READ(reg);
1937 1885
1938 /* 1886 /*
1939 * Until the pipe starts DSL will read as 0, which would cause 1887 * Until the pipe starts PIPEDSL reads will return a stale value,
1940 * an apparent vblank timestamp jump, which messes up also the 1888 * which causes an apparent vblank timestamp jump when PIPEDSL
1941 * frame count when it's derived from the timestamps. So let's 1889 * resets to its proper value. That also messes up the frame count
1942 * wait for the pipe to start properly before we call 1890 * when it's derived from the timestamps. So let's wait for the
1943 * drm_crtc_vblank_on() 1891 * pipe to start properly before we call drm_crtc_vblank_on()
1944 */ 1892 */
1945 if (dev->max_vblank_count == 0 && 1893 if (dev->max_vblank_count == 0)
1946 wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50)) 1894 intel_wait_for_pipe_scanline_moving(crtc);
1947 DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
1948} 1895}
1949 1896
1950/** 1897/**
@@ -1971,9 +1918,7 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
1971 * Make sure planes won't keep trying to pump pixels to us, 1918 * Make sure planes won't keep trying to pump pixels to us,
1972 * or we might hang the display. 1919 * or we might hang the display.
1973 */ 1920 */
1974 assert_planes_disabled(dev_priv, pipe); 1921 assert_planes_disabled(crtc);
1975 assert_cursor_disabled(dev_priv, pipe);
1976 assert_sprites_disabled(dev_priv, pipe);
1977 1922
1978 reg = PIPECONF(cpu_transcoder); 1923 reg = PIPECONF(cpu_transcoder);
1979 val = I915_READ(reg); 1924 val = I915_READ(reg);
@@ -2802,6 +2747,23 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2802 crtc_state->active_planes); 2747 crtc_state->active_planes);
2803} 2748}
2804 2749
2750static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
2751 struct intel_plane *plane)
2752{
2753 struct intel_crtc_state *crtc_state =
2754 to_intel_crtc_state(crtc->base.state);
2755 struct intel_plane_state *plane_state =
2756 to_intel_plane_state(plane->base.state);
2757
2758 intel_set_plane_visible(crtc_state, plane_state, false);
2759
2760 if (plane->id == PLANE_PRIMARY)
2761 intel_pre_disable_primary_noatomic(&crtc->base);
2762
2763 trace_intel_disable_plane(&plane->base, crtc);
2764 plane->disable_plane(plane, crtc);
2765}
2766
2805static void 2767static void
2806intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, 2768intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2807 struct intel_initial_plane_config *plane_config) 2769 struct intel_initial_plane_config *plane_config)
@@ -2859,12 +2821,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2859 * simplest solution is to just disable the primary plane now and 2821 * simplest solution is to just disable the primary plane now and
2860 * pretend the BIOS never had it enabled. 2822 * pretend the BIOS never had it enabled.
2861 */ 2823 */
2862 intel_set_plane_visible(to_intel_crtc_state(crtc_state), 2824 intel_plane_disable_noatomic(intel_crtc, intel_plane);
2863 to_intel_plane_state(plane_state),
2864 false);
2865 intel_pre_disable_primary_noatomic(&intel_crtc->base);
2866 trace_intel_disable_plane(primary, intel_crtc);
2867 intel_plane->disable_plane(intel_plane, intel_crtc);
2868 2825
2869 return; 2826 return;
2870 2827
@@ -3367,6 +3324,31 @@ static void i9xx_disable_primary_plane(struct intel_plane *primary,
3367 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3324 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3368} 3325}
3369 3326
3327static bool i9xx_plane_get_hw_state(struct intel_plane *primary)
3328{
3329
3330 struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
3331 enum intel_display_power_domain power_domain;
3332 enum plane plane = primary->plane;
3333 enum pipe pipe = primary->pipe;
3334 bool ret;
3335
3336 /*
3337 * Not 100% correct for planes that can move between pipes,
3338 * but that's only the case for gen2-4 which don't have any
3339 * display power wells.
3340 */
3341 power_domain = POWER_DOMAIN_PIPE(pipe);
3342 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
3343 return false;
3344
3345 ret = I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE;
3346
3347 intel_display_power_put(dev_priv, power_domain);
3348
3349 return ret;
3350}
3351
3370static u32 3352static u32
3371intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane) 3353intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane)
3372{ 3354{
@@ -4848,7 +4830,8 @@ void hsw_enable_ips(struct intel_crtc *crtc)
4848 * a vblank wait. 4830 * a vblank wait.
4849 */ 4831 */
4850 4832
4851 assert_plane_enabled(dev_priv, crtc->plane); 4833 assert_plane_enabled(to_intel_plane(crtc->base.primary));
4834
4852 if (IS_BROADWELL(dev_priv)) { 4835 if (IS_BROADWELL(dev_priv)) {
4853 mutex_lock(&dev_priv->pcu_lock); 4836 mutex_lock(&dev_priv->pcu_lock);
4854 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 4837 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
@@ -4881,7 +4864,8 @@ void hsw_disable_ips(struct intel_crtc *crtc)
4881 if (!crtc->config->ips_enabled) 4864 if (!crtc->config->ips_enabled)
4882 return; 4865 return;
4883 4866
4884 assert_plane_enabled(dev_priv, crtc->plane); 4867 assert_plane_enabled(to_intel_plane(crtc->base.primary));
4868
4885 if (IS_BROADWELL(dev_priv)) { 4869 if (IS_BROADWELL(dev_priv)) {
4886 mutex_lock(&dev_priv->pcu_lock); 4870 mutex_lock(&dev_priv->pcu_lock);
4887 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); 4871 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
@@ -5881,6 +5865,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
5881 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5865 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5882 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 5866 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
5883 enum intel_display_power_domain domain; 5867 enum intel_display_power_domain domain;
5868 struct intel_plane *plane;
5884 u64 domains; 5869 u64 domains;
5885 struct drm_atomic_state *state; 5870 struct drm_atomic_state *state;
5886 struct intel_crtc_state *crtc_state; 5871 struct intel_crtc_state *crtc_state;
@@ -5889,11 +5874,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
5889 if (!intel_crtc->active) 5874 if (!intel_crtc->active)
5890 return; 5875 return;
5891 5876
5892 if (crtc->primary->state->visible) { 5877 for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
5893 intel_pre_disable_primary_noatomic(crtc); 5878 const struct intel_plane_state *plane_state =
5879 to_intel_plane_state(plane->base.state);
5894 5880
5895 intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary)); 5881 if (plane_state->base.visible)
5896 crtc->primary->state->visible = false; 5882 intel_plane_disable_noatomic(intel_crtc, plane);
5897 } 5883 }
5898 5884
5899 state = drm_atomic_state_alloc(crtc->dev); 5885 state = drm_atomic_state_alloc(crtc->dev);
@@ -9459,6 +9445,23 @@ static void i845_disable_cursor(struct intel_plane *plane,
9459 i845_update_cursor(plane, NULL, NULL); 9445 i845_update_cursor(plane, NULL, NULL);
9460} 9446}
9461 9447
9448static bool i845_cursor_get_hw_state(struct intel_plane *plane)
9449{
9450 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
9451 enum intel_display_power_domain power_domain;
9452 bool ret;
9453
9454 power_domain = POWER_DOMAIN_PIPE(PIPE_A);
9455 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9456 return false;
9457
9458 ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
9459
9460 intel_display_power_put(dev_priv, power_domain);
9461
9462 return ret;
9463}
9464
9462static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, 9465static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
9463 const struct intel_plane_state *plane_state) 9466 const struct intel_plane_state *plane_state)
9464{ 9467{
@@ -9652,6 +9655,28 @@ static void i9xx_disable_cursor(struct intel_plane *plane,
9652 i9xx_update_cursor(plane, NULL, NULL); 9655 i9xx_update_cursor(plane, NULL, NULL);
9653} 9656}
9654 9657
9658static bool i9xx_cursor_get_hw_state(struct intel_plane *plane)
9659{
9660 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
9661 enum intel_display_power_domain power_domain;
9662 enum pipe pipe = plane->pipe;
9663 bool ret;
9664
9665 /*
9666 * Not 100% correct for planes that can move between pipes,
9667 * but that's only the case for gen2-3 which don't have any
9668 * display power wells.
9669 */
9670 power_domain = POWER_DOMAIN_PIPE(pipe);
9671 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9672 return false;
9673
9674 ret = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
9675
9676 intel_display_power_put(dev_priv, power_domain);
9677
9678 return ret;
9679}
9655 9680
9656/* VESA 640x480x72Hz mode to set on the pipe */ 9681/* VESA 640x480x72Hz mode to set on the pipe */
9657static const struct drm_display_mode load_detect_mode = { 9682static const struct drm_display_mode load_detect_mode = {
@@ -9926,11 +9951,10 @@ found:
9926 } 9951 }
9927 9952
9928 ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0); 9953 ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
9954 drm_framebuffer_put(fb);
9929 if (ret) 9955 if (ret)
9930 goto fail; 9956 goto fail;
9931 9957
9932 drm_framebuffer_put(fb);
9933
9934 ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode); 9958 ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
9935 if (ret) 9959 if (ret)
9936 goto fail; 9960 goto fail;
@@ -12527,11 +12551,15 @@ static int intel_atomic_commit(struct drm_device *dev,
12527 INIT_WORK(&state->commit_work, intel_atomic_commit_work); 12551 INIT_WORK(&state->commit_work, intel_atomic_commit_work);
12528 12552
12529 i915_sw_fence_commit(&intel_state->commit_ready); 12553 i915_sw_fence_commit(&intel_state->commit_ready);
12530 if (nonblock) 12554 if (nonblock && intel_state->modeset) {
12555 queue_work(dev_priv->modeset_wq, &state->commit_work);
12556 } else if (nonblock) {
12531 queue_work(system_unbound_wq, &state->commit_work); 12557 queue_work(system_unbound_wq, &state->commit_work);
12532 else 12558 } else {
12559 if (intel_state->modeset)
12560 flush_workqueue(dev_priv->modeset_wq);
12533 intel_atomic_commit_tail(state); 12561 intel_atomic_commit_tail(state);
12534 12562 }
12535 12563
12536 return 0; 12564 return 0;
12537} 12565}
@@ -13177,13 +13205,14 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
13177 primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe); 13205 primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
13178 primary->check_plane = intel_check_primary_plane; 13206 primary->check_plane = intel_check_primary_plane;
13179 13207
13180 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { 13208 if (INTEL_GEN(dev_priv) >= 10) {
13181 intel_primary_formats = skl_primary_formats; 13209 intel_primary_formats = skl_primary_formats;
13182 num_formats = ARRAY_SIZE(skl_primary_formats); 13210 num_formats = ARRAY_SIZE(skl_primary_formats);
13183 modifiers = skl_format_modifiers_ccs; 13211 modifiers = skl_format_modifiers_ccs;
13184 13212
13185 primary->update_plane = skl_update_plane; 13213 primary->update_plane = skl_update_plane;
13186 primary->disable_plane = skl_disable_plane; 13214 primary->disable_plane = skl_disable_plane;
13215 primary->get_hw_state = skl_plane_get_hw_state;
13187 } else if (INTEL_GEN(dev_priv) >= 9) { 13216 } else if (INTEL_GEN(dev_priv) >= 9) {
13188 intel_primary_formats = skl_primary_formats; 13217 intel_primary_formats = skl_primary_formats;
13189 num_formats = ARRAY_SIZE(skl_primary_formats); 13218 num_formats = ARRAY_SIZE(skl_primary_formats);
@@ -13194,6 +13223,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
13194 13223
13195 primary->update_plane = skl_update_plane; 13224 primary->update_plane = skl_update_plane;
13196 primary->disable_plane = skl_disable_plane; 13225 primary->disable_plane = skl_disable_plane;
13226 primary->get_hw_state = skl_plane_get_hw_state;
13197 } else if (INTEL_GEN(dev_priv) >= 4) { 13227 } else if (INTEL_GEN(dev_priv) >= 4) {
13198 intel_primary_formats = i965_primary_formats; 13228 intel_primary_formats = i965_primary_formats;
13199 num_formats = ARRAY_SIZE(i965_primary_formats); 13229 num_formats = ARRAY_SIZE(i965_primary_formats);
@@ -13201,6 +13231,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
13201 13231
13202 primary->update_plane = i9xx_update_primary_plane; 13232 primary->update_plane = i9xx_update_primary_plane;
13203 primary->disable_plane = i9xx_disable_primary_plane; 13233 primary->disable_plane = i9xx_disable_primary_plane;
13234 primary->get_hw_state = i9xx_plane_get_hw_state;
13204 } else { 13235 } else {
13205 intel_primary_formats = i8xx_primary_formats; 13236 intel_primary_formats = i8xx_primary_formats;
13206 num_formats = ARRAY_SIZE(i8xx_primary_formats); 13237 num_formats = ARRAY_SIZE(i8xx_primary_formats);
@@ -13208,6 +13239,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
13208 13239
13209 primary->update_plane = i9xx_update_primary_plane; 13240 primary->update_plane = i9xx_update_primary_plane;
13210 primary->disable_plane = i9xx_disable_primary_plane; 13241 primary->disable_plane = i9xx_disable_primary_plane;
13242 primary->get_hw_state = i9xx_plane_get_hw_state;
13211 } 13243 }
13212 13244
13213 if (INTEL_GEN(dev_priv) >= 9) 13245 if (INTEL_GEN(dev_priv) >= 9)
@@ -13297,10 +13329,12 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
13297 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { 13329 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
13298 cursor->update_plane = i845_update_cursor; 13330 cursor->update_plane = i845_update_cursor;
13299 cursor->disable_plane = i845_disable_cursor; 13331 cursor->disable_plane = i845_disable_cursor;
13332 cursor->get_hw_state = i845_cursor_get_hw_state;
13300 cursor->check_plane = i845_check_cursor; 13333 cursor->check_plane = i845_check_cursor;
13301 } else { 13334 } else {
13302 cursor->update_plane = i9xx_update_cursor; 13335 cursor->update_plane = i9xx_update_cursor;
13303 cursor->disable_plane = i9xx_disable_cursor; 13336 cursor->disable_plane = i9xx_disable_cursor;
13337 cursor->get_hw_state = i9xx_cursor_get_hw_state;
13304 cursor->check_plane = i9xx_check_cursor; 13338 cursor->check_plane = i9xx_check_cursor;
13305 } 13339 }
13306 13340
@@ -14445,6 +14479,8 @@ int intel_modeset_init(struct drm_device *dev)
14445 enum pipe pipe; 14479 enum pipe pipe;
14446 struct intel_crtc *crtc; 14480 struct intel_crtc *crtc;
14447 14481
14482 dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
14483
14448 drm_mode_config_init(dev); 14484 drm_mode_config_init(dev);
14449 14485
14450 dev->mode_config.min_width = 0; 14486 dev->mode_config.min_width = 0;
@@ -14643,38 +14679,56 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
14643 14679
14644void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) 14680void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
14645{ 14681{
14682 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
14683
14646 DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n", 14684 DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
14647 pipe_name(pipe)); 14685 pipe_name(pipe));
14648 14686
14649 assert_plane_disabled(dev_priv, PLANE_A); 14687 WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
14650 assert_plane_disabled(dev_priv, PLANE_B); 14688 WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
14689 WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
14690 WARN_ON(I915_READ(CURCNTR(PIPE_A)) & CURSOR_MODE);
14691 WARN_ON(I915_READ(CURCNTR(PIPE_B)) & CURSOR_MODE);
14651 14692
14652 I915_WRITE(PIPECONF(pipe), 0); 14693 I915_WRITE(PIPECONF(pipe), 0);
14653 POSTING_READ(PIPECONF(pipe)); 14694 POSTING_READ(PIPECONF(pipe));
14654 14695
14655 if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100)) 14696 intel_wait_for_pipe_scanline_stopped(crtc);
14656 DRM_ERROR("pipe %c off wait timed out\n", pipe_name(pipe));
14657 14697
14658 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS); 14698 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
14659 POSTING_READ(DPLL(pipe)); 14699 POSTING_READ(DPLL(pipe));
14660} 14700}
14661 14701
14662static bool 14702static bool intel_plane_mapping_ok(struct intel_crtc *crtc,
14663intel_check_plane_mapping(struct intel_crtc *crtc) 14703 struct intel_plane *primary)
14664{ 14704{
14665 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14705 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14666 u32 val; 14706 enum plane plane = primary->plane;
14707 u32 val = I915_READ(DSPCNTR(plane));
14667 14708
14668 if (INTEL_INFO(dev_priv)->num_pipes == 1) 14709 return (val & DISPLAY_PLANE_ENABLE) == 0 ||
14669 return true; 14710 (val & DISPPLANE_SEL_PIPE_MASK) == DISPPLANE_SEL_PIPE(crtc->pipe);
14711}
14670 14712
14671 val = I915_READ(DSPCNTR(!crtc->plane)); 14713static void
14714intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
14715{
14716 struct intel_crtc *crtc;
14672 14717
14673 if ((val & DISPLAY_PLANE_ENABLE) && 14718 if (INTEL_GEN(dev_priv) >= 4)
14674 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe)) 14719 return;
14675 return false;
14676 14720
14677 return true; 14721 for_each_intel_crtc(&dev_priv->drm, crtc) {
14722 struct intel_plane *plane =
14723 to_intel_plane(crtc->base.primary);
14724
14725 if (intel_plane_mapping_ok(crtc, plane))
14726 continue;
14727
14728 DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n",
14729 plane->base.name);
14730 intel_plane_disable_noatomic(crtc, plane);
14731 }
14678} 14732}
14679 14733
14680static bool intel_crtc_has_encoders(struct intel_crtc *crtc) 14734static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
@@ -14730,33 +14784,15 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
14730 14784
14731 /* Disable everything but the primary plane */ 14785 /* Disable everything but the primary plane */
14732 for_each_intel_plane_on_crtc(dev, crtc, plane) { 14786 for_each_intel_plane_on_crtc(dev, crtc, plane) {
14733 if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) 14787 const struct intel_plane_state *plane_state =
14734 continue; 14788 to_intel_plane_state(plane->base.state);
14735 14789
14736 trace_intel_disable_plane(&plane->base, crtc); 14790 if (plane_state->base.visible &&
14737 plane->disable_plane(plane, crtc); 14791 plane->base.type != DRM_PLANE_TYPE_PRIMARY)
14792 intel_plane_disable_noatomic(crtc, plane);
14738 } 14793 }
14739 } 14794 }
14740 14795
14741 /* We need to sanitize the plane -> pipe mapping first because this will
14742 * disable the crtc (and hence change the state) if it is wrong. Note
14743 * that gen4+ has a fixed plane -> pipe mapping. */
14744 if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) {
14745 bool plane;
14746
14747 DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
14748 crtc->base.base.id, crtc->base.name);
14749
14750 /* Pipe has the wrong plane attached and the plane is active.
14751 * Temporarily change the plane mapping and disable everything
14752 * ... */
14753 plane = crtc->plane;
14754 crtc->base.primary->state->visible = true;
14755 crtc->plane = !plane;
14756 intel_crtc_disable_noatomic(&crtc->base, ctx);
14757 crtc->plane = plane;
14758 }
14759
14760 /* Adjust the state of the output pipe according to whether we 14796 /* Adjust the state of the output pipe according to whether we
14761 * have active connectors/encoders. */ 14797 * have active connectors/encoders. */
14762 if (crtc->active && !intel_crtc_has_encoders(crtc)) 14798 if (crtc->active && !intel_crtc_has_encoders(crtc))
@@ -14861,24 +14897,21 @@ void i915_redisable_vga(struct drm_i915_private *dev_priv)
14861 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA); 14897 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
14862} 14898}
14863 14899
14864static bool primary_get_hw_state(struct intel_plane *plane)
14865{
14866 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
14867
14868 return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
14869}
14870
14871/* FIXME read out full plane state for all planes */ 14900/* FIXME read out full plane state for all planes */
14872static void readout_plane_state(struct intel_crtc *crtc) 14901static void readout_plane_state(struct intel_crtc *crtc)
14873{ 14902{
14874 struct intel_plane *primary = to_intel_plane(crtc->base.primary); 14903 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14875 bool visible; 14904 struct intel_crtc_state *crtc_state =
14905 to_intel_crtc_state(crtc->base.state);
14906 struct intel_plane *plane;
14876 14907
14877 visible = crtc->active && primary_get_hw_state(primary); 14908 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
14909 struct intel_plane_state *plane_state =
14910 to_intel_plane_state(plane->base.state);
14911 bool visible = plane->get_hw_state(plane);
14878 14912
14879 intel_set_plane_visible(to_intel_crtc_state(crtc->base.state), 14913 intel_set_plane_visible(crtc_state, plane_state, visible);
14880 to_intel_plane_state(primary->base.state), 14914 }
14881 visible);
14882} 14915}
14883 14916
14884static void intel_modeset_readout_hw_state(struct drm_device *dev) 14917static void intel_modeset_readout_hw_state(struct drm_device *dev)
@@ -15076,6 +15109,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
15076 /* HW state is read out, now we need to sanitize this mess. */ 15109 /* HW state is read out, now we need to sanitize this mess. */
15077 get_encoder_power_domains(dev_priv); 15110 get_encoder_power_domains(dev_priv);
15078 15111
15112 intel_sanitize_plane_mapping(dev_priv);
15113
15079 for_each_intel_encoder(dev, encoder) { 15114 for_each_intel_encoder(dev, encoder) {
15080 intel_sanitize_encoder(encoder); 15115 intel_sanitize_encoder(encoder);
15081 } 15116 }
@@ -15252,6 +15287,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
15252 intel_cleanup_gt_powersave(dev_priv); 15287 intel_cleanup_gt_powersave(dev_priv);
15253 15288
15254 intel_teardown_gmbus(dev_priv); 15289 intel_teardown_gmbus(dev_priv);
15290
15291 destroy_workqueue(dev_priv->modeset_wq);
15255} 15292}
15256 15293
15257void intel_connector_attach_encoder(struct intel_connector *connector, 15294void intel_connector_attach_encoder(struct intel_connector *connector,
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 6c7f8bca574e..5d77f75a9f9c 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -862,6 +862,7 @@ struct intel_plane {
862 const struct intel_plane_state *plane_state); 862 const struct intel_plane_state *plane_state);
863 void (*disable_plane)(struct intel_plane *plane, 863 void (*disable_plane)(struct intel_plane *plane,
864 struct intel_crtc *crtc); 864 struct intel_crtc *crtc);
865 bool (*get_hw_state)(struct intel_plane *plane);
865 int (*check_plane)(struct intel_plane *plane, 866 int (*check_plane)(struct intel_plane *plane,
866 struct intel_crtc_state *crtc_state, 867 struct intel_crtc_state *crtc_state,
867 struct intel_plane_state *state); 868 struct intel_plane_state *state);
@@ -1924,6 +1925,7 @@ void skl_update_plane(struct intel_plane *plane,
1924 const struct intel_crtc_state *crtc_state, 1925 const struct intel_crtc_state *crtc_state,
1925 const struct intel_plane_state *plane_state); 1926 const struct intel_plane_state *plane_state);
1926void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc); 1927void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
1928bool skl_plane_get_hw_state(struct intel_plane *plane);
1927 1929
1928/* intel_tv.c */ 1930/* intel_tv.c */
1929void intel_tv_init(struct drm_i915_private *dev_priv); 1931void intel_tv_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index ab5bf4e2e28e..6074e04dc99f 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1390,6 +1390,11 @@ static int glk_init_workarounds(struct intel_engine_cs *engine)
1390 if (ret) 1390 if (ret)
1391 return ret; 1391 return ret;
1392 1392
1393 /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
1394 ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1);
1395 if (ret)
1396 return ret;
1397
1393 /* WaToEnableHwFixForPushConstHWBug:glk */ 1398 /* WaToEnableHwFixForPushConstHWBug:glk */
1394 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, 1399 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1395 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); 1400 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index 3bf65288ffff..5809b29044fc 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -193,7 +193,7 @@ static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
193 }; 193 };
194 194
195 if (!pci_dev_present(atom_hdaudio_ids)) { 195 if (!pci_dev_present(atom_hdaudio_ids)) {
196 DRM_INFO("%s\n", "HDaudio controller not detected, using LPE audio instead\n"); 196 DRM_INFO("HDaudio controller not detected, using LPE audio instead\n");
197 lpe_present = true; 197 lpe_present = true;
198 } 198 }
199 } 199 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d36e25607435..e71a8cd50498 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -974,6 +974,9 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
974 974
975 GEM_BUG_ON(prio == I915_PRIORITY_INVALID); 975 GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
976 976
977 if (i915_gem_request_completed(request))
978 return;
979
977 if (prio <= READ_ONCE(request->priotree.priority)) 980 if (prio <= READ_ONCE(request->priotree.priority))
978 return; 981 return;
979 982
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index f4a4e9496893..f0d0dbab4150 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -124,7 +124,6 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
124 124
125static void glk_init_clock_gating(struct drm_i915_private *dev_priv) 125static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
126{ 126{
127 u32 val;
128 gen9_init_clock_gating(dev_priv); 127 gen9_init_clock_gating(dev_priv);
129 128
130 /* 129 /*
@@ -144,11 +143,6 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
144 I915_WRITE(CHICKEN_MISC_2, val); 143 I915_WRITE(CHICKEN_MISC_2, val);
145 } 144 }
146 145
147 /* Display WA #1133: WaFbcSkipSegments:glk */
148 val = I915_READ(ILK_DPFC_CHICKEN);
149 val &= ~GLK_SKIP_SEG_COUNT_MASK;
150 val |= GLK_SKIP_SEG_EN | GLK_SKIP_SEG_COUNT(1);
151 I915_WRITE(ILK_DPFC_CHICKEN, val);
152} 146}
153 147
154static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv) 148static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
@@ -8517,7 +8511,6 @@ static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
8517 8511
8518static void cnl_init_clock_gating(struct drm_i915_private *dev_priv) 8512static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
8519{ 8513{
8520 u32 val;
8521 cnp_init_clock_gating(dev_priv); 8514 cnp_init_clock_gating(dev_priv);
8522 8515
8523 /* This is not an Wa. Enable for better image quality */ 8516 /* This is not an Wa. Enable for better image quality */
@@ -8537,12 +8530,6 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
8537 I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, 8530 I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
8538 I915_READ(SLICE_UNIT_LEVEL_CLKGATE) | 8531 I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
8539 SARBUNIT_CLKGATE_DIS); 8532 SARBUNIT_CLKGATE_DIS);
8540
8541 /* Display WA #1133: WaFbcSkipSegments:cnl */
8542 val = I915_READ(ILK_DPFC_CHICKEN);
8543 val &= ~GLK_SKIP_SEG_COUNT_MASK;
8544 val |= GLK_SKIP_SEG_EN | GLK_SKIP_SEG_COUNT(1);
8545 I915_WRITE(ILK_DPFC_CHICKEN, val);
8546} 8533}
8547 8534
8548static void cfl_init_clock_gating(struct drm_i915_private *dev_priv) 8535static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 6e3b430fccdc..55ea5eb3b7df 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -590,7 +590,7 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
590 struct drm_i915_private *dev_priv = to_i915(dev); 590 struct drm_i915_private *dev_priv = to_i915(dev);
591 591
592 if (dev_priv->psr.active) { 592 if (dev_priv->psr.active) {
593 i915_reg_t psr_ctl; 593 i915_reg_t psr_status;
594 u32 psr_status_mask; 594 u32 psr_status_mask;
595 595
596 if (dev_priv->psr.aux_frame_sync) 596 if (dev_priv->psr.aux_frame_sync)
@@ -599,24 +599,24 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
599 0); 599 0);
600 600
601 if (dev_priv->psr.psr2_support) { 601 if (dev_priv->psr.psr2_support) {
602 psr_ctl = EDP_PSR2_CTL; 602 psr_status = EDP_PSR2_STATUS_CTL;
603 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK; 603 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
604 604
605 I915_WRITE(psr_ctl, 605 I915_WRITE(EDP_PSR2_CTL,
606 I915_READ(psr_ctl) & 606 I915_READ(EDP_PSR2_CTL) &
607 ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE)); 607 ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
608 608
609 } else { 609 } else {
610 psr_ctl = EDP_PSR_STATUS_CTL; 610 psr_status = EDP_PSR_STATUS_CTL;
611 psr_status_mask = EDP_PSR_STATUS_STATE_MASK; 611 psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
612 612
613 I915_WRITE(psr_ctl, 613 I915_WRITE(EDP_PSR_CTL,
614 I915_READ(psr_ctl) & ~EDP_PSR_ENABLE); 614 I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
615 } 615 }
616 616
617 /* Wait till PSR is idle */ 617 /* Wait till PSR is idle */
618 if (intel_wait_for_register(dev_priv, 618 if (intel_wait_for_register(dev_priv,
619 psr_ctl, psr_status_mask, 0, 619 psr_status, psr_status_mask, 0,
620 2000)) 620 2000))
621 DRM_ERROR("Timed out waiting for PSR Idle State\n"); 621 DRM_ERROR("Timed out waiting for PSR Idle State\n");
622 622
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 8af286c63d3b..7e115f3927f6 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -598,6 +598,11 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv)
598 598
599 DRM_DEBUG_KMS("Enabling DC5\n"); 599 DRM_DEBUG_KMS("Enabling DC5\n");
600 600
601 /* Wa Display #1183: skl,kbl,cfl */
602 if (IS_GEN9_BC(dev_priv))
603 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
604 SKL_SELECT_ALTERNATE_DC_EXIT);
605
601 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); 606 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
602} 607}
603 608
@@ -625,6 +630,11 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv)
625{ 630{
626 DRM_DEBUG_KMS("Disabling DC6\n"); 631 DRM_DEBUG_KMS("Disabling DC6\n");
627 632
633 /* Wa Display #1183: skl,kbl,cfl */
634 if (IS_GEN9_BC(dev_priv))
635 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
636 SKL_SELECT_ALTERNATE_DC_EXIT);
637
628 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 638 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
629} 639}
630 640
@@ -1786,6 +1796,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
1786 GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 1796 GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
1787 BIT_ULL(POWER_DOMAIN_MODESET) | \ 1797 BIT_ULL(POWER_DOMAIN_MODESET) | \
1788 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 1798 BIT_ULL(POWER_DOMAIN_AUX_A) | \
1799 BIT_ULL(POWER_DOMAIN_GMBUS) | \
1789 BIT_ULL(POWER_DOMAIN_INIT)) 1800 BIT_ULL(POWER_DOMAIN_INIT))
1790 1801
1791#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 1802#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 4fcf80ca91dd..4a8a5d918a83 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -329,6 +329,26 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
329 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 329 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
330} 330}
331 331
332bool
333skl_plane_get_hw_state(struct intel_plane *plane)
334{
335 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
336 enum intel_display_power_domain power_domain;
337 enum plane_id plane_id = plane->id;
338 enum pipe pipe = plane->pipe;
339 bool ret;
340
341 power_domain = POWER_DOMAIN_PIPE(pipe);
342 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
343 return false;
344
345 ret = I915_READ(PLANE_CTL(pipe, plane_id)) & PLANE_CTL_ENABLE;
346
347 intel_display_power_put(dev_priv, power_domain);
348
349 return ret;
350}
351
332static void 352static void
333chv_update_csc(struct intel_plane *plane, uint32_t format) 353chv_update_csc(struct intel_plane *plane, uint32_t format)
334{ 354{
@@ -506,6 +526,26 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
506 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 526 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
507} 527}
508 528
529static bool
530vlv_plane_get_hw_state(struct intel_plane *plane)
531{
532 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
533 enum intel_display_power_domain power_domain;
534 enum plane_id plane_id = plane->id;
535 enum pipe pipe = plane->pipe;
536 bool ret;
537
538 power_domain = POWER_DOMAIN_PIPE(pipe);
539 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
540 return false;
541
542 ret = I915_READ(SPCNTR(pipe, plane_id)) & SP_ENABLE;
543
544 intel_display_power_put(dev_priv, power_domain);
545
546 return ret;
547}
548
509static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state, 549static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
510 const struct intel_plane_state *plane_state) 550 const struct intel_plane_state *plane_state)
511{ 551{
@@ -646,6 +686,25 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
646 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 686 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
647} 687}
648 688
689static bool
690ivb_plane_get_hw_state(struct intel_plane *plane)
691{
692 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
693 enum intel_display_power_domain power_domain;
694 enum pipe pipe = plane->pipe;
695 bool ret;
696
697 power_domain = POWER_DOMAIN_PIPE(pipe);
698 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
699 return false;
700
701 ret = I915_READ(SPRCTL(pipe)) & SPRITE_ENABLE;
702
703 intel_display_power_put(dev_priv, power_domain);
704
705 return ret;
706}
707
649static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state, 708static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
650 const struct intel_plane_state *plane_state) 709 const struct intel_plane_state *plane_state)
651{ 710{
@@ -777,6 +836,25 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
777 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 836 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
778} 837}
779 838
839static bool
840g4x_plane_get_hw_state(struct intel_plane *plane)
841{
842 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
843 enum intel_display_power_domain power_domain;
844 enum pipe pipe = plane->pipe;
845 bool ret;
846
847 power_domain = POWER_DOMAIN_PIPE(pipe);
848 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
849 return false;
850
851 ret = I915_READ(DVSCNTR(pipe)) & DVS_ENABLE;
852
853 intel_display_power_put(dev_priv, power_domain);
854
855 return ret;
856}
857
780static int 858static int
781intel_check_sprite_plane(struct intel_plane *plane, 859intel_check_sprite_plane(struct intel_plane *plane,
782 struct intel_crtc_state *crtc_state, 860 struct intel_crtc_state *crtc_state,
@@ -1232,6 +1310,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
1232 1310
1233 intel_plane->update_plane = skl_update_plane; 1311 intel_plane->update_plane = skl_update_plane;
1234 intel_plane->disable_plane = skl_disable_plane; 1312 intel_plane->disable_plane = skl_disable_plane;
1313 intel_plane->get_hw_state = skl_plane_get_hw_state;
1235 1314
1236 plane_formats = skl_plane_formats; 1315 plane_formats = skl_plane_formats;
1237 num_plane_formats = ARRAY_SIZE(skl_plane_formats); 1316 num_plane_formats = ARRAY_SIZE(skl_plane_formats);
@@ -1242,6 +1321,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
1242 1321
1243 intel_plane->update_plane = skl_update_plane; 1322 intel_plane->update_plane = skl_update_plane;
1244 intel_plane->disable_plane = skl_disable_plane; 1323 intel_plane->disable_plane = skl_disable_plane;
1324 intel_plane->get_hw_state = skl_plane_get_hw_state;
1245 1325
1246 plane_formats = skl_plane_formats; 1326 plane_formats = skl_plane_formats;
1247 num_plane_formats = ARRAY_SIZE(skl_plane_formats); 1327 num_plane_formats = ARRAY_SIZE(skl_plane_formats);
@@ -1252,6 +1332,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
1252 1332
1253 intel_plane->update_plane = vlv_update_plane; 1333 intel_plane->update_plane = vlv_update_plane;
1254 intel_plane->disable_plane = vlv_disable_plane; 1334 intel_plane->disable_plane = vlv_disable_plane;
1335 intel_plane->get_hw_state = vlv_plane_get_hw_state;
1255 1336
1256 plane_formats = vlv_plane_formats; 1337 plane_formats = vlv_plane_formats;
1257 num_plane_formats = ARRAY_SIZE(vlv_plane_formats); 1338 num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
@@ -1267,6 +1348,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
1267 1348
1268 intel_plane->update_plane = ivb_update_plane; 1349 intel_plane->update_plane = ivb_update_plane;
1269 intel_plane->disable_plane = ivb_disable_plane; 1350 intel_plane->disable_plane = ivb_disable_plane;
1351 intel_plane->get_hw_state = ivb_plane_get_hw_state;
1270 1352
1271 plane_formats = snb_plane_formats; 1353 plane_formats = snb_plane_formats;
1272 num_plane_formats = ARRAY_SIZE(snb_plane_formats); 1354 num_plane_formats = ARRAY_SIZE(snb_plane_formats);
@@ -1277,6 +1359,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
1277 1359
1278 intel_plane->update_plane = g4x_update_plane; 1360 intel_plane->update_plane = g4x_update_plane;
1279 intel_plane->disable_plane = g4x_disable_plane; 1361 intel_plane->disable_plane = g4x_disable_plane;
1362 intel_plane->get_hw_state = g4x_plane_get_hw_state;
1280 1363
1281 modifiers = i9xx_plane_format_modifiers; 1364 modifiers = i9xx_plane_format_modifiers;
1282 if (IS_GEN6(dev_priv)) { 1365 if (IS_GEN6(dev_priv)) {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 0760b93e9d1f..baab93398e54 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -121,6 +121,7 @@ int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
121int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); 121int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
122int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); 122int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
123int g84_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); 123int g84_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
124int mcp77_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
124int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); 125int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
125int gk104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); 126int gk104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
126int gk20a_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); 127int gk20a_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 2615912430cc..ef687414969e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -224,7 +224,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
224 /* Determine if we can get a cache-coherent map, forcing 224 /* Determine if we can get a cache-coherent map, forcing
225 * uncached mapping if we can't. 225 * uncached mapping if we can't.
226 */ 226 */
227 if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED) 227 if (!nouveau_drm_use_coherent_gpu_mapping(drm))
228 nvbo->force_coherent = true; 228 nvbo->force_coherent = true;
229 } 229 }
230 230
@@ -262,7 +262,8 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
262 if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE && 262 if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
263 (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram) 263 (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
264 continue; 264 continue;
265 if ((flags & TTM_PL_FLAG_TT ) && !vmm->page[i].host) 265 if ((flags & TTM_PL_FLAG_TT) &&
266 (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
266 continue; 267 continue;
267 268
268 /* Select this page size if it's the first that supports 269 /* Select this page size if it's the first that supports
@@ -1446,11 +1447,13 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
1446 args.nv50.ro = 0; 1447 args.nv50.ro = 0;
1447 args.nv50.kind = mem->kind; 1448 args.nv50.kind = mem->kind;
1448 args.nv50.comp = mem->comp; 1449 args.nv50.comp = mem->comp;
1450 argc = sizeof(args.nv50);
1449 break; 1451 break;
1450 case NVIF_CLASS_MEM_GF100: 1452 case NVIF_CLASS_MEM_GF100:
1451 args.gf100.version = 0; 1453 args.gf100.version = 0;
1452 args.gf100.ro = 0; 1454 args.gf100.ro = 0;
1453 args.gf100.kind = mem->kind; 1455 args.gf100.kind = mem->kind;
1456 argc = sizeof(args.gf100);
1454 break; 1457 break;
1455 default: 1458 default:
1456 WARN_ON(1); 1459 WARN_ON(1);
@@ -1458,7 +1461,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
1458 } 1461 }
1459 1462
1460 ret = nvif_object_map_handle(&mem->mem.object, 1463 ret = nvif_object_map_handle(&mem->mem.object,
1461 &argc, argc, 1464 &args, argc,
1462 &handle, &length); 1465 &handle, &length);
1463 if (ret != 1) 1466 if (ret != 1)
1464 return ret ? ret : -EINVAL; 1467 return ret ? ret : -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8d4a5be3b913..56fe261b6268 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -152,9 +152,9 @@ nouveau_cli_work_queue(struct nouveau_cli *cli, struct dma_fence *fence,
152 work->cli = cli; 152 work->cli = cli;
153 mutex_lock(&cli->lock); 153 mutex_lock(&cli->lock);
154 list_add_tail(&work->head, &cli->worker); 154 list_add_tail(&work->head, &cli->worker);
155 mutex_unlock(&cli->lock);
156 if (dma_fence_add_callback(fence, &work->cb, nouveau_cli_work_fence)) 155 if (dma_fence_add_callback(fence, &work->cb, nouveau_cli_work_fence))
157 nouveau_cli_work_fence(fence, &work->cb); 156 nouveau_cli_work_fence(fence, &work->cb);
157 mutex_unlock(&cli->lock);
158} 158}
159 159
160static void 160static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 3331e82ae9e7..96f6bd8aee5d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -157,8 +157,8 @@ struct nouveau_drm {
157 struct nvif_object copy; 157 struct nvif_object copy;
158 int mtrr; 158 int mtrr;
159 int type_vram; 159 int type_vram;
160 int type_host; 160 int type_host[2];
161 int type_ncoh; 161 int type_ncoh[2];
162 } ttm; 162 } ttm;
163 163
164 /* GEM interface support */ 164 /* GEM interface support */
@@ -217,6 +217,13 @@ nouveau_drm(struct drm_device *dev)
217 return dev->dev_private; 217 return dev->dev_private;
218} 218}
219 219
220static inline bool
221nouveau_drm_use_coherent_gpu_mapping(struct nouveau_drm *drm)
222{
223 struct nvif_mmu *mmu = &drm->client.mmu;
224 return !(mmu->type[drm->ttm.type_host[0]].type & NVIF_MEM_UNCACHED);
225}
226
220int nouveau_pmops_suspend(struct device *); 227int nouveau_pmops_suspend(struct device *);
221int nouveau_pmops_resume(struct device *); 228int nouveau_pmops_resume(struct device *);
222bool nouveau_pmops_runtime(void); 229bool nouveau_pmops_runtime(void);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index c533d8e04afc..be7357bf2246 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -429,7 +429,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
429 drm_fb_helper_unregister_fbi(&fbcon->helper); 429 drm_fb_helper_unregister_fbi(&fbcon->helper);
430 drm_fb_helper_fini(&fbcon->helper); 430 drm_fb_helper_fini(&fbcon->helper);
431 431
432 if (nouveau_fb->nvbo) { 432 if (nouveau_fb && nouveau_fb->nvbo) {
433 nouveau_vma_del(&nouveau_fb->vma); 433 nouveau_vma_del(&nouveau_fb->vma);
434 nouveau_bo_unmap(nouveau_fb->nvbo); 434 nouveau_bo_unmap(nouveau_fb->nvbo);
435 nouveau_bo_unpin(nouveau_fb->nvbo); 435 nouveau_bo_unpin(nouveau_fb->nvbo);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 589a9621db76..c002f8968507 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -103,10 +103,10 @@ nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt)
103 u8 type; 103 u8 type;
104 int ret; 104 int ret;
105 105
106 if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED) 106 if (!nouveau_drm_use_coherent_gpu_mapping(drm))
107 type = drm->ttm.type_ncoh; 107 type = drm->ttm.type_ncoh[!!mem->kind];
108 else 108 else
109 type = drm->ttm.type_host; 109 type = drm->ttm.type_host[0];
110 110
111 if (mem->kind && !(mmu->type[type].type & NVIF_MEM_KIND)) 111 if (mem->kind && !(mmu->type[type].type & NVIF_MEM_KIND))
112 mem->comp = mem->kind = 0; 112 mem->comp = mem->kind = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 08b974b30482..dff51a0ee028 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -235,27 +235,46 @@ nouveau_ttm_global_release(struct nouveau_drm *drm)
235 drm->ttm.mem_global_ref.release = NULL; 235 drm->ttm.mem_global_ref.release = NULL;
236} 236}
237 237
238int 238static int
239nouveau_ttm_init(struct nouveau_drm *drm) 239nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
240{ 240{
241 struct nvkm_device *device = nvxx_device(&drm->client.device);
242 struct nvkm_pci *pci = device->pci;
243 struct nvif_mmu *mmu = &drm->client.mmu; 241 struct nvif_mmu *mmu = &drm->client.mmu;
244 struct drm_device *dev = drm->dev; 242 int typei;
245 int typei, ret;
246 243
247 typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | 244 typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
248 NVIF_MEM_COHERENT); 245 kind | NVIF_MEM_COHERENT);
249 if (typei < 0) 246 if (typei < 0)
250 return -ENOSYS; 247 return -ENOSYS;
251 248
252 drm->ttm.type_host = typei; 249 drm->ttm.type_host[!!kind] = typei;
253 250
254 typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE); 251 typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | kind);
255 if (typei < 0) 252 if (typei < 0)
256 return -ENOSYS; 253 return -ENOSYS;
257 254
258 drm->ttm.type_ncoh = typei; 255 drm->ttm.type_ncoh[!!kind] = typei;
256 return 0;
257}
258
259int
260nouveau_ttm_init(struct nouveau_drm *drm)
261{
262 struct nvkm_device *device = nvxx_device(&drm->client.device);
263 struct nvkm_pci *pci = device->pci;
264 struct nvif_mmu *mmu = &drm->client.mmu;
265 struct drm_device *dev = drm->dev;
266 int typei, ret;
267
268 ret = nouveau_ttm_init_host(drm, 0);
269 if (ret)
270 return ret;
271
272 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
273 drm->client.device.info.chipset != 0x50) {
274 ret = nouveau_ttm_init_host(drm, NVIF_MEM_KIND);
275 if (ret)
276 return ret;
277 }
259 278
260 if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC && 279 if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
261 drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { 280 drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
index 9e2628dd8e4d..f5371d96b003 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
@@ -67,8 +67,8 @@ nouveau_vma_del(struct nouveau_vma **pvma)
67 nvif_vmm_put(&vma->vmm->vmm, &tmp); 67 nvif_vmm_put(&vma->vmm->vmm, &tmp);
68 } 68 }
69 list_del(&vma->head); 69 list_del(&vma->head);
70 *pvma = NULL;
71 kfree(*pvma); 70 kfree(*pvma);
71 *pvma = NULL;
72 } 72 }
73} 73}
74 74
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index e14643615698..08e77cd55e6e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1251,7 +1251,7 @@ nvaa_chipset = {
1251 .i2c = g94_i2c_new, 1251 .i2c = g94_i2c_new,
1252 .imem = nv50_instmem_new, 1252 .imem = nv50_instmem_new,
1253 .mc = g98_mc_new, 1253 .mc = g98_mc_new,
1254 .mmu = g84_mmu_new, 1254 .mmu = mcp77_mmu_new,
1255 .mxm = nv50_mxm_new, 1255 .mxm = nv50_mxm_new,
1256 .pci = g94_pci_new, 1256 .pci = g94_pci_new,
1257 .therm = g84_therm_new, 1257 .therm = g84_therm_new,
@@ -1283,7 +1283,7 @@ nvac_chipset = {
1283 .i2c = g94_i2c_new, 1283 .i2c = g94_i2c_new,
1284 .imem = nv50_instmem_new, 1284 .imem = nv50_instmem_new,
1285 .mc = g98_mc_new, 1285 .mc = g98_mc_new,
1286 .mmu = g84_mmu_new, 1286 .mmu = mcp77_mmu_new,
1287 .mxm = nv50_mxm_new, 1287 .mxm = nv50_mxm_new,
1288 .pci = g94_pci_new, 1288 .pci = g94_pci_new,
1289 .therm = g84_therm_new, 1289 .therm = g84_therm_new,
@@ -2369,7 +2369,7 @@ nv13b_chipset = {
2369 .imem = gk20a_instmem_new, 2369 .imem = gk20a_instmem_new,
2370 .ltc = gp100_ltc_new, 2370 .ltc = gp100_ltc_new,
2371 .mc = gp10b_mc_new, 2371 .mc = gp10b_mc_new,
2372 .mmu = gf100_mmu_new, 2372 .mmu = gp10b_mmu_new,
2373 .secboot = gp10b_secboot_new, 2373 .secboot = gp10b_secboot_new,
2374 .pmu = gm20b_pmu_new, 2374 .pmu = gm20b_pmu_new,
2375 .timer = gk20a_timer_new, 2375 .timer = gk20a_timer_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index a2978a37b4f3..700fc754f28a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -174,6 +174,7 @@ gf119_sor = {
174 .links = gf119_sor_dp_links, 174 .links = gf119_sor_dp_links,
175 .power = g94_sor_dp_power, 175 .power = g94_sor_dp_power,
176 .pattern = gf119_sor_dp_pattern, 176 .pattern = gf119_sor_dp_pattern,
177 .drive = gf119_sor_dp_drive,
177 .vcpi = gf119_sor_dp_vcpi, 178 .vcpi = gf119_sor_dp_vcpi,
178 .audio = gf119_sor_dp_audio, 179 .audio = gf119_sor_dp_audio,
179 .audio_sym = gf119_sor_dp_audio_sym, 180 .audio_sym = gf119_sor_dp_audio_sym,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
index 9646adec57cb..243f0a5c8a62 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
@@ -73,7 +73,8 @@ static int
73nvkm_bar_fini(struct nvkm_subdev *subdev, bool suspend) 73nvkm_bar_fini(struct nvkm_subdev *subdev, bool suspend)
74{ 74{
75 struct nvkm_bar *bar = nvkm_bar(subdev); 75 struct nvkm_bar *bar = nvkm_bar(subdev);
76 bar->func->bar1.fini(bar); 76 if (bar->func->bar1.fini)
77 bar->func->bar1.fini(bar);
77 return 0; 78 return 0;
78} 79}
79 80
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
index b10077d38839..35878fb538f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
@@ -26,7 +26,6 @@ gk20a_bar_func = {
26 .dtor = gf100_bar_dtor, 26 .dtor = gf100_bar_dtor,
27 .oneinit = gf100_bar_oneinit, 27 .oneinit = gf100_bar_oneinit,
28 .bar1.init = gf100_bar_bar1_init, 28 .bar1.init = gf100_bar_bar1_init,
29 .bar1.fini = gf100_bar_bar1_fini,
30 .bar1.wait = gf100_bar_bar1_wait, 29 .bar1.wait = gf100_bar_bar1_wait,
31 .bar1.vmm = gf100_bar_bar1_vmm, 30 .bar1.vmm = gf100_bar_bar1_vmm,
32 .flush = g84_bar_flush, 31 .flush = g84_bar_flush,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
index 972370ed36f0..7c7efa4ea0d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
@@ -36,6 +36,7 @@ nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
36 if (data) { 36 if (data) {
37 *ver = nvbios_rd08(bios, data + 0x00); 37 *ver = nvbios_rd08(bios, data + 0x00);
38 switch (*ver) { 38 switch (*ver) {
39 case 0x20:
39 case 0x21: 40 case 0x21:
40 case 0x30: 41 case 0x30:
41 case 0x40: 42 case 0x40:
@@ -63,6 +64,7 @@ nvbios_dpout_entry(struct nvkm_bios *bios, u8 idx,
63 if (data && idx < *cnt) { 64 if (data && idx < *cnt) {
64 u16 outp = nvbios_rd16(bios, data + *hdr + idx * *len); 65 u16 outp = nvbios_rd16(bios, data + *hdr + idx * *len);
65 switch (*ver * !!outp) { 66 switch (*ver * !!outp) {
67 case 0x20:
66 case 0x21: 68 case 0x21:
67 case 0x30: 69 case 0x30:
68 *hdr = nvbios_rd08(bios, data + 0x04); 70 *hdr = nvbios_rd08(bios, data + 0x04);
@@ -96,12 +98,16 @@ nvbios_dpout_parse(struct nvkm_bios *bios, u8 idx,
96 info->type = nvbios_rd16(bios, data + 0x00); 98 info->type = nvbios_rd16(bios, data + 0x00);
97 info->mask = nvbios_rd16(bios, data + 0x02); 99 info->mask = nvbios_rd16(bios, data + 0x02);
98 switch (*ver) { 100 switch (*ver) {
101 case 0x20:
102 info->mask |= 0x00c0; /* match any link */
103 /* fall-through */
99 case 0x21: 104 case 0x21:
100 case 0x30: 105 case 0x30:
101 info->flags = nvbios_rd08(bios, data + 0x05); 106 info->flags = nvbios_rd08(bios, data + 0x05);
102 info->script[0] = nvbios_rd16(bios, data + 0x06); 107 info->script[0] = nvbios_rd16(bios, data + 0x06);
103 info->script[1] = nvbios_rd16(bios, data + 0x08); 108 info->script[1] = nvbios_rd16(bios, data + 0x08);
104 info->lnkcmp = nvbios_rd16(bios, data + 0x0a); 109 if (*len >= 0x0c)
110 info->lnkcmp = nvbios_rd16(bios, data + 0x0a);
105 if (*len >= 0x0f) { 111 if (*len >= 0x0f) {
106 info->script[2] = nvbios_rd16(bios, data + 0x0c); 112 info->script[2] = nvbios_rd16(bios, data + 0x0c);
107 info->script[3] = nvbios_rd16(bios, data + 0x0e); 113 info->script[3] = nvbios_rd16(bios, data + 0x0e);
@@ -170,6 +176,7 @@ nvbios_dpcfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
170 memset(info, 0x00, sizeof(*info)); 176 memset(info, 0x00, sizeof(*info));
171 if (data) { 177 if (data) {
172 switch (*ver) { 178 switch (*ver) {
179 case 0x20:
173 case 0x21: 180 case 0x21:
174 info->dc = nvbios_rd08(bios, data + 0x02); 181 info->dc = nvbios_rd08(bios, data + 0x02);
175 info->pe = nvbios_rd08(bios, data + 0x03); 182 info->pe = nvbios_rd08(bios, data + 0x03);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index 1ba7289684aa..db48a1daca0c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -249,7 +249,7 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
249 iobj->base.memory.ptrs = &nv50_instobj_fast; 249 iobj->base.memory.ptrs = &nv50_instobj_fast;
250 else 250 else
251 iobj->base.memory.ptrs = &nv50_instobj_slow; 251 iobj->base.memory.ptrs = &nv50_instobj_slow;
252 refcount_inc(&iobj->maps); 252 refcount_set(&iobj->maps, 1);
253 } 253 }
254 254
255 mutex_unlock(&imem->subdev.mutex); 255 mutex_unlock(&imem->subdev.mutex);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
index 352a65f9371c..67ee983bb026 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
@@ -4,6 +4,7 @@ nvkm-y += nvkm/subdev/mmu/nv41.o
4nvkm-y += nvkm/subdev/mmu/nv44.o 4nvkm-y += nvkm/subdev/mmu/nv44.o
5nvkm-y += nvkm/subdev/mmu/nv50.o 5nvkm-y += nvkm/subdev/mmu/nv50.o
6nvkm-y += nvkm/subdev/mmu/g84.o 6nvkm-y += nvkm/subdev/mmu/g84.o
7nvkm-y += nvkm/subdev/mmu/mcp77.o
7nvkm-y += nvkm/subdev/mmu/gf100.o 8nvkm-y += nvkm/subdev/mmu/gf100.o
8nvkm-y += nvkm/subdev/mmu/gk104.o 9nvkm-y += nvkm/subdev/mmu/gk104.o
9nvkm-y += nvkm/subdev/mmu/gk20a.o 10nvkm-y += nvkm/subdev/mmu/gk20a.o
@@ -22,6 +23,7 @@ nvkm-y += nvkm/subdev/mmu/vmmnv04.o
22nvkm-y += nvkm/subdev/mmu/vmmnv41.o 23nvkm-y += nvkm/subdev/mmu/vmmnv41.o
23nvkm-y += nvkm/subdev/mmu/vmmnv44.o 24nvkm-y += nvkm/subdev/mmu/vmmnv44.o
24nvkm-y += nvkm/subdev/mmu/vmmnv50.o 25nvkm-y += nvkm/subdev/mmu/vmmnv50.o
26nvkm-y += nvkm/subdev/mmu/vmmmcp77.o
25nvkm-y += nvkm/subdev/mmu/vmmgf100.o 27nvkm-y += nvkm/subdev/mmu/vmmgf100.o
26nvkm-y += nvkm/subdev/mmu/vmmgk104.o 28nvkm-y += nvkm/subdev/mmu/vmmgk104.o
27nvkm-y += nvkm/subdev/mmu/vmmgk20a.o 29nvkm-y += nvkm/subdev/mmu/vmmgk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c
new file mode 100644
index 000000000000..0527b50730d9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c
@@ -0,0 +1,41 @@
1/*
2 * Copyright 2017 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "mem.h"
23#include "vmm.h"
24
25#include <nvif/class.h>
26
27static const struct nvkm_mmu_func
28mcp77_mmu = {
29 .dma_bits = 40,
30 .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
31 .mem = {{ -1, 0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
32 .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, mcp77_vmm_new, false, 0x0200 },
33 .kind = nv50_mmu_kind,
34 .kind_sys = true,
35};
36
37int
38mcp77_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
39{
40 return nvkm_mmu_new_(&mcp77_mmu, device, index, pmmu);
41}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index 6d8f61ea467a..da06e64d8a7d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -95,6 +95,9 @@ struct nvkm_vmm_desc {
95 const struct nvkm_vmm_desc_func *func; 95 const struct nvkm_vmm_desc_func *func;
96}; 96};
97 97
98extern const struct nvkm_vmm_desc nv50_vmm_desc_12[];
99extern const struct nvkm_vmm_desc nv50_vmm_desc_16[];
100
98extern const struct nvkm_vmm_desc gk104_vmm_desc_16_12[]; 101extern const struct nvkm_vmm_desc gk104_vmm_desc_16_12[];
99extern const struct nvkm_vmm_desc gk104_vmm_desc_16_16[]; 102extern const struct nvkm_vmm_desc gk104_vmm_desc_16_16[];
100extern const struct nvkm_vmm_desc gk104_vmm_desc_17_12[]; 103extern const struct nvkm_vmm_desc gk104_vmm_desc_17_12[];
@@ -169,6 +172,11 @@ int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
169 const char *, struct nvkm_vmm **); 172 const char *, struct nvkm_vmm **);
170int nv04_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *); 173int nv04_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
171 174
175int nv50_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
176void nv50_vmm_part(struct nvkm_vmm *, struct nvkm_memory *);
177int nv50_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
178void nv50_vmm_flush(struct nvkm_vmm *, int);
179
172int gf100_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *, 180int gf100_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
173 struct nvkm_mmu *, u64, u64, void *, u32, 181 struct nvkm_mmu *, u64, u64, void *, u32,
174 struct lock_class_key *, const char *, struct nvkm_vmm **); 182 struct lock_class_key *, const char *, struct nvkm_vmm **);
@@ -200,6 +208,8 @@ int nv44_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
200 struct lock_class_key *, const char *, struct nvkm_vmm **); 208 struct lock_class_key *, const char *, struct nvkm_vmm **);
201int nv50_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32, 209int nv50_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
202 struct lock_class_key *, const char *, struct nvkm_vmm **); 210 struct lock_class_key *, const char *, struct nvkm_vmm **);
211int mcp77_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
212 struct lock_class_key *, const char *, struct nvkm_vmm **);
203int g84_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32, 213int g84_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
204 struct lock_class_key *, const char *, struct nvkm_vmm **); 214 struct lock_class_key *, const char *, struct nvkm_vmm **);
205int gf100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32, 215int gf100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c
new file mode 100644
index 000000000000..e63d984cbfd4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c
@@ -0,0 +1,45 @@
1/*
2 * Copyright 2017 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "vmm.h"
23
24static const struct nvkm_vmm_func
25mcp77_vmm = {
26 .join = nv50_vmm_join,
27 .part = nv50_vmm_part,
28 .valid = nv50_vmm_valid,
29 .flush = nv50_vmm_flush,
30 .page_block = 1 << 29,
31 .page = {
32 { 16, &nv50_vmm_desc_16[0], NVKM_VMM_PAGE_xVxx },
33 { 12, &nv50_vmm_desc_12[0], NVKM_VMM_PAGE_xVHx },
34 {}
35 }
36};
37
38int
39mcp77_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
40 struct lock_class_key *key, const char *name,
41 struct nvkm_vmm **pvmm)
42{
43 return nv04_vmm_new_(&mcp77_vmm, mmu, 0, addr, size,
44 argv, argc, key, name, pvmm);
45}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
index 863a2edd9861..64f75d906202 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
@@ -32,7 +32,7 @@ static inline void
32nv50_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, 32nv50_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
33 u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr) 33 u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
34{ 34{
35 u64 next = addr | map->type, data; 35 u64 next = addr + map->type, data;
36 u32 pten; 36 u32 pten;
37 int log2blk; 37 int log2blk;
38 38
@@ -69,7 +69,7 @@ nv50_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
69 VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes); 69 VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
70 nvkm_kmap(pt->memory); 70 nvkm_kmap(pt->memory);
71 while (ptes--) { 71 while (ptes--) {
72 const u64 data = *map->dma++ | map->type; 72 const u64 data = *map->dma++ + map->type;
73 VMM_WO064(pt, vmm, ptei++ * 8, data); 73 VMM_WO064(pt, vmm, ptei++ * 8, data);
74 map->type += map->ctag; 74 map->type += map->ctag;
75 } 75 }
@@ -163,21 +163,21 @@ nv50_vmm_pgd = {
163 .pde = nv50_vmm_pgd_pde, 163 .pde = nv50_vmm_pgd_pde,
164}; 164};
165 165
166static const struct nvkm_vmm_desc 166const struct nvkm_vmm_desc
167nv50_vmm_desc_12[] = { 167nv50_vmm_desc_12[] = {
168 { PGT, 17, 8, 0x1000, &nv50_vmm_pgt }, 168 { PGT, 17, 8, 0x1000, &nv50_vmm_pgt },
169 { PGD, 11, 0, 0x0000, &nv50_vmm_pgd }, 169 { PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
170 {} 170 {}
171}; 171};
172 172
173static const struct nvkm_vmm_desc 173const struct nvkm_vmm_desc
174nv50_vmm_desc_16[] = { 174nv50_vmm_desc_16[] = {
175 { PGT, 13, 8, 0x1000, &nv50_vmm_pgt }, 175 { PGT, 13, 8, 0x1000, &nv50_vmm_pgt },
176 { PGD, 11, 0, 0x0000, &nv50_vmm_pgd }, 176 { PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
177 {} 177 {}
178}; 178};
179 179
180static void 180void
181nv50_vmm_flush(struct nvkm_vmm *vmm, int level) 181nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
182{ 182{
183 struct nvkm_subdev *subdev = &vmm->mmu->subdev; 183 struct nvkm_subdev *subdev = &vmm->mmu->subdev;
@@ -223,7 +223,7 @@ nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
223 mutex_unlock(&subdev->mutex); 223 mutex_unlock(&subdev->mutex);
224} 224}
225 225
226static int 226int
227nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, 227nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
228 struct nvkm_vmm_map *map) 228 struct nvkm_vmm_map *map)
229{ 229{
@@ -321,7 +321,7 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
321 return 0; 321 return 0;
322} 322}
323 323
324static void 324void
325nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst) 325nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
326{ 326{
327 struct nvkm_vmm_join *join; 327 struct nvkm_vmm_join *join;
@@ -335,7 +335,7 @@ nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
335 } 335 }
336} 336}
337 337
338static int 338int
339nv50_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst) 339nv50_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
340{ 340{
341 const u32 pd_offset = vmm->mmu->func->vmm.pd_offset; 341 const u32 pd_offset = vmm->mmu->func->vmm.pd_offset;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index b1b1f3626b96..ee2431a7804e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -71,6 +71,10 @@ nvkm_pci_intr(int irq, void *arg)
71 struct nvkm_pci *pci = arg; 71 struct nvkm_pci *pci = arg;
72 struct nvkm_device *device = pci->subdev.device; 72 struct nvkm_device *device = pci->subdev.device;
73 bool handled = false; 73 bool handled = false;
74
75 if (pci->irq < 0)
76 return IRQ_HANDLED;
77
74 nvkm_mc_intr_unarm(device); 78 nvkm_mc_intr_unarm(device);
75 if (pci->msi) 79 if (pci->msi)
76 pci->func->msi_rearm(pci); 80 pci->func->msi_rearm(pci);
@@ -84,11 +88,6 @@ nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
84{ 88{
85 struct nvkm_pci *pci = nvkm_pci(subdev); 89 struct nvkm_pci *pci = nvkm_pci(subdev);
86 90
87 if (pci->irq >= 0) {
88 free_irq(pci->irq, pci);
89 pci->irq = -1;
90 }
91
92 if (pci->agp.bridge) 91 if (pci->agp.bridge)
93 nvkm_agp_fini(pci); 92 nvkm_agp_fini(pci);
94 93
@@ -108,8 +107,20 @@ static int
108nvkm_pci_oneinit(struct nvkm_subdev *subdev) 107nvkm_pci_oneinit(struct nvkm_subdev *subdev)
109{ 108{
110 struct nvkm_pci *pci = nvkm_pci(subdev); 109 struct nvkm_pci *pci = nvkm_pci(subdev);
111 if (pci_is_pcie(pci->pdev)) 110 struct pci_dev *pdev = pci->pdev;
112 return nvkm_pcie_oneinit(pci); 111 int ret;
112
113 if (pci_is_pcie(pci->pdev)) {
114 ret = nvkm_pcie_oneinit(pci);
115 if (ret)
116 return ret;
117 }
118
119 ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
120 if (ret)
121 return ret;
122
123 pci->irq = pdev->irq;
113 return 0; 124 return 0;
114} 125}
115 126
@@ -117,7 +128,6 @@ static int
117nvkm_pci_init(struct nvkm_subdev *subdev) 128nvkm_pci_init(struct nvkm_subdev *subdev)
118{ 129{
119 struct nvkm_pci *pci = nvkm_pci(subdev); 130 struct nvkm_pci *pci = nvkm_pci(subdev);
120 struct pci_dev *pdev = pci->pdev;
121 int ret; 131 int ret;
122 132
123 if (pci->agp.bridge) { 133 if (pci->agp.bridge) {
@@ -131,21 +141,34 @@ nvkm_pci_init(struct nvkm_subdev *subdev)
131 if (pci->func->init) 141 if (pci->func->init)
132 pci->func->init(pci); 142 pci->func->init(pci);
133 143
134 ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci); 144 /* Ensure MSI interrupts are armed, for the case where there are
135 if (ret) 145 * already interrupts pending (for whatever reason) at load time.
136 return ret; 146 */
147 if (pci->msi)
148 pci->func->msi_rearm(pci);
137 149
138 pci->irq = pdev->irq; 150 return 0;
139 return ret;
140} 151}
141 152
142static void * 153static void *
143nvkm_pci_dtor(struct nvkm_subdev *subdev) 154nvkm_pci_dtor(struct nvkm_subdev *subdev)
144{ 155{
145 struct nvkm_pci *pci = nvkm_pci(subdev); 156 struct nvkm_pci *pci = nvkm_pci(subdev);
157
146 nvkm_agp_dtor(pci); 158 nvkm_agp_dtor(pci);
159
160 if (pci->irq >= 0) {
161 /* freq_irq() will call the handler, we use pci->irq == -1
162 * to signal that it's been torn down and should be a noop.
163 */
164 int irq = pci->irq;
165 pci->irq = -1;
166 free_irq(irq, pci);
167 }
168
147 if (pci->msi) 169 if (pci->msi)
148 pci_disable_msi(pci->pdev); 170 pci_disable_msi(pci->pdev);
171
149 return nvkm_pci(subdev); 172 return nvkm_pci(subdev);
150} 173}
151 174
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
index e626eddf24d5..23db74ae1826 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
@@ -78,6 +78,8 @@ static void hdmi_cec_received_msg(struct hdmi_core_data *core)
78 78
79 /* then read the message */ 79 /* then read the message */
80 msg.len = cnt & 0xf; 80 msg.len = cnt & 0xf;
81 if (msg.len > CEC_MAX_MSG_SIZE - 2)
82 msg.len = CEC_MAX_MSG_SIZE - 2;
81 msg.msg[0] = hdmi_read_reg(core->base, 83 msg.msg[0] = hdmi_read_reg(core->base,
82 HDMI_CEC_RX_CMD_HEADER); 84 HDMI_CEC_RX_CMD_HEADER);
83 msg.msg[1] = hdmi_read_reg(core->base, 85 msg.msg[1] = hdmi_read_reg(core->base,
@@ -104,26 +106,6 @@ static void hdmi_cec_received_msg(struct hdmi_core_data *core)
104 } 106 }
105} 107}
106 108
107static void hdmi_cec_transmit_fifo_empty(struct hdmi_core_data *core, u32 stat1)
108{
109 if (stat1 & 2) {
110 u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
111
112 cec_transmit_done(core->adap,
113 CEC_TX_STATUS_NACK |
114 CEC_TX_STATUS_MAX_RETRIES,
115 0, (dbg3 >> 4) & 7, 0, 0);
116 } else if (stat1 & 1) {
117 cec_transmit_done(core->adap,
118 CEC_TX_STATUS_ARB_LOST |
119 CEC_TX_STATUS_MAX_RETRIES,
120 0, 0, 0, 0);
121 } else if (stat1 == 0) {
122 cec_transmit_done(core->adap, CEC_TX_STATUS_OK,
123 0, 0, 0, 0);
124 }
125}
126
127void hdmi4_cec_irq(struct hdmi_core_data *core) 109void hdmi4_cec_irq(struct hdmi_core_data *core)
128{ 110{
129 u32 stat0 = hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_0); 111 u32 stat0 = hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_0);
@@ -132,27 +114,21 @@ void hdmi4_cec_irq(struct hdmi_core_data *core)
132 hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_0, stat0); 114 hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_0, stat0);
133 hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, stat1); 115 hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, stat1);
134 116
135 if (stat0 & 0x40) 117 if (stat0 & 0x20) {
118 cec_transmit_done(core->adap, CEC_TX_STATUS_OK,
119 0, 0, 0, 0);
136 REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7); 120 REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
137 else if (stat0 & 0x24) 121 } else if (stat1 & 0x02) {
138 hdmi_cec_transmit_fifo_empty(core, stat1);
139 if (stat1 & 2) {
140 u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3); 122 u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
141 123
142 cec_transmit_done(core->adap, 124 cec_transmit_done(core->adap,
143 CEC_TX_STATUS_NACK | 125 CEC_TX_STATUS_NACK |
144 CEC_TX_STATUS_MAX_RETRIES, 126 CEC_TX_STATUS_MAX_RETRIES,
145 0, (dbg3 >> 4) & 7, 0, 0); 127 0, (dbg3 >> 4) & 7, 0, 0);
146 } else if (stat1 & 1) { 128 REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
147 cec_transmit_done(core->adap,
148 CEC_TX_STATUS_ARB_LOST |
149 CEC_TX_STATUS_MAX_RETRIES,
150 0, 0, 0, 0);
151 } 129 }
152 if (stat0 & 0x02) 130 if (stat0 & 0x02)
153 hdmi_cec_received_msg(core); 131 hdmi_cec_received_msg(core);
154 if (stat1 & 0x3)
155 REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
156} 132}
157 133
158static bool hdmi_cec_clear_tx_fifo(struct cec_adapter *adap) 134static bool hdmi_cec_clear_tx_fifo(struct cec_adapter *adap)
@@ -231,18 +207,14 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
231 /* 207 /*
232 * Enable CEC interrupts: 208 * Enable CEC interrupts:
233 * Transmit Buffer Full/Empty Change event 209 * Transmit Buffer Full/Empty Change event
234 * Transmitter FIFO Empty event
235 * Receiver FIFO Not Empty event 210 * Receiver FIFO Not Empty event
236 */ 211 */
237 hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0x26); 212 hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0x22);
238 /* 213 /*
239 * Enable CEC interrupts: 214 * Enable CEC interrupts:
240 * RX FIFO Overrun Error event
241 * Short Pulse Detected event
242 * Frame Retransmit Count Exceeded event 215 * Frame Retransmit Count Exceeded event
243 * Start Bit Irregularity event
244 */ 216 */
245 hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0x0f); 217 hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0x02);
246 218
247 /* cec calibration enable (self clearing) */ 219 /* cec calibration enable (self clearing) */
248 hdmi_write_reg(core->base, HDMI_CEC_SETUP, 0x03); 220 hdmi_write_reg(core->base, HDMI_CEC_SETUP, 0x03);
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index dda904ec0534..500b6fb3e028 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -175,11 +175,31 @@ static void sun4i_hdmi_mode_set(struct drm_encoder *encoder,
175 writel(val, hdmi->base + SUN4I_HDMI_VID_TIMING_POL_REG); 175 writel(val, hdmi->base + SUN4I_HDMI_VID_TIMING_POL_REG);
176} 176}
177 177
178static enum drm_mode_status sun4i_hdmi_mode_valid(struct drm_encoder *encoder,
179 const struct drm_display_mode *mode)
180{
181 struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
182 unsigned long rate = mode->clock * 1000;
183 unsigned long diff = rate / 200; /* +-0.5% allowed by HDMI spec */
184 long rounded_rate;
185
186 /* 165 MHz is the typical max pixelclock frequency for HDMI <= 1.2 */
187 if (rate > 165000000)
188 return MODE_CLOCK_HIGH;
189 rounded_rate = clk_round_rate(hdmi->tmds_clk, rate);
190 if (rounded_rate > 0 &&
191 max_t(unsigned long, rounded_rate, rate) -
192 min_t(unsigned long, rounded_rate, rate) < diff)
193 return MODE_OK;
194 return MODE_NOCLOCK;
195}
196
178static const struct drm_encoder_helper_funcs sun4i_hdmi_helper_funcs = { 197static const struct drm_encoder_helper_funcs sun4i_hdmi_helper_funcs = {
179 .atomic_check = sun4i_hdmi_atomic_check, 198 .atomic_check = sun4i_hdmi_atomic_check,
180 .disable = sun4i_hdmi_disable, 199 .disable = sun4i_hdmi_disable,
181 .enable = sun4i_hdmi_enable, 200 .enable = sun4i_hdmi_enable,
182 .mode_set = sun4i_hdmi_mode_set, 201 .mode_set = sun4i_hdmi_mode_set,
202 .mode_valid = sun4i_hdmi_mode_valid,
183}; 203};
184 204
185static const struct drm_encoder_funcs sun4i_hdmi_funcs = { 205static const struct drm_encoder_funcs sun4i_hdmi_funcs = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
index dc332ea56f6c..3ecffa52c814 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
@@ -102,10 +102,13 @@ static int sun4i_tmds_determine_rate(struct clk_hw *hw,
102 goto out; 102 goto out;
103 } 103 }
104 104
105 if (abs(rate - rounded / i) < 105 if (!best_parent ||
106 abs(rate - best_parent / best_div)) { 106 abs(rate - rounded / i / j) <
107 abs(rate - best_parent / best_half /
108 best_div)) {
107 best_parent = rounded; 109 best_parent = rounded;
108 best_div = i; 110 best_half = i;
111 best_div = j;
109 } 112 }
110 } 113 }
111 } 114 }
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index e122f5b2a395..f4284b51bdca 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -724,12 +724,12 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
724 if (IS_ERR(tcon->crtc)) { 724 if (IS_ERR(tcon->crtc)) {
725 dev_err(dev, "Couldn't create our CRTC\n"); 725 dev_err(dev, "Couldn't create our CRTC\n");
726 ret = PTR_ERR(tcon->crtc); 726 ret = PTR_ERR(tcon->crtc);
727 goto err_free_clocks; 727 goto err_free_dotclock;
728 } 728 }
729 729
730 ret = sun4i_rgb_init(drm, tcon); 730 ret = sun4i_rgb_init(drm, tcon);
731 if (ret < 0) 731 if (ret < 0)
732 goto err_free_clocks; 732 goto err_free_dotclock;
733 733
734 if (tcon->quirks->needs_de_be_mux) { 734 if (tcon->quirks->needs_de_be_mux) {
735 /* 735 /*
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index b0a1dedac802..476079f1255f 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2656,6 +2656,9 @@ static int tegra_sor_probe(struct platform_device *pdev)
2656 name, err); 2656 name, err);
2657 goto remove; 2657 goto remove;
2658 } 2658 }
2659 } else {
2660 /* fall back to the module clock on SOR0 (eDP/LVDS only) */
2661 sor->clk_out = sor->clk;
2659 } 2662 }
2660 2663
2661 sor->clk_parent = devm_clk_get(&pdev->dev, "parent"); 2664 sor->clk_parent = devm_clk_get(&pdev->dev, "parent");
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 8d7172e8381d..5d252fb27a82 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -81,6 +81,7 @@ struct ttm_page_pool {
81 char *name; 81 char *name;
82 unsigned long nfrees; 82 unsigned long nfrees;
83 unsigned long nrefills; 83 unsigned long nrefills;
84 unsigned int order;
84}; 85};
85 86
86/** 87/**
@@ -222,6 +223,17 @@ static struct kobj_type ttm_pool_kobj_type = {
222static struct ttm_pool_manager *_manager; 223static struct ttm_pool_manager *_manager;
223 224
224#ifndef CONFIG_X86 225#ifndef CONFIG_X86
226static int set_pages_wb(struct page *page, int numpages)
227{
228#if IS_ENABLED(CONFIG_AGP)
229 int i;
230
231 for (i = 0; i < numpages; i++)
232 unmap_page_from_agp(page++);
233#endif
234 return 0;
235}
236
225static int set_pages_array_wb(struct page **pages, int addrinarray) 237static int set_pages_array_wb(struct page **pages, int addrinarray)
226{ 238{
227#if IS_ENABLED(CONFIG_AGP) 239#if IS_ENABLED(CONFIG_AGP)
@@ -284,13 +296,23 @@ static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
284} 296}
285 297
286/* set memory back to wb and free the pages. */ 298/* set memory back to wb and free the pages. */
287static void ttm_pages_put(struct page *pages[], unsigned npages) 299static void ttm_pages_put(struct page *pages[], unsigned npages,
300 unsigned int order)
288{ 301{
289 unsigned i; 302 unsigned int i, pages_nr = (1 << order);
290 if (set_pages_array_wb(pages, npages)) 303
291 pr_err("Failed to set %d pages to wb!\n", npages); 304 if (order == 0) {
292 for (i = 0; i < npages; ++i) 305 if (set_pages_array_wb(pages, npages))
293 __free_page(pages[i]); 306 pr_err("Failed to set %d pages to wb!\n", npages);
307 }
308
309 for (i = 0; i < npages; ++i) {
310 if (order > 0) {
311 if (set_pages_wb(pages[i], pages_nr))
312 pr_err("Failed to set %d pages to wb!\n", pages_nr);
313 }
314 __free_pages(pages[i], order);
315 }
294} 316}
295 317
296static void ttm_pool_update_free_locked(struct ttm_page_pool *pool, 318static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
@@ -353,7 +375,7 @@ restart:
353 */ 375 */
354 spin_unlock_irqrestore(&pool->lock, irq_flags); 376 spin_unlock_irqrestore(&pool->lock, irq_flags);
355 377
356 ttm_pages_put(pages_to_free, freed_pages); 378 ttm_pages_put(pages_to_free, freed_pages, pool->order);
357 if (likely(nr_free != FREE_ALL_PAGES)) 379 if (likely(nr_free != FREE_ALL_PAGES))
358 nr_free -= freed_pages; 380 nr_free -= freed_pages;
359 381
@@ -388,7 +410,7 @@ restart:
388 spin_unlock_irqrestore(&pool->lock, irq_flags); 410 spin_unlock_irqrestore(&pool->lock, irq_flags);
389 411
390 if (freed_pages) 412 if (freed_pages)
391 ttm_pages_put(pages_to_free, freed_pages); 413 ttm_pages_put(pages_to_free, freed_pages, pool->order);
392out: 414out:
393 if (pages_to_free != static_buf) 415 if (pages_to_free != static_buf)
394 kfree(pages_to_free); 416 kfree(pages_to_free);
@@ -412,6 +434,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
412 struct ttm_page_pool *pool; 434 struct ttm_page_pool *pool;
413 int shrink_pages = sc->nr_to_scan; 435 int shrink_pages = sc->nr_to_scan;
414 unsigned long freed = 0; 436 unsigned long freed = 0;
437 unsigned int nr_free_pool;
415 438
416 if (!mutex_trylock(&lock)) 439 if (!mutex_trylock(&lock))
417 return SHRINK_STOP; 440 return SHRINK_STOP;
@@ -419,12 +442,20 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
419 /* select start pool in round robin fashion */ 442 /* select start pool in round robin fashion */
420 for (i = 0; i < NUM_POOLS; ++i) { 443 for (i = 0; i < NUM_POOLS; ++i) {
421 unsigned nr_free = shrink_pages; 444 unsigned nr_free = shrink_pages;
445 unsigned page_nr;
446
422 if (shrink_pages == 0) 447 if (shrink_pages == 0)
423 break; 448 break;
449
424 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS]; 450 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
451 page_nr = (1 << pool->order);
425 /* OK to use static buffer since global mutex is held. */ 452 /* OK to use static buffer since global mutex is held. */
426 shrink_pages = ttm_page_pool_free(pool, nr_free, true); 453 nr_free_pool = roundup(nr_free, page_nr) >> pool->order;
427 freed += nr_free - shrink_pages; 454 shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
455 freed += (nr_free_pool - shrink_pages) << pool->order;
456 if (freed >= sc->nr_to_scan)
457 break;
458 shrink_pages <<= pool->order;
428 } 459 }
429 mutex_unlock(&lock); 460 mutex_unlock(&lock);
430 return freed; 461 return freed;
@@ -436,9 +467,12 @@ ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
436{ 467{
437 unsigned i; 468 unsigned i;
438 unsigned long count = 0; 469 unsigned long count = 0;
470 struct ttm_page_pool *pool;
439 471
440 for (i = 0; i < NUM_POOLS; ++i) 472 for (i = 0; i < NUM_POOLS; ++i) {
441 count += _manager->pools[i].npages; 473 pool = &_manager->pools[i];
474 count += (pool->npages << pool->order);
475 }
442 476
443 return count; 477 return count;
444} 478}
@@ -510,8 +544,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
510 int r = 0; 544 int r = 0;
511 unsigned i, j, cpages; 545 unsigned i, j, cpages;
512 unsigned npages = 1 << order; 546 unsigned npages = 1 << order;
513 unsigned max_cpages = min(count, 547 unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);
514 (unsigned)(PAGE_SIZE/sizeof(struct page *)));
515 548
516 /* allocate array for page caching change */ 549 /* allocate array for page caching change */
517 caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL); 550 caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
@@ -845,7 +878,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
845#endif 878#endif
846 struct list_head plist; 879 struct list_head plist;
847 struct page *p = NULL; 880 struct page *p = NULL;
848 unsigned count; 881 unsigned count, first;
849 int r; 882 int r;
850 883
851 /* No pool for cached pages */ 884 /* No pool for cached pages */
@@ -886,6 +919,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
886 } 919 }
887#endif 920#endif
888 921
922 first = i;
889 while (npages) { 923 while (npages) {
890 p = alloc_page(gfp_flags); 924 p = alloc_page(gfp_flags);
891 if (!p) { 925 if (!p) {
@@ -893,6 +927,10 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
893 return -ENOMEM; 927 return -ENOMEM;
894 } 928 }
895 929
930 /* Swap the pages if we detect consecutive order */
931 if (i > first && pages[i - 1] == p - 1)
932 swap(p, pages[i - 1]);
933
896 pages[i++] = p; 934 pages[i++] = p;
897 --npages; 935 --npages;
898 } 936 }
@@ -921,8 +959,15 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
921 r = ttm_page_pool_get_pages(pool, &plist, flags, cstate, 959 r = ttm_page_pool_get_pages(pool, &plist, flags, cstate,
922 npages - count, 0); 960 npages - count, 0);
923 961
924 list_for_each_entry(p, &plist, lru) 962 first = count;
925 pages[count++] = p; 963 list_for_each_entry(p, &plist, lru) {
964 struct page *tmp = p;
965
966 /* Swap the pages if we detect consecutive order */
967 if (count > first && pages[count - 1] == tmp - 1)
968 swap(tmp, pages[count - 1]);
969 pages[count++] = tmp;
970 }
926 971
927 if (r) { 972 if (r) {
928 /* If there is any pages in the list put them back to 973 /* If there is any pages in the list put them back to
@@ -937,7 +982,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
937} 982}
938 983
939static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags, 984static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
940 char *name) 985 char *name, unsigned int order)
941{ 986{
942 spin_lock_init(&pool->lock); 987 spin_lock_init(&pool->lock);
943 pool->fill_lock = false; 988 pool->fill_lock = false;
@@ -945,35 +990,43 @@ static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
945 pool->npages = pool->nfrees = 0; 990 pool->npages = pool->nfrees = 0;
946 pool->gfp_flags = flags; 991 pool->gfp_flags = flags;
947 pool->name = name; 992 pool->name = name;
993 pool->order = order;
948} 994}
949 995
950int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) 996int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
951{ 997{
952 int ret; 998 int ret;
999#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1000 unsigned order = HPAGE_PMD_ORDER;
1001#else
1002 unsigned order = 0;
1003#endif
953 1004
954 WARN_ON(_manager); 1005 WARN_ON(_manager);
955 1006
956 pr_info("Initializing pool allocator\n"); 1007 pr_info("Initializing pool allocator\n");
957 1008
958 _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); 1009 _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
1010 if (!_manager)
1011 return -ENOMEM;
959 1012
960 ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc"); 1013 ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);
961 1014
962 ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc"); 1015 ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0);
963 1016
964 ttm_page_pool_init_locked(&_manager->wc_pool_dma32, 1017 ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
965 GFP_USER | GFP_DMA32, "wc dma"); 1018 GFP_USER | GFP_DMA32, "wc dma", 0);
966 1019
967 ttm_page_pool_init_locked(&_manager->uc_pool_dma32, 1020 ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
968 GFP_USER | GFP_DMA32, "uc dma"); 1021 GFP_USER | GFP_DMA32, "uc dma", 0);
969 1022
970 ttm_page_pool_init_locked(&_manager->wc_pool_huge, 1023 ttm_page_pool_init_locked(&_manager->wc_pool_huge,
971 GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP), 1024 GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP),
972 "wc huge"); 1025 "wc huge", order);
973 1026
974 ttm_page_pool_init_locked(&_manager->uc_pool_huge, 1027 ttm_page_pool_init_locked(&_manager->uc_pool_huge,
975 GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP) 1028 GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP)
976 , "uc huge"); 1029 , "uc huge", order);
977 1030
978 _manager->options.max_size = max_pages; 1031 _manager->options.max_size = max_pages;
979 _manager->options.small = SMALL_ALLOCATION; 1032 _manager->options.small = SMALL_ALLOCATION;
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 4ae45d7dac42..2decc8e2c79f 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -637,7 +637,8 @@ int vc4_bo_inc_usecnt(struct vc4_bo *bo)
637 mutex_lock(&bo->madv_lock); 637 mutex_lock(&bo->madv_lock);
638 switch (bo->madv) { 638 switch (bo->madv) {
639 case VC4_MADV_WILLNEED: 639 case VC4_MADV_WILLNEED:
640 refcount_inc(&bo->usecnt); 640 if (!refcount_inc_not_zero(&bo->usecnt))
641 refcount_set(&bo->usecnt, 1);
641 ret = 0; 642 ret = 0;
642 break; 643 break;
643 case VC4_MADV_DONTNEED: 644 case VC4_MADV_DONTNEED:
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 6c32c89a83a9..c94cce96544c 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -146,7 +146,7 @@ vc4_save_hang_state(struct drm_device *dev)
146 struct vc4_exec_info *exec[2]; 146 struct vc4_exec_info *exec[2];
147 struct vc4_bo *bo; 147 struct vc4_bo *bo;
148 unsigned long irqflags; 148 unsigned long irqflags;
149 unsigned int i, j, unref_list_count, prev_idx; 149 unsigned int i, j, k, unref_list_count;
150 150
151 kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL); 151 kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
152 if (!kernel_state) 152 if (!kernel_state)
@@ -182,7 +182,7 @@ vc4_save_hang_state(struct drm_device *dev)
182 return; 182 return;
183 } 183 }
184 184
185 prev_idx = 0; 185 k = 0;
186 for (i = 0; i < 2; i++) { 186 for (i = 0; i < 2; i++) {
187 if (!exec[i]) 187 if (!exec[i])
188 continue; 188 continue;
@@ -197,7 +197,7 @@ vc4_save_hang_state(struct drm_device *dev)
197 WARN_ON(!refcount_read(&bo->usecnt)); 197 WARN_ON(!refcount_read(&bo->usecnt));
198 refcount_inc(&bo->usecnt); 198 refcount_inc(&bo->usecnt);
199 drm_gem_object_get(&exec[i]->bo[j]->base); 199 drm_gem_object_get(&exec[i]->bo[j]->base);
200 kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base; 200 kernel_state->bo[k++] = &exec[i]->bo[j]->base;
201 } 201 }
202 202
203 list_for_each_entry(bo, &exec[i]->unref_list, unref_head) { 203 list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
@@ -205,12 +205,12 @@ vc4_save_hang_state(struct drm_device *dev)
205 * because they are naturally unpurgeable. 205 * because they are naturally unpurgeable.
206 */ 206 */
207 drm_gem_object_get(&bo->base.base); 207 drm_gem_object_get(&bo->base.base);
208 kernel_state->bo[j + prev_idx] = &bo->base.base; 208 kernel_state->bo[k++] = &bo->base.base;
209 j++;
210 } 209 }
211 prev_idx = j + 1;
212 } 210 }
213 211
212 WARN_ON_ONCE(k != state->bo_count);
213
214 if (exec[0]) 214 if (exec[0])
215 state->start_bin = exec[0]->ct0ca; 215 state->start_bin = exec[0]->ct0ca;
216 if (exec[1]) 216 if (exec[1])
@@ -436,6 +436,19 @@ vc4_flush_caches(struct drm_device *dev)
436 VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC)); 436 VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
437} 437}
438 438
439static void
440vc4_flush_texture_caches(struct drm_device *dev)
441{
442 struct vc4_dev *vc4 = to_vc4_dev(dev);
443
444 V3D_WRITE(V3D_L2CACTL,
445 V3D_L2CACTL_L2CCLR);
446
447 V3D_WRITE(V3D_SLCACTL,
448 VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
449 VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
450}
451
439/* Sets the registers for the next job to be actually be executed in 452/* Sets the registers for the next job to be actually be executed in
440 * the hardware. 453 * the hardware.
441 * 454 *
@@ -474,6 +487,14 @@ vc4_submit_next_render_job(struct drm_device *dev)
474 if (!exec) 487 if (!exec)
475 return; 488 return;
476 489
490 /* A previous RCL may have written to one of our textures, and
491 * our full cache flush at bin time may have occurred before
492 * that RCL completed. Flush the texture cache now, but not
493 * the instructions or uniforms (since we don't write those
494 * from an RCL).
495 */
496 vc4_flush_texture_caches(dev);
497
477 submit_cl(dev, 1, exec->ct1ca, exec->ct1ea); 498 submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
478} 499}
479 500
@@ -888,8 +909,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
888 /* If we got force-completed because of GPU reset rather than 909 /* If we got force-completed because of GPU reset rather than
889 * through our IRQ handler, signal the fence now. 910 * through our IRQ handler, signal the fence now.
890 */ 911 */
891 if (exec->fence) 912 if (exec->fence) {
892 dma_fence_signal(exec->fence); 913 dma_fence_signal(exec->fence);
914 dma_fence_put(exec->fence);
915 }
893 916
894 if (exec->bo) { 917 if (exec->bo) {
895 for (i = 0; i < exec->bo_count; i++) { 918 for (i = 0; i < exec->bo_count; i++) {
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 61b2e5377993..3dd62d75f531 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -139,6 +139,7 @@ vc4_irq_finish_render_job(struct drm_device *dev)
139 list_move_tail(&exec->head, &vc4->job_done_list); 139 list_move_tail(&exec->head, &vc4->job_done_list);
140 if (exec->fence) { 140 if (exec->fence) {
141 dma_fence_signal_locked(exec->fence); 141 dma_fence_signal_locked(exec->fence);
142 dma_fence_put(exec->fence);
142 exec->fence = NULL; 143 exec->fence = NULL;
143 } 144 }
144 vc4_submit_next_render_job(dev); 145 vc4_submit_next_render_job(dev);
@@ -208,9 +209,6 @@ vc4_irq_postinstall(struct drm_device *dev)
208{ 209{
209 struct vc4_dev *vc4 = to_vc4_dev(dev); 210 struct vc4_dev *vc4 = to_vc4_dev(dev);
210 211
211 /* Undo the effects of a previous vc4_irq_uninstall. */
212 enable_irq(dev->irq);
213
214 /* Enable both the render done and out of memory interrupts. */ 212 /* Enable both the render done and out of memory interrupts. */
215 V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS); 213 V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
216 214
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 622cd43840b8..493f392b3a0a 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -327,6 +327,9 @@ static int vc4_v3d_runtime_resume(struct device *dev)
327 return ret; 327 return ret;
328 328
329 vc4_v3d_init_hw(vc4->dev); 329 vc4_v3d_init_hw(vc4->dev);
330
331 /* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */
332 enable_irq(vc4->dev->irq);
330 vc4_irq_postinstall(vc4->dev); 333 vc4_irq_postinstall(vc4->dev);
331 334
332 return 0; 335 return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 21c62a34e558..87e8af5776a3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2731,6 +2731,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2731 } 2731 }
2732 2732
2733 view_type = vmw_view_cmd_to_type(header->id); 2733 view_type = vmw_view_cmd_to_type(header->id);
2734 if (view_type == vmw_view_max)
2735 return -EINVAL;
2734 cmd = container_of(header, typeof(*cmd), header); 2736 cmd = container_of(header, typeof(*cmd), header);
2735 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2737 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2736 user_surface_converter, 2738 user_surface_converter,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 0545740b3724..fcd58145d0da 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -697,7 +697,6 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
697 vps->pinned = 0; 697 vps->pinned = 0;
698 698
699 /* Mapping is managed by prepare_fb/cleanup_fb */ 699 /* Mapping is managed by prepare_fb/cleanup_fb */
700 memset(&vps->guest_map, 0, sizeof(vps->guest_map));
701 memset(&vps->host_map, 0, sizeof(vps->host_map)); 700 memset(&vps->host_map, 0, sizeof(vps->host_map));
702 vps->cpp = 0; 701 vps->cpp = 0;
703 702
@@ -760,11 +759,6 @@ vmw_du_plane_destroy_state(struct drm_plane *plane,
760 759
761 760
762 /* Should have been freed by cleanup_fb */ 761 /* Should have been freed by cleanup_fb */
763 if (vps->guest_map.virtual) {
764 DRM_ERROR("Guest mapping not freed\n");
765 ttm_bo_kunmap(&vps->guest_map);
766 }
767
768 if (vps->host_map.virtual) { 762 if (vps->host_map.virtual) {
769 DRM_ERROR("Host mapping not freed\n"); 763 DRM_ERROR("Host mapping not freed\n");
770 ttm_bo_kunmap(&vps->host_map); 764 ttm_bo_kunmap(&vps->host_map);
@@ -1869,7 +1863,7 @@ u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
1869 */ 1863 */
1870int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe) 1864int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
1871{ 1865{
1872 return -ENOSYS; 1866 return -EINVAL;
1873} 1867}
1874 1868
1875/** 1869/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index ff9c8389ff21..cd9da2dd79af 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -175,7 +175,7 @@ struct vmw_plane_state {
175 int pinned; 175 int pinned;
176 176
177 /* For CPU Blit */ 177 /* For CPU Blit */
178 struct ttm_bo_kmap_obj host_map, guest_map; 178 struct ttm_bo_kmap_obj host_map;
179 unsigned int cpp; 179 unsigned int cpp;
180}; 180};
181 181
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index b8a09807c5de..3824595fece1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -266,8 +266,8 @@ static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
266 .set_property = vmw_du_connector_set_property, 266 .set_property = vmw_du_connector_set_property,
267 .destroy = vmw_ldu_connector_destroy, 267 .destroy = vmw_ldu_connector_destroy,
268 .reset = vmw_du_connector_reset, 268 .reset = vmw_du_connector_reset,
269 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 269 .atomic_duplicate_state = vmw_du_connector_duplicate_state,
270 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 270 .atomic_destroy_state = vmw_du_connector_destroy_state,
271 .atomic_set_property = vmw_du_connector_atomic_set_property, 271 .atomic_set_property = vmw_du_connector_atomic_set_property,
272 .atomic_get_property = vmw_du_connector_atomic_get_property, 272 .atomic_get_property = vmw_du_connector_atomic_get_property,
273}; 273};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index bc5f6026573d..63a4cd794b73 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -420,8 +420,8 @@ static const struct drm_connector_funcs vmw_sou_connector_funcs = {
420 .set_property = vmw_du_connector_set_property, 420 .set_property = vmw_du_connector_set_property,
421 .destroy = vmw_sou_connector_destroy, 421 .destroy = vmw_sou_connector_destroy,
422 .reset = vmw_du_connector_reset, 422 .reset = vmw_du_connector_reset,
423 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 423 .atomic_duplicate_state = vmw_du_connector_duplicate_state,
424 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 424 .atomic_destroy_state = vmw_du_connector_destroy_state,
425 .atomic_set_property = vmw_du_connector_atomic_set_property, 425 .atomic_set_property = vmw_du_connector_atomic_set_property,
426 .atomic_get_property = vmw_du_connector_atomic_get_property, 426 .atomic_get_property = vmw_du_connector_atomic_get_property,
427}; 427};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 90b5437fd787..b68d74888ab1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -114,7 +114,7 @@ struct vmw_screen_target_display_unit {
114 bool defined; 114 bool defined;
115 115
116 /* For CPU Blit */ 116 /* For CPU Blit */
117 struct ttm_bo_kmap_obj host_map, guest_map; 117 struct ttm_bo_kmap_obj host_map;
118 unsigned int cpp; 118 unsigned int cpp;
119}; 119};
120 120
@@ -695,7 +695,8 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
695 s32 src_pitch, dst_pitch; 695 s32 src_pitch, dst_pitch;
696 u8 *src, *dst; 696 u8 *src, *dst;
697 bool not_used; 697 bool not_used;
698 698 struct ttm_bo_kmap_obj guest_map;
699 int ret;
699 700
700 if (!dirty->num_hits) 701 if (!dirty->num_hits)
701 return; 702 return;
@@ -706,6 +707,13 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
706 if (width == 0 || height == 0) 707 if (width == 0 || height == 0)
707 return; 708 return;
708 709
710 ret = ttm_bo_kmap(&ddirty->buf->base, 0, ddirty->buf->base.num_pages,
711 &guest_map);
712 if (ret) {
713 DRM_ERROR("Failed mapping framebuffer for blit: %d\n",
714 ret);
715 goto out_cleanup;
716 }
709 717
710 /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */ 718 /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */
711 src_pitch = stdu->display_srf->base_size.width * stdu->cpp; 719 src_pitch = stdu->display_srf->base_size.width * stdu->cpp;
@@ -713,7 +721,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
713 src += ddirty->top * src_pitch + ddirty->left * stdu->cpp; 721 src += ddirty->top * src_pitch + ddirty->left * stdu->cpp;
714 722
715 dst_pitch = ddirty->pitch; 723 dst_pitch = ddirty->pitch;
716 dst = ttm_kmap_obj_virtual(&stdu->guest_map, &not_used); 724 dst = ttm_kmap_obj_virtual(&guest_map, &not_used);
717 dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp; 725 dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
718 726
719 727
@@ -772,6 +780,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
772 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 780 vmw_fifo_commit(dev_priv, sizeof(*cmd));
773 } 781 }
774 782
783 ttm_bo_kunmap(&guest_map);
775out_cleanup: 784out_cleanup:
776 ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX; 785 ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX;
777 ddirty->right = ddirty->bottom = S32_MIN; 786 ddirty->right = ddirty->bottom = S32_MIN;
@@ -1109,9 +1118,6 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
1109{ 1118{
1110 struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); 1119 struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
1111 1120
1112 if (vps->guest_map.virtual)
1113 ttm_bo_kunmap(&vps->guest_map);
1114
1115 if (vps->host_map.virtual) 1121 if (vps->host_map.virtual)
1116 ttm_bo_kunmap(&vps->host_map); 1122 ttm_bo_kunmap(&vps->host_map);
1117 1123
@@ -1277,33 +1283,11 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
1277 */ 1283 */
1278 if (vps->content_fb_type == SEPARATE_DMA && 1284 if (vps->content_fb_type == SEPARATE_DMA &&
1279 !(dev_priv->capabilities & SVGA_CAP_3D)) { 1285 !(dev_priv->capabilities & SVGA_CAP_3D)) {
1280
1281 struct vmw_framebuffer_dmabuf *new_vfbd;
1282
1283 new_vfbd = vmw_framebuffer_to_vfbd(new_fb);
1284
1285 ret = ttm_bo_reserve(&new_vfbd->buffer->base, false, false,
1286 NULL);
1287 if (ret)
1288 goto out_srf_unpin;
1289
1290 ret = ttm_bo_kmap(&new_vfbd->buffer->base, 0,
1291 new_vfbd->buffer->base.num_pages,
1292 &vps->guest_map);
1293
1294 ttm_bo_unreserve(&new_vfbd->buffer->base);
1295
1296 if (ret) {
1297 DRM_ERROR("Failed to map content buffer to CPU\n");
1298 goto out_srf_unpin;
1299 }
1300
1301 ret = ttm_bo_kmap(&vps->surf->res.backup->base, 0, 1286 ret = ttm_bo_kmap(&vps->surf->res.backup->base, 0,
1302 vps->surf->res.backup->base.num_pages, 1287 vps->surf->res.backup->base.num_pages,
1303 &vps->host_map); 1288 &vps->host_map);
1304 if (ret) { 1289 if (ret) {
1305 DRM_ERROR("Failed to map display buffer to CPU\n"); 1290 DRM_ERROR("Failed to map display buffer to CPU\n");
1306 ttm_bo_kunmap(&vps->guest_map);
1307 goto out_srf_unpin; 1291 goto out_srf_unpin;
1308 } 1292 }
1309 1293
@@ -1350,7 +1334,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
1350 stdu->display_srf = vps->surf; 1334 stdu->display_srf = vps->surf;
1351 stdu->content_fb_type = vps->content_fb_type; 1335 stdu->content_fb_type = vps->content_fb_type;
1352 stdu->cpp = vps->cpp; 1336 stdu->cpp = vps->cpp;
1353 memcpy(&stdu->guest_map, &vps->guest_map, sizeof(vps->guest_map));
1354 memcpy(&stdu->host_map, &vps->host_map, sizeof(vps->host_map)); 1337 memcpy(&stdu->host_map, &vps->host_map, sizeof(vps->host_map));
1355 1338
1356 if (!stdu->defined) 1339 if (!stdu->defined)
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f3fcb836a1f9..0c3f608131cf 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -551,7 +551,7 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
551 ret = hid_add_field(parser, HID_FEATURE_REPORT, data); 551 ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
552 break; 552 break;
553 default: 553 default:
554 hid_err(parser->device, "unknown main item tag 0x%x\n", item->tag); 554 hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag);
555 ret = 0; 555 ret = 0;
556 } 556 }
557 557
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 68cdc962265b..271f31461da4 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -696,8 +696,16 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
696 (u8 *)&word, 2); 696 (u8 *)&word, 2);
697 break; 697 break;
698 case I2C_SMBUS_I2C_BLOCK_DATA: 698 case I2C_SMBUS_I2C_BLOCK_DATA:
699 size = I2C_SMBUS_BLOCK_DATA; 699 if (read_write == I2C_SMBUS_READ) {
700 /* fallthrough */ 700 read_length = data->block[0];
701 count = cp2112_write_read_req(buf, addr, read_length,
702 command, NULL, 0);
703 } else {
704 count = cp2112_write_req(buf, addr, command,
705 data->block + 1,
706 data->block[0]);
707 }
708 break;
701 case I2C_SMBUS_BLOCK_DATA: 709 case I2C_SMBUS_BLOCK_DATA:
702 if (I2C_SMBUS_READ == read_write) { 710 if (I2C_SMBUS_READ == read_write) {
703 count = cp2112_write_read_req(buf, addr, 711 count = cp2112_write_read_req(buf, addr,
@@ -785,6 +793,9 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
785 case I2C_SMBUS_WORD_DATA: 793 case I2C_SMBUS_WORD_DATA:
786 data->word = le16_to_cpup((__le16 *)buf); 794 data->word = le16_to_cpup((__le16 *)buf);
787 break; 795 break;
796 case I2C_SMBUS_I2C_BLOCK_DATA:
797 memcpy(data->block + 1, buf, read_length);
798 break;
788 case I2C_SMBUS_BLOCK_DATA: 799 case I2C_SMBUS_BLOCK_DATA:
789 if (read_length > I2C_SMBUS_BLOCK_MAX) { 800 if (read_length > I2C_SMBUS_BLOCK_MAX) {
790 ret = -EPROTO; 801 ret = -EPROTO;
diff --git a/drivers/hid/hid-holtekff.c b/drivers/hid/hid-holtekff.c
index 9325545fc3ae..edc0f64bb584 100644
--- a/drivers/hid/hid-holtekff.c
+++ b/drivers/hid/hid-holtekff.c
@@ -32,10 +32,6 @@
32 32
33#ifdef CONFIG_HOLTEK_FF 33#ifdef CONFIG_HOLTEK_FF
34 34
35MODULE_LICENSE("GPL");
36MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
37MODULE_DESCRIPTION("Force feedback support for Holtek On Line Grip based devices");
38
39/* 35/*
40 * These commands and parameters are currently known: 36 * These commands and parameters are currently known:
41 * 37 *
@@ -223,3 +219,7 @@ static struct hid_driver holtek_driver = {
223 .probe = holtek_probe, 219 .probe = holtek_probe,
224}; 220};
225module_hid_driver(holtek_driver); 221module_hid_driver(holtek_driver);
222
223MODULE_LICENSE("GPL");
224MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
225MODULE_DESCRIPTION("Force feedback support for Holtek On Line Grip based devices");
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 19f0cf37e0ed..ba0a092ae085 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -659,22 +659,28 @@ void vmbus_close(struct vmbus_channel *channel)
659 */ 659 */
660 return; 660 return;
661 } 661 }
662 mutex_lock(&vmbus_connection.channel_mutex);
663 /* 662 /*
664 * Close all the sub-channels first and then close the 663 * Close all the sub-channels first and then close the
665 * primary channel. 664 * primary channel.
666 */ 665 */
667 list_for_each_safe(cur, tmp, &channel->sc_list) { 666 list_for_each_safe(cur, tmp, &channel->sc_list) {
668 cur_channel = list_entry(cur, struct vmbus_channel, sc_list); 667 cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
669 vmbus_close_internal(cur_channel);
670 if (cur_channel->rescind) { 668 if (cur_channel->rescind) {
669 wait_for_completion(&cur_channel->rescind_event);
670 mutex_lock(&vmbus_connection.channel_mutex);
671 vmbus_close_internal(cur_channel);
671 hv_process_channel_removal( 672 hv_process_channel_removal(
672 cur_channel->offermsg.child_relid); 673 cur_channel->offermsg.child_relid);
674 } else {
675 mutex_lock(&vmbus_connection.channel_mutex);
676 vmbus_close_internal(cur_channel);
673 } 677 }
678 mutex_unlock(&vmbus_connection.channel_mutex);
674 } 679 }
675 /* 680 /*
676 * Now close the primary. 681 * Now close the primary.
677 */ 682 */
683 mutex_lock(&vmbus_connection.channel_mutex);
678 vmbus_close_internal(channel); 684 vmbus_close_internal(channel);
679 mutex_unlock(&vmbus_connection.channel_mutex); 685 mutex_unlock(&vmbus_connection.channel_mutex);
680} 686}
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index ec5454f3f4a6..c21020b69114 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -333,6 +333,7 @@ static struct vmbus_channel *alloc_channel(void)
333 return NULL; 333 return NULL;
334 334
335 spin_lock_init(&channel->lock); 335 spin_lock_init(&channel->lock);
336 init_completion(&channel->rescind_event);
336 337
337 INIT_LIST_HEAD(&channel->sc_list); 338 INIT_LIST_HEAD(&channel->sc_list);
338 INIT_LIST_HEAD(&channel->percpu_list); 339 INIT_LIST_HEAD(&channel->percpu_list);
@@ -898,6 +899,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
898 /* 899 /*
899 * Now wait for offer handling to complete. 900 * Now wait for offer handling to complete.
900 */ 901 */
902 vmbus_rescind_cleanup(channel);
901 while (READ_ONCE(channel->probe_done) == false) { 903 while (READ_ONCE(channel->probe_done) == false) {
902 /* 904 /*
903 * We wait here until any channel offer is currently 905 * We wait here until any channel offer is currently
@@ -913,7 +915,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
913 if (channel->device_obj) { 915 if (channel->device_obj) {
914 if (channel->chn_rescind_callback) { 916 if (channel->chn_rescind_callback) {
915 channel->chn_rescind_callback(channel); 917 channel->chn_rescind_callback(channel);
916 vmbus_rescind_cleanup(channel);
917 return; 918 return;
918 } 919 }
919 /* 920 /*
@@ -922,7 +923,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
922 */ 923 */
923 dev = get_device(&channel->device_obj->device); 924 dev = get_device(&channel->device_obj->device);
924 if (dev) { 925 if (dev) {
925 vmbus_rescind_cleanup(channel);
926 vmbus_device_unregister(channel->device_obj); 926 vmbus_device_unregister(channel->device_obj);
927 put_device(dev); 927 put_device(dev);
928 } 928 }
@@ -936,13 +936,14 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
936 * 2. Then close the primary channel. 936 * 2. Then close the primary channel.
937 */ 937 */
938 mutex_lock(&vmbus_connection.channel_mutex); 938 mutex_lock(&vmbus_connection.channel_mutex);
939 vmbus_rescind_cleanup(channel);
940 if (channel->state == CHANNEL_OPEN_STATE) { 939 if (channel->state == CHANNEL_OPEN_STATE) {
941 /* 940 /*
942 * The channel is currently not open; 941 * The channel is currently not open;
943 * it is safe for us to cleanup the channel. 942 * it is safe for us to cleanup the channel.
944 */ 943 */
945 hv_process_channel_removal(rescind->child_relid); 944 hv_process_channel_removal(rescind->child_relid);
945 } else {
946 complete(&channel->rescind_event);
946 } 947 }
947 mutex_unlock(&vmbus_connection.channel_mutex); 948 mutex_unlock(&vmbus_connection.channel_mutex);
948 } 949 }
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 76ed9a216f10..610223f0e945 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1378,6 +1378,8 @@ void vmbus_device_unregister(struct hv_device *device_obj)
1378 pr_debug("child device %s unregistered\n", 1378 pr_debug("child device %s unregistered\n",
1379 dev_name(&device_obj->device)); 1379 dev_name(&device_obj->device));
1380 1380
1381 kset_unregister(device_obj->channels_kset);
1382
1381 /* 1383 /*
1382 * Kick off the process of unregistering the device. 1384 * Kick off the process of unregistering the device.
1383 * This will call vmbus_remove() and eventually vmbus_device_release() 1385 * This will call vmbus_remove() and eventually vmbus_device_release()
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index c9790e2c3440..af5123042990 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -143,6 +143,7 @@ static int hwmon_thermal_add_sensor(struct device *dev,
143 struct hwmon_device *hwdev, int index) 143 struct hwmon_device *hwdev, int index)
144{ 144{
145 struct hwmon_thermal_data *tdata; 145 struct hwmon_thermal_data *tdata;
146 struct thermal_zone_device *tzd;
146 147
147 tdata = devm_kzalloc(dev, sizeof(*tdata), GFP_KERNEL); 148 tdata = devm_kzalloc(dev, sizeof(*tdata), GFP_KERNEL);
148 if (!tdata) 149 if (!tdata)
@@ -151,8 +152,14 @@ static int hwmon_thermal_add_sensor(struct device *dev,
151 tdata->hwdev = hwdev; 152 tdata->hwdev = hwdev;
152 tdata->index = index; 153 tdata->index = index;
153 154
154 devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata, 155 tzd = devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
155 &hwmon_thermal_ops); 156 &hwmon_thermal_ops);
157 /*
158 * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
159 * so ignore that error but forward any other error.
160 */
161 if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV))
162 return PTR_ERR(tzd);
156 163
157 return 0; 164 return 0;
158} 165}
@@ -621,14 +628,20 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
621 if (!chip->ops->is_visible(drvdata, hwmon_temp, 628 if (!chip->ops->is_visible(drvdata, hwmon_temp,
622 hwmon_temp_input, j)) 629 hwmon_temp_input, j))
623 continue; 630 continue;
624 if (info[i]->config[j] & HWMON_T_INPUT) 631 if (info[i]->config[j] & HWMON_T_INPUT) {
625 hwmon_thermal_add_sensor(dev, hwdev, j); 632 err = hwmon_thermal_add_sensor(dev,
633 hwdev, j);
634 if (err)
635 goto free_device;
636 }
626 } 637 }
627 } 638 }
628 } 639 }
629 640
630 return hdev; 641 return hdev;
631 642
643free_device:
644 device_unregister(hdev);
632free_hwmon: 645free_hwmon:
633 kfree(hwdev); 646 kfree(hwdev);
634ida_remove: 647ida_remove:
diff --git a/drivers/hwtracing/stm/ftrace.c b/drivers/hwtracing/stm/ftrace.c
index bd126a7c6da2..7da75644c750 100644
--- a/drivers/hwtracing/stm/ftrace.c
+++ b/drivers/hwtracing/stm/ftrace.c
@@ -42,9 +42,11 @@ static struct stm_ftrace {
42 * @len: length of the data packet 42 * @len: length of the data packet
43 */ 43 */
44static void notrace 44static void notrace
45stm_ftrace_write(const void *buf, unsigned int len) 45stm_ftrace_write(struct trace_export *export, const void *buf, unsigned int len)
46{ 46{
47 stm_source_write(&stm_ftrace.data, STM_FTRACE_CHAN, buf, len); 47 struct stm_ftrace *stm = container_of(export, struct stm_ftrace, ftrace);
48
49 stm_source_write(&stm->data, STM_FTRACE_CHAN, buf, len);
48} 50}
49 51
50static int stm_ftrace_link(struct stm_source_data *data) 52static int stm_ftrace_link(struct stm_source_data *data)
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index 0d05dadb2dc5..44cffad43701 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -379,7 +379,7 @@ static int cht_wc_i2c_adap_i2c_remove(struct platform_device *pdev)
379 return 0; 379 return 0;
380} 380}
381 381
382static struct platform_device_id cht_wc_i2c_adap_id_table[] = { 382static const struct platform_device_id cht_wc_i2c_adap_id_table[] = {
383 { .name = "cht_wcove_ext_chgr" }, 383 { .name = "cht_wcove_ext_chgr" },
384 {}, 384 {},
385}; 385};
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 174579d32e5f..462948e2c535 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -983,7 +983,7 @@ static void piix4_adap_remove(struct i2c_adapter *adap)
983 983
984 if (adapdata->smba) { 984 if (adapdata->smba) {
985 i2c_del_adapter(adap); 985 i2c_del_adapter(adap);
986 if (adapdata->port == (0 << 1)) { 986 if (adapdata->port == (0 << piix4_port_shift_sb800)) {
987 release_region(adapdata->smba, SMBIOSIZE); 987 release_region(adapdata->smba, SMBIOSIZE);
988 if (adapdata->sb800_main) 988 if (adapdata->sb800_main)
989 release_region(SB800_PIIX4_SMB_IDX, 2); 989 release_region(SB800_PIIX4_SMB_IDX, 2);
diff --git a/drivers/i2c/busses/i2c-stm32.h b/drivers/i2c/busses/i2c-stm32.h
index dab51761f8c5..d4f9cef251ac 100644
--- a/drivers/i2c/busses/i2c-stm32.h
+++ b/drivers/i2c/busses/i2c-stm32.h
@@ -1,10 +1,11 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * i2c-stm32.h 3 * i2c-stm32.h
3 * 4 *
4 * Copyright (C) M'boumba Cedric Madianga 2017 5 * Copyright (C) M'boumba Cedric Madianga 2017
6 * Copyright (C) STMicroelectronics 2017
5 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com> 7 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
6 * 8 *
7 * License terms: GNU General Public License (GPL), version 2
8 */ 9 */
9 10
10#ifndef _I2C_STM32_H 11#ifndef _I2C_STM32_H
diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c
index 4ec108496f15..47c8d00de53f 100644
--- a/drivers/i2c/busses/i2c-stm32f4.c
+++ b/drivers/i2c/busses/i2c-stm32f4.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Driver for STMicroelectronics STM32 I2C controller 3 * Driver for STMicroelectronics STM32 I2C controller
3 * 4 *
@@ -6,11 +7,11 @@
6 * http://www.st.com/resource/en/reference_manual/DM00031020.pdf 7 * http://www.st.com/resource/en/reference_manual/DM00031020.pdf
7 * 8 *
8 * Copyright (C) M'boumba Cedric Madianga 2016 9 * Copyright (C) M'boumba Cedric Madianga 2016
10 * Copyright (C) STMicroelectronics 2017
9 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com> 11 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
10 * 12 *
11 * This driver is based on i2c-st.c 13 * This driver is based on i2c-st.c
12 * 14 *
13 * License terms: GNU General Public License (GPL), version 2
14 */ 15 */
15 16
16#include <linux/clk.h> 17#include <linux/clk.h>
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index d4a6e9c2e9aa..b445b3bb0bb1 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Driver for STMicroelectronics STM32F7 I2C controller 3 * Driver for STMicroelectronics STM32F7 I2C controller
3 * 4 *
@@ -7,11 +8,11 @@
7 * http://www.st.com/resource/en/reference_manual/dm00124865.pdf 8 * http://www.st.com/resource/en/reference_manual/dm00124865.pdf
8 * 9 *
9 * Copyright (C) M'boumba Cedric Madianga 2017 10 * Copyright (C) M'boumba Cedric Madianga 2017
11 * Copyright (C) STMicroelectronics 2017
10 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com> 12 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
11 * 13 *
12 * This driver is based on i2c-stm32f4.c 14 * This driver is based on i2c-stm32f4.c
13 * 15 *
14 * License terms: GNU General Public License (GPL), version 2
15 */ 16 */
16#include <linux/clk.h> 17#include <linux/clk.h>
17#include <linux/delay.h> 18#include <linux/delay.h>
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 706164b4c5be..f7829a74140c 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -821,8 +821,12 @@ void i2c_unregister_device(struct i2c_client *client)
821{ 821{
822 if (!client) 822 if (!client)
823 return; 823 return;
824 if (client->dev.of_node) 824
825 if (client->dev.of_node) {
825 of_node_clear_flag(client->dev.of_node, OF_POPULATED); 826 of_node_clear_flag(client->dev.of_node, OF_POPULATED);
827 of_node_put(client->dev.of_node);
828 }
829
826 if (ACPI_COMPANION(&client->dev)) 830 if (ACPI_COMPANION(&client->dev))
827 acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev)); 831 acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev));
828 device_unregister(&client->dev); 832 device_unregister(&client->dev);
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index 4bb9927afd01..a1082c04ac5c 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -397,16 +397,17 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
397 the underlying bus driver */ 397 the underlying bus driver */
398 break; 398 break;
399 case I2C_SMBUS_I2C_BLOCK_DATA: 399 case I2C_SMBUS_I2C_BLOCK_DATA:
400 if (data->block[0] > I2C_SMBUS_BLOCK_MAX) {
401 dev_err(&adapter->dev, "Invalid block %s size %d\n",
402 read_write == I2C_SMBUS_READ ? "read" : "write",
403 data->block[0]);
404 return -EINVAL;
405 }
406
400 if (read_write == I2C_SMBUS_READ) { 407 if (read_write == I2C_SMBUS_READ) {
401 msg[1].len = data->block[0]; 408 msg[1].len = data->block[0];
402 } else { 409 } else {
403 msg[0].len = data->block[0] + 1; 410 msg[0].len = data->block[0] + 1;
404 if (msg[0].len > I2C_SMBUS_BLOCK_MAX + 1) {
405 dev_err(&adapter->dev,
406 "Invalid block write size %d\n",
407 data->block[0]);
408 return -EINVAL;
409 }
410 for (i = 1; i <= data->block[0]; i++) 411 for (i = 1; i <= data->block[0]; i++)
411 msgbuf0[i] = data->block[i]; 412 msgbuf0[i] = data->block[i];
412 } 413 }
diff --git a/drivers/iio/adc/cpcap-adc.c b/drivers/iio/adc/cpcap-adc.c
index 3576ec73ec23..9ad60421d360 100644
--- a/drivers/iio/adc/cpcap-adc.c
+++ b/drivers/iio/adc/cpcap-adc.c
@@ -1011,7 +1011,7 @@ static int cpcap_adc_probe(struct platform_device *pdev)
1011 platform_set_drvdata(pdev, indio_dev); 1011 platform_set_drvdata(pdev, indio_dev);
1012 1012
1013 ddata->irq = platform_get_irq_byname(pdev, "adcdone"); 1013 ddata->irq = platform_get_irq_byname(pdev, "adcdone");
1014 if (!ddata->irq) 1014 if (ddata->irq < 0)
1015 return -ENODEV; 1015 return -ENODEV;
1016 1016
1017 error = devm_request_threaded_irq(&pdev->dev, ddata->irq, NULL, 1017 error = devm_request_threaded_irq(&pdev->dev, ddata->irq, NULL,
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 9c6932ffc0af..36047147ce7c 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -221,8 +221,10 @@ enum meson_sar_adc_chan7_mux_sel {
221 221
222struct meson_sar_adc_data { 222struct meson_sar_adc_data {
223 bool has_bl30_integration; 223 bool has_bl30_integration;
224 u32 bandgap_reg;
224 unsigned int resolution; 225 unsigned int resolution;
225 const char *name; 226 const char *name;
227 const struct regmap_config *regmap_config;
226}; 228};
227 229
228struct meson_sar_adc_priv { 230struct meson_sar_adc_priv {
@@ -242,13 +244,20 @@ struct meson_sar_adc_priv {
242 int calibscale; 244 int calibscale;
243}; 245};
244 246
245static const struct regmap_config meson_sar_adc_regmap_config = { 247static const struct regmap_config meson_sar_adc_regmap_config_gxbb = {
246 .reg_bits = 8, 248 .reg_bits = 8,
247 .val_bits = 32, 249 .val_bits = 32,
248 .reg_stride = 4, 250 .reg_stride = 4,
249 .max_register = MESON_SAR_ADC_REG13, 251 .max_register = MESON_SAR_ADC_REG13,
250}; 252};
251 253
254static const struct regmap_config meson_sar_adc_regmap_config_meson8 = {
255 .reg_bits = 8,
256 .val_bits = 32,
257 .reg_stride = 4,
258 .max_register = MESON_SAR_ADC_DELTA_10,
259};
260
252static unsigned int meson_sar_adc_get_fifo_count(struct iio_dev *indio_dev) 261static unsigned int meson_sar_adc_get_fifo_count(struct iio_dev *indio_dev)
253{ 262{
254 struct meson_sar_adc_priv *priv = iio_priv(indio_dev); 263 struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
@@ -600,7 +609,7 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev,
600 init.num_parents = 1; 609 init.num_parents = 1;
601 610
602 priv->clk_gate.reg = base + MESON_SAR_ADC_REG3; 611 priv->clk_gate.reg = base + MESON_SAR_ADC_REG3;
603 priv->clk_gate.bit_idx = fls(MESON_SAR_ADC_REG3_CLK_EN); 612 priv->clk_gate.bit_idx = __ffs(MESON_SAR_ADC_REG3_CLK_EN);
604 priv->clk_gate.hw.init = &init; 613 priv->clk_gate.hw.init = &init;
605 614
606 priv->adc_clk = devm_clk_register(&indio_dev->dev, &priv->clk_gate.hw); 615 priv->adc_clk = devm_clk_register(&indio_dev->dev, &priv->clk_gate.hw);
@@ -685,6 +694,20 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
685 return 0; 694 return 0;
686} 695}
687 696
697static void meson_sar_adc_set_bandgap(struct iio_dev *indio_dev, bool on_off)
698{
699 struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
700 u32 enable_mask;
701
702 if (priv->data->bandgap_reg == MESON_SAR_ADC_REG11)
703 enable_mask = MESON_SAR_ADC_REG11_BANDGAP_EN;
704 else
705 enable_mask = MESON_SAR_ADC_DELTA_10_TS_VBG_EN;
706
707 regmap_update_bits(priv->regmap, priv->data->bandgap_reg, enable_mask,
708 on_off ? enable_mask : 0);
709}
710
688static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev) 711static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
689{ 712{
690 struct meson_sar_adc_priv *priv = iio_priv(indio_dev); 713 struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
@@ -717,9 +740,9 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
717 regval = FIELD_PREP(MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, 1); 740 regval = FIELD_PREP(MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, 1);
718 regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0, 741 regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0,
719 MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, regval); 742 MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, regval);
720 regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11, 743
721 MESON_SAR_ADC_REG11_BANDGAP_EN, 744 meson_sar_adc_set_bandgap(indio_dev, true);
722 MESON_SAR_ADC_REG11_BANDGAP_EN); 745
723 regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3, 746 regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
724 MESON_SAR_ADC_REG3_ADC_EN, 747 MESON_SAR_ADC_REG3_ADC_EN,
725 MESON_SAR_ADC_REG3_ADC_EN); 748 MESON_SAR_ADC_REG3_ADC_EN);
@@ -739,8 +762,7 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
739err_adc_clk: 762err_adc_clk:
740 regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3, 763 regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
741 MESON_SAR_ADC_REG3_ADC_EN, 0); 764 MESON_SAR_ADC_REG3_ADC_EN, 0);
742 regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11, 765 meson_sar_adc_set_bandgap(indio_dev, false);
743 MESON_SAR_ADC_REG11_BANDGAP_EN, 0);
744 clk_disable_unprepare(priv->sana_clk); 766 clk_disable_unprepare(priv->sana_clk);
745err_sana_clk: 767err_sana_clk:
746 clk_disable_unprepare(priv->core_clk); 768 clk_disable_unprepare(priv->core_clk);
@@ -765,8 +787,8 @@ static int meson_sar_adc_hw_disable(struct iio_dev *indio_dev)
765 787
766 regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3, 788 regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
767 MESON_SAR_ADC_REG3_ADC_EN, 0); 789 MESON_SAR_ADC_REG3_ADC_EN, 0);
768 regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11, 790
769 MESON_SAR_ADC_REG11_BANDGAP_EN, 0); 791 meson_sar_adc_set_bandgap(indio_dev, false);
770 792
771 clk_disable_unprepare(priv->sana_clk); 793 clk_disable_unprepare(priv->sana_clk);
772 clk_disable_unprepare(priv->core_clk); 794 clk_disable_unprepare(priv->core_clk);
@@ -844,30 +866,40 @@ static const struct iio_info meson_sar_adc_iio_info = {
844 866
845static const struct meson_sar_adc_data meson_sar_adc_meson8_data = { 867static const struct meson_sar_adc_data meson_sar_adc_meson8_data = {
846 .has_bl30_integration = false, 868 .has_bl30_integration = false,
869 .bandgap_reg = MESON_SAR_ADC_DELTA_10,
870 .regmap_config = &meson_sar_adc_regmap_config_meson8,
847 .resolution = 10, 871 .resolution = 10,
848 .name = "meson-meson8-saradc", 872 .name = "meson-meson8-saradc",
849}; 873};
850 874
851static const struct meson_sar_adc_data meson_sar_adc_meson8b_data = { 875static const struct meson_sar_adc_data meson_sar_adc_meson8b_data = {
852 .has_bl30_integration = false, 876 .has_bl30_integration = false,
877 .bandgap_reg = MESON_SAR_ADC_DELTA_10,
878 .regmap_config = &meson_sar_adc_regmap_config_meson8,
853 .resolution = 10, 879 .resolution = 10,
854 .name = "meson-meson8b-saradc", 880 .name = "meson-meson8b-saradc",
855}; 881};
856 882
857static const struct meson_sar_adc_data meson_sar_adc_gxbb_data = { 883static const struct meson_sar_adc_data meson_sar_adc_gxbb_data = {
858 .has_bl30_integration = true, 884 .has_bl30_integration = true,
885 .bandgap_reg = MESON_SAR_ADC_REG11,
886 .regmap_config = &meson_sar_adc_regmap_config_gxbb,
859 .resolution = 10, 887 .resolution = 10,
860 .name = "meson-gxbb-saradc", 888 .name = "meson-gxbb-saradc",
861}; 889};
862 890
863static const struct meson_sar_adc_data meson_sar_adc_gxl_data = { 891static const struct meson_sar_adc_data meson_sar_adc_gxl_data = {
864 .has_bl30_integration = true, 892 .has_bl30_integration = true,
893 .bandgap_reg = MESON_SAR_ADC_REG11,
894 .regmap_config = &meson_sar_adc_regmap_config_gxbb,
865 .resolution = 12, 895 .resolution = 12,
866 .name = "meson-gxl-saradc", 896 .name = "meson-gxl-saradc",
867}; 897};
868 898
869static const struct meson_sar_adc_data meson_sar_adc_gxm_data = { 899static const struct meson_sar_adc_data meson_sar_adc_gxm_data = {
870 .has_bl30_integration = true, 900 .has_bl30_integration = true,
901 .bandgap_reg = MESON_SAR_ADC_REG11,
902 .regmap_config = &meson_sar_adc_regmap_config_gxbb,
871 .resolution = 12, 903 .resolution = 12,
872 .name = "meson-gxm-saradc", 904 .name = "meson-gxm-saradc",
873}; 905};
@@ -945,7 +977,7 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
945 return ret; 977 return ret;
946 978
947 priv->regmap = devm_regmap_init_mmio(&pdev->dev, base, 979 priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
948 &meson_sar_adc_regmap_config); 980 priv->data->regmap_config);
949 if (IS_ERR(priv->regmap)) 981 if (IS_ERR(priv->regmap))
950 return PTR_ERR(priv->regmap); 982 return PTR_ERR(priv->regmap);
951 983
diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c
index 203ffb9cad6a..147a8c14235f 100644
--- a/drivers/iio/health/max30102.c
+++ b/drivers/iio/health/max30102.c
@@ -371,7 +371,7 @@ static int max30102_read_raw(struct iio_dev *indio_dev,
371 mutex_unlock(&indio_dev->mlock); 371 mutex_unlock(&indio_dev->mlock);
372 break; 372 break;
373 case IIO_CHAN_INFO_SCALE: 373 case IIO_CHAN_INFO_SCALE:
374 *val = 1; /* 0.0625 */ 374 *val = 1000; /* 62.5 */
375 *val2 = 16; 375 *val2 = 16;
376 ret = IIO_VAL_FRACTIONAL; 376 ret = IIO_VAL_FRACTIONAL;
377 break; 377 break;
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 9c4cfd19b739..2f0998ebeed2 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -631,7 +631,7 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
631 * iio_format_value() - Formats a IIO value into its string representation 631 * iio_format_value() - Formats a IIO value into its string representation
632 * @buf: The buffer to which the formatted value gets written 632 * @buf: The buffer to which the formatted value gets written
633 * which is assumed to be big enough (i.e. PAGE_SIZE). 633 * which is assumed to be big enough (i.e. PAGE_SIZE).
634 * @type: One of the IIO_VAL_... constants. This decides how the val 634 * @type: One of the IIO_VAL_* constants. This decides how the val
635 * and val2 parameters are formatted. 635 * and val2 parameters are formatted.
636 * @size: Number of IIO value entries contained in vals 636 * @size: Number of IIO value entries contained in vals
637 * @vals: Pointer to the values, exact meaning depends on the 637 * @vals: Pointer to the values, exact meaning depends on the
@@ -639,7 +639,7 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
639 * 639 *
640 * Return: 0 by default, a negative number on failure or the 640 * Return: 0 by default, a negative number on failure or the
641 * total number of characters written for a type that belongs 641 * total number of characters written for a type that belongs
642 * to the IIO_VAL_... constant. 642 * to the IIO_VAL_* constant.
643 */ 643 */
644ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals) 644ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
645{ 645{
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index 53c5d653e780..df23dbcc030a 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -869,6 +869,7 @@ static int sx9500_init_device(struct iio_dev *indio_dev)
869static void sx9500_gpio_probe(struct i2c_client *client, 869static void sx9500_gpio_probe(struct i2c_client *client,
870 struct sx9500_data *data) 870 struct sx9500_data *data)
871{ 871{
872 struct gpio_desc *gpiod_int;
872 struct device *dev; 873 struct device *dev;
873 874
874 if (!client) 875 if (!client)
@@ -876,6 +877,14 @@ static void sx9500_gpio_probe(struct i2c_client *client,
876 877
877 dev = &client->dev; 878 dev = &client->dev;
878 879
880 if (client->irq <= 0) {
881 gpiod_int = devm_gpiod_get(dev, SX9500_GPIO_INT, GPIOD_IN);
882 if (IS_ERR(gpiod_int))
883 dev_err(dev, "gpio get irq failed\n");
884 else
885 client->irq = gpiod_to_irq(gpiod_int);
886 }
887
879 data->gpiod_rst = devm_gpiod_get(dev, SX9500_GPIO_RESET, GPIOD_OUT_HIGH); 888 data->gpiod_rst = devm_gpiod_get(dev, SX9500_GPIO_RESET, GPIOD_OUT_HIGH);
880 if (IS_ERR(data->gpiod_rst)) { 889 if (IS_ERR(data->gpiod_rst)) {
881 dev_warn(dev, "gpio get reset pin failed\n"); 890 dev_warn(dev, "gpio get reset pin failed\n");
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 7750a9c38b06..1df7da47f431 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -763,11 +763,11 @@ static int complete_subctxt(struct hfi1_filedata *fd)
763 } 763 }
764 764
765 if (ret) { 765 if (ret) {
766 hfi1_rcd_put(fd->uctxt);
767 fd->uctxt = NULL;
768 spin_lock_irqsave(&fd->dd->uctxt_lock, flags); 766 spin_lock_irqsave(&fd->dd->uctxt_lock, flags);
769 __clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts); 767 __clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
770 spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags); 768 spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags);
769 hfi1_rcd_put(fd->uctxt);
770 fd->uctxt = NULL;
771 } 771 }
772 772
773 return ret; 773 return ret;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 5007280321b6..4236c8086820 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1543,7 +1543,8 @@ static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
1543 return err; 1543 return err;
1544 1544
1545 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) || 1545 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1546 !MLX5_CAP_GEN(dev->mdev, disable_local_lb)) 1546 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1547 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1547 return err; 1548 return err;
1548 1549
1549 mutex_lock(&dev->lb_mutex); 1550 mutex_lock(&dev->lb_mutex);
@@ -1561,7 +1562,8 @@ static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
1561 mlx5_core_dealloc_transport_domain(dev->mdev, tdn); 1562 mlx5_core_dealloc_transport_domain(dev->mdev, tdn);
1562 1563
1563 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) || 1564 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1564 !MLX5_CAP_GEN(dev->mdev, disable_local_lb)) 1565 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1566 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1565 return; 1567 return;
1566 1568
1567 mutex_lock(&dev->lb_mutex); 1569 mutex_lock(&dev->lb_mutex);
@@ -4711,7 +4713,8 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
4711 return err; 4713 return err;
4712 4714
4713 if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && 4715 if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
4714 MLX5_CAP_GEN(dev->mdev, disable_local_lb)) 4716 (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
4717 MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
4715 mutex_init(&dev->lb_mutex); 4718 mutex_init(&dev->lb_mutex);
4716 4719
4717 return 0; 4720 return 0;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index d5cc95ffd4ac..39d24bf694a8 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -4674,12 +4674,11 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
4674 4674
4675 memset(ah_attr, 0, sizeof(*ah_attr)); 4675 memset(ah_attr, 0, sizeof(*ah_attr));
4676 4676
4677 ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port); 4677 if (!path->port || path->port > ibdev->num_ports)
4678 rdma_ah_set_port_num(ah_attr, path->port);
4679 if (rdma_ah_get_port_num(ah_attr) == 0 ||
4680 rdma_ah_get_port_num(ah_attr) > ibdev->num_ports)
4681 return; 4678 return;
4682 4679
4680 ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);
4681
4683 rdma_ah_set_port_num(ah_attr, path->port); 4682 rdma_ah_set_port_num(ah_attr, path->port);
4684 rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf); 4683 rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf);
4685 4684
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index dfbb8fdda5f6..962fbcb57dc7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1458,8 +1458,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
1458 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1458 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1459 int e = skb_queue_empty(&priv->cm.skb_queue); 1459 int e = skb_queue_empty(&priv->cm.skb_queue);
1460 1460
1461 if (skb_dst(skb)) 1461 skb_dst_update_pmtu(skb, mtu);
1462 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
1463 1462
1464 skb_queue_tail(&priv->cm.skb_queue, skb); 1463 skb_queue_tail(&priv->cm.skb_queue, skb);
1465 if (e) 1464 if (e)
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index fd55163801a3..fff40b097947 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -741,6 +741,7 @@ isert_connect_error(struct rdma_cm_id *cma_id)
741{ 741{
742 struct isert_conn *isert_conn = cma_id->qp->qp_context; 742 struct isert_conn *isert_conn = cma_id->qp->qp_context;
743 743
744 ib_drain_qp(isert_conn->qp);
744 list_del_init(&isert_conn->node); 745 list_del_init(&isert_conn->node);
745 isert_conn->cm_id = NULL; 746 isert_conn->cm_id = NULL;
746 isert_put_conn(isert_conn); 747 isert_put_conn(isert_conn);
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 3d8ff09eba57..c868a878c84f 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -163,7 +163,7 @@ static unsigned int get_time_pit(void)
163#define GET_TIME(x) do { x = (unsigned int)rdtsc(); } while (0) 163#define GET_TIME(x) do { x = (unsigned int)rdtsc(); } while (0)
164#define DELTA(x,y) ((y)-(x)) 164#define DELTA(x,y) ((y)-(x))
165#define TIME_NAME "TSC" 165#define TIME_NAME "TSC"
166#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_TILE) 166#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_RISCV) || defined(CONFIG_TILE)
167#define GET_TIME(x) do { x = get_cycles(); } while (0) 167#define GET_TIME(x) do { x = get_cycles(); } while (0)
168#define DELTA(x,y) ((y)-(x)) 168#define DELTA(x,y) ((y)-(x))
169#define TIME_NAME "get_cycles" 169#define TIME_NAME "get_cycles"
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index d86e59515b9c..d88d3e0f59fb 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -229,6 +229,7 @@ static const struct xpad_device {
229 { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, 229 { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
230 { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, 230 { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
231 { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE }, 231 { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
232 { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
232 { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, 233 { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
233 { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE }, 234 { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
234 { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, 235 { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
@@ -476,6 +477,22 @@ static const u8 xboxone_hori_init[] = {
476}; 477};
477 478
478/* 479/*
480 * This packet is required for some of the PDP pads to start
481 * sending input reports. One of those pads is (0x0e6f:0x02ab).
482 */
483static const u8 xboxone_pdp_init1[] = {
484 0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14
485};
486
487/*
488 * This packet is required for some of the PDP pads to start
489 * sending input reports. One of those pads is (0x0e6f:0x02ab).
490 */
491static const u8 xboxone_pdp_init2[] = {
492 0x06, 0x20, 0x00, 0x02, 0x01, 0x00
493};
494
495/*
479 * A specific rumble packet is required for some PowerA pads to start 496 * A specific rumble packet is required for some PowerA pads to start
480 * sending input reports. One of those pads is (0x24c6:0x543a). 497 * sending input reports. One of those pads is (0x24c6:0x543a).
481 */ 498 */
@@ -505,6 +522,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
505 XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init), 522 XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
506 XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init), 523 XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
507 XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init), 524 XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
525 XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init1),
526 XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2),
508 XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), 527 XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
509 XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init), 528 XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
510 XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init), 529 XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index ae473123583b..3d51175c4d72 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1651,7 +1651,7 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
1651 return union_desc; 1651 return union_desc;
1652 1652
1653 dev_err(&intf->dev, 1653 dev_err(&intf->dev,
1654 "Union descriptor to short (%d vs %zd\n)", 1654 "Union descriptor too short (%d vs %zd)\n",
1655 union_desc->bLength, sizeof(*union_desc)); 1655 union_desc->bLength, sizeof(*union_desc));
1656 return NULL; 1656 return NULL;
1657 } 1657 }
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 6c51d404874b..c37aea9ac272 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -178,12 +178,14 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
178 twl4030_vibra_suspend, twl4030_vibra_resume); 178 twl4030_vibra_suspend, twl4030_vibra_resume);
179 179
180static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata, 180static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
181 struct device_node *node) 181 struct device_node *parent)
182{ 182{
183 struct device_node *node;
184
183 if (pdata && pdata->coexist) 185 if (pdata && pdata->coexist)
184 return true; 186 return true;
185 187
186 node = of_find_node_by_name(node, "codec"); 188 node = of_get_child_by_name(parent, "codec");
187 if (node) { 189 if (node) {
188 of_node_put(node); 190 of_node_put(node);
189 return true; 191 return true;
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index 5690eb7ff954..15e0d352c4cc 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -248,8 +248,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
248 int vddvibr_uV = 0; 248 int vddvibr_uV = 0;
249 int error; 249 int error;
250 250
251 of_node_get(twl6040_core_dev->of_node); 251 twl6040_core_node = of_get_child_by_name(twl6040_core_dev->of_node,
252 twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node,
253 "vibra"); 252 "vibra");
254 if (!twl6040_core_node) { 253 if (!twl6040_core_node) {
255 dev_err(&pdev->dev, "parent of node is missing?\n"); 254 dev_err(&pdev->dev, "parent of node is missing?\n");
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 6bf56bb5f8d9..d91f3b1c5375 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -326,8 +326,6 @@ static int xenkbd_probe(struct xenbus_device *dev,
326 0, width, 0, 0); 326 0, width, 0, 0);
327 input_set_abs_params(mtouch, ABS_MT_POSITION_Y, 327 input_set_abs_params(mtouch, ABS_MT_POSITION_Y,
328 0, height, 0, 0); 328 0, height, 0, 0);
329 input_set_abs_params(mtouch, ABS_MT_PRESSURE,
330 0, 255, 0, 0);
331 329
332 ret = input_mt_init_slots(mtouch, num_cont, INPUT_MT_DIRECT); 330 ret = input_mt_init_slots(mtouch, num_cont, INPUT_MT_DIRECT);
333 if (ret) { 331 if (ret) {
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 579b899add26..dbe57da8c1a1 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1250,29 +1250,32 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
1250 case SS4_PACKET_ID_MULTI: 1250 case SS4_PACKET_ID_MULTI:
1251 if (priv->flags & ALPS_BUTTONPAD) { 1251 if (priv->flags & ALPS_BUTTONPAD) {
1252 if (IS_SS4PLUS_DEV(priv->dev_id)) { 1252 if (IS_SS4PLUS_DEV(priv->dev_id)) {
1253 f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0); 1253 f->mt[2].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
1254 f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1); 1254 f->mt[3].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
1255 no_data_x = SS4_PLUS_MFPACKET_NO_AX_BL;
1255 } else { 1256 } else {
1256 f->mt[2].x = SS4_BTL_MF_X_V2(p, 0); 1257 f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
1257 f->mt[3].x = SS4_BTL_MF_X_V2(p, 1); 1258 f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
1259 no_data_x = SS4_MFPACKET_NO_AX_BL;
1258 } 1260 }
1261 no_data_y = SS4_MFPACKET_NO_AY_BL;
1259 1262
1260 f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0); 1263 f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
1261 f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1); 1264 f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
1262 no_data_x = SS4_MFPACKET_NO_AX_BL;
1263 no_data_y = SS4_MFPACKET_NO_AY_BL;
1264 } else { 1265 } else {
1265 if (IS_SS4PLUS_DEV(priv->dev_id)) { 1266 if (IS_SS4PLUS_DEV(priv->dev_id)) {
1266 f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0); 1267 f->mt[2].x = SS4_PLUS_STD_MF_X_V2(p, 0);
1267 f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1); 1268 f->mt[3].x = SS4_PLUS_STD_MF_X_V2(p, 1);
1269 no_data_x = SS4_PLUS_MFPACKET_NO_AX;
1268 } else { 1270 } else {
1269 f->mt[0].x = SS4_STD_MF_X_V2(p, 0); 1271 f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
1270 f->mt[1].x = SS4_STD_MF_X_V2(p, 1); 1272 f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
1273 no_data_x = SS4_MFPACKET_NO_AX;
1271 } 1274 }
1275 no_data_y = SS4_MFPACKET_NO_AY;
1276
1272 f->mt[2].y = SS4_STD_MF_Y_V2(p, 0); 1277 f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
1273 f->mt[3].y = SS4_STD_MF_Y_V2(p, 1); 1278 f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
1274 no_data_x = SS4_MFPACKET_NO_AX;
1275 no_data_y = SS4_MFPACKET_NO_AY;
1276 } 1279 }
1277 1280
1278 f->first_mp = 0; 1281 f->first_mp = 0;
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index c80a7c76cb76..79b6d69d1486 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -141,10 +141,12 @@ enum SS4_PACKET_ID {
141#define SS4_TS_Z_V2(_b) (s8)(_b[4] & 0x7F) 141#define SS4_TS_Z_V2(_b) (s8)(_b[4] & 0x7F)
142 142
143 143
144#define SS4_MFPACKET_NO_AX 8160 /* X-Coordinate value */ 144#define SS4_MFPACKET_NO_AX 8160 /* X-Coordinate value */
145#define SS4_MFPACKET_NO_AY 4080 /* Y-Coordinate value */ 145#define SS4_MFPACKET_NO_AY 4080 /* Y-Coordinate value */
146#define SS4_MFPACKET_NO_AX_BL 8176 /* Buttonless X-Coordinate value */ 146#define SS4_MFPACKET_NO_AX_BL 8176 /* Buttonless X-Coord value */
147#define SS4_MFPACKET_NO_AY_BL 4088 /* Buttonless Y-Coordinate value */ 147#define SS4_MFPACKET_NO_AY_BL 4088 /* Buttonless Y-Coord value */
148#define SS4_PLUS_MFPACKET_NO_AX 4080 /* SS4 PLUS, X */
149#define SS4_PLUS_MFPACKET_NO_AX_BL 4088 /* Buttonless SS4 PLUS, X */
148 150
149/* 151/*
150 * enum V7_PACKET_ID - defines the packet type for V7 152 * enum V7_PACKET_ID - defines the packet type for V7
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index b84cd978fce2..a4aaa748e987 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1613,7 +1613,7 @@ static int elantech_set_properties(struct elantech_data *etd)
1613 case 5: 1613 case 5:
1614 etd->hw_version = 3; 1614 etd->hw_version = 3;
1615 break; 1615 break;
1616 case 6 ... 14: 1616 case 6 ... 15:
1617 etd->hw_version = 4; 1617 etd->hw_version = 4;
1618 break; 1618 break;
1619 default: 1619 default:
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index ee5466a374bf..cd9f61cb3fc6 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -173,6 +173,7 @@ static const char * const smbus_pnp_ids[] = {
173 "LEN0046", /* X250 */ 173 "LEN0046", /* X250 */
174 "LEN004a", /* W541 */ 174 "LEN004a", /* W541 */
175 "LEN200f", /* T450s */ 175 "LEN200f", /* T450s */
176 "LEN2018", /* T460p */
176 NULL 177 NULL
177}; 178};
178 179
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 0871010f18d5..bbd29220dbe9 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -19,6 +19,13 @@
19#include "psmouse.h" 19#include "psmouse.h"
20#include "trackpoint.h" 20#include "trackpoint.h"
21 21
22static const char * const trackpoint_variants[] = {
23 [TP_VARIANT_IBM] = "IBM",
24 [TP_VARIANT_ALPS] = "ALPS",
25 [TP_VARIANT_ELAN] = "Elan",
26 [TP_VARIANT_NXP] = "NXP",
27};
28
22/* 29/*
23 * Power-on Reset: Resets all trackpoint parameters, including RAM values, 30 * Power-on Reset: Resets all trackpoint parameters, including RAM values,
24 * to defaults. 31 * to defaults.
@@ -26,7 +33,7 @@
26 */ 33 */
27static int trackpoint_power_on_reset(struct ps2dev *ps2dev) 34static int trackpoint_power_on_reset(struct ps2dev *ps2dev)
28{ 35{
29 unsigned char results[2]; 36 u8 results[2];
30 int tries = 0; 37 int tries = 0;
31 38
32 /* Issue POR command, and repeat up to once if 0xFC00 received */ 39 /* Issue POR command, and repeat up to once if 0xFC00 received */
@@ -38,7 +45,7 @@ static int trackpoint_power_on_reset(struct ps2dev *ps2dev)
38 45
39 /* Check for success response -- 0xAA00 */ 46 /* Check for success response -- 0xAA00 */
40 if (results[0] != 0xAA || results[1] != 0x00) 47 if (results[0] != 0xAA || results[1] != 0x00)
41 return -1; 48 return -ENODEV;
42 49
43 return 0; 50 return 0;
44} 51}
@@ -46,8 +53,7 @@ static int trackpoint_power_on_reset(struct ps2dev *ps2dev)
46/* 53/*
47 * Device IO: read, write and toggle bit 54 * Device IO: read, write and toggle bit
48 */ 55 */
49static int trackpoint_read(struct ps2dev *ps2dev, 56static int trackpoint_read(struct ps2dev *ps2dev, u8 loc, u8 *results)
50 unsigned char loc, unsigned char *results)
51{ 57{
52 if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) || 58 if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) ||
53 ps2_command(ps2dev, results, MAKE_PS2_CMD(0, 1, loc))) { 59 ps2_command(ps2dev, results, MAKE_PS2_CMD(0, 1, loc))) {
@@ -57,8 +63,7 @@ static int trackpoint_read(struct ps2dev *ps2dev,
57 return 0; 63 return 0;
58} 64}
59 65
60static int trackpoint_write(struct ps2dev *ps2dev, 66static int trackpoint_write(struct ps2dev *ps2dev, u8 loc, u8 val)
61 unsigned char loc, unsigned char val)
62{ 67{
63 if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) || 68 if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) ||
64 ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_WRITE_MEM)) || 69 ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_WRITE_MEM)) ||
@@ -70,8 +75,7 @@ static int trackpoint_write(struct ps2dev *ps2dev,
70 return 0; 75 return 0;
71} 76}
72 77
73static int trackpoint_toggle_bit(struct ps2dev *ps2dev, 78static int trackpoint_toggle_bit(struct ps2dev *ps2dev, u8 loc, u8 mask)
74 unsigned char loc, unsigned char mask)
75{ 79{
76 /* Bad things will happen if the loc param isn't in this range */ 80 /* Bad things will happen if the loc param isn't in this range */
77 if (loc < 0x20 || loc >= 0x2F) 81 if (loc < 0x20 || loc >= 0x2F)
@@ -87,11 +91,11 @@ static int trackpoint_toggle_bit(struct ps2dev *ps2dev,
87 return 0; 91 return 0;
88} 92}
89 93
90static int trackpoint_update_bit(struct ps2dev *ps2dev, unsigned char loc, 94static int trackpoint_update_bit(struct ps2dev *ps2dev,
91 unsigned char mask, unsigned char value) 95 u8 loc, u8 mask, u8 value)
92{ 96{
93 int retval = 0; 97 int retval = 0;
94 unsigned char data; 98 u8 data;
95 99
96 trackpoint_read(ps2dev, loc, &data); 100 trackpoint_read(ps2dev, loc, &data);
97 if (((data & mask) == mask) != !!value) 101 if (((data & mask) == mask) != !!value)
@@ -105,17 +109,18 @@ static int trackpoint_update_bit(struct ps2dev *ps2dev, unsigned char loc,
105 */ 109 */
106struct trackpoint_attr_data { 110struct trackpoint_attr_data {
107 size_t field_offset; 111 size_t field_offset;
108 unsigned char command; 112 u8 command;
109 unsigned char mask; 113 u8 mask;
110 unsigned char inverted; 114 bool inverted;
111 unsigned char power_on_default; 115 u8 power_on_default;
112}; 116};
113 117
114static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse, void *data, char *buf) 118static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse,
119 void *data, char *buf)
115{ 120{
116 struct trackpoint_data *tp = psmouse->private; 121 struct trackpoint_data *tp = psmouse->private;
117 struct trackpoint_attr_data *attr = data; 122 struct trackpoint_attr_data *attr = data;
118 unsigned char value = *(unsigned char *)((char *)tp + attr->field_offset); 123 u8 value = *(u8 *)((void *)tp + attr->field_offset);
119 124
120 if (attr->inverted) 125 if (attr->inverted)
121 value = !value; 126 value = !value;
@@ -128,8 +133,8 @@ static ssize_t trackpoint_set_int_attr(struct psmouse *psmouse, void *data,
128{ 133{
129 struct trackpoint_data *tp = psmouse->private; 134 struct trackpoint_data *tp = psmouse->private;
130 struct trackpoint_attr_data *attr = data; 135 struct trackpoint_attr_data *attr = data;
131 unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset); 136 u8 *field = (void *)tp + attr->field_offset;
132 unsigned char value; 137 u8 value;
133 int err; 138 int err;
134 139
135 err = kstrtou8(buf, 10, &value); 140 err = kstrtou8(buf, 10, &value);
@@ -157,17 +162,14 @@ static ssize_t trackpoint_set_bit_attr(struct psmouse *psmouse, void *data,
157{ 162{
158 struct trackpoint_data *tp = psmouse->private; 163 struct trackpoint_data *tp = psmouse->private;
159 struct trackpoint_attr_data *attr = data; 164 struct trackpoint_attr_data *attr = data;
160 unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset); 165 bool *field = (void *)tp + attr->field_offset;
161 unsigned int value; 166 bool value;
162 int err; 167 int err;
163 168
164 err = kstrtouint(buf, 10, &value); 169 err = kstrtobool(buf, &value);
165 if (err) 170 if (err)
166 return err; 171 return err;
167 172
168 if (value > 1)
169 return -EINVAL;
170
171 if (attr->inverted) 173 if (attr->inverted)
172 value = !value; 174 value = !value;
173 175
@@ -193,30 +195,6 @@ PSMOUSE_DEFINE_ATTR(_name, S_IWUSR | S_IRUGO, \
193 &trackpoint_attr_##_name, \ 195 &trackpoint_attr_##_name, \
194 trackpoint_show_int_attr, trackpoint_set_bit_attr) 196 trackpoint_show_int_attr, trackpoint_set_bit_attr)
195 197
196#define TRACKPOINT_UPDATE_BIT(_psmouse, _tp, _name) \
197do { \
198 struct trackpoint_attr_data *_attr = &trackpoint_attr_##_name; \
199 \
200 trackpoint_update_bit(&_psmouse->ps2dev, \
201 _attr->command, _attr->mask, _tp->_name); \
202} while (0)
203
204#define TRACKPOINT_UPDATE(_power_on, _psmouse, _tp, _name) \
205do { \
206 if (!_power_on || \
207 _tp->_name != trackpoint_attr_##_name.power_on_default) { \
208 if (!trackpoint_attr_##_name.mask) \
209 trackpoint_write(&_psmouse->ps2dev, \
210 trackpoint_attr_##_name.command, \
211 _tp->_name); \
212 else \
213 TRACKPOINT_UPDATE_BIT(_psmouse, _tp, _name); \
214 } \
215} while (0)
216
217#define TRACKPOINT_SET_POWER_ON_DEFAULT(_tp, _name) \
218 (_tp->_name = trackpoint_attr_##_name.power_on_default)
219
220TRACKPOINT_INT_ATTR(sensitivity, TP_SENS, TP_DEF_SENS); 198TRACKPOINT_INT_ATTR(sensitivity, TP_SENS, TP_DEF_SENS);
221TRACKPOINT_INT_ATTR(speed, TP_SPEED, TP_DEF_SPEED); 199TRACKPOINT_INT_ATTR(speed, TP_SPEED, TP_DEF_SPEED);
222TRACKPOINT_INT_ATTR(inertia, TP_INERTIA, TP_DEF_INERTIA); 200TRACKPOINT_INT_ATTR(inertia, TP_INERTIA, TP_DEF_INERTIA);
@@ -229,13 +207,33 @@ TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME, TP_DEF_Z_TIME);
229TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV, TP_DEF_JENKS_CURV); 207TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV, TP_DEF_JENKS_CURV);
230TRACKPOINT_INT_ATTR(drift_time, TP_DRIFT_TIME, TP_DEF_DRIFT_TIME); 208TRACKPOINT_INT_ATTR(drift_time, TP_DRIFT_TIME, TP_DEF_DRIFT_TIME);
231 209
232TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0, 210TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, false,
233 TP_DEF_PTSON); 211 TP_DEF_PTSON);
234TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, 0, 212TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, false,
235 TP_DEF_SKIPBACK); 213 TP_DEF_SKIPBACK);
236TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, 1, 214TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, true,
237 TP_DEF_EXT_DEV); 215 TP_DEF_EXT_DEV);
238 216
217static bool trackpoint_is_attr_available(struct psmouse *psmouse,
218 struct attribute *attr)
219{
220 struct trackpoint_data *tp = psmouse->private;
221
222 return tp->variant_id == TP_VARIANT_IBM ||
223 attr == &psmouse_attr_sensitivity.dattr.attr ||
224 attr == &psmouse_attr_press_to_select.dattr.attr;
225}
226
227static umode_t trackpoint_is_attr_visible(struct kobject *kobj,
228 struct attribute *attr, int n)
229{
230 struct device *dev = container_of(kobj, struct device, kobj);
231 struct serio *serio = to_serio_port(dev);
232 struct psmouse *psmouse = serio_get_drvdata(serio);
233
234 return trackpoint_is_attr_available(psmouse, attr) ? attr->mode : 0;
235}
236
239static struct attribute *trackpoint_attrs[] = { 237static struct attribute *trackpoint_attrs[] = {
240 &psmouse_attr_sensitivity.dattr.attr, 238 &psmouse_attr_sensitivity.dattr.attr,
241 &psmouse_attr_speed.dattr.attr, 239 &psmouse_attr_speed.dattr.attr,
@@ -255,24 +253,56 @@ static struct attribute *trackpoint_attrs[] = {
255}; 253};
256 254
257static struct attribute_group trackpoint_attr_group = { 255static struct attribute_group trackpoint_attr_group = {
258 .attrs = trackpoint_attrs, 256 .is_visible = trackpoint_is_attr_visible,
257 .attrs = trackpoint_attrs,
259}; 258};
260 259
261static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *firmware_id) 260#define TRACKPOINT_UPDATE(_power_on, _psmouse, _tp, _name) \
262{ 261do { \
263 unsigned char param[2] = { 0 }; 262 struct trackpoint_attr_data *_attr = &trackpoint_attr_##_name; \
263 \
264 if ((!_power_on || _tp->_name != _attr->power_on_default) && \
265 trackpoint_is_attr_available(_psmouse, \
266 &psmouse_attr_##_name.dattr.attr)) { \
267 if (!_attr->mask) \
268 trackpoint_write(&_psmouse->ps2dev, \
269 _attr->command, _tp->_name); \
270 else \
271 trackpoint_update_bit(&_psmouse->ps2dev, \
272 _attr->command, _attr->mask, \
273 _tp->_name); \
274 } \
275} while (0)
264 276
265 if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID))) 277#define TRACKPOINT_SET_POWER_ON_DEFAULT(_tp, _name) \
266 return -1; 278do { \
279 _tp->_name = trackpoint_attr_##_name.power_on_default; \
280} while (0)
267 281
268 /* add new TP ID. */ 282static int trackpoint_start_protocol(struct psmouse *psmouse,
269 if (!(param[0] & TP_MAGIC_IDENT)) 283 u8 *variant_id, u8 *firmware_id)
270 return -1; 284{
285 u8 param[2] = { 0 };
286 int error;
271 287
272 if (firmware_id) 288 error = ps2_command(&psmouse->ps2dev,
273 *firmware_id = param[1]; 289 param, MAKE_PS2_CMD(0, 2, TP_READ_ID));
290 if (error)
291 return error;
292
293 switch (param[0]) {
294 case TP_VARIANT_IBM:
295 case TP_VARIANT_ALPS:
296 case TP_VARIANT_ELAN:
297 case TP_VARIANT_NXP:
298 if (variant_id)
299 *variant_id = param[0];
300 if (firmware_id)
301 *firmware_id = param[1];
302 return 0;
303 }
274 304
275 return 0; 305 return -ENODEV;
276} 306}
277 307
278/* 308/*
@@ -285,7 +315,7 @@ static int trackpoint_sync(struct psmouse *psmouse, bool in_power_on_state)
285{ 315{
286 struct trackpoint_data *tp = psmouse->private; 316 struct trackpoint_data *tp = psmouse->private;
287 317
288 if (!in_power_on_state) { 318 if (!in_power_on_state && tp->variant_id == TP_VARIANT_IBM) {
289 /* 319 /*
290 * Disable features that may make device unusable 320 * Disable features that may make device unusable
291 * with this driver. 321 * with this driver.
@@ -347,7 +377,8 @@ static void trackpoint_defaults(struct trackpoint_data *tp)
347 377
348static void trackpoint_disconnect(struct psmouse *psmouse) 378static void trackpoint_disconnect(struct psmouse *psmouse)
349{ 379{
350 sysfs_remove_group(&psmouse->ps2dev.serio->dev.kobj, &trackpoint_attr_group); 380 device_remove_group(&psmouse->ps2dev.serio->dev,
381 &trackpoint_attr_group);
351 382
352 kfree(psmouse->private); 383 kfree(psmouse->private);
353 psmouse->private = NULL; 384 psmouse->private = NULL;
@@ -355,14 +386,20 @@ static void trackpoint_disconnect(struct psmouse *psmouse)
355 386
356static int trackpoint_reconnect(struct psmouse *psmouse) 387static int trackpoint_reconnect(struct psmouse *psmouse)
357{ 388{
358 int reset_fail; 389 struct trackpoint_data *tp = psmouse->private;
390 int error;
391 bool was_reset;
359 392
360 if (trackpoint_start_protocol(psmouse, NULL)) 393 error = trackpoint_start_protocol(psmouse, NULL, NULL);
361 return -1; 394 if (error)
395 return error;
362 396
363 reset_fail = trackpoint_power_on_reset(&psmouse->ps2dev); 397 was_reset = tp->variant_id == TP_VARIANT_IBM &&
364 if (trackpoint_sync(psmouse, !reset_fail)) 398 trackpoint_power_on_reset(&psmouse->ps2dev) == 0;
365 return -1; 399
400 error = trackpoint_sync(psmouse, was_reset);
401 if (error)
402 return error;
366 403
367 return 0; 404 return 0;
368} 405}
@@ -370,46 +407,66 @@ static int trackpoint_reconnect(struct psmouse *psmouse)
370int trackpoint_detect(struct psmouse *psmouse, bool set_properties) 407int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
371{ 408{
372 struct ps2dev *ps2dev = &psmouse->ps2dev; 409 struct ps2dev *ps2dev = &psmouse->ps2dev;
373 unsigned char firmware_id; 410 struct trackpoint_data *tp;
374 unsigned char button_info; 411 u8 variant_id;
412 u8 firmware_id;
413 u8 button_info;
375 int error; 414 int error;
376 415
377 if (trackpoint_start_protocol(psmouse, &firmware_id)) 416 error = trackpoint_start_protocol(psmouse, &variant_id, &firmware_id);
378 return -1; 417 if (error)
418 return error;
379 419
380 if (!set_properties) 420 if (!set_properties)
381 return 0; 421 return 0;
382 422
383 if (trackpoint_read(ps2dev, TP_EXT_BTN, &button_info)) { 423 tp = kzalloc(sizeof(*tp), GFP_KERNEL);
384 psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n"); 424 if (!tp)
385 button_info = 0x33;
386 }
387
388 psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
389 if (!psmouse->private)
390 return -ENOMEM; 425 return -ENOMEM;
391 426
392 psmouse->vendor = "IBM"; 427 trackpoint_defaults(tp);
428 tp->variant_id = variant_id;
429 tp->firmware_id = firmware_id;
430
431 psmouse->private = tp;
432
433 psmouse->vendor = trackpoint_variants[variant_id];
393 psmouse->name = "TrackPoint"; 434 psmouse->name = "TrackPoint";
394 435
395 psmouse->reconnect = trackpoint_reconnect; 436 psmouse->reconnect = trackpoint_reconnect;
396 psmouse->disconnect = trackpoint_disconnect; 437 psmouse->disconnect = trackpoint_disconnect;
397 438
439 if (variant_id != TP_VARIANT_IBM) {
440 /* Newer variants do not support extended button query. */
441 button_info = 0x33;
442 } else {
443 error = trackpoint_read(ps2dev, TP_EXT_BTN, &button_info);
444 if (error) {
445 psmouse_warn(psmouse,
446 "failed to get extended button data, assuming 3 buttons\n");
447 button_info = 0x33;
448 } else if (!button_info) {
449 psmouse_warn(psmouse,
450 "got 0 in extended button data, assuming 3 buttons\n");
451 button_info = 0x33;
452 }
453 }
454
398 if ((button_info & 0x0f) >= 3) 455 if ((button_info & 0x0f) >= 3)
399 __set_bit(BTN_MIDDLE, psmouse->dev->keybit); 456 input_set_capability(psmouse->dev, EV_KEY, BTN_MIDDLE);
400 457
401 __set_bit(INPUT_PROP_POINTER, psmouse->dev->propbit); 458 __set_bit(INPUT_PROP_POINTER, psmouse->dev->propbit);
402 __set_bit(INPUT_PROP_POINTING_STICK, psmouse->dev->propbit); 459 __set_bit(INPUT_PROP_POINTING_STICK, psmouse->dev->propbit);
403 460
404 trackpoint_defaults(psmouse->private); 461 if (variant_id != TP_VARIANT_IBM ||
405 462 trackpoint_power_on_reset(ps2dev) != 0) {
406 error = trackpoint_power_on_reset(ps2dev); 463 /*
407 464 * Write defaults to TP if we did not reset the trackpoint.
408 /* Write defaults to TP only if reset fails. */ 465 */
409 if (error)
410 trackpoint_sync(psmouse, false); 466 trackpoint_sync(psmouse, false);
467 }
411 468
412 error = sysfs_create_group(&ps2dev->serio->dev.kobj, &trackpoint_attr_group); 469 error = device_add_group(&ps2dev->serio->dev, &trackpoint_attr_group);
413 if (error) { 470 if (error) {
414 psmouse_err(psmouse, 471 psmouse_err(psmouse,
415 "failed to create sysfs attributes, error: %d\n", 472 "failed to create sysfs attributes, error: %d\n",
@@ -420,8 +477,8 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
420 } 477 }
421 478
422 psmouse_info(psmouse, 479 psmouse_info(psmouse,
423 "IBM TrackPoint firmware: 0x%02x, buttons: %d/%d\n", 480 "%s TrackPoint firmware: 0x%02x, buttons: %d/%d\n",
424 firmware_id, 481 psmouse->vendor, firmware_id,
425 (button_info & 0xf0) >> 4, button_info & 0x0f); 482 (button_info & 0xf0) >> 4, button_info & 0x0f);
426 483
427 return 0; 484 return 0;
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index 88055755f82e..10a039148234 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -21,10 +21,16 @@
21#define TP_COMMAND 0xE2 /* Commands start with this */ 21#define TP_COMMAND 0xE2 /* Commands start with this */
22 22
23#define TP_READ_ID 0xE1 /* Sent for device identification */ 23#define TP_READ_ID 0xE1 /* Sent for device identification */
24#define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */
25 /* by the firmware ID */
26 /* Firmware ID includes 0x1, 0x2, 0x3 */
27 24
25/*
26 * Valid first byte responses to the "Read Secondary ID" (0xE1) command.
27 * 0x01 was the original IBM trackpoint, others implement very limited
28 * subset of trackpoint features.
29 */
30#define TP_VARIANT_IBM 0x01
31#define TP_VARIANT_ALPS 0x02
32#define TP_VARIANT_ELAN 0x03
33#define TP_VARIANT_NXP 0x04
28 34
29/* 35/*
30 * Commands 36 * Commands
@@ -136,18 +142,20 @@
136 142
137#define MAKE_PS2_CMD(params, results, cmd) ((params<<12) | (results<<8) | (cmd)) 143#define MAKE_PS2_CMD(params, results, cmd) ((params<<12) | (results<<8) | (cmd))
138 144
139struct trackpoint_data 145struct trackpoint_data {
140{ 146 u8 variant_id;
141 unsigned char sensitivity, speed, inertia, reach; 147 u8 firmware_id;
142 unsigned char draghys, mindrag; 148
143 unsigned char thresh, upthresh; 149 u8 sensitivity, speed, inertia, reach;
144 unsigned char ztime, jenks; 150 u8 draghys, mindrag;
145 unsigned char drift_time; 151 u8 thresh, upthresh;
152 u8 ztime, jenks;
153 u8 drift_time;
146 154
147 /* toggles */ 155 /* toggles */
148 unsigned char press_to_select; 156 bool press_to_select;
149 unsigned char skipback; 157 bool skipback;
150 unsigned char ext_dev; 158 bool ext_dev;
151}; 159};
152 160
153#ifdef CONFIG_MOUSE_PS2_TRACKPOINT 161#ifdef CONFIG_MOUSE_PS2_TRACKPOINT
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 4f2bb5947a4e..141ea228aac6 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -230,8 +230,10 @@ static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
230 rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, 230 rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev,
231 "Failed to process interrupt request: %d\n", ret); 231 "Failed to process interrupt request: %d\n", ret);
232 232
233 if (count) 233 if (count) {
234 kfree(attn_data.data); 234 kfree(attn_data.data);
235 attn_data.data = NULL;
236 }
235 237
236 if (!kfifo_is_empty(&drvdata->attn_fifo)) 238 if (!kfifo_is_empty(&drvdata->attn_fifo))
237 return rmi_irq_fn(irq, dev_id); 239 return rmi_irq_fn(irq, dev_id);
diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c
index ae966e333a2f..8a07ae147df6 100644
--- a/drivers/input/rmi4/rmi_f01.c
+++ b/drivers/input/rmi4/rmi_f01.c
@@ -570,14 +570,19 @@ static int rmi_f01_probe(struct rmi_function *fn)
570 570
571 dev_set_drvdata(&fn->dev, f01); 571 dev_set_drvdata(&fn->dev, f01);
572 572
573 error = devm_device_add_group(&fn->rmi_dev->dev, &rmi_f01_attr_group); 573 error = sysfs_create_group(&fn->rmi_dev->dev.kobj, &rmi_f01_attr_group);
574 if (error) 574 if (error)
575 dev_warn(&fn->dev, 575 dev_warn(&fn->dev, "Failed to create sysfs group: %d\n", error);
576 "Failed to create attribute group: %d\n", error);
577 576
578 return 0; 577 return 0;
579} 578}
580 579
580static void rmi_f01_remove(struct rmi_function *fn)
581{
582 /* Note that the bus device is used, not the F01 device */
583 sysfs_remove_group(&fn->rmi_dev->dev.kobj, &rmi_f01_attr_group);
584}
585
581static int rmi_f01_config(struct rmi_function *fn) 586static int rmi_f01_config(struct rmi_function *fn)
582{ 587{
583 struct f01_data *f01 = dev_get_drvdata(&fn->dev); 588 struct f01_data *f01 = dev_get_drvdata(&fn->dev);
@@ -717,6 +722,7 @@ struct rmi_function_handler rmi_f01_handler = {
717 }, 722 },
718 .func = 0x01, 723 .func = 0x01,
719 .probe = rmi_f01_probe, 724 .probe = rmi_f01_probe,
725 .remove = rmi_f01_remove,
720 .config = rmi_f01_config, 726 .config = rmi_f01_config,
721 .attention = rmi_f01_attention, 727 .attention = rmi_f01_attention,
722 .suspend = rmi_f01_suspend, 728 .suspend = rmi_f01_suspend,
diff --git a/drivers/input/touchscreen/88pm860x-ts.c b/drivers/input/touchscreen/88pm860x-ts.c
index 7ed828a51f4c..3486d9403805 100644
--- a/drivers/input/touchscreen/88pm860x-ts.c
+++ b/drivers/input/touchscreen/88pm860x-ts.c
@@ -126,7 +126,7 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
126 int data, n, ret; 126 int data, n, ret;
127 if (!np) 127 if (!np)
128 return -ENODEV; 128 return -ENODEV;
129 np = of_find_node_by_name(np, "touch"); 129 np = of_get_child_by_name(np, "touch");
130 if (!np) { 130 if (!np) {
131 dev_err(&pdev->dev, "Can't find touch node\n"); 131 dev_err(&pdev->dev, "Can't find touch node\n");
132 return -EINVAL; 132 return -EINVAL;
@@ -144,13 +144,13 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
144 if (data) { 144 if (data) {
145 ret = pm860x_reg_write(i2c, PM8607_GPADC_MISC1, data); 145 ret = pm860x_reg_write(i2c, PM8607_GPADC_MISC1, data);
146 if (ret < 0) 146 if (ret < 0)
147 return -EINVAL; 147 goto err_put_node;
148 } 148 }
149 /* set tsi prebias time */ 149 /* set tsi prebias time */
150 if (!of_property_read_u32(np, "marvell,88pm860x-tsi-prebias", &data)) { 150 if (!of_property_read_u32(np, "marvell,88pm860x-tsi-prebias", &data)) {
151 ret = pm860x_reg_write(i2c, PM8607_TSI_PREBIAS, data); 151 ret = pm860x_reg_write(i2c, PM8607_TSI_PREBIAS, data);
152 if (ret < 0) 152 if (ret < 0)
153 return -EINVAL; 153 goto err_put_node;
154 } 154 }
155 /* set prebias & prechg time of pen detect */ 155 /* set prebias & prechg time of pen detect */
156 data = 0; 156 data = 0;
@@ -161,10 +161,18 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
161 if (data) { 161 if (data) {
162 ret = pm860x_reg_write(i2c, PM8607_PD_PREBIAS, data); 162 ret = pm860x_reg_write(i2c, PM8607_PD_PREBIAS, data);
163 if (ret < 0) 163 if (ret < 0)
164 return -EINVAL; 164 goto err_put_node;
165 } 165 }
166 of_property_read_u32(np, "marvell,88pm860x-resistor-X", res_x); 166 of_property_read_u32(np, "marvell,88pm860x-resistor-X", res_x);
167
168 of_node_put(np);
169
167 return 0; 170 return 0;
171
172err_put_node:
173 of_node_put(np);
174
175 return -EINVAL;
168} 176}
169#else 177#else
170#define pm860x_touch_dt_init(x, y, z) (-1) 178#define pm860x_touch_dt_init(x, y, z) (-1)
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index e102d7764bc2..a458e5ec9e41 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -27,6 +27,7 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/input.h> 28#include <linux/input.h>
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
30#include <linux/irq.h>
30#include <linux/platform_device.h> 31#include <linux/platform_device.h>
31#include <linux/async.h> 32#include <linux/async.h>
32#include <linux/i2c.h> 33#include <linux/i2c.h>
@@ -1261,10 +1262,13 @@ static int elants_i2c_probe(struct i2c_client *client,
1261 } 1262 }
1262 1263
1263 /* 1264 /*
1264 * Systems using device tree should set up interrupt via DTS, 1265 * Platform code (ACPI, DTS) should normally set up interrupt
1265 * the rest will use the default falling edge interrupts. 1266 * for us, but in case it did not let's fall back to using falling
1267 * edge to be compatible with older Chromebooks.
1266 */ 1268 */
1267 irqflags = client->dev.of_node ? 0 : IRQF_TRIGGER_FALLING; 1269 irqflags = irq_get_trigger_type(client->irq);
1270 if (!irqflags)
1271 irqflags = IRQF_TRIGGER_FALLING;
1268 1272
1269 error = devm_request_threaded_irq(&client->dev, client->irq, 1273 error = devm_request_threaded_irq(&client->dev, client->irq,
1270 NULL, elants_i2c_irq, 1274 NULL, elants_i2c_irq,
diff --git a/drivers/input/touchscreen/hideep.c b/drivers/input/touchscreen/hideep.c
index fc080a7c2e1f..f1cd4dd9a4a3 100644
--- a/drivers/input/touchscreen/hideep.c
+++ b/drivers/input/touchscreen/hideep.c
@@ -10,8 +10,7 @@
10#include <linux/of.h> 10#include <linux/of.h>
11#include <linux/firmware.h> 11#include <linux/firmware.h>
12#include <linux/delay.h> 12#include <linux/delay.h>
13#include <linux/gpio.h> 13#include <linux/gpio/consumer.h>
14#include <linux/gpio/machine.h>
15#include <linux/i2c.h> 14#include <linux/i2c.h>
16#include <linux/acpi.h> 15#include <linux/acpi.h>
17#include <linux/interrupt.h> 16#include <linux/interrupt.h>
diff --git a/drivers/input/touchscreen/of_touchscreen.c b/drivers/input/touchscreen/of_touchscreen.c
index 8d7f9c8f2771..9642f103b726 100644
--- a/drivers/input/touchscreen/of_touchscreen.c
+++ b/drivers/input/touchscreen/of_touchscreen.c
@@ -13,6 +13,7 @@
13#include <linux/input.h> 13#include <linux/input.h>
14#include <linux/input/mt.h> 14#include <linux/input/mt.h>
15#include <linux/input/touchscreen.h> 15#include <linux/input/touchscreen.h>
16#include <linux/module.h>
16 17
17static bool touchscreen_get_prop_u32(struct device *dev, 18static bool touchscreen_get_prop_u32(struct device *dev,
18 const char *property, 19 const char *property,
@@ -185,3 +186,6 @@ void touchscreen_report_pos(struct input_dev *input,
185 input_report_abs(input, multitouch ? ABS_MT_POSITION_Y : ABS_Y, y); 186 input_report_abs(input, multitouch ? ABS_MT_POSITION_Y : ABS_Y, y);
186} 187}
187EXPORT_SYMBOL(touchscreen_report_pos); 188EXPORT_SYMBOL(touchscreen_report_pos);
189
190MODULE_LICENSE("GPL v2");
191MODULE_DESCRIPTION("Device-tree helpers functions for touchscreen devices");
diff --git a/drivers/input/touchscreen/s6sy761.c b/drivers/input/touchscreen/s6sy761.c
index 26b1cb8a88ec..675efa93d444 100644
--- a/drivers/input/touchscreen/s6sy761.c
+++ b/drivers/input/touchscreen/s6sy761.c
@@ -1,13 +1,8 @@
1/* 1// SPDX-License-Identifier: GPL-2.0
2 * Copyright (c) 2017 Samsung Electronics Co., Ltd. 2// Samsung S6SY761 Touchscreen device driver
3 * Author: Andi Shyti <andi.shyti@samsung.com> 3//
4 * 4// Copyright (c) 2017 Samsung Electronics Co., Ltd.
5 * This program is free software; you can redistribute it and/or modify 5// Copyright (c) 2017 Andi Shyti <andi.shyti@samsung.com>
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * Samsung S6SY761 Touchscreen device driver
10 */
11 6
12#include <asm/unaligned.h> 7#include <asm/unaligned.h>
13#include <linux/delay.h> 8#include <linux/delay.h>
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index c12d01899939..2a123e20a42e 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -1,13 +1,8 @@
1/* 1// SPDX-License-Identifier: GPL-2.0
2 * Copyright (c) 2017 Samsung Electronics Co., Ltd. 2// STMicroelectronics FTS Touchscreen device driver
3 * Author: Andi Shyti <andi.shyti@samsung.com> 3//
4 * 4// Copyright (c) 2017 Samsung Electronics Co., Ltd.
5 * This program is free software; you can redistribute it and/or modify 5// Copyright (c) 2017 Andi Shyti <andi.shyti@samsung.com>
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * STMicroelectronics FTS Touchscreen device driver
10 */
11 6
12#include <linux/delay.h> 7#include <linux/delay.h>
13#include <linux/i2c.h> 8#include <linux/i2c.h>
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 7d5eb004091d..97baf88d9505 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -4184,7 +4184,7 @@ static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
4184 struct irq_cfg *cfg); 4184 struct irq_cfg *cfg);
4185 4185
4186static int irq_remapping_activate(struct irq_domain *domain, 4186static int irq_remapping_activate(struct irq_domain *domain,
4187 struct irq_data *irq_data, bool early) 4187 struct irq_data *irq_data, bool reserve)
4188{ 4188{
4189 struct amd_ir_data *data = irq_data->chip_data; 4189 struct amd_ir_data *data = irq_data->chip_data;
4190 struct irq_2_irte *irte_info = &data->irq_2_irte; 4190 struct irq_2_irte *irte_info = &data->irq_2_irte;
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index f122071688fd..744592d330ca 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1698,13 +1698,15 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
1698 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; 1698 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
1699 domain->geometry.aperture_end = (1UL << ias) - 1; 1699 domain->geometry.aperture_end = (1UL << ias) - 1;
1700 domain->geometry.force_aperture = true; 1700 domain->geometry.force_aperture = true;
1701 smmu_domain->pgtbl_ops = pgtbl_ops;
1702 1701
1703 ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg); 1702 ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
1704 if (ret < 0) 1703 if (ret < 0) {
1705 free_io_pgtable_ops(pgtbl_ops); 1704 free_io_pgtable_ops(pgtbl_ops);
1705 return ret;
1706 }
1706 1707
1707 return ret; 1708 smmu_domain->pgtbl_ops = pgtbl_ops;
1709 return 0;
1708} 1710}
1709 1711
1710static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) 1712static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
@@ -1731,7 +1733,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
1731 1733
1732static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec) 1734static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
1733{ 1735{
1734 int i; 1736 int i, j;
1735 struct arm_smmu_master_data *master = fwspec->iommu_priv; 1737 struct arm_smmu_master_data *master = fwspec->iommu_priv;
1736 struct arm_smmu_device *smmu = master->smmu; 1738 struct arm_smmu_device *smmu = master->smmu;
1737 1739
@@ -1739,6 +1741,13 @@ static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
1739 u32 sid = fwspec->ids[i]; 1741 u32 sid = fwspec->ids[i];
1740 __le64 *step = arm_smmu_get_step_for_sid(smmu, sid); 1742 __le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
1741 1743
1744 /* Bridged PCI devices may end up with duplicated IDs */
1745 for (j = 0; j < i; j++)
1746 if (fwspec->ids[j] == sid)
1747 break;
1748 if (j < i)
1749 continue;
1750
1742 arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste); 1751 arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
1743 } 1752 }
1744} 1753}
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a0babdbf7146..4a2de34895ec 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2250,10 +2250,12 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2250 uint64_t tmp; 2250 uint64_t tmp;
2251 2251
2252 if (!sg_res) { 2252 if (!sg_res) {
2253 unsigned int pgoff = sg->offset & ~PAGE_MASK;
2254
2253 sg_res = aligned_nrpages(sg->offset, sg->length); 2255 sg_res = aligned_nrpages(sg->offset, sg->length);
2254 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset; 2256 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
2255 sg->dma_length = sg->length; 2257 sg->dma_length = sg->length;
2256 pteval = page_to_phys(sg_page(sg)) | prot; 2258 pteval = (sg_phys(sg) - pgoff) | prot;
2257 phys_pfn = pteval >> VTD_PAGE_SHIFT; 2259 phys_pfn = pteval >> VTD_PAGE_SHIFT;
2258 } 2260 }
2259 2261
@@ -3787,7 +3789,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
3787 3789
3788 for_each_sg(sglist, sg, nelems, i) { 3790 for_each_sg(sglist, sg, nelems, i) {
3789 BUG_ON(!sg_page(sg)); 3791 BUG_ON(!sg_page(sg));
3790 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset; 3792 sg->dma_address = sg_phys(sg);
3791 sg->dma_length = sg->length; 3793 sg->dma_length = sg->length;
3792 } 3794 }
3793 return nelems; 3795 return nelems;
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 76a193c7fcfc..66f69af2c219 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -1397,7 +1397,7 @@ static void intel_irq_remapping_free(struct irq_domain *domain,
1397} 1397}
1398 1398
1399static int intel_irq_remapping_activate(struct irq_domain *domain, 1399static int intel_irq_remapping_activate(struct irq_domain *domain,
1400 struct irq_data *irq_data, bool early) 1400 struct irq_data *irq_data, bool reserve)
1401{ 1401{
1402 intel_ir_reconfigure_irte(irq_data, true); 1402 intel_ir_reconfigure_irte(irq_data, true);
1403 return 0; 1403 return 0;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 4039e64cd342..06f025fd5726 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -2303,7 +2303,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2303} 2303}
2304 2304
2305static int its_irq_domain_activate(struct irq_domain *domain, 2305static int its_irq_domain_activate(struct irq_domain *domain,
2306 struct irq_data *d, bool early) 2306 struct irq_data *d, bool reserve)
2307{ 2307{
2308 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 2308 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2309 u32 event = its_get_event_id(d); 2309 u32 event = its_get_event_id(d);
@@ -2818,7 +2818,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
2818} 2818}
2819 2819
2820static int its_vpe_irq_domain_activate(struct irq_domain *domain, 2820static int its_vpe_irq_domain_activate(struct irq_domain *domain,
2821 struct irq_data *d, bool early) 2821 struct irq_data *d, bool reserve)
2822{ 2822{
2823 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 2823 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2824 struct its_node *its; 2824 struct its_node *its;
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 06f29cf5018a..cee59fe1321c 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -342,6 +342,9 @@ static irqreturn_t intc_irqpin_shared_irq_handler(int irq, void *dev_id)
342 */ 342 */
343static struct lock_class_key intc_irqpin_irq_lock_class; 343static struct lock_class_key intc_irqpin_irq_lock_class;
344 344
345/* And this is for the request mutex */
346static struct lock_class_key intc_irqpin_irq_request_class;
347
345static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq, 348static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
346 irq_hw_number_t hw) 349 irq_hw_number_t hw)
347{ 350{
@@ -352,7 +355,8 @@ static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
352 355
353 intc_irqpin_dbg(&p->irq[hw], "map"); 356 intc_irqpin_dbg(&p->irq[hw], "map");
354 irq_set_chip_data(virq, h->host_data); 357 irq_set_chip_data(virq, h->host_data);
355 irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class); 358 irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class,
359 &intc_irqpin_irq_request_class);
356 irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq); 360 irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
357 return 0; 361 return 0;
358} 362}
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index fd83c7f77a95..ede4fa0ac2cc 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -188,6 +188,7 @@ void led_blink_set(struct led_classdev *led_cdev,
188{ 188{
189 del_timer_sync(&led_cdev->blink_timer); 189 del_timer_sync(&led_cdev->blink_timer);
190 190
191 clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
191 clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags); 192 clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags);
192 clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags); 193 clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags);
193 194
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index b8ac591aaaa7..c546b567f3b5 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1611,7 +1611,8 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
1611 int l; 1611 int l;
1612 struct dm_buffer *b, *tmp; 1612 struct dm_buffer *b, *tmp;
1613 unsigned long freed = 0; 1613 unsigned long freed = 0;
1614 unsigned long count = nr_to_scan; 1614 unsigned long count = c->n_buffers[LIST_CLEAN] +
1615 c->n_buffers[LIST_DIRTY];
1615 unsigned long retain_target = get_retain_buffers(c); 1616 unsigned long retain_target = get_retain_buffers(c);
1616 1617
1617 for (l = 0; l < LIST_SIZE; l++) { 1618 for (l = 0; l < LIST_SIZE; l++) {
@@ -1647,8 +1648,11 @@ static unsigned long
1647dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc) 1648dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1648{ 1649{
1649 struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker); 1650 struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
1651 unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
1652 READ_ONCE(c->n_buffers[LIST_DIRTY]);
1653 unsigned long retain_target = get_retain_buffers(c);
1650 1654
1651 return READ_ONCE(c->n_buffers[LIST_CLEAN]) + READ_ONCE(c->n_buffers[LIST_DIRTY]); 1655 return (count < retain_target) ? 0 : (count - retain_target);
1652} 1656}
1653 1657
1654/* 1658/*
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index cf23a14f9c6a..47407e43b96a 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3472,18 +3472,18 @@ static int __init dm_cache_init(void)
3472{ 3472{
3473 int r; 3473 int r;
3474 3474
3475 r = dm_register_target(&cache_target);
3476 if (r) {
3477 DMERR("cache target registration failed: %d", r);
3478 return r;
3479 }
3480
3481 migration_cache = KMEM_CACHE(dm_cache_migration, 0); 3475 migration_cache = KMEM_CACHE(dm_cache_migration, 0);
3482 if (!migration_cache) { 3476 if (!migration_cache) {
3483 dm_unregister_target(&cache_target); 3477 dm_unregister_target(&cache_target);
3484 return -ENOMEM; 3478 return -ENOMEM;
3485 } 3479 }
3486 3480
3481 r = dm_register_target(&cache_target);
3482 if (r) {
3483 DMERR("cache target registration failed: %d", r);
3484 return r;
3485 }
3486
3487 return 0; 3487 return 0;
3488} 3488}
3489 3489
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 9fc12f556534..554d60394c06 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1954,10 +1954,15 @@ static int crypt_setkey(struct crypt_config *cc)
1954 /* Ignore extra keys (which are used for IV etc) */ 1954 /* Ignore extra keys (which are used for IV etc) */
1955 subkey_size = crypt_subkey_size(cc); 1955 subkey_size = crypt_subkey_size(cc);
1956 1956
1957 if (crypt_integrity_hmac(cc)) 1957 if (crypt_integrity_hmac(cc)) {
1958 if (subkey_size < cc->key_mac_size)
1959 return -EINVAL;
1960
1958 crypt_copy_authenckey(cc->authenc_key, cc->key, 1961 crypt_copy_authenckey(cc->authenc_key, cc->key,
1959 subkey_size - cc->key_mac_size, 1962 subkey_size - cc->key_mac_size,
1960 cc->key_mac_size); 1963 cc->key_mac_size);
1964 }
1965
1961 for (i = 0; i < cc->tfms_count; i++) { 1966 for (i = 0; i < cc->tfms_count; i++) {
1962 if (crypt_integrity_hmac(cc)) 1967 if (crypt_integrity_hmac(cc))
1963 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i], 1968 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
@@ -2053,9 +2058,6 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
2053 2058
2054 ret = crypt_setkey(cc); 2059 ret = crypt_setkey(cc);
2055 2060
2056 /* wipe the kernel key payload copy in each case */
2057 memset(cc->key, 0, cc->key_size * sizeof(u8));
2058
2059 if (!ret) { 2061 if (!ret) {
2060 set_bit(DM_CRYPT_KEY_VALID, &cc->flags); 2062 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2061 kzfree(cc->key_string); 2063 kzfree(cc->key_string);
@@ -2523,6 +2525,10 @@ static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
2523 } 2525 }
2524 } 2526 }
2525 2527
2528 /* wipe the kernel key payload copy */
2529 if (cc->key_string)
2530 memset(cc->key, 0, cc->key_size * sizeof(u8));
2531
2526 return ret; 2532 return ret;
2527} 2533}
2528 2534
@@ -2740,6 +2746,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2740 cc->tag_pool_max_sectors * cc->on_disk_tag_size); 2746 cc->tag_pool_max_sectors * cc->on_disk_tag_size);
2741 if (!cc->tag_pool) { 2747 if (!cc->tag_pool) {
2742 ti->error = "Cannot allocate integrity tags mempool"; 2748 ti->error = "Cannot allocate integrity tags mempool";
2749 ret = -ENOMEM;
2743 goto bad; 2750 goto bad;
2744 } 2751 }
2745 2752
@@ -2961,6 +2968,9 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
2961 return ret; 2968 return ret;
2962 if (cc->iv_gen_ops && cc->iv_gen_ops->init) 2969 if (cc->iv_gen_ops && cc->iv_gen_ops->init)
2963 ret = cc->iv_gen_ops->init(cc); 2970 ret = cc->iv_gen_ops->init(cc);
2971 /* wipe the kernel key payload copy */
2972 if (cc->key_string)
2973 memset(cc->key, 0, cc->key_size * sizeof(u8));
2964 return ret; 2974 return ret;
2965 } 2975 }
2966 if (argc == 2 && !strcasecmp(argv[1], "wipe")) { 2976 if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
@@ -3007,7 +3017,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
3007 3017
3008static struct target_type crypt_target = { 3018static struct target_type crypt_target = {
3009 .name = "crypt", 3019 .name = "crypt",
3010 .version = {1, 18, 0}, 3020 .version = {1, 18, 1},
3011 .module = THIS_MODULE, 3021 .module = THIS_MODULE,
3012 .ctr = crypt_ctr, 3022 .ctr = crypt_ctr,
3013 .dtr = crypt_dtr, 3023 .dtr = crypt_dtr,
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 05c7bfd0c9d9..46d7c8749222 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2559,7 +2559,8 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
2559 int r = 0; 2559 int r = 0;
2560 unsigned i; 2560 unsigned i;
2561 __u64 journal_pages, journal_desc_size, journal_tree_size; 2561 __u64 journal_pages, journal_desc_size, journal_tree_size;
2562 unsigned char *crypt_data = NULL; 2562 unsigned char *crypt_data = NULL, *crypt_iv = NULL;
2563 struct skcipher_request *req = NULL;
2563 2564
2564 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL); 2565 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
2565 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL); 2566 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
@@ -2617,9 +2618,20 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
2617 2618
2618 if (blocksize == 1) { 2619 if (blocksize == 1) {
2619 struct scatterlist *sg; 2620 struct scatterlist *sg;
2620 SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt); 2621
2621 unsigned char iv[ivsize]; 2622 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
2622 skcipher_request_set_tfm(req, ic->journal_crypt); 2623 if (!req) {
2624 *error = "Could not allocate crypt request";
2625 r = -ENOMEM;
2626 goto bad;
2627 }
2628
2629 crypt_iv = kmalloc(ivsize, GFP_KERNEL);
2630 if (!crypt_iv) {
2631 *error = "Could not allocate iv";
2632 r = -ENOMEM;
2633 goto bad;
2634 }
2623 2635
2624 ic->journal_xor = dm_integrity_alloc_page_list(ic); 2636 ic->journal_xor = dm_integrity_alloc_page_list(ic);
2625 if (!ic->journal_xor) { 2637 if (!ic->journal_xor) {
@@ -2641,9 +2653,9 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
2641 sg_set_buf(&sg[i], va, PAGE_SIZE); 2653 sg_set_buf(&sg[i], va, PAGE_SIZE);
2642 } 2654 }
2643 sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids); 2655 sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
2644 memset(iv, 0x00, ivsize); 2656 memset(crypt_iv, 0x00, ivsize);
2645 2657
2646 skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv); 2658 skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
2647 init_completion(&comp.comp); 2659 init_completion(&comp.comp);
2648 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 2660 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2649 if (do_crypt(true, req, &comp)) 2661 if (do_crypt(true, req, &comp))
@@ -2659,10 +2671,22 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
2659 crypto_free_skcipher(ic->journal_crypt); 2671 crypto_free_skcipher(ic->journal_crypt);
2660 ic->journal_crypt = NULL; 2672 ic->journal_crypt = NULL;
2661 } else { 2673 } else {
2662 SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
2663 unsigned char iv[ivsize];
2664 unsigned crypt_len = roundup(ivsize, blocksize); 2674 unsigned crypt_len = roundup(ivsize, blocksize);
2665 2675
2676 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
2677 if (!req) {
2678 *error = "Could not allocate crypt request";
2679 r = -ENOMEM;
2680 goto bad;
2681 }
2682
2683 crypt_iv = kmalloc(ivsize, GFP_KERNEL);
2684 if (!crypt_iv) {
2685 *error = "Could not allocate iv";
2686 r = -ENOMEM;
2687 goto bad;
2688 }
2689
2666 crypt_data = kmalloc(crypt_len, GFP_KERNEL); 2690 crypt_data = kmalloc(crypt_len, GFP_KERNEL);
2667 if (!crypt_data) { 2691 if (!crypt_data) {
2668 *error = "Unable to allocate crypt data"; 2692 *error = "Unable to allocate crypt data";
@@ -2670,8 +2694,6 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
2670 goto bad; 2694 goto bad;
2671 } 2695 }
2672 2696
2673 skcipher_request_set_tfm(req, ic->journal_crypt);
2674
2675 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal); 2697 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
2676 if (!ic->journal_scatterlist) { 2698 if (!ic->journal_scatterlist) {
2677 *error = "Unable to allocate sg list"; 2699 *error = "Unable to allocate sg list";
@@ -2695,12 +2717,12 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
2695 struct skcipher_request *section_req; 2717 struct skcipher_request *section_req;
2696 __u32 section_le = cpu_to_le32(i); 2718 __u32 section_le = cpu_to_le32(i);
2697 2719
2698 memset(iv, 0x00, ivsize); 2720 memset(crypt_iv, 0x00, ivsize);
2699 memset(crypt_data, 0x00, crypt_len); 2721 memset(crypt_data, 0x00, crypt_len);
2700 memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le))); 2722 memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
2701 2723
2702 sg_init_one(&sg, crypt_data, crypt_len); 2724 sg_init_one(&sg, crypt_data, crypt_len);
2703 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv); 2725 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
2704 init_completion(&comp.comp); 2726 init_completion(&comp.comp);
2705 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 2727 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2706 if (do_crypt(true, req, &comp)) 2728 if (do_crypt(true, req, &comp))
@@ -2758,6 +2780,9 @@ retest_commit_id:
2758 } 2780 }
2759bad: 2781bad:
2760 kfree(crypt_data); 2782 kfree(crypt_data);
2783 kfree(crypt_iv);
2784 skcipher_request_free(req);
2785
2761 return r; 2786 return r;
2762} 2787}
2763 2788
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index c8faa2b85842..f7810cc869ac 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -458,6 +458,38 @@ do { \
458} while (0) 458} while (0)
459 459
460/* 460/*
461 * Check whether bios must be queued in the device-mapper core rather
462 * than here in the target.
463 *
464 * If MPATHF_QUEUE_IF_NO_PATH and MPATHF_SAVED_QUEUE_IF_NO_PATH hold
465 * the same value then we are not between multipath_presuspend()
466 * and multipath_resume() calls and we have no need to check
467 * for the DMF_NOFLUSH_SUSPENDING flag.
468 */
469static bool __must_push_back(struct multipath *m, unsigned long flags)
470{
471 return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) !=
472 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags)) &&
473 dm_noflush_suspending(m->ti));
474}
475
476/*
477 * Following functions use READ_ONCE to get atomic access to
478 * all m->flags to avoid taking spinlock
479 */
480static bool must_push_back_rq(struct multipath *m)
481{
482 unsigned long flags = READ_ONCE(m->flags);
483 return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) || __must_push_back(m, flags);
484}
485
486static bool must_push_back_bio(struct multipath *m)
487{
488 unsigned long flags = READ_ONCE(m->flags);
489 return __must_push_back(m, flags);
490}
491
492/*
461 * Map cloned requests (request-based multipath) 493 * Map cloned requests (request-based multipath)
462 */ 494 */
463static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, 495static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
@@ -478,7 +510,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
478 pgpath = choose_pgpath(m, nr_bytes); 510 pgpath = choose_pgpath(m, nr_bytes);
479 511
480 if (!pgpath) { 512 if (!pgpath) {
481 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) 513 if (must_push_back_rq(m))
482 return DM_MAPIO_DELAY_REQUEUE; 514 return DM_MAPIO_DELAY_REQUEUE;
483 dm_report_EIO(m); /* Failed */ 515 dm_report_EIO(m); /* Failed */
484 return DM_MAPIO_KILL; 516 return DM_MAPIO_KILL;
@@ -553,7 +585,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
553 } 585 }
554 586
555 if (!pgpath) { 587 if (!pgpath) {
556 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) 588 if (must_push_back_bio(m))
557 return DM_MAPIO_REQUEUE; 589 return DM_MAPIO_REQUEUE;
558 dm_report_EIO(m); 590 dm_report_EIO(m);
559 return DM_MAPIO_KILL; 591 return DM_MAPIO_KILL;
@@ -651,8 +683,7 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
651 assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags, 683 assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags,
652 (save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) || 684 (save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
653 (!save_old_value && queue_if_no_path)); 685 (!save_old_value && queue_if_no_path));
654 assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, 686 assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
655 queue_if_no_path || dm_noflush_suspending(m->ti));
656 spin_unlock_irqrestore(&m->lock, flags); 687 spin_unlock_irqrestore(&m->lock, flags);
657 688
658 if (!queue_if_no_path) { 689 if (!queue_if_no_path) {
@@ -1486,7 +1517,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
1486 fail_path(pgpath); 1517 fail_path(pgpath);
1487 1518
1488 if (atomic_read(&m->nr_valid_paths) == 0 && 1519 if (atomic_read(&m->nr_valid_paths) == 0 &&
1489 !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { 1520 !must_push_back_rq(m)) {
1490 if (error == BLK_STS_IOERR) 1521 if (error == BLK_STS_IOERR)
1491 dm_report_EIO(m); 1522 dm_report_EIO(m);
1492 /* complete with the original error */ 1523 /* complete with the original error */
@@ -1521,8 +1552,12 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
1521 1552
1522 if (atomic_read(&m->nr_valid_paths) == 0 && 1553 if (atomic_read(&m->nr_valid_paths) == 0 &&
1523 !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { 1554 !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1524 dm_report_EIO(m); 1555 if (must_push_back_bio(m)) {
1525 *error = BLK_STS_IOERR; 1556 r = DM_ENDIO_REQUEUE;
1557 } else {
1558 dm_report_EIO(m);
1559 *error = BLK_STS_IOERR;
1560 }
1526 goto done; 1561 goto done;
1527 } 1562 }
1528 1563
@@ -1957,13 +1992,6 @@ static int __init dm_multipath_init(void)
1957{ 1992{
1958 int r; 1993 int r;
1959 1994
1960 r = dm_register_target(&multipath_target);
1961 if (r < 0) {
1962 DMERR("request-based register failed %d", r);
1963 r = -EINVAL;
1964 goto bad_register_target;
1965 }
1966
1967 kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0); 1995 kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
1968 if (!kmultipathd) { 1996 if (!kmultipathd) {
1969 DMERR("failed to create workqueue kmpathd"); 1997 DMERR("failed to create workqueue kmpathd");
@@ -1985,13 +2013,20 @@ static int __init dm_multipath_init(void)
1985 goto bad_alloc_kmpath_handlerd; 2013 goto bad_alloc_kmpath_handlerd;
1986 } 2014 }
1987 2015
2016 r = dm_register_target(&multipath_target);
2017 if (r < 0) {
2018 DMERR("request-based register failed %d", r);
2019 r = -EINVAL;
2020 goto bad_register_target;
2021 }
2022
1988 return 0; 2023 return 0;
1989 2024
2025bad_register_target:
2026 destroy_workqueue(kmpath_handlerd);
1990bad_alloc_kmpath_handlerd: 2027bad_alloc_kmpath_handlerd:
1991 destroy_workqueue(kmultipathd); 2028 destroy_workqueue(kmultipathd);
1992bad_alloc_kmultipathd: 2029bad_alloc_kmultipathd:
1993 dm_unregister_target(&multipath_target);
1994bad_register_target:
1995 return r; 2030 return r;
1996} 2031}
1997 2032
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 1113b42e1eda..a0613bd8ed00 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -2411,24 +2411,6 @@ static int __init dm_snapshot_init(void)
2411 return r; 2411 return r;
2412 } 2412 }
2413 2413
2414 r = dm_register_target(&snapshot_target);
2415 if (r < 0) {
2416 DMERR("snapshot target register failed %d", r);
2417 goto bad_register_snapshot_target;
2418 }
2419
2420 r = dm_register_target(&origin_target);
2421 if (r < 0) {
2422 DMERR("Origin target register failed %d", r);
2423 goto bad_register_origin_target;
2424 }
2425
2426 r = dm_register_target(&merge_target);
2427 if (r < 0) {
2428 DMERR("Merge target register failed %d", r);
2429 goto bad_register_merge_target;
2430 }
2431
2432 r = init_origin_hash(); 2414 r = init_origin_hash();
2433 if (r) { 2415 if (r) {
2434 DMERR("init_origin_hash failed."); 2416 DMERR("init_origin_hash failed.");
@@ -2449,19 +2431,37 @@ static int __init dm_snapshot_init(void)
2449 goto bad_pending_cache; 2431 goto bad_pending_cache;
2450 } 2432 }
2451 2433
2434 r = dm_register_target(&snapshot_target);
2435 if (r < 0) {
2436 DMERR("snapshot target register failed %d", r);
2437 goto bad_register_snapshot_target;
2438 }
2439
2440 r = dm_register_target(&origin_target);
2441 if (r < 0) {
2442 DMERR("Origin target register failed %d", r);
2443 goto bad_register_origin_target;
2444 }
2445
2446 r = dm_register_target(&merge_target);
2447 if (r < 0) {
2448 DMERR("Merge target register failed %d", r);
2449 goto bad_register_merge_target;
2450 }
2451
2452 return 0; 2452 return 0;
2453 2453
2454bad_pending_cache:
2455 kmem_cache_destroy(exception_cache);
2456bad_exception_cache:
2457 exit_origin_hash();
2458bad_origin_hash:
2459 dm_unregister_target(&merge_target);
2460bad_register_merge_target: 2454bad_register_merge_target:
2461 dm_unregister_target(&origin_target); 2455 dm_unregister_target(&origin_target);
2462bad_register_origin_target: 2456bad_register_origin_target:
2463 dm_unregister_target(&snapshot_target); 2457 dm_unregister_target(&snapshot_target);
2464bad_register_snapshot_target: 2458bad_register_snapshot_target:
2459 kmem_cache_destroy(pending_cache);
2460bad_pending_cache:
2461 kmem_cache_destroy(exception_cache);
2462bad_exception_cache:
2463 exit_origin_hash();
2464bad_origin_hash:
2465 dm_exception_store_exit(); 2465 dm_exception_store_exit();
2466 2466
2467 return r; 2467 return r;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 88130b5d95f9..aaffd0c0ee9a 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -453,14 +453,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
453 453
454 refcount_set(&dd->count, 1); 454 refcount_set(&dd->count, 1);
455 list_add(&dd->list, &t->devices); 455 list_add(&dd->list, &t->devices);
456 goto out;
456 457
457 } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) { 458 } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
458 r = upgrade_mode(dd, mode, t->md); 459 r = upgrade_mode(dd, mode, t->md);
459 if (r) 460 if (r)
460 return r; 461 return r;
461 refcount_inc(&dd->count);
462 } 462 }
463 463 refcount_inc(&dd->count);
464out:
464 *result = dd->dm_dev; 465 *result = dd->dm_dev;
465 return 0; 466 return 0;
466} 467}
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index d31d18d9727c..36ef284ad086 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -80,10 +80,14 @@
80#define SECTOR_TO_BLOCK_SHIFT 3 80#define SECTOR_TO_BLOCK_SHIFT 3
81 81
82/* 82/*
83 * For btree insert:
83 * 3 for btree insert + 84 * 3 for btree insert +
84 * 2 for btree lookup used within space map 85 * 2 for btree lookup used within space map
86 * For btree remove:
87 * 2 for shadow spine +
88 * 4 for rebalance 3 child node
85 */ 89 */
86#define THIN_MAX_CONCURRENT_LOCKS 5 90#define THIN_MAX_CONCURRENT_LOCKS 6
87 91
88/* This should be plenty */ 92/* This should be plenty */
89#define SPACE_MAP_ROOT_SIZE 128 93#define SPACE_MAP_ROOT_SIZE 128
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 89e5dff9b4cf..f91d771fff4b 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -4355,30 +4355,28 @@ static struct target_type thin_target = {
4355 4355
4356static int __init dm_thin_init(void) 4356static int __init dm_thin_init(void)
4357{ 4357{
4358 int r; 4358 int r = -ENOMEM;
4359 4359
4360 pool_table_init(); 4360 pool_table_init();
4361 4361
4362 _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
4363 if (!_new_mapping_cache)
4364 return r;
4365
4362 r = dm_register_target(&thin_target); 4366 r = dm_register_target(&thin_target);
4363 if (r) 4367 if (r)
4364 return r; 4368 goto bad_new_mapping_cache;
4365 4369
4366 r = dm_register_target(&pool_target); 4370 r = dm_register_target(&pool_target);
4367 if (r) 4371 if (r)
4368 goto bad_pool_target; 4372 goto bad_thin_target;
4369
4370 r = -ENOMEM;
4371
4372 _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
4373 if (!_new_mapping_cache)
4374 goto bad_new_mapping_cache;
4375 4373
4376 return 0; 4374 return 0;
4377 4375
4378bad_new_mapping_cache: 4376bad_thin_target:
4379 dm_unregister_target(&pool_target);
4380bad_pool_target:
4381 dm_unregister_target(&thin_target); 4377 dm_unregister_target(&thin_target);
4378bad_new_mapping_cache:
4379 kmem_cache_destroy(_new_mapping_cache);
4382 4380
4383 return r; 4381 return r;
4384} 4382}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 41c050b59ec4..4e4dee0ec2de 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7605,7 +7605,9 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
7605 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) 7605 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7606 /* Still cleaning up */ 7606 /* Still cleaning up */
7607 resync = max_sectors; 7607 resync = max_sectors;
7608 } else 7608 } else if (resync > max_sectors)
7609 resync = max_sectors;
7610 else
7609 resync -= atomic_read(&mddev->recovery_active); 7611 resync -= atomic_read(&mddev->recovery_active);
7610 7612
7611 if (resync == 0) { 7613 if (resync == 0) {
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index f21ce6a3d4cf..58b319757b1e 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -683,23 +683,8 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
683 pn->keys[1] = rn->keys[0]; 683 pn->keys[1] = rn->keys[0];
684 memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64)); 684 memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));
685 685
686 /* 686 unlock_block(s->info, left);
687 * rejig the spine. This is ugly, since it knows too 687 unlock_block(s->info, right);
688 * much about the spine
689 */
690 if (s->nodes[0] != new_parent) {
691 unlock_block(s->info, s->nodes[0]);
692 s->nodes[0] = new_parent;
693 }
694 if (key < le64_to_cpu(rn->keys[0])) {
695 unlock_block(s->info, right);
696 s->nodes[1] = left;
697 } else {
698 unlock_block(s->info, left);
699 s->nodes[1] = right;
700 }
701 s->count = 2;
702
703 return 0; 688 return 0;
704} 689}
705 690
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index cc9d337a1ed3..6df398e3a008 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -809,11 +809,15 @@ static void flush_pending_writes(struct r1conf *conf)
809 spin_lock_irq(&conf->device_lock); 809 spin_lock_irq(&conf->device_lock);
810 810
811 if (conf->pending_bio_list.head) { 811 if (conf->pending_bio_list.head) {
812 struct blk_plug plug;
812 struct bio *bio; 813 struct bio *bio;
814
813 bio = bio_list_get(&conf->pending_bio_list); 815 bio = bio_list_get(&conf->pending_bio_list);
814 conf->pending_count = 0; 816 conf->pending_count = 0;
815 spin_unlock_irq(&conf->device_lock); 817 spin_unlock_irq(&conf->device_lock);
818 blk_start_plug(&plug);
816 flush_bio_list(conf, bio); 819 flush_bio_list(conf, bio);
820 blk_finish_plug(&plug);
817 } else 821 } else
818 spin_unlock_irq(&conf->device_lock); 822 spin_unlock_irq(&conf->device_lock);
819} 823}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b9edbc747a95..c131835cf008 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -894,10 +894,13 @@ static void flush_pending_writes(struct r10conf *conf)
894 spin_lock_irq(&conf->device_lock); 894 spin_lock_irq(&conf->device_lock);
895 895
896 if (conf->pending_bio_list.head) { 896 if (conf->pending_bio_list.head) {
897 struct blk_plug plug;
897 struct bio *bio; 898 struct bio *bio;
899
898 bio = bio_list_get(&conf->pending_bio_list); 900 bio = bio_list_get(&conf->pending_bio_list);
899 conf->pending_count = 0; 901 conf->pending_count = 0;
900 spin_unlock_irq(&conf->device_lock); 902 spin_unlock_irq(&conf->device_lock);
903 blk_start_plug(&plug);
901 /* flush any pending bitmap writes to disk 904 /* flush any pending bitmap writes to disk
902 * before proceeding w/ I/O */ 905 * before proceeding w/ I/O */
903 bitmap_unplug(conf->mddev->bitmap); 906 bitmap_unplug(conf->mddev->bitmap);
@@ -918,6 +921,7 @@ static void flush_pending_writes(struct r10conf *conf)
918 generic_make_request(bio); 921 generic_make_request(bio);
919 bio = next; 922 bio = next;
920 } 923 }
924 blk_finish_plug(&plug);
921 } else 925 } else
922 spin_unlock_irq(&conf->device_lock); 926 spin_unlock_irq(&conf->device_lock);
923} 927}
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index f1c86d938502..39f31f07ffe9 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -2577,31 +2577,22 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
2577int r5c_journal_mode_set(struct mddev *mddev, int mode) 2577int r5c_journal_mode_set(struct mddev *mddev, int mode)
2578{ 2578{
2579 struct r5conf *conf; 2579 struct r5conf *conf;
2580 int err;
2581 2580
2582 if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH || 2581 if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
2583 mode > R5C_JOURNAL_MODE_WRITE_BACK) 2582 mode > R5C_JOURNAL_MODE_WRITE_BACK)
2584 return -EINVAL; 2583 return -EINVAL;
2585 2584
2586 err = mddev_lock(mddev);
2587 if (err)
2588 return err;
2589 conf = mddev->private; 2585 conf = mddev->private;
2590 if (!conf || !conf->log) { 2586 if (!conf || !conf->log)
2591 mddev_unlock(mddev);
2592 return -ENODEV; 2587 return -ENODEV;
2593 }
2594 2588
2595 if (raid5_calc_degraded(conf) > 0 && 2589 if (raid5_calc_degraded(conf) > 0 &&
2596 mode == R5C_JOURNAL_MODE_WRITE_BACK) { 2590 mode == R5C_JOURNAL_MODE_WRITE_BACK)
2597 mddev_unlock(mddev);
2598 return -EINVAL; 2591 return -EINVAL;
2599 }
2600 2592
2601 mddev_suspend(mddev); 2593 mddev_suspend(mddev);
2602 conf->log->r5c_journal_mode = mode; 2594 conf->log->r5c_journal_mode = mode;
2603 mddev_resume(mddev); 2595 mddev_resume(mddev);
2604 mddev_unlock(mddev);
2605 2596
2606 pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n", 2597 pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
2607 mdname(mddev), mode, r5c_journal_mode_str[mode]); 2598 mdname(mddev), mode, r5c_journal_mode_str[mode]);
@@ -2614,6 +2605,7 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev,
2614{ 2605{
2615 int mode = ARRAY_SIZE(r5c_journal_mode_str); 2606 int mode = ARRAY_SIZE(r5c_journal_mode_str);
2616 size_t len = length; 2607 size_t len = length;
2608 int ret;
2617 2609
2618 if (len < 2) 2610 if (len < 2)
2619 return -EINVAL; 2611 return -EINVAL;
@@ -2625,8 +2617,12 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev,
2625 if (strlen(r5c_journal_mode_str[mode]) == len && 2617 if (strlen(r5c_journal_mode_str[mode]) == len &&
2626 !strncmp(page, r5c_journal_mode_str[mode], len)) 2618 !strncmp(page, r5c_journal_mode_str[mode], len))
2627 break; 2619 break;
2628 2620 ret = mddev_lock(mddev);
2629 return r5c_journal_mode_set(mddev, mode) ?: length; 2621 if (ret)
2622 return ret;
2623 ret = r5c_journal_mode_set(mddev, mode);
2624 mddev_unlock(mddev);
2625 return ret ?: length;
2630} 2626}
2631 2627
2632struct md_sysfs_entry 2628struct md_sysfs_entry
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 31dc25e2871a..98ce4272ace9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2677,13 +2677,13 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
2677 pr_debug("raid456: error called\n"); 2677 pr_debug("raid456: error called\n");
2678 2678
2679 spin_lock_irqsave(&conf->device_lock, flags); 2679 spin_lock_irqsave(&conf->device_lock, flags);
2680 set_bit(Faulty, &rdev->flags);
2680 clear_bit(In_sync, &rdev->flags); 2681 clear_bit(In_sync, &rdev->flags);
2681 mddev->degraded = raid5_calc_degraded(conf); 2682 mddev->degraded = raid5_calc_degraded(conf);
2682 spin_unlock_irqrestore(&conf->device_lock, flags); 2683 spin_unlock_irqrestore(&conf->device_lock, flags);
2683 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 2684 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2684 2685
2685 set_bit(Blocked, &rdev->flags); 2686 set_bit(Blocked, &rdev->flags);
2686 set_bit(Faulty, &rdev->flags);
2687 set_mask_bits(&mddev->sb_flags, 0, 2687 set_mask_bits(&mddev->sb_flags, 0,
2688 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); 2688 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
2689 pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n" 2689 pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n"
diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
index e4ea2a0c7a24..c5c827e11b64 100644
--- a/drivers/media/common/siano/smscoreapi.c
+++ b/drivers/media/common/siano/smscoreapi.c
@@ -521,13 +521,13 @@ static void list_add_locked(struct list_head *new, struct list_head *head,
521 spin_unlock_irqrestore(lock, flags); 521 spin_unlock_irqrestore(lock, flags);
522} 522}
523 523
524/** 524/*
525 * register a client callback that called when device plugged in/unplugged 525 * register a client callback that called when device plugged in/unplugged
526 * NOTE: if devices exist callback is called immediately for each device 526 * NOTE: if devices exist callback is called immediately for each device
527 * 527 *
528 * @param hotplug callback 528 * @param hotplug callback
529 * 529 *
530 * @return 0 on success, <0 on error. 530 * return: 0 on success, <0 on error.
531 */ 531 */
532int smscore_register_hotplug(hotplug_t hotplug) 532int smscore_register_hotplug(hotplug_t hotplug)
533{ 533{
@@ -562,7 +562,7 @@ int smscore_register_hotplug(hotplug_t hotplug)
562} 562}
563EXPORT_SYMBOL_GPL(smscore_register_hotplug); 563EXPORT_SYMBOL_GPL(smscore_register_hotplug);
564 564
565/** 565/*
566 * unregister a client callback that called when device plugged in/unplugged 566 * unregister a client callback that called when device plugged in/unplugged
567 * 567 *
568 * @param hotplug callback 568 * @param hotplug callback
@@ -636,7 +636,7 @@ smscore_buffer_t *smscore_createbuffer(u8 *buffer, void *common_buffer,
636 return cb; 636 return cb;
637} 637}
638 638
639/** 639/*
640 * creates coredev object for a device, prepares buffers, 640 * creates coredev object for a device, prepares buffers,
641 * creates buffer mappings, notifies registered hotplugs about new device. 641 * creates buffer mappings, notifies registered hotplugs about new device.
642 * 642 *
@@ -644,7 +644,7 @@ smscore_buffer_t *smscore_createbuffer(u8 *buffer, void *common_buffer,
644 * and handlers 644 * and handlers
645 * @param coredev pointer to a value that receives created coredev object 645 * @param coredev pointer to a value that receives created coredev object
646 * 646 *
647 * @return 0 on success, <0 on error. 647 * return: 0 on success, <0 on error.
648 */ 648 */
649int smscore_register_device(struct smsdevice_params_t *params, 649int smscore_register_device(struct smsdevice_params_t *params,
650 struct smscore_device_t **coredev, 650 struct smscore_device_t **coredev,
@@ -764,10 +764,10 @@ static int smscore_sendrequest_and_wait(struct smscore_device_t *coredev,
764 0 : -ETIME; 764 0 : -ETIME;
765} 765}
766 766
767/** 767/*
768 * Starts & enables IR operations 768 * Starts & enables IR operations
769 * 769 *
770 * @return 0 on success, < 0 on error. 770 * return: 0 on success, < 0 on error.
771 */ 771 */
772static int smscore_init_ir(struct smscore_device_t *coredev) 772static int smscore_init_ir(struct smscore_device_t *coredev)
773{ 773{
@@ -812,13 +812,13 @@ static int smscore_init_ir(struct smscore_device_t *coredev)
812 return 0; 812 return 0;
813} 813}
814 814
815/** 815/*
816 * configures device features according to board configuration structure. 816 * configures device features according to board configuration structure.
817 * 817 *
818 * @param coredev pointer to a coredev object returned by 818 * @param coredev pointer to a coredev object returned by
819 * smscore_register_device 819 * smscore_register_device
820 * 820 *
821 * @return 0 on success, <0 on error. 821 * return: 0 on success, <0 on error.
822 */ 822 */
823static int smscore_configure_board(struct smscore_device_t *coredev) 823static int smscore_configure_board(struct smscore_device_t *coredev)
824{ 824{
@@ -861,13 +861,13 @@ static int smscore_configure_board(struct smscore_device_t *coredev)
861 return 0; 861 return 0;
862} 862}
863 863
864/** 864/*
865 * sets initial device mode and notifies client hotplugs that device is ready 865 * sets initial device mode and notifies client hotplugs that device is ready
866 * 866 *
867 * @param coredev pointer to a coredev object returned by 867 * @param coredev pointer to a coredev object returned by
868 * smscore_register_device 868 * smscore_register_device
869 * 869 *
870 * @return 0 on success, <0 on error. 870 * return: 0 on success, <0 on error.
871 */ 871 */
872int smscore_start_device(struct smscore_device_t *coredev) 872int smscore_start_device(struct smscore_device_t *coredev)
873{ 873{
@@ -1087,7 +1087,7 @@ static char *smscore_fw_lkup[][DEVICE_MODE_MAX] = {
1087 }, 1087 },
1088}; 1088};
1089 1089
1090/** 1090/*
1091 * get firmware file name from one of the two mechanisms : sms_boards or 1091 * get firmware file name from one of the two mechanisms : sms_boards or
1092 * smscore_fw_lkup. 1092 * smscore_fw_lkup.
1093 * @param coredev pointer to a coredev object returned by 1093 * @param coredev pointer to a coredev object returned by
@@ -1096,7 +1096,7 @@ static char *smscore_fw_lkup[][DEVICE_MODE_MAX] = {
1096 * @param lookup if 1, always get the fw filename from smscore_fw_lkup 1096 * @param lookup if 1, always get the fw filename from smscore_fw_lkup
1097 * table. if 0, try first to get from sms_boards 1097 * table. if 0, try first to get from sms_boards
1098 * 1098 *
1099 * @return 0 on success, <0 on error. 1099 * return: 0 on success, <0 on error.
1100 */ 1100 */
1101static char *smscore_get_fw_filename(struct smscore_device_t *coredev, 1101static char *smscore_get_fw_filename(struct smscore_device_t *coredev,
1102 int mode) 1102 int mode)
@@ -1125,7 +1125,7 @@ static char *smscore_get_fw_filename(struct smscore_device_t *coredev,
1125 return fw[mode]; 1125 return fw[mode];
1126} 1126}
1127 1127
1128/** 1128/*
1129 * loads specified firmware into a buffer and calls device loadfirmware_handler 1129 * loads specified firmware into a buffer and calls device loadfirmware_handler
1130 * 1130 *
1131 * @param coredev pointer to a coredev object returned by 1131 * @param coredev pointer to a coredev object returned by
@@ -1133,7 +1133,7 @@ static char *smscore_get_fw_filename(struct smscore_device_t *coredev,
1133 * @param filename null-terminated string specifies firmware file name 1133 * @param filename null-terminated string specifies firmware file name
1134 * @param loadfirmware_handler device handler that loads firmware 1134 * @param loadfirmware_handler device handler that loads firmware
1135 * 1135 *
1136 * @return 0 on success, <0 on error. 1136 * return: 0 on success, <0 on error.
1137 */ 1137 */
1138static int smscore_load_firmware_from_file(struct smscore_device_t *coredev, 1138static int smscore_load_firmware_from_file(struct smscore_device_t *coredev,
1139 int mode, 1139 int mode,
@@ -1182,14 +1182,14 @@ static int smscore_load_firmware_from_file(struct smscore_device_t *coredev,
1182 return rc; 1182 return rc;
1183} 1183}
1184 1184
1185/** 1185/*
1186 * notifies all clients registered with the device, notifies hotplugs, 1186 * notifies all clients registered with the device, notifies hotplugs,
1187 * frees all buffers and coredev object 1187 * frees all buffers and coredev object
1188 * 1188 *
1189 * @param coredev pointer to a coredev object returned by 1189 * @param coredev pointer to a coredev object returned by
1190 * smscore_register_device 1190 * smscore_register_device
1191 * 1191 *
1192 * @return 0 on success, <0 on error. 1192 * return: 0 on success, <0 on error.
1193 */ 1193 */
1194void smscore_unregister_device(struct smscore_device_t *coredev) 1194void smscore_unregister_device(struct smscore_device_t *coredev)
1195{ 1195{
@@ -1282,14 +1282,14 @@ static int smscore_detect_mode(struct smscore_device_t *coredev)
1282 return rc; 1282 return rc;
1283} 1283}
1284 1284
1285/** 1285/*
1286 * send init device request and wait for response 1286 * send init device request and wait for response
1287 * 1287 *
1288 * @param coredev pointer to a coredev object returned by 1288 * @param coredev pointer to a coredev object returned by
1289 * smscore_register_device 1289 * smscore_register_device
1290 * @param mode requested mode of operation 1290 * @param mode requested mode of operation
1291 * 1291 *
1292 * @return 0 on success, <0 on error. 1292 * return: 0 on success, <0 on error.
1293 */ 1293 */
1294static int smscore_init_device(struct smscore_device_t *coredev, int mode) 1294static int smscore_init_device(struct smscore_device_t *coredev, int mode)
1295{ 1295{
@@ -1315,7 +1315,7 @@ static int smscore_init_device(struct smscore_device_t *coredev, int mode)
1315 return rc; 1315 return rc;
1316} 1316}
1317 1317
1318/** 1318/*
1319 * calls device handler to change mode of operation 1319 * calls device handler to change mode of operation
1320 * NOTE: stellar/usb may disconnect when changing mode 1320 * NOTE: stellar/usb may disconnect when changing mode
1321 * 1321 *
@@ -1323,7 +1323,7 @@ static int smscore_init_device(struct smscore_device_t *coredev, int mode)
1323 * smscore_register_device 1323 * smscore_register_device
1324 * @param mode requested mode of operation 1324 * @param mode requested mode of operation
1325 * 1325 *
1326 * @return 0 on success, <0 on error. 1326 * return: 0 on success, <0 on error.
1327 */ 1327 */
1328int smscore_set_device_mode(struct smscore_device_t *coredev, int mode) 1328int smscore_set_device_mode(struct smscore_device_t *coredev, int mode)
1329{ 1329{
@@ -1411,13 +1411,13 @@ int smscore_set_device_mode(struct smscore_device_t *coredev, int mode)
1411 return rc; 1411 return rc;
1412} 1412}
1413 1413
1414/** 1414/*
1415 * calls device handler to get current mode of operation 1415 * calls device handler to get current mode of operation
1416 * 1416 *
1417 * @param coredev pointer to a coredev object returned by 1417 * @param coredev pointer to a coredev object returned by
1418 * smscore_register_device 1418 * smscore_register_device
1419 * 1419 *
1420 * @return current mode 1420 * return: current mode
1421 */ 1421 */
1422int smscore_get_device_mode(struct smscore_device_t *coredev) 1422int smscore_get_device_mode(struct smscore_device_t *coredev)
1423{ 1423{
@@ -1425,7 +1425,7 @@ int smscore_get_device_mode(struct smscore_device_t *coredev)
1425} 1425}
1426EXPORT_SYMBOL_GPL(smscore_get_device_mode); 1426EXPORT_SYMBOL_GPL(smscore_get_device_mode);
1427 1427
1428/** 1428/*
1429 * find client by response id & type within the clients list. 1429 * find client by response id & type within the clients list.
1430 * return client handle or NULL. 1430 * return client handle or NULL.
1431 * 1431 *
@@ -1462,7 +1462,7 @@ found:
1462 return client; 1462 return client;
1463} 1463}
1464 1464
1465/** 1465/*
1466 * find client by response id/type, call clients onresponse handler 1466 * find client by response id/type, call clients onresponse handler
1467 * return buffer to pool on error 1467 * return buffer to pool on error
1468 * 1468 *
@@ -1615,13 +1615,13 @@ void smscore_onresponse(struct smscore_device_t *coredev,
1615} 1615}
1616EXPORT_SYMBOL_GPL(smscore_onresponse); 1616EXPORT_SYMBOL_GPL(smscore_onresponse);
1617 1617
1618/** 1618/*
1619 * return pointer to next free buffer descriptor from core pool 1619 * return pointer to next free buffer descriptor from core pool
1620 * 1620 *
1621 * @param coredev pointer to a coredev object returned by 1621 * @param coredev pointer to a coredev object returned by
1622 * smscore_register_device 1622 * smscore_register_device
1623 * 1623 *
1624 * @return pointer to descriptor on success, NULL on error. 1624 * return: pointer to descriptor on success, NULL on error.
1625 */ 1625 */
1626 1626
1627static struct smscore_buffer_t *get_entry(struct smscore_device_t *coredev) 1627static struct smscore_buffer_t *get_entry(struct smscore_device_t *coredev)
@@ -1648,7 +1648,7 @@ struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev)
1648} 1648}
1649EXPORT_SYMBOL_GPL(smscore_getbuffer); 1649EXPORT_SYMBOL_GPL(smscore_getbuffer);
1650 1650
1651/** 1651/*
1652 * return buffer descriptor to a pool 1652 * return buffer descriptor to a pool
1653 * 1653 *
1654 * @param coredev pointer to a coredev object returned by 1654 * @param coredev pointer to a coredev object returned by
@@ -1693,7 +1693,7 @@ static int smscore_validate_client(struct smscore_device_t *coredev,
1693 return 0; 1693 return 0;
1694} 1694}
1695 1695
1696/** 1696/*
1697 * creates smsclient object, check that id is taken by another client 1697 * creates smsclient object, check that id is taken by another client
1698 * 1698 *
1699 * @param coredev pointer to a coredev object from clients hotplug 1699 * @param coredev pointer to a coredev object from clients hotplug
@@ -1705,7 +1705,7 @@ static int smscore_validate_client(struct smscore_device_t *coredev,
1705 * @param context client-specific context 1705 * @param context client-specific context
1706 * @param client pointer to a value that receives created smsclient object 1706 * @param client pointer to a value that receives created smsclient object
1707 * 1707 *
1708 * @return 0 on success, <0 on error. 1708 * return: 0 on success, <0 on error.
1709 */ 1709 */
1710int smscore_register_client(struct smscore_device_t *coredev, 1710int smscore_register_client(struct smscore_device_t *coredev,
1711 struct smsclient_params_t *params, 1711 struct smsclient_params_t *params,
@@ -1740,7 +1740,7 @@ int smscore_register_client(struct smscore_device_t *coredev,
1740} 1740}
1741EXPORT_SYMBOL_GPL(smscore_register_client); 1741EXPORT_SYMBOL_GPL(smscore_register_client);
1742 1742
1743/** 1743/*
1744 * frees smsclient object and all subclients associated with it 1744 * frees smsclient object and all subclients associated with it
1745 * 1745 *
1746 * @param client pointer to smsclient object returned by 1746 * @param client pointer to smsclient object returned by
@@ -1771,7 +1771,7 @@ void smscore_unregister_client(struct smscore_client_t *client)
1771} 1771}
1772EXPORT_SYMBOL_GPL(smscore_unregister_client); 1772EXPORT_SYMBOL_GPL(smscore_unregister_client);
1773 1773
1774/** 1774/*
1775 * verifies that source id is not taken by another client, 1775 * verifies that source id is not taken by another client,
1776 * calls device handler to send requests to the device 1776 * calls device handler to send requests to the device
1777 * 1777 *
@@ -1780,7 +1780,7 @@ EXPORT_SYMBOL_GPL(smscore_unregister_client);
1780 * @param buffer pointer to a request buffer 1780 * @param buffer pointer to a request buffer
1781 * @param size size (in bytes) of request buffer 1781 * @param size size (in bytes) of request buffer
1782 * 1782 *
1783 * @return 0 on success, <0 on error. 1783 * return: 0 on success, <0 on error.
1784 */ 1784 */
1785int smsclient_sendrequest(struct smscore_client_t *client, 1785int smsclient_sendrequest(struct smscore_client_t *client,
1786 void *buffer, size_t size) 1786 void *buffer, size_t size)
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index 95b3723282f4..d48b61eb01f4 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -206,7 +206,7 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
206 * @hlen: Number of bytes in haystack. 206 * @hlen: Number of bytes in haystack.
207 * @needle: Buffer to find. 207 * @needle: Buffer to find.
208 * @nlen: Number of bytes in needle. 208 * @nlen: Number of bytes in needle.
209 * @return Pointer into haystack needle was found at, or NULL if not found. 209 * return: Pointer into haystack needle was found at, or NULL if not found.
210 */ 210 */
211static char *findstr(char *haystack, int hlen, char *needle, int nlen) 211static char *findstr(char *haystack, int hlen, char *needle, int nlen)
212{ 212{
@@ -226,7 +226,7 @@ static char *findstr(char *haystack, int hlen, char *needle, int nlen)
226/* ************************************************************************** */ 226/* ************************************************************************** */
227/* EN50221 physical interface functions */ 227/* EN50221 physical interface functions */
228 228
229/** 229/*
230 * dvb_ca_en50221_check_camstatus - Check CAM status. 230 * dvb_ca_en50221_check_camstatus - Check CAM status.
231 */ 231 */
232static int dvb_ca_en50221_check_camstatus(struct dvb_ca_private *ca, int slot) 232static int dvb_ca_en50221_check_camstatus(struct dvb_ca_private *ca, int slot)
@@ -275,9 +275,9 @@ static int dvb_ca_en50221_check_camstatus(struct dvb_ca_private *ca, int slot)
275 * @ca: CA instance. 275 * @ca: CA instance.
276 * @slot: Slot on interface. 276 * @slot: Slot on interface.
277 * @waitfor: Flags to wait for. 277 * @waitfor: Flags to wait for.
278 * @timeout_ms: Timeout in milliseconds. 278 * @timeout_hz: Timeout in milliseconds.
279 * 279 *
280 * @return 0 on success, nonzero on error. 280 * return: 0 on success, nonzero on error.
281 */ 281 */
282static int dvb_ca_en50221_wait_if_status(struct dvb_ca_private *ca, int slot, 282static int dvb_ca_en50221_wait_if_status(struct dvb_ca_private *ca, int slot,
283 u8 waitfor, int timeout_hz) 283 u8 waitfor, int timeout_hz)
@@ -325,7 +325,7 @@ static int dvb_ca_en50221_wait_if_status(struct dvb_ca_private *ca, int slot,
325 * @ca: CA instance. 325 * @ca: CA instance.
326 * @slot: Slot id. 326 * @slot: Slot id.
327 * 327 *
328 * @return 0 on success, nonzero on failure. 328 * return: 0 on success, nonzero on failure.
329 */ 329 */
330static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot) 330static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot)
331{ 331{
@@ -397,11 +397,11 @@ static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot)
397 * @ca: CA instance. 397 * @ca: CA instance.
398 * @slot: Slot id. 398 * @slot: Slot id.
399 * @address: Address to read from. Updated. 399 * @address: Address to read from. Updated.
400 * @tupleType: Tuple id byte. Updated. 400 * @tuple_type: Tuple id byte. Updated.
401 * @tupleLength: Tuple length. Updated. 401 * @tuple_length: Tuple length. Updated.
402 * @tuple: Dest buffer for tuple (must be 256 bytes). Updated. 402 * @tuple: Dest buffer for tuple (must be 256 bytes). Updated.
403 * 403 *
404 * @return 0 on success, nonzero on error. 404 * return: 0 on success, nonzero on error.
405 */ 405 */
406static int dvb_ca_en50221_read_tuple(struct dvb_ca_private *ca, int slot, 406static int dvb_ca_en50221_read_tuple(struct dvb_ca_private *ca, int slot,
407 int *address, int *tuple_type, 407 int *address, int *tuple_type,
@@ -455,7 +455,7 @@ static int dvb_ca_en50221_read_tuple(struct dvb_ca_private *ca, int slot,
455 * @ca: CA instance. 455 * @ca: CA instance.
456 * @slot: Slot id. 456 * @slot: Slot id.
457 * 457 *
458 * @return 0 on success, <0 on failure. 458 * return: 0 on success, <0 on failure.
459 */ 459 */
460static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot) 460static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot)
461{ 461{
@@ -632,10 +632,11 @@ static int dvb_ca_en50221_set_configoption(struct dvb_ca_private *ca, int slot)
632 * @ca: CA instance. 632 * @ca: CA instance.
633 * @slot: Slot to read from. 633 * @slot: Slot to read from.
634 * @ebuf: If non-NULL, the data will be written to this buffer. If NULL, 634 * @ebuf: If non-NULL, the data will be written to this buffer. If NULL,
635 * the data will be added into the buffering system as a normal fragment. 635 * the data will be added into the buffering system as a normal
636 * fragment.
636 * @ecount: Size of ebuf. Ignored if ebuf is NULL. 637 * @ecount: Size of ebuf. Ignored if ebuf is NULL.
637 * 638 *
638 * @return Number of bytes read, or < 0 on error 639 * return: Number of bytes read, or < 0 on error
639 */ 640 */
640static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, 641static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot,
641 u8 *ebuf, int ecount) 642 u8 *ebuf, int ecount)
@@ -784,11 +785,11 @@ exit:
784 * 785 *
785 * @ca: CA instance. 786 * @ca: CA instance.
786 * @slot: Slot to write to. 787 * @slot: Slot to write to.
787 * @ebuf: The data in this buffer is treated as a complete link-level packet to 788 * @buf: The data in this buffer is treated as a complete link-level packet to
788 * be written. 789 * be written.
789 * @count: Size of ebuf. 790 * @bytes_write: Size of ebuf.
790 * 791 *
791 * @return Number of bytes written, or < 0 on error. 792 * return: Number of bytes written, or < 0 on error.
792 */ 793 */
793static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, 794static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
794 u8 *buf, int bytes_write) 795 u8 *buf, int bytes_write)
@@ -933,7 +934,7 @@ static int dvb_ca_en50221_slot_shutdown(struct dvb_ca_private *ca, int slot)
933/** 934/**
934 * dvb_ca_en50221_camchange_irq - A CAMCHANGE IRQ has occurred. 935 * dvb_ca_en50221_camchange_irq - A CAMCHANGE IRQ has occurred.
935 * 936 *
936 * @ca: CA instance. 937 * @pubca: CA instance.
937 * @slot: Slot concerned. 938 * @slot: Slot concerned.
938 * @change_type: One of the DVB_CA_CAMCHANGE_* values. 939 * @change_type: One of the DVB_CA_CAMCHANGE_* values.
939 */ 940 */
@@ -963,7 +964,7 @@ EXPORT_SYMBOL(dvb_ca_en50221_camchange_irq);
963/** 964/**
964 * dvb_ca_en50221_camready_irq - A CAMREADY IRQ has occurred. 965 * dvb_ca_en50221_camready_irq - A CAMREADY IRQ has occurred.
965 * 966 *
966 * @ca: CA instance. 967 * @pubca: CA instance.
967 * @slot: Slot concerned. 968 * @slot: Slot concerned.
968 */ 969 */
969void dvb_ca_en50221_camready_irq(struct dvb_ca_en50221 *pubca, int slot) 970void dvb_ca_en50221_camready_irq(struct dvb_ca_en50221 *pubca, int slot)
@@ -983,7 +984,7 @@ EXPORT_SYMBOL(dvb_ca_en50221_camready_irq);
983/** 984/**
984 * dvb_ca_en50221_frda_irq - An FR or DA IRQ has occurred. 985 * dvb_ca_en50221_frda_irq - An FR or DA IRQ has occurred.
985 * 986 *
986 * @ca: CA instance. 987 * @pubca: CA instance.
987 * @slot: Slot concerned. 988 * @slot: Slot concerned.
988 */ 989 */
989void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221 *pubca, int slot) 990void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221 *pubca, int slot)
@@ -1091,7 +1092,7 @@ static void dvb_ca_en50221_thread_update_delay(struct dvb_ca_private *ca)
1091 * 1092 *
1092 * @ca: CA instance. 1093 * @ca: CA instance.
1093 * @slot: Slot to process. 1094 * @slot: Slot to process.
1094 * @return: 0 .. no change 1095 * return:: 0 .. no change
1095 * 1 .. CAM state changed 1096 * 1 .. CAM state changed
1096 */ 1097 */
1097 1098
@@ -1296,7 +1297,7 @@ static void dvb_ca_en50221_thread_state_machine(struct dvb_ca_private *ca,
1296 mutex_unlock(&sl->slot_lock); 1297 mutex_unlock(&sl->slot_lock);
1297} 1298}
1298 1299
1299/** 1300/*
1300 * Kernel thread which monitors CA slots for CAM changes, and performs data 1301 * Kernel thread which monitors CA slots for CAM changes, and performs data
1301 * transfers. 1302 * transfers.
1302 */ 1303 */
@@ -1336,12 +1337,11 @@ static int dvb_ca_en50221_thread(void *data)
1336 * Real ioctl implementation. 1337 * Real ioctl implementation.
1337 * NOTE: CA_SEND_MSG/CA_GET_MSG ioctls have userspace buffers passed to them. 1338 * NOTE: CA_SEND_MSG/CA_GET_MSG ioctls have userspace buffers passed to them.
1338 * 1339 *
1339 * @inode: Inode concerned.
1340 * @file: File concerned. 1340 * @file: File concerned.
1341 * @cmd: IOCTL command. 1341 * @cmd: IOCTL command.
1342 * @arg: Associated argument. 1342 * @parg: Associated argument.
1343 * 1343 *
1344 * @return 0 on success, <0 on error. 1344 * return: 0 on success, <0 on error.
1345 */ 1345 */
1346static int dvb_ca_en50221_io_do_ioctl(struct file *file, 1346static int dvb_ca_en50221_io_do_ioctl(struct file *file,
1347 unsigned int cmd, void *parg) 1347 unsigned int cmd, void *parg)
@@ -1420,12 +1420,11 @@ out_unlock:
1420/** 1420/**
1421 * Wrapper for ioctl implementation. 1421 * Wrapper for ioctl implementation.
1422 * 1422 *
1423 * @inode: Inode concerned.
1424 * @file: File concerned. 1423 * @file: File concerned.
1425 * @cmd: IOCTL command. 1424 * @cmd: IOCTL command.
1426 * @arg: Associated argument. 1425 * @arg: Associated argument.
1427 * 1426 *
1428 * @return 0 on success, <0 on error. 1427 * return: 0 on success, <0 on error.
1429 */ 1428 */
1430static long dvb_ca_en50221_io_ioctl(struct file *file, 1429static long dvb_ca_en50221_io_ioctl(struct file *file,
1431 unsigned int cmd, unsigned long arg) 1430 unsigned int cmd, unsigned long arg)
@@ -1441,7 +1440,7 @@ static long dvb_ca_en50221_io_ioctl(struct file *file,
1441 * @count: Size of source buffer. 1440 * @count: Size of source buffer.
1442 * @ppos: Position in file (ignored). 1441 * @ppos: Position in file (ignored).
1443 * 1442 *
1444 * @return Number of bytes read, or <0 on error. 1443 * return: Number of bytes read, or <0 on error.
1445 */ 1444 */
1446static ssize_t dvb_ca_en50221_io_write(struct file *file, 1445static ssize_t dvb_ca_en50221_io_write(struct file *file,
1447 const char __user *buf, size_t count, 1446 const char __user *buf, size_t count,
@@ -1536,7 +1535,7 @@ exit:
1536 return status; 1535 return status;
1537} 1536}
1538 1537
1539/** 1538/*
1540 * Condition for waking up in dvb_ca_en50221_io_read_condition 1539 * Condition for waking up in dvb_ca_en50221_io_read_condition
1541 */ 1540 */
1542static int dvb_ca_en50221_io_read_condition(struct dvb_ca_private *ca, 1541static int dvb_ca_en50221_io_read_condition(struct dvb_ca_private *ca,
@@ -1593,7 +1592,7 @@ nextslot:
1593 * @count: Size of destination buffer. 1592 * @count: Size of destination buffer.
1594 * @ppos: Position in file (ignored). 1593 * @ppos: Position in file (ignored).
1595 * 1594 *
1596 * @return Number of bytes read, or <0 on error. 1595 * return: Number of bytes read, or <0 on error.
1597 */ 1596 */
1598static ssize_t dvb_ca_en50221_io_read(struct file *file, char __user *buf, 1597static ssize_t dvb_ca_en50221_io_read(struct file *file, char __user *buf,
1599 size_t count, loff_t *ppos) 1598 size_t count, loff_t *ppos)
@@ -1702,7 +1701,7 @@ exit:
1702 * @inode: Inode concerned. 1701 * @inode: Inode concerned.
1703 * @file: File concerned. 1702 * @file: File concerned.
1704 * 1703 *
1705 * @return 0 on success, <0 on failure. 1704 * return: 0 on success, <0 on failure.
1706 */ 1705 */
1707static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file) 1706static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file)
1708{ 1707{
@@ -1752,7 +1751,7 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file)
1752 * @inode: Inode concerned. 1751 * @inode: Inode concerned.
1753 * @file: File concerned. 1752 * @file: File concerned.
1754 * 1753 *
1755 * @return 0 on success, <0 on failure. 1754 * return: 0 on success, <0 on failure.
1756 */ 1755 */
1757static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file) 1756static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
1758{ 1757{
@@ -1781,7 +1780,7 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
1781 * @file: File concerned. 1780 * @file: File concerned.
1782 * @wait: poll wait table. 1781 * @wait: poll wait table.
1783 * 1782 *
1784 * @return Standard poll mask. 1783 * return: Standard poll mask.
1785 */ 1784 */
1786static unsigned int dvb_ca_en50221_io_poll(struct file *file, poll_table *wait) 1785static unsigned int dvb_ca_en50221_io_poll(struct file *file, poll_table *wait)
1787{ 1786{
@@ -1838,11 +1837,11 @@ static const struct dvb_device dvbdev_ca = {
1838 * Initialise a new DVB CA EN50221 interface device. 1837 * Initialise a new DVB CA EN50221 interface device.
1839 * 1838 *
1840 * @dvb_adapter: DVB adapter to attach the new CA device to. 1839 * @dvb_adapter: DVB adapter to attach the new CA device to.
1841 * @ca: The dvb_ca instance. 1840 * @pubca: The dvb_ca instance.
1842 * @flags: Flags describing the CA device (DVB_CA_FLAG_*). 1841 * @flags: Flags describing the CA device (DVB_CA_FLAG_*).
1843 * @slot_count: Number of slots supported. 1842 * @slot_count: Number of slots supported.
1844 * 1843 *
1845 * @return 0 on success, nonzero on failure 1844 * return: 0 on success, nonzero on failure
1846 */ 1845 */
1847int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter, 1846int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
1848 struct dvb_ca_en50221 *pubca, int flags, int slot_count) 1847 struct dvb_ca_en50221 *pubca, int flags, int slot_count)
@@ -1929,8 +1928,7 @@ EXPORT_SYMBOL(dvb_ca_en50221_init);
1929/** 1928/**
1930 * Release a DVB CA EN50221 interface device. 1929 * Release a DVB CA EN50221 interface device.
1931 * 1930 *
1932 * @ca_dev: The dvb_device_t instance for the CA device. 1931 * @pubca: The associated dvb_ca instance.
1933 * @ca: The associated dvb_ca instance.
1934 */ 1932 */
1935void dvb_ca_en50221_release(struct dvb_ca_en50221 *pubca) 1933void dvb_ca_en50221_release(struct dvb_ca_en50221 *pubca)
1936{ 1934{
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 3ad83359098b..2afaa8226342 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -369,11 +369,14 @@ static void dvb_frontend_swzigzag_update_delay(struct dvb_frontend_private *fepr
369} 369}
370 370
371/** 371/**
372 * Performs automatic twiddling of frontend parameters. 372 * dvb_frontend_swzigzag_autotune - Performs automatic twiddling of frontend
373 * parameters.
373 * 374 *
374 * @param fe The frontend concerned. 375 * @fe: The frontend concerned.
375 * @param check_wrapped Checks if an iteration has completed. DO NOT SET ON THE FIRST ATTEMPT 376 * @check_wrapped: Checks if an iteration has completed.
376 * @returns Number of complete iterations that have been performed. 377 * DO NOT SET ON THE FIRST ATTEMPT.
378 *
379 * return: Number of complete iterations that have been performed.
377 */ 380 */
378static int dvb_frontend_swzigzag_autotune(struct dvb_frontend *fe, int check_wrapped) 381static int dvb_frontend_swzigzag_autotune(struct dvb_frontend *fe, int check_wrapped)
379{ 382{
@@ -1253,7 +1256,7 @@ dtv_property_legacy_params_sync(struct dvb_frontend *fe,
1253 * dtv_get_frontend - calls a callback for retrieving DTV parameters 1256 * dtv_get_frontend - calls a callback for retrieving DTV parameters
1254 * @fe: struct dvb_frontend pointer 1257 * @fe: struct dvb_frontend pointer
1255 * @c: struct dtv_frontend_properties pointer (DVBv5 cache) 1258 * @c: struct dtv_frontend_properties pointer (DVBv5 cache)
1256 * @p_out struct dvb_frontend_parameters pointer (DVBv3 FE struct) 1259 * @p_out: struct dvb_frontend_parameters pointer (DVBv3 FE struct)
1257 * 1260 *
1258 * This routine calls either the DVBv3 or DVBv5 get_frontend call. 1261 * This routine calls either the DVBv3 or DVBv5 get_frontend call.
1259 * If c is not null, it will update the DVBv5 cache struct pointed by it. 1262 * If c is not null, it will update the DVBv5 cache struct pointed by it.
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index 06b0dcc13695..c018e3c06d5d 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -125,7 +125,7 @@ struct dvb_net_priv {
125}; 125};
126 126
127 127
128/** 128/*
129 * Determine the packet's protocol ID. The rule here is that we 129 * Determine the packet's protocol ID. The rule here is that we
130 * assume 802.3 if the type field is short enough to be a length. 130 * assume 802.3 if the type field is short enough to be a length.
131 * This is normal practice and works for any 'now in use' protocol. 131 * This is normal practice and works for any 'now in use' protocol.
@@ -155,7 +155,7 @@ static __be16 dvb_net_eth_type_trans(struct sk_buff *skb,
155 155
156 rawp = skb->data; 156 rawp = skb->data;
157 157
158 /** 158 /*
159 * This is a magic hack to spot IPX packets. Older Novell breaks 159 * This is a magic hack to spot IPX packets. Older Novell breaks
160 * the protocol design and runs IPX over 802.3 without an 802.2 LLC 160 * the protocol design and runs IPX over 802.3 without an 802.2 LLC
161 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This 161 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
@@ -164,7 +164,7 @@ static __be16 dvb_net_eth_type_trans(struct sk_buff *skb,
164 if (*(unsigned short *)rawp == 0xFFFF) 164 if (*(unsigned short *)rawp == 0xFFFF)
165 return htons(ETH_P_802_3); 165 return htons(ETH_P_802_3);
166 166
167 /** 167 /*
168 * Real 802.2 LLC 168 * Real 802.2 LLC
169 */ 169 */
170 return htons(ETH_P_802_2); 170 return htons(ETH_P_802_2);
@@ -215,7 +215,8 @@ static int ule_exthdr_padding(struct dvb_net_priv *p)
215 return 0; 215 return 0;
216} 216}
217 217
218/** Handle ULE extension headers. 218/*
219 * Handle ULE extension headers.
219 * Function is called after a successful CRC32 verification of an ULE SNDU to complete its decoding. 220 * Function is called after a successful CRC32 verification of an ULE SNDU to complete its decoding.
220 * Returns: >= 0: nr. of bytes consumed by next extension header 221 * Returns: >= 0: nr. of bytes consumed by next extension header
221 * -1: Mandatory extension header that is not recognized or TEST SNDU; discard. 222 * -1: Mandatory extension header that is not recognized or TEST SNDU; discard.
@@ -291,7 +292,7 @@ static int handle_ule_extensions( struct dvb_net_priv *p )
291} 292}
292 293
293 294
294/** Prepare for a new ULE SNDU: reset the decoder state. */ 295/* Prepare for a new ULE SNDU: reset the decoder state. */
295static inline void reset_ule( struct dvb_net_priv *p ) 296static inline void reset_ule( struct dvb_net_priv *p )
296{ 297{
297 p->ule_skb = NULL; 298 p->ule_skb = NULL;
@@ -304,7 +305,7 @@ static inline void reset_ule( struct dvb_net_priv *p )
304 p->ule_bridged = 0; 305 p->ule_bridged = 0;
305} 306}
306 307
307/** 308/*
308 * Decode ULE SNDUs according to draft-ietf-ipdvb-ule-03.txt from a sequence of 309 * Decode ULE SNDUs according to draft-ietf-ipdvb-ule-03.txt from a sequence of
309 * TS cells of a single PID. 310 * TS cells of a single PID.
310 */ 311 */
@@ -1005,7 +1006,7 @@ static int dvb_net_sec_callback(const u8 *buffer1, size_t buffer1_len,
1005{ 1006{
1006 struct net_device *dev = filter->priv; 1007 struct net_device *dev = filter->priv;
1007 1008
1008 /** 1009 /*
1009 * we rely on the DVB API definition where exactly one complete 1010 * we rely on the DVB API definition where exactly one complete
1010 * section is delivered in buffer1 1011 * section is delivered in buffer1
1011 */ 1012 */
diff --git a/drivers/media/dvb-frontends/af9013.h b/drivers/media/dvb-frontends/af9013.h
index 353274524f1b..a290722c04fd 100644
--- a/drivers/media/dvb-frontends/af9013.h
+++ b/drivers/media/dvb-frontends/af9013.h
@@ -38,6 +38,13 @@
38 * @api_version: Firmware API version. 38 * @api_version: Firmware API version.
39 * @gpio: GPIOs. 39 * @gpio: GPIOs.
40 * @get_dvb_frontend: Get DVB frontend callback. 40 * @get_dvb_frontend: Get DVB frontend callback.
41 *
42 * AF9013/5 GPIOs (mostly guessed):
43 * * demod#1-gpio#0 - set demod#2 i2c-addr for dual devices
44 * * demod#1-gpio#1 - xtal setting (?)
45 * * demod#1-gpio#3 - tuner#1
46 * * demod#2-gpio#0 - tuner#2
47 * * demod#2-gpio#1 - xtal setting (?)
41 */ 48 */
42struct af9013_platform_data { 49struct af9013_platform_data {
43 /* 50 /*
@@ -89,16 +96,15 @@ struct af9013_platform_data {
89#define AF9013_TS_PARALLEL AF9013_TS_MODE_PARALLEL 96#define AF9013_TS_PARALLEL AF9013_TS_MODE_PARALLEL
90#define AF9013_TS_SERIAL AF9013_TS_MODE_SERIAL 97#define AF9013_TS_SERIAL AF9013_TS_MODE_SERIAL
91 98
92/*
93 * AF9013/5 GPIOs (mostly guessed)
94 * demod#1-gpio#0 - set demod#2 i2c-addr for dual devices
95 * demod#1-gpio#1 - xtal setting (?)
96 * demod#1-gpio#3 - tuner#1
97 * demod#2-gpio#0 - tuner#2
98 * demod#2-gpio#1 - xtal setting (?)
99 */
100
101#if IS_REACHABLE(CONFIG_DVB_AF9013) 99#if IS_REACHABLE(CONFIG_DVB_AF9013)
100/**
101 * Attach an af9013 demod
102 *
103 * @config: pointer to &struct af9013_config with demod configuration.
104 * @i2c: i2c adapter to use.
105 *
106 * return: FE pointer on success, NULL on failure.
107 */
102extern struct dvb_frontend *af9013_attach(const struct af9013_config *config, 108extern struct dvb_frontend *af9013_attach(const struct af9013_config *config,
103 struct i2c_adapter *i2c); 109 struct i2c_adapter *i2c);
104#else 110#else
diff --git a/drivers/media/dvb-frontends/ascot2e.h b/drivers/media/dvb-frontends/ascot2e.h
index dc61bf7d1b09..418c565baf83 100644
--- a/drivers/media/dvb-frontends/ascot2e.h
+++ b/drivers/media/dvb-frontends/ascot2e.h
@@ -41,6 +41,15 @@ struct ascot2e_config {
41}; 41};
42 42
43#if IS_REACHABLE(CONFIG_DVB_ASCOT2E) 43#if IS_REACHABLE(CONFIG_DVB_ASCOT2E)
44/**
45 * Attach an ascot2e tuner
46 *
47 * @fe: frontend to be attached
48 * @config: pointer to &struct ascot2e_config with tuner configuration.
49 * @i2c: i2c adapter to use.
50 *
51 * return: FE pointer on success, NULL on failure.
52 */
44extern struct dvb_frontend *ascot2e_attach(struct dvb_frontend *fe, 53extern struct dvb_frontend *ascot2e_attach(struct dvb_frontend *fe,
45 const struct ascot2e_config *config, 54 const struct ascot2e_config *config,
46 struct i2c_adapter *i2c); 55 struct i2c_adapter *i2c);
diff --git a/drivers/media/dvb-frontends/cxd2820r.h b/drivers/media/dvb-frontends/cxd2820r.h
index f3ff8f6eb3bb..a49400c0e28e 100644
--- a/drivers/media/dvb-frontends/cxd2820r.h
+++ b/drivers/media/dvb-frontends/cxd2820r.h
@@ -49,7 +49,6 @@
49 * @gpio_chip_base: GPIO. 49 * @gpio_chip_base: GPIO.
50 * @get_dvb_frontend: Get DVB frontend. 50 * @get_dvb_frontend: Get DVB frontend.
51 */ 51 */
52
53struct cxd2820r_platform_data { 52struct cxd2820r_platform_data {
54 u8 ts_mode; 53 u8 ts_mode;
55 bool ts_clk_inv; 54 bool ts_clk_inv;
@@ -62,6 +61,17 @@ struct cxd2820r_platform_data {
62 bool attach_in_use; 61 bool attach_in_use;
63}; 62};
64 63
64/**
65 * struct cxd2820r_config - configuration for cxd2020r demod
66 *
67 * @i2c_address: Demodulator I2C address. Driver determines DVB-C slave I2C
68 * address automatically from master address.
69 * Default: none, must set. Values: 0x6c, 0x6d.
70 * @ts_mode: TS output mode. Default: none, must set. Values: FIXME?
71 * @ts_clock_inv: TS clock inverted. Default: 0. Values: 0, 1.
72 * @if_agc_polarity: Default: 0. Values: 0, 1
73 * @spec_inv: Spectrum inversion. Default: 0. Values: 0, 1.
74 */
65struct cxd2820r_config { 75struct cxd2820r_config {
66 /* Demodulator I2C address. 76 /* Demodulator I2C address.
67 * Driver determines DVB-C slave I2C address automatically from master 77 * Driver determines DVB-C slave I2C address automatically from master
@@ -98,6 +108,18 @@ struct cxd2820r_config {
98 108
99 109
100#if IS_REACHABLE(CONFIG_DVB_CXD2820R) 110#if IS_REACHABLE(CONFIG_DVB_CXD2820R)
111/**
112 * Attach a cxd2820r demod
113 *
114 * @config: pointer to &struct cxd2820r_config with demod configuration.
115 * @i2c: i2c adapter to use.
116 * @gpio_chip_base: if zero, disables GPIO setting. Otherwise, if
117 * CONFIG_GPIOLIB is set dynamically allocate
118 * gpio base; if is not set, use its value to
119 * setup the GPIO pins.
120 *
121 * return: FE pointer on success, NULL on failure.
122 */
101extern struct dvb_frontend *cxd2820r_attach( 123extern struct dvb_frontend *cxd2820r_attach(
102 const struct cxd2820r_config *config, 124 const struct cxd2820r_config *config,
103 struct i2c_adapter *i2c, 125 struct i2c_adapter *i2c,
diff --git a/drivers/media/dvb-frontends/drx39xyj/bsp_i2c.h b/drivers/media/dvb-frontends/drx39xyj/bsp_i2c.h
index 5b5421f70388..2b3af247a1f1 100644
--- a/drivers/media/dvb-frontends/drx39xyj/bsp_i2c.h
+++ b/drivers/media/dvb-frontends/drx39xyj/bsp_i2c.h
@@ -52,7 +52,7 @@ struct i2c_device_addr {
52}; 52};
53 53
54 54
55/** 55/*
56* \def IS_I2C_10BIT( addr ) 56* \def IS_I2C_10BIT( addr )
57* \brief Determine if I2C address 'addr' is a 10 bits address or not. 57* \brief Determine if I2C address 'addr' is a 10 bits address or not.
58* \param addr The I2C address. 58* \param addr The I2C address.
@@ -67,7 +67,7 @@ struct i2c_device_addr {
67Exported FUNCTIONS 67Exported FUNCTIONS
68------------------------------------------------------------------------------*/ 68------------------------------------------------------------------------------*/
69 69
70/** 70/*
71* \fn drxbsp_i2c_init() 71* \fn drxbsp_i2c_init()
72* \brief Initialize I2C communication module. 72* \brief Initialize I2C communication module.
73* \return drx_status_t Return status. 73* \return drx_status_t Return status.
@@ -76,7 +76,7 @@ Exported FUNCTIONS
76*/ 76*/
77 drx_status_t drxbsp_i2c_init(void); 77 drx_status_t drxbsp_i2c_init(void);
78 78
79/** 79/*
80* \fn drxbsp_i2c_term() 80* \fn drxbsp_i2c_term()
81* \brief Terminate I2C communication module. 81* \brief Terminate I2C communication module.
82* \return drx_status_t Return status. 82* \return drx_status_t Return status.
@@ -85,7 +85,7 @@ Exported FUNCTIONS
85*/ 85*/
86 drx_status_t drxbsp_i2c_term(void); 86 drx_status_t drxbsp_i2c_term(void);
87 87
88/** 88/*
89* \fn drx_status_t drxbsp_i2c_write_read( struct i2c_device_addr *w_dev_addr, 89* \fn drx_status_t drxbsp_i2c_write_read( struct i2c_device_addr *w_dev_addr,
90* u16 w_count, 90* u16 w_count,
91* u8 *wData, 91* u8 *wData,
@@ -121,7 +121,7 @@ Exported FUNCTIONS
121 struct i2c_device_addr *r_dev_addr, 121 struct i2c_device_addr *r_dev_addr,
122 u16 r_count, u8 *r_data); 122 u16 r_count, u8 *r_data);
123 123
124/** 124/*
125* \fn drxbsp_i2c_error_text() 125* \fn drxbsp_i2c_error_text()
126* \brief Returns a human readable error. 126* \brief Returns a human readable error.
127* Counter part of numerical drx_i2c_error_g. 127* Counter part of numerical drx_i2c_error_g.
@@ -130,7 +130,7 @@ Exported FUNCTIONS
130*/ 130*/
131 char *drxbsp_i2c_error_text(void); 131 char *drxbsp_i2c_error_text(void);
132 132
133/** 133/*
134* \var drx_i2c_error_g; 134* \var drx_i2c_error_g;
135* \brief I2C specific error codes, platform dependent. 135* \brief I2C specific error codes, platform dependent.
136*/ 136*/
diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
index cd69e187ba7a..855685b6b386 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
+++ b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
@@ -46,7 +46,7 @@ struct i2c_device_addr {
46 void *user_data; /* User data pointer */ 46 void *user_data; /* User data pointer */
47}; 47};
48 48
49/** 49/*
50* \def IS_I2C_10BIT( addr ) 50* \def IS_I2C_10BIT( addr )
51* \brief Determine if I2C address 'addr' is a 10 bits address or not. 51* \brief Determine if I2C address 'addr' is a 10 bits address or not.
52* \param addr The I2C address. 52* \param addr The I2C address.
@@ -61,7 +61,7 @@ struct i2c_device_addr {
61Exported FUNCTIONS 61Exported FUNCTIONS
62------------------------------------------------------------------------------*/ 62------------------------------------------------------------------------------*/
63 63
64/** 64/*
65* \fn drxbsp_i2c_init() 65* \fn drxbsp_i2c_init()
66* \brief Initialize I2C communication module. 66* \brief Initialize I2C communication module.
67* \return int Return status. 67* \return int Return status.
@@ -70,7 +70,7 @@ Exported FUNCTIONS
70*/ 70*/
71int drxbsp_i2c_init(void); 71int drxbsp_i2c_init(void);
72 72
73/** 73/*
74* \fn drxbsp_i2c_term() 74* \fn drxbsp_i2c_term()
75* \brief Terminate I2C communication module. 75* \brief Terminate I2C communication module.
76* \return int Return status. 76* \return int Return status.
@@ -79,7 +79,7 @@ int drxbsp_i2c_init(void);
79*/ 79*/
80int drxbsp_i2c_term(void); 80int drxbsp_i2c_term(void);
81 81
82/** 82/*
83* \fn int drxbsp_i2c_write_read( struct i2c_device_addr *w_dev_addr, 83* \fn int drxbsp_i2c_write_read( struct i2c_device_addr *w_dev_addr,
84* u16 w_count, 84* u16 w_count,
85* u8 * wData, 85* u8 * wData,
@@ -115,7 +115,7 @@ int drxbsp_i2c_write_read(struct i2c_device_addr *w_dev_addr,
115 struct i2c_device_addr *r_dev_addr, 115 struct i2c_device_addr *r_dev_addr,
116 u16 r_count, u8 *r_data); 116 u16 r_count, u8 *r_data);
117 117
118/** 118/*
119* \fn drxbsp_i2c_error_text() 119* \fn drxbsp_i2c_error_text()
120* \brief Returns a human readable error. 120* \brief Returns a human readable error.
121* Counter part of numerical drx_i2c_error_g. 121* Counter part of numerical drx_i2c_error_g.
@@ -124,7 +124,7 @@ int drxbsp_i2c_write_read(struct i2c_device_addr *w_dev_addr,
124*/ 124*/
125char *drxbsp_i2c_error_text(void); 125char *drxbsp_i2c_error_text(void);
126 126
127/** 127/*
128* \var drx_i2c_error_g; 128* \var drx_i2c_error_g;
129* \brief I2C specific error codes, platform dependent. 129* \brief I2C specific error codes, platform dependent.
130*/ 130*/
@@ -241,13 +241,13 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
241 struct i2c_device_addr *r_dev_addr, 241 struct i2c_device_addr *r_dev_addr,
242 u16 r_count, u8 *r_data); 242 u16 r_count, u8 *r_data);
243 243
244/************** 244/*************
245* 245*
246* This section configures the DRX Data Access Protocols (DAPs). 246* This section configures the DRX Data Access Protocols (DAPs).
247* 247*
248**************/ 248**************/
249 249
250/** 250/*
251* \def DRXDAP_SINGLE_MASTER 251* \def DRXDAP_SINGLE_MASTER
252* \brief Enable I2C single or I2C multimaster mode on host. 252* \brief Enable I2C single or I2C multimaster mode on host.
253* 253*
@@ -262,7 +262,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
262#define DRXDAP_SINGLE_MASTER 1 262#define DRXDAP_SINGLE_MASTER 1
263#endif 263#endif
264 264
265/** 265/*
266* \def DRXDAP_MAX_WCHUNKSIZE 266* \def DRXDAP_MAX_WCHUNKSIZE
267* \brief Defines maximum chunksize of an i2c write action by host. 267* \brief Defines maximum chunksize of an i2c write action by host.
268* 268*
@@ -282,7 +282,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
282#define DRXDAP_MAX_WCHUNKSIZE 60 282#define DRXDAP_MAX_WCHUNKSIZE 60
283#endif 283#endif
284 284
285/** 285/*
286* \def DRXDAP_MAX_RCHUNKSIZE 286* \def DRXDAP_MAX_RCHUNKSIZE
287* \brief Defines maximum chunksize of an i2c read action by host. 287* \brief Defines maximum chunksize of an i2c read action by host.
288* 288*
@@ -297,13 +297,13 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
297#define DRXDAP_MAX_RCHUNKSIZE 60 297#define DRXDAP_MAX_RCHUNKSIZE 60
298#endif 298#endif
299 299
300/************** 300/*************
301* 301*
302* This section describes drxdriver defines. 302* This section describes drxdriver defines.
303* 303*
304**************/ 304**************/
305 305
306/** 306/*
307* \def DRX_UNKNOWN 307* \def DRX_UNKNOWN
308* \brief Generic UNKNOWN value for DRX enumerated types. 308* \brief Generic UNKNOWN value for DRX enumerated types.
309* 309*
@@ -313,7 +313,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
313#define DRX_UNKNOWN (254) 313#define DRX_UNKNOWN (254)
314#endif 314#endif
315 315
316/** 316/*
317* \def DRX_AUTO 317* \def DRX_AUTO
318* \brief Generic AUTO value for DRX enumerated types. 318* \brief Generic AUTO value for DRX enumerated types.
319* 319*
@@ -324,104 +324,104 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
324#define DRX_AUTO (255) 324#define DRX_AUTO (255)
325#endif 325#endif
326 326
327/************** 327/*************
328* 328*
329* This section describes flag definitions for the device capbilities. 329* This section describes flag definitions for the device capbilities.
330* 330*
331**************/ 331**************/
332 332
333/** 333/*
334* \brief LNA capability flag 334* \brief LNA capability flag
335* 335*
336* Device has a Low Noise Amplifier 336* Device has a Low Noise Amplifier
337* 337*
338*/ 338*/
339#define DRX_CAPABILITY_HAS_LNA (1UL << 0) 339#define DRX_CAPABILITY_HAS_LNA (1UL << 0)
340/** 340/*
341* \brief OOB-RX capability flag 341* \brief OOB-RX capability flag
342* 342*
343* Device has OOB-RX 343* Device has OOB-RX
344* 344*
345*/ 345*/
346#define DRX_CAPABILITY_HAS_OOBRX (1UL << 1) 346#define DRX_CAPABILITY_HAS_OOBRX (1UL << 1)
347/** 347/*
348* \brief ATV capability flag 348* \brief ATV capability flag
349* 349*
350* Device has ATV 350* Device has ATV
351* 351*
352*/ 352*/
353#define DRX_CAPABILITY_HAS_ATV (1UL << 2) 353#define DRX_CAPABILITY_HAS_ATV (1UL << 2)
354/** 354/*
355* \brief DVB-T capability flag 355* \brief DVB-T capability flag
356* 356*
357* Device has DVB-T 357* Device has DVB-T
358* 358*
359*/ 359*/
360#define DRX_CAPABILITY_HAS_DVBT (1UL << 3) 360#define DRX_CAPABILITY_HAS_DVBT (1UL << 3)
361/** 361/*
362* \brief ITU-B capability flag 362* \brief ITU-B capability flag
363* 363*
364* Device has ITU-B 364* Device has ITU-B
365* 365*
366*/ 366*/
367#define DRX_CAPABILITY_HAS_ITUB (1UL << 4) 367#define DRX_CAPABILITY_HAS_ITUB (1UL << 4)
368/** 368/*
369* \brief Audio capability flag 369* \brief Audio capability flag
370* 370*
371* Device has Audio 371* Device has Audio
372* 372*
373*/ 373*/
374#define DRX_CAPABILITY_HAS_AUD (1UL << 5) 374#define DRX_CAPABILITY_HAS_AUD (1UL << 5)
375/** 375/*
376* \brief SAW switch capability flag 376* \brief SAW switch capability flag
377* 377*
378* Device has SAW switch 378* Device has SAW switch
379* 379*
380*/ 380*/
381#define DRX_CAPABILITY_HAS_SAWSW (1UL << 6) 381#define DRX_CAPABILITY_HAS_SAWSW (1UL << 6)
382/** 382/*
383* \brief GPIO1 capability flag 383* \brief GPIO1 capability flag
384* 384*
385* Device has GPIO1 385* Device has GPIO1
386* 386*
387*/ 387*/
388#define DRX_CAPABILITY_HAS_GPIO1 (1UL << 7) 388#define DRX_CAPABILITY_HAS_GPIO1 (1UL << 7)
389/** 389/*
390* \brief GPIO2 capability flag 390* \brief GPIO2 capability flag
391* 391*
392* Device has GPIO2 392* Device has GPIO2
393* 393*
394*/ 394*/
395#define DRX_CAPABILITY_HAS_GPIO2 (1UL << 8) 395#define DRX_CAPABILITY_HAS_GPIO2 (1UL << 8)
396/** 396/*
397* \brief IRQN capability flag 397* \brief IRQN capability flag
398* 398*
399* Device has IRQN 399* Device has IRQN
400* 400*
401*/ 401*/
402#define DRX_CAPABILITY_HAS_IRQN (1UL << 9) 402#define DRX_CAPABILITY_HAS_IRQN (1UL << 9)
403/** 403/*
404* \brief 8VSB capability flag 404* \brief 8VSB capability flag
405* 405*
406* Device has 8VSB 406* Device has 8VSB
407* 407*
408*/ 408*/
409#define DRX_CAPABILITY_HAS_8VSB (1UL << 10) 409#define DRX_CAPABILITY_HAS_8VSB (1UL << 10)
410/** 410/*
411* \brief SMA-TX capability flag 411* \brief SMA-TX capability flag
412* 412*
413* Device has SMATX 413* Device has SMATX
414* 414*
415*/ 415*/
416#define DRX_CAPABILITY_HAS_SMATX (1UL << 11) 416#define DRX_CAPABILITY_HAS_SMATX (1UL << 11)
417/** 417/*
418* \brief SMA-RX capability flag 418* \brief SMA-RX capability flag
419* 419*
420* Device has SMARX 420* Device has SMARX
421* 421*
422*/ 422*/
423#define DRX_CAPABILITY_HAS_SMARX (1UL << 12) 423#define DRX_CAPABILITY_HAS_SMARX (1UL << 12)
424/** 424/*
425* \brief ITU-A/C capability flag 425* \brief ITU-A/C capability flag
426* 426*
427* Device has ITU-A/C 427* Device has ITU-A/C
@@ -439,7 +439,7 @@ MACROS
439 DRX_VERSIONSTRING_HELP(PATCH) 439 DRX_VERSIONSTRING_HELP(PATCH)
440#define DRX_VERSIONSTRING_HELP(NUM) #NUM 440#define DRX_VERSIONSTRING_HELP(NUM) #NUM
441 441
442/** 442/*
443* \brief Macro to create byte array elements from 16 bit integers. 443* \brief Macro to create byte array elements from 16 bit integers.
444* This macro is used to create byte arrays for block writes. 444* This macro is used to create byte arrays for block writes.
445* Block writes speed up I2C traffic between host and demod. 445* Block writes speed up I2C traffic between host and demod.
@@ -449,7 +449,7 @@ MACROS
449#define DRX_16TO8(x) ((u8) (((u16)x) & 0xFF)), \ 449#define DRX_16TO8(x) ((u8) (((u16)x) & 0xFF)), \
450 ((u8)((((u16)x)>>8)&0xFF)) 450 ((u8)((((u16)x)>>8)&0xFF))
451 451
452/** 452/*
453* \brief Macro to convert 16 bit register value to a s32 453* \brief Macro to convert 16 bit register value to a s32
454*/ 454*/
455#define DRX_U16TODRXFREQ(x) ((x & 0x8000) ? \ 455#define DRX_U16TODRXFREQ(x) ((x & 0x8000) ? \
@@ -461,191 +461,191 @@ MACROS
461ENUM 461ENUM
462-------------------------------------------------------------------------*/ 462-------------------------------------------------------------------------*/
463 463
464/** 464/*
465* \enum enum drx_standard 465* \enum enum drx_standard
466* \brief Modulation standards. 466* \brief Modulation standards.
467*/ 467*/
468enum drx_standard { 468enum drx_standard {
469 DRX_STANDARD_DVBT = 0, /**< Terrestrial DVB-T. */ 469 DRX_STANDARD_DVBT = 0, /*< Terrestrial DVB-T. */
470 DRX_STANDARD_8VSB, /**< Terrestrial 8VSB. */ 470 DRX_STANDARD_8VSB, /*< Terrestrial 8VSB. */
471 DRX_STANDARD_NTSC, /**< Terrestrial\Cable analog NTSC. */ 471 DRX_STANDARD_NTSC, /*< Terrestrial\Cable analog NTSC. */
472 DRX_STANDARD_PAL_SECAM_BG, 472 DRX_STANDARD_PAL_SECAM_BG,
473 /**< Terrestrial analog PAL/SECAM B/G */ 473 /*< Terrestrial analog PAL/SECAM B/G */
474 DRX_STANDARD_PAL_SECAM_DK, 474 DRX_STANDARD_PAL_SECAM_DK,
475 /**< Terrestrial analog PAL/SECAM D/K */ 475 /*< Terrestrial analog PAL/SECAM D/K */
476 DRX_STANDARD_PAL_SECAM_I, 476 DRX_STANDARD_PAL_SECAM_I,
477 /**< Terrestrial analog PAL/SECAM I */ 477 /*< Terrestrial analog PAL/SECAM I */
478 DRX_STANDARD_PAL_SECAM_L, 478 DRX_STANDARD_PAL_SECAM_L,
479 /**< Terrestrial analog PAL/SECAM L 479 /*< Terrestrial analog PAL/SECAM L
480 with negative modulation */ 480 with negative modulation */
481 DRX_STANDARD_PAL_SECAM_LP, 481 DRX_STANDARD_PAL_SECAM_LP,
482 /**< Terrestrial analog PAL/SECAM L 482 /*< Terrestrial analog PAL/SECAM L
483 with positive modulation */ 483 with positive modulation */
484 DRX_STANDARD_ITU_A, /**< Cable ITU ANNEX A. */ 484 DRX_STANDARD_ITU_A, /*< Cable ITU ANNEX A. */
485 DRX_STANDARD_ITU_B, /**< Cable ITU ANNEX B. */ 485 DRX_STANDARD_ITU_B, /*< Cable ITU ANNEX B. */
486 DRX_STANDARD_ITU_C, /**< Cable ITU ANNEX C. */ 486 DRX_STANDARD_ITU_C, /*< Cable ITU ANNEX C. */
487 DRX_STANDARD_ITU_D, /**< Cable ITU ANNEX D. */ 487 DRX_STANDARD_ITU_D, /*< Cable ITU ANNEX D. */
488 DRX_STANDARD_FM, /**< Terrestrial\Cable FM radio */ 488 DRX_STANDARD_FM, /*< Terrestrial\Cable FM radio */
489 DRX_STANDARD_DTMB, /**< Terrestrial DTMB standard (China)*/ 489 DRX_STANDARD_DTMB, /*< Terrestrial DTMB standard (China)*/
490 DRX_STANDARD_UNKNOWN = DRX_UNKNOWN, 490 DRX_STANDARD_UNKNOWN = DRX_UNKNOWN,
491 /**< Standard unknown. */ 491 /*< Standard unknown. */
492 DRX_STANDARD_AUTO = DRX_AUTO 492 DRX_STANDARD_AUTO = DRX_AUTO
493 /**< Autodetect standard. */ 493 /*< Autodetect standard. */
494}; 494};
495 495
496/** 496/*
497* \enum enum drx_standard 497* \enum enum drx_standard
498* \brief Modulation sub-standards. 498* \brief Modulation sub-standards.
499*/ 499*/
500enum drx_substandard { 500enum drx_substandard {
501 DRX_SUBSTANDARD_MAIN = 0, /**< Main subvariant of standard */ 501 DRX_SUBSTANDARD_MAIN = 0, /*< Main subvariant of standard */
502 DRX_SUBSTANDARD_ATV_BG_SCANDINAVIA, 502 DRX_SUBSTANDARD_ATV_BG_SCANDINAVIA,
503 DRX_SUBSTANDARD_ATV_DK_POLAND, 503 DRX_SUBSTANDARD_ATV_DK_POLAND,
504 DRX_SUBSTANDARD_ATV_DK_CHINA, 504 DRX_SUBSTANDARD_ATV_DK_CHINA,
505 DRX_SUBSTANDARD_UNKNOWN = DRX_UNKNOWN, 505 DRX_SUBSTANDARD_UNKNOWN = DRX_UNKNOWN,
506 /**< Sub-standard unknown. */ 506 /*< Sub-standard unknown. */
507 DRX_SUBSTANDARD_AUTO = DRX_AUTO 507 DRX_SUBSTANDARD_AUTO = DRX_AUTO
508 /**< Auto (default) sub-standard */ 508 /*< Auto (default) sub-standard */
509}; 509};
510 510
511/** 511/*
512* \enum enum drx_bandwidth 512* \enum enum drx_bandwidth
513* \brief Channel bandwidth or channel spacing. 513* \brief Channel bandwidth or channel spacing.
514*/ 514*/
515enum drx_bandwidth { 515enum drx_bandwidth {
516 DRX_BANDWIDTH_8MHZ = 0, /**< Bandwidth 8 MHz. */ 516 DRX_BANDWIDTH_8MHZ = 0, /*< Bandwidth 8 MHz. */
517 DRX_BANDWIDTH_7MHZ, /**< Bandwidth 7 MHz. */ 517 DRX_BANDWIDTH_7MHZ, /*< Bandwidth 7 MHz. */
518 DRX_BANDWIDTH_6MHZ, /**< Bandwidth 6 MHz. */ 518 DRX_BANDWIDTH_6MHZ, /*< Bandwidth 6 MHz. */
519 DRX_BANDWIDTH_UNKNOWN = DRX_UNKNOWN, 519 DRX_BANDWIDTH_UNKNOWN = DRX_UNKNOWN,
520 /**< Bandwidth unknown. */ 520 /*< Bandwidth unknown. */
521 DRX_BANDWIDTH_AUTO = DRX_AUTO 521 DRX_BANDWIDTH_AUTO = DRX_AUTO
522 /**< Auto Set Bandwidth */ 522 /*< Auto Set Bandwidth */
523}; 523};
524 524
525/** 525/*
526* \enum enum drx_mirror 526* \enum enum drx_mirror
527* \brief Indicate if channel spectrum is mirrored or not. 527* \brief Indicate if channel spectrum is mirrored or not.
528*/ 528*/
529enum drx_mirror { 529enum drx_mirror {
530 DRX_MIRROR_NO = 0, /**< Spectrum is not mirrored. */ 530 DRX_MIRROR_NO = 0, /*< Spectrum is not mirrored. */
531 DRX_MIRROR_YES, /**< Spectrum is mirrored. */ 531 DRX_MIRROR_YES, /*< Spectrum is mirrored. */
532 DRX_MIRROR_UNKNOWN = DRX_UNKNOWN, 532 DRX_MIRROR_UNKNOWN = DRX_UNKNOWN,
533 /**< Unknown if spectrum is mirrored. */ 533 /*< Unknown if spectrum is mirrored. */
534 DRX_MIRROR_AUTO = DRX_AUTO 534 DRX_MIRROR_AUTO = DRX_AUTO
535 /**< Autodetect if spectrum is mirrored. */ 535 /*< Autodetect if spectrum is mirrored. */
536}; 536};
537 537
538/** 538/*
539* \enum enum drx_modulation 539* \enum enum drx_modulation
540* \brief Constellation type of the channel. 540* \brief Constellation type of the channel.
541*/ 541*/
542enum drx_modulation { 542enum drx_modulation {
543 DRX_CONSTELLATION_BPSK = 0, /**< Modulation is BPSK. */ 543 DRX_CONSTELLATION_BPSK = 0, /*< Modulation is BPSK. */
544 DRX_CONSTELLATION_QPSK, /**< Constellation is QPSK. */ 544 DRX_CONSTELLATION_QPSK, /*< Constellation is QPSK. */
545 DRX_CONSTELLATION_PSK8, /**< Constellation is PSK8. */ 545 DRX_CONSTELLATION_PSK8, /*< Constellation is PSK8. */
546 DRX_CONSTELLATION_QAM16, /**< Constellation is QAM16. */ 546 DRX_CONSTELLATION_QAM16, /*< Constellation is QAM16. */
547 DRX_CONSTELLATION_QAM32, /**< Constellation is QAM32. */ 547 DRX_CONSTELLATION_QAM32, /*< Constellation is QAM32. */
548 DRX_CONSTELLATION_QAM64, /**< Constellation is QAM64. */ 548 DRX_CONSTELLATION_QAM64, /*< Constellation is QAM64. */
549 DRX_CONSTELLATION_QAM128, /**< Constellation is QAM128. */ 549 DRX_CONSTELLATION_QAM128, /*< Constellation is QAM128. */
550 DRX_CONSTELLATION_QAM256, /**< Constellation is QAM256. */ 550 DRX_CONSTELLATION_QAM256, /*< Constellation is QAM256. */
551 DRX_CONSTELLATION_QAM512, /**< Constellation is QAM512. */ 551 DRX_CONSTELLATION_QAM512, /*< Constellation is QAM512. */
552 DRX_CONSTELLATION_QAM1024, /**< Constellation is QAM1024. */ 552 DRX_CONSTELLATION_QAM1024, /*< Constellation is QAM1024. */
553 DRX_CONSTELLATION_QPSK_NR, /**< Constellation is QPSK_NR */ 553 DRX_CONSTELLATION_QPSK_NR, /*< Constellation is QPSK_NR */
554 DRX_CONSTELLATION_UNKNOWN = DRX_UNKNOWN, 554 DRX_CONSTELLATION_UNKNOWN = DRX_UNKNOWN,
555 /**< Constellation unknown. */ 555 /*< Constellation unknown. */
556 DRX_CONSTELLATION_AUTO = DRX_AUTO 556 DRX_CONSTELLATION_AUTO = DRX_AUTO
557 /**< Autodetect constellation. */ 557 /*< Autodetect constellation. */
558}; 558};
559 559
560/** 560/*
561* \enum enum drx_hierarchy 561* \enum enum drx_hierarchy
562* \brief Hierarchy of the channel. 562* \brief Hierarchy of the channel.
563*/ 563*/
564enum drx_hierarchy { 564enum drx_hierarchy {
565 DRX_HIERARCHY_NONE = 0, /**< None hierarchical channel. */ 565 DRX_HIERARCHY_NONE = 0, /*< None hierarchical channel. */
566 DRX_HIERARCHY_ALPHA1, /**< Hierarchical channel, alpha=1. */ 566 DRX_HIERARCHY_ALPHA1, /*< Hierarchical channel, alpha=1. */
567 DRX_HIERARCHY_ALPHA2, /**< Hierarchical channel, alpha=2. */ 567 DRX_HIERARCHY_ALPHA2, /*< Hierarchical channel, alpha=2. */
568 DRX_HIERARCHY_ALPHA4, /**< Hierarchical channel, alpha=4. */ 568 DRX_HIERARCHY_ALPHA4, /*< Hierarchical channel, alpha=4. */
569 DRX_HIERARCHY_UNKNOWN = DRX_UNKNOWN, 569 DRX_HIERARCHY_UNKNOWN = DRX_UNKNOWN,
570 /**< Hierarchy unknown. */ 570 /*< Hierarchy unknown. */
571 DRX_HIERARCHY_AUTO = DRX_AUTO 571 DRX_HIERARCHY_AUTO = DRX_AUTO
572 /**< Autodetect hierarchy. */ 572 /*< Autodetect hierarchy. */
573}; 573};
574 574
575/** 575/*
576* \enum enum drx_priority 576* \enum enum drx_priority
577* \brief Channel priority in case of hierarchical transmission. 577* \brief Channel priority in case of hierarchical transmission.
578*/ 578*/
579enum drx_priority { 579enum drx_priority {
580 DRX_PRIORITY_LOW = 0, /**< Low priority channel. */ 580 DRX_PRIORITY_LOW = 0, /*< Low priority channel. */
581 DRX_PRIORITY_HIGH, /**< High priority channel. */ 581 DRX_PRIORITY_HIGH, /*< High priority channel. */
582 DRX_PRIORITY_UNKNOWN = DRX_UNKNOWN 582 DRX_PRIORITY_UNKNOWN = DRX_UNKNOWN
583 /**< Priority unknown. */ 583 /*< Priority unknown. */
584}; 584};
585 585
586/** 586/*
587* \enum enum drx_coderate 587* \enum enum drx_coderate
588* \brief Channel priority in case of hierarchical transmission. 588* \brief Channel priority in case of hierarchical transmission.
589*/ 589*/
590enum drx_coderate { 590enum drx_coderate {
591 DRX_CODERATE_1DIV2 = 0, /**< Code rate 1/2nd. */ 591 DRX_CODERATE_1DIV2 = 0, /*< Code rate 1/2nd. */
592 DRX_CODERATE_2DIV3, /**< Code rate 2/3nd. */ 592 DRX_CODERATE_2DIV3, /*< Code rate 2/3nd. */
593 DRX_CODERATE_3DIV4, /**< Code rate 3/4nd. */ 593 DRX_CODERATE_3DIV4, /*< Code rate 3/4nd. */
594 DRX_CODERATE_5DIV6, /**< Code rate 5/6nd. */ 594 DRX_CODERATE_5DIV6, /*< Code rate 5/6nd. */
595 DRX_CODERATE_7DIV8, /**< Code rate 7/8nd. */ 595 DRX_CODERATE_7DIV8, /*< Code rate 7/8nd. */
596 DRX_CODERATE_UNKNOWN = DRX_UNKNOWN, 596 DRX_CODERATE_UNKNOWN = DRX_UNKNOWN,
597 /**< Code rate unknown. */ 597 /*< Code rate unknown. */
598 DRX_CODERATE_AUTO = DRX_AUTO 598 DRX_CODERATE_AUTO = DRX_AUTO
599 /**< Autodetect code rate. */ 599 /*< Autodetect code rate. */
600}; 600};
601 601
602/** 602/*
603* \enum enum drx_guard 603* \enum enum drx_guard
604* \brief Guard interval of a channel. 604* \brief Guard interval of a channel.
605*/ 605*/
606enum drx_guard { 606enum drx_guard {
607 DRX_GUARD_1DIV32 = 0, /**< Guard interval 1/32nd. */ 607 DRX_GUARD_1DIV32 = 0, /*< Guard interval 1/32nd. */
608 DRX_GUARD_1DIV16, /**< Guard interval 1/16th. */ 608 DRX_GUARD_1DIV16, /*< Guard interval 1/16th. */
609 DRX_GUARD_1DIV8, /**< Guard interval 1/8th. */ 609 DRX_GUARD_1DIV8, /*< Guard interval 1/8th. */
610 DRX_GUARD_1DIV4, /**< Guard interval 1/4th. */ 610 DRX_GUARD_1DIV4, /*< Guard interval 1/4th. */
611 DRX_GUARD_UNKNOWN = DRX_UNKNOWN, 611 DRX_GUARD_UNKNOWN = DRX_UNKNOWN,
612 /**< Guard interval unknown. */ 612 /*< Guard interval unknown. */
613 DRX_GUARD_AUTO = DRX_AUTO 613 DRX_GUARD_AUTO = DRX_AUTO
614 /**< Autodetect guard interval. */ 614 /*< Autodetect guard interval. */
615}; 615};
616 616
617/** 617/*
618* \enum enum drx_fft_mode 618* \enum enum drx_fft_mode
619* \brief FFT mode. 619* \brief FFT mode.
620*/ 620*/
621enum drx_fft_mode { 621enum drx_fft_mode {
622 DRX_FFTMODE_2K = 0, /**< 2K FFT mode. */ 622 DRX_FFTMODE_2K = 0, /*< 2K FFT mode. */
623 DRX_FFTMODE_4K, /**< 4K FFT mode. */ 623 DRX_FFTMODE_4K, /*< 4K FFT mode. */
624 DRX_FFTMODE_8K, /**< 8K FFT mode. */ 624 DRX_FFTMODE_8K, /*< 8K FFT mode. */
625 DRX_FFTMODE_UNKNOWN = DRX_UNKNOWN, 625 DRX_FFTMODE_UNKNOWN = DRX_UNKNOWN,
626 /**< FFT mode unknown. */ 626 /*< FFT mode unknown. */
627 DRX_FFTMODE_AUTO = DRX_AUTO 627 DRX_FFTMODE_AUTO = DRX_AUTO
628 /**< Autodetect FFT mode. */ 628 /*< Autodetect FFT mode. */
629}; 629};
630 630
631/** 631/*
632* \enum enum drx_classification 632* \enum enum drx_classification
633* \brief Channel classification. 633* \brief Channel classification.
634*/ 634*/
635enum drx_classification { 635enum drx_classification {
636 DRX_CLASSIFICATION_GAUSS = 0, /**< Gaussion noise. */ 636 DRX_CLASSIFICATION_GAUSS = 0, /*< Gaussion noise. */
637 DRX_CLASSIFICATION_HVY_GAUSS, /**< Heavy Gaussion noise. */ 637 DRX_CLASSIFICATION_HVY_GAUSS, /*< Heavy Gaussion noise. */
638 DRX_CLASSIFICATION_COCHANNEL, /**< Co-channel. */ 638 DRX_CLASSIFICATION_COCHANNEL, /*< Co-channel. */
639 DRX_CLASSIFICATION_STATIC, /**< Static echo. */ 639 DRX_CLASSIFICATION_STATIC, /*< Static echo. */
640 DRX_CLASSIFICATION_MOVING, /**< Moving echo. */ 640 DRX_CLASSIFICATION_MOVING, /*< Moving echo. */
641 DRX_CLASSIFICATION_ZERODB, /**< Zero dB echo. */ 641 DRX_CLASSIFICATION_ZERODB, /*< Zero dB echo. */
642 DRX_CLASSIFICATION_UNKNOWN = DRX_UNKNOWN, 642 DRX_CLASSIFICATION_UNKNOWN = DRX_UNKNOWN,
643 /**< Unknown classification */ 643 /*< Unknown classification */
644 DRX_CLASSIFICATION_AUTO = DRX_AUTO 644 DRX_CLASSIFICATION_AUTO = DRX_AUTO
645 /**< Autodetect classification. */ 645 /*< Autodetect classification. */
646}; 646};
647 647
648/** 648/*
649* /enum enum drx_interleave_mode 649* /enum enum drx_interleave_mode
650* /brief Interleave modes 650* /brief Interleave modes
651*/ 651*/
@@ -673,80 +673,80 @@ enum drx_interleave_mode {
673 DRX_INTERLEAVEMODE_B52_M48, 673 DRX_INTERLEAVEMODE_B52_M48,
674 DRX_INTERLEAVEMODE_B52_M0, 674 DRX_INTERLEAVEMODE_B52_M0,
675 DRX_INTERLEAVEMODE_UNKNOWN = DRX_UNKNOWN, 675 DRX_INTERLEAVEMODE_UNKNOWN = DRX_UNKNOWN,
676 /**< Unknown interleave mode */ 676 /*< Unknown interleave mode */
677 DRX_INTERLEAVEMODE_AUTO = DRX_AUTO 677 DRX_INTERLEAVEMODE_AUTO = DRX_AUTO
678 /**< Autodetect interleave mode */ 678 /*< Autodetect interleave mode */
679}; 679};
680 680
681/** 681/*
682* \enum enum drx_carrier_mode 682* \enum enum drx_carrier_mode
683* \brief Channel Carrier Mode. 683* \brief Channel Carrier Mode.
684*/ 684*/
685enum drx_carrier_mode { 685enum drx_carrier_mode {
686 DRX_CARRIER_MULTI = 0, /**< Multi carrier mode */ 686 DRX_CARRIER_MULTI = 0, /*< Multi carrier mode */
687 DRX_CARRIER_SINGLE, /**< Single carrier mode */ 687 DRX_CARRIER_SINGLE, /*< Single carrier mode */
688 DRX_CARRIER_UNKNOWN = DRX_UNKNOWN, 688 DRX_CARRIER_UNKNOWN = DRX_UNKNOWN,
689 /**< Carrier mode unknown. */ 689 /*< Carrier mode unknown. */
690 DRX_CARRIER_AUTO = DRX_AUTO /**< Autodetect carrier mode */ 690 DRX_CARRIER_AUTO = DRX_AUTO /*< Autodetect carrier mode */
691}; 691};
692 692
693/** 693/*
694* \enum enum drx_frame_mode 694* \enum enum drx_frame_mode
695* \brief Channel Frame Mode. 695* \brief Channel Frame Mode.
696*/ 696*/
697enum drx_frame_mode { 697enum drx_frame_mode {
698 DRX_FRAMEMODE_420 = 0, /**< 420 with variable PN */ 698 DRX_FRAMEMODE_420 = 0, /*< 420 with variable PN */
699 DRX_FRAMEMODE_595, /**< 595 */ 699 DRX_FRAMEMODE_595, /*< 595 */
700 DRX_FRAMEMODE_945, /**< 945 with variable PN */ 700 DRX_FRAMEMODE_945, /*< 945 with variable PN */
701 DRX_FRAMEMODE_420_FIXED_PN, 701 DRX_FRAMEMODE_420_FIXED_PN,
702 /**< 420 with fixed PN */ 702 /*< 420 with fixed PN */
703 DRX_FRAMEMODE_945_FIXED_PN, 703 DRX_FRAMEMODE_945_FIXED_PN,
704 /**< 945 with fixed PN */ 704 /*< 945 with fixed PN */
705 DRX_FRAMEMODE_UNKNOWN = DRX_UNKNOWN, 705 DRX_FRAMEMODE_UNKNOWN = DRX_UNKNOWN,
706 /**< Frame mode unknown. */ 706 /*< Frame mode unknown. */
707 DRX_FRAMEMODE_AUTO = DRX_AUTO 707 DRX_FRAMEMODE_AUTO = DRX_AUTO
708 /**< Autodetect frame mode */ 708 /*< Autodetect frame mode */
709}; 709};
710 710
711/** 711/*
712* \enum enum drx_tps_frame 712* \enum enum drx_tps_frame
713* \brief Frame number in current super-frame. 713* \brief Frame number in current super-frame.
714*/ 714*/
715enum drx_tps_frame { 715enum drx_tps_frame {
716 DRX_TPS_FRAME1 = 0, /**< TPS frame 1. */ 716 DRX_TPS_FRAME1 = 0, /*< TPS frame 1. */
717 DRX_TPS_FRAME2, /**< TPS frame 2. */ 717 DRX_TPS_FRAME2, /*< TPS frame 2. */
718 DRX_TPS_FRAME3, /**< TPS frame 3. */ 718 DRX_TPS_FRAME3, /*< TPS frame 3. */
719 DRX_TPS_FRAME4, /**< TPS frame 4. */ 719 DRX_TPS_FRAME4, /*< TPS frame 4. */
720 DRX_TPS_FRAME_UNKNOWN = DRX_UNKNOWN 720 DRX_TPS_FRAME_UNKNOWN = DRX_UNKNOWN
721 /**< TPS frame unknown. */ 721 /*< TPS frame unknown. */
722}; 722};
723 723
724/** 724/*
725* \enum enum drx_ldpc 725* \enum enum drx_ldpc
726* \brief TPS LDPC . 726* \brief TPS LDPC .
727*/ 727*/
728enum drx_ldpc { 728enum drx_ldpc {
729 DRX_LDPC_0_4 = 0, /**< LDPC 0.4 */ 729 DRX_LDPC_0_4 = 0, /*< LDPC 0.4 */
730 DRX_LDPC_0_6, /**< LDPC 0.6 */ 730 DRX_LDPC_0_6, /*< LDPC 0.6 */
731 DRX_LDPC_0_8, /**< LDPC 0.8 */ 731 DRX_LDPC_0_8, /*< LDPC 0.8 */
732 DRX_LDPC_UNKNOWN = DRX_UNKNOWN, 732 DRX_LDPC_UNKNOWN = DRX_UNKNOWN,
733 /**< LDPC unknown. */ 733 /*< LDPC unknown. */
734 DRX_LDPC_AUTO = DRX_AUTO /**< Autodetect LDPC */ 734 DRX_LDPC_AUTO = DRX_AUTO /*< Autodetect LDPC */
735}; 735};
736 736
737/** 737/*
738* \enum enum drx_pilot_mode 738* \enum enum drx_pilot_mode
739* \brief Pilot modes in DTMB. 739* \brief Pilot modes in DTMB.
740*/ 740*/
741enum drx_pilot_mode { 741enum drx_pilot_mode {
742 DRX_PILOT_ON = 0, /**< Pilot On */ 742 DRX_PILOT_ON = 0, /*< Pilot On */
743 DRX_PILOT_OFF, /**< Pilot Off */ 743 DRX_PILOT_OFF, /*< Pilot Off */
744 DRX_PILOT_UNKNOWN = DRX_UNKNOWN, 744 DRX_PILOT_UNKNOWN = DRX_UNKNOWN,
745 /**< Pilot unknown. */ 745 /*< Pilot unknown. */
746 DRX_PILOT_AUTO = DRX_AUTO /**< Autodetect Pilot */ 746 DRX_PILOT_AUTO = DRX_AUTO /*< Autodetect Pilot */
747}; 747};
748 748
749/** 749/*
750 * enum drxu_code_action - indicate if firmware has to be uploaded or verified. 750 * enum drxu_code_action - indicate if firmware has to be uploaded or verified.
751 * @UCODE_UPLOAD: Upload the microcode image to device 751 * @UCODE_UPLOAD: Upload the microcode image to device
752 * @UCODE_VERIFY: Compare microcode image with code on device 752 * @UCODE_VERIFY: Compare microcode image with code on device
@@ -756,7 +756,7 @@ enum drxu_code_action {
756 UCODE_VERIFY 756 UCODE_VERIFY
757}; 757};
758 758
759/** 759/*
760* \enum enum drx_lock_status * \brief Used to reflect current lock status of demodulator. 760* \enum enum drx_lock_status * \brief Used to reflect current lock status of demodulator.
761* 761*
762* The generic lock states have device dependent semantics. 762* The generic lock states have device dependent semantics.
@@ -801,7 +801,7 @@ enum drx_lock_status {
801 DRX_LOCKED 801 DRX_LOCKED
802}; 802};
803 803
804/** 804/*
805* \enum enum drx_uio* \brief Used to address a User IO (UIO). 805* \enum enum drx_uio* \brief Used to address a User IO (UIO).
806*/ 806*/
807enum drx_uio { 807enum drx_uio {
@@ -840,7 +840,7 @@ enum drx_uio {
840 DRX_UIO_MAX = DRX_UIO32 840 DRX_UIO_MAX = DRX_UIO32
841}; 841};
842 842
843/** 843/*
844* \enum enum drxuio_mode * \brief Used to configure the modus oprandi of a UIO. 844* \enum enum drxuio_mode * \brief Used to configure the modus oprandi of a UIO.
845* 845*
846* DRX_UIO_MODE_FIRMWARE is an old uio mode. 846* DRX_UIO_MODE_FIRMWARE is an old uio mode.
@@ -850,37 +850,37 @@ enum drx_uio {
850*/ 850*/
851enum drxuio_mode { 851enum drxuio_mode {
852 DRX_UIO_MODE_DISABLE = 0x01, 852 DRX_UIO_MODE_DISABLE = 0x01,
853 /**< not used, pin is configured as input */ 853 /*< not used, pin is configured as input */
854 DRX_UIO_MODE_READWRITE = 0x02, 854 DRX_UIO_MODE_READWRITE = 0x02,
855 /**< used for read/write by application */ 855 /*< used for read/write by application */
856 DRX_UIO_MODE_FIRMWARE = 0x04, 856 DRX_UIO_MODE_FIRMWARE = 0x04,
857 /**< controlled by firmware, function 0 */ 857 /*< controlled by firmware, function 0 */
858 DRX_UIO_MODE_FIRMWARE0 = DRX_UIO_MODE_FIRMWARE, 858 DRX_UIO_MODE_FIRMWARE0 = DRX_UIO_MODE_FIRMWARE,
859 /**< same as above */ 859 /*< same as above */
860 DRX_UIO_MODE_FIRMWARE1 = 0x08, 860 DRX_UIO_MODE_FIRMWARE1 = 0x08,
861 /**< controlled by firmware, function 1 */ 861 /*< controlled by firmware, function 1 */
862 DRX_UIO_MODE_FIRMWARE2 = 0x10, 862 DRX_UIO_MODE_FIRMWARE2 = 0x10,
863 /**< controlled by firmware, function 2 */ 863 /*< controlled by firmware, function 2 */
864 DRX_UIO_MODE_FIRMWARE3 = 0x20, 864 DRX_UIO_MODE_FIRMWARE3 = 0x20,
865 /**< controlled by firmware, function 3 */ 865 /*< controlled by firmware, function 3 */
866 DRX_UIO_MODE_FIRMWARE4 = 0x40, 866 DRX_UIO_MODE_FIRMWARE4 = 0x40,
867 /**< controlled by firmware, function 4 */ 867 /*< controlled by firmware, function 4 */
868 DRX_UIO_MODE_FIRMWARE5 = 0x80 868 DRX_UIO_MODE_FIRMWARE5 = 0x80
869 /**< controlled by firmware, function 5 */ 869 /*< controlled by firmware, function 5 */
870}; 870};
871 871
872/** 872/*
873* \enum enum drxoob_downstream_standard * \brief Used to select OOB standard. 873* \enum enum drxoob_downstream_standard * \brief Used to select OOB standard.
874* 874*
875* Based on ANSI 55-1 and 55-2 875* Based on ANSI 55-1 and 55-2
876*/ 876*/
877enum drxoob_downstream_standard { 877enum drxoob_downstream_standard {
878 DRX_OOB_MODE_A = 0, 878 DRX_OOB_MODE_A = 0,
879 /**< ANSI 55-1 */ 879 /*< ANSI 55-1 */
880 DRX_OOB_MODE_B_GRADE_A, 880 DRX_OOB_MODE_B_GRADE_A,
881 /**< ANSI 55-2 A */ 881 /*< ANSI 55-2 A */
882 DRX_OOB_MODE_B_GRADE_B 882 DRX_OOB_MODE_B_GRADE_B
883 /**< ANSI 55-2 B */ 883 /*< ANSI 55-2 B */
884}; 884};
885 885
886/*------------------------------------------------------------------------- 886/*-------------------------------------------------------------------------
@@ -924,7 +924,7 @@ STRUCTS
924/*============================================================================*/ 924/*============================================================================*/
925/*============================================================================*/ 925/*============================================================================*/
926 926
927/** 927/*
928 * struct drxu_code_info Parameters for microcode upload and verfiy. 928 * struct drxu_code_info Parameters for microcode upload and verfiy.
929 * 929 *
930 * @mc_file: microcode file name 930 * @mc_file: microcode file name
@@ -935,7 +935,7 @@ struct drxu_code_info {
935 char *mc_file; 935 char *mc_file;
936}; 936};
937 937
938/** 938/*
939* \struct drx_mc_version_rec_t 939* \struct drx_mc_version_rec_t
940* \brief Microcode version record 940* \brief Microcode version record
941* Version numbers are stored in BCD format, as usual: 941* Version numbers are stored in BCD format, as usual:
@@ -963,7 +963,7 @@ struct drx_mc_version_rec {
963 963
964/*========================================*/ 964/*========================================*/
965 965
966/** 966/*
967* \struct drx_filter_info_t 967* \struct drx_filter_info_t
968* \brief Parameters for loading filter coefficients 968* \brief Parameters for loading filter coefficients
969* 969*
@@ -971,18 +971,18 @@ struct drx_mc_version_rec {
971*/ 971*/
972struct drx_filter_info { 972struct drx_filter_info {
973 u8 *data_re; 973 u8 *data_re;
974 /**< pointer to coefficients for RE */ 974 /*< pointer to coefficients for RE */
975 u8 *data_im; 975 u8 *data_im;
976 /**< pointer to coefficients for IM */ 976 /*< pointer to coefficients for IM */
977 u16 size_re; 977 u16 size_re;
978 /**< size of coefficients for RE */ 978 /*< size of coefficients for RE */
979 u16 size_im; 979 u16 size_im;
980 /**< size of coefficients for IM */ 980 /*< size of coefficients for IM */
981}; 981};
982 982
983/*========================================*/ 983/*========================================*/
984 984
985/** 985/*
986* \struct struct drx_channel * \brief The set of parameters describing a single channel. 986* \struct struct drx_channel * \brief The set of parameters describing a single channel.
987* 987*
988* Used by DRX_CTRL_SET_CHANNEL and DRX_CTRL_GET_CHANNEL. 988* Used by DRX_CTRL_SET_CHANNEL and DRX_CTRL_GET_CHANNEL.
@@ -991,29 +991,29 @@ struct drx_filter_info {
991*/ 991*/
992struct drx_channel { 992struct drx_channel {
993 s32 frequency; 993 s32 frequency;
994 /**< frequency in kHz */ 994 /*< frequency in kHz */
995 enum drx_bandwidth bandwidth; 995 enum drx_bandwidth bandwidth;
996 /**< bandwidth */ 996 /*< bandwidth */
997 enum drx_mirror mirror; /**< mirrored or not on RF */ 997 enum drx_mirror mirror; /*< mirrored or not on RF */
998 enum drx_modulation constellation; 998 enum drx_modulation constellation;
999 /**< constellation */ 999 /*< constellation */
1000 enum drx_hierarchy hierarchy; 1000 enum drx_hierarchy hierarchy;
1001 /**< hierarchy */ 1001 /*< hierarchy */
1002 enum drx_priority priority; /**< priority */ 1002 enum drx_priority priority; /*< priority */
1003 enum drx_coderate coderate; /**< coderate */ 1003 enum drx_coderate coderate; /*< coderate */
1004 enum drx_guard guard; /**< guard interval */ 1004 enum drx_guard guard; /*< guard interval */
1005 enum drx_fft_mode fftmode; /**< fftmode */ 1005 enum drx_fft_mode fftmode; /*< fftmode */
1006 enum drx_classification classification; 1006 enum drx_classification classification;
1007 /**< classification */ 1007 /*< classification */
1008 u32 symbolrate; 1008 u32 symbolrate;
1009 /**< symbolrate in symbols/sec */ 1009 /*< symbolrate in symbols/sec */
1010 enum drx_interleave_mode interleavemode; 1010 enum drx_interleave_mode interleavemode;
1011 /**< interleaveMode QAM */ 1011 /*< interleaveMode QAM */
1012 enum drx_ldpc ldpc; /**< ldpc */ 1012 enum drx_ldpc ldpc; /*< ldpc */
1013 enum drx_carrier_mode carrier; /**< carrier */ 1013 enum drx_carrier_mode carrier; /*< carrier */
1014 enum drx_frame_mode framemode; 1014 enum drx_frame_mode framemode;
1015 /**< frame mode */ 1015 /*< frame mode */
1016 enum drx_pilot_mode pilot; /**< pilot mode */ 1016 enum drx_pilot_mode pilot; /*< pilot mode */
1017}; 1017};
1018 1018
1019/*========================================*/ 1019/*========================================*/
@@ -1027,74 +1027,74 @@ enum drx_cfg_sqi_speed {
1027 1027
1028/*========================================*/ 1028/*========================================*/
1029 1029
1030/** 1030/*
1031* \struct struct drx_complex * A complex number. 1031* \struct struct drx_complex * A complex number.
1032* 1032*
1033* Used by DRX_CTRL_CONSTEL. 1033* Used by DRX_CTRL_CONSTEL.
1034*/ 1034*/
1035struct drx_complex { 1035struct drx_complex {
1036 s16 im; 1036 s16 im;
1037 /**< Imaginary part. */ 1037 /*< Imaginary part. */
1038 s16 re; 1038 s16 re;
1039 /**< Real part. */ 1039 /*< Real part. */
1040}; 1040};
1041 1041
1042/*========================================*/ 1042/*========================================*/
1043 1043
1044/** 1044/*
1045* \struct struct drx_frequency_plan * Array element of a frequency plan. 1045* \struct struct drx_frequency_plan * Array element of a frequency plan.
1046* 1046*
1047* Used by DRX_CTRL_SCAN_INIT. 1047* Used by DRX_CTRL_SCAN_INIT.
1048*/ 1048*/
1049struct drx_frequency_plan { 1049struct drx_frequency_plan {
1050 s32 first; 1050 s32 first;
1051 /**< First centre frequency in this band */ 1051 /*< First centre frequency in this band */
1052 s32 last; 1052 s32 last;
1053 /**< Last centre frequency in this band */ 1053 /*< Last centre frequency in this band */
1054 s32 step; 1054 s32 step;
1055 /**< Stepping frequency in this band */ 1055 /*< Stepping frequency in this band */
1056 enum drx_bandwidth bandwidth; 1056 enum drx_bandwidth bandwidth;
1057 /**< Bandwidth within this frequency band */ 1057 /*< Bandwidth within this frequency band */
1058 u16 ch_number; 1058 u16 ch_number;
1059 /**< First channel number in this band, or first 1059 /*< First channel number in this band, or first
1060 index in ch_names */ 1060 index in ch_names */
1061 char **ch_names; 1061 char **ch_names;
1062 /**< Optional list of channel names in this 1062 /*< Optional list of channel names in this
1063 band */ 1063 band */
1064}; 1064};
1065 1065
1066/*========================================*/ 1066/*========================================*/
1067 1067
1068/** 1068/*
1069* \struct struct drx_scan_param * Parameters for channel scan. 1069* \struct struct drx_scan_param * Parameters for channel scan.
1070* 1070*
1071* Used by DRX_CTRL_SCAN_INIT. 1071* Used by DRX_CTRL_SCAN_INIT.
1072*/ 1072*/
1073struct drx_scan_param { 1073struct drx_scan_param {
1074 struct drx_frequency_plan *frequency_plan; 1074 struct drx_frequency_plan *frequency_plan;
1075 /**< Frequency plan (array)*/ 1075 /*< Frequency plan (array)*/
1076 u16 frequency_plan_size; /**< Number of bands */ 1076 u16 frequency_plan_size; /*< Number of bands */
1077 u32 num_tries; /**< Max channels tried */ 1077 u32 num_tries; /*< Max channels tried */
1078 s32 skip; /**< Minimum frequency step to take 1078 s32 skip; /*< Minimum frequency step to take
1079 after a channel is found */ 1079 after a channel is found */
1080 void *ext_params; /**< Standard specific params */ 1080 void *ext_params; /*< Standard specific params */
1081}; 1081};
1082 1082
1083/*========================================*/ 1083/*========================================*/
1084 1084
1085/** 1085/*
1086* \brief Scan commands. 1086* \brief Scan commands.
1087* Used by scanning algorithms. 1087* Used by scanning algorithms.
1088*/ 1088*/
1089enum drx_scan_command { 1089enum drx_scan_command {
1090 DRX_SCAN_COMMAND_INIT = 0,/**< Initialize scanning */ 1090 DRX_SCAN_COMMAND_INIT = 0,/*< Initialize scanning */
1091 DRX_SCAN_COMMAND_NEXT, /**< Next scan */ 1091 DRX_SCAN_COMMAND_NEXT, /*< Next scan */
1092 DRX_SCAN_COMMAND_STOP /**< Stop scanning */ 1092 DRX_SCAN_COMMAND_STOP /*< Stop scanning */
1093}; 1093};
1094 1094
1095/*========================================*/ 1095/*========================================*/
1096 1096
1097/** 1097/*
1098* \brief Inner scan function prototype. 1098* \brief Inner scan function prototype.
1099*/ 1099*/
1100typedef int(*drx_scan_func_t) (void *scan_context, 1100typedef int(*drx_scan_func_t) (void *scan_context,
@@ -1104,77 +1104,77 @@ typedef int(*drx_scan_func_t) (void *scan_context,
1104 1104
1105/*========================================*/ 1105/*========================================*/
1106 1106
1107/** 1107/*
1108* \struct struct drxtps_info * TPS information, DVB-T specific. 1108* \struct struct drxtps_info * TPS information, DVB-T specific.
1109* 1109*
1110* Used by DRX_CTRL_TPS_INFO. 1110* Used by DRX_CTRL_TPS_INFO.
1111*/ 1111*/
1112 struct drxtps_info { 1112 struct drxtps_info {
1113 enum drx_fft_mode fftmode; /**< Fft mode */ 1113 enum drx_fft_mode fftmode; /*< Fft mode */
1114 enum drx_guard guard; /**< Guard interval */ 1114 enum drx_guard guard; /*< Guard interval */
1115 enum drx_modulation constellation; 1115 enum drx_modulation constellation;
1116 /**< Constellation */ 1116 /*< Constellation */
1117 enum drx_hierarchy hierarchy; 1117 enum drx_hierarchy hierarchy;
1118 /**< Hierarchy */ 1118 /*< Hierarchy */
1119 enum drx_coderate high_coderate; 1119 enum drx_coderate high_coderate;
1120 /**< High code rate */ 1120 /*< High code rate */
1121 enum drx_coderate low_coderate; 1121 enum drx_coderate low_coderate;
1122 /**< Low cod rate */ 1122 /*< Low cod rate */
1123 enum drx_tps_frame frame; /**< Tps frame */ 1123 enum drx_tps_frame frame; /*< Tps frame */
1124 u8 length; /**< Length */ 1124 u8 length; /*< Length */
1125 u16 cell_id; /**< Cell id */ 1125 u16 cell_id; /*< Cell id */
1126 }; 1126 };
1127 1127
1128/*========================================*/ 1128/*========================================*/
1129 1129
1130/** 1130/*
1131* \brief Power mode of device. 1131* \brief Power mode of device.
1132* 1132*
1133* Used by DRX_CTRL_SET_POWER_MODE. 1133* Used by DRX_CTRL_SET_POWER_MODE.
1134*/ 1134*/
1135 enum drx_power_mode { 1135 enum drx_power_mode {
1136 DRX_POWER_UP = 0, 1136 DRX_POWER_UP = 0,
1137 /**< Generic , Power Up Mode */ 1137 /*< Generic , Power Up Mode */
1138 DRX_POWER_MODE_1, 1138 DRX_POWER_MODE_1,
1139 /**< Device specific , Power Up Mode */ 1139 /*< Device specific , Power Up Mode */
1140 DRX_POWER_MODE_2, 1140 DRX_POWER_MODE_2,
1141 /**< Device specific , Power Up Mode */ 1141 /*< Device specific , Power Up Mode */
1142 DRX_POWER_MODE_3, 1142 DRX_POWER_MODE_3,
1143 /**< Device specific , Power Up Mode */ 1143 /*< Device specific , Power Up Mode */
1144 DRX_POWER_MODE_4, 1144 DRX_POWER_MODE_4,
1145 /**< Device specific , Power Up Mode */ 1145 /*< Device specific , Power Up Mode */
1146 DRX_POWER_MODE_5, 1146 DRX_POWER_MODE_5,
1147 /**< Device specific , Power Up Mode */ 1147 /*< Device specific , Power Up Mode */
1148 DRX_POWER_MODE_6, 1148 DRX_POWER_MODE_6,
1149 /**< Device specific , Power Up Mode */ 1149 /*< Device specific , Power Up Mode */
1150 DRX_POWER_MODE_7, 1150 DRX_POWER_MODE_7,
1151 /**< Device specific , Power Up Mode */ 1151 /*< Device specific , Power Up Mode */
1152 DRX_POWER_MODE_8, 1152 DRX_POWER_MODE_8,
1153 /**< Device specific , Power Up Mode */ 1153 /*< Device specific , Power Up Mode */
1154 1154
1155 DRX_POWER_MODE_9, 1155 DRX_POWER_MODE_9,
1156 /**< Device specific , Power Down Mode */ 1156 /*< Device specific , Power Down Mode */
1157 DRX_POWER_MODE_10, 1157 DRX_POWER_MODE_10,
1158 /**< Device specific , Power Down Mode */ 1158 /*< Device specific , Power Down Mode */
1159 DRX_POWER_MODE_11, 1159 DRX_POWER_MODE_11,
1160 /**< Device specific , Power Down Mode */ 1160 /*< Device specific , Power Down Mode */
1161 DRX_POWER_MODE_12, 1161 DRX_POWER_MODE_12,
1162 /**< Device specific , Power Down Mode */ 1162 /*< Device specific , Power Down Mode */
1163 DRX_POWER_MODE_13, 1163 DRX_POWER_MODE_13,
1164 /**< Device specific , Power Down Mode */ 1164 /*< Device specific , Power Down Mode */
1165 DRX_POWER_MODE_14, 1165 DRX_POWER_MODE_14,
1166 /**< Device specific , Power Down Mode */ 1166 /*< Device specific , Power Down Mode */
1167 DRX_POWER_MODE_15, 1167 DRX_POWER_MODE_15,
1168 /**< Device specific , Power Down Mode */ 1168 /*< Device specific , Power Down Mode */
1169 DRX_POWER_MODE_16, 1169 DRX_POWER_MODE_16,
1170 /**< Device specific , Power Down Mode */ 1170 /*< Device specific , Power Down Mode */
1171 DRX_POWER_DOWN = 255 1171 DRX_POWER_DOWN = 255
1172 /**< Generic , Power Down Mode */ 1172 /*< Generic , Power Down Mode */
1173 }; 1173 };
1174 1174
1175/*========================================*/ 1175/*========================================*/
1176 1176
1177/** 1177/*
1178* \enum enum drx_module * \brief Software module identification. 1178* \enum enum drx_module * \brief Software module identification.
1179* 1179*
1180* Used by DRX_CTRL_VERSION. 1180* Used by DRX_CTRL_VERSION.
@@ -1191,93 +1191,93 @@ typedef int(*drx_scan_func_t) (void *scan_context,
1191 DRX_MODULE_UNKNOWN 1191 DRX_MODULE_UNKNOWN
1192 }; 1192 };
1193 1193
1194/** 1194/*
1195* \enum struct drx_version * \brief Version information of one software module. 1195* \enum struct drx_version * \brief Version information of one software module.
1196* 1196*
1197* Used by DRX_CTRL_VERSION. 1197* Used by DRX_CTRL_VERSION.
1198*/ 1198*/
1199 struct drx_version { 1199 struct drx_version {
1200 enum drx_module module_type; 1200 enum drx_module module_type;
1201 /**< Type identifier of the module */ 1201 /*< Type identifier of the module */
1202 char *module_name; 1202 char *module_name;
1203 /**< Name or description of module */ 1203 /*< Name or description of module */
1204 u16 v_major; /**< Major version number */ 1204 u16 v_major; /*< Major version number */
1205 u16 v_minor; /**< Minor version number */ 1205 u16 v_minor; /*< Minor version number */
1206 u16 v_patch; /**< Patch version number */ 1206 u16 v_patch; /*< Patch version number */
1207 char *v_string; /**< Version as text string */ 1207 char *v_string; /*< Version as text string */
1208 }; 1208 };
1209 1209
1210/** 1210/*
1211* \enum struct drx_version_list * \brief List element of NULL terminated, linked list for version information. 1211* \enum struct drx_version_list * \brief List element of NULL terminated, linked list for version information.
1212* 1212*
1213* Used by DRX_CTRL_VERSION. 1213* Used by DRX_CTRL_VERSION.
1214*/ 1214*/
1215struct drx_version_list { 1215struct drx_version_list {
1216 struct drx_version *version;/**< Version information */ 1216 struct drx_version *version;/*< Version information */
1217 struct drx_version_list *next; 1217 struct drx_version_list *next;
1218 /**< Next list element */ 1218 /*< Next list element */
1219}; 1219};
1220 1220
1221/*========================================*/ 1221/*========================================*/
1222 1222
1223/** 1223/*
1224* \brief Parameters needed to confiugure a UIO. 1224* \brief Parameters needed to confiugure a UIO.
1225* 1225*
1226* Used by DRX_CTRL_UIO_CFG. 1226* Used by DRX_CTRL_UIO_CFG.
1227*/ 1227*/
1228 struct drxuio_cfg { 1228 struct drxuio_cfg {
1229 enum drx_uio uio; 1229 enum drx_uio uio;
1230 /**< UIO identifier */ 1230 /*< UIO identifier */
1231 enum drxuio_mode mode; 1231 enum drxuio_mode mode;
1232 /**< UIO operational mode */ 1232 /*< UIO operational mode */
1233 }; 1233 };
1234 1234
1235/*========================================*/ 1235/*========================================*/
1236 1236
1237/** 1237/*
1238* \brief Parameters needed to read from or write to a UIO. 1238* \brief Parameters needed to read from or write to a UIO.
1239* 1239*
1240* Used by DRX_CTRL_UIO_READ and DRX_CTRL_UIO_WRITE. 1240* Used by DRX_CTRL_UIO_READ and DRX_CTRL_UIO_WRITE.
1241*/ 1241*/
1242 struct drxuio_data { 1242 struct drxuio_data {
1243 enum drx_uio uio; 1243 enum drx_uio uio;
1244 /**< UIO identifier */ 1244 /*< UIO identifier */
1245 bool value; 1245 bool value;
1246 /**< UIO value (true=1, false=0) */ 1246 /*< UIO value (true=1, false=0) */
1247 }; 1247 };
1248 1248
1249/*========================================*/ 1249/*========================================*/
1250 1250
1251/** 1251/*
1252* \brief Parameters needed to configure OOB. 1252* \brief Parameters needed to configure OOB.
1253* 1253*
1254* Used by DRX_CTRL_SET_OOB. 1254* Used by DRX_CTRL_SET_OOB.
1255*/ 1255*/
1256 struct drxoob { 1256 struct drxoob {
1257 s32 frequency; /**< Frequency in kHz */ 1257 s32 frequency; /*< Frequency in kHz */
1258 enum drxoob_downstream_standard standard; 1258 enum drxoob_downstream_standard standard;
1259 /**< OOB standard */ 1259 /*< OOB standard */
1260 bool spectrum_inverted; /**< If true, then spectrum 1260 bool spectrum_inverted; /*< If true, then spectrum
1261 is inverted */ 1261 is inverted */
1262 }; 1262 };
1263 1263
1264/*========================================*/ 1264/*========================================*/
1265 1265
1266/** 1266/*
1267* \brief Metrics from OOB. 1267* \brief Metrics from OOB.
1268* 1268*
1269* Used by DRX_CTRL_GET_OOB. 1269* Used by DRX_CTRL_GET_OOB.
1270*/ 1270*/
1271 struct drxoob_status { 1271 struct drxoob_status {
1272 s32 frequency; /**< Frequency in Khz */ 1272 s32 frequency; /*< Frequency in Khz */
1273 enum drx_lock_status lock; /**< Lock status */ 1273 enum drx_lock_status lock; /*< Lock status */
1274 u32 mer; /**< MER */ 1274 u32 mer; /*< MER */
1275 s32 symbol_rate_offset; /**< Symbolrate offset in ppm */ 1275 s32 symbol_rate_offset; /*< Symbolrate offset in ppm */
1276 }; 1276 };
1277 1277
1278/*========================================*/ 1278/*========================================*/
1279 1279
1280/** 1280/*
1281* \brief Device dependent configuration data. 1281* \brief Device dependent configuration data.
1282* 1282*
1283* Used by DRX_CTRL_SET_CFG and DRX_CTRL_GET_CFG. 1283* Used by DRX_CTRL_SET_CFG and DRX_CTRL_GET_CFG.
@@ -1285,14 +1285,14 @@ struct drx_version_list {
1285*/ 1285*/
1286 struct drx_cfg { 1286 struct drx_cfg {
1287 u32 cfg_type; 1287 u32 cfg_type;
1288 /**< Function identifier */ 1288 /*< Function identifier */
1289 void *cfg_data; 1289 void *cfg_data;
1290 /**< Function data */ 1290 /*< Function data */
1291 }; 1291 };
1292 1292
1293/*========================================*/ 1293/*========================================*/
1294 1294
1295/** 1295/*
1296* /struct DRXMpegStartWidth_t 1296* /struct DRXMpegStartWidth_t
1297* MStart width [nr MCLK cycles] for serial MPEG output. 1297* MStart width [nr MCLK cycles] for serial MPEG output.
1298*/ 1298*/
@@ -1303,7 +1303,7 @@ struct drx_version_list {
1303 }; 1303 };
1304 1304
1305/* CTRL CFG MPEG output */ 1305/* CTRL CFG MPEG output */
1306/** 1306/*
1307* \struct struct drx_cfg_mpeg_output * \brief Configuration parameters for MPEG output control. 1307* \struct struct drx_cfg_mpeg_output * \brief Configuration parameters for MPEG output control.
1308* 1308*
1309* Used by DRX_CFG_MPEG_OUTPUT, in combination with DRX_CTRL_SET_CFG and 1309* Used by DRX_CFG_MPEG_OUTPUT, in combination with DRX_CTRL_SET_CFG and
@@ -1311,29 +1311,29 @@ struct drx_version_list {
1311*/ 1311*/
1312 1312
1313 struct drx_cfg_mpeg_output { 1313 struct drx_cfg_mpeg_output {
1314 bool enable_mpeg_output;/**< If true, enable MPEG output */ 1314 bool enable_mpeg_output;/*< If true, enable MPEG output */
1315 bool insert_rs_byte; /**< If true, insert RS byte */ 1315 bool insert_rs_byte; /*< If true, insert RS byte */
1316 bool enable_parallel; /**< If true, parallel out otherwise 1316 bool enable_parallel; /*< If true, parallel out otherwise
1317 serial */ 1317 serial */
1318 bool invert_data; /**< If true, invert DATA signals */ 1318 bool invert_data; /*< If true, invert DATA signals */
1319 bool invert_err; /**< If true, invert ERR signal */ 1319 bool invert_err; /*< If true, invert ERR signal */
1320 bool invert_str; /**< If true, invert STR signals */ 1320 bool invert_str; /*< If true, invert STR signals */
1321 bool invert_val; /**< If true, invert VAL signals */ 1321 bool invert_val; /*< If true, invert VAL signals */
1322 bool invert_clk; /**< If true, invert CLK signals */ 1322 bool invert_clk; /*< If true, invert CLK signals */
1323 bool static_clk; /**< If true, static MPEG clockrate 1323 bool static_clk; /*< If true, static MPEG clockrate
1324 will be used, otherwise clockrate 1324 will be used, otherwise clockrate
1325 will adapt to the bitrate of the 1325 will adapt to the bitrate of the
1326 TS */ 1326 TS */
1327 u32 bitrate; /**< Maximum bitrate in b/s in case 1327 u32 bitrate; /*< Maximum bitrate in b/s in case
1328 static clockrate is selected */ 1328 static clockrate is selected */
1329 enum drxmpeg_str_width width_str; 1329 enum drxmpeg_str_width width_str;
1330 /**< MPEG start width */ 1330 /*< MPEG start width */
1331 }; 1331 };
1332 1332
1333 1333
1334/*========================================*/ 1334/*========================================*/
1335 1335
1336/** 1336/*
1337* \struct struct drxi2c_data * \brief Data for I2C via 2nd or 3rd or etc I2C port. 1337* \struct struct drxi2c_data * \brief Data for I2C via 2nd or 3rd or etc I2C port.
1338* 1338*
1339* Used by DRX_CTRL_I2C_READWRITE. 1339* Used by DRX_CTRL_I2C_READWRITE.
@@ -1341,187 +1341,187 @@ struct drx_version_list {
1341* 1341*
1342*/ 1342*/
1343 struct drxi2c_data { 1343 struct drxi2c_data {
1344 u16 port_nr; /**< I2C port number */ 1344 u16 port_nr; /*< I2C port number */
1345 struct i2c_device_addr *w_dev_addr; 1345 struct i2c_device_addr *w_dev_addr;
1346 /**< Write device address */ 1346 /*< Write device address */
1347 u16 w_count; /**< Size of write data in bytes */ 1347 u16 w_count; /*< Size of write data in bytes */
1348 u8 *wData; /**< Pointer to write data */ 1348 u8 *wData; /*< Pointer to write data */
1349 struct i2c_device_addr *r_dev_addr; 1349 struct i2c_device_addr *r_dev_addr;
1350 /**< Read device address */ 1350 /*< Read device address */
1351 u16 r_count; /**< Size of data to read in bytes */ 1351 u16 r_count; /*< Size of data to read in bytes */
1352 u8 *r_data; /**< Pointer to read buffer */ 1352 u8 *r_data; /*< Pointer to read buffer */
1353 }; 1353 };
1354 1354
1355/*========================================*/ 1355/*========================================*/
1356 1356
1357/** 1357/*
1358* \enum enum drx_aud_standard * \brief Audio standard identifier. 1358* \enum enum drx_aud_standard * \brief Audio standard identifier.
1359* 1359*
1360* Used by DRX_CTRL_SET_AUD. 1360* Used by DRX_CTRL_SET_AUD.
1361*/ 1361*/
1362 enum drx_aud_standard { 1362 enum drx_aud_standard {
1363 DRX_AUD_STANDARD_BTSC, /**< set BTSC standard (USA) */ 1363 DRX_AUD_STANDARD_BTSC, /*< set BTSC standard (USA) */
1364 DRX_AUD_STANDARD_A2, /**< set A2-Korea FM Stereo */ 1364 DRX_AUD_STANDARD_A2, /*< set A2-Korea FM Stereo */
1365 DRX_AUD_STANDARD_EIAJ, /**< set to Japanese FM Stereo */ 1365 DRX_AUD_STANDARD_EIAJ, /*< set to Japanese FM Stereo */
1366 DRX_AUD_STANDARD_FM_STEREO,/**< set to FM-Stereo Radio */ 1366 DRX_AUD_STANDARD_FM_STEREO,/*< set to FM-Stereo Radio */
1367 DRX_AUD_STANDARD_M_MONO, /**< for 4.5 MHz mono detected */ 1367 DRX_AUD_STANDARD_M_MONO, /*< for 4.5 MHz mono detected */
1368 DRX_AUD_STANDARD_D_K_MONO, /**< for 6.5 MHz mono detected */ 1368 DRX_AUD_STANDARD_D_K_MONO, /*< for 6.5 MHz mono detected */
1369 DRX_AUD_STANDARD_BG_FM, /**< set BG_FM standard */ 1369 DRX_AUD_STANDARD_BG_FM, /*< set BG_FM standard */
1370 DRX_AUD_STANDARD_D_K1, /**< set D_K1 standard */ 1370 DRX_AUD_STANDARD_D_K1, /*< set D_K1 standard */
1371 DRX_AUD_STANDARD_D_K2, /**< set D_K2 standard */ 1371 DRX_AUD_STANDARD_D_K2, /*< set D_K2 standard */
1372 DRX_AUD_STANDARD_D_K3, /**< set D_K3 standard */ 1372 DRX_AUD_STANDARD_D_K3, /*< set D_K3 standard */
1373 DRX_AUD_STANDARD_BG_NICAM_FM, 1373 DRX_AUD_STANDARD_BG_NICAM_FM,
1374 /**< set BG_NICAM_FM standard */ 1374 /*< set BG_NICAM_FM standard */
1375 DRX_AUD_STANDARD_L_NICAM_AM, 1375 DRX_AUD_STANDARD_L_NICAM_AM,
1376 /**< set L_NICAM_AM standard */ 1376 /*< set L_NICAM_AM standard */
1377 DRX_AUD_STANDARD_I_NICAM_FM, 1377 DRX_AUD_STANDARD_I_NICAM_FM,
1378 /**< set I_NICAM_FM standard */ 1378 /*< set I_NICAM_FM standard */
1379 DRX_AUD_STANDARD_D_K_NICAM_FM, 1379 DRX_AUD_STANDARD_D_K_NICAM_FM,
1380 /**< set D_K_NICAM_FM standard */ 1380 /*< set D_K_NICAM_FM standard */
1381 DRX_AUD_STANDARD_NOT_READY,/**< used to detect audio standard */ 1381 DRX_AUD_STANDARD_NOT_READY,/*< used to detect audio standard */
1382 DRX_AUD_STANDARD_AUTO = DRX_AUTO, 1382 DRX_AUD_STANDARD_AUTO = DRX_AUTO,
1383 /**< Automatic Standard Detection */ 1383 /*< Automatic Standard Detection */
1384 DRX_AUD_STANDARD_UNKNOWN = DRX_UNKNOWN 1384 DRX_AUD_STANDARD_UNKNOWN = DRX_UNKNOWN
1385 /**< used as auto and for readback */ 1385 /*< used as auto and for readback */
1386 }; 1386 };
1387 1387
1388/* CTRL_AUD_GET_STATUS - struct drx_aud_status */ 1388/* CTRL_AUD_GET_STATUS - struct drx_aud_status */
1389/** 1389/*
1390* \enum enum drx_aud_nicam_status * \brief Status of NICAM carrier. 1390* \enum enum drx_aud_nicam_status * \brief Status of NICAM carrier.
1391*/ 1391*/
1392 enum drx_aud_nicam_status { 1392 enum drx_aud_nicam_status {
1393 DRX_AUD_NICAM_DETECTED = 0, 1393 DRX_AUD_NICAM_DETECTED = 0,
1394 /**< NICAM carrier detected */ 1394 /*< NICAM carrier detected */
1395 DRX_AUD_NICAM_NOT_DETECTED, 1395 DRX_AUD_NICAM_NOT_DETECTED,
1396 /**< NICAM carrier not detected */ 1396 /*< NICAM carrier not detected */
1397 DRX_AUD_NICAM_BAD /**< NICAM carrier bad quality */ 1397 DRX_AUD_NICAM_BAD /*< NICAM carrier bad quality */
1398 }; 1398 };
1399 1399
1400/** 1400/*
1401* \struct struct drx_aud_status * \brief Audio status characteristics. 1401* \struct struct drx_aud_status * \brief Audio status characteristics.
1402*/ 1402*/
1403 struct drx_aud_status { 1403 struct drx_aud_status {
1404 bool stereo; /**< stereo detection */ 1404 bool stereo; /*< stereo detection */
1405 bool carrier_a; /**< carrier A detected */ 1405 bool carrier_a; /*< carrier A detected */
1406 bool carrier_b; /**< carrier B detected */ 1406 bool carrier_b; /*< carrier B detected */
1407 bool sap; /**< sap / bilingual detection */ 1407 bool sap; /*< sap / bilingual detection */
1408 bool rds; /**< RDS data array present */ 1408 bool rds; /*< RDS data array present */
1409 enum drx_aud_nicam_status nicam_status; 1409 enum drx_aud_nicam_status nicam_status;
1410 /**< status of NICAM carrier */ 1410 /*< status of NICAM carrier */
1411 s8 fm_ident; /**< FM Identification value */ 1411 s8 fm_ident; /*< FM Identification value */
1412 }; 1412 };
1413 1413
1414/* CTRL_AUD_READ_RDS - DRXRDSdata_t */ 1414/* CTRL_AUD_READ_RDS - DRXRDSdata_t */
1415 1415
1416/** 1416/*
1417* \struct DRXRDSdata_t 1417* \struct DRXRDSdata_t
1418* \brief Raw RDS data array. 1418* \brief Raw RDS data array.
1419*/ 1419*/
1420 struct drx_cfg_aud_rds { 1420 struct drx_cfg_aud_rds {
1421 bool valid; /**< RDS data validation */ 1421 bool valid; /*< RDS data validation */
1422 u16 data[18]; /**< data from one RDS data array */ 1422 u16 data[18]; /*< data from one RDS data array */
1423 }; 1423 };
1424 1424
1425/* DRX_CFG_AUD_VOLUME - struct drx_cfg_aud_volume - set/get */ 1425/* DRX_CFG_AUD_VOLUME - struct drx_cfg_aud_volume - set/get */
1426/** 1426/*
1427* \enum DRXAudAVCDecayTime_t 1427* \enum DRXAudAVCDecayTime_t
1428* \brief Automatic volume control configuration. 1428* \brief Automatic volume control configuration.
1429*/ 1429*/
1430 enum drx_aud_avc_mode { 1430 enum drx_aud_avc_mode {
1431 DRX_AUD_AVC_OFF, /**< Automatic volume control off */ 1431 DRX_AUD_AVC_OFF, /*< Automatic volume control off */
1432 DRX_AUD_AVC_DECAYTIME_8S, /**< level volume in 8 seconds */ 1432 DRX_AUD_AVC_DECAYTIME_8S, /*< level volume in 8 seconds */
1433 DRX_AUD_AVC_DECAYTIME_4S, /**< level volume in 4 seconds */ 1433 DRX_AUD_AVC_DECAYTIME_4S, /*< level volume in 4 seconds */
1434 DRX_AUD_AVC_DECAYTIME_2S, /**< level volume in 2 seconds */ 1434 DRX_AUD_AVC_DECAYTIME_2S, /*< level volume in 2 seconds */
1435 DRX_AUD_AVC_DECAYTIME_20MS/**< level volume in 20 millisec */ 1435 DRX_AUD_AVC_DECAYTIME_20MS/*< level volume in 20 millisec */
1436 }; 1436 };
1437 1437
1438/** 1438/*
1439* /enum DRXAudMaxAVCGain_t 1439* /enum DRXAudMaxAVCGain_t
1440* /brief Automatic volume control max gain in audio baseband. 1440* /brief Automatic volume control max gain in audio baseband.
1441*/ 1441*/
1442 enum drx_aud_avc_max_gain { 1442 enum drx_aud_avc_max_gain {
1443 DRX_AUD_AVC_MAX_GAIN_0DB, /**< maximum AVC gain 0 dB */ 1443 DRX_AUD_AVC_MAX_GAIN_0DB, /*< maximum AVC gain 0 dB */
1444 DRX_AUD_AVC_MAX_GAIN_6DB, /**< maximum AVC gain 6 dB */ 1444 DRX_AUD_AVC_MAX_GAIN_6DB, /*< maximum AVC gain 6 dB */
1445 DRX_AUD_AVC_MAX_GAIN_12DB /**< maximum AVC gain 12 dB */ 1445 DRX_AUD_AVC_MAX_GAIN_12DB /*< maximum AVC gain 12 dB */
1446 }; 1446 };
1447 1447
1448/** 1448/*
1449* /enum DRXAudMaxAVCAtten_t 1449* /enum DRXAudMaxAVCAtten_t
1450* /brief Automatic volume control max attenuation in audio baseband. 1450* /brief Automatic volume control max attenuation in audio baseband.
1451*/ 1451*/
1452 enum drx_aud_avc_max_atten { 1452 enum drx_aud_avc_max_atten {
1453 DRX_AUD_AVC_MAX_ATTEN_12DB, 1453 DRX_AUD_AVC_MAX_ATTEN_12DB,
1454 /**< maximum AVC attenuation 12 dB */ 1454 /*< maximum AVC attenuation 12 dB */
1455 DRX_AUD_AVC_MAX_ATTEN_18DB, 1455 DRX_AUD_AVC_MAX_ATTEN_18DB,
1456 /**< maximum AVC attenuation 18 dB */ 1456 /*< maximum AVC attenuation 18 dB */
1457 DRX_AUD_AVC_MAX_ATTEN_24DB/**< maximum AVC attenuation 24 dB */ 1457 DRX_AUD_AVC_MAX_ATTEN_24DB/*< maximum AVC attenuation 24 dB */
1458 }; 1458 };
1459/** 1459/*
1460* \struct struct drx_cfg_aud_volume * \brief Audio volume configuration. 1460* \struct struct drx_cfg_aud_volume * \brief Audio volume configuration.
1461*/ 1461*/
1462 struct drx_cfg_aud_volume { 1462 struct drx_cfg_aud_volume {
1463 bool mute; /**< mute overrides volume setting */ 1463 bool mute; /*< mute overrides volume setting */
1464 s16 volume; /**< volume, range -114 to 12 dB */ 1464 s16 volume; /*< volume, range -114 to 12 dB */
1465 enum drx_aud_avc_mode avc_mode; /**< AVC auto volume control mode */ 1465 enum drx_aud_avc_mode avc_mode; /*< AVC auto volume control mode */
1466 u16 avc_ref_level; /**< AVC reference level */ 1466 u16 avc_ref_level; /*< AVC reference level */
1467 enum drx_aud_avc_max_gain avc_max_gain; 1467 enum drx_aud_avc_max_gain avc_max_gain;
1468 /**< AVC max gain selection */ 1468 /*< AVC max gain selection */
1469 enum drx_aud_avc_max_atten avc_max_atten; 1469 enum drx_aud_avc_max_atten avc_max_atten;
1470 /**< AVC max attenuation selection */ 1470 /*< AVC max attenuation selection */
1471 s16 strength_left; /**< quasi-peak, left speaker */ 1471 s16 strength_left; /*< quasi-peak, left speaker */
1472 s16 strength_right; /**< quasi-peak, right speaker */ 1472 s16 strength_right; /*< quasi-peak, right speaker */
1473 }; 1473 };
1474 1474
1475/* DRX_CFG_I2S_OUTPUT - struct drx_cfg_i2s_output - set/get */ 1475/* DRX_CFG_I2S_OUTPUT - struct drx_cfg_i2s_output - set/get */
1476/** 1476/*
1477* \enum enum drxi2s_mode * \brief I2S output mode. 1477* \enum enum drxi2s_mode * \brief I2S output mode.
1478*/ 1478*/
1479 enum drxi2s_mode { 1479 enum drxi2s_mode {
1480 DRX_I2S_MODE_MASTER, /**< I2S is in master mode */ 1480 DRX_I2S_MODE_MASTER, /*< I2S is in master mode */
1481 DRX_I2S_MODE_SLAVE /**< I2S is in slave mode */ 1481 DRX_I2S_MODE_SLAVE /*< I2S is in slave mode */
1482 }; 1482 };
1483 1483
1484/** 1484/*
1485* \enum enum drxi2s_word_length * \brief Width of I2S data. 1485* \enum enum drxi2s_word_length * \brief Width of I2S data.
1486*/ 1486*/
1487 enum drxi2s_word_length { 1487 enum drxi2s_word_length {
1488 DRX_I2S_WORDLENGTH_32 = 0,/**< I2S data is 32 bit wide */ 1488 DRX_I2S_WORDLENGTH_32 = 0,/*< I2S data is 32 bit wide */
1489 DRX_I2S_WORDLENGTH_16 = 1 /**< I2S data is 16 bit wide */ 1489 DRX_I2S_WORDLENGTH_16 = 1 /*< I2S data is 16 bit wide */
1490 }; 1490 };
1491 1491
1492/** 1492/*
1493* \enum enum drxi2s_format * \brief Data wordstrobe alignment for I2S. 1493* \enum enum drxi2s_format * \brief Data wordstrobe alignment for I2S.
1494*/ 1494*/
1495 enum drxi2s_format { 1495 enum drxi2s_format {
1496 DRX_I2S_FORMAT_WS_WITH_DATA, 1496 DRX_I2S_FORMAT_WS_WITH_DATA,
1497 /**< I2S data and wordstrobe are aligned */ 1497 /*< I2S data and wordstrobe are aligned */
1498 DRX_I2S_FORMAT_WS_ADVANCED 1498 DRX_I2S_FORMAT_WS_ADVANCED
1499 /**< I2S data one cycle after wordstrobe */ 1499 /*< I2S data one cycle after wordstrobe */
1500 }; 1500 };
1501 1501
1502/** 1502/*
1503* \enum enum drxi2s_polarity * \brief Polarity of I2S data. 1503* \enum enum drxi2s_polarity * \brief Polarity of I2S data.
1504*/ 1504*/
1505 enum drxi2s_polarity { 1505 enum drxi2s_polarity {
1506 DRX_I2S_POLARITY_RIGHT,/**< wordstrobe - right high, left low */ 1506 DRX_I2S_POLARITY_RIGHT,/*< wordstrobe - right high, left low */
1507 DRX_I2S_POLARITY_LEFT /**< wordstrobe - right low, left high */ 1507 DRX_I2S_POLARITY_LEFT /*< wordstrobe - right low, left high */
1508 }; 1508 };
1509 1509
1510/** 1510/*
1511* \struct struct drx_cfg_i2s_output * \brief I2S output configuration. 1511* \struct struct drx_cfg_i2s_output * \brief I2S output configuration.
1512*/ 1512*/
1513 struct drx_cfg_i2s_output { 1513 struct drx_cfg_i2s_output {
1514 bool output_enable; /**< I2S output enable */ 1514 bool output_enable; /*< I2S output enable */
1515 u32 frequency; /**< range from 8000-48000 Hz */ 1515 u32 frequency; /*< range from 8000-48000 Hz */
1516 enum drxi2s_mode mode; /**< I2S mode, master or slave */ 1516 enum drxi2s_mode mode; /*< I2S mode, master or slave */
1517 enum drxi2s_word_length word_length; 1517 enum drxi2s_word_length word_length;
1518 /**< I2S wordlength, 16 or 32 bits */ 1518 /*< I2S wordlength, 16 or 32 bits */
1519 enum drxi2s_polarity polarity;/**< I2S wordstrobe polarity */ 1519 enum drxi2s_polarity polarity;/*< I2S wordstrobe polarity */
1520 enum drxi2s_format format; /**< I2S wordstrobe delay to data */ 1520 enum drxi2s_format format; /*< I2S wordstrobe delay to data */
1521 }; 1521 };
1522 1522
1523/* ------------------------------expert interface-----------------------------*/ 1523/* ------------------------------expert interface-----------------------------*/
1524/** 1524/*
1525* /enum enum drx_aud_fm_deemphasis * setting for FM-Deemphasis in audio demodulator. 1525* /enum enum drx_aud_fm_deemphasis * setting for FM-Deemphasis in audio demodulator.
1526* 1526*
1527*/ 1527*/
@@ -1531,7 +1531,7 @@ struct drx_version_list {
1531 DRX_AUD_FM_DEEMPH_OFF 1531 DRX_AUD_FM_DEEMPH_OFF
1532 }; 1532 };
1533 1533
1534/** 1534/*
1535* /enum DRXAudDeviation_t 1535* /enum DRXAudDeviation_t
1536* setting for deviation mode in audio demodulator. 1536* setting for deviation mode in audio demodulator.
1537* 1537*
@@ -1541,7 +1541,7 @@ struct drx_version_list {
1541 DRX_AUD_DEVIATION_HIGH 1541 DRX_AUD_DEVIATION_HIGH
1542 }; 1542 };
1543 1543
1544/** 1544/*
1545* /enum enum drx_no_carrier_option * setting for carrier, mute/noise. 1545* /enum enum drx_no_carrier_option * setting for carrier, mute/noise.
1546* 1546*
1547*/ 1547*/
@@ -1550,7 +1550,7 @@ struct drx_version_list {
1550 DRX_NO_CARRIER_NOISE 1550 DRX_NO_CARRIER_NOISE
1551 }; 1551 };
1552 1552
1553/** 1553/*
1554* \enum DRXAudAutoSound_t 1554* \enum DRXAudAutoSound_t
1555* \brief Automatic Sound 1555* \brief Automatic Sound
1556*/ 1556*/
@@ -1560,7 +1560,7 @@ struct drx_version_list {
1560 DRX_AUD_AUTO_SOUND_SELECT_ON_CHANGE_OFF 1560 DRX_AUD_AUTO_SOUND_SELECT_ON_CHANGE_OFF
1561 }; 1561 };
1562 1562
1563/** 1563/*
1564* \enum DRXAudASSThres_t 1564* \enum DRXAudASSThres_t
1565* \brief Automatic Sound Select Thresholds 1565* \brief Automatic Sound Select Thresholds
1566*/ 1566*/
@@ -1570,7 +1570,7 @@ struct drx_version_list {
1570 u16 nicam; /* Nicam Threshold for ASS configuration */ 1570 u16 nicam; /* Nicam Threshold for ASS configuration */
1571 }; 1571 };
1572 1572
1573/** 1573/*
1574* \struct struct drx_aud_carrier * \brief Carrier detection related parameters 1574* \struct struct drx_aud_carrier * \brief Carrier detection related parameters
1575*/ 1575*/
1576 struct drx_aud_carrier { 1576 struct drx_aud_carrier {
@@ -1580,7 +1580,7 @@ struct drx_version_list {
1580 s32 dco; /* frequency adjustment (A) */ 1580 s32 dco; /* frequency adjustment (A) */
1581 }; 1581 };
1582 1582
1583/** 1583/*
1584* \struct struct drx_cfg_aud_carriers * \brief combining carrier A & B to one struct 1584* \struct struct drx_cfg_aud_carriers * \brief combining carrier A & B to one struct
1585*/ 1585*/
1586 struct drx_cfg_aud_carriers { 1586 struct drx_cfg_aud_carriers {
@@ -1588,7 +1588,7 @@ struct drx_version_list {
1588 struct drx_aud_carrier b; 1588 struct drx_aud_carrier b;
1589 }; 1589 };
1590 1590
1591/** 1591/*
1592* /enum enum drx_aud_i2s_src * Selection of audio source 1592* /enum enum drx_aud_i2s_src * Selection of audio source
1593*/ 1593*/
1594 enum drx_aud_i2s_src { 1594 enum drx_aud_i2s_src {
@@ -1597,19 +1597,19 @@ struct drx_version_list {
1597 DRX_AUD_SRC_STEREO_OR_A, 1597 DRX_AUD_SRC_STEREO_OR_A,
1598 DRX_AUD_SRC_STEREO_OR_B}; 1598 DRX_AUD_SRC_STEREO_OR_B};
1599 1599
1600/** 1600/*
1601* \enum enum drx_aud_i2s_matrix * \brief Used for selecting I2S output. 1601* \enum enum drx_aud_i2s_matrix * \brief Used for selecting I2S output.
1602*/ 1602*/
1603 enum drx_aud_i2s_matrix { 1603 enum drx_aud_i2s_matrix {
1604 DRX_AUD_I2S_MATRIX_A_MONO, 1604 DRX_AUD_I2S_MATRIX_A_MONO,
1605 /**< A sound only, stereo or mono */ 1605 /*< A sound only, stereo or mono */
1606 DRX_AUD_I2S_MATRIX_B_MONO, 1606 DRX_AUD_I2S_MATRIX_B_MONO,
1607 /**< B sound only, stereo or mono */ 1607 /*< B sound only, stereo or mono */
1608 DRX_AUD_I2S_MATRIX_STEREO, 1608 DRX_AUD_I2S_MATRIX_STEREO,
1609 /**< A+B sound, transparant */ 1609 /*< A+B sound, transparant */
1610 DRX_AUD_I2S_MATRIX_MONO /**< A+B mixed to mono sum, (L+R)/2 */}; 1610 DRX_AUD_I2S_MATRIX_MONO /*< A+B mixed to mono sum, (L+R)/2 */};
1611 1611
1612/** 1612/*
1613* /enum enum drx_aud_fm_matrix * setting for FM-Matrix in audio demodulator. 1613* /enum enum drx_aud_fm_matrix * setting for FM-Matrix in audio demodulator.
1614* 1614*
1615*/ 1615*/
@@ -1620,7 +1620,7 @@ struct drx_version_list {
1620 DRX_AUD_FM_MATRIX_SOUND_A, 1620 DRX_AUD_FM_MATRIX_SOUND_A,
1621 DRX_AUD_FM_MATRIX_SOUND_B}; 1621 DRX_AUD_FM_MATRIX_SOUND_B};
1622 1622
1623/** 1623/*
1624* \struct DRXAudMatrices_t 1624* \struct DRXAudMatrices_t
1625* \brief Mixer settings 1625* \brief Mixer settings
1626*/ 1626*/
@@ -1630,22 +1630,22 @@ struct drx_cfg_aud_mixer {
1630 enum drx_aud_fm_matrix matrix_fm; 1630 enum drx_aud_fm_matrix matrix_fm;
1631}; 1631};
1632 1632
1633/** 1633/*
1634* \enum DRXI2SVidSync_t 1634* \enum DRXI2SVidSync_t
1635* \brief Audio/video synchronization, interacts with I2S mode. 1635* \brief Audio/video synchronization, interacts with I2S mode.
1636* AUTO_1 and AUTO_2 are for automatic video standard detection with preference 1636* AUTO_1 and AUTO_2 are for automatic video standard detection with preference
1637* for NTSC or Monochrome, because the frequencies are too close (59.94 & 60 Hz) 1637* for NTSC or Monochrome, because the frequencies are too close (59.94 & 60 Hz)
1638*/ 1638*/
1639 enum drx_cfg_aud_av_sync { 1639 enum drx_cfg_aud_av_sync {
1640 DRX_AUD_AVSYNC_OFF,/**< audio/video synchronization is off */ 1640 DRX_AUD_AVSYNC_OFF,/*< audio/video synchronization is off */
1641 DRX_AUD_AVSYNC_NTSC, 1641 DRX_AUD_AVSYNC_NTSC,
1642 /**< it is an NTSC system */ 1642 /*< it is an NTSC system */
1643 DRX_AUD_AVSYNC_MONOCHROME, 1643 DRX_AUD_AVSYNC_MONOCHROME,
1644 /**< it is a MONOCHROME system */ 1644 /*< it is a MONOCHROME system */
1645 DRX_AUD_AVSYNC_PAL_SECAM 1645 DRX_AUD_AVSYNC_PAL_SECAM
1646 /**< it is a PAL/SECAM system */}; 1646 /*< it is a PAL/SECAM system */};
1647 1647
1648/** 1648/*
1649* \struct struct drx_cfg_aud_prescale * \brief Prescalers 1649* \struct struct drx_cfg_aud_prescale * \brief Prescalers
1650*/ 1650*/
1651struct drx_cfg_aud_prescale { 1651struct drx_cfg_aud_prescale {
@@ -1653,7 +1653,7 @@ struct drx_cfg_aud_prescale {
1653 s16 nicam_gain; 1653 s16 nicam_gain;
1654}; 1654};
1655 1655
1656/** 1656/*
1657* \struct struct drx_aud_beep * \brief Beep 1657* \struct struct drx_aud_beep * \brief Beep
1658*/ 1658*/
1659struct drx_aud_beep { 1659struct drx_aud_beep {
@@ -1662,14 +1662,14 @@ struct drx_aud_beep {
1662 bool mute; 1662 bool mute;
1663}; 1663};
1664 1664
1665/** 1665/*
1666* \enum enum drx_aud_btsc_detect * \brief BTSC detetcion mode 1666* \enum enum drx_aud_btsc_detect * \brief BTSC detetcion mode
1667*/ 1667*/
1668 enum drx_aud_btsc_detect { 1668 enum drx_aud_btsc_detect {
1669 DRX_BTSC_STEREO, 1669 DRX_BTSC_STEREO,
1670 DRX_BTSC_MONO_AND_SAP}; 1670 DRX_BTSC_MONO_AND_SAP};
1671 1671
1672/** 1672/*
1673* \struct struct drx_aud_data * \brief Audio data structure 1673* \struct struct drx_aud_data * \brief Audio data structure
1674*/ 1674*/
1675struct drx_aud_data { 1675struct drx_aud_data {
@@ -1692,7 +1692,7 @@ struct drx_aud_data {
1692 bool rds_data_present; 1692 bool rds_data_present;
1693}; 1693};
1694 1694
1695/** 1695/*
1696* \enum enum drx_qam_lock_range * \brief QAM lock range mode 1696* \enum enum drx_qam_lock_range * \brief QAM lock range mode
1697*/ 1697*/
1698 enum drx_qam_lock_range { 1698 enum drx_qam_lock_range {
@@ -1782,7 +1782,7 @@ struct drx_aud_data {
1782 u32 wdata, /* data to write */ 1782 u32 wdata, /* data to write */
1783 u32 *rdata); /* data to read */ 1783 u32 *rdata); /* data to read */
1784 1784
1785/** 1785/*
1786* \struct struct drx_access_func * \brief Interface to an access protocol. 1786* \struct struct drx_access_func * \brief Interface to an access protocol.
1787*/ 1787*/
1788struct drx_access_func { 1788struct drx_access_func {
@@ -1811,85 +1811,85 @@ struct drx_reg_dump {
1811/*============================================================================*/ 1811/*============================================================================*/
1812/*============================================================================*/ 1812/*============================================================================*/
1813 1813
1814/** 1814/*
1815* \struct struct drx_common_attr * \brief Set of common attributes, shared by all DRX devices. 1815* \struct struct drx_common_attr * \brief Set of common attributes, shared by all DRX devices.
1816*/ 1816*/
1817 struct drx_common_attr { 1817 struct drx_common_attr {
1818 /* Microcode (firmware) attributes */ 1818 /* Microcode (firmware) attributes */
1819 char *microcode_file; /**< microcode filename */ 1819 char *microcode_file; /*< microcode filename */
1820 bool verify_microcode; 1820 bool verify_microcode;
1821 /**< Use microcode verify or not. */ 1821 /*< Use microcode verify or not. */
1822 struct drx_mc_version_rec mcversion; 1822 struct drx_mc_version_rec mcversion;
1823 /**< Version record of microcode from file */ 1823 /*< Version record of microcode from file */
1824 1824
1825 /* Clocks and tuner attributes */ 1825 /* Clocks and tuner attributes */
1826 s32 intermediate_freq; 1826 s32 intermediate_freq;
1827 /**< IF,if tuner instance not used. (kHz)*/ 1827 /*< IF,if tuner instance not used. (kHz)*/
1828 s32 sys_clock_freq; 1828 s32 sys_clock_freq;
1829 /**< Systemclock frequency. (kHz) */ 1829 /*< Systemclock frequency. (kHz) */
1830 s32 osc_clock_freq; 1830 s32 osc_clock_freq;
1831 /**< Oscillator clock frequency. (kHz) */ 1831 /*< Oscillator clock frequency. (kHz) */
1832 s16 osc_clock_deviation; 1832 s16 osc_clock_deviation;
1833 /**< Oscillator clock deviation. (ppm) */ 1833 /*< Oscillator clock deviation. (ppm) */
1834 bool mirror_freq_spect; 1834 bool mirror_freq_spect;
1835 /**< Mirror IF frequency spectrum or not.*/ 1835 /*< Mirror IF frequency spectrum or not.*/
1836 1836
1837 /* Initial MPEG output attributes */ 1837 /* Initial MPEG output attributes */
1838 struct drx_cfg_mpeg_output mpeg_cfg; 1838 struct drx_cfg_mpeg_output mpeg_cfg;
1839 /**< MPEG configuration */ 1839 /*< MPEG configuration */
1840 1840
1841 bool is_opened; /**< if true instance is already opened. */ 1841 bool is_opened; /*< if true instance is already opened. */
1842 1842
1843 /* Channel scan */ 1843 /* Channel scan */
1844 struct drx_scan_param *scan_param; 1844 struct drx_scan_param *scan_param;
1845 /**< scan parameters */ 1845 /*< scan parameters */
1846 u16 scan_freq_plan_index; 1846 u16 scan_freq_plan_index;
1847 /**< next index in freq plan */ 1847 /*< next index in freq plan */
1848 s32 scan_next_frequency; 1848 s32 scan_next_frequency;
1849 /**< next freq to scan */ 1849 /*< next freq to scan */
1850 bool scan_ready; /**< scan ready flag */ 1850 bool scan_ready; /*< scan ready flag */
1851 u32 scan_max_channels;/**< number of channels in freqplan */ 1851 u32 scan_max_channels;/*< number of channels in freqplan */
1852 u32 scan_channels_scanned; 1852 u32 scan_channels_scanned;
1853 /**< number of channels scanned */ 1853 /*< number of channels scanned */
1854 /* Channel scan - inner loop: demod related */ 1854 /* Channel scan - inner loop: demod related */
1855 drx_scan_func_t scan_function; 1855 drx_scan_func_t scan_function;
1856 /**< function to check channel */ 1856 /*< function to check channel */
1857 /* Channel scan - inner loop: SYSObj related */ 1857 /* Channel scan - inner loop: SYSObj related */
1858 void *scan_context; /**< Context Pointer of SYSObj */ 1858 void *scan_context; /*< Context Pointer of SYSObj */
1859 /* Channel scan - parameters for default DTV scan function in core driver */ 1859 /* Channel scan - parameters for default DTV scan function in core driver */
1860 u16 scan_demod_lock_timeout; 1860 u16 scan_demod_lock_timeout;
1861 /**< millisecs to wait for lock */ 1861 /*< millisecs to wait for lock */
1862 enum drx_lock_status scan_desired_lock; 1862 enum drx_lock_status scan_desired_lock;
1863 /**< lock requirement for channel found */ 1863 /*< lock requirement for channel found */
1864 /* scan_active can be used by SetChannel to decide how to program the tuner, 1864 /* scan_active can be used by SetChannel to decide how to program the tuner,
1865 fast or slow (but stable). Usually fast during scan. */ 1865 fast or slow (but stable). Usually fast during scan. */
1866 bool scan_active; /**< true when scan routines are active */ 1866 bool scan_active; /*< true when scan routines are active */
1867 1867
1868 /* Power management */ 1868 /* Power management */
1869 enum drx_power_mode current_power_mode; 1869 enum drx_power_mode current_power_mode;
1870 /**< current power management mode */ 1870 /*< current power management mode */
1871 1871
1872 /* Tuner */ 1872 /* Tuner */
1873 u8 tuner_port_nr; /**< nr of I2C port to wich tuner is */ 1873 u8 tuner_port_nr; /*< nr of I2C port to wich tuner is */
1874 s32 tuner_min_freq_rf; 1874 s32 tuner_min_freq_rf;
1875 /**< minimum RF input frequency, in kHz */ 1875 /*< minimum RF input frequency, in kHz */
1876 s32 tuner_max_freq_rf; 1876 s32 tuner_max_freq_rf;
1877 /**< maximum RF input frequency, in kHz */ 1877 /*< maximum RF input frequency, in kHz */
1878 bool tuner_rf_agc_pol; /**< if true invert RF AGC polarity */ 1878 bool tuner_rf_agc_pol; /*< if true invert RF AGC polarity */
1879 bool tuner_if_agc_pol; /**< if true invert IF AGC polarity */ 1879 bool tuner_if_agc_pol; /*< if true invert IF AGC polarity */
1880 bool tuner_slow_mode; /**< if true invert IF AGC polarity */ 1880 bool tuner_slow_mode; /*< if true invert IF AGC polarity */
1881 1881
1882 struct drx_channel current_channel; 1882 struct drx_channel current_channel;
1883 /**< current channel parameters */ 1883 /*< current channel parameters */
1884 enum drx_standard current_standard; 1884 enum drx_standard current_standard;
1885 /**< current standard selection */ 1885 /*< current standard selection */
1886 enum drx_standard prev_standard; 1886 enum drx_standard prev_standard;
1887 /**< previous standard selection */ 1887 /*< previous standard selection */
1888 enum drx_standard di_cache_standard; 1888 enum drx_standard di_cache_standard;
1889 /**< standard in DI cache if available */ 1889 /*< standard in DI cache if available */
1890 bool use_bootloader; /**< use bootloader in open */ 1890 bool use_bootloader; /*< use bootloader in open */
1891 u32 capabilities; /**< capabilities flags */ 1891 u32 capabilities; /*< capabilities flags */
1892 u32 product_id; /**< product ID inc. metal fix number */}; 1892 u32 product_id; /*< product ID inc. metal fix number */};
1893 1893
1894/* 1894/*
1895* Generic functions for DRX devices. 1895* Generic functions for DRX devices.
@@ -1897,16 +1897,16 @@ struct drx_reg_dump {
1897 1897
1898struct drx_demod_instance; 1898struct drx_demod_instance;
1899 1899
1900/** 1900/*
1901* \struct struct drx_demod_instance * \brief Top structure of demodulator instance. 1901* \struct struct drx_demod_instance * \brief Top structure of demodulator instance.
1902*/ 1902*/
1903struct drx_demod_instance { 1903struct drx_demod_instance {
1904 /**< data access protocol functions */ 1904 /*< data access protocol functions */
1905 struct i2c_device_addr *my_i2c_dev_addr; 1905 struct i2c_device_addr *my_i2c_dev_addr;
1906 /**< i2c address and device identifier */ 1906 /*< i2c address and device identifier */
1907 struct drx_common_attr *my_common_attr; 1907 struct drx_common_attr *my_common_attr;
1908 /**< common DRX attributes */ 1908 /*< common DRX attributes */
1909 void *my_ext_attr; /**< device specific attributes */ 1909 void *my_ext_attr; /*< device specific attributes */
1910 /* generic demodulator data */ 1910 /* generic demodulator data */
1911 1911
1912 struct i2c_adapter *i2c; 1912 struct i2c_adapter *i2c;
@@ -2195,7 +2195,7 @@ Conversion from enum values to human readable form.
2195Access macros 2195Access macros
2196-------------------------------------------------------------------------*/ 2196-------------------------------------------------------------------------*/
2197 2197
2198/** 2198/*
2199* \brief Create a compilable reference to the microcode attribute 2199* \brief Create a compilable reference to the microcode attribute
2200* \param d pointer to demod instance 2200* \param d pointer to demod instance
2201* 2201*
@@ -2229,7 +2229,7 @@ Access macros
2229#define DRX_ATTR_I2CDEVID(d) ((d)->my_i2c_dev_addr->i2c_dev_id) 2229#define DRX_ATTR_I2CDEVID(d) ((d)->my_i2c_dev_addr->i2c_dev_id)
2230#define DRX_ISMCVERTYPE(x) ((x) == AUX_VER_RECORD) 2230#define DRX_ISMCVERTYPE(x) ((x) == AUX_VER_RECORD)
2231 2231
2232/**************************/ 2232/*************************/
2233 2233
2234/* Macros with device-specific handling are converted to CFG functions */ 2234/* Macros with device-specific handling are converted to CFG functions */
2235 2235
@@ -2285,7 +2285,7 @@ Access macros
2285#define DRX_GET_QAM_LOCKRANGE(d, x) DRX_ACCESSMACRO_GET((d), (x), \ 2285#define DRX_GET_QAM_LOCKRANGE(d, x) DRX_ACCESSMACRO_GET((d), (x), \
2286 DRX_XS_CFG_QAM_LOCKRANGE, enum drx_qam_lock_range, DRX_UNKNOWN) 2286 DRX_XS_CFG_QAM_LOCKRANGE, enum drx_qam_lock_range, DRX_UNKNOWN)
2287 2287
2288/** 2288/*
2289* \brief Macro to check if std is an ATV standard 2289* \brief Macro to check if std is an ATV standard
2290* \retval true std is an ATV standard 2290* \retval true std is an ATV standard
2291* \retval false std is an ATV standard 2291* \retval false std is an ATV standard
@@ -2298,7 +2298,7 @@ Access macros
2298 ((std) == DRX_STANDARD_NTSC) || \ 2298 ((std) == DRX_STANDARD_NTSC) || \
2299 ((std) == DRX_STANDARD_FM)) 2299 ((std) == DRX_STANDARD_FM))
2300 2300
2301/** 2301/*
2302* \brief Macro to check if std is an QAM standard 2302* \brief Macro to check if std is an QAM standard
2303* \retval true std is an QAM standards 2303* \retval true std is an QAM standards
2304* \retval false std is an QAM standards 2304* \retval false std is an QAM standards
@@ -2308,14 +2308,14 @@ Access macros
2308 ((std) == DRX_STANDARD_ITU_C) || \ 2308 ((std) == DRX_STANDARD_ITU_C) || \
2309 ((std) == DRX_STANDARD_ITU_D)) 2309 ((std) == DRX_STANDARD_ITU_D))
2310 2310
2311/** 2311/*
2312* \brief Macro to check if std is VSB standard 2312* \brief Macro to check if std is VSB standard
2313* \retval true std is VSB standard 2313* \retval true std is VSB standard
2314* \retval false std is not VSB standard 2314* \retval false std is not VSB standard
2315*/ 2315*/
2316#define DRX_ISVSBSTD(std) ((std) == DRX_STANDARD_8VSB) 2316#define DRX_ISVSBSTD(std) ((std) == DRX_STANDARD_8VSB)
2317 2317
2318/** 2318/*
2319* \brief Macro to check if std is DVBT standard 2319* \brief Macro to check if std is DVBT standard
2320* \retval true std is DVBT standard 2320* \retval true std is DVBT standard
2321* \retval false std is not DVBT standard 2321* \retval false std is not DVBT standard
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index 499ccff557bf..8cbd8cc21059 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -73,7 +73,7 @@ INCLUDE FILES
73 73
74#define DRX39XX_MAIN_FIRMWARE "dvb-fe-drxj-mc-1.0.8.fw" 74#define DRX39XX_MAIN_FIRMWARE "dvb-fe-drxj-mc-1.0.8.fw"
75 75
76/** 76/*
77* \brief Maximum u32 value. 77* \brief Maximum u32 value.
78*/ 78*/
79#ifndef MAX_U32 79#ifndef MAX_U32
@@ -100,8 +100,8 @@ INCLUDE FILES
100#ifndef OOB_DRX_DRIVE_STRENGTH 100#ifndef OOB_DRX_DRIVE_STRENGTH
101#define OOB_DRX_DRIVE_STRENGTH 0x02 101#define OOB_DRX_DRIVE_STRENGTH 0x02
102#endif 102#endif
103/**** START DJCOMBO patches to DRXJ registermap constants *********************/ 103/*** START DJCOMBO patches to DRXJ registermap constants *********************/
104/**** registermap 200706071303 from drxj **************************************/ 104/*** registermap 200706071303 from drxj **************************************/
105#define ATV_TOP_CR_AMP_TH_FM 0x0 105#define ATV_TOP_CR_AMP_TH_FM 0x0
106#define ATV_TOP_CR_AMP_TH_L 0xA 106#define ATV_TOP_CR_AMP_TH_L 0xA
107#define ATV_TOP_CR_AMP_TH_LP 0xA 107#define ATV_TOP_CR_AMP_TH_LP 0xA
@@ -188,7 +188,7 @@ INCLUDE FILES
188#define IQM_RC_ADJ_SEL_B_OFF 0x0 188#define IQM_RC_ADJ_SEL_B_OFF 0x0
189#define IQM_RC_ADJ_SEL_B_QAM 0x1 189#define IQM_RC_ADJ_SEL_B_QAM 0x1
190#define IQM_RC_ADJ_SEL_B_VSB 0x2 190#define IQM_RC_ADJ_SEL_B_VSB 0x2
191/**** END DJCOMBO patches to DRXJ registermap *********************************/ 191/*** END DJCOMBO patches to DRXJ registermap *********************************/
192 192
193#include "drx_driver_version.h" 193#include "drx_driver_version.h"
194 194
@@ -208,25 +208,25 @@ DEFINES
208#define DRXJ_WAKE_UP_KEY (demod->my_i2c_dev_addr->i2c_addr) 208#define DRXJ_WAKE_UP_KEY (demod->my_i2c_dev_addr->i2c_addr)
209#endif 209#endif
210 210
211/** 211/*
212* \def DRXJ_DEF_I2C_ADDR 212* \def DRXJ_DEF_I2C_ADDR
213* \brief Default I2C address of a demodulator instance. 213* \brief Default I2C address of a demodulator instance.
214*/ 214*/
215#define DRXJ_DEF_I2C_ADDR (0x52) 215#define DRXJ_DEF_I2C_ADDR (0x52)
216 216
217/** 217/*
218* \def DRXJ_DEF_DEMOD_DEV_ID 218* \def DRXJ_DEF_DEMOD_DEV_ID
219* \brief Default device identifier of a demodultor instance. 219* \brief Default device identifier of a demodultor instance.
220*/ 220*/
221#define DRXJ_DEF_DEMOD_DEV_ID (1) 221#define DRXJ_DEF_DEMOD_DEV_ID (1)
222 222
223/** 223/*
224* \def DRXJ_SCAN_TIMEOUT 224* \def DRXJ_SCAN_TIMEOUT
225* \brief Timeout value for waiting on demod lock during channel scan (millisec). 225* \brief Timeout value for waiting on demod lock during channel scan (millisec).
226*/ 226*/
227#define DRXJ_SCAN_TIMEOUT 1000 227#define DRXJ_SCAN_TIMEOUT 1000
228 228
229/** 229/*
230* \def HI_I2C_DELAY 230* \def HI_I2C_DELAY
231* \brief HI timing delay for I2C timing (in nano seconds) 231* \brief HI timing delay for I2C timing (in nano seconds)
232* 232*
@@ -234,7 +234,7 @@ DEFINES
234*/ 234*/
235#define HI_I2C_DELAY 42 235#define HI_I2C_DELAY 42
236 236
237/** 237/*
238* \def HI_I2C_BRIDGE_DELAY 238* \def HI_I2C_BRIDGE_DELAY
239* \brief HI timing delay for I2C timing (in nano seconds) 239* \brief HI timing delay for I2C timing (in nano seconds)
240* 240*
@@ -242,13 +242,13 @@ DEFINES
242*/ 242*/
243#define HI_I2C_BRIDGE_DELAY 750 243#define HI_I2C_BRIDGE_DELAY 750
244 244
245/** 245/*
246* \brief Time Window for MER and SER Measurement in Units of Segment duration. 246* \brief Time Window for MER and SER Measurement in Units of Segment duration.
247*/ 247*/
248#define VSB_TOP_MEASUREMENT_PERIOD 64 248#define VSB_TOP_MEASUREMENT_PERIOD 64
249#define SYMBOLS_PER_SEGMENT 832 249#define SYMBOLS_PER_SEGMENT 832
250 250
251/** 251/*
252* \brief bit rate and segment rate constants used for SER and BER. 252* \brief bit rate and segment rate constants used for SER and BER.
253*/ 253*/
254/* values taken from the QAM microcode */ 254/* values taken from the QAM microcode */
@@ -260,21 +260,21 @@ DEFINES
260#define DRXJ_QAM_SL_SIG_POWER_QAM64 43008 260#define DRXJ_QAM_SL_SIG_POWER_QAM64 43008
261#define DRXJ_QAM_SL_SIG_POWER_QAM128 20992 261#define DRXJ_QAM_SL_SIG_POWER_QAM128 20992
262#define DRXJ_QAM_SL_SIG_POWER_QAM256 43520 262#define DRXJ_QAM_SL_SIG_POWER_QAM256 43520
263/** 263/*
264* \brief Min supported symbolrates. 264* \brief Min supported symbolrates.
265*/ 265*/
266#ifndef DRXJ_QAM_SYMBOLRATE_MIN 266#ifndef DRXJ_QAM_SYMBOLRATE_MIN
267#define DRXJ_QAM_SYMBOLRATE_MIN (520000) 267#define DRXJ_QAM_SYMBOLRATE_MIN (520000)
268#endif 268#endif
269 269
270/** 270/*
271* \brief Max supported symbolrates. 271* \brief Max supported symbolrates.
272*/ 272*/
273#ifndef DRXJ_QAM_SYMBOLRATE_MAX 273#ifndef DRXJ_QAM_SYMBOLRATE_MAX
274#define DRXJ_QAM_SYMBOLRATE_MAX (7233000) 274#define DRXJ_QAM_SYMBOLRATE_MAX (7233000)
275#endif 275#endif
276 276
277/** 277/*
278* \def DRXJ_QAM_MAX_WAITTIME 278* \def DRXJ_QAM_MAX_WAITTIME
279* \brief Maximal wait time for QAM auto constellation in ms 279* \brief Maximal wait time for QAM auto constellation in ms
280*/ 280*/
@@ -290,7 +290,7 @@ DEFINES
290#define DRXJ_QAM_DEMOD_LOCK_EXT_WAITTIME 200 290#define DRXJ_QAM_DEMOD_LOCK_EXT_WAITTIME 200
291#endif 291#endif
292 292
293/** 293/*
294* \def SCU status and results 294* \def SCU status and results
295* \brief SCU 295* \brief SCU
296*/ 296*/
@@ -299,7 +299,7 @@ DEFINES
299#define FEC_RS_MEASUREMENT_PERIOD 12894 /* 1 sec */ 299#define FEC_RS_MEASUREMENT_PERIOD 12894 /* 1 sec */
300#define FEC_RS_MEASUREMENT_PRESCALE 1 /* n sec */ 300#define FEC_RS_MEASUREMENT_PRESCALE 1 /* n sec */
301 301
302/** 302/*
303* \def DRX_AUD_MAX_DEVIATION 303* \def DRX_AUD_MAX_DEVIATION
304* \brief Needed for calculation of prescale feature in AUD 304* \brief Needed for calculation of prescale feature in AUD
305*/ 305*/
@@ -307,14 +307,14 @@ DEFINES
307#define DRXJ_AUD_MAX_FM_DEVIATION 100 /* kHz */ 307#define DRXJ_AUD_MAX_FM_DEVIATION 100 /* kHz */
308#endif 308#endif
309 309
310/** 310/*
311* \brief Needed for calculation of NICAM prescale feature in AUD 311* \brief Needed for calculation of NICAM prescale feature in AUD
312*/ 312*/
313#ifndef DRXJ_AUD_MAX_NICAM_PRESCALE 313#ifndef DRXJ_AUD_MAX_NICAM_PRESCALE
314#define DRXJ_AUD_MAX_NICAM_PRESCALE (9) /* dB */ 314#define DRXJ_AUD_MAX_NICAM_PRESCALE (9) /* dB */
315#endif 315#endif
316 316
317/** 317/*
318* \brief Needed for calculation of NICAM prescale feature in AUD 318* \brief Needed for calculation of NICAM prescale feature in AUD
319*/ 319*/
320#ifndef DRXJ_AUD_MAX_WAITTIME 320#ifndef DRXJ_AUD_MAX_WAITTIME
@@ -371,21 +371,21 @@ DEFINES
371/*============================================================================*/ 371/*============================================================================*/
372/*=== GLOBAL VARIABLEs =======================================================*/ 372/*=== GLOBAL VARIABLEs =======================================================*/
373/*============================================================================*/ 373/*============================================================================*/
374/** 374/*
375*/ 375*/
376 376
377/** 377/*
378* \brief Temporary register definitions. 378* \brief Temporary register definitions.
379* (register definitions that are not yet available in register master) 379* (register definitions that are not yet available in register master)
380*/ 380*/
381 381
382/******************************************************************************/ 382/*****************************************************************************/
383/* Audio block 0x103 is write only. To avoid shadowing in driver accessing */ 383/* Audio block 0x103 is write only. To avoid shadowing in driver accessing */
384/* RAM adresses directly. This must be READ ONLY to avoid problems. */ 384/* RAM adresses directly. This must be READ ONLY to avoid problems. */
385/* Writing to the interface adresses is more than only writing the RAM */ 385/* Writing to the interface adresses is more than only writing the RAM */
386/* locations */ 386/* locations */
387/******************************************************************************/ 387/*****************************************************************************/
388/** 388/*
389* \brief RAM location of MODUS registers 389* \brief RAM location of MODUS registers
390*/ 390*/
391#define AUD_DEM_RAM_MODUS_HI__A 0x10204A3 391#define AUD_DEM_RAM_MODUS_HI__A 0x10204A3
@@ -394,13 +394,13 @@ DEFINES
394#define AUD_DEM_RAM_MODUS_LO__A 0x10204A4 394#define AUD_DEM_RAM_MODUS_LO__A 0x10204A4
395#define AUD_DEM_RAM_MODUS_LO__M 0x0FFF 395#define AUD_DEM_RAM_MODUS_LO__M 0x0FFF
396 396
397/** 397/*
398* \brief RAM location of I2S config registers 398* \brief RAM location of I2S config registers
399*/ 399*/
400#define AUD_DEM_RAM_I2S_CONFIG1__A 0x10204B1 400#define AUD_DEM_RAM_I2S_CONFIG1__A 0x10204B1
401#define AUD_DEM_RAM_I2S_CONFIG2__A 0x10204B2 401#define AUD_DEM_RAM_I2S_CONFIG2__A 0x10204B2
402 402
403/** 403/*
404* \brief RAM location of DCO config registers 404* \brief RAM location of DCO config registers
405*/ 405*/
406#define AUD_DEM_RAM_DCO_B_HI__A 0x1020461 406#define AUD_DEM_RAM_DCO_B_HI__A 0x1020461
@@ -408,20 +408,20 @@ DEFINES
408#define AUD_DEM_RAM_DCO_A_HI__A 0x1020463 408#define AUD_DEM_RAM_DCO_A_HI__A 0x1020463
409#define AUD_DEM_RAM_DCO_A_LO__A 0x1020464 409#define AUD_DEM_RAM_DCO_A_LO__A 0x1020464
410 410
411/** 411/*
412* \brief RAM location of Threshold registers 412* \brief RAM location of Threshold registers
413*/ 413*/
414#define AUD_DEM_RAM_NICAM_THRSHLD__A 0x102045A 414#define AUD_DEM_RAM_NICAM_THRSHLD__A 0x102045A
415#define AUD_DEM_RAM_A2_THRSHLD__A 0x10204BB 415#define AUD_DEM_RAM_A2_THRSHLD__A 0x10204BB
416#define AUD_DEM_RAM_BTSC_THRSHLD__A 0x10204A6 416#define AUD_DEM_RAM_BTSC_THRSHLD__A 0x10204A6
417 417
418/** 418/*
419* \brief RAM location of Carrier Threshold registers 419* \brief RAM location of Carrier Threshold registers
420*/ 420*/
421#define AUD_DEM_RAM_CM_A_THRSHLD__A 0x10204AF 421#define AUD_DEM_RAM_CM_A_THRSHLD__A 0x10204AF
422#define AUD_DEM_RAM_CM_B_THRSHLD__A 0x10204B0 422#define AUD_DEM_RAM_CM_B_THRSHLD__A 0x10204B0
423 423
424/** 424/*
425* \brief FM Matrix register fix 425* \brief FM Matrix register fix
426*/ 426*/
427#ifdef AUD_DEM_WR_FM_MATRIX__A 427#ifdef AUD_DEM_WR_FM_MATRIX__A
@@ -430,7 +430,7 @@ DEFINES
430#define AUD_DEM_WR_FM_MATRIX__A 0x105006F 430#define AUD_DEM_WR_FM_MATRIX__A 0x105006F
431 431
432/*============================================================================*/ 432/*============================================================================*/
433/** 433/*
434* \brief Defines required for audio 434* \brief Defines required for audio
435*/ 435*/
436#define AUD_VOLUME_ZERO_DB 115 436#define AUD_VOLUME_ZERO_DB 115
@@ -443,14 +443,14 @@ DEFINES
443#define AUD_I2S_FREQUENCY_MIN 12000UL 443#define AUD_I2S_FREQUENCY_MIN 12000UL
444#define AUD_RDS_ARRAY_SIZE 18 444#define AUD_RDS_ARRAY_SIZE 18
445 445
446/** 446/*
447* \brief Needed for calculation of prescale feature in AUD 447* \brief Needed for calculation of prescale feature in AUD
448*/ 448*/
449#ifndef DRX_AUD_MAX_FM_DEVIATION 449#ifndef DRX_AUD_MAX_FM_DEVIATION
450#define DRX_AUD_MAX_FM_DEVIATION (100) /* kHz */ 450#define DRX_AUD_MAX_FM_DEVIATION (100) /* kHz */
451#endif 451#endif
452 452
453/** 453/*
454* \brief Needed for calculation of NICAM prescale feature in AUD 454* \brief Needed for calculation of NICAM prescale feature in AUD
455*/ 455*/
456#ifndef DRX_AUD_MAX_NICAM_PRESCALE 456#ifndef DRX_AUD_MAX_NICAM_PRESCALE
@@ -478,7 +478,7 @@ DEFINES
478/*=== REGISTER ACCESS MACROS =================================================*/ 478/*=== REGISTER ACCESS MACROS =================================================*/
479/*============================================================================*/ 479/*============================================================================*/
480 480
481/** 481/*
482* This macro is used to create byte arrays for block writes. 482* This macro is used to create byte arrays for block writes.
483* Block writes speed up I2C traffic between host and demod. 483* Block writes speed up I2C traffic between host and demod.
484* The macro takes care of the required byte order in a 16 bits word. 484* The macro takes care of the required byte order in a 16 bits word.
@@ -486,7 +486,7 @@ DEFINES
486*/ 486*/
487#define DRXJ_16TO8(x) ((u8) (((u16)x) & 0xFF)), \ 487#define DRXJ_16TO8(x) ((u8) (((u16)x) & 0xFF)), \
488 ((u8)((((u16)x)>>8)&0xFF)) 488 ((u8)((((u16)x)>>8)&0xFF))
489/** 489/*
490* This macro is used to convert byte array to 16 bit register value for block read. 490* This macro is used to convert byte array to 16 bit register value for block read.
491* Block read speed up I2C traffic between host and demod. 491* Block read speed up I2C traffic between host and demod.
492* The macro takes care of the required byte order in a 16 bits word. 492* The macro takes care of the required byte order in a 16 bits word.
@@ -501,7 +501,7 @@ DEFINES
501/*=== HI COMMAND RELATED DEFINES =============================================*/ 501/*=== HI COMMAND RELATED DEFINES =============================================*/
502/*============================================================================*/ 502/*============================================================================*/
503 503
504/** 504/*
505* \brief General maximum number of retries for ucode command interfaces 505* \brief General maximum number of retries for ucode command interfaces
506*/ 506*/
507#define DRXJ_MAX_RETRIES (100) 507#define DRXJ_MAX_RETRIES (100)
@@ -807,7 +807,7 @@ static struct drxj_data drxj_data_g = {
807 }, 807 },
808}; 808};
809 809
810/** 810/*
811* \var drxj_default_addr_g 811* \var drxj_default_addr_g
812* \brief Default I2C address and device identifier. 812* \brief Default I2C address and device identifier.
813*/ 813*/
@@ -816,7 +816,7 @@ static struct i2c_device_addr drxj_default_addr_g = {
816 DRXJ_DEF_DEMOD_DEV_ID /* device id */ 816 DRXJ_DEF_DEMOD_DEV_ID /* device id */
817}; 817};
818 818
819/** 819/*
820* \var drxj_default_comm_attr_g 820* \var drxj_default_comm_attr_g
821* \brief Default common attributes of a drxj demodulator instance. 821* \brief Default common attributes of a drxj demodulator instance.
822*/ 822*/
@@ -887,7 +887,7 @@ static struct drx_common_attr drxj_default_comm_attr_g = {
887 0 /* mfx */ 887 0 /* mfx */
888}; 888};
889 889
890/** 890/*
891* \var drxj_default_demod_g 891* \var drxj_default_demod_g
892* \brief Default drxj demodulator instance. 892* \brief Default drxj demodulator instance.
893*/ 893*/
@@ -897,7 +897,7 @@ static struct drx_demod_instance drxj_default_demod_g = {
897 &drxj_data_g /* demod device specific attributes */ 897 &drxj_data_g /* demod device specific attributes */
898}; 898};
899 899
900/** 900/*
901* \brief Default audio data structure for DRK demodulator instance. 901* \brief Default audio data structure for DRK demodulator instance.
902* 902*
903* This structure is DRXK specific. 903* This structure is DRXK specific.
@@ -997,7 +997,7 @@ struct drxj_hi_cmd {
997/*=== MICROCODE RELATED STRUCTURES ===========================================*/ 997/*=== MICROCODE RELATED STRUCTURES ===========================================*/
998/*============================================================================*/ 998/*============================================================================*/
999 999
1000/** 1000/*
1001 * struct drxu_code_block_hdr - Structure of the microcode block headers 1001 * struct drxu_code_block_hdr - Structure of the microcode block headers
1002 * 1002 *
1003 * @addr: Destination address of the data in this block 1003 * @addr: Destination address of the data in this block
@@ -1086,7 +1086,7 @@ static u32 frac28(u32 N, u32 D)
1086 return Q1; 1086 return Q1;
1087} 1087}
1088 1088
1089/** 1089/*
1090* \fn u32 log1_times100( u32 x) 1090* \fn u32 log1_times100( u32 x)
1091* \brief Compute: 100*log10(x) 1091* \brief Compute: 100*log10(x)
1092* \param x 32 bits 1092* \param x 32 bits
@@ -1198,7 +1198,7 @@ static u32 log1_times100(u32 x)
1198 1198
1199} 1199}
1200 1200
1201/** 1201/*
1202* \fn u32 frac_times1e6( u16 N, u32 D) 1202* \fn u32 frac_times1e6( u16 N, u32 D)
1203* \brief Compute: (N/D) * 1000000. 1203* \brief Compute: (N/D) * 1000000.
1204* \param N nominator 16-bits. 1204* \param N nominator 16-bits.
@@ -1235,7 +1235,7 @@ static u32 frac_times1e6(u32 N, u32 D)
1235/*============================================================================*/ 1235/*============================================================================*/
1236 1236
1237 1237
1238/** 1238/*
1239* \brief Values for NICAM prescaler gain. Computed from dB to integer 1239* \brief Values for NICAM prescaler gain. Computed from dB to integer
1240* and rounded. For calc used formula: 16*10^(prescaleGain[dB]/20). 1240* and rounded. For calc used formula: 16*10^(prescaleGain[dB]/20).
1241* 1241*
@@ -1280,7 +1280,7 @@ static const u16 nicam_presc_table_val[43] = {
1280#define DRXJ_DAP_AUDTRIF_TIMEOUT 80 /* millisec */ 1280#define DRXJ_DAP_AUDTRIF_TIMEOUT 80 /* millisec */
1281/*============================================================================*/ 1281/*============================================================================*/
1282 1282
1283/** 1283/*
1284* \fn bool is_handled_by_aud_tr_if( u32 addr ) 1284* \fn bool is_handled_by_aud_tr_if( u32 addr )
1285* \brief Check if this address is handled by the audio token ring interface. 1285* \brief Check if this address is handled by the audio token ring interface.
1286* \param addr 1286* \param addr
@@ -1386,7 +1386,7 @@ int drxbsp_i2c_write_read(struct i2c_device_addr *w_dev_addr,
1386 1386
1387/*============================================================================*/ 1387/*============================================================================*/
1388 1388
1389/****************************** 1389/*****************************
1390* 1390*
1391* int drxdap_fasi_read_block ( 1391* int drxdap_fasi_read_block (
1392* struct i2c_device_addr *dev_addr, -- address of I2C device 1392* struct i2c_device_addr *dev_addr, -- address of I2C device
@@ -1498,7 +1498,7 @@ static int drxdap_fasi_read_block(struct i2c_device_addr *dev_addr,
1498} 1498}
1499 1499
1500 1500
1501/****************************** 1501/*****************************
1502* 1502*
1503* int drxdap_fasi_read_reg16 ( 1503* int drxdap_fasi_read_reg16 (
1504* struct i2c_device_addr *dev_addr, -- address of I2C device 1504* struct i2c_device_addr *dev_addr, -- address of I2C device
@@ -1531,7 +1531,7 @@ static int drxdap_fasi_read_reg16(struct i2c_device_addr *dev_addr,
1531 return rc; 1531 return rc;
1532} 1532}
1533 1533
1534/****************************** 1534/*****************************
1535* 1535*
1536* int drxdap_fasi_read_reg32 ( 1536* int drxdap_fasi_read_reg32 (
1537* struct i2c_device_addr *dev_addr, -- address of I2C device 1537* struct i2c_device_addr *dev_addr, -- address of I2C device
@@ -1566,7 +1566,7 @@ static int drxdap_fasi_read_reg32(struct i2c_device_addr *dev_addr,
1566 return rc; 1566 return rc;
1567} 1567}
1568 1568
1569/****************************** 1569/*****************************
1570* 1570*
1571* int drxdap_fasi_write_block ( 1571* int drxdap_fasi_write_block (
1572* struct i2c_device_addr *dev_addr, -- address of I2C device 1572* struct i2c_device_addr *dev_addr, -- address of I2C device
@@ -1705,7 +1705,7 @@ static int drxdap_fasi_write_block(struct i2c_device_addr *dev_addr,
1705 return first_err; 1705 return first_err;
1706} 1706}
1707 1707
1708/****************************** 1708/*****************************
1709* 1709*
1710* int drxdap_fasi_write_reg16 ( 1710* int drxdap_fasi_write_reg16 (
1711* struct i2c_device_addr *dev_addr, -- address of I2C device 1711* struct i2c_device_addr *dev_addr, -- address of I2C device
@@ -1734,7 +1734,7 @@ static int drxdap_fasi_write_reg16(struct i2c_device_addr *dev_addr,
1734 return drxdap_fasi_write_block(dev_addr, addr, sizeof(data), buf, flags); 1734 return drxdap_fasi_write_block(dev_addr, addr, sizeof(data), buf, flags);
1735} 1735}
1736 1736
1737/****************************** 1737/*****************************
1738* 1738*
1739* int drxdap_fasi_read_modify_write_reg16 ( 1739* int drxdap_fasi_read_modify_write_reg16 (
1740* struct i2c_device_addr *dev_addr, -- address of I2C device 1740* struct i2c_device_addr *dev_addr, -- address of I2C device
@@ -1778,7 +1778,7 @@ static int drxdap_fasi_read_modify_write_reg16(struct i2c_device_addr *dev_addr,
1778 return rc; 1778 return rc;
1779} 1779}
1780 1780
1781/****************************** 1781/*****************************
1782* 1782*
1783* int drxdap_fasi_write_reg32 ( 1783* int drxdap_fasi_write_reg32 (
1784* struct i2c_device_addr *dev_addr, -- address of I2C device 1784* struct i2c_device_addr *dev_addr, -- address of I2C device
@@ -1811,7 +1811,7 @@ static int drxdap_fasi_write_reg32(struct i2c_device_addr *dev_addr,
1811 1811
1812/*============================================================================*/ 1812/*============================================================================*/
1813 1813
1814/** 1814/*
1815* \fn int drxj_dap_rm_write_reg16short 1815* \fn int drxj_dap_rm_write_reg16short
1816* \brief Read modify write 16 bits audio register using short format only. 1816* \brief Read modify write 16 bits audio register using short format only.
1817* \param dev_addr 1817* \param dev_addr
@@ -1890,7 +1890,7 @@ static int drxj_dap_read_modify_write_reg16(struct i2c_device_addr *dev_addr,
1890 1890
1891/*============================================================================*/ 1891/*============================================================================*/
1892 1892
1893/** 1893/*
1894* \fn int drxj_dap_read_aud_reg16 1894* \fn int drxj_dap_read_aud_reg16
1895* \brief Read 16 bits audio register 1895* \brief Read 16 bits audio register
1896* \param dev_addr 1896* \param dev_addr
@@ -1997,7 +1997,7 @@ static int drxj_dap_read_reg16(struct i2c_device_addr *dev_addr,
1997} 1997}
1998/*============================================================================*/ 1998/*============================================================================*/
1999 1999
2000/** 2000/*
2001* \fn int drxj_dap_write_aud_reg16 2001* \fn int drxj_dap_write_aud_reg16
2002* \brief Write 16 bits audio register 2002* \brief Write 16 bits audio register
2003* \param dev_addr 2003* \param dev_addr
@@ -2086,7 +2086,7 @@ static int drxj_dap_write_reg16(struct i2c_device_addr *dev_addr,
2086#define DRXJ_HI_ATOMIC_READ SIO_HI_RA_RAM_PAR_3_ACP_RW_READ 2086#define DRXJ_HI_ATOMIC_READ SIO_HI_RA_RAM_PAR_3_ACP_RW_READ
2087#define DRXJ_HI_ATOMIC_WRITE SIO_HI_RA_RAM_PAR_3_ACP_RW_WRITE 2087#define DRXJ_HI_ATOMIC_WRITE SIO_HI_RA_RAM_PAR_3_ACP_RW_WRITE
2088 2088
2089/** 2089/*
2090* \fn int drxj_dap_atomic_read_write_block() 2090* \fn int drxj_dap_atomic_read_write_block()
2091* \brief Basic access routine for atomic read or write access 2091* \brief Basic access routine for atomic read or write access
2092* \param dev_addr pointer to i2c dev address 2092* \param dev_addr pointer to i2c dev address
@@ -2168,7 +2168,7 @@ rw_error:
2168 2168
2169/*============================================================================*/ 2169/*============================================================================*/
2170 2170
2171/** 2171/*
2172* \fn int drxj_dap_atomic_read_reg32() 2172* \fn int drxj_dap_atomic_read_reg32()
2173* \brief Atomic read of 32 bits words 2173* \brief Atomic read of 32 bits words
2174*/ 2174*/
@@ -2215,7 +2215,7 @@ int drxj_dap_atomic_read_reg32(struct i2c_device_addr *dev_addr,
2215/*============================================================================*/ 2215/*============================================================================*/
2216/*============================================================================*/ 2216/*============================================================================*/
2217 2217
2218/** 2218/*
2219* \fn int hi_cfg_command() 2219* \fn int hi_cfg_command()
2220* \brief Configure HI with settings stored in the demod structure. 2220* \brief Configure HI with settings stored in the demod structure.
2221* \param demod Demodulator. 2221* \param demod Demodulator.
@@ -2258,7 +2258,7 @@ rw_error:
2258 return rc; 2258 return rc;
2259} 2259}
2260 2260
2261/** 2261/*
2262* \fn int hi_command() 2262* \fn int hi_command()
2263* \brief Configure HI with settings stored in the demod structure. 2263* \brief Configure HI with settings stored in the demod structure.
2264* \param dev_addr I2C address. 2264* \param dev_addr I2C address.
@@ -2369,7 +2369,7 @@ rw_error:
2369 return rc; 2369 return rc;
2370} 2370}
2371 2371
2372/** 2372/*
2373* \fn int init_hi( const struct drx_demod_instance *demod ) 2373* \fn int init_hi( const struct drx_demod_instance *demod )
2374* \brief Initialise and configurate HI. 2374* \brief Initialise and configurate HI.
2375* \param demod pointer to demod data. 2375* \param demod pointer to demod data.
@@ -2450,7 +2450,7 @@ rw_error:
2450/*============================================================================*/ 2450/*============================================================================*/
2451/*============================================================================*/ 2451/*============================================================================*/
2452 2452
2453/** 2453/*
2454* \fn int get_device_capabilities() 2454* \fn int get_device_capabilities()
2455* \brief Get and store device capabilities. 2455* \brief Get and store device capabilities.
2456* \param demod Pointer to demodulator instance. 2456* \param demod Pointer to demodulator instance.
@@ -2656,7 +2656,7 @@ rw_error:
2656 return rc; 2656 return rc;
2657} 2657}
2658 2658
2659/** 2659/*
2660* \fn int power_up_device() 2660* \fn int power_up_device()
2661* \brief Power up device. 2661* \brief Power up device.
2662* \param demod Pointer to demodulator instance. 2662* \param demod Pointer to demodulator instance.
@@ -2710,7 +2710,7 @@ static int power_up_device(struct drx_demod_instance *demod)
2710/*----------------------------------------------------------------------------*/ 2710/*----------------------------------------------------------------------------*/
2711/* MPEG Output Configuration Functions - begin */ 2711/* MPEG Output Configuration Functions - begin */
2712/*----------------------------------------------------------------------------*/ 2712/*----------------------------------------------------------------------------*/
2713/** 2713/*
2714* \fn int ctrl_set_cfg_mpeg_output() 2714* \fn int ctrl_set_cfg_mpeg_output()
2715* \brief Set MPEG output configuration of the device. 2715* \brief Set MPEG output configuration of the device.
2716* \param devmod Pointer to demodulator instance. 2716* \param devmod Pointer to demodulator instance.
@@ -3356,7 +3356,7 @@ rw_error:
3356/* miscellaneous configurations - begin */ 3356/* miscellaneous configurations - begin */
3357/*----------------------------------------------------------------------------*/ 3357/*----------------------------------------------------------------------------*/
3358 3358
3359/** 3359/*
3360* \fn int set_mpegtei_handling() 3360* \fn int set_mpegtei_handling()
3361* \brief Activate MPEG TEI handling settings. 3361* \brief Activate MPEG TEI handling settings.
3362* \param devmod Pointer to demodulator instance. 3362* \param devmod Pointer to demodulator instance.
@@ -3429,7 +3429,7 @@ rw_error:
3429} 3429}
3430 3430
3431/*----------------------------------------------------------------------------*/ 3431/*----------------------------------------------------------------------------*/
3432/** 3432/*
3433* \fn int bit_reverse_mpeg_output() 3433* \fn int bit_reverse_mpeg_output()
3434* \brief Set MPEG output bit-endian settings. 3434* \brief Set MPEG output bit-endian settings.
3435* \param devmod Pointer to demodulator instance. 3435* \param devmod Pointer to demodulator instance.
@@ -3472,7 +3472,7 @@ rw_error:
3472} 3472}
3473 3473
3474/*----------------------------------------------------------------------------*/ 3474/*----------------------------------------------------------------------------*/
3475/** 3475/*
3476* \fn int set_mpeg_start_width() 3476* \fn int set_mpeg_start_width()
3477* \brief Set MPEG start width. 3477* \brief Set MPEG start width.
3478* \param devmod Pointer to demodulator instance. 3478* \param devmod Pointer to demodulator instance.
@@ -3522,7 +3522,7 @@ rw_error:
3522/*----------------------------------------------------------------------------*/ 3522/*----------------------------------------------------------------------------*/
3523/* UIO Configuration Functions - begin */ 3523/* UIO Configuration Functions - begin */
3524/*----------------------------------------------------------------------------*/ 3524/*----------------------------------------------------------------------------*/
3525/** 3525/*
3526* \fn int ctrl_set_uio_cfg() 3526* \fn int ctrl_set_uio_cfg()
3527* \brief Configure modus oprandi UIO. 3527* \brief Configure modus oprandi UIO.
3528* \param demod Pointer to demodulator instance. 3528* \param demod Pointer to demodulator instance.
@@ -3659,7 +3659,7 @@ rw_error:
3659 return rc; 3659 return rc;
3660} 3660}
3661 3661
3662/** 3662/*
3663* \fn int ctrl_uio_write() 3663* \fn int ctrl_uio_write()
3664* \brief Write to a UIO. 3664* \brief Write to a UIO.
3665* \param demod Pointer to demodulator instance. 3665* \param demod Pointer to demodulator instance.
@@ -3868,7 +3868,7 @@ rw_error:
3868/*----------------------------------------------------------------------------*/ 3868/*----------------------------------------------------------------------------*/
3869/* I2C Bridge Functions - begin */ 3869/* I2C Bridge Functions - begin */
3870/*----------------------------------------------------------------------------*/ 3870/*----------------------------------------------------------------------------*/
3871/** 3871/*
3872* \fn int ctrl_i2c_bridge() 3872* \fn int ctrl_i2c_bridge()
3873* \brief Open or close the I2C switch to tuner. 3873* \brief Open or close the I2C switch to tuner.
3874* \param demod Pointer to demodulator instance. 3874* \param demod Pointer to demodulator instance.
@@ -3903,7 +3903,7 @@ ctrl_i2c_bridge(struct drx_demod_instance *demod, bool *bridge_closed)
3903/*----------------------------------------------------------------------------*/ 3903/*----------------------------------------------------------------------------*/
3904/* Smart antenna Functions - begin */ 3904/* Smart antenna Functions - begin */
3905/*----------------------------------------------------------------------------*/ 3905/*----------------------------------------------------------------------------*/
3906/** 3906/*
3907* \fn int smart_ant_init() 3907* \fn int smart_ant_init()
3908* \brief Initialize Smart Antenna. 3908* \brief Initialize Smart Antenna.
3909* \param pointer to struct drx_demod_instance. 3909* \param pointer to struct drx_demod_instance.
@@ -4116,7 +4116,7 @@ rw_error:
4116 return rc; 4116 return rc;
4117} 4117}
4118 4118
4119/** 4119/*
4120* \fn int DRXJ_DAP_SCUAtomicReadWriteBlock() 4120* \fn int DRXJ_DAP_SCUAtomicReadWriteBlock()
4121* \brief Basic access routine for SCU atomic read or write access 4121* \brief Basic access routine for SCU atomic read or write access
4122* \param dev_addr pointer to i2c dev address 4122* \param dev_addr pointer to i2c dev address
@@ -4188,7 +4188,7 @@ rw_error:
4188 4188
4189/*============================================================================*/ 4189/*============================================================================*/
4190 4190
4191/** 4191/*
4192* \fn int DRXJ_DAP_AtomicReadReg16() 4192* \fn int DRXJ_DAP_AtomicReadReg16()
4193* \brief Atomic read of 16 bits words 4193* \brief Atomic read of 16 bits words
4194*/ 4194*/
@@ -4216,7 +4216,7 @@ int drxj_dap_scu_atomic_read_reg16(struct i2c_device_addr *dev_addr,
4216} 4216}
4217 4217
4218/*============================================================================*/ 4218/*============================================================================*/
4219/** 4219/*
4220* \fn int drxj_dap_scu_atomic_write_reg16() 4220* \fn int drxj_dap_scu_atomic_write_reg16()
4221* \brief Atomic read of 16 bits words 4221* \brief Atomic read of 16 bits words
4222*/ 4222*/
@@ -4237,7 +4237,7 @@ int drxj_dap_scu_atomic_write_reg16(struct i2c_device_addr *dev_addr,
4237} 4237}
4238 4238
4239/* -------------------------------------------------------------------------- */ 4239/* -------------------------------------------------------------------------- */
4240/** 4240/*
4241* \brief Measure result of ADC synchronisation 4241* \brief Measure result of ADC synchronisation
4242* \param demod demod instance 4242* \param demod demod instance
4243* \param count (returned) count 4243* \param count (returned) count
@@ -4297,7 +4297,7 @@ rw_error:
4297 return rc; 4297 return rc;
4298} 4298}
4299 4299
4300/** 4300/*
4301* \brief Synchronize analog and digital clock domains 4301* \brief Synchronize analog and digital clock domains
4302* \param demod demod instance 4302* \param demod demod instance
4303* \return int. 4303* \return int.
@@ -4365,7 +4365,7 @@ rw_error:
4365/*== 8VSB & QAM COMMON DATAPATH FUNCTIONS ==*/ 4365/*== 8VSB & QAM COMMON DATAPATH FUNCTIONS ==*/
4366/*============================================================================*/ 4366/*============================================================================*/
4367/*============================================================================*/ 4367/*============================================================================*/
4368/** 4368/*
4369* \fn int init_agc () 4369* \fn int init_agc ()
4370* \brief Initialize AGC for all standards. 4370* \brief Initialize AGC for all standards.
4371* \param demod instance of demodulator. 4371* \param demod instance of demodulator.
@@ -4741,7 +4741,7 @@ rw_error:
4741 return rc; 4741 return rc;
4742} 4742}
4743 4743
4744/** 4744/*
4745* \fn int set_frequency () 4745* \fn int set_frequency ()
4746* \brief Set frequency shift. 4746* \brief Set frequency shift.
4747* \param demod instance of demodulator. 4747* \param demod instance of demodulator.
@@ -4839,7 +4839,7 @@ rw_error:
4839 return rc; 4839 return rc;
4840} 4840}
4841 4841
4842/** 4842/*
4843* \fn int get_acc_pkt_err() 4843* \fn int get_acc_pkt_err()
4844* \brief Retrieve signal strength for VSB and QAM. 4844* \brief Retrieve signal strength for VSB and QAM.
4845* \param demod Pointer to demod instance 4845* \param demod Pointer to demod instance
@@ -4891,7 +4891,7 @@ rw_error:
4891 4891
4892/*============================================================================*/ 4892/*============================================================================*/
4893 4893
4894/** 4894/*
4895* \fn int set_agc_rf () 4895* \fn int set_agc_rf ()
4896* \brief Configure RF AGC 4896* \brief Configure RF AGC
4897* \param demod instance of demodulator. 4897* \param demod instance of demodulator.
@@ -5105,7 +5105,7 @@ rw_error:
5105 return rc; 5105 return rc;
5106} 5106}
5107 5107
5108/** 5108/*
5109* \fn int set_agc_if () 5109* \fn int set_agc_if ()
5110* \brief Configure If AGC 5110* \brief Configure If AGC
5111* \param demod instance of demodulator. 5111* \param demod instance of demodulator.
@@ -5334,7 +5334,7 @@ rw_error:
5334 return rc; 5334 return rc;
5335} 5335}
5336 5336
5337/** 5337/*
5338* \fn int set_iqm_af () 5338* \fn int set_iqm_af ()
5339* \brief Configure IQM AF registers 5339* \brief Configure IQM AF registers
5340* \param demod instance of demodulator. 5340* \param demod instance of demodulator.
@@ -5380,7 +5380,7 @@ rw_error:
5380/*============================================================================*/ 5380/*============================================================================*/
5381/*============================================================================*/ 5381/*============================================================================*/
5382 5382
5383/** 5383/*
5384* \fn int power_down_vsb () 5384* \fn int power_down_vsb ()
5385* \brief Powr down QAM related blocks. 5385* \brief Powr down QAM related blocks.
5386* \param demod instance of demodulator. 5386* \param demod instance of demodulator.
@@ -5478,7 +5478,7 @@ rw_error:
5478 return rc; 5478 return rc;
5479} 5479}
5480 5480
5481/** 5481/*
5482* \fn int set_vsb_leak_n_gain () 5482* \fn int set_vsb_leak_n_gain ()
5483* \brief Set ATSC demod. 5483* \brief Set ATSC demod.
5484* \param demod instance of demodulator. 5484* \param demod instance of demodulator.
@@ -5694,7 +5694,7 @@ rw_error:
5694 return rc; 5694 return rc;
5695} 5695}
5696 5696
5697/** 5697/*
5698* \fn int set_vsb() 5698* \fn int set_vsb()
5699* \brief Set 8VSB demod. 5699* \brief Set 8VSB demod.
5700* \param demod instance of demodulator. 5700* \param demod instance of demodulator.
@@ -6200,7 +6200,7 @@ rw_error:
6200 return rc; 6200 return rc;
6201} 6201}
6202 6202
6203/** 6203/*
6204* \fn static short get_vsb_post_rs_pck_err(struct i2c_device_addr *dev_addr, u16 *PckErrs) 6204* \fn static short get_vsb_post_rs_pck_err(struct i2c_device_addr *dev_addr, u16 *PckErrs)
6205* \brief Get the values of packet error in 8VSB mode 6205* \brief Get the values of packet error in 8VSB mode
6206* \return Error code 6206* \return Error code
@@ -6239,7 +6239,7 @@ rw_error:
6239 return rc; 6239 return rc;
6240} 6240}
6241 6241
6242/** 6242/*
6243* \fn static short GetVSBBer(struct i2c_device_addr *dev_addr, u32 *ber) 6243* \fn static short GetVSBBer(struct i2c_device_addr *dev_addr, u32 *ber)
6244* \brief Get the values of ber in VSB mode 6244* \brief Get the values of ber in VSB mode
6245* \return Error code 6245* \return Error code
@@ -6284,7 +6284,7 @@ rw_error:
6284 return rc; 6284 return rc;
6285} 6285}
6286 6286
6287/** 6287/*
6288* \fn static short get_vs_bpre_viterbi_ber(struct i2c_device_addr *dev_addr, u32 *ber) 6288* \fn static short get_vs_bpre_viterbi_ber(struct i2c_device_addr *dev_addr, u32 *ber)
6289* \brief Get the values of ber in VSB mode 6289* \brief Get the values of ber in VSB mode
6290* \return Error code 6290* \return Error code
@@ -6306,7 +6306,7 @@ static int get_vs_bpre_viterbi_ber(struct i2c_device_addr *dev_addr,
6306 return 0; 6306 return 0;
6307} 6307}
6308 6308
6309/** 6309/*
6310* \fn static int get_vsbmer(struct i2c_device_addr *dev_addr, u16 *mer) 6310* \fn static int get_vsbmer(struct i2c_device_addr *dev_addr, u16 *mer)
6311* \brief Get the values of MER 6311* \brief Get the values of MER
6312* \return Error code 6312* \return Error code
@@ -6340,7 +6340,7 @@ rw_error:
6340/*============================================================================*/ 6340/*============================================================================*/
6341/*============================================================================*/ 6341/*============================================================================*/
6342 6342
6343/** 6343/*
6344* \fn int power_down_qam () 6344* \fn int power_down_qam ()
6345* \brief Powr down QAM related blocks. 6345* \brief Powr down QAM related blocks.
6346* \param demod instance of demodulator. 6346* \param demod instance of demodulator.
@@ -6444,7 +6444,7 @@ rw_error:
6444 6444
6445/*============================================================================*/ 6445/*============================================================================*/
6446 6446
6447/** 6447/*
6448* \fn int set_qam_measurement () 6448* \fn int set_qam_measurement ()
6449* \brief Setup of the QAM Measuremnt intervals for signal quality 6449* \brief Setup of the QAM Measuremnt intervals for signal quality
6450* \param demod instance of demod. 6450* \param demod instance of demod.
@@ -6656,7 +6656,7 @@ rw_error:
6656 6656
6657/*============================================================================*/ 6657/*============================================================================*/
6658 6658
6659/** 6659/*
6660* \fn int set_qam16 () 6660* \fn int set_qam16 ()
6661* \brief QAM16 specific setup 6661* \brief QAM16 specific setup
6662* \param demod instance of demod. 6662* \param demod instance of demod.
@@ -6891,7 +6891,7 @@ rw_error:
6891 6891
6892/*============================================================================*/ 6892/*============================================================================*/
6893 6893
6894/** 6894/*
6895* \fn int set_qam32 () 6895* \fn int set_qam32 ()
6896* \brief QAM32 specific setup 6896* \brief QAM32 specific setup
6897* \param demod instance of demod. 6897* \param demod instance of demod.
@@ -7126,7 +7126,7 @@ rw_error:
7126 7126
7127/*============================================================================*/ 7127/*============================================================================*/
7128 7128
7129/** 7129/*
7130* \fn int set_qam64 () 7130* \fn int set_qam64 ()
7131* \brief QAM64 specific setup 7131* \brief QAM64 specific setup
7132* \param demod instance of demod. 7132* \param demod instance of demod.
@@ -7362,7 +7362,7 @@ rw_error:
7362 7362
7363/*============================================================================*/ 7363/*============================================================================*/
7364 7364
7365/** 7365/*
7366* \fn int set_qam128 () 7366* \fn int set_qam128 ()
7367* \brief QAM128 specific setup 7367* \brief QAM128 specific setup
7368* \param demod: instance of demod. 7368* \param demod: instance of demod.
@@ -7597,7 +7597,7 @@ rw_error:
7597 7597
7598/*============================================================================*/ 7598/*============================================================================*/
7599 7599
7600/** 7600/*
7601* \fn int set_qam256 () 7601* \fn int set_qam256 ()
7602* \brief QAM256 specific setup 7602* \brief QAM256 specific setup
7603* \param demod: instance of demod. 7603* \param demod: instance of demod.
@@ -7835,7 +7835,7 @@ rw_error:
7835#define QAM_SET_OP_CONSTELLATION 0x2 7835#define QAM_SET_OP_CONSTELLATION 0x2
7836#define QAM_SET_OP_SPECTRUM 0X4 7836#define QAM_SET_OP_SPECTRUM 0X4
7837 7837
7838/** 7838/*
7839* \fn int set_qam () 7839* \fn int set_qam ()
7840* \brief Set QAM demod. 7840* \brief Set QAM demod.
7841* \param demod: instance of demod. 7841* \param demod: instance of demod.
@@ -8845,7 +8845,7 @@ rw_error:
8845#define DEMOD_LOCKED 0x1 8845#define DEMOD_LOCKED 0x1
8846#define SYNC_FLIPPED 0x2 8846#define SYNC_FLIPPED 0x2
8847#define SPEC_MIRRORED 0x4 8847#define SPEC_MIRRORED 0x4
8848/** 8848/*
8849* \fn int qam64auto () 8849* \fn int qam64auto ()
8850* \brief auto do sync pattern switching and mirroring. 8850* \brief auto do sync pattern switching and mirroring.
8851* \param demod: instance of demod. 8851* \param demod: instance of demod.
@@ -8993,7 +8993,7 @@ rw_error:
8993 return rc; 8993 return rc;
8994} 8994}
8995 8995
8996/** 8996/*
8997* \fn int qam256auto () 8997* \fn int qam256auto ()
8998* \brief auto do sync pattern switching and mirroring. 8998* \brief auto do sync pattern switching and mirroring.
8999* \param demod: instance of demod. 8999* \param demod: instance of demod.
@@ -9077,7 +9077,7 @@ rw_error:
9077 return rc; 9077 return rc;
9078} 9078}
9079 9079
9080/** 9080/*
9081* \fn int set_qam_channel () 9081* \fn int set_qam_channel ()
9082* \brief Set QAM channel according to the requested constellation. 9082* \brief Set QAM channel according to the requested constellation.
9083* \param demod: instance of demod. 9083* \param demod: instance of demod.
@@ -9284,7 +9284,7 @@ rw_error:
9284 9284
9285/*============================================================================*/ 9285/*============================================================================*/
9286 9286
9287/** 9287/*
9288* \fn static short get_qamrs_err_count(struct i2c_device_addr *dev_addr) 9288* \fn static short get_qamrs_err_count(struct i2c_device_addr *dev_addr)
9289* \brief Get RS error count in QAM mode (used for post RS BER calculation) 9289* \brief Get RS error count in QAM mode (used for post RS BER calculation)
9290* \return Error code 9290* \return Error code
@@ -9355,7 +9355,7 @@ rw_error:
9355 9355
9356/*============================================================================*/ 9356/*============================================================================*/
9357 9357
9358/** 9358/*
9359 * \fn int get_sig_strength() 9359 * \fn int get_sig_strength()
9360 * \brief Retrieve signal strength for VSB and QAM. 9360 * \brief Retrieve signal strength for VSB and QAM.
9361 * \param demod Pointer to demod instance 9361 * \param demod Pointer to demod instance
@@ -9435,7 +9435,7 @@ rw_error:
9435 return rc; 9435 return rc;
9436} 9436}
9437 9437
9438/** 9438/*
9439* \fn int ctrl_get_qam_sig_quality() 9439* \fn int ctrl_get_qam_sig_quality()
9440* \brief Retrieve QAM signal quality from device. 9440* \brief Retrieve QAM signal quality from device.
9441* \param devmod Pointer to demodulator instance. 9441* \param devmod Pointer to demodulator instance.
@@ -9721,7 +9721,7 @@ rw_error:
9721*/ 9721*/
9722/* -------------------------------------------------------------------------- */ 9722/* -------------------------------------------------------------------------- */
9723 9723
9724/** 9724/*
9725* \fn int power_down_atv () 9725* \fn int power_down_atv ()
9726* \brief Power down ATV. 9726* \brief Power down ATV.
9727* \param demod instance of demodulator 9727* \param demod instance of demodulator
@@ -9822,7 +9822,7 @@ rw_error:
9822 9822
9823/*============================================================================*/ 9823/*============================================================================*/
9824 9824
9825/** 9825/*
9826* \brief Power up AUD. 9826* \brief Power up AUD.
9827* \param demod instance of demodulator 9827* \param demod instance of demodulator
9828* \return int. 9828* \return int.
@@ -9850,7 +9850,7 @@ rw_error:
9850 return rc; 9850 return rc;
9851} 9851}
9852 9852
9853/** 9853/*
9854* \fn int set_orx_nsu_aox() 9854* \fn int set_orx_nsu_aox()
9855* \brief Configure OrxNsuAox for OOB 9855* \brief Configure OrxNsuAox for OOB
9856* \param demod instance of demodulator. 9856* \param demod instance of demodulator.
@@ -9884,7 +9884,7 @@ rw_error:
9884 return rc; 9884 return rc;
9885} 9885}
9886 9886
9887/** 9887/*
9888* \fn int ctrl_set_oob() 9888* \fn int ctrl_set_oob()
9889* \brief Set OOB channel to be used. 9889* \brief Set OOB channel to be used.
9890* \param demod instance of demodulator 9890* \param demod instance of demodulator
@@ -9986,9 +9986,9 @@ static int ctrl_set_oob(struct drx_demod_instance *demod, struct drxoob *oob_par
9986 20; 9986 20;
9987 } 9987 }
9988 9988
9989 /*********/ 9989 /********/
9990 /* Stop */ 9990 /* Stop */
9991 /*********/ 9991 /********/
9992 rc = drxj_dap_write_reg16(dev_addr, ORX_COMM_EXEC__A, ORX_COMM_EXEC_STOP, 0); 9992 rc = drxj_dap_write_reg16(dev_addr, ORX_COMM_EXEC__A, ORX_COMM_EXEC_STOP, 0);
9993 if (rc != 0) { 9993 if (rc != 0) {
9994 pr_err("error %d\n", rc); 9994 pr_err("error %d\n", rc);
@@ -10004,9 +10004,9 @@ static int ctrl_set_oob(struct drx_demod_instance *demod, struct drxoob *oob_par
10004 pr_err("error %d\n", rc); 10004 pr_err("error %d\n", rc);
10005 goto rw_error; 10005 goto rw_error;
10006 } 10006 }
10007 /*********/ 10007 /********/
10008 /* Reset */ 10008 /* Reset */
10009 /*********/ 10009 /********/
10010 scu_cmd.command = SCU_RAM_COMMAND_STANDARD_OOB 10010 scu_cmd.command = SCU_RAM_COMMAND_STANDARD_OOB
10011 | SCU_RAM_COMMAND_CMD_DEMOD_RESET; 10011 | SCU_RAM_COMMAND_CMD_DEMOD_RESET;
10012 scu_cmd.parameter_len = 0; 10012 scu_cmd.parameter_len = 0;
@@ -10017,9 +10017,9 @@ static int ctrl_set_oob(struct drx_demod_instance *demod, struct drxoob *oob_par
10017 pr_err("error %d\n", rc); 10017 pr_err("error %d\n", rc);
10018 goto rw_error; 10018 goto rw_error;
10019 } 10019 }
10020 /***********/ 10020 /**********/
10021 /* SET_ENV */ 10021 /* SET_ENV */
10022 /***********/ 10022 /**********/
10023 /* set frequency, spectrum inversion and data rate */ 10023 /* set frequency, spectrum inversion and data rate */
10024 scu_cmd.command = SCU_RAM_COMMAND_STANDARD_OOB 10024 scu_cmd.command = SCU_RAM_COMMAND_STANDARD_OOB
10025 | SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV; 10025 | SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV;
@@ -10376,9 +10376,9 @@ static int ctrl_set_oob(struct drx_demod_instance *demod, struct drxoob *oob_par
10376 pr_err("error %d\n", rc); 10376 pr_err("error %d\n", rc);
10377 goto rw_error; 10377 goto rw_error;
10378 } 10378 }
10379 /*********/ 10379 /********/
10380 /* Start */ 10380 /* Start */
10381 /*********/ 10381 /********/
10382 scu_cmd.command = SCU_RAM_COMMAND_STANDARD_OOB 10382 scu_cmd.command = SCU_RAM_COMMAND_STANDARD_OOB
10383 | SCU_RAM_COMMAND_CMD_DEMOD_START; 10383 | SCU_RAM_COMMAND_CMD_DEMOD_START;
10384 scu_cmd.parameter_len = 0; 10384 scu_cmd.parameter_len = 0;
@@ -10419,7 +10419,7 @@ rw_error:
10419/*============================================================================= 10419/*=============================================================================
10420 ===== ctrl_set_channel() ========================================================== 10420 ===== ctrl_set_channel() ==========================================================
10421 ===========================================================================*/ 10421 ===========================================================================*/
10422/** 10422/*
10423* \fn int ctrl_set_channel() 10423* \fn int ctrl_set_channel()
10424* \brief Select a new transmission channel. 10424* \brief Select a new transmission channel.
10425* \param demod instance of demod. 10425* \param demod instance of demod.
@@ -10652,7 +10652,7 @@ rw_error:
10652 ===== SigQuality() ========================================================== 10652 ===== SigQuality() ==========================================================
10653 ===========================================================================*/ 10653 ===========================================================================*/
10654 10654
10655/** 10655/*
10656* \fn int ctrl_sig_quality() 10656* \fn int ctrl_sig_quality()
10657* \brief Retrieve signal quality form device. 10657* \brief Retrieve signal quality form device.
10658* \param devmod Pointer to demodulator instance. 10658* \param devmod Pointer to demodulator instance.
@@ -10768,7 +10768,7 @@ rw_error:
10768 10768
10769/*============================================================================*/ 10769/*============================================================================*/
10770 10770
10771/** 10771/*
10772* \fn int ctrl_lock_status() 10772* \fn int ctrl_lock_status()
10773* \brief Retrieve lock status . 10773* \brief Retrieve lock status .
10774* \param dev_addr Pointer to demodulator device address. 10774* \param dev_addr Pointer to demodulator device address.
@@ -10856,7 +10856,7 @@ rw_error:
10856 10856
10857/*============================================================================*/ 10857/*============================================================================*/
10858 10858
10859/** 10859/*
10860* \fn int ctrl_set_standard() 10860* \fn int ctrl_set_standard()
10861* \brief Set modulation standard to be used. 10861* \brief Set modulation standard to be used.
10862* \param standard Modulation standard. 10862* \param standard Modulation standard.
@@ -11012,7 +11012,7 @@ static void drxj_reset_mode(struct drxj_data *ext_attr)
11012 ext_attr->vsb_pre_saw_cfg.use_pre_saw = true; 11012 ext_attr->vsb_pre_saw_cfg.use_pre_saw = true;
11013} 11013}
11014 11014
11015/** 11015/*
11016* \fn int ctrl_power_mode() 11016* \fn int ctrl_power_mode()
11017* \brief Set the power mode of the device to the specified power mode 11017* \brief Set the power mode of the device to the specified power mode
11018* \param demod Pointer to demodulator instance. 11018* \param demod Pointer to demodulator instance.
@@ -11171,7 +11171,7 @@ rw_error:
11171/*== CTRL Set/Get Config related functions ===================================*/ 11171/*== CTRL Set/Get Config related functions ===================================*/
11172/*============================================================================*/ 11172/*============================================================================*/
11173 11173
11174/** 11174/*
11175* \fn int ctrl_set_cfg_pre_saw() 11175* \fn int ctrl_set_cfg_pre_saw()
11176* \brief Set Pre-saw reference. 11176* \brief Set Pre-saw reference.
11177* \param demod demod instance 11177* \param demod demod instance
@@ -11234,7 +11234,7 @@ rw_error:
11234 11234
11235/*============================================================================*/ 11235/*============================================================================*/
11236 11236
11237/** 11237/*
11238* \fn int ctrl_set_cfg_afe_gain() 11238* \fn int ctrl_set_cfg_afe_gain()
11239* \brief Set AFE Gain. 11239* \brief Set AFE Gain.
11240* \param demod demod instance 11240* \param demod demod instance
@@ -11324,7 +11324,7 @@ static int drx_ctrl_u_code(struct drx_demod_instance *demod,
11324 enum drxu_code_action action); 11324 enum drxu_code_action action);
11325static int drxj_set_lna_state(struct drx_demod_instance *demod, bool state); 11325static int drxj_set_lna_state(struct drx_demod_instance *demod, bool state);
11326 11326
11327/** 11327/*
11328* \fn drxj_open() 11328* \fn drxj_open()
11329* \brief Open the demod instance, configure device, configure drxdriver 11329* \brief Open the demod instance, configure device, configure drxdriver
11330* \return Status_t Return status. 11330* \return Status_t Return status.
@@ -11543,7 +11543,7 @@ rw_error:
11543} 11543}
11544 11544
11545/*============================================================================*/ 11545/*============================================================================*/
11546/** 11546/*
11547* \fn drxj_close() 11547* \fn drxj_close()
11548* \brief Close the demod instance, power down the device 11548* \brief Close the demod instance, power down the device
11549* \return Status_t Return status. 11549* \return Status_t Return status.
@@ -11594,7 +11594,7 @@ rw_error:
11594 * Microcode related functions 11594 * Microcode related functions
11595 */ 11595 */
11596 11596
11597/** 11597/*
11598 * drx_u_code_compute_crc - Compute CRC of block of microcode data. 11598 * drx_u_code_compute_crc - Compute CRC of block of microcode data.
11599 * @block_data: Pointer to microcode data. 11599 * @block_data: Pointer to microcode data.
11600 * @nr_words: Size of microcode block (number of 16 bits words). 11600 * @nr_words: Size of microcode block (number of 16 bits words).
@@ -11622,7 +11622,7 @@ static u16 drx_u_code_compute_crc(u8 *block_data, u16 nr_words)
11622 return (u16)(crc_word >> 16); 11622 return (u16)(crc_word >> 16);
11623} 11623}
11624 11624
11625/** 11625/*
11626 * drx_check_firmware - checks if the loaded firmware is valid 11626 * drx_check_firmware - checks if the loaded firmware is valid
11627 * 11627 *
11628 * @demod: demod structure 11628 * @demod: demod structure
@@ -11708,7 +11708,7 @@ eof:
11708 return -EINVAL; 11708 return -EINVAL;
11709} 11709}
11710 11710
11711/** 11711/*
11712 * drx_ctrl_u_code - Handle microcode upload or verify. 11712 * drx_ctrl_u_code - Handle microcode upload or verify.
11713 * @dev_addr: Address of device. 11713 * @dev_addr: Address of device.
11714 * @mc_info: Pointer to information about microcode data. 11714 * @mc_info: Pointer to information about microcode data.
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.h b/drivers/media/dvb-frontends/drx39xyj/drxj.h
index 6c5b8f78f9f6..d3ee1c23bb2f 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.h
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.h
@@ -69,15 +69,15 @@ TYPEDEFS
69 69
70 struct drxjscu_cmd { 70 struct drxjscu_cmd {
71 u16 command; 71 u16 command;
72 /**< Command number */ 72 /*< Command number */
73 u16 parameter_len; 73 u16 parameter_len;
74 /**< Data length in byte */ 74 /*< Data length in byte */
75 u16 result_len; 75 u16 result_len;
76 /**< result length in byte */ 76 /*< result length in byte */
77 u16 *parameter; 77 u16 *parameter;
78 /**< General purpous param */ 78 /*< General purpous param */
79 u16 *result; 79 u16 *result;
80 /**< General purpous param */}; 80 /*< General purpous param */};
81 81
82/*============================================================================*/ 82/*============================================================================*/
83/*============================================================================*/ 83/*============================================================================*/
@@ -130,7 +130,7 @@ TYPEDEFS
130 130
131 DRXJ_CFG_MAX /* dummy, never to be used */}; 131 DRXJ_CFG_MAX /* dummy, never to be used */};
132 132
133/** 133/*
134* /struct enum drxj_cfg_smart_ant_io * smart antenna i/o. 134* /struct enum drxj_cfg_smart_ant_io * smart antenna i/o.
135*/ 135*/
136enum drxj_cfg_smart_ant_io { 136enum drxj_cfg_smart_ant_io {
@@ -138,7 +138,7 @@ enum drxj_cfg_smart_ant_io {
138 DRXJ_SMT_ANT_INPUT 138 DRXJ_SMT_ANT_INPUT
139}; 139};
140 140
141/** 141/*
142* /struct struct drxj_cfg_smart_ant * Set smart antenna. 142* /struct struct drxj_cfg_smart_ant * Set smart antenna.
143*/ 143*/
144 struct drxj_cfg_smart_ant { 144 struct drxj_cfg_smart_ant {
@@ -146,7 +146,7 @@ enum drxj_cfg_smart_ant_io {
146 u16 ctrl_data; 146 u16 ctrl_data;
147 }; 147 };
148 148
149/** 149/*
150* /struct DRXJAGCSTATUS_t 150* /struct DRXJAGCSTATUS_t
151* AGC status information from the DRXJ-IQM-AF. 151* AGC status information from the DRXJ-IQM-AF.
152*/ 152*/
@@ -158,7 +158,7 @@ struct drxj_agc_status {
158 158
159/* DRXJ_CFG_AGC_RF, DRXJ_CFG_AGC_IF */ 159/* DRXJ_CFG_AGC_RF, DRXJ_CFG_AGC_IF */
160 160
161/** 161/*
162* /struct enum drxj_agc_ctrl_mode * Available AGCs modes in the DRXJ. 162* /struct enum drxj_agc_ctrl_mode * Available AGCs modes in the DRXJ.
163*/ 163*/
164 enum drxj_agc_ctrl_mode { 164 enum drxj_agc_ctrl_mode {
@@ -166,7 +166,7 @@ struct drxj_agc_status {
166 DRX_AGC_CTRL_USER, 166 DRX_AGC_CTRL_USER,
167 DRX_AGC_CTRL_OFF}; 167 DRX_AGC_CTRL_OFF};
168 168
169/** 169/*
170* /struct struct drxj_cfg_agc * Generic interface for all AGCs present on the DRXJ. 170* /struct struct drxj_cfg_agc * Generic interface for all AGCs present on the DRXJ.
171*/ 171*/
172 struct drxj_cfg_agc { 172 struct drxj_cfg_agc {
@@ -182,7 +182,7 @@ struct drxj_agc_status {
182 182
183/* DRXJ_CFG_PRE_SAW */ 183/* DRXJ_CFG_PRE_SAW */
184 184
185/** 185/*
186* /struct struct drxj_cfg_pre_saw * Interface to configure pre SAW sense. 186* /struct struct drxj_cfg_pre_saw * Interface to configure pre SAW sense.
187*/ 187*/
188 struct drxj_cfg_pre_saw { 188 struct drxj_cfg_pre_saw {
@@ -192,14 +192,14 @@ struct drxj_agc_status {
192 192
193/* DRXJ_CFG_AFE_GAIN */ 193/* DRXJ_CFG_AFE_GAIN */
194 194
195/** 195/*
196* /struct struct drxj_cfg_afe_gain * Interface to configure gain of AFE (LNA + PGA). 196* /struct struct drxj_cfg_afe_gain * Interface to configure gain of AFE (LNA + PGA).
197*/ 197*/
198 struct drxj_cfg_afe_gain { 198 struct drxj_cfg_afe_gain {
199 enum drx_standard standard; /* standard to which these settings apply */ 199 enum drx_standard standard; /* standard to which these settings apply */
200 u16 gain; /* gain in 0.1 dB steps, DRXJ range 140 .. 335 */}; 200 u16 gain; /* gain in 0.1 dB steps, DRXJ range 140 .. 335 */};
201 201
202/** 202/*
203* /struct drxjrs_errors 203* /struct drxjrs_errors
204* Available failure information in DRXJ_FEC_RS. 204* Available failure information in DRXJ_FEC_RS.
205* 205*
@@ -208,25 +208,25 @@ struct drxj_agc_status {
208*/ 208*/
209 struct drxjrs_errors { 209 struct drxjrs_errors {
210 u16 nr_bit_errors; 210 u16 nr_bit_errors;
211 /**< no of pre RS bit errors */ 211 /*< no of pre RS bit errors */
212 u16 nr_symbol_errors; 212 u16 nr_symbol_errors;
213 /**< no of pre RS symbol errors */ 213 /*< no of pre RS symbol errors */
214 u16 nr_packet_errors; 214 u16 nr_packet_errors;
215 /**< no of pre RS packet errors */ 215 /*< no of pre RS packet errors */
216 u16 nr_failures; 216 u16 nr_failures;
217 /**< no of post RS failures to decode */ 217 /*< no of post RS failures to decode */
218 u16 nr_snc_par_fail_count; 218 u16 nr_snc_par_fail_count;
219 /**< no of post RS bit erros */ 219 /*< no of post RS bit erros */
220 }; 220 };
221 221
222/** 222/*
223* /struct struct drxj_cfg_vsb_misc * symbol error rate 223* /struct struct drxj_cfg_vsb_misc * symbol error rate
224*/ 224*/
225 struct drxj_cfg_vsb_misc { 225 struct drxj_cfg_vsb_misc {
226 u32 symb_error; 226 u32 symb_error;
227 /**< symbol error rate sps */}; 227 /*< symbol error rate sps */};
228 228
229/** 229/*
230* /enum enum drxj_mpeg_output_clock_rate * Mpeg output clock rate. 230* /enum enum drxj_mpeg_output_clock_rate * Mpeg output clock rate.
231* 231*
232*/ 232*/
@@ -234,7 +234,7 @@ struct drxj_agc_status {
234 DRXJ_MPEG_START_WIDTH_1CLKCYC, 234 DRXJ_MPEG_START_WIDTH_1CLKCYC,
235 DRXJ_MPEG_START_WIDTH_8CLKCYC}; 235 DRXJ_MPEG_START_WIDTH_8CLKCYC};
236 236
237/** 237/*
238* /enum enum drxj_mpeg_output_clock_rate * Mpeg output clock rate. 238* /enum enum drxj_mpeg_output_clock_rate * Mpeg output clock rate.
239* 239*
240*/ 240*/
@@ -247,20 +247,20 @@ struct drxj_agc_status {
247 DRXJ_MPEGOUTPUT_CLOCK_RATE_25313K, 247 DRXJ_MPEGOUTPUT_CLOCK_RATE_25313K,
248 DRXJ_MPEGOUTPUT_CLOCK_RATE_21696K}; 248 DRXJ_MPEGOUTPUT_CLOCK_RATE_21696K};
249 249
250/** 250/*
251* /struct DRXJCfgMisc_t 251* /struct DRXJCfgMisc_t
252* Change TEI bit of MPEG output 252* Change TEI bit of MPEG output
253* reverse MPEG output bit order 253* reverse MPEG output bit order
254* set MPEG output clock rate 254* set MPEG output clock rate
255*/ 255*/
256 struct drxj_cfg_mpeg_output_misc { 256 struct drxj_cfg_mpeg_output_misc {
257 bool disable_tei_handling; /**< if true pass (not change) TEI bit */ 257 bool disable_tei_handling; /*< if true pass (not change) TEI bit */
258 bool bit_reverse_mpeg_outout; /**< if true, parallel: msb on MD0; serial: lsb out first */ 258 bool bit_reverse_mpeg_outout; /*< if true, parallel: msb on MD0; serial: lsb out first */
259 enum drxj_mpeg_output_clock_rate mpeg_output_clock_rate; 259 enum drxj_mpeg_output_clock_rate mpeg_output_clock_rate;
260 /**< set MPEG output clock rate that overwirtes the derived one from symbol rate */ 260 /*< set MPEG output clock rate that overwirtes the derived one from symbol rate */
261 enum drxj_mpeg_start_width mpeg_start_width; /**< set MPEG output start width */}; 261 enum drxj_mpeg_start_width mpeg_start_width; /*< set MPEG output start width */};
262 262
263/** 263/*
264* /enum enum drxj_xtal_freq * Supported external crystal reference frequency. 264* /enum enum drxj_xtal_freq * Supported external crystal reference frequency.
265*/ 265*/
266 enum drxj_xtal_freq { 266 enum drxj_xtal_freq {
@@ -269,21 +269,21 @@ struct drxj_agc_status {
269 DRXJ_XTAL_FREQ_20P25MHZ, 269 DRXJ_XTAL_FREQ_20P25MHZ,
270 DRXJ_XTAL_FREQ_4MHZ}; 270 DRXJ_XTAL_FREQ_4MHZ};
271 271
272/** 272/*
273* /enum enum drxj_xtal_freq * Supported external crystal reference frequency. 273* /enum enum drxj_xtal_freq * Supported external crystal reference frequency.
274*/ 274*/
275 enum drxji2c_speed { 275 enum drxji2c_speed {
276 DRXJ_I2C_SPEED_400KBPS, 276 DRXJ_I2C_SPEED_400KBPS,
277 DRXJ_I2C_SPEED_100KBPS}; 277 DRXJ_I2C_SPEED_100KBPS};
278 278
279/** 279/*
280* /struct struct drxj_cfg_hw_cfg * Get hw configuration, such as crystal reference frequency, I2C speed, etc... 280* /struct struct drxj_cfg_hw_cfg * Get hw configuration, such as crystal reference frequency, I2C speed, etc...
281*/ 281*/
282 struct drxj_cfg_hw_cfg { 282 struct drxj_cfg_hw_cfg {
283 enum drxj_xtal_freq xtal_freq; 283 enum drxj_xtal_freq xtal_freq;
284 /**< crystal reference frequency */ 284 /*< crystal reference frequency */
285 enum drxji2c_speed i2c_speed; 285 enum drxji2c_speed i2c_speed;
286 /**< 100 or 400 kbps */}; 286 /*< 100 or 400 kbps */};
287 287
288/* 288/*
289 * DRXJ_CFG_ATV_MISC 289 * DRXJ_CFG_ATV_MISC
@@ -352,7 +352,7 @@ struct drxj_cfg_oob_misc {
352 * DRXJ_CFG_ATV_OUTPUT 352 * DRXJ_CFG_ATV_OUTPUT
353 */ 353 */
354 354
355/** 355/*
356* /enum DRXJAttenuation_t 356* /enum DRXJAttenuation_t
357* Attenuation setting for SIF AGC. 357* Attenuation setting for SIF AGC.
358* 358*
@@ -363,7 +363,7 @@ struct drxj_cfg_oob_misc {
363 DRXJ_SIF_ATTENUATION_6DB, 363 DRXJ_SIF_ATTENUATION_6DB,
364 DRXJ_SIF_ATTENUATION_9DB}; 364 DRXJ_SIF_ATTENUATION_9DB};
365 365
366/** 366/*
367* /struct struct drxj_cfg_atv_output * SIF attenuation setting. 367* /struct struct drxj_cfg_atv_output * SIF attenuation setting.
368* 368*
369*/ 369*/
@@ -398,7 +398,7 @@ struct drxj_cfg_atv_output {
398/*============================================================================*/ 398/*============================================================================*/
399 399
400/*========================================*/ 400/*========================================*/
401/** 401/*
402* /struct struct drxj_data * DRXJ specific attributes. 402* /struct struct drxj_data * DRXJ specific attributes.
403* 403*
404* Global data container for DRXJ specific data. 404* Global data container for DRXJ specific data.
@@ -406,93 +406,93 @@ struct drxj_cfg_atv_output {
406*/ 406*/
407 struct drxj_data { 407 struct drxj_data {
408 /* device capabilties (determined during drx_open()) */ 408 /* device capabilties (determined during drx_open()) */
409 bool has_lna; /**< true if LNA (aka PGA) present */ 409 bool has_lna; /*< true if LNA (aka PGA) present */
410 bool has_oob; /**< true if OOB supported */ 410 bool has_oob; /*< true if OOB supported */
411 bool has_ntsc; /**< true if NTSC supported */ 411 bool has_ntsc; /*< true if NTSC supported */
412 bool has_btsc; /**< true if BTSC supported */ 412 bool has_btsc; /*< true if BTSC supported */
413 bool has_smatx; /**< true if mat_tx is available */ 413 bool has_smatx; /*< true if mat_tx is available */
414 bool has_smarx; /**< true if mat_rx is available */ 414 bool has_smarx; /*< true if mat_rx is available */
415 bool has_gpio; /**< true if GPIO is available */ 415 bool has_gpio; /*< true if GPIO is available */
416 bool has_irqn; /**< true if IRQN is available */ 416 bool has_irqn; /*< true if IRQN is available */
417 /* A1/A2/A... */ 417 /* A1/A2/A... */
418 u8 mfx; /**< metal fix */ 418 u8 mfx; /*< metal fix */
419 419
420 /* tuner settings */ 420 /* tuner settings */
421 bool mirror_freq_spect_oob;/**< tuner inversion (true = tuner mirrors the signal */ 421 bool mirror_freq_spect_oob;/*< tuner inversion (true = tuner mirrors the signal */
422 422
423 /* standard/channel settings */ 423 /* standard/channel settings */
424 enum drx_standard standard; /**< current standard information */ 424 enum drx_standard standard; /*< current standard information */
425 enum drx_modulation constellation; 425 enum drx_modulation constellation;
426 /**< current constellation */ 426 /*< current constellation */
427 s32 frequency; /**< center signal frequency in KHz */ 427 s32 frequency; /*< center signal frequency in KHz */
428 enum drx_bandwidth curr_bandwidth; 428 enum drx_bandwidth curr_bandwidth;
429 /**< current channel bandwidth */ 429 /*< current channel bandwidth */
430 enum drx_mirror mirror; /**< current channel mirror */ 430 enum drx_mirror mirror; /*< current channel mirror */
431 431
432 /* signal quality information */ 432 /* signal quality information */
433 u32 fec_bits_desired; /**< BER accounting period */ 433 u32 fec_bits_desired; /*< BER accounting period */
434 u16 fec_vd_plen; /**< no of trellis symbols: VD SER measurement period */ 434 u16 fec_vd_plen; /*< no of trellis symbols: VD SER measurement period */
435 u16 qam_vd_prescale; /**< Viterbi Measurement Prescale */ 435 u16 qam_vd_prescale; /*< Viterbi Measurement Prescale */
436 u16 qam_vd_period; /**< Viterbi Measurement period */ 436 u16 qam_vd_period; /*< Viterbi Measurement period */
437 u16 fec_rs_plen; /**< defines RS BER measurement period */ 437 u16 fec_rs_plen; /*< defines RS BER measurement period */
438 u16 fec_rs_prescale; /**< ReedSolomon Measurement Prescale */ 438 u16 fec_rs_prescale; /*< ReedSolomon Measurement Prescale */
439 u16 fec_rs_period; /**< ReedSolomon Measurement period */ 439 u16 fec_rs_period; /*< ReedSolomon Measurement period */
440 bool reset_pkt_err_acc; /**< Set a flag to reset accumulated packet error */ 440 bool reset_pkt_err_acc; /*< Set a flag to reset accumulated packet error */
441 u16 pkt_err_acc_start; /**< Set a flag to reset accumulated packet error */ 441 u16 pkt_err_acc_start; /*< Set a flag to reset accumulated packet error */
442 442
443 /* HI configuration */ 443 /* HI configuration */
444 u16 hi_cfg_timing_div; /**< HI Configure() parameter 2 */ 444 u16 hi_cfg_timing_div; /*< HI Configure() parameter 2 */
445 u16 hi_cfg_bridge_delay; /**< HI Configure() parameter 3 */ 445 u16 hi_cfg_bridge_delay; /*< HI Configure() parameter 3 */
446 u16 hi_cfg_wake_up_key; /**< HI Configure() parameter 4 */ 446 u16 hi_cfg_wake_up_key; /*< HI Configure() parameter 4 */
447 u16 hi_cfg_ctrl; /**< HI Configure() parameter 5 */ 447 u16 hi_cfg_ctrl; /*< HI Configure() parameter 5 */
448 u16 hi_cfg_transmit; /**< HI Configure() parameter 6 */ 448 u16 hi_cfg_transmit; /*< HI Configure() parameter 6 */
449 449
450 /* UIO configuration */ 450 /* UIO configuration */
451 enum drxuio_mode uio_sma_rx_mode;/**< current mode of SmaRx pin */ 451 enum drxuio_mode uio_sma_rx_mode;/*< current mode of SmaRx pin */
452 enum drxuio_mode uio_sma_tx_mode;/**< current mode of SmaTx pin */ 452 enum drxuio_mode uio_sma_tx_mode;/*< current mode of SmaTx pin */
453 enum drxuio_mode uio_gpio_mode; /**< current mode of ASEL pin */ 453 enum drxuio_mode uio_gpio_mode; /*< current mode of ASEL pin */
454 enum drxuio_mode uio_irqn_mode; /**< current mode of IRQN pin */ 454 enum drxuio_mode uio_irqn_mode; /*< current mode of IRQN pin */
455 455
456 /* IQM fs frequecy shift and inversion */ 456 /* IQM fs frequecy shift and inversion */
457 u32 iqm_fs_rate_ofs; /**< frequency shifter setting after setchannel */ 457 u32 iqm_fs_rate_ofs; /*< frequency shifter setting after setchannel */
458 bool pos_image; /**< Ture: positive image */ 458 bool pos_image; /*< Ture: positive image */
459 /* IQM RC frequecy shift */ 459 /* IQM RC frequecy shift */
460 u32 iqm_rc_rate_ofs; /**< frequency shifter setting after setchannel */ 460 u32 iqm_rc_rate_ofs; /*< frequency shifter setting after setchannel */
461 461
462 /* ATV configuration */ 462 /* ATV configuration */
463 u32 atv_cfg_changed_flags; /**< flag: flags cfg changes */ 463 u32 atv_cfg_changed_flags; /*< flag: flags cfg changes */
464 s16 atv_top_equ0[DRXJ_COEF_IDX_MAX]; /**< shadow of ATV_TOP_EQU0__A */ 464 s16 atv_top_equ0[DRXJ_COEF_IDX_MAX]; /*< shadow of ATV_TOP_EQU0__A */
465 s16 atv_top_equ1[DRXJ_COEF_IDX_MAX]; /**< shadow of ATV_TOP_EQU1__A */ 465 s16 atv_top_equ1[DRXJ_COEF_IDX_MAX]; /*< shadow of ATV_TOP_EQU1__A */
466 s16 atv_top_equ2[DRXJ_COEF_IDX_MAX]; /**< shadow of ATV_TOP_EQU2__A */ 466 s16 atv_top_equ2[DRXJ_COEF_IDX_MAX]; /*< shadow of ATV_TOP_EQU2__A */
467 s16 atv_top_equ3[DRXJ_COEF_IDX_MAX]; /**< shadow of ATV_TOP_EQU3__A */ 467 s16 atv_top_equ3[DRXJ_COEF_IDX_MAX]; /*< shadow of ATV_TOP_EQU3__A */
468 bool phase_correction_bypass;/**< flag: true=bypass */ 468 bool phase_correction_bypass;/*< flag: true=bypass */
469 s16 atv_top_vid_peak; /**< shadow of ATV_TOP_VID_PEAK__A */ 469 s16 atv_top_vid_peak; /*< shadow of ATV_TOP_VID_PEAK__A */
470 u16 atv_top_noise_th; /**< shadow of ATV_TOP_NOISE_TH__A */ 470 u16 atv_top_noise_th; /*< shadow of ATV_TOP_NOISE_TH__A */
471 bool enable_cvbs_output; /**< flag CVBS ouput enable */ 471 bool enable_cvbs_output; /*< flag CVBS ouput enable */
472 bool enable_sif_output; /**< flag SIF ouput enable */ 472 bool enable_sif_output; /*< flag SIF ouput enable */
473 enum drxjsif_attenuation sif_attenuation; 473 enum drxjsif_attenuation sif_attenuation;
474 /**< current SIF att setting */ 474 /*< current SIF att setting */
475 /* Agc configuration for QAM and VSB */ 475 /* Agc configuration for QAM and VSB */
476 struct drxj_cfg_agc qam_rf_agc_cfg; /**< qam RF AGC config */ 476 struct drxj_cfg_agc qam_rf_agc_cfg; /*< qam RF AGC config */
477 struct drxj_cfg_agc qam_if_agc_cfg; /**< qam IF AGC config */ 477 struct drxj_cfg_agc qam_if_agc_cfg; /*< qam IF AGC config */
478 struct drxj_cfg_agc vsb_rf_agc_cfg; /**< vsb RF AGC config */ 478 struct drxj_cfg_agc vsb_rf_agc_cfg; /*< vsb RF AGC config */
479 struct drxj_cfg_agc vsb_if_agc_cfg; /**< vsb IF AGC config */ 479 struct drxj_cfg_agc vsb_if_agc_cfg; /*< vsb IF AGC config */
480 480
481 /* PGA gain configuration for QAM and VSB */ 481 /* PGA gain configuration for QAM and VSB */
482 u16 qam_pga_cfg; /**< qam PGA config */ 482 u16 qam_pga_cfg; /*< qam PGA config */
483 u16 vsb_pga_cfg; /**< vsb PGA config */ 483 u16 vsb_pga_cfg; /*< vsb PGA config */
484 484
485 /* Pre SAW configuration for QAM and VSB */ 485 /* Pre SAW configuration for QAM and VSB */
486 struct drxj_cfg_pre_saw qam_pre_saw_cfg; 486 struct drxj_cfg_pre_saw qam_pre_saw_cfg;
487 /**< qam pre SAW config */ 487 /*< qam pre SAW config */
488 struct drxj_cfg_pre_saw vsb_pre_saw_cfg; 488 struct drxj_cfg_pre_saw vsb_pre_saw_cfg;
489 /**< qam pre SAW config */ 489 /*< qam pre SAW config */
490 490
491 /* Version information */ 491 /* Version information */
492 char v_text[2][12]; /**< allocated text versions */ 492 char v_text[2][12]; /*< allocated text versions */
493 struct drx_version v_version[2]; /**< allocated versions structs */ 493 struct drx_version v_version[2]; /*< allocated versions structs */
494 struct drx_version_list v_list_elements[2]; 494 struct drx_version_list v_list_elements[2];
495 /**< allocated version list */ 495 /*< allocated version list */
496 496
497 /* smart antenna configuration */ 497 /* smart antenna configuration */
498 bool smart_ant_inverted; 498 bool smart_ant_inverted;
@@ -502,25 +502,25 @@ struct drxj_cfg_atv_output {
502 bool oob_power_on; 502 bool oob_power_on;
503 503
504 /* MPEG static bitrate setting */ 504 /* MPEG static bitrate setting */
505 u32 mpeg_ts_static_bitrate; /**< bitrate static MPEG output */ 505 u32 mpeg_ts_static_bitrate; /*< bitrate static MPEG output */
506 bool disable_te_ihandling; /**< MPEG TS TEI handling */ 506 bool disable_te_ihandling; /*< MPEG TS TEI handling */
507 bool bit_reverse_mpeg_outout;/**< MPEG output bit order */ 507 bool bit_reverse_mpeg_outout;/*< MPEG output bit order */
508 enum drxj_mpeg_output_clock_rate mpeg_output_clock_rate; 508 enum drxj_mpeg_output_clock_rate mpeg_output_clock_rate;
509 /**< MPEG output clock rate */ 509 /*< MPEG output clock rate */
510 enum drxj_mpeg_start_width mpeg_start_width; 510 enum drxj_mpeg_start_width mpeg_start_width;
511 /**< MPEG Start width */ 511 /*< MPEG Start width */
512 512
513 /* Pre SAW & Agc configuration for ATV */ 513 /* Pre SAW & Agc configuration for ATV */
514 struct drxj_cfg_pre_saw atv_pre_saw_cfg; 514 struct drxj_cfg_pre_saw atv_pre_saw_cfg;
515 /**< atv pre SAW config */ 515 /*< atv pre SAW config */
516 struct drxj_cfg_agc atv_rf_agc_cfg; /**< atv RF AGC config */ 516 struct drxj_cfg_agc atv_rf_agc_cfg; /*< atv RF AGC config */
517 struct drxj_cfg_agc atv_if_agc_cfg; /**< atv IF AGC config */ 517 struct drxj_cfg_agc atv_if_agc_cfg; /*< atv IF AGC config */
518 u16 atv_pga_cfg; /**< atv pga config */ 518 u16 atv_pga_cfg; /*< atv pga config */
519 519
520 u32 curr_symbol_rate; 520 u32 curr_symbol_rate;
521 521
522 /* pin-safe mode */ 522 /* pin-safe mode */
523 bool pdr_safe_mode; /**< PDR safe mode activated */ 523 bool pdr_safe_mode; /*< PDR safe mode activated */
524 u16 pdr_safe_restore_val_gpio; 524 u16 pdr_safe_restore_val_gpio;
525 u16 pdr_safe_restore_val_v_sync; 525 u16 pdr_safe_restore_val_v_sync;
526 u16 pdr_safe_restore_val_sma_rx; 526 u16 pdr_safe_restore_val_sma_rx;
@@ -531,12 +531,12 @@ struct drxj_cfg_atv_output {
531 enum drxj_cfg_oob_lo_power oob_lo_pow; 531 enum drxj_cfg_oob_lo_power oob_lo_pow;
532 532
533 struct drx_aud_data aud_data; 533 struct drx_aud_data aud_data;
534 /**< audio storage */}; 534 /*< audio storage */};
535 535
536/*------------------------------------------------------------------------- 536/*-------------------------------------------------------------------------
537Access MACROS 537Access MACROS
538-------------------------------------------------------------------------*/ 538-------------------------------------------------------------------------*/
539/** 539/*
540* \brief Compilable references to attributes 540* \brief Compilable references to attributes
541* \param d pointer to demod instance 541* \param d pointer to demod instance
542* 542*
@@ -554,7 +554,7 @@ Access MACROS
554DEFINES 554DEFINES
555-------------------------------------------------------------------------*/ 555-------------------------------------------------------------------------*/
556 556
557/** 557/*
558* \def DRXJ_NTSC_CARRIER_FREQ_OFFSET 558* \def DRXJ_NTSC_CARRIER_FREQ_OFFSET
559* \brief Offset from picture carrier to centre frequency in kHz, in RF domain 559* \brief Offset from picture carrier to centre frequency in kHz, in RF domain
560* 560*
@@ -569,7 +569,7 @@ DEFINES
569*/ 569*/
570#define DRXJ_NTSC_CARRIER_FREQ_OFFSET ((s32)(1750)) 570#define DRXJ_NTSC_CARRIER_FREQ_OFFSET ((s32)(1750))
571 571
572/** 572/*
573* \def DRXJ_PAL_SECAM_BG_CARRIER_FREQ_OFFSET 573* \def DRXJ_PAL_SECAM_BG_CARRIER_FREQ_OFFSET
574* \brief Offset from picture carrier to centre frequency in kHz, in RF domain 574* \brief Offset from picture carrier to centre frequency in kHz, in RF domain
575* 575*
@@ -585,7 +585,7 @@ DEFINES
585*/ 585*/
586#define DRXJ_PAL_SECAM_BG_CARRIER_FREQ_OFFSET ((s32)(2375)) 586#define DRXJ_PAL_SECAM_BG_CARRIER_FREQ_OFFSET ((s32)(2375))
587 587
588/** 588/*
589* \def DRXJ_PAL_SECAM_DKIL_CARRIER_FREQ_OFFSET 589* \def DRXJ_PAL_SECAM_DKIL_CARRIER_FREQ_OFFSET
590* \brief Offset from picture carrier to centre frequency in kHz, in RF domain 590* \brief Offset from picture carrier to centre frequency in kHz, in RF domain
591* 591*
@@ -601,7 +601,7 @@ DEFINES
601*/ 601*/
602#define DRXJ_PAL_SECAM_DKIL_CARRIER_FREQ_OFFSET ((s32)(2775)) 602#define DRXJ_PAL_SECAM_DKIL_CARRIER_FREQ_OFFSET ((s32)(2775))
603 603
604/** 604/*
605* \def DRXJ_PAL_SECAM_LP_CARRIER_FREQ_OFFSET 605* \def DRXJ_PAL_SECAM_LP_CARRIER_FREQ_OFFSET
606* \brief Offset from picture carrier to centre frequency in kHz, in RF domain 606* \brief Offset from picture carrier to centre frequency in kHz, in RF domain
607* 607*
@@ -616,7 +616,7 @@ DEFINES
616*/ 616*/
617#define DRXJ_PAL_SECAM_LP_CARRIER_FREQ_OFFSET ((s32)(-3255)) 617#define DRXJ_PAL_SECAM_LP_CARRIER_FREQ_OFFSET ((s32)(-3255))
618 618
619/** 619/*
620* \def DRXJ_FM_CARRIER_FREQ_OFFSET 620* \def DRXJ_FM_CARRIER_FREQ_OFFSET
621* \brief Offset from sound carrier to centre frequency in kHz, in RF domain 621* \brief Offset from sound carrier to centre frequency in kHz, in RF domain
622* 622*
diff --git a/drivers/media/dvb-frontends/drxk.h b/drivers/media/dvb-frontends/drxk.h
index eb9bdc9f59c4..b16fedbb53a3 100644
--- a/drivers/media/dvb-frontends/drxk.h
+++ b/drivers/media/dvb-frontends/drxk.h
@@ -20,17 +20,18 @@
20 * @antenna_dvbt: GPIO bit for changing antenna to DVB-C. A value of 1 20 * @antenna_dvbt: GPIO bit for changing antenna to DVB-C. A value of 1
21 * means that 1=DVBC, 0 = DVBT. Zero means the opposite. 21 * means that 1=DVBC, 0 = DVBT. Zero means the opposite.
22 * @mpeg_out_clk_strength: DRXK Mpeg output clock drive strength. 22 * @mpeg_out_clk_strength: DRXK Mpeg output clock drive strength.
23 * @chunk_size: maximum size for I2C messages
23 * @microcode_name: Name of the firmware file with the microcode 24 * @microcode_name: Name of the firmware file with the microcode
24 * @qam_demod_parameter_count: The number of parameters used for the command 25 * @qam_demod_parameter_count: The number of parameters used for the command
25 * to set the demodulator parameters. All 26 * to set the demodulator parameters. All
26 * firmwares are using the 2-parameter commmand. 27 * firmwares are using the 2-parameter commmand.
27 * An exception is the "drxk_a3.mc" firmware, 28 * An exception is the ``drxk_a3.mc`` firmware,
28 * which uses the 4-parameter command. 29 * which uses the 4-parameter command.
29 * A value of 0 (default) or lower indicates that 30 * A value of 0 (default) or lower indicates that
30 * the correct number of parameters will be 31 * the correct number of parameters will be
31 * automatically detected. 32 * automatically detected.
32 * 33 *
33 * On the *_gpio vars, bit 0 is UIO-1, bit 1 is UIO-2 and bit 2 is 34 * On the ``*_gpio`` vars, bit 0 is UIO-1, bit 1 is UIO-2 and bit 2 is
34 * UIO-3. 35 * UIO-3.
35 */ 36 */
36struct drxk_config { 37struct drxk_config {
@@ -52,6 +53,14 @@ struct drxk_config {
52}; 53};
53 54
54#if IS_REACHABLE(CONFIG_DVB_DRXK) 55#if IS_REACHABLE(CONFIG_DVB_DRXK)
56/**
57 * Attach a drxk demod
58 *
59 * @config: pointer to &struct drxk_config with demod configuration.
60 * @i2c: i2c adapter to use.
61 *
62 * return: FE pointer on success, NULL on failure.
63 */
55extern struct dvb_frontend *drxk_attach(const struct drxk_config *config, 64extern struct dvb_frontend *drxk_attach(const struct drxk_config *config,
56 struct i2c_adapter *i2c); 65 struct i2c_adapter *i2c);
57#else 66#else
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index 48a8aad47a74..f59ac2e91c59 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -207,9 +207,9 @@ static inline u32 log10times100(u32 value)
207 return (100L * intlog10(value)) >> 24; 207 return (100L * intlog10(value)) >> 24;
208} 208}
209 209
210/****************************************************************************/ 210/***************************************************************************/
211/* I2C **********************************************************************/ 211/* I2C **********************************************************************/
212/****************************************************************************/ 212/***************************************************************************/
213 213
214static int drxk_i2c_lock(struct drxk_state *state) 214static int drxk_i2c_lock(struct drxk_state *state)
215{ 215{
@@ -3444,7 +3444,7 @@ error:
3444 3444
3445/*============================================================================*/ 3445/*============================================================================*/
3446 3446
3447/** 3447/*
3448* \brief Activate DVBT specific presets 3448* \brief Activate DVBT specific presets
3449* \param demod instance of demodulator. 3449* \param demod instance of demodulator.
3450* \return DRXStatus_t. 3450* \return DRXStatus_t.
@@ -3484,7 +3484,7 @@ error:
3484 3484
3485/*============================================================================*/ 3485/*============================================================================*/
3486 3486
3487/** 3487/*
3488* \brief Initialize channelswitch-independent settings for DVBT. 3488* \brief Initialize channelswitch-independent settings for DVBT.
3489* \param demod instance of demodulator. 3489* \param demod instance of demodulator.
3490* \return DRXStatus_t. 3490* \return DRXStatus_t.
@@ -3696,7 +3696,7 @@ error:
3696} 3696}
3697 3697
3698/*============================================================================*/ 3698/*============================================================================*/
3699/** 3699/*
3700* \brief start dvbt demodulating for channel. 3700* \brief start dvbt demodulating for channel.
3701* \param demod instance of demodulator. 3701* \param demod instance of demodulator.
3702* \return DRXStatus_t. 3702* \return DRXStatus_t.
@@ -3732,7 +3732,7 @@ error:
3732 3732
3733/*============================================================================*/ 3733/*============================================================================*/
3734 3734
3735/** 3735/*
3736* \brief Set up dvbt demodulator for channel. 3736* \brief Set up dvbt demodulator for channel.
3737* \param demod instance of demodulator. 3737* \param demod instance of demodulator.
3738* \return DRXStatus_t. 3738* \return DRXStatus_t.
@@ -4086,7 +4086,7 @@ error:
4086 4086
4087/*============================================================================*/ 4087/*============================================================================*/
4088 4088
4089/** 4089/*
4090* \brief Retrieve lock status . 4090* \brief Retrieve lock status .
4091* \param demod Pointer to demodulator instance. 4091* \param demod Pointer to demodulator instance.
4092* \param lockStat Pointer to lock status structure. 4092* \param lockStat Pointer to lock status structure.
@@ -4148,7 +4148,7 @@ static int power_up_qam(struct drxk_state *state)
4148} 4148}
4149 4149
4150 4150
4151/** Power Down QAM */ 4151/* Power Down QAM */
4152static int power_down_qam(struct drxk_state *state) 4152static int power_down_qam(struct drxk_state *state)
4153{ 4153{
4154 u16 data = 0; 4154 u16 data = 0;
@@ -4186,7 +4186,7 @@ error:
4186 4186
4187/*============================================================================*/ 4187/*============================================================================*/
4188 4188
4189/** 4189/*
4190* \brief Setup of the QAM Measurement intervals for signal quality 4190* \brief Setup of the QAM Measurement intervals for signal quality
4191* \param demod instance of demod. 4191* \param demod instance of demod.
4192* \param modulation current modulation. 4192* \param modulation current modulation.
@@ -4461,7 +4461,7 @@ error:
4461 4461
4462/*============================================================================*/ 4462/*============================================================================*/
4463 4463
4464/** 4464/*
4465* \brief QAM32 specific setup 4465* \brief QAM32 specific setup
4466* \param demod instance of demod. 4466* \param demod instance of demod.
4467* \return DRXStatus_t. 4467* \return DRXStatus_t.
@@ -4657,7 +4657,7 @@ error:
4657 4657
4658/*============================================================================*/ 4658/*============================================================================*/
4659 4659
4660/** 4660/*
4661* \brief QAM64 specific setup 4661* \brief QAM64 specific setup
4662* \param demod instance of demod. 4662* \param demod instance of demod.
4663* \return DRXStatus_t. 4663* \return DRXStatus_t.
@@ -4852,7 +4852,7 @@ error:
4852 4852
4853/*============================================================================*/ 4853/*============================================================================*/
4854 4854
4855/** 4855/*
4856* \brief QAM128 specific setup 4856* \brief QAM128 specific setup
4857* \param demod: instance of demod. 4857* \param demod: instance of demod.
4858* \return DRXStatus_t. 4858* \return DRXStatus_t.
@@ -5049,7 +5049,7 @@ error:
5049 5049
5050/*============================================================================*/ 5050/*============================================================================*/
5051 5051
5052/** 5052/*
5053* \brief QAM256 specific setup 5053* \brief QAM256 specific setup
5054* \param demod: instance of demod. 5054* \param demod: instance of demod.
5055* \return DRXStatus_t. 5055* \return DRXStatus_t.
@@ -5244,7 +5244,7 @@ error:
5244 5244
5245 5245
5246/*============================================================================*/ 5246/*============================================================================*/
5247/** 5247/*
5248* \brief Reset QAM block. 5248* \brief Reset QAM block.
5249* \param demod: instance of demod. 5249* \param demod: instance of demod.
5250* \param channel: pointer to channel data. 5250* \param channel: pointer to channel data.
@@ -5272,7 +5272,7 @@ error:
5272 5272
5273/*============================================================================*/ 5273/*============================================================================*/
5274 5274
5275/** 5275/*
5276* \brief Set QAM symbolrate. 5276* \brief Set QAM symbolrate.
5277* \param demod: instance of demod. 5277* \param demod: instance of demod.
5278* \param channel: pointer to channel data. 5278* \param channel: pointer to channel data.
@@ -5341,7 +5341,7 @@ error:
5341 5341
5342/*============================================================================*/ 5342/*============================================================================*/
5343 5343
5344/** 5344/*
5345* \brief Get QAM lock status. 5345* \brief Get QAM lock status.
5346* \param demod: instance of demod. 5346* \param demod: instance of demod.
5347* \param channel: pointer to channel data. 5347* \param channel: pointer to channel data.
diff --git a/drivers/media/dvb-frontends/dvb-pll.h b/drivers/media/dvb-frontends/dvb-pll.h
index 6aaa9c6bff9c..212e0730f154 100644
--- a/drivers/media/dvb-frontends/dvb-pll.h
+++ b/drivers/media/dvb-frontends/dvb-pll.h
@@ -30,16 +30,17 @@
30#define DVB_PLL_TDEE4 18 30#define DVB_PLL_TDEE4 18
31#define DVB_PLL_THOMSON_DTT7520X 19 31#define DVB_PLL_THOMSON_DTT7520X 19
32 32
33#if IS_REACHABLE(CONFIG_DVB_PLL)
33/** 34/**
34 * Attach a dvb-pll to the supplied frontend structure. 35 * Attach a dvb-pll to the supplied frontend structure.
35 * 36 *
36 * @param fe Frontend to attach to. 37 * @fe: Frontend to attach to.
37 * @param pll_addr i2c address of the PLL (if used). 38 * @pll_addr: i2c address of the PLL (if used).
38 * @param i2c i2c adapter to use (set to NULL if not used). 39 * @i2c: i2c adapter to use (set to NULL if not used).
39 * @param pll_desc_id dvb_pll_desc to use. 40 * @pll_desc_id: dvb_pll_desc to use.
40 * @return Frontend pointer on success, NULL on failure 41 *
42 * return: Frontend pointer on success, NULL on failure
41 */ 43 */
42#if IS_REACHABLE(CONFIG_DVB_PLL)
43extern struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, 44extern struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe,
44 int pll_addr, 45 int pll_addr,
45 struct i2c_adapter *i2c, 46 struct i2c_adapter *i2c,
diff --git a/drivers/media/dvb-frontends/helene.h b/drivers/media/dvb-frontends/helene.h
index 333615491d9e..c9fc81c7e4e7 100644
--- a/drivers/media/dvb-frontends/helene.h
+++ b/drivers/media/dvb-frontends/helene.h
@@ -38,6 +38,7 @@ enum helene_xtal {
38 * @set_tuner_priv: Callback function private context 38 * @set_tuner_priv: Callback function private context
39 * @set_tuner_callback: Callback function that notifies the parent driver 39 * @set_tuner_callback: Callback function that notifies the parent driver
40 * which tuner is active now 40 * which tuner is active now
41 * @xtal: Cristal frequency as described by &enum helene_xtal
41 */ 42 */
42struct helene_config { 43struct helene_config {
43 u8 i2c_address; 44 u8 i2c_address;
@@ -48,9 +49,31 @@ struct helene_config {
48}; 49};
49 50
50#if IS_REACHABLE(CONFIG_DVB_HELENE) 51#if IS_REACHABLE(CONFIG_DVB_HELENE)
52/**
53 * Attach a helene tuner (terrestrial and cable standards)
54 *
55 * @fe: frontend to be attached
56 * @config: pointer to &struct helene_config with tuner configuration.
57 * @i2c: i2c adapter to use.
58 *
59 * return: FE pointer on success, NULL on failure.
60 */
51extern struct dvb_frontend *helene_attach(struct dvb_frontend *fe, 61extern struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
52 const struct helene_config *config, 62 const struct helene_config *config,
53 struct i2c_adapter *i2c); 63 struct i2c_adapter *i2c);
64
65/**
66 * Attach a helene tuner (satellite standards)
67 *
68 * @fe: frontend to be attached
69 * @config: pointer to &struct helene_config with tuner configuration.
70 * @i2c: i2c adapter to use.
71 *
72 * return: FE pointer on success, NULL on failure.
73 */
74extern struct dvb_frontend *helene_attach_s(struct dvb_frontend *fe,
75 const struct helene_config *config,
76 struct i2c_adapter *i2c);
54#else 77#else
55static inline struct dvb_frontend *helene_attach(struct dvb_frontend *fe, 78static inline struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
56 const struct helene_config *config, 79 const struct helene_config *config,
@@ -59,13 +82,6 @@ static inline struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
59 pr_warn("%s: driver disabled by Kconfig\n", __func__); 82 pr_warn("%s: driver disabled by Kconfig\n", __func__);
60 return NULL; 83 return NULL;
61} 84}
62#endif
63
64#if IS_REACHABLE(CONFIG_DVB_HELENE)
65extern struct dvb_frontend *helene_attach_s(struct dvb_frontend *fe,
66 const struct helene_config *config,
67 struct i2c_adapter *i2c);
68#else
69static inline struct dvb_frontend *helene_attach_s(struct dvb_frontend *fe, 85static inline struct dvb_frontend *helene_attach_s(struct dvb_frontend *fe,
70 const struct helene_config *config, 86 const struct helene_config *config,
71 struct i2c_adapter *i2c) 87 struct i2c_adapter *i2c)
diff --git a/drivers/media/dvb-frontends/horus3a.h b/drivers/media/dvb-frontends/horus3a.h
index 672a556df71a..9157fd037e2f 100644
--- a/drivers/media/dvb-frontends/horus3a.h
+++ b/drivers/media/dvb-frontends/horus3a.h
@@ -41,6 +41,15 @@ struct horus3a_config {
41}; 41};
42 42
43#if IS_REACHABLE(CONFIG_DVB_HORUS3A) 43#if IS_REACHABLE(CONFIG_DVB_HORUS3A)
44/**
45 * Attach a horus3a tuner
46 *
47 * @fe: frontend to be attached
48 * @config: pointer to &struct helene_config with tuner configuration.
49 * @i2c: i2c adapter to use.
50 *
51 * return: FE pointer on success, NULL on failure.
52 */
44extern struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe, 53extern struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe,
45 const struct horus3a_config *config, 54 const struct horus3a_config *config,
46 struct i2c_adapter *i2c); 55 struct i2c_adapter *i2c);
diff --git a/drivers/media/dvb-frontends/ix2505v.c b/drivers/media/dvb-frontends/ix2505v.c
index 534b24fa2b95..965012ad5c59 100644
--- a/drivers/media/dvb-frontends/ix2505v.c
+++ b/drivers/media/dvb-frontends/ix2505v.c
@@ -1,4 +1,4 @@
1/** 1/*
2 * Driver for Sharp IX2505V (marked B0017) DVB-S silicon tuner 2 * Driver for Sharp IX2505V (marked B0017) DVB-S silicon tuner
3 * 3 *
4 * Copyright (C) 2010 Malcolm Priestley 4 * Copyright (C) 2010 Malcolm Priestley
@@ -36,7 +36,7 @@ struct ix2505v_state {
36 u32 frequency; 36 u32 frequency;
37}; 37};
38 38
39/** 39/*
40 * Data read format of the Sharp IX2505V B0017 40 * Data read format of the Sharp IX2505V B0017
41 * 41 *
42 * byte1: 1 | 1 | 0 | 0 | 0 | MA1 | MA0 | 1 42 * byte1: 1 | 1 | 0 | 0 | 0 | MA1 | MA0 | 1
@@ -99,7 +99,7 @@ static void ix2505v_release(struct dvb_frontend *fe)
99 99
100} 100}
101 101
102/** 102/*
103 * Data write format of the Sharp IX2505V B0017 103 * Data write format of the Sharp IX2505V B0017
104 * 104 *
105 * byte1: 1 | 1 | 0 | 0 | 0 | 0(MA1)| 0(MA0)| 0 105 * byte1: 1 | 1 | 0 | 0 | 0 | 0(MA1)| 0(MA0)| 0
diff --git a/drivers/media/dvb-frontends/ix2505v.h b/drivers/media/dvb-frontends/ix2505v.h
index 0b0a431c74f6..49ed93e754ed 100644
--- a/drivers/media/dvb-frontends/ix2505v.h
+++ b/drivers/media/dvb-frontends/ix2505v.h
@@ -20,31 +20,33 @@
20#include "dvb_frontend.h" 20#include "dvb_frontend.h"
21 21
22/** 22/**
23 * Attach a ix2505v tuner to the supplied frontend structure. 23 * struct ix2505v_config - ix2505 attachment configuration
24 * 24 *
25 * @param fe Frontend to attach to. 25 * @tuner_address: tuner address
26 * @param config ix2505v_config structure 26 * @tuner_gain: Baseband AMP gain control 0/1=0dB(default) 2=-2bB 3=-4dB
27 * @return FE pointer on success, NULL on failure. 27 * @tuner_chargepump: Charge pump output +/- 0=120 1=260 2=555 3=1200(default)
28 * @min_delay_ms: delay after tune
29 * @tuner_write_only: disables reads
28 */ 30 */
29
30struct ix2505v_config { 31struct ix2505v_config {
31 u8 tuner_address; 32 u8 tuner_address;
32
33 /*Baseband AMP gain control 0/1=0dB(default) 2=-2bB 3=-4dB */
34 u8 tuner_gain; 33 u8 tuner_gain;
35
36 /*Charge pump output +/- 0=120 1=260 2=555 3=1200(default) */
37 u8 tuner_chargepump; 34 u8 tuner_chargepump;
38
39 /* delay after tune */
40 int min_delay_ms; 35 int min_delay_ms;
41
42 /* disables reads*/
43 u8 tuner_write_only; 36 u8 tuner_write_only;
44 37
45}; 38};
46 39
47#if IS_REACHABLE(CONFIG_DVB_IX2505V) 40#if IS_REACHABLE(CONFIG_DVB_IX2505V)
41/**
42 * Attach a ix2505v tuner to the supplied frontend structure.
43 *
44 * @fe: Frontend to attach to.
45 * @config: pointer to &struct ix2505v_config
46 * @i2c: pointer to &struct i2c_adapter.
47 *
48 * return: FE pointer on success, NULL on failure.
49 */
48extern struct dvb_frontend *ix2505v_attach(struct dvb_frontend *fe, 50extern struct dvb_frontend *ix2505v_attach(struct dvb_frontend *fe,
49 const struct ix2505v_config *config, struct i2c_adapter *i2c); 51 const struct ix2505v_config *config, struct i2c_adapter *i2c);
50#else 52#else
diff --git a/drivers/media/dvb-frontends/l64781.c b/drivers/media/dvb-frontends/l64781.c
index 68923c84679a..e5a6c1766664 100644
--- a/drivers/media/dvb-frontends/l64781.c
+++ b/drivers/media/dvb-frontends/l64781.c
@@ -517,7 +517,7 @@ struct dvb_frontend* l64781_attach(const struct l64781_config* config,
517 state->i2c = i2c; 517 state->i2c = i2c;
518 state->first = 1; 518 state->first = 1;
519 519
520 /** 520 /*
521 * the L64781 won't show up before we send the reset_and_configure() 521 * the L64781 won't show up before we send the reset_and_configure()
522 * broadcast. If nothing responds there is no L64781 on the bus... 522 * broadcast. If nothing responds there is no L64781 on the bus...
523 */ 523 */
diff --git a/drivers/media/dvb-frontends/m88ds3103.h b/drivers/media/dvb-frontends/m88ds3103.h
index 04b355a005fb..1a8964a2265d 100644
--- a/drivers/media/dvb-frontends/m88ds3103.h
+++ b/drivers/media/dvb-frontends/m88ds3103.h
@@ -25,6 +25,34 @@
25 */ 25 */
26 26
27/** 27/**
28 * enum m88ds3103_ts_mode - TS connection mode
29 * @M88DS3103_TS_SERIAL: TS output pin D0, normal
30 * @M88DS3103_TS_SERIAL_D7: TS output pin D7
31 * @M88DS3103_TS_PARALLEL: TS Parallel mode
32 * @M88DS3103_TS_CI: TS CI Mode
33 */
34enum m88ds3103_ts_mode {
35 M88DS3103_TS_SERIAL,
36 M88DS3103_TS_SERIAL_D7,
37 M88DS3103_TS_PARALLEL,
38 M88DS3103_TS_CI
39};
40
41/**
42 * enum m88ds3103_clock_out
43 * @M88DS3103_CLOCK_OUT_DISABLED: Clock output is disabled
44 * @M88DS3103_CLOCK_OUT_ENABLED: Clock output is enabled with crystal
45 * clock.
46 * @M88DS3103_CLOCK_OUT_ENABLED_DIV2: Clock output is enabled with half
47 * crystal clock.
48 */
49enum m88ds3103_clock_out {
50 M88DS3103_CLOCK_OUT_DISABLED,
51 M88DS3103_CLOCK_OUT_ENABLED,
52 M88DS3103_CLOCK_OUT_ENABLED_DIV2
53};
54
55/**
28 * struct m88ds3103_platform_data - Platform data for the m88ds3103 driver 56 * struct m88ds3103_platform_data - Platform data for the m88ds3103 driver
29 * @clk: Clock frequency. 57 * @clk: Clock frequency.
30 * @i2c_wr_max: Max bytes I2C adapter can write at once. 58 * @i2c_wr_max: Max bytes I2C adapter can write at once.
@@ -44,24 +72,16 @@
44 * @get_dvb_frontend: Get DVB frontend. 72 * @get_dvb_frontend: Get DVB frontend.
45 * @get_i2c_adapter: Get I2C adapter. 73 * @get_i2c_adapter: Get I2C adapter.
46 */ 74 */
47
48struct m88ds3103_platform_data { 75struct m88ds3103_platform_data {
49 u32 clk; 76 u32 clk;
50 u16 i2c_wr_max; 77 u16 i2c_wr_max;
51#define M88DS3103_TS_SERIAL 0 /* TS output pin D0, normal */ 78 enum m88ds3103_ts_mode ts_mode;
52#define M88DS3103_TS_SERIAL_D7 1 /* TS output pin D7 */
53#define M88DS3103_TS_PARALLEL 2 /* TS Parallel mode */
54#define M88DS3103_TS_CI 3 /* TS CI Mode */
55 u8 ts_mode:2;
56 u32 ts_clk; 79 u32 ts_clk;
80 enum m88ds3103_clock_out clk_out;
57 u8 ts_clk_pol:1; 81 u8 ts_clk_pol:1;
58 u8 spec_inv:1; 82 u8 spec_inv:1;
59 u8 agc; 83 u8 agc;
60 u8 agc_inv:1; 84 u8 agc_inv:1;
61#define M88DS3103_CLOCK_OUT_DISABLED 0
62#define M88DS3103_CLOCK_OUT_ENABLED 1
63#define M88DS3103_CLOCK_OUT_ENABLED_DIV2 2
64 u8 clk_out:2;
65 u8 envelope_mode:1; 85 u8 envelope_mode:1;
66 u8 lnb_hv_pol:1; 86 u8 lnb_hv_pol:1;
67 u8 lnb_en_pol:1; 87 u8 lnb_en_pol:1;
@@ -73,105 +93,60 @@ struct m88ds3103_platform_data {
73 u8 attach_in_use:1; 93 u8 attach_in_use:1;
74}; 94};
75 95
76/* 96/**
77 * Do not add new m88ds3103_attach() users! Use I2C bindings instead. 97 * struct m88ds3103_config - m88ds3102 configuration
98 *
99 * @i2c_addr: I2C address. Default: none, must set. Example: 0x68, ...
100 * @clock: Device's clock. Default: none, must set. Example: 27000000
101 * @i2c_wr_max: Max bytes I2C provider is asked to write at once.
102 * Default: none, must set. Example: 33, 65, ...
103 * @ts_mode: TS output mode, as defined by &enum m88ds3103_ts_mode.
104 * Default: M88DS3103_TS_SERIAL.
105 * @ts_clk: TS clk in KHz. Default: 0.
106 * @ts_clk_pol: TS clk polarity.Default: 0.
107 * 1-active at falling edge; 0-active at rising edge.
108 * @spec_inv: Spectrum inversion. Default: 0.
109 * @agc_inv: AGC polarity. Default: 0.
110 * @clock_out: Clock output, as defined by &enum m88ds3103_clock_out.
111 * Default: M88DS3103_CLOCK_OUT_DISABLED.
112 * @envelope_mode: DiSEqC envelope mode. Default: 0.
113 * @agc: AGC configuration. Default: none, must set.
114 * @lnb_hv_pol: LNB H/V pin polarity. Default: 0. Values:
115 * 1: pin high set to VOLTAGE_13, pin low to set VOLTAGE_18;
116 * 0: pin high set to VOLTAGE_18, pin low to set VOLTAGE_13.
117 * @lnb_en_pol: LNB enable pin polarity. Default: 0. Values:
118 * 1: pin high to enable, pin low to disable;
119 * 0: pin high to disable, pin low to enable.
78 */ 120 */
79struct m88ds3103_config { 121struct m88ds3103_config {
80 /*
81 * I2C address
82 * Default: none, must set
83 * 0x68, ...
84 */
85 u8 i2c_addr; 122 u8 i2c_addr;
86
87 /*
88 * clock
89 * Default: none, must set
90 * 27000000
91 */
92 u32 clock; 123 u32 clock;
93
94 /*
95 * max bytes I2C provider is asked to write at once
96 * Default: none, must set
97 * 33, 65, ...
98 */
99 u16 i2c_wr_max; 124 u16 i2c_wr_max;
100
101 /*
102 * TS output mode
103 * Default: M88DS3103_TS_SERIAL
104 */
105#define M88DS3103_TS_SERIAL 0 /* TS output pin D0, normal */
106#define M88DS3103_TS_SERIAL_D7 1 /* TS output pin D7 */
107#define M88DS3103_TS_PARALLEL 2 /* TS Parallel mode */
108#define M88DS3103_TS_CI 3 /* TS CI Mode */
109 u8 ts_mode; 125 u8 ts_mode;
110
111 /*
112 * TS clk in KHz
113 * Default: 0.
114 */
115 u32 ts_clk; 126 u32 ts_clk;
116
117 /*
118 * TS clk polarity.
119 * Default: 0. 1-active at falling edge; 0-active at rising edge.
120 */
121 u8 ts_clk_pol:1; 127 u8 ts_clk_pol:1;
122
123 /*
124 * spectrum inversion
125 * Default: 0
126 */
127 u8 spec_inv:1; 128 u8 spec_inv:1;
128
129 /*
130 * AGC polarity
131 * Default: 0
132 */
133 u8 agc_inv:1; 129 u8 agc_inv:1;
134
135 /*
136 * clock output
137 * Default: M88DS3103_CLOCK_OUT_DISABLED
138 */
139#define M88DS3103_CLOCK_OUT_DISABLED 0
140#define M88DS3103_CLOCK_OUT_ENABLED 1
141#define M88DS3103_CLOCK_OUT_ENABLED_DIV2 2
142 u8 clock_out; 130 u8 clock_out;
143
144 /*
145 * DiSEqC envelope mode
146 * Default: 0
147 */
148 u8 envelope_mode:1; 131 u8 envelope_mode:1;
149
150 /*
151 * AGC configuration
152 * Default: none, must set
153 */
154 u8 agc; 132 u8 agc;
155
156 /*
157 * LNB H/V pin polarity
158 * Default: 0.
159 * 1: pin high set to VOLTAGE_13, pin low to set VOLTAGE_18.
160 * 0: pin high set to VOLTAGE_18, pin low to set VOLTAGE_13.
161 */
162 u8 lnb_hv_pol:1; 133 u8 lnb_hv_pol:1;
163
164 /*
165 * LNB enable pin polarity
166 * Default: 0.
167 * 1: pin high to enable, pin low to disable.
168 * 0: pin high to disable, pin low to enable.
169 */
170 u8 lnb_en_pol:1; 134 u8 lnb_en_pol:1;
171}; 135};
172 136
173#if defined(CONFIG_DVB_M88DS3103) || \ 137#if defined(CONFIG_DVB_M88DS3103) || \
174 (defined(CONFIG_DVB_M88DS3103_MODULE) && defined(MODULE)) 138 (defined(CONFIG_DVB_M88DS3103_MODULE) && defined(MODULE))
139/**
140 * Attach a m88ds3103 demod
141 *
142 * @config: pointer to &struct m88ds3103_config with demod configuration.
143 * @i2c: i2c adapter to use.
144 * @tuner_i2c: on success, returns the I2C adapter associated with
145 * m88ds3103 tuner.
146 *
147 * return: FE pointer on success, NULL on failure.
148 * Note: Do not add new m88ds3103_attach() users! Use I2C bindings instead.
149 */
175extern struct dvb_frontend *m88ds3103_attach( 150extern struct dvb_frontend *m88ds3103_attach(
176 const struct m88ds3103_config *config, 151 const struct m88ds3103_config *config,
177 struct i2c_adapter *i2c, 152 struct i2c_adapter *i2c,
diff --git a/drivers/media/dvb-frontends/mb86a20s.h b/drivers/media/dvb-frontends/mb86a20s.h
index dfb02db2126c..05c9725d1c5f 100644
--- a/drivers/media/dvb-frontends/mb86a20s.h
+++ b/drivers/media/dvb-frontends/mb86a20s.h
@@ -26,7 +26,6 @@
26 * @demod_address: the demodulator's i2c address 26 * @demod_address: the demodulator's i2c address
27 * @is_serial: if true, TS is serial. Otherwise, TS is parallel 27 * @is_serial: if true, TS is serial. Otherwise, TS is parallel
28 */ 28 */
29
30struct mb86a20s_config { 29struct mb86a20s_config {
31 u32 fclk; 30 u32 fclk;
32 u8 demod_address; 31 u8 demod_address;
@@ -34,9 +33,17 @@ struct mb86a20s_config {
34}; 33};
35 34
36#if IS_REACHABLE(CONFIG_DVB_MB86A20S) 35#if IS_REACHABLE(CONFIG_DVB_MB86A20S)
36/**
37 * Attach a mb86a20s demod
38 *
39 * @config: pointer to &struct mb86a20s_config with demod configuration.
40 * @i2c: i2c adapter to use.
41 *
42 * return: FE pointer on success, NULL on failure.
43 */
37extern struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config, 44extern struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
38 struct i2c_adapter *i2c); 45 struct i2c_adapter *i2c);
39extern struct i2c_adapter *mb86a20s_get_tuner_i2c_adapter(struct dvb_frontend *); 46
40#else 47#else
41static inline struct dvb_frontend *mb86a20s_attach( 48static inline struct dvb_frontend *mb86a20s_attach(
42 const struct mb86a20s_config *config, struct i2c_adapter *i2c) 49 const struct mb86a20s_config *config, struct i2c_adapter *i2c)
@@ -44,12 +51,6 @@ static inline struct dvb_frontend *mb86a20s_attach(
44 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 51 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
45 return NULL; 52 return NULL;
46} 53}
47static inline struct i2c_adapter *
48 mb86a20s_get_tuner_i2c_adapter(struct dvb_frontend *fe)
49{
50 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
51 return NULL;
52}
53#endif 54#endif
54 55
55#endif /* MB86A20S */ 56#endif /* MB86A20S */
diff --git a/drivers/media/dvb-frontends/mn88472.h b/drivers/media/dvb-frontends/mn88472.h
index 323632523876..8cd5ef61903b 100644
--- a/drivers/media/dvb-frontends/mn88472.h
+++ b/drivers/media/dvb-frontends/mn88472.h
@@ -19,21 +19,21 @@
19 19
20#include <linux/dvb/frontend.h> 20#include <linux/dvb/frontend.h>
21 21
22/* Define old names for backward compatibility */
23#define VARIABLE_TS_CLOCK MN88472_TS_CLK_VARIABLE
24#define FIXED_TS_CLOCK MN88472_TS_CLK_FIXED
25#define SERIAL_TS_MODE MN88472_TS_MODE_SERIAL
26#define PARALLEL_TS_MODE MN88472_TS_MODE_PARALLEL
27
22/** 28/**
23 * struct mn88472_config - Platform data for the mn88472 driver 29 * struct mn88472_config - Platform data for the mn88472 driver
24 * @xtal: Clock frequency. 30 * @xtal: Clock frequency.
25 * @ts_mode: TS mode. 31 * @ts_mode: TS mode.
26 * @ts_clock: TS clock config. 32 * @ts_clock: TS clock config.
27 * @i2c_wr_max: Max number of bytes driver writes to I2C at once. 33 * @i2c_wr_max: Max number of bytes driver writes to I2C at once.
28 * @get_dvb_frontend: Get DVB frontend. 34 * @fe: pointer to a frontend pointer
35 * @get_dvb_frontend: Get DVB frontend callback.
29 */ 36 */
30
31/* Define old names for backward compatibility */
32#define VARIABLE_TS_CLOCK MN88472_TS_CLK_VARIABLE
33#define FIXED_TS_CLOCK MN88472_TS_CLK_FIXED
34#define SERIAL_TS_MODE MN88472_TS_MODE_SERIAL
35#define PARALLEL_TS_MODE MN88472_TS_MODE_PARALLEL
36
37struct mn88472_config { 37struct mn88472_config {
38 unsigned int xtal; 38 unsigned int xtal;
39 39
diff --git a/drivers/media/dvb-frontends/rtl2830.h b/drivers/media/dvb-frontends/rtl2830.h
index 0cde151e6608..458ac94e8a8b 100644
--- a/drivers/media/dvb-frontends/rtl2830.h
+++ b/drivers/media/dvb-frontends/rtl2830.h
@@ -32,7 +32,6 @@
32 * @pid_filter: Set PID to PID filter. 32 * @pid_filter: Set PID to PID filter.
33 * @pid_filter_ctrl: Control PID filter. 33 * @pid_filter_ctrl: Control PID filter.
34 */ 34 */
35
36struct rtl2830_platform_data { 35struct rtl2830_platform_data {
37 u32 clk; 36 u32 clk;
38 bool spec_inv; 37 bool spec_inv;
diff --git a/drivers/media/dvb-frontends/rtl2832.h b/drivers/media/dvb-frontends/rtl2832.h
index 03c0de039fa9..6a124ff71c2b 100644
--- a/drivers/media/dvb-frontends/rtl2832.h
+++ b/drivers/media/dvb-frontends/rtl2832.h
@@ -35,7 +35,6 @@
35 * @pid_filter: Set PID to PID filter. 35 * @pid_filter: Set PID to PID filter.
36 * @pid_filter_ctrl: Control PID filter. 36 * @pid_filter_ctrl: Control PID filter.
37 */ 37 */
38
39struct rtl2832_platform_data { 38struct rtl2832_platform_data {
40 u32 clk; 39 u32 clk;
41 /* 40 /*
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.h b/drivers/media/dvb-frontends/rtl2832_sdr.h
index d8fc7e7212e3..8f88c2fb8627 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.h
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.h
@@ -33,15 +33,11 @@
33 * struct rtl2832_sdr_platform_data - Platform data for the rtl2832_sdr driver 33 * struct rtl2832_sdr_platform_data - Platform data for the rtl2832_sdr driver
34 * @clk: Clock frequency (4000000, 16000000, 25000000, 28800000). 34 * @clk: Clock frequency (4000000, 16000000, 25000000, 28800000).
35 * @tuner: Used tuner model. 35 * @tuner: Used tuner model.
36 * @i2c_client: rtl2832 demod driver I2C client. 36 * @regmap: pointer to &struct regmap.
37 * @bulk_read: rtl2832 driver private I/O interface.
38 * @bulk_write: rtl2832 driver private I/O interface.
39 * @update_bits: rtl2832 driver private I/O interface.
40 * @dvb_frontend: rtl2832 DVB frontend. 37 * @dvb_frontend: rtl2832 DVB frontend.
41 * @v4l2_subdev: Tuner v4l2 controls. 38 * @v4l2_subdev: Tuner v4l2 controls.
42 * @dvb_usb_device: DVB USB interface for USB streaming. 39 * @dvb_usb_device: DVB USB interface for USB streaming.
43 */ 40 */
44
45struct rtl2832_sdr_platform_data { 41struct rtl2832_sdr_platform_data {
46 u32 clk; 42 u32 clk;
47 /* 43 /*
diff --git a/drivers/media/dvb-frontends/sp887x.c b/drivers/media/dvb-frontends/sp887x.c
index 7c511c3cd4ca..d2c402b52c6e 100644
--- a/drivers/media/dvb-frontends/sp887x.c
+++ b/drivers/media/dvb-frontends/sp887x.c
@@ -57,7 +57,7 @@ static int sp887x_writereg (struct sp887x_state* state, u16 reg, u16 data)
57 int ret; 57 int ret;
58 58
59 if ((ret = i2c_transfer(state->i2c, &msg, 1)) != 1) { 59 if ((ret = i2c_transfer(state->i2c, &msg, 1)) != 1) {
60 /** 60 /*
61 * in case of soft reset we ignore ACK errors... 61 * in case of soft reset we ignore ACK errors...
62 */ 62 */
63 if (!(reg == 0xf1a && data == 0x000 && 63 if (!(reg == 0xf1a && data == 0x000 &&
@@ -130,7 +130,7 @@ static void sp887x_setup_agc (struct sp887x_state* state)
130 130
131#define BLOCKSIZE 30 131#define BLOCKSIZE 30
132#define FW_SIZE 0x4000 132#define FW_SIZE 0x4000
133/** 133/*
134 * load firmware and setup MPEG interface... 134 * load firmware and setup MPEG interface...
135 */ 135 */
136static int sp887x_initial_setup (struct dvb_frontend* fe, const struct firmware *fw) 136static int sp887x_initial_setup (struct dvb_frontend* fe, const struct firmware *fw)
@@ -279,7 +279,7 @@ static int configure_reg0xc05(struct dtv_frontend_properties *p, u16 *reg0xc05)
279 return 0; 279 return 0;
280} 280}
281 281
282/** 282/*
283 * estimates division of two 24bit numbers, 283 * estimates division of two 24bit numbers,
284 * derived from the ves1820/stv0299 driver code 284 * derived from the ves1820/stv0299 driver code
285 */ 285 */
diff --git a/drivers/media/dvb-frontends/stb6000.h b/drivers/media/dvb-frontends/stb6000.h
index 78e75dfc317f..e94a3d5facf6 100644
--- a/drivers/media/dvb-frontends/stb6000.h
+++ b/drivers/media/dvb-frontends/stb6000.h
@@ -26,15 +26,16 @@
26#include <linux/i2c.h> 26#include <linux/i2c.h>
27#include "dvb_frontend.h" 27#include "dvb_frontend.h"
28 28
29#if IS_REACHABLE(CONFIG_DVB_STB6000)
29/** 30/**
30 * Attach a stb6000 tuner to the supplied frontend structure. 31 * Attach a stb6000 tuner to the supplied frontend structure.
31 * 32 *
32 * @param fe Frontend to attach to. 33 * @fe: Frontend to attach to.
33 * @param addr i2c address of the tuner. 34 * @addr: i2c address of the tuner.
34 * @param i2c i2c adapter to use. 35 * @i2c: i2c adapter to use.
35 * @return FE pointer on success, NULL on failure. 36 *
37 * return: FE pointer on success, NULL on failure.
36 */ 38 */
37#if IS_REACHABLE(CONFIG_DVB_STB6000)
38extern struct dvb_frontend *stb6000_attach(struct dvb_frontend *fe, int addr, 39extern struct dvb_frontend *stb6000_attach(struct dvb_frontend *fe, int addr,
39 struct i2c_adapter *i2c); 40 struct i2c_adapter *i2c);
40#else 41#else
diff --git a/drivers/media/dvb-frontends/stv0299.c b/drivers/media/dvb-frontends/stv0299.c
index b36b21a13201..b1f3d675d316 100644
--- a/drivers/media/dvb-frontends/stv0299.c
+++ b/drivers/media/dvb-frontends/stv0299.c
@@ -368,7 +368,7 @@ static int stv0299_set_voltage(struct dvb_frontend *fe,
368 reg0x08 = stv0299_readreg (state, 0x08); 368 reg0x08 = stv0299_readreg (state, 0x08);
369 reg0x0c = stv0299_readreg (state, 0x0c); 369 reg0x0c = stv0299_readreg (state, 0x0c);
370 370
371 /** 371 /*
372 * H/V switching over OP0, OP1 and OP2 are LNB power enable bits 372 * H/V switching over OP0, OP1 and OP2 are LNB power enable bits
373 */ 373 */
374 reg0x0c &= 0x0f; 374 reg0x0c &= 0x0f;
diff --git a/drivers/media/dvb-frontends/tda10071.h b/drivers/media/dvb-frontends/tda10071.h
index 8f184026ee11..da1a87bc1603 100644
--- a/drivers/media/dvb-frontends/tda10071.h
+++ b/drivers/media/dvb-frontends/tda10071.h
@@ -38,7 +38,6 @@
38 * @tuner_i2c_addr: CX24118A tuner I2C address (0x14, 0x54, ...). 38 * @tuner_i2c_addr: CX24118A tuner I2C address (0x14, 0x54, ...).
39 * @get_dvb_frontend: Get DVB frontend. 39 * @get_dvb_frontend: Get DVB frontend.
40 */ 40 */
41
42struct tda10071_platform_data { 41struct tda10071_platform_data {
43 u32 clk; 42 u32 clk;
44 u16 i2c_wr_max; 43 u16 i2c_wr_max;
diff --git a/drivers/media/dvb-frontends/tda826x.h b/drivers/media/dvb-frontends/tda826x.h
index 81abe1aebe9f..6a7bed12e741 100644
--- a/drivers/media/dvb-frontends/tda826x.h
+++ b/drivers/media/dvb-frontends/tda826x.h
@@ -29,11 +29,12 @@
29/** 29/**
30 * Attach a tda826x tuner to the supplied frontend structure. 30 * Attach a tda826x tuner to the supplied frontend structure.
31 * 31 *
32 * @param fe Frontend to attach to. 32 * @fe: Frontend to attach to.
33 * @param addr i2c address of the tuner. 33 * @addr: i2c address of the tuner.
34 * @param i2c i2c adapter to use. 34 * @i2c: i2c adapter to use.
35 * @param has_loopthrough Set to 1 if the card has a loopthrough RF connector. 35 * @has_loopthrough: Set to 1 if the card has a loopthrough RF connector.
36 * @return FE pointer on success, NULL on failure. 36 *
37 * return: FE pointer on success, NULL on failure.
37 */ 38 */
38#if IS_REACHABLE(CONFIG_DVB_TDA826X) 39#if IS_REACHABLE(CONFIG_DVB_TDA826X)
39extern struct dvb_frontend* tda826x_attach(struct dvb_frontend *fe, int addr, 40extern struct dvb_frontend* tda826x_attach(struct dvb_frontend *fe, int addr,
diff --git a/drivers/media/dvb-frontends/tua6100.c b/drivers/media/dvb-frontends/tua6100.c
index 18e6d4c5be21..1d41abd47f04 100644
--- a/drivers/media/dvb-frontends/tua6100.c
+++ b/drivers/media/dvb-frontends/tua6100.c
@@ -1,4 +1,4 @@
1/** 1/*
2 * Driver for Infineon tua6100 pll. 2 * Driver for Infineon tua6100 pll.
3 * 3 *
4 * (c) 2006 Andrew de Quincey 4 * (c) 2006 Andrew de Quincey
diff --git a/drivers/media/dvb-frontends/tua6100.h b/drivers/media/dvb-frontends/tua6100.h
index 9f15cbdfdeca..6c098a894ea6 100644
--- a/drivers/media/dvb-frontends/tua6100.h
+++ b/drivers/media/dvb-frontends/tua6100.h
@@ -1,4 +1,4 @@
1/** 1/*
2 * Driver for Infineon tua6100 PLL. 2 * Driver for Infineon tua6100 PLL.
3 * 3 *
4 * (c) 2006 Andrew de Quincey 4 * (c) 2006 Andrew de Quincey
diff --git a/drivers/media/dvb-frontends/zd1301_demod.h b/drivers/media/dvb-frontends/zd1301_demod.h
index ceb2e05e873c..6cd8f6f9c415 100644
--- a/drivers/media/dvb-frontends/zd1301_demod.h
+++ b/drivers/media/dvb-frontends/zd1301_demod.h
@@ -27,7 +27,6 @@
27 * @reg_read: Register read callback. 27 * @reg_read: Register read callback.
28 * @reg_write: Register write callback. 28 * @reg_write: Register write callback.
29 */ 29 */
30
31struct zd1301_demod_platform_data { 30struct zd1301_demod_platform_data {
32 void *reg_priv; 31 void *reg_priv;
33 int (*reg_read)(void *, u16, u8 *); 32 int (*reg_read)(void *, u16, u8 *);
@@ -41,8 +40,7 @@ struct zd1301_demod_platform_data {
41 * 40 *
42 * Return: Pointer to DVB frontend which given platform device owns. 41 * Return: Pointer to DVB frontend which given platform device owns.
43 */ 42 */
44 43struct dvb_frontend *zd1301_demod_get_dvb_frontend(struct platform_device *pdev);
45struct dvb_frontend *zd1301_demod_get_dvb_frontend(struct platform_device *);
46 44
47/** 45/**
48 * zd1301_demod_get_i2c_adapter() - Get pointer to I2C adapter 46 * zd1301_demod_get_i2c_adapter() - Get pointer to I2C adapter
@@ -50,11 +48,16 @@ struct dvb_frontend *zd1301_demod_get_dvb_frontend(struct platform_device *);
50 * 48 *
51 * Return: Pointer to I2C adapter which given platform device owns. 49 * Return: Pointer to I2C adapter which given platform device owns.
52 */ 50 */
53 51struct i2c_adapter *zd1301_demod_get_i2c_adapter(struct platform_device *pdev);
54struct i2c_adapter *zd1301_demod_get_i2c_adapter(struct platform_device *);
55 52
56#else 53#else
57 54
55/**
56 * zd1301_demod_get_dvb_frontend() - Attach a zd1301 frontend
57 * @dev: Pointer to platform device
58 *
59 * Return: Pointer to %struct dvb_frontend or NULL if attach fails.
60 */
58static inline struct dvb_frontend *zd1301_demod_get_dvb_frontend(struct platform_device *dev) 61static inline struct dvb_frontend *zd1301_demod_get_dvb_frontend(struct platform_device *dev)
59{ 62{
60 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 63 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
diff --git a/drivers/media/dvb-frontends/zl10036.c b/drivers/media/dvb-frontends/zl10036.c
index 062282739ce5..89dd65ae88ad 100644
--- a/drivers/media/dvb-frontends/zl10036.c
+++ b/drivers/media/dvb-frontends/zl10036.c
@@ -1,4 +1,4 @@
1/** 1/*
2 * Driver for Zarlink zl10036 DVB-S silicon tuner 2 * Driver for Zarlink zl10036 DVB-S silicon tuner
3 * 3 *
4 * Copyright (C) 2006 Tino Reichardt 4 * Copyright (C) 2006 Tino Reichardt
@@ -157,7 +157,7 @@ static int zl10036_sleep(struct dvb_frontend *fe)
157 return ret; 157 return ret;
158} 158}
159 159
160/** 160/*
161 * register map of the ZL10036/ZL10038 161 * register map of the ZL10036/ZL10038
162 * 162 *
163 * reg[default] content 163 * reg[default] content
@@ -219,7 +219,7 @@ static int zl10036_set_bandwidth(struct zl10036_state *state, u32 fbw)
219 if (fbw <= 28820) { 219 if (fbw <= 28820) {
220 br = _BR_MAXIMUM; 220 br = _BR_MAXIMUM;
221 } else { 221 } else {
222 /** 222 /*
223 * f(bw)=34,6MHz f(xtal)=10.111MHz 223 * f(bw)=34,6MHz f(xtal)=10.111MHz
224 * br = (10111/34600) * 63 * 1/K = 14; 224 * br = (10111/34600) * 63 * 1/K = 14;
225 */ 225 */
@@ -315,7 +315,7 @@ static int zl10036_set_params(struct dvb_frontend *fe)
315 || (frequency > fe->ops.info.frequency_max)) 315 || (frequency > fe->ops.info.frequency_max))
316 return -EINVAL; 316 return -EINVAL;
317 317
318 /** 318 /*
319 * alpha = 1.35 for dvb-s 319 * alpha = 1.35 for dvb-s
320 * fBW = (alpha*symbolrate)/(2*0.8) 320 * fBW = (alpha*symbolrate)/(2*0.8)
321 * 1.35 / (2*0.8) = 27 / 32 321 * 1.35 / (2*0.8) = 27 / 32
diff --git a/drivers/media/dvb-frontends/zl10036.h b/drivers/media/dvb-frontends/zl10036.h
index 88751adfecf7..ec90ca927739 100644
--- a/drivers/media/dvb-frontends/zl10036.h
+++ b/drivers/media/dvb-frontends/zl10036.h
@@ -20,20 +20,20 @@
20#include <linux/i2c.h> 20#include <linux/i2c.h>
21#include "dvb_frontend.h" 21#include "dvb_frontend.h"
22 22
23/**
24 * Attach a zl10036 tuner to the supplied frontend structure.
25 *
26 * @param fe Frontend to attach to.
27 * @param config zl10036_config structure
28 * @return FE pointer on success, NULL on failure.
29 */
30
31struct zl10036_config { 23struct zl10036_config {
32 u8 tuner_address; 24 u8 tuner_address;
33 int rf_loop_enable; 25 int rf_loop_enable;
34}; 26};
35 27
36#if IS_REACHABLE(CONFIG_DVB_ZL10036) 28#if IS_REACHABLE(CONFIG_DVB_ZL10036)
29/**
30 * Attach a zl10036 tuner to the supplied frontend structure.
31 *
32 * @fe: Frontend to attach to.
33 * @config: zl10036_config structure.
34 * @i2c: pointer to struct i2c_adapter.
35 * return: FE pointer on success, NULL on failure.
36 */
37extern struct dvb_frontend *zl10036_attach(struct dvb_frontend *fe, 37extern struct dvb_frontend *zl10036_attach(struct dvb_frontend *fe,
38 const struct zl10036_config *config, struct i2c_adapter *i2c); 38 const struct zl10036_config *config, struct i2c_adapter *i2c);
39#else 39#else
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 3c6d6428f525..cb5d7ff82915 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -676,6 +676,7 @@ config VIDEO_OV13858
676 tristate "OmniVision OV13858 sensor support" 676 tristate "OmniVision OV13858 sensor support"
677 depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API 677 depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
678 depends on MEDIA_CAMERA_SUPPORT 678 depends on MEDIA_CAMERA_SUPPORT
679 select V4L2_FWNODE
679 ---help--- 680 ---help---
680 This is a Video4Linux2 sensor-level driver for the OmniVision 681 This is a Video4Linux2 sensor-level driver for the OmniVision
681 OV13858 camera. 682 OV13858 camera.
diff --git a/drivers/media/i2c/et8ek8/Kconfig b/drivers/media/i2c/et8ek8/Kconfig
index 14399365ad7f..9fe409e95666 100644
--- a/drivers/media/i2c/et8ek8/Kconfig
+++ b/drivers/media/i2c/et8ek8/Kconfig
@@ -1,6 +1,7 @@
1config VIDEO_ET8EK8 1config VIDEO_ET8EK8
2 tristate "ET8EK8 camera sensor support" 2 tristate "ET8EK8 camera sensor support"
3 depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API 3 depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
4 select V4L2_FWNODE
4 ---help--- 5 ---help---
5 This is a driver for the Toshiba ET8EK8 5 MP camera sensor. 6 This is a driver for the Toshiba ET8EK8 5 MP camera sensor.
6 It is used for example in Nokia N900 (RX-51). 7 It is used for example in Nokia N900 (RX-51).
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index 800b9bf9cdd3..2f71af2f90bf 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -1770,8 +1770,7 @@ static int imx274_probe(struct i2c_client *client,
1770 return 0; 1770 return 0;
1771 1771
1772err_ctrls: 1772err_ctrls:
1773 v4l2_async_unregister_subdev(sd); 1773 v4l2_ctrl_handler_free(&imx274->ctrls.handler);
1774 v4l2_ctrl_handler_free(sd->ctrl_handler);
1775err_me: 1774err_me:
1776 media_entity_cleanup(&sd->entity); 1775 media_entity_cleanup(&sd->entity);
1777err_regmap: 1776err_regmap:
@@ -1788,7 +1787,7 @@ static int imx274_remove(struct i2c_client *client)
1788 imx274_write_table(imx274, mode_table[IMX274_MODE_STOP_STREAM]); 1787 imx274_write_table(imx274, mode_table[IMX274_MODE_STOP_STREAM]);
1789 1788
1790 v4l2_async_unregister_subdev(sd); 1789 v4l2_async_unregister_subdev(sd);
1791 v4l2_ctrl_handler_free(sd->ctrl_handler); 1790 v4l2_ctrl_handler_free(&imx274->ctrls.handler);
1792 media_entity_cleanup(&sd->entity); 1791 media_entity_cleanup(&sd->entity);
1793 mutex_destroy(&imx274->lock); 1792 mutex_destroy(&imx274->lock);
1794 return 0; 1793 return 0;
diff --git a/drivers/media/i2c/lm3560.c b/drivers/media/i2c/lm3560.c
index 251a2aaf98c3..b600e03aa94b 100644
--- a/drivers/media/i2c/lm3560.c
+++ b/drivers/media/i2c/lm3560.c
@@ -50,6 +50,7 @@ enum led_enable {
50/** 50/**
51 * struct lm3560_flash 51 * struct lm3560_flash
52 * 52 *
53 * @dev: pointer to &struct device
53 * @pdata: platform data 54 * @pdata: platform data
54 * @regmap: reg. map for i2c 55 * @regmap: reg. map for i2c
55 * @lock: muxtex for serial access. 56 * @lock: muxtex for serial access.
diff --git a/drivers/media/i2c/m5mols/m5mols_capture.c b/drivers/media/i2c/m5mols/m5mols_capture.c
index a0cd6dc32eb0..0fb457f57995 100644
--- a/drivers/media/i2c/m5mols/m5mols_capture.c
+++ b/drivers/media/i2c/m5mols/m5mols_capture.c
@@ -33,6 +33,10 @@
33 33
34/** 34/**
35 * m5mols_read_rational - I2C read of a rational number 35 * m5mols_read_rational - I2C read of a rational number
36 * @sd: sub-device, as pointed by struct v4l2_subdev
37 * @addr_num: numerator register
38 * @addr_den: denominator register
39 * @val: place to store the division result
36 * 40 *
37 * Read numerator and denominator from registers @addr_num and @addr_den 41 * Read numerator and denominator from registers @addr_num and @addr_den
38 * respectively and return the division result in @val. 42 * respectively and return the division result in @val.
@@ -53,6 +57,7 @@ static int m5mols_read_rational(struct v4l2_subdev *sd, u32 addr_num,
53 57
54/** 58/**
55 * m5mols_capture_info - Gather captured image information 59 * m5mols_capture_info - Gather captured image information
60 * @info: M-5MOLS driver data structure
56 * 61 *
57 * For now it gathers only EXIF information and file size. 62 * For now it gathers only EXIF information and file size.
58 */ 63 */
diff --git a/drivers/media/i2c/m5mols/m5mols_controls.c b/drivers/media/i2c/m5mols/m5mols_controls.c
index c2218c0a9e6f..82eab7c2bc8c 100644
--- a/drivers/media/i2c/m5mols/m5mols_controls.c
+++ b/drivers/media/i2c/m5mols/m5mols_controls.c
@@ -126,6 +126,7 @@ static struct m5mols_scenemode m5mols_default_scenemode[] = {
126 126
127/** 127/**
128 * m5mols_do_scenemode() - Change current scenemode 128 * m5mols_do_scenemode() - Change current scenemode
129 * @info: M-5MOLS driver data structure
129 * @mode: Desired mode of the scenemode 130 * @mode: Desired mode of the scenemode
130 * 131 *
131 * WARNING: The execution order is important. Do not change the order. 132 * WARNING: The execution order is important. Do not change the order.
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
index 463534d44756..12e79f9e32d5 100644
--- a/drivers/media/i2c/m5mols/m5mols_core.c
+++ b/drivers/media/i2c/m5mols/m5mols_core.c
@@ -114,7 +114,8 @@ static const struct m5mols_resolution m5mols_reg_res[] = {
114 114
115/** 115/**
116 * m5mols_swap_byte - an byte array to integer conversion function 116 * m5mols_swap_byte - an byte array to integer conversion function
117 * @size: size in bytes of I2C packet defined in the M-5MOLS datasheet 117 * @data: byte array
118 * @length: size in bytes of I2C packet defined in the M-5MOLS datasheet
118 * 119 *
119 * Convert I2C data byte array with performing any required byte 120 * Convert I2C data byte array with performing any required byte
120 * reordering to assure proper values for each data type, regardless 121 * reordering to assure proper values for each data type, regardless
@@ -132,8 +133,9 @@ static u32 m5mols_swap_byte(u8 *data, u8 length)
132 133
133/** 134/**
134 * m5mols_read - I2C read function 135 * m5mols_read - I2C read function
135 * @reg: combination of size, category and command for the I2C packet 136 * @sd: sub-device, as pointed by struct v4l2_subdev
136 * @size: desired size of I2C packet 137 * @size: desired size of I2C packet
138 * @reg: combination of size, category and command for the I2C packet
137 * @val: read value 139 * @val: read value
138 * 140 *
139 * Returns 0 on success, or else negative errno. 141 * Returns 0 on success, or else negative errno.
@@ -232,6 +234,7 @@ int m5mols_read_u32(struct v4l2_subdev *sd, u32 reg, u32 *val)
232 234
233/** 235/**
234 * m5mols_write - I2C command write function 236 * m5mols_write - I2C command write function
237 * @sd: sub-device, as pointed by struct v4l2_subdev
235 * @reg: combination of size, category and command for the I2C packet 238 * @reg: combination of size, category and command for the I2C packet
236 * @val: value to write 239 * @val: value to write
237 * 240 *
@@ -284,6 +287,7 @@ int m5mols_write(struct v4l2_subdev *sd, u32 reg, u32 val)
284 287
285/** 288/**
286 * m5mols_busy_wait - Busy waiting with I2C register polling 289 * m5mols_busy_wait - Busy waiting with I2C register polling
290 * @sd: sub-device, as pointed by struct v4l2_subdev
287 * @reg: the I2C_REG() address of an 8-bit status register to check 291 * @reg: the I2C_REG() address of an 8-bit status register to check
288 * @value: expected status register value 292 * @value: expected status register value
289 * @mask: bit mask for the read status register value 293 * @mask: bit mask for the read status register value
@@ -316,6 +320,8 @@ int m5mols_busy_wait(struct v4l2_subdev *sd, u32 reg, u32 value, u32 mask,
316 320
317/** 321/**
318 * m5mols_enable_interrupt - Clear interrupt pending bits and unmask interrupts 322 * m5mols_enable_interrupt - Clear interrupt pending bits and unmask interrupts
323 * @sd: sub-device, as pointed by struct v4l2_subdev
324 * @reg: combination of size, category and command for the I2C packet
319 * 325 *
320 * Before writing desired interrupt value the INT_FACTOR register should 326 * Before writing desired interrupt value the INT_FACTOR register should
321 * be read to clear pending interrupts. 327 * be read to clear pending interrupts.
@@ -349,6 +355,8 @@ int m5mols_wait_interrupt(struct v4l2_subdev *sd, u8 irq_mask, u32 timeout)
349 355
350/** 356/**
351 * m5mols_reg_mode - Write the mode and check busy status 357 * m5mols_reg_mode - Write the mode and check busy status
358 * @sd: sub-device, as pointed by struct v4l2_subdev
359 * @mode: the required operation mode
352 * 360 *
353 * It always accompanies a little delay changing the M-5MOLS mode, so it is 361 * It always accompanies a little delay changing the M-5MOLS mode, so it is
354 * needed checking current busy status to guarantee right mode. 362 * needed checking current busy status to guarantee right mode.
@@ -364,6 +372,7 @@ static int m5mols_reg_mode(struct v4l2_subdev *sd, u8 mode)
364 372
365/** 373/**
366 * m5mols_set_mode - set the M-5MOLS controller mode 374 * m5mols_set_mode - set the M-5MOLS controller mode
375 * @info: M-5MOLS driver data structure
367 * @mode: the required operation mode 376 * @mode: the required operation mode
368 * 377 *
369 * The commands of M-5MOLS are grouped into specific modes. Each functionality 378 * The commands of M-5MOLS are grouped into specific modes. Each functionality
@@ -421,6 +430,7 @@ int m5mols_set_mode(struct m5mols_info *info, u8 mode)
421 430
422/** 431/**
423 * m5mols_get_version - retrieve full revisions information of M-5MOLS 432 * m5mols_get_version - retrieve full revisions information of M-5MOLS
433 * @sd: sub-device, as pointed by struct v4l2_subdev
424 * 434 *
425 * The version information includes revisions of hardware and firmware, 435 * The version information includes revisions of hardware and firmware,
426 * AutoFocus algorithm version and the version string. 436 * AutoFocus algorithm version and the version string.
@@ -489,6 +499,7 @@ static enum m5mols_restype __find_restype(u32 code)
489 499
490/** 500/**
491 * __find_resolution - Lookup preset and type of M-5MOLS's resolution 501 * __find_resolution - Lookup preset and type of M-5MOLS's resolution
502 * @sd: sub-device, as pointed by struct v4l2_subdev
492 * @mf: pixel format to find/negotiate the resolution preset for 503 * @mf: pixel format to find/negotiate the resolution preset for
493 * @type: M-5MOLS resolution type 504 * @type: M-5MOLS resolution type
494 * @resolution: M-5MOLS resolution preset register value 505 * @resolution: M-5MOLS resolution preset register value
@@ -662,6 +673,7 @@ static const struct v4l2_subdev_pad_ops m5mols_pad_ops = {
662 673
663/** 674/**
664 * m5mols_restore_controls - Apply current control values to the registers 675 * m5mols_restore_controls - Apply current control values to the registers
676 * @info: M-5MOLS driver data structure
665 * 677 *
666 * m5mols_do_scenemode() handles all parameters for which there is yet no 678 * m5mols_do_scenemode() handles all parameters for which there is yet no
667 * individual control. It should be replaced at some point by setting each 679 * individual control. It should be replaced at some point by setting each
@@ -686,6 +698,7 @@ int m5mols_restore_controls(struct m5mols_info *info)
686 698
687/** 699/**
688 * m5mols_start_monitor - Start the monitor mode 700 * m5mols_start_monitor - Start the monitor mode
701 * @info: M-5MOLS driver data structure
689 * 702 *
690 * Before applying the controls setup the resolution and frame rate 703 * Before applying the controls setup the resolution and frame rate
691 * in PARAMETER mode, and then switch over to MONITOR mode. 704 * in PARAMETER mode, and then switch over to MONITOR mode.
@@ -789,6 +802,7 @@ int __attribute__ ((weak)) m5mols_update_fw(struct v4l2_subdev *sd,
789 802
790/** 803/**
791 * m5mols_fw_start - M-5MOLS internal ARM controller initialization 804 * m5mols_fw_start - M-5MOLS internal ARM controller initialization
805 * @sd: sub-device, as pointed by struct v4l2_subdev
792 * 806 *
793 * Execute the M-5MOLS internal ARM controller initialization sequence. 807 * Execute the M-5MOLS internal ARM controller initialization sequence.
794 * This function should be called after the supply voltage has been 808 * This function should be called after the supply voltage has been
@@ -844,6 +858,8 @@ static int m5mols_auto_focus_stop(struct m5mols_info *info)
844 858
845/** 859/**
846 * m5mols_s_power - Main sensor power control function 860 * m5mols_s_power - Main sensor power control function
861 * @sd: sub-device, as pointed by struct v4l2_subdev
862 * @on: if true, powers on the device; powers off otherwise.
847 * 863 *
848 * To prevent breaking the lens when the sensor is powered off the Soft-Landing 864 * To prevent breaking the lens when the sensor is powered off the Soft-Landing
849 * algorithm is called where available. The Soft-Landing algorithm availability 865 * algorithm is called where available. The Soft-Landing algorithm availability
diff --git a/drivers/media/i2c/ov5647.c b/drivers/media/i2c/ov5647.c
index 34179d232a35..da39c49de503 100644
--- a/drivers/media/i2c/ov5647.c
+++ b/drivers/media/i2c/ov5647.c
@@ -428,8 +428,8 @@ static int ov5647_sensor_set_register(struct v4l2_subdev *sd,
428} 428}
429#endif 429#endif
430 430
431/** 431/*
432 * @short Subdev core operations registration 432 * Subdev core operations registration
433 */ 433 */
434static const struct v4l2_subdev_core_ops ov5647_subdev_core_ops = { 434static const struct v4l2_subdev_core_ops ov5647_subdev_core_ops = {
435 .s_power = ov5647_sensor_power, 435 .s_power = ov5647_sensor_power,
diff --git a/drivers/media/i2c/s5k6a3.c b/drivers/media/i2c/s5k6a3.c
index 67dcca76f981..2e140272794b 100644
--- a/drivers/media/i2c/s5k6a3.c
+++ b/drivers/media/i2c/s5k6a3.c
@@ -53,6 +53,9 @@ enum {
53 * @gpio_reset: GPIO connected to the sensor's reset pin 53 * @gpio_reset: GPIO connected to the sensor's reset pin
54 * @lock: mutex protecting the structure's members below 54 * @lock: mutex protecting the structure's members below
55 * @format: media bus format at the sensor's source pad 55 * @format: media bus format at the sensor's source pad
56 * @clock: pointer to &struct clk.
57 * @clock_frequency: clock frequency
58 * @power_count: stores state if device is powered
56 */ 59 */
57struct s5k6a3 { 60struct s5k6a3 {
58 struct device *dev; 61 struct device *dev;
diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c
index 9fd254a8e20d..13c10b5e2b45 100644
--- a/drivers/media/i2c/s5k6aa.c
+++ b/drivers/media/i2c/s5k6aa.c
@@ -421,6 +421,7 @@ static int s5k6aa_set_ahb_address(struct i2c_client *client)
421 421
422/** 422/**
423 * s5k6aa_configure_pixel_clock - apply ISP main clock/PLL configuration 423 * s5k6aa_configure_pixel_clock - apply ISP main clock/PLL configuration
424 * @s5k6aa: pointer to &struct s5k6aa describing the device
424 * 425 *
425 * Configure the internal ISP PLL for the required output frequency. 426 * Configure the internal ISP PLL for the required output frequency.
426 * Locking: called with s5k6aa.lock mutex held. 427 * Locking: called with s5k6aa.lock mutex held.
@@ -669,6 +670,7 @@ static int s5k6aa_set_input_params(struct s5k6aa *s5k6aa)
669 670
670/** 671/**
671 * s5k6aa_configure_video_bus - configure the video output interface 672 * s5k6aa_configure_video_bus - configure the video output interface
673 * @s5k6aa: pointer to &struct s5k6aa describing the device
672 * @bus_type: video bus type: parallel or MIPI-CSI 674 * @bus_type: video bus type: parallel or MIPI-CSI
673 * @nlanes: number of MIPI lanes to be used (MIPI-CSI only) 675 * @nlanes: number of MIPI lanes to be used (MIPI-CSI only)
674 * 676 *
@@ -724,6 +726,8 @@ static int s5k6aa_new_config_sync(struct i2c_client *client, int timeout,
724 726
725/** 727/**
726 * s5k6aa_set_prev_config - write user preview register set 728 * s5k6aa_set_prev_config - write user preview register set
729 * @s5k6aa: pointer to &struct s5k6aa describing the device
730 * @preset: s5k6aa preset to be applied
727 * 731 *
728 * Configure output resolution and color format, pixel clock 732 * Configure output resolution and color format, pixel clock
729 * frequency range, device frame rate type and frame period range. 733 * frequency range, device frame rate type and frame period range.
@@ -777,6 +781,7 @@ static int s5k6aa_set_prev_config(struct s5k6aa *s5k6aa,
777 781
778/** 782/**
779 * s5k6aa_initialize_isp - basic ISP MCU initialization 783 * s5k6aa_initialize_isp - basic ISP MCU initialization
784 * @sd: pointer to V4L2 sub-device descriptor
780 * 785 *
781 * Configure AHB addresses for registers read/write; configure PLLs for 786 * Configure AHB addresses for registers read/write; configure PLLs for
782 * required output pixel clock. The ISP power supply needs to be already 787 * required output pixel clock. The ISP power supply needs to be already
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index ad2df998f9c5..d575b3e7e835 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -86,6 +86,7 @@ static int tvp514x_s_stream(struct v4l2_subdev *sd, int enable);
86/** 86/**
87 * struct tvp514x_decoder - TVP5146/47 decoder object 87 * struct tvp514x_decoder - TVP5146/47 decoder object
88 * @sd: Subdevice Slave handle 88 * @sd: Subdevice Slave handle
89 * @hdl: embedded &struct v4l2_ctrl_handler
89 * @tvp514x_regs: copy of hw's regs with preset values. 90 * @tvp514x_regs: copy of hw's regs with preset values.
90 * @pdata: Board specific 91 * @pdata: Board specific
91 * @ver: Chip version 92 * @ver: Chip version
@@ -98,6 +99,9 @@ static int tvp514x_s_stream(struct v4l2_subdev *sd, int enable);
98 * @std_list: Standards list 99 * @std_list: Standards list
99 * @input: Input routing at chip level 100 * @input: Input routing at chip level
100 * @output: Output routing at chip level 101 * @output: Output routing at chip level
102 * @pad: subdev media pad associated with the decoder
103 * @format: media bus frame format
104 * @int_seq: driver's register init sequence
101 */ 105 */
102struct tvp514x_decoder { 106struct tvp514x_decoder {
103 struct v4l2_subdev sd; 107 struct v4l2_subdev sd;
@@ -211,7 +215,7 @@ static struct tvp514x_reg tvp514x_reg_list_default[] = {
211 {TOK_TERM, 0, 0}, 215 {TOK_TERM, 0, 0},
212}; 216};
213 217
214/** 218/*
215 * List of image formats supported by TVP5146/47 decoder 219 * List of image formats supported by TVP5146/47 decoder
216 * Currently we are using 8 bit mode only, but can be 220 * Currently we are using 8 bit mode only, but can be
217 * extended to 10/20 bit mode. 221 * extended to 10/20 bit mode.
@@ -226,7 +230,7 @@ static const struct v4l2_fmtdesc tvp514x_fmt_list[] = {
226 }, 230 },
227}; 231};
228 232
229/** 233/*
230 * Supported standards - 234 * Supported standards -
231 * 235 *
232 * Currently supports two standards only, need to add support for rest of the 236 * Currently supports two standards only, need to add support for rest of the
@@ -931,7 +935,7 @@ static int tvp514x_get_pad_format(struct v4l2_subdev *sd,
931 * tvp514x_set_pad_format() - V4L2 decoder interface handler for set pad format 935 * tvp514x_set_pad_format() - V4L2 decoder interface handler for set pad format
932 * @sd: pointer to standard V4L2 sub-device structure 936 * @sd: pointer to standard V4L2 sub-device structure
933 * @cfg: pad configuration 937 * @cfg: pad configuration
934 * @format: pointer to v4l2_subdev_format structure 938 * @fmt: pointer to v4l2_subdev_format structure
935 * 939 *
936 * Set pad format for the output pad 940 * Set pad format for the output pad
937 */ 941 */
@@ -1199,7 +1203,7 @@ static const struct tvp514x_reg tvp514xm_init_reg_seq[] = {
1199 {TOK_TERM, 0, 0}, 1203 {TOK_TERM, 0, 0},
1200}; 1204};
1201 1205
1202/** 1206/*
1203 * I2C Device Table - 1207 * I2C Device Table -
1204 * 1208 *
1205 * name - Name of the actual device/chip. 1209 * name - Name of the actual device/chip.
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index 11829c0fa138..509d69e6ca4a 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -82,11 +82,11 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
82 * @start_addr_lo: DMA ring buffer start address, lower part 82 * @start_addr_lo: DMA ring buffer start address, lower part
83 * @start_addr_hi: DMA ring buffer start address, higher part 83 * @start_addr_hi: DMA ring buffer start address, higher part
84 * @size: DMA ring buffer size register 84 * @size: DMA ring buffer size register
85 Bits [0-7]: DMA packet size, 188 bytes 85 * * Bits [0-7]: DMA packet size, 188 bytes
86 Bits [16-23]: packets count in block, 128 packets 86 * * Bits [16-23]: packets count in block, 128 packets
87 Bits [24-31]: blocks count, 8 blocks 87 * * Bits [24-31]: blocks count, 8 blocks
88 * @timeout: DMA timeout in units of 8ns 88 * @timeout: DMA timeout in units of 8ns
89 For example, value of 375000000 equals to 3 sec 89 * For example, value of 375000000 equals to 3 sec
90 * @curr_addr_lo: Current ring buffer head address, lower part 90 * @curr_addr_lo: Current ring buffer head address, lower part
91 * @curr_addr_hi: Current ring buffer head address, higher part 91 * @curr_addr_hi: Current ring buffer head address, higher part
92 * @stat_pkt_received: Statistic register, not tested 92 * @stat_pkt_received: Statistic register, not tested
diff --git a/drivers/media/pci/solo6x10/solo6x10-enc.c b/drivers/media/pci/solo6x10/solo6x10-enc.c
index d28211bb9674..58d6b5131dd0 100644
--- a/drivers/media/pci/solo6x10/solo6x10-enc.c
+++ b/drivers/media/pci/solo6x10/solo6x10-enc.c
@@ -175,7 +175,7 @@ out:
175 return 0; 175 return 0;
176} 176}
177 177
178/** 178/*
179 * Set channel Quality Profile (0-3). 179 * Set channel Quality Profile (0-3).
180 */ 180 */
181void solo_s_jpeg_qp(struct solo_dev *solo_dev, unsigned int ch, 181void solo_s_jpeg_qp(struct solo_dev *solo_dev, unsigned int ch,
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index eb5a9eae7c8e..dd199bfc1d45 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -404,6 +404,7 @@ static const struct v4l2_file_operations vip_fops = {
404 * vidioc_querycap - return capabilities of device 404 * vidioc_querycap - return capabilities of device
405 * @file: descriptor of device 405 * @file: descriptor of device
406 * @cap: contains return values 406 * @cap: contains return values
407 * @priv: unused
407 * 408 *
408 * the capabilities of the device are returned 409 * the capabilities of the device are returned
409 * 410 *
@@ -429,6 +430,7 @@ static int vidioc_querycap(struct file *file, void *priv,
429 * vidioc_s_std - set video standard 430 * vidioc_s_std - set video standard
430 * @file: descriptor of device 431 * @file: descriptor of device
431 * @std: contains standard to be set 432 * @std: contains standard to be set
433 * @priv: unused
432 * 434 *
433 * the video standard is set 435 * the video standard is set
434 * 436 *
@@ -466,6 +468,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id std)
466/** 468/**
467 * vidioc_g_std - get video standard 469 * vidioc_g_std - get video standard
468 * @file: descriptor of device 470 * @file: descriptor of device
471 * @priv: unused
469 * @std: contains return values 472 * @std: contains return values
470 * 473 *
471 * the current video standard is returned 474 * the current video standard is returned
@@ -483,6 +486,7 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *std)
483/** 486/**
484 * vidioc_querystd - get possible video standards 487 * vidioc_querystd - get possible video standards
485 * @file: descriptor of device 488 * @file: descriptor of device
489 * @priv: unused
486 * @std: contains return values 490 * @std: contains return values
487 * 491 *
488 * all possible video standards are returned 492 * all possible video standards are returned
@@ -512,6 +516,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
512/** 516/**
513 * vidioc_s_input - set input line 517 * vidioc_s_input - set input line
514 * @file: descriptor of device 518 * @file: descriptor of device
519 * @priv: unused
515 * @i: new input line number 520 * @i: new input line number
516 * 521 *
517 * the current active input line is set 522 * the current active input line is set
@@ -538,6 +543,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
538/** 543/**
539 * vidioc_g_input - return input line 544 * vidioc_g_input - return input line
540 * @file: descriptor of device 545 * @file: descriptor of device
546 * @priv: unused
541 * @i: returned input line number 547 * @i: returned input line number
542 * 548 *
543 * the current active input line is returned 549 * the current active input line is returned
@@ -554,6 +560,8 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
554 560
555/** 561/**
556 * vidioc_enum_fmt_vid_cap - return video capture format 562 * vidioc_enum_fmt_vid_cap - return video capture format
563 * @file: descriptor of device
564 * @priv: unused
557 * @f: returned format information 565 * @f: returned format information
558 * 566 *
559 * returns name and format of video capture 567 * returns name and format of video capture
@@ -577,6 +585,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
577/** 585/**
578 * vidioc_try_fmt_vid_cap - set video capture format 586 * vidioc_try_fmt_vid_cap - set video capture format
579 * @file: descriptor of device 587 * @file: descriptor of device
588 * @priv: unused
580 * @f: new format 589 * @f: new format
581 * 590 *
582 * new video format is set which includes width and 591 * new video format is set which includes width and
@@ -639,6 +648,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
639/** 648/**
640 * vidioc_s_fmt_vid_cap - set current video format parameters 649 * vidioc_s_fmt_vid_cap - set current video format parameters
641 * @file: descriptor of device 650 * @file: descriptor of device
651 * @priv: unused
642 * @f: returned format information 652 * @f: returned format information
643 * 653 *
644 * set new capture format 654 * set new capture format
@@ -706,6 +716,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
706/** 716/**
707 * vidioc_g_fmt_vid_cap - get current video format parameters 717 * vidioc_g_fmt_vid_cap - get current video format parameters
708 * @file: descriptor of device 718 * @file: descriptor of device
719 * @priv: unused
709 * @f: contains format information 720 * @f: contains format information
710 * 721 *
711 * returns current video format parameters 722 * returns current video format parameters
diff --git a/drivers/media/pci/tw68/tw68-risc.c b/drivers/media/pci/tw68/tw68-risc.c
index 7439db212a69..82ff9c9494f3 100644
--- a/drivers/media/pci/tw68/tw68-risc.c
+++ b/drivers/media/pci/tw68/tw68-risc.c
@@ -29,14 +29,15 @@
29#include "tw68.h" 29#include "tw68.h"
30 30
31/** 31/**
32 * @rp pointer to current risc program position 32 * tw68_risc_field
33 * @sglist pointer to "scatter-gather list" of buffer pointers 33 * @rp: pointer to current risc program position
34 * @offset offset to target memory buffer 34 * @sglist: pointer to "scatter-gather list" of buffer pointers
35 * @sync_line 0 -> no sync, 1 -> odd sync, 2 -> even sync 35 * @offset: offset to target memory buffer
36 * @bpl number of bytes per scan line 36 * @sync_line: 0 -> no sync, 1 -> odd sync, 2 -> even sync
37 * @padding number of bytes of padding to add 37 * @bpl: number of bytes per scan line
38 * @lines number of lines in field 38 * @padding: number of bytes of padding to add
39 * @jump insert a jump at the start 39 * @lines: number of lines in field
40 * @jump: insert a jump at the start
40 */ 41 */
41static __le32 *tw68_risc_field(__le32 *rp, struct scatterlist *sglist, 42static __le32 *tw68_risc_field(__le32 *rp, struct scatterlist *sglist,
42 unsigned int offset, u32 sync_line, 43 unsigned int offset, u32 sync_line,
@@ -120,18 +121,18 @@ static __le32 *tw68_risc_field(__le32 *rp, struct scatterlist *sglist,
120 * memory for the dma controller "program" and then fills in that 121 * memory for the dma controller "program" and then fills in that
121 * memory with the appropriate "instructions". 122 * memory with the appropriate "instructions".
122 * 123 *
123 * @pci_dev structure with info about the pci 124 * @pci: structure with info about the pci
124 * slot which our device is in. 125 * slot which our device is in.
125 * @risc structure with info about the memory 126 * @buf: structure with info about the memory
126 * used for our controller program. 127 * used for our controller program.
127 * @sglist scatter-gather list entry 128 * @sglist: scatter-gather list entry
128 * @top_offset offset within the risc program area for the 129 * @top_offset: offset within the risc program area for the
129 * first odd frame line 130 * first odd frame line
130 * @bottom_offset offset within the risc program area for the 131 * @bottom_offset: offset within the risc program area for the
131 * first even frame line 132 * first even frame line
132 * @bpl number of data bytes per scan line 133 * @bpl: number of data bytes per scan line
133 * @padding number of extra bytes to add at end of line 134 * @padding: number of extra bytes to add at end of line
134 * @lines number of scan lines 135 * @lines: number of scan lines
135 */ 136 */
136int tw68_risc_buffer(struct pci_dev *pci, 137int tw68_risc_buffer(struct pci_dev *pci,
137 struct tw68_buf *buf, 138 struct tw68_buf *buf,
diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/davinci/vpif.c
index 07e89a4985a6..16352e2263d2 100644
--- a/drivers/media/platform/davinci/vpif.c
+++ b/drivers/media/platform/davinci/vpif.c
@@ -47,8 +47,9 @@ EXPORT_SYMBOL_GPL(vpif_lock);
47void __iomem *vpif_base; 47void __iomem *vpif_base;
48EXPORT_SYMBOL_GPL(vpif_base); 48EXPORT_SYMBOL_GPL(vpif_base);
49 49
50/** 50/*
51 * vpif_ch_params: video standard configuration parameters for vpif 51 * vpif_ch_params: video standard configuration parameters for vpif
52 *
52 * The table must include all presets from supported subdevices. 53 * The table must include all presets from supported subdevices.
53 */ 54 */
54const struct vpif_channel_config_params vpif_ch_params[] = { 55const struct vpif_channel_config_params vpif_ch_params[] = {
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index a89367ab1e06..fca4dc829f73 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -109,7 +109,7 @@ static int vpif_buffer_prepare(struct vb2_buffer *vb)
109 * @vq: vb2_queue ptr 109 * @vq: vb2_queue ptr
110 * @nbuffers: ptr to number of buffers requested by application 110 * @nbuffers: ptr to number of buffers requested by application
111 * @nplanes: contains number of distinct video planes needed to hold a frame 111 * @nplanes: contains number of distinct video planes needed to hold a frame
112 * @sizes[]: contains the size (in bytes) of each plane. 112 * @sizes: contains the size (in bytes) of each plane.
113 * @alloc_devs: ptr to allocation context 113 * @alloc_devs: ptr to allocation context
114 * 114 *
115 * This callback function is called when reqbuf() is called to adjust 115 * This callback function is called when reqbuf() is called to adjust
@@ -167,7 +167,7 @@ static void vpif_buffer_queue(struct vb2_buffer *vb)
167 167
168/** 168/**
169 * vpif_start_streaming : Starts the DMA engine for streaming 169 * vpif_start_streaming : Starts the DMA engine for streaming
170 * @vb: ptr to vb2_buffer 170 * @vq: ptr to vb2_buffer
171 * @count: number of buffers 171 * @count: number of buffers
172 */ 172 */
173static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count) 173static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
@@ -629,7 +629,7 @@ static void vpif_calculate_offsets(struct channel_obj *ch)
629 629
630/** 630/**
631 * vpif_get_default_field() - Get default field type based on interface 631 * vpif_get_default_field() - Get default field type based on interface
632 * @vpif_params - ptr to vpif params 632 * @iface: ptr to vpif interface
633 */ 633 */
634static inline enum v4l2_field vpif_get_default_field( 634static inline enum v4l2_field vpif_get_default_field(
635 struct vpif_interface *iface) 635 struct vpif_interface *iface)
@@ -640,8 +640,8 @@ static inline enum v4l2_field vpif_get_default_field(
640 640
641/** 641/**
642 * vpif_config_addr() - function to configure buffer address in vpif 642 * vpif_config_addr() - function to configure buffer address in vpif
643 * @ch - channel ptr 643 * @ch: channel ptr
644 * @muxmode - channel mux mode 644 * @muxmode: channel mux mode
645 */ 645 */
646static void vpif_config_addr(struct channel_obj *ch, int muxmode) 646static void vpif_config_addr(struct channel_obj *ch, int muxmode)
647{ 647{
@@ -661,9 +661,9 @@ static void vpif_config_addr(struct channel_obj *ch, int muxmode)
661 661
662/** 662/**
663 * vpif_input_to_subdev() - Maps input to sub device 663 * vpif_input_to_subdev() - Maps input to sub device
664 * @vpif_cfg - global config ptr 664 * @vpif_cfg: global config ptr
665 * @chan_cfg - channel config ptr 665 * @chan_cfg: channel config ptr
666 * @input_index - Given input index from application 666 * @input_index: Given input index from application
667 * 667 *
668 * lookup the sub device information for a given input index. 668 * lookup the sub device information for a given input index.
669 * we report all the inputs to application. inputs table also 669 * we report all the inputs to application. inputs table also
@@ -699,9 +699,9 @@ static int vpif_input_to_subdev(
699 699
700/** 700/**
701 * vpif_set_input() - Select an input 701 * vpif_set_input() - Select an input
702 * @vpif_cfg - global config ptr 702 * @vpif_cfg: global config ptr
703 * @ch - channel 703 * @ch: channel
704 * @_index - Given input index from application 704 * @index: Given input index from application
705 * 705 *
706 * Select the given input. 706 * Select the given input.
707 */ 707 */
@@ -792,7 +792,7 @@ static int vpif_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
792 * vpif_g_std() - get STD handler 792 * vpif_g_std() - get STD handler
793 * @file: file ptr 793 * @file: file ptr
794 * @priv: file handle 794 * @priv: file handle
795 * @std_id: ptr to std id 795 * @std: ptr to std id
796 */ 796 */
797static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std) 797static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std)
798{ 798{
@@ -933,7 +933,7 @@ static int vpif_s_input(struct file *file, void *priv, unsigned int index)
933 * vpif_enum_fmt_vid_cap() - ENUM_FMT handler 933 * vpif_enum_fmt_vid_cap() - ENUM_FMT handler
934 * @file: file ptr 934 * @file: file ptr
935 * @priv: file handle 935 * @priv: file handle
936 * @index: input index 936 * @fmt: ptr to V4L2 format descriptor
937 */ 937 */
938static int vpif_enum_fmt_vid_cap(struct file *file, void *priv, 938static int vpif_enum_fmt_vid_cap(struct file *file, void *priv,
939 struct v4l2_fmtdesc *fmt) 939 struct v4l2_fmtdesc *fmt)
@@ -1745,6 +1745,7 @@ static int vpif_remove(struct platform_device *device)
1745#ifdef CONFIG_PM_SLEEP 1745#ifdef CONFIG_PM_SLEEP
1746/** 1746/**
1747 * vpif_suspend: vpif device suspend 1747 * vpif_suspend: vpif device suspend
1748 * @dev: pointer to &struct device
1748 */ 1749 */
1749static int vpif_suspend(struct device *dev) 1750static int vpif_suspend(struct device *dev)
1750{ 1751{
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index ff2f75a328c9..7be636237acf 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -102,7 +102,7 @@ static int vpif_buffer_prepare(struct vb2_buffer *vb)
102 * @vq: vb2_queue ptr 102 * @vq: vb2_queue ptr
103 * @nbuffers: ptr to number of buffers requested by application 103 * @nbuffers: ptr to number of buffers requested by application
104 * @nplanes: contains number of distinct video planes needed to hold a frame 104 * @nplanes: contains number of distinct video planes needed to hold a frame
105 * @sizes[]: contains the size (in bytes) of each plane. 105 * @sizes: contains the size (in bytes) of each plane.
106 * @alloc_devs: ptr to allocation context 106 * @alloc_devs: ptr to allocation context
107 * 107 *
108 * This callback function is called when reqbuf() is called to adjust 108 * This callback function is called when reqbuf() is called to adjust
@@ -158,7 +158,7 @@ static void vpif_buffer_queue(struct vb2_buffer *vb)
158 158
159/** 159/**
160 * vpif_start_streaming : Starts the DMA engine for streaming 160 * vpif_start_streaming : Starts the DMA engine for streaming
161 * @vb: ptr to vb2_buffer 161 * @vq: ptr to vb2_buffer
162 * @count: number of buffers 162 * @count: number of buffers
163 */ 163 */
164static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count) 164static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
@@ -766,9 +766,9 @@ static int vpif_enum_output(struct file *file, void *fh,
766 766
767/** 767/**
768 * vpif_output_to_subdev() - Maps output to sub device 768 * vpif_output_to_subdev() - Maps output to sub device
769 * @vpif_cfg - global config ptr 769 * @vpif_cfg: global config ptr
770 * @chan_cfg - channel config ptr 770 * @chan_cfg: channel config ptr
771 * @index - Given output index from application 771 * @index: Given output index from application
772 * 772 *
773 * lookup the sub device information for a given output index. 773 * lookup the sub device information for a given output index.
774 * we report all the output to application. output table also 774 * we report all the output to application. output table also
@@ -802,9 +802,9 @@ vpif_output_to_subdev(struct vpif_display_config *vpif_cfg,
802 802
803/** 803/**
804 * vpif_set_output() - Select an output 804 * vpif_set_output() - Select an output
805 * @vpif_cfg - global config ptr 805 * @vpif_cfg: global config ptr
806 * @ch - channel 806 * @ch: channel
807 * @index - Given output index from application 807 * @index: Given output index from application
808 * 808 *
809 * Select the given output. 809 * Select the given output.
810 */ 810 */
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index 948fe01f6c96..ed9302caa004 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -146,6 +146,7 @@ static int fimc_stop_capture(struct fimc_dev *fimc, bool suspend)
146 146
147/** 147/**
148 * fimc_capture_config_update - apply the camera interface configuration 148 * fimc_capture_config_update - apply the camera interface configuration
149 * @ctx: FIMC capture context
149 * 150 *
150 * To be called from within the interrupt handler with fimc.slock 151 * To be called from within the interrupt handler with fimc.slock
151 * spinlock held. It updates the camera pixel crop, rotation and 152 * spinlock held. It updates the camera pixel crop, rotation and
@@ -858,6 +859,7 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
858 * fimc_get_sensor_frame_desc - query the sensor for media bus frame parameters 859 * fimc_get_sensor_frame_desc - query the sensor for media bus frame parameters
859 * @sensor: pointer to the sensor subdev 860 * @sensor: pointer to the sensor subdev
860 * @plane_fmt: provides plane sizes corresponding to the frame layout entries 861 * @plane_fmt: provides plane sizes corresponding to the frame layout entries
862 * @num_planes: number of planes
861 * @try: true to set the frame parameters, false to query only 863 * @try: true to set the frame parameters, false to query only
862 * 864 *
863 * This function is used by this driver only for compressed/blob data formats. 865 * This function is used by this driver only for compressed/blob data formats.
@@ -1101,6 +1103,7 @@ static int fimc_cap_g_input(struct file *file, void *priv, unsigned int *i)
1101/** 1103/**
1102 * fimc_pipeline_validate - check for formats inconsistencies 1104 * fimc_pipeline_validate - check for formats inconsistencies
1103 * between source and sink pad of each link 1105 * between source and sink pad of each link
1106 * @fimc: the FIMC device this context applies to
1104 * 1107 *
1105 * Return 0 if all formats match or -EPIPE otherwise. 1108 * Return 0 if all formats match or -EPIPE otherwise.
1106 */ 1109 */
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index c15596b56dc9..0ef583cfc424 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -60,6 +60,7 @@ static void __setup_sensor_notification(struct fimc_md *fmd,
60 60
61/** 61/**
62 * fimc_pipeline_prepare - update pipeline information with subdevice pointers 62 * fimc_pipeline_prepare - update pipeline information with subdevice pointers
63 * @p: fimc pipeline
63 * @me: media entity terminating the pipeline 64 * @me: media entity terminating the pipeline
64 * 65 *
65 * Caller holds the graph mutex. 66 * Caller holds the graph mutex.
@@ -151,8 +152,8 @@ static int __subdev_set_power(struct v4l2_subdev *sd, int on)
151 152
152/** 153/**
153 * fimc_pipeline_s_power - change power state of all pipeline subdevs 154 * fimc_pipeline_s_power - change power state of all pipeline subdevs
154 * @fimc: fimc device terminating the pipeline 155 * @p: fimc device terminating the pipeline
155 * @state: true to power on, false to power off 156 * @on: true to power on, false to power off
156 * 157 *
157 * Needs to be called with the graph mutex held. 158 * Needs to be called with the graph mutex held.
158 */ 159 */
@@ -219,6 +220,7 @@ static int __fimc_pipeline_enable(struct exynos_media_pipeline *ep,
219/** 220/**
220 * __fimc_pipeline_open - update the pipeline information, enable power 221 * __fimc_pipeline_open - update the pipeline information, enable power
221 * of all pipeline subdevs and the sensor clock 222 * of all pipeline subdevs and the sensor clock
223 * @ep: fimc device terminating the pipeline
222 * @me: media entity to start graph walk with 224 * @me: media entity to start graph walk with
223 * @prepare: true to walk the current pipeline and acquire all subdevs 225 * @prepare: true to walk the current pipeline and acquire all subdevs
224 * 226 *
@@ -252,7 +254,7 @@ static int __fimc_pipeline_open(struct exynos_media_pipeline *ep,
252 254
253/** 255/**
254 * __fimc_pipeline_close - disable the sensor clock and pipeline power 256 * __fimc_pipeline_close - disable the sensor clock and pipeline power
255 * @fimc: fimc device terminating the pipeline 257 * @ep: fimc device terminating the pipeline
256 * 258 *
257 * Disable power of all subdevs and turn the external sensor clock off. 259 * Disable power of all subdevs and turn the external sensor clock off.
258 */ 260 */
@@ -281,7 +283,7 @@ static int __fimc_pipeline_close(struct exynos_media_pipeline *ep)
281 283
282/** 284/**
283 * __fimc_pipeline_s_stream - call s_stream() on pipeline subdevs 285 * __fimc_pipeline_s_stream - call s_stream() on pipeline subdevs
284 * @pipeline: video pipeline structure 286 * @ep: video pipeline structure
285 * @on: passed as the s_stream() callback argument 287 * @on: passed as the s_stream() callback argument
286 */ 288 */
287static int __fimc_pipeline_s_stream(struct exynos_media_pipeline *ep, bool on) 289static int __fimc_pipeline_s_stream(struct exynos_media_pipeline *ep, bool on)
@@ -902,6 +904,7 @@ static int __fimc_md_create_fimc_is_links(struct fimc_md *fmd)
902 904
903/** 905/**
904 * fimc_md_create_links - create default links between registered entities 906 * fimc_md_create_links - create default links between registered entities
907 * @fmd: fimc media device
905 * 908 *
906 * Parallel interface sensor entities are connected directly to FIMC capture 909 * Parallel interface sensor entities are connected directly to FIMC capture
907 * entities. The sensors using MIPI CSIS bus are connected through immutable 910 * entities. The sensors using MIPI CSIS bus are connected through immutable
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index 560aadabcb11..cba46a656338 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -189,7 +189,7 @@ struct csis_drvdata {
189 * @irq: requested s5p-mipi-csis irq number 189 * @irq: requested s5p-mipi-csis irq number
190 * @interrupt_mask: interrupt mask of the all used interrupts 190 * @interrupt_mask: interrupt mask of the all used interrupts
191 * @flags: the state variable for power and streaming control 191 * @flags: the state variable for power and streaming control
192 * @clock_frequency: device bus clock frequency 192 * @clk_frequency: device bus clock frequency
193 * @hs_settle: HS-RX settle time 193 * @hs_settle: HS-RX settle time
194 * @num_lanes: number of MIPI-CSI data lanes used 194 * @num_lanes: number of MIPI-CSI data lanes used
195 * @max_num_lanes: maximum number of MIPI-CSI data lanes supported 195 * @max_num_lanes: maximum number of MIPI-CSI data lanes supported
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
index b7731b18ecae..aa3ce41898bc 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
@@ -59,6 +59,7 @@ struct h264_fb {
59 * @read_idx : read index 59 * @read_idx : read index
60 * @write_idx : write index 60 * @write_idx : write index
61 * @count : buffer count in list 61 * @count : buffer count in list
62 * @reserved : for 8 bytes alignment
62 */ 63 */
63struct h264_ring_fb_list { 64struct h264_ring_fb_list {
64 struct h264_fb fb_list[H264_MAX_FB_NUM]; 65 struct h264_fb fb_list[H264_MAX_FB_NUM];
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
index b9fad6a48879..3e84a761db3a 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
@@ -155,7 +155,6 @@ struct vdec_vp8_vpu_inst {
155 * @reg_base : HW register base address 155 * @reg_base : HW register base address
156 * @frm_cnt : decode frame count 156 * @frm_cnt : decode frame count
157 * @ctx : V4L2 context 157 * @ctx : V4L2 context
158 * @dev : platform device
159 * @vpu : VPU instance for decoder 158 * @vpu : VPU instance for decoder
160 * @vsi : VPU share information 159 * @vsi : VPU share information
161 */ 160 */
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
index 4eb3be37ba14..6cf31b366aad 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
@@ -34,7 +34,7 @@ static const char h264_filler_marker[] = {0x0, 0x0, 0x0, 0x1, 0xc};
34#define H264_FILLER_MARKER_SIZE ARRAY_SIZE(h264_filler_marker) 34#define H264_FILLER_MARKER_SIZE ARRAY_SIZE(h264_filler_marker)
35#define VENC_PIC_BITSTREAM_BYTE_CNT 0x0098 35#define VENC_PIC_BITSTREAM_BYTE_CNT 0x0098
36 36
37/** 37/*
38 * enum venc_h264_vpu_work_buf - h264 encoder buffer index 38 * enum venc_h264_vpu_work_buf - h264 encoder buffer index
39 */ 39 */
40enum venc_h264_vpu_work_buf { 40enum venc_h264_vpu_work_buf {
@@ -50,7 +50,7 @@ enum venc_h264_vpu_work_buf {
50 VENC_H264_VPU_WORK_BUF_MAX, 50 VENC_H264_VPU_WORK_BUF_MAX,
51}; 51};
52 52
53/** 53/*
54 * enum venc_h264_bs_mode - for bs_mode argument in h264_enc_vpu_encode 54 * enum venc_h264_bs_mode - for bs_mode argument in h264_enc_vpu_encode
55 */ 55 */
56enum venc_h264_bs_mode { 56enum venc_h264_bs_mode {
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
index acb639c4abd2..957420dd60de 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
@@ -34,7 +34,7 @@
34/* This ac_tag is vp8 frame tag. */ 34/* This ac_tag is vp8 frame tag. */
35#define MAX_AC_TAG_SIZE 10 35#define MAX_AC_TAG_SIZE 10
36 36
37/** 37/*
38 * enum venc_vp8_vpu_work_buf - vp8 encoder buffer index 38 * enum venc_vp8_vpu_work_buf - vp8 encoder buffer index
39 */ 39 */
40enum venc_vp8_vpu_work_buf { 40enum venc_vp8_vpu_work_buf {
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
index 853d598937f6..1ff6a93262b7 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -181,6 +181,7 @@ struct share_obj {
181 * @extmem: VPU extended memory information 181 * @extmem: VPU extended memory information
182 * @reg: VPU TCM and configuration registers 182 * @reg: VPU TCM and configuration registers
183 * @run: VPU initialization status 183 * @run: VPU initialization status
184 * @wdt: VPU watchdog workqueue
184 * @ipi_desc: VPU IPI descriptor 185 * @ipi_desc: VPU IPI descriptor
185 * @recv_buf: VPU DTCM share buffer for receiving. The 186 * @recv_buf: VPU DTCM share buffer for receiving. The
186 * receive buffer is only accessed in interrupt context. 187 * receive buffer is only accessed in interrupt context.
@@ -194,7 +195,7 @@ struct share_obj {
194 * suppose a client is using VPU to decode VP8. 195 * suppose a client is using VPU to decode VP8.
195 * If the other client wants to encode VP8, 196 * If the other client wants to encode VP8,
196 * it has to wait until VP8 decode completes. 197 * it has to wait until VP8 decode completes.
197 * @wdt_refcnt WDT reference count to make sure the watchdog can be 198 * @wdt_refcnt: WDT reference count to make sure the watchdog can be
198 * disabled if no other client is using VPU service 199 * disabled if no other client is using VPU service
199 * @ack_wq: The wait queue for each codec and mdp. When sleeping 200 * @ack_wq: The wait queue for each codec and mdp. When sleeping
200 * processes wake up, they will check the condition 201 * processes wake up, they will check the condition
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index 9d3f0cb1d95a..295f34ad1080 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -235,6 +235,7 @@ enum pxa_mbus_layout {
235 * stored in memory in the following way: 235 * stored in memory in the following way:
236 * @packing: Type of sample-packing, that has to be used 236 * @packing: Type of sample-packing, that has to be used
237 * @order: Sample order when storing in memory 237 * @order: Sample order when storing in memory
238 * @layout: Planes layout in memory
238 * @bits_per_sample: How many bits the bridge has to sample 239 * @bits_per_sample: How many bits the bridge has to sample
239 */ 240 */
240struct pxa_mbus_pixelfmt { 241struct pxa_mbus_pixelfmt {
@@ -852,10 +853,10 @@ static void pxa_camera_dma_irq_v(void *data)
852/** 853/**
853 * pxa_init_dma_channel - init dma descriptors 854 * pxa_init_dma_channel - init dma descriptors
854 * @pcdev: pxa camera device 855 * @pcdev: pxa camera device
855 * @vb: videobuffer2 buffer 856 * @buf: pxa camera buffer
856 * @dma: dma video buffer
857 * @channel: dma channel (0 => 'Y', 1 => 'U', 2 => 'V') 857 * @channel: dma channel (0 => 'Y', 1 => 'U', 2 => 'V')
858 * @cibr: camera Receive Buffer Register 858 * @sg: dma scatter list
859 * @sglen: dma scatter list length
859 * 860 *
860 * Prepares the pxa dma descriptors to transfer one camera channel. 861 * Prepares the pxa dma descriptors to transfer one camera channel.
861 * 862 *
@@ -1010,6 +1011,8 @@ static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
1010/** 1011/**
1011 * pxa_camera_check_link_miss - check missed DMA linking 1012 * pxa_camera_check_link_miss - check missed DMA linking
1012 * @pcdev: camera device 1013 * @pcdev: camera device
1014 * @last_submitted: an opaque DMA cookie for last submitted
1015 * @last_issued: an opaque DMA cookie for last issued
1013 * 1016 *
1014 * The DMA chaining is done with DMA running. This means a tiny temporal window 1017 * The DMA chaining is done with DMA running. This means a tiny temporal window
1015 * remains, where a buffer is queued on the chain, while the chain is already 1018 * remains, where a buffer is queued on the chain, while the chain is already
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
index 3245bc45f4a0..b13dec3081e5 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -1132,7 +1132,7 @@ static int fdp1_device_process(struct fdp1_ctx *ctx)
1132 * mem2mem callbacks 1132 * mem2mem callbacks
1133 */ 1133 */
1134 1134
1135/** 1135/*
1136 * job_ready() - check whether an instance is ready to be scheduled to run 1136 * job_ready() - check whether an instance is ready to be scheduled to run
1137 */ 1137 */
1138static int fdp1_m2m_job_ready(void *priv) 1138static int fdp1_m2m_job_ready(void *priv)
diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c
index 070bac36d766..f6092ae45912 100644
--- a/drivers/media/platform/rcar_jpu.c
+++ b/drivers/media/platform/rcar_jpu.c
@@ -257,7 +257,7 @@ struct jpu_fmt {
257}; 257};
258 258
259/** 259/**
260 * jpu_q_data - parameters of one queue 260 * struct jpu_q_data - parameters of one queue
261 * @fmtinfo: driver-specific format of this queue 261 * @fmtinfo: driver-specific format of this queue
262 * @format: multiplanar format of this queue 262 * @format: multiplanar format of this queue
263 * @sequence: sequence number 263 * @sequence: sequence number
@@ -269,7 +269,7 @@ struct jpu_q_data {
269}; 269};
270 270
271/** 271/**
272 * jpu_ctx - the device context data 272 * struct jpu_ctx - the device context data
273 * @jpu: JPEG IP device for this context 273 * @jpu: JPEG IP device for this context
274 * @encoder: compression (encode) operation or decompression (decode) 274 * @encoder: compression (encode) operation or decompression (decode)
275 * @compr_quality: destination image quality in compression (encode) mode 275 * @compr_quality: destination image quality in compression (encode) mode
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
index c4ab63986c8f..79bc0ef6bb41 100644
--- a/drivers/media/platform/s3c-camif/camif-core.c
+++ b/drivers/media/platform/s3c-camif/camif-core.c
@@ -103,6 +103,7 @@ static const struct camif_fmt camif_formats[] = {
103 103
104/** 104/**
105 * s3c_camif_find_format() - lookup camif color format by fourcc or an index 105 * s3c_camif_find_format() - lookup camif color format by fourcc or an index
106 * @vp: video path (DMA) description (codec/preview)
106 * @pixelformat: fourcc to match, ignored if null 107 * @pixelformat: fourcc to match, ignored if null
107 * @index: index to the camif_formats array, ignored if negative 108 * @index: index to the camif_formats array, ignored if negative
108 */ 109 */
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index 15a562af13c7..dedc1b024f6f 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -267,7 +267,7 @@ static void sh_veu_process(struct sh_veu_dev *veu,
267 sh_veu_reg_write(veu, VEU_EIER, 1); /* enable interrupt in VEU */ 267 sh_veu_reg_write(veu, VEU_EIER, 1); /* enable interrupt in VEU */
268} 268}
269 269
270/** 270/*
271 * sh_veu_device_run() - prepares and starts the device 271 * sh_veu_device_run() - prepares and starts the device
272 * 272 *
273 * This will be called by the framework when it decides to schedule a particular 273 * This will be called by the framework when it decides to schedule a particular
diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.c b/drivers/media/platform/soc_camera/soc_scale_crop.c
index 0116097c0c0f..270ec613c27c 100644
--- a/drivers/media/platform/soc_camera/soc_scale_crop.c
+++ b/drivers/media/platform/soc_camera/soc_scale_crop.c
@@ -306,16 +306,17 @@ update_cache:
306} 306}
307 307
308/** 308/**
309 * @icd - soc-camera device 309 * soc_camera_client_scale
310 * @rect - camera cropping window 310 * @icd: soc-camera device
311 * @subrect - part of rect, sent to the user 311 * @rect: camera cropping window
312 * @mf - in- / output camera output window 312 * @subrect: part of rect, sent to the user
313 * @width - on input: max host input width 313 * @mf: in- / output camera output window
314 * on output: user width, mapped back to input 314 * @width: on input: max host input width;
315 * @height - on input: max host input height 315 * on output: user width, mapped back to input
316 * on output: user height, mapped back to input 316 * @height: on input: max host input height;
317 * @host_can_scale - host can scale this pixel format 317 * on output: user height, mapped back to input
318 * @shift - shift, used for scaling 318 * @host_can_scale: host can scale this pixel format
319 * @shift: shift, used for scaling
319 */ 320 */
320int soc_camera_client_scale(struct soc_camera_device *icd, 321int soc_camera_client_scale(struct soc_camera_device *icd,
321 struct v4l2_rect *rect, struct v4l2_rect *subrect, 322 struct v4l2_rect *rect, struct v4l2_rect *subrect,
diff --git a/drivers/media/platform/sti/hva/hva-h264.c b/drivers/media/platform/sti/hva/hva-h264.c
index a7e5eed17ada..17f1eb0ba957 100644
--- a/drivers/media/platform/sti/hva/hva-h264.c
+++ b/drivers/media/platform/sti/hva/hva-h264.c
@@ -134,7 +134,7 @@ enum hva_h264_sei_payload_type {
134 SEI_FRAME_PACKING_ARRANGEMENT = 45 134 SEI_FRAME_PACKING_ARRANGEMENT = 45
135}; 135};
136 136
137/** 137/*
138 * stereo Video Info struct 138 * stereo Video Info struct
139 */ 139 */
140struct hva_h264_stereo_video_sei { 140struct hva_h264_stereo_video_sei {
@@ -146,7 +146,9 @@ struct hva_h264_stereo_video_sei {
146 u8 right_view_self_contained_flag; 146 u8 right_view_self_contained_flag;
147}; 147};
148 148
149/** 149/*
150 * struct hva_h264_td
151 *
150 * @frame_width: width in pixels of the buffer containing the input frame 152 * @frame_width: width in pixels of the buffer containing the input frame
151 * @frame_height: height in pixels of the buffer containing the input frame 153 * @frame_height: height in pixels of the buffer containing the input frame
152 * @frame_num: the parameter to be written in the slice header 154 * @frame_num: the parameter to be written in the slice header
@@ -352,7 +354,9 @@ struct hva_h264_td {
352 u32 addr_brc_in_out_parameter; 354 u32 addr_brc_in_out_parameter;
353}; 355};
354 356
355/** 357/*
358 * struct hva_h264_slice_po
359 *
356 * @ slice_size: slice size 360 * @ slice_size: slice size
357 * @ slice_start_time: start time 361 * @ slice_start_time: start time
358 * @ slice_stop_time: stop time 362 * @ slice_stop_time: stop time
@@ -365,7 +369,9 @@ struct hva_h264_slice_po {
365 u32 slice_num; 369 u32 slice_num;
366}; 370};
367 371
368/** 372/*
373 * struct hva_h264_po
374 *
369 * @ bitstream_size: bitstream size 375 * @ bitstream_size: bitstream size
370 * @ dct_bitstream_size: dtc bitstream size 376 * @ dct_bitstream_size: dtc bitstream size
371 * @ stuffing_bits: number of stuffing bits inserted by the encoder 377 * @ stuffing_bits: number of stuffing bits inserted by the encoder
@@ -391,7 +397,9 @@ struct hva_h264_task {
391 struct hva_h264_po po; 397 struct hva_h264_po po;
392}; 398};
393 399
394/** 400/*
401 * struct hva_h264_ctx
402 *
395 * @seq_info: sequence information buffer 403 * @seq_info: sequence information buffer
396 * @ref_frame: reference frame buffer 404 * @ref_frame: reference frame buffer
397 * @rec_frame: reconstructed frame buffer 405 * @rec_frame: reconstructed frame buffer
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 45bd10544189..e395aa85c8ad 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -926,7 +926,7 @@ static struct vpe_ctx *file2ctx(struct file *file)
926 * mem2mem callbacks 926 * mem2mem callbacks
927 */ 927 */
928 928
929/** 929/*
930 * job_ready() - check whether an instance is ready to be scheduled to run 930 * job_ready() - check whether an instance is ready to be scheduled to run
931 */ 931 */
932static int job_ready(void *priv) 932static int job_ready(void *priv)
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index 7bf9fa2f8534..065483e62db4 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -343,7 +343,7 @@ static void schedule_irq(struct vim2m_dev *dev, int msec_timeout)
343 * mem2mem callbacks 343 * mem2mem callbacks
344 */ 344 */
345 345
346/** 346/*
347 * job_ready() - check whether an instance is ready to be scheduled to run 347 * job_ready() - check whether an instance is ready to be scheduled to run
348 */ 348 */
349static int job_ready(void *priv) 349static int job_ready(void *priv)
diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
index 8b5cbb6b7a70..4257451f1bd8 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.c
+++ b/drivers/media/platform/vsp1/vsp1_dl.c
@@ -70,6 +70,7 @@ struct vsp1_dl_body {
70 * @dma: DMA address for the header 70 * @dma: DMA address for the header
71 * @body0: first display list body 71 * @body0: first display list body
72 * @fragments: list of extra display list bodies 72 * @fragments: list of extra display list bodies
73 * @has_chain: if true, indicates that there's a partition chain
73 * @chain: entry in the display list partition chain 74 * @chain: entry in the display list partition chain
74 */ 75 */
75struct vsp1_dl_list { 76struct vsp1_dl_list {
diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
index 271f725b17e8..540ac887a63c 100644
--- a/drivers/media/radio/radio-si476x.c
+++ b/drivers/media/radio/radio-si476x.c
@@ -158,7 +158,7 @@ enum si476x_ctrl_idx {
158}; 158};
159static struct v4l2_ctrl_config si476x_ctrls[] = { 159static struct v4l2_ctrl_config si476x_ctrls[] = {
160 160
161 /** 161 /*
162 * SI476X during its station seeking(or tuning) process uses several 162 * SI476X during its station seeking(or tuning) process uses several
163 * parameters to detrmine if "the station" is valid: 163 * parameters to detrmine if "the station" is valid:
164 * 164 *
@@ -197,7 +197,7 @@ static struct v4l2_ctrl_config si476x_ctrls[] = {
197 .step = 2, 197 .step = 2,
198 }, 198 },
199 199
200 /** 200 /*
201 * #V4L2_CID_SI476X_HARMONICS_COUNT -- number of harmonics 201 * #V4L2_CID_SI476X_HARMONICS_COUNT -- number of harmonics
202 * built-in power-line noise supression filter is to reject 202 * built-in power-line noise supression filter is to reject
203 * during AM-mode operation. 203 * during AM-mode operation.
@@ -213,7 +213,7 @@ static struct v4l2_ctrl_config si476x_ctrls[] = {
213 .step = 1, 213 .step = 1,
214 }, 214 },
215 215
216 /** 216 /*
217 * #V4L2_CID_SI476X_DIVERSITY_MODE -- configuration which 217 * #V4L2_CID_SI476X_DIVERSITY_MODE -- configuration which
218 * two tuners working in diversity mode are to work in. 218 * two tuners working in diversity mode are to work in.
219 * 219 *
@@ -237,7 +237,7 @@ static struct v4l2_ctrl_config si476x_ctrls[] = {
237 .max = ARRAY_SIZE(phase_diversity_modes) - 1, 237 .max = ARRAY_SIZE(phase_diversity_modes) - 1,
238 }, 238 },
239 239
240 /** 240 /*
241 * #V4L2_CID_SI476X_INTERCHIP_LINK -- inter-chip link in 241 * #V4L2_CID_SI476X_INTERCHIP_LINK -- inter-chip link in
242 * diversity mode indicator. Allows user to determine if two 242 * diversity mode indicator. Allows user to determine if two
243 * chips working in diversity mode have established a link 243 * chips working in diversity mode have established a link
@@ -296,11 +296,15 @@ struct si476x_radio_ops {
296/** 296/**
297 * struct si476x_radio - radio device 297 * struct si476x_radio - radio device
298 * 298 *
299 * @core: Pointer to underlying core device 299 * @v4l2dev: Pointer to V4L2 device created by V4L2 subsystem
300 * @videodev: Pointer to video device created by V4L2 subsystem 300 * @videodev: Pointer to video device created by V4L2 subsystem
301 * @ctrl_handler: V4L2 controls handler
302 * @core: Pointer to underlying core device
301 * @ops: Vtable of functions. See struct si476x_radio_ops for details 303 * @ops: Vtable of functions. See struct si476x_radio_ops for details
302 * @kref: Reference counter 304 * @debugfs: pointer to &strucd dentry for debugfs
303 * @core_lock: An r/w semaphore to brebvent the deletion of underlying 305 * @audmode: audio mode, as defined for the rxsubchans field
306 * at videodev2.h
307 *
304 * core structure is the radio device is being used 308 * core structure is the radio device is being used
305 */ 309 */
306struct si476x_radio { 310struct si476x_radio {
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 903fcd5e99c0..3cbdc085c65d 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1330,7 +1330,7 @@ static int wl1273_fm_vidioc_s_input(struct file *file, void *priv,
1330 1330
1331/** 1331/**
1332 * wl1273_fm_set_tx_power() - Set the transmission power value. 1332 * wl1273_fm_set_tx_power() - Set the transmission power value.
1333 * @core: A pointer to the device struct. 1333 * @radio: A pointer to the device struct.
1334 * @power: The new power value. 1334 * @power: The new power value.
1335 */ 1335 */
1336static int wl1273_fm_set_tx_power(struct wl1273_device *radio, u16 power) 1336static int wl1273_fm_set_tx_power(struct wl1273_device *radio, u16 power)
diff --git a/drivers/media/rc/img-ir/img-ir-hw.c b/drivers/media/rc/img-ir/img-ir-hw.c
index f54bc5d23893..ec4ded84cd17 100644
--- a/drivers/media/rc/img-ir/img-ir-hw.c
+++ b/drivers/media/rc/img-ir/img-ir-hw.c
@@ -339,7 +339,7 @@ static void img_ir_decoder_preprocess(struct img_ir_decoder *decoder)
339/** 339/**
340 * img_ir_decoder_convert() - Generate internal timings in decoder. 340 * img_ir_decoder_convert() - Generate internal timings in decoder.
341 * @decoder: Decoder to be converted to internal timings. 341 * @decoder: Decoder to be converted to internal timings.
342 * @timings: Timing register values. 342 * @reg_timings: Timing register values.
343 * @clock_hz: IR clock rate in Hz. 343 * @clock_hz: IR clock rate in Hz.
344 * 344 *
345 * Fills out the repeat timings and timing register values for a specific clock 345 * Fills out the repeat timings and timing register values for a specific clock
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index b25b35b3f6da..eb943e862515 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -492,7 +492,7 @@ static void free_imon_context(struct imon_context *ictx)
492 dev_dbg(dev, "%s: iMON context freed\n", __func__); 492 dev_dbg(dev, "%s: iMON context freed\n", __func__);
493} 493}
494 494
495/** 495/*
496 * Called when the Display device (e.g. /dev/lcd0) 496 * Called when the Display device (e.g. /dev/lcd0)
497 * is opened by the application. 497 * is opened by the application.
498 */ 498 */
@@ -542,7 +542,7 @@ exit:
542 return retval; 542 return retval;
543} 543}
544 544
545/** 545/*
546 * Called when the display device (e.g. /dev/lcd0) 546 * Called when the display device (e.g. /dev/lcd0)
547 * is closed by the application. 547 * is closed by the application.
548 */ 548 */
@@ -575,7 +575,7 @@ static int display_close(struct inode *inode, struct file *file)
575 return retval; 575 return retval;
576} 576}
577 577
578/** 578/*
579 * Sends a packet to the device -- this function must be called with 579 * Sends a packet to the device -- this function must be called with
580 * ictx->lock held, or its unlock/lock sequence while waiting for tx 580 * ictx->lock held, or its unlock/lock sequence while waiting for tx
581 * to complete can/will lead to a deadlock. 581 * to complete can/will lead to a deadlock.
@@ -664,7 +664,7 @@ static int send_packet(struct imon_context *ictx)
664 return retval; 664 return retval;
665} 665}
666 666
667/** 667/*
668 * Sends an associate packet to the iMON 2.4G. 668 * Sends an associate packet to the iMON 2.4G.
669 * 669 *
670 * This might not be such a good idea, since it has an id collision with 670 * This might not be such a good idea, since it has an id collision with
@@ -694,7 +694,7 @@ static int send_associate_24g(struct imon_context *ictx)
694 return retval; 694 return retval;
695} 695}
696 696
697/** 697/*
698 * Sends packets to setup and show clock on iMON display 698 * Sends packets to setup and show clock on iMON display
699 * 699 *
700 * Arguments: year - last 2 digits of year, month - 1..12, 700 * Arguments: year - last 2 digits of year, month - 1..12,
@@ -781,7 +781,7 @@ static int send_set_imon_clock(struct imon_context *ictx,
781 return retval; 781 return retval;
782} 782}
783 783
784/** 784/*
785 * These are the sysfs functions to handle the association on the iMON 2.4G LT. 785 * These are the sysfs functions to handle the association on the iMON 2.4G LT.
786 */ 786 */
787static ssize_t show_associate_remote(struct device *d, 787static ssize_t show_associate_remote(struct device *d,
@@ -823,7 +823,7 @@ static ssize_t store_associate_remote(struct device *d,
823 return count; 823 return count;
824} 824}
825 825
826/** 826/*
827 * sysfs functions to control internal imon clock 827 * sysfs functions to control internal imon clock
828 */ 828 */
829static ssize_t show_imon_clock(struct device *d, 829static ssize_t show_imon_clock(struct device *d,
@@ -923,7 +923,7 @@ static const struct attribute_group imon_rf_attr_group = {
923 .attrs = imon_rf_sysfs_entries 923 .attrs = imon_rf_sysfs_entries
924}; 924};
925 925
926/** 926/*
927 * Writes data to the VFD. The iMON VFD is 2x16 characters 927 * Writes data to the VFD. The iMON VFD is 2x16 characters
928 * and requires data in 5 consecutive USB interrupt packets, 928 * and requires data in 5 consecutive USB interrupt packets,
929 * each packet but the last carrying 7 bytes. 929 * each packet but the last carrying 7 bytes.
@@ -1008,7 +1008,7 @@ exit:
1008 return (!retval) ? n_bytes : retval; 1008 return (!retval) ? n_bytes : retval;
1009} 1009}
1010 1010
1011/** 1011/*
1012 * Writes data to the LCD. The iMON OEM LCD screen expects 8-byte 1012 * Writes data to the LCD. The iMON OEM LCD screen expects 8-byte
1013 * packets. We accept data as 16 hexadecimal digits, followed by a 1013 * packets. We accept data as 16 hexadecimal digits, followed by a
1014 * newline (to make it easy to drive the device from a command-line 1014 * newline (to make it easy to drive the device from a command-line
@@ -1066,7 +1066,7 @@ exit:
1066 return (!retval) ? n_bytes : retval; 1066 return (!retval) ? n_bytes : retval;
1067} 1067}
1068 1068
1069/** 1069/*
1070 * Callback function for USB core API: transmit data 1070 * Callback function for USB core API: transmit data
1071 */ 1071 */
1072static void usb_tx_callback(struct urb *urb) 1072static void usb_tx_callback(struct urb *urb)
@@ -1087,7 +1087,7 @@ static void usb_tx_callback(struct urb *urb)
1087 complete(&ictx->tx.finished); 1087 complete(&ictx->tx.finished);
1088} 1088}
1089 1089
1090/** 1090/*
1091 * report touchscreen input 1091 * report touchscreen input
1092 */ 1092 */
1093static void imon_touch_display_timeout(struct timer_list *t) 1093static void imon_touch_display_timeout(struct timer_list *t)
@@ -1103,7 +1103,7 @@ static void imon_touch_display_timeout(struct timer_list *t)
1103 input_sync(ictx->touch); 1103 input_sync(ictx->touch);
1104} 1104}
1105 1105
1106/** 1106/*
1107 * iMON IR receivers support two different signal sets -- those used by 1107 * iMON IR receivers support two different signal sets -- those used by
1108 * the iMON remotes, and those used by the Windows MCE remotes (which is 1108 * the iMON remotes, and those used by the Windows MCE remotes (which is
1109 * really just RC-6), but only one or the other at a time, as the signals 1109 * really just RC-6), but only one or the other at a time, as the signals
@@ -1191,7 +1191,7 @@ static inline int tv2int(const struct timeval *a, const struct timeval *b)
1191 return sec; 1191 return sec;
1192} 1192}
1193 1193
1194/** 1194/*
1195 * The directional pad behaves a bit differently, depending on whether this is 1195 * The directional pad behaves a bit differently, depending on whether this is
1196 * one of the older ffdc devices or a newer device. Newer devices appear to 1196 * one of the older ffdc devices or a newer device. Newer devices appear to
1197 * have a higher resolution matrix for more precise mouse movement, but it 1197 * have a higher resolution matrix for more precise mouse movement, but it
@@ -1543,7 +1543,7 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
1543 } 1543 }
1544} 1544}
1545 1545
1546/** 1546/*
1547 * figure out if these is a press or a release. We don't actually 1547 * figure out if these is a press or a release. We don't actually
1548 * care about repeats, as those will be auto-generated within the IR 1548 * care about repeats, as those will be auto-generated within the IR
1549 * subsystem for repeating scancodes. 1549 * subsystem for repeating scancodes.
@@ -1592,10 +1592,10 @@ static int imon_parse_press_type(struct imon_context *ictx,
1592 return press_type; 1592 return press_type;
1593} 1593}
1594 1594
1595/** 1595/*
1596 * Process the incoming packet 1596 * Process the incoming packet
1597 */ 1597 */
1598/** 1598/*
1599 * Convert bit count to time duration (in us) and submit 1599 * Convert bit count to time duration (in us) and submit
1600 * the value to lirc_dev. 1600 * the value to lirc_dev.
1601 */ 1601 */
@@ -1608,7 +1608,7 @@ static void submit_data(struct imon_context *context)
1608 ir_raw_event_store_with_filter(context->rdev, &ev); 1608 ir_raw_event_store_with_filter(context->rdev, &ev);
1609} 1609}
1610 1610
1611/** 1611/*
1612 * Process the incoming packet 1612 * Process the incoming packet
1613 */ 1613 */
1614static void imon_incoming_ir_raw(struct imon_context *context, 1614static void imon_incoming_ir_raw(struct imon_context *context,
@@ -1831,7 +1831,7 @@ not_input_data:
1831 } 1831 }
1832} 1832}
1833 1833
1834/** 1834/*
1835 * Callback function for USB core API: receive data 1835 * Callback function for USB core API: receive data
1836 */ 1836 */
1837static void usb_rx_callback_intf0(struct urb *urb) 1837static void usb_rx_callback_intf0(struct urb *urb)
@@ -2485,7 +2485,7 @@ static void imon_init_display(struct imon_context *ictx,
2485 2485
2486} 2486}
2487 2487
2488/** 2488/*
2489 * Callback function for USB core API: Probe 2489 * Callback function for USB core API: Probe
2490 */ 2490 */
2491static int imon_probe(struct usb_interface *interface, 2491static int imon_probe(struct usb_interface *interface,
@@ -2583,7 +2583,7 @@ fail:
2583 return ret; 2583 return ret;
2584} 2584}
2585 2585
2586/** 2586/*
2587 * Callback function for USB core API: disconnect 2587 * Callback function for USB core API: disconnect
2588 */ 2588 */
2589static void imon_disconnect(struct usb_interface *interface) 2589static void imon_disconnect(struct usb_interface *interface)
diff --git a/drivers/media/rc/ir-jvc-decoder.c b/drivers/media/rc/ir-jvc-decoder.c
index e2bd68c42edf..22c8aee3df4f 100644
--- a/drivers/media/rc/ir-jvc-decoder.c
+++ b/drivers/media/rc/ir-jvc-decoder.c
@@ -39,7 +39,7 @@ enum jvc_state {
39/** 39/**
40 * ir_jvc_decode() - Decode one JVC pulse or space 40 * ir_jvc_decode() - Decode one JVC pulse or space
41 * @dev: the struct rc_dev descriptor of the device 41 * @dev: the struct rc_dev descriptor of the device
42 * @duration: the struct ir_raw_event descriptor of the pulse/space 42 * @ev: the struct ir_raw_event descriptor of the pulse/space
43 * 43 *
44 * This function returns -EINVAL if the pulse violates the state machine 44 * This function returns -EINVAL if the pulse violates the state machine
45 */ 45 */
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index 8f2f37412fc5..4fd4521693d9 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -25,8 +25,8 @@
25/** 25/**
26 * ir_lirc_decode() - Send raw IR data to lirc_dev to be relayed to the 26 * ir_lirc_decode() - Send raw IR data to lirc_dev to be relayed to the
27 * lircd userspace daemon for decoding. 27 * lircd userspace daemon for decoding.
28 * @input_dev: the struct rc_dev descriptor of the device 28 * @dev: the struct rc_dev descriptor of the device
29 * @duration: the struct ir_raw_event descriptor of the pulse/space 29 * @ev: the struct ir_raw_event descriptor of the pulse/space
30 * 30 *
31 * This function returns -EINVAL if the lirc interfaces aren't wired up. 31 * This function returns -EINVAL if the lirc interfaces aren't wired up.
32 */ 32 */
diff --git a/drivers/media/rc/ir-nec-decoder.c b/drivers/media/rc/ir-nec-decoder.c
index a95d09acc22a..6880c190dcd2 100644
--- a/drivers/media/rc/ir-nec-decoder.c
+++ b/drivers/media/rc/ir-nec-decoder.c
@@ -41,7 +41,7 @@ enum nec_state {
41/** 41/**
42 * ir_nec_decode() - Decode one NEC pulse or space 42 * ir_nec_decode() - Decode one NEC pulse or space
43 * @dev: the struct rc_dev descriptor of the device 43 * @dev: the struct rc_dev descriptor of the device
44 * @duration: the struct ir_raw_event descriptor of the pulse/space 44 * @ev: the struct ir_raw_event descriptor of the pulse/space
45 * 45 *
46 * This function returns -EINVAL if the pulse violates the state machine 46 * This function returns -EINVAL if the pulse violates the state machine
47 */ 47 */
@@ -183,7 +183,6 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
183 * ir_nec_scancode_to_raw() - encode an NEC scancode ready for modulation. 183 * ir_nec_scancode_to_raw() - encode an NEC scancode ready for modulation.
184 * @protocol: specific protocol to use 184 * @protocol: specific protocol to use
185 * @scancode: a single NEC scancode. 185 * @scancode: a single NEC scancode.
186 * @raw: raw data to be modulated.
187 */ 186 */
188static u32 ir_nec_scancode_to_raw(enum rc_proto protocol, u32 scancode) 187static u32 ir_nec_scancode_to_raw(enum rc_proto protocol, u32 scancode)
189{ 188{
diff --git a/drivers/media/rc/ir-sanyo-decoder.c b/drivers/media/rc/ir-sanyo-decoder.c
index 758c60956850..d94e07b02f3b 100644
--- a/drivers/media/rc/ir-sanyo-decoder.c
+++ b/drivers/media/rc/ir-sanyo-decoder.c
@@ -48,7 +48,7 @@ enum sanyo_state {
48/** 48/**
49 * ir_sanyo_decode() - Decode one SANYO pulse or space 49 * ir_sanyo_decode() - Decode one SANYO pulse or space
50 * @dev: the struct rc_dev descriptor of the device 50 * @dev: the struct rc_dev descriptor of the device
51 * @duration: the struct ir_raw_event descriptor of the pulse/space 51 * @ev: the struct ir_raw_event descriptor of the pulse/space
52 * 52 *
53 * This function returns -EINVAL if the pulse violates the state machine 53 * This function returns -EINVAL if the pulse violates the state machine
54 */ 54 */
diff --git a/drivers/media/rc/ir-sharp-decoder.c b/drivers/media/rc/ir-sharp-decoder.c
index 129b558acc92..7140dd6160ee 100644
--- a/drivers/media/rc/ir-sharp-decoder.c
+++ b/drivers/media/rc/ir-sharp-decoder.c
@@ -39,7 +39,7 @@ enum sharp_state {
39/** 39/**
40 * ir_sharp_decode() - Decode one Sharp pulse or space 40 * ir_sharp_decode() - Decode one Sharp pulse or space
41 * @dev: the struct rc_dev descriptor of the device 41 * @dev: the struct rc_dev descriptor of the device
42 * @duration: the struct ir_raw_event descriptor of the pulse/space 42 * @ev: the struct ir_raw_event descriptor of the pulse/space
43 * 43 *
44 * This function returns -EINVAL if the pulse violates the state machine 44 * This function returns -EINVAL if the pulse violates the state machine
45 */ 45 */
diff --git a/drivers/media/rc/ir-xmp-decoder.c b/drivers/media/rc/ir-xmp-decoder.c
index 6f464be1c8d7..712bc6d76e92 100644
--- a/drivers/media/rc/ir-xmp-decoder.c
+++ b/drivers/media/rc/ir-xmp-decoder.c
@@ -35,7 +35,7 @@ enum xmp_state {
35/** 35/**
36 * ir_xmp_decode() - Decode one XMP pulse or space 36 * ir_xmp_decode() - Decode one XMP pulse or space
37 * @dev: the struct rc_dev descriptor of the device 37 * @dev: the struct rc_dev descriptor of the device
38 * @duration: the struct ir_raw_event descriptor of the pulse/space 38 * @ev: the struct ir_raw_event descriptor of the pulse/space
39 * 39 *
40 * This function returns -EINVAL if the pulse violates the state machine 40 * This function returns -EINVAL if the pulse violates the state machine
41 */ 41 */
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index f6e5ba4fbb49..d78483a504c9 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -128,7 +128,7 @@ EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
128/** 128/**
129 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing 129 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
130 * @dev: the struct rc_dev device descriptor 130 * @dev: the struct rc_dev device descriptor
131 * @type: the type of the event that has occurred 131 * @ev: the event that has occurred
132 * 132 *
133 * This routine (which may be called from an interrupt context) works 133 * This routine (which may be called from an interrupt context) works
134 * in similar manner to ir_raw_event_store_edge. 134 * in similar manner to ir_raw_event_store_edge.
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 17950e29d4e3..c144b77eac98 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -39,41 +39,41 @@ static const struct {
39 [RC_PROTO_UNKNOWN] = { .name = "unknown", .repeat_period = 250 }, 39 [RC_PROTO_UNKNOWN] = { .name = "unknown", .repeat_period = 250 },
40 [RC_PROTO_OTHER] = { .name = "other", .repeat_period = 250 }, 40 [RC_PROTO_OTHER] = { .name = "other", .repeat_period = 250 },
41 [RC_PROTO_RC5] = { .name = "rc-5", 41 [RC_PROTO_RC5] = { .name = "rc-5",
42 .scancode_bits = 0x1f7f, .repeat_period = 164 }, 42 .scancode_bits = 0x1f7f, .repeat_period = 250 },
43 [RC_PROTO_RC5X_20] = { .name = "rc-5x-20", 43 [RC_PROTO_RC5X_20] = { .name = "rc-5x-20",
44 .scancode_bits = 0x1f7f3f, .repeat_period = 164 }, 44 .scancode_bits = 0x1f7f3f, .repeat_period = 250 },
45 [RC_PROTO_RC5_SZ] = { .name = "rc-5-sz", 45 [RC_PROTO_RC5_SZ] = { .name = "rc-5-sz",
46 .scancode_bits = 0x2fff, .repeat_period = 164 }, 46 .scancode_bits = 0x2fff, .repeat_period = 250 },
47 [RC_PROTO_JVC] = { .name = "jvc", 47 [RC_PROTO_JVC] = { .name = "jvc",
48 .scancode_bits = 0xffff, .repeat_period = 250 }, 48 .scancode_bits = 0xffff, .repeat_period = 250 },
49 [RC_PROTO_SONY12] = { .name = "sony-12", 49 [RC_PROTO_SONY12] = { .name = "sony-12",
50 .scancode_bits = 0x1f007f, .repeat_period = 100 }, 50 .scancode_bits = 0x1f007f, .repeat_period = 250 },
51 [RC_PROTO_SONY15] = { .name = "sony-15", 51 [RC_PROTO_SONY15] = { .name = "sony-15",
52 .scancode_bits = 0xff007f, .repeat_period = 100 }, 52 .scancode_bits = 0xff007f, .repeat_period = 250 },
53 [RC_PROTO_SONY20] = { .name = "sony-20", 53 [RC_PROTO_SONY20] = { .name = "sony-20",
54 .scancode_bits = 0x1fff7f, .repeat_period = 100 }, 54 .scancode_bits = 0x1fff7f, .repeat_period = 250 },
55 [RC_PROTO_NEC] = { .name = "nec", 55 [RC_PROTO_NEC] = { .name = "nec",
56 .scancode_bits = 0xffff, .repeat_period = 160 }, 56 .scancode_bits = 0xffff, .repeat_period = 250 },
57 [RC_PROTO_NECX] = { .name = "nec-x", 57 [RC_PROTO_NECX] = { .name = "nec-x",
58 .scancode_bits = 0xffffff, .repeat_period = 160 }, 58 .scancode_bits = 0xffffff, .repeat_period = 250 },
59 [RC_PROTO_NEC32] = { .name = "nec-32", 59 [RC_PROTO_NEC32] = { .name = "nec-32",
60 .scancode_bits = 0xffffffff, .repeat_period = 160 }, 60 .scancode_bits = 0xffffffff, .repeat_period = 250 },
61 [RC_PROTO_SANYO] = { .name = "sanyo", 61 [RC_PROTO_SANYO] = { .name = "sanyo",
62 .scancode_bits = 0x1fffff, .repeat_period = 250 }, 62 .scancode_bits = 0x1fffff, .repeat_period = 250 },
63 [RC_PROTO_MCIR2_KBD] = { .name = "mcir2-kbd", 63 [RC_PROTO_MCIR2_KBD] = { .name = "mcir2-kbd",
64 .scancode_bits = 0xffff, .repeat_period = 150 }, 64 .scancode_bits = 0xffff, .repeat_period = 250 },
65 [RC_PROTO_MCIR2_MSE] = { .name = "mcir2-mse", 65 [RC_PROTO_MCIR2_MSE] = { .name = "mcir2-mse",
66 .scancode_bits = 0x1fffff, .repeat_period = 150 }, 66 .scancode_bits = 0x1fffff, .repeat_period = 250 },
67 [RC_PROTO_RC6_0] = { .name = "rc-6-0", 67 [RC_PROTO_RC6_0] = { .name = "rc-6-0",
68 .scancode_bits = 0xffff, .repeat_period = 164 }, 68 .scancode_bits = 0xffff, .repeat_period = 250 },
69 [RC_PROTO_RC6_6A_20] = { .name = "rc-6-6a-20", 69 [RC_PROTO_RC6_6A_20] = { .name = "rc-6-6a-20",
70 .scancode_bits = 0xfffff, .repeat_period = 164 }, 70 .scancode_bits = 0xfffff, .repeat_period = 250 },
71 [RC_PROTO_RC6_6A_24] = { .name = "rc-6-6a-24", 71 [RC_PROTO_RC6_6A_24] = { .name = "rc-6-6a-24",
72 .scancode_bits = 0xffffff, .repeat_period = 164 }, 72 .scancode_bits = 0xffffff, .repeat_period = 250 },
73 [RC_PROTO_RC6_6A_32] = { .name = "rc-6-6a-32", 73 [RC_PROTO_RC6_6A_32] = { .name = "rc-6-6a-32",
74 .scancode_bits = 0xffffffff, .repeat_period = 164 }, 74 .scancode_bits = 0xffffffff, .repeat_period = 250 },
75 [RC_PROTO_RC6_MCE] = { .name = "rc-6-mce", 75 [RC_PROTO_RC6_MCE] = { .name = "rc-6-mce",
76 .scancode_bits = 0xffff7fff, .repeat_period = 164 }, 76 .scancode_bits = 0xffff7fff, .repeat_period = 250 },
77 [RC_PROTO_SHARP] = { .name = "sharp", 77 [RC_PROTO_SHARP] = { .name = "sharp",
78 .scancode_bits = 0x1fff, .repeat_period = 250 }, 78 .scancode_bits = 0x1fff, .repeat_period = 250 },
79 [RC_PROTO_XMP] = { .name = "xmp", .repeat_period = 250 }, 79 [RC_PROTO_XMP] = { .name = "xmp", .repeat_period = 250 },
@@ -170,10 +170,11 @@ static struct rc_map_list empty_map = {
170 * @name: name to assign to the table 170 * @name: name to assign to the table
171 * @rc_proto: ir type to assign to the new table 171 * @rc_proto: ir type to assign to the new table
172 * @size: initial size of the table 172 * @size: initial size of the table
173 * @return: zero on success or a negative error code
174 * 173 *
175 * This routine will initialize the rc_map and will allocate 174 * This routine will initialize the rc_map and will allocate
176 * memory to hold at least the specified number of elements. 175 * memory to hold at least the specified number of elements.
176 *
177 * return: zero on success or a negative error code
177 */ 178 */
178static int ir_create_table(struct rc_map *rc_map, 179static int ir_create_table(struct rc_map *rc_map,
179 const char *name, u64 rc_proto, size_t size) 180 const char *name, u64 rc_proto, size_t size)
@@ -216,10 +217,11 @@ static void ir_free_table(struct rc_map *rc_map)
216 * ir_resize_table() - resizes a scancode table if necessary 217 * ir_resize_table() - resizes a scancode table if necessary
217 * @rc_map: the rc_map to resize 218 * @rc_map: the rc_map to resize
218 * @gfp_flags: gfp flags to use when allocating memory 219 * @gfp_flags: gfp flags to use when allocating memory
219 * @return: zero on success or a negative error code
220 * 220 *
221 * This routine will shrink the rc_map if it has lots of 221 * This routine will shrink the rc_map if it has lots of
222 * unused entries and grow it if it is full. 222 * unused entries and grow it if it is full.
223 *
224 * return: zero on success or a negative error code
223 */ 225 */
224static int ir_resize_table(struct rc_map *rc_map, gfp_t gfp_flags) 226static int ir_resize_table(struct rc_map *rc_map, gfp_t gfp_flags)
225{ 227{
@@ -265,11 +267,13 @@ static int ir_resize_table(struct rc_map *rc_map, gfp_t gfp_flags)
265 * @dev: the struct rc_dev device descriptor 267 * @dev: the struct rc_dev device descriptor
266 * @rc_map: scancode table to be adjusted 268 * @rc_map: scancode table to be adjusted
267 * @index: index of the mapping that needs to be updated 269 * @index: index of the mapping that needs to be updated
268 * @keycode: the desired keycode 270 * @new_keycode: the desired keycode
269 * @return: previous keycode assigned to the mapping
270 * 271 *
271 * This routine is used to update scancode->keycode mapping at given 272 * This routine is used to update scancode->keycode mapping at given
272 * position. 273 * position.
274 *
275 * return: previous keycode assigned to the mapping
276 *
273 */ 277 */
274static unsigned int ir_update_mapping(struct rc_dev *dev, 278static unsigned int ir_update_mapping(struct rc_dev *dev,
275 struct rc_map *rc_map, 279 struct rc_map *rc_map,
@@ -320,12 +324,13 @@ static unsigned int ir_update_mapping(struct rc_dev *dev,
320 * @scancode: the desired scancode 324 * @scancode: the desired scancode
321 * @resize: controls whether we allowed to resize the table to 325 * @resize: controls whether we allowed to resize the table to
322 * accommodate not yet present scancodes 326 * accommodate not yet present scancodes
323 * @return: index of the mapping containing scancode in question
324 * or -1U in case of failure.
325 * 327 *
326 * This routine is used to locate given scancode in rc_map. 328 * This routine is used to locate given scancode in rc_map.
327 * If scancode is not yet present the routine will allocate a new slot 329 * If scancode is not yet present the routine will allocate a new slot
328 * for it. 330 * for it.
331 *
332 * return: index of the mapping containing scancode in question
333 * or -1U in case of failure.
329 */ 334 */
330static unsigned int ir_establish_scancode(struct rc_dev *dev, 335static unsigned int ir_establish_scancode(struct rc_dev *dev,
331 struct rc_map *rc_map, 336 struct rc_map *rc_map,
@@ -375,11 +380,12 @@ static unsigned int ir_establish_scancode(struct rc_dev *dev,
375/** 380/**
376 * ir_setkeycode() - set a keycode in the scancode->keycode table 381 * ir_setkeycode() - set a keycode in the scancode->keycode table
377 * @idev: the struct input_dev device descriptor 382 * @idev: the struct input_dev device descriptor
378 * @scancode: the desired scancode 383 * @ke: Input keymap entry
379 * @keycode: result 384 * @old_keycode: result
380 * @return: -EINVAL if the keycode could not be inserted, otherwise zero.
381 * 385 *
382 * This routine is used to handle evdev EVIOCSKEY ioctl. 386 * This routine is used to handle evdev EVIOCSKEY ioctl.
387 *
388 * return: -EINVAL if the keycode could not be inserted, otherwise zero.
383 */ 389 */
384static int ir_setkeycode(struct input_dev *idev, 390static int ir_setkeycode(struct input_dev *idev,
385 const struct input_keymap_entry *ke, 391 const struct input_keymap_entry *ke,
@@ -422,11 +428,11 @@ out:
422/** 428/**
423 * ir_setkeytable() - sets several entries in the scancode->keycode table 429 * ir_setkeytable() - sets several entries in the scancode->keycode table
424 * @dev: the struct rc_dev device descriptor 430 * @dev: the struct rc_dev device descriptor
425 * @to: the struct rc_map to copy entries to
426 * @from: the struct rc_map to copy entries from 431 * @from: the struct rc_map to copy entries from
427 * @return: -ENOMEM if all keycodes could not be inserted, otherwise zero.
428 * 432 *
429 * This routine is used to handle table initialization. 433 * This routine is used to handle table initialization.
434 *
435 * return: -ENOMEM if all keycodes could not be inserted, otherwise zero.
430 */ 436 */
431static int ir_setkeytable(struct rc_dev *dev, 437static int ir_setkeytable(struct rc_dev *dev,
432 const struct rc_map *from) 438 const struct rc_map *from)
@@ -474,10 +480,11 @@ static int rc_map_cmp(const void *key, const void *elt)
474 * ir_lookup_by_scancode() - locate mapping by scancode 480 * ir_lookup_by_scancode() - locate mapping by scancode
475 * @rc_map: the struct rc_map to search 481 * @rc_map: the struct rc_map to search
476 * @scancode: scancode to look for in the table 482 * @scancode: scancode to look for in the table
477 * @return: index in the table, -1U if not found
478 * 483 *
479 * This routine performs binary search in RC keykeymap table for 484 * This routine performs binary search in RC keykeymap table for
480 * given scancode. 485 * given scancode.
486 *
487 * return: index in the table, -1U if not found
481 */ 488 */
482static unsigned int ir_lookup_by_scancode(const struct rc_map *rc_map, 489static unsigned int ir_lookup_by_scancode(const struct rc_map *rc_map,
483 unsigned int scancode) 490 unsigned int scancode)
@@ -495,11 +502,11 @@ static unsigned int ir_lookup_by_scancode(const struct rc_map *rc_map,
495/** 502/**
496 * ir_getkeycode() - get a keycode from the scancode->keycode table 503 * ir_getkeycode() - get a keycode from the scancode->keycode table
497 * @idev: the struct input_dev device descriptor 504 * @idev: the struct input_dev device descriptor
498 * @scancode: the desired scancode 505 * @ke: Input keymap entry
499 * @keycode: used to return the keycode, if found, or KEY_RESERVED
500 * @return: always returns zero.
501 * 506 *
502 * This routine is used to handle evdev EVIOCGKEY ioctl. 507 * This routine is used to handle evdev EVIOCGKEY ioctl.
508 *
509 * return: always returns zero.
503 */ 510 */
504static int ir_getkeycode(struct input_dev *idev, 511static int ir_getkeycode(struct input_dev *idev,
505 struct input_keymap_entry *ke) 512 struct input_keymap_entry *ke)
@@ -556,11 +563,12 @@ out:
556 * rc_g_keycode_from_table() - gets the keycode that corresponds to a scancode 563 * rc_g_keycode_from_table() - gets the keycode that corresponds to a scancode
557 * @dev: the struct rc_dev descriptor of the device 564 * @dev: the struct rc_dev descriptor of the device
558 * @scancode: the scancode to look for 565 * @scancode: the scancode to look for
559 * @return: the corresponding keycode, or KEY_RESERVED
560 * 566 *
561 * This routine is used by drivers which need to convert a scancode to a 567 * This routine is used by drivers which need to convert a scancode to a
562 * keycode. Normally it should not be used since drivers should have no 568 * keycode. Normally it should not be used since drivers should have no
563 * interest in keycodes. 569 * interest in keycodes.
570 *
571 * return: the corresponding keycode, or KEY_RESERVED
564 */ 572 */
565u32 rc_g_keycode_from_table(struct rc_dev *dev, u32 scancode) 573u32 rc_g_keycode_from_table(struct rc_dev *dev, u32 scancode)
566{ 574{
@@ -625,7 +633,8 @@ EXPORT_SYMBOL_GPL(rc_keyup);
625 633
626/** 634/**
627 * ir_timer_keyup() - generates a keyup event after a timeout 635 * ir_timer_keyup() - generates a keyup event after a timeout
628 * @cookie: a pointer to the struct rc_dev for the device 636 *
637 * @t: a pointer to the struct timer_list
629 * 638 *
630 * This routine will generate a keyup event some time after a keydown event 639 * This routine will generate a keyup event some time after a keydown event
631 * is generated when no further activity has been detected. 640 * is generated when no further activity has been detected.
@@ -780,7 +789,8 @@ EXPORT_SYMBOL_GPL(rc_keydown_notimeout);
780 * provides sensible defaults 789 * provides sensible defaults
781 * @dev: the struct rc_dev descriptor of the device 790 * @dev: the struct rc_dev descriptor of the device
782 * @filter: the scancode and mask 791 * @filter: the scancode and mask
783 * @return: 0 or -EINVAL if the filter is not valid 792 *
793 * return: 0 or -EINVAL if the filter is not valid
784 */ 794 */
785static int rc_validate_filter(struct rc_dev *dev, 795static int rc_validate_filter(struct rc_dev *dev,
786 struct rc_scancode_filter *filter) 796 struct rc_scancode_filter *filter)
diff --git a/drivers/media/rc/sir_ir.c b/drivers/media/rc/sir_ir.c
index 76120664b700..9ee2c9196b4d 100644
--- a/drivers/media/rc/sir_ir.c
+++ b/drivers/media/rc/sir_ir.c
@@ -57,7 +57,7 @@ static void add_read_queue(int flag, unsigned long val);
57static irqreturn_t sir_interrupt(int irq, void *dev_id); 57static irqreturn_t sir_interrupt(int irq, void *dev_id);
58static void send_space(unsigned long len); 58static void send_space(unsigned long len);
59static void send_pulse(unsigned long len); 59static void send_pulse(unsigned long len);
60static void init_hardware(void); 60static int init_hardware(void);
61static void drop_hardware(void); 61static void drop_hardware(void);
62/* Initialisation */ 62/* Initialisation */
63 63
@@ -263,11 +263,36 @@ static void send_pulse(unsigned long len)
263 } 263 }
264} 264}
265 265
266static void init_hardware(void) 266static int init_hardware(void)
267{ 267{
268 u8 scratch, scratch2, scratch3;
268 unsigned long flags; 269 unsigned long flags;
269 270
270 spin_lock_irqsave(&hardware_lock, flags); 271 spin_lock_irqsave(&hardware_lock, flags);
272
273 /*
274 * This is a simple port existence test, borrowed from the autoconfig
275 * function in drivers/tty/serial/8250/8250_port.c
276 */
277 scratch = sinp(UART_IER);
278 soutp(UART_IER, 0);
279#ifdef __i386__
280 outb(0xff, 0x080);
281#endif
282 scratch2 = sinp(UART_IER) & 0x0f;
283 soutp(UART_IER, 0x0f);
284#ifdef __i386__
285 outb(0x00, 0x080);
286#endif
287 scratch3 = sinp(UART_IER) & 0x0f;
288 soutp(UART_IER, scratch);
289 if (scratch2 != 0 || scratch3 != 0x0f) {
290 /* we fail, there's nothing here */
291 spin_unlock_irqrestore(&hardware_lock, flags);
292 pr_err("port existence test failed, cannot continue\n");
293 return -ENODEV;
294 }
295
271 /* reset UART */ 296 /* reset UART */
272 outb(0, io + UART_MCR); 297 outb(0, io + UART_MCR);
273 outb(0, io + UART_IER); 298 outb(0, io + UART_IER);
@@ -285,6 +310,8 @@ static void init_hardware(void)
285 /* turn on UART */ 310 /* turn on UART */
286 outb(UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2, io + UART_MCR); 311 outb(UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2, io + UART_MCR);
287 spin_unlock_irqrestore(&hardware_lock, flags); 312 spin_unlock_irqrestore(&hardware_lock, flags);
313
314 return 0;
288} 315}
289 316
290static void drop_hardware(void) 317static void drop_hardware(void)
@@ -334,14 +361,19 @@ static int sir_ir_probe(struct platform_device *dev)
334 pr_err("IRQ %d already in use.\n", irq); 361 pr_err("IRQ %d already in use.\n", irq);
335 return retval; 362 return retval;
336 } 363 }
364
365 retval = init_hardware();
366 if (retval) {
367 del_timer_sync(&timerlist);
368 return retval;
369 }
370
337 pr_info("I/O port 0x%.4x, IRQ %d.\n", io, irq); 371 pr_info("I/O port 0x%.4x, IRQ %d.\n", io, irq);
338 372
339 retval = devm_rc_register_device(&sir_ir_dev->dev, rcdev); 373 retval = devm_rc_register_device(&sir_ir_dev->dev, rcdev);
340 if (retval < 0) 374 if (retval < 0)
341 return retval; 375 return retval;
342 376
343 init_hardware();
344
345 return 0; 377 return 0;
346} 378}
347 379
diff --git a/drivers/media/rc/st_rc.c b/drivers/media/rc/st_rc.c
index a8e39c635f34..d2efd7b2c3bc 100644
--- a/drivers/media/rc/st_rc.c
+++ b/drivers/media/rc/st_rc.c
@@ -49,7 +49,7 @@ struct st_rc_device {
49#define IRB_RX_NOISE_SUPPR 0x5c /* noise suppression */ 49#define IRB_RX_NOISE_SUPPR 0x5c /* noise suppression */
50#define IRB_RX_POLARITY_INV 0x68 /* polarity inverter */ 50#define IRB_RX_POLARITY_INV 0x68 /* polarity inverter */
51 51
52/** 52/*
53 * IRQ set: Enable full FIFO 1 -> bit 3; 53 * IRQ set: Enable full FIFO 1 -> bit 3;
54 * Enable overrun IRQ 1 -> bit 2; 54 * Enable overrun IRQ 1 -> bit 2;
55 * Enable last symbol IRQ 1 -> bit 1: 55 * Enable last symbol IRQ 1 -> bit 1:
@@ -72,7 +72,7 @@ static void st_rc_send_lirc_timeout(struct rc_dev *rdev)
72 ir_raw_event_store(rdev, &ev); 72 ir_raw_event_store(rdev, &ev);
73} 73}
74 74
75/** 75/*
76 * RX graphical example to better understand the difference between ST IR block 76 * RX graphical example to better understand the difference between ST IR block
77 * output and standard definition used by LIRC (and most of the world!) 77 * output and standard definition used by LIRC (and most of the world!)
78 * 78 *
@@ -317,7 +317,7 @@ static int st_rc_probe(struct platform_device *pdev)
317 device_init_wakeup(dev, true); 317 device_init_wakeup(dev, true);
318 dev_pm_set_wake_irq(dev, rc_dev->irq); 318 dev_pm_set_wake_irq(dev, rc_dev->irq);
319 319
320 /** 320 /*
321 * for LIRC_MODE_MODE2 or LIRC_MODE_PULSE or LIRC_MODE_RAW 321 * for LIRC_MODE_MODE2 or LIRC_MODE_PULSE or LIRC_MODE_RAW
322 * lircd expects a long space first before a signal train to sync. 322 * lircd expects a long space first before a signal train to sync.
323 */ 323 */
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index 4eebfcfc10f3..c9a70fda88a8 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -191,7 +191,7 @@ static void sz_push_half_space(struct streamzap_ir *sz,
191 sz_push_full_space(sz, value & SZ_SPACE_MASK); 191 sz_push_full_space(sz, value & SZ_SPACE_MASK);
192} 192}
193 193
194/** 194/*
195 * streamzap_callback - usb IRQ handler callback 195 * streamzap_callback - usb IRQ handler callback
196 * 196 *
197 * This procedure is invoked on reception of data from 197 * This procedure is invoked on reception of data from
@@ -321,7 +321,7 @@ out:
321 return NULL; 321 return NULL;
322} 322}
323 323
324/** 324/*
325 * streamzap_probe 325 * streamzap_probe
326 * 326 *
327 * Called by usb-core to associated with a candidate device 327 * Called by usb-core to associated with a candidate device
@@ -450,7 +450,7 @@ free_sz:
450 return retval; 450 return retval;
451} 451}
452 452
453/** 453/*
454 * streamzap_disconnect 454 * streamzap_disconnect
455 * 455 *
456 * Called by the usb core when the device is removed from the system. 456 * Called by the usb core when the device is removed from the system.
diff --git a/drivers/media/tuners/mt2063.c b/drivers/media/tuners/mt2063.c
index 8b39d8dc97a0..5c87c5c6a455 100644
--- a/drivers/media/tuners/mt2063.c
+++ b/drivers/media/tuners/mt2063.c
@@ -1397,9 +1397,9 @@ static u32 MT2063_Round_fLO(u32 f_LO, u32 f_LO_Step, u32 f_ref)
1397 * risk of overflow. It accurately calculates 1397 * risk of overflow. It accurately calculates
1398 * f_ref * num / denom to within 1 HZ with fixed math. 1398 * f_ref * num / denom to within 1 HZ with fixed math.
1399 * 1399 *
1400 * @num : Fractional portion of the multiplier 1400 * @f_ref: SRO frequency.
1401 * @num: Fractional portion of the multiplier
1401 * @denom: denominator portion of the ratio 1402 * @denom: denominator portion of the ratio
1402 * @f_Ref: SRO frequency.
1403 * 1403 *
1404 * This calculation handles f_ref as two separate 14-bit fields. 1404 * This calculation handles f_ref as two separate 14-bit fields.
1405 * Therefore, a maximum value of 2^28-1 may safely be used for f_ref. 1405 * Therefore, a maximum value of 2^28-1 may safely be used for f_ref.
@@ -1464,8 +1464,6 @@ static u32 MT2063_CalcLO1Mult(u32 *Div,
1464 * @f_LO: desired LO frequency. 1464 * @f_LO: desired LO frequency.
1465 * @f_LO_Step: Minimum step size for the LO (in Hz). 1465 * @f_LO_Step: Minimum step size for the LO (in Hz).
1466 * @f_Ref: SRO frequency. 1466 * @f_Ref: SRO frequency.
1467 * @f_Avoid: Range of PLL frequencies to avoid near
1468 * integer multiples of f_Ref (in Hz).
1469 * 1467 *
1470 * Returns: Recalculated LO frequency. 1468 * Returns: Recalculated LO frequency.
1471 */ 1469 */
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
index f9772ad0a2a5..5a2f81311fb7 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
@@ -26,7 +26,7 @@
26#include "cinergyT2.h" 26#include "cinergyT2.h"
27 27
28 28
29/** 29/*
30 * convert linux-dvb frontend parameter set into TPS. 30 * convert linux-dvb frontend parameter set into TPS.
31 * See ETSI ETS-300744, section 4.6.2, table 9 for details. 31 * See ETSI ETS-300744, section 4.6.2, table 9 for details.
32 * 32 *
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 92098c1b78e5..366b05529915 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -1677,10 +1677,10 @@ static int dib8096_set_param_override(struct dvb_frontend *fe)
1677 return -EINVAL; 1677 return -EINVAL;
1678 } 1678 }
1679 1679
1680 /** Update PLL if needed ratio **/ 1680 /* Update PLL if needed ratio */
1681 state->dib8000_ops.update_pll(fe, &dib8090_pll_config_12mhz, fe->dtv_property_cache.bandwidth_hz / 1000, 0); 1681 state->dib8000_ops.update_pll(fe, &dib8090_pll_config_12mhz, fe->dtv_property_cache.bandwidth_hz / 1000, 0);
1682 1682
1683 /** Get optimize PLL ratio to remove spurious **/ 1683 /* Get optimize PLL ratio to remove spurious */
1684 pll_ratio = dib8090_compute_pll_parameters(fe); 1684 pll_ratio = dib8090_compute_pll_parameters(fe);
1685 if (pll_ratio == 17) 1685 if (pll_ratio == 17)
1686 timf = 21387946; 1686 timf = 21387946;
@@ -1691,7 +1691,7 @@ static int dib8096_set_param_override(struct dvb_frontend *fe)
1691 else 1691 else
1692 timf = 18179756; 1692 timf = 18179756;
1693 1693
1694 /** Update ratio **/ 1694 /* Update ratio */
1695 state->dib8000_ops.update_pll(fe, &dib8090_pll_config_12mhz, fe->dtv_property_cache.bandwidth_hz / 1000, pll_ratio); 1695 state->dib8000_ops.update_pll(fe, &dib8090_pll_config_12mhz, fe->dtv_property_cache.bandwidth_hz / 1000, pll_ratio);
1696 1696
1697 state->dib8000_ops.ctrl_timf(fe, DEMOD_TIMF_SET, timf); 1697 state->dib8000_ops.ctrl_timf(fe, DEMOD_TIMF_SET, timf);
@@ -3357,7 +3357,7 @@ static int novatd_sleep_override(struct dvb_frontend* fe)
3357 return state->sleep(fe); 3357 return state->sleep(fe);
3358} 3358}
3359 3359
3360/** 3360/*
3361 * novatd_frontend_attach - Nova-TD specific attach 3361 * novatd_frontend_attach - Nova-TD specific attach
3362 * 3362 *
3363 * Nova-TD has GPIO0, 1 and 2 for LEDs. So do not fiddle with them except for 3363 * Nova-TD has GPIO0, 1 and 2 for LEDs. So do not fiddle with them except for
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
index 8207e6900656..bcacb0f22028 100644
--- a/drivers/media/usb/dvb-usb/dibusb-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-common.c
@@ -223,8 +223,20 @@ EXPORT_SYMBOL(dibusb_i2c_algo);
223 223
224int dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val) 224int dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val)
225{ 225{
226 u8 wbuf[1] = { offs }; 226 u8 *buf;
227 return dibusb_i2c_msg(d, 0x50, wbuf, 1, val, 1); 227 int rc;
228
229 buf = kmalloc(2, GFP_KERNEL);
230 if (!buf)
231 return -ENOMEM;
232
233 buf[0] = offs;
234
235 rc = dibusb_i2c_msg(d, 0x50, &buf[0], 1, &buf[1], 1);
236 *val = buf[1];
237 kfree(buf);
238
239 return rc;
228} 240}
229EXPORT_SYMBOL(dibusb_read_eeprom_byte); 241EXPORT_SYMBOL(dibusb_read_eeprom_byte);
230 242
diff --git a/drivers/media/usb/dvb-usb/friio-fe.c b/drivers/media/usb/dvb-usb/friio-fe.c
index 41261317bd5c..b6046e0e07f6 100644
--- a/drivers/media/usb/dvb-usb/friio-fe.c
+++ b/drivers/media/usb/dvb-usb/friio-fe.c
@@ -297,7 +297,7 @@ static int jdvbt90502_set_frontend(struct dvb_frontend *fe)
297} 297}
298 298
299 299
300/** 300/*
301 * (reg, val) commad list to initialize this module. 301 * (reg, val) commad list to initialize this module.
302 * captured on a Windows box. 302 * captured on a Windows box.
303 */ 303 */
diff --git a/drivers/media/usb/dvb-usb/friio.c b/drivers/media/usb/dvb-usb/friio.c
index 62abe6c43a32..16875945e662 100644
--- a/drivers/media/usb/dvb-usb/friio.c
+++ b/drivers/media/usb/dvb-usb/friio.c
@@ -21,7 +21,7 @@ MODULE_PARM_DESC(debug,
21 21
22DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); 22DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
23 23
24/** 24/*
25 * Indirect I2C access to the PLL via FE. 25 * Indirect I2C access to the PLL via FE.
26 * whole I2C protocol data to the PLL is sent via the FE's I2C register. 26 * whole I2C protocol data to the PLL is sent via the FE's I2C register.
27 * This is done by a control msg to the FE with the I2C data accompanied, and 27 * This is done by a control msg to the FE with the I2C data accompanied, and
diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
index f1537daf4e2e..1b30434b72ef 100644
--- a/drivers/media/usb/gspca/ov519.c
+++ b/drivers/media/usb/gspca/ov519.c
@@ -1,4 +1,4 @@
1/** 1/*
2 * OV519 driver 2 * OV519 driver
3 * 3 *
4 * Copyright (C) 2008-2011 Jean-François Moine <moinejf@free.fr> 4 * Copyright (C) 2008-2011 Jean-François Moine <moinejf@free.fr>
diff --git a/drivers/media/usb/pwc/pwc-dec23.c b/drivers/media/usb/pwc/pwc-dec23.c
index 3792fedff951..1283b3bd9800 100644
--- a/drivers/media/usb/pwc/pwc-dec23.c
+++ b/drivers/media/usb/pwc/pwc-dec23.c
@@ -649,11 +649,10 @@ static void DecompressBand23(struct pwc_dec23_private *pdec,
649} 649}
650 650
651/** 651/**
652 *
653 * Uncompress a pwc23 buffer. 652 * Uncompress a pwc23 buffer.
654 * 653 * @pdev: pointer to pwc device's internal struct
655 * src: raw data 654 * @src: raw data
656 * dst: image output 655 * @dst: image output
657 */ 656 */
658void pwc_dec23_decompress(struct pwc_device *pdev, 657void pwc_dec23_decompress(struct pwc_device *pdev,
659 const void *src, 658 const void *src,
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index 8c1f926567ec..d07349cf9489 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -74,7 +74,7 @@ struct smsusb_device_t {
74static int smsusb_submit_urb(struct smsusb_device_t *dev, 74static int smsusb_submit_urb(struct smsusb_device_t *dev,
75 struct smsusb_urb_t *surb); 75 struct smsusb_urb_t *surb);
76 76
77/** 77/*
78 * Completing URB's callback handler - bottom half (proccess context) 78 * Completing URB's callback handler - bottom half (proccess context)
79 * submits the URB prepared on smsusb_onresponse() 79 * submits the URB prepared on smsusb_onresponse()
80 */ 80 */
@@ -86,7 +86,7 @@ static void do_submit_urb(struct work_struct *work)
86 smsusb_submit_urb(dev, surb); 86 smsusb_submit_urb(dev, surb);
87} 87}
88 88
89/** 89/*
90 * Completing URB's callback handler - top half (interrupt context) 90 * Completing URB's callback handler - top half (interrupt context)
91 * adds completing sms urb to the global surbs list and activtes the worker 91 * adds completing sms urb to the global surbs list and activtes the worker
92 * thread the surb 92 * thread the surb
diff --git a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
index b842f367249f..a142b9dc0feb 100644
--- a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
+++ b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
@@ -76,7 +76,7 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
76#define TTUSB_REV_2_2 0x22 76#define TTUSB_REV_2_2 0x22
77#define TTUSB_BUDGET_NAME "ttusb_stc_fw" 77#define TTUSB_BUDGET_NAME "ttusb_stc_fw"
78 78
79/** 79/*
80 * since we're casting (struct ttusb*) <-> (struct dvb_demux*) around 80 * since we're casting (struct ttusb*) <-> (struct dvb_demux*) around
81 * the dvb_demux field must be the first in struct!! 81 * the dvb_demux field must be the first in struct!!
82 */ 82 */
@@ -713,7 +713,7 @@ static void ttusb_process_frame(struct ttusb *ttusb, u8 * data, int len)
713 } 713 }
714 } 714 }
715 715
716 /** 716 /*
717 * if length is valid and we reached the end: 717 * if length is valid and we reached the end:
718 * goto next muxpack 718 * goto next muxpack
719 */ 719 */
@@ -729,7 +729,7 @@ static void ttusb_process_frame(struct ttusb *ttusb, u8 * data, int len)
729 /* maximum bytes, until we know the length */ 729 /* maximum bytes, until we know the length */
730 ttusb->muxpack_len = 2; 730 ttusb->muxpack_len = 2;
731 731
732 /** 732 /*
733 * no muxpacks left? 733 * no muxpacks left?
734 * return to search-sync state 734 * return to search-sync state
735 */ 735 */
diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
index b55b79b8e921..127f8a0c098b 100644
--- a/drivers/media/usb/usbtv/usbtv-core.c
+++ b/drivers/media/usb/usbtv/usbtv-core.c
@@ -144,6 +144,7 @@ static void usbtv_disconnect(struct usb_interface *intf)
144 144
145static const struct usb_device_id usbtv_id_table[] = { 145static const struct usb_device_id usbtv_id_table[] = {
146 { USB_DEVICE(0x1b71, 0x3002) }, 146 { USB_DEVICE(0x1b71, 0x3002) },
147 { USB_DEVICE(0x1f71, 0x3301) },
147 {} 148 {}
148}; 149};
149MODULE_DEVICE_TABLE(usb, usbtv_id_table); 150MODULE_DEVICE_TABLE(usb, usbtv_id_table);
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c
index 8db45dfc271b..82852f23a3b6 100644
--- a/drivers/media/v4l2-core/tuner-core.c
+++ b/drivers/media/v4l2-core/tuner-core.c
@@ -239,7 +239,7 @@ static const struct analog_demod_ops tuner_analog_ops = {
239 * @type: type of the tuner (e. g. tuner number) 239 * @type: type of the tuner (e. g. tuner number)
240 * @new_mode_mask: Indicates if tuner supports TV and/or Radio 240 * @new_mode_mask: Indicates if tuner supports TV and/or Radio
241 * @new_config: an optional parameter used by a few tuners to adjust 241 * @new_config: an optional parameter used by a few tuners to adjust
242 internal parameters, like LNA mode 242 * internal parameters, like LNA mode
243 * @tuner_callback: an optional function to be called when switching 243 * @tuner_callback: an optional function to be called when switching
244 * to analog mode 244 * to analog mode
245 * 245 *
@@ -750,6 +750,7 @@ static int tuner_remove(struct i2c_client *client)
750/** 750/**
751 * check_mode - Verify if tuner supports the requested mode 751 * check_mode - Verify if tuner supports the requested mode
752 * @t: a pointer to the module's internal struct_tuner 752 * @t: a pointer to the module's internal struct_tuner
753 * @mode: mode of the tuner, as defined by &enum v4l2_tuner_type.
753 * 754 *
754 * This function checks if the tuner is capable of tuning analog TV, 755 * This function checks if the tuner is capable of tuning analog TV,
755 * digital TV or radio, depending on what the caller wants. If the 756 * digital TV or radio, depending on what the caller wants. If the
@@ -757,6 +758,7 @@ static int tuner_remove(struct i2c_client *client)
757 * returns 0. 758 * returns 0.
758 * This function is needed for boards that have a separate tuner for 759 * This function is needed for boards that have a separate tuner for
759 * radio (like devices with tea5767). 760 * radio (like devices with tea5767).
761 *
760 * NOTE: mt20xx uses V4L2_TUNER_DIGITAL_TV and calls set_tv_freq to 762 * NOTE: mt20xx uses V4L2_TUNER_DIGITAL_TV and calls set_tv_freq to
761 * select a TV frequency. So, t_mode = T_ANALOG_TV could actually 763 * select a TV frequency. So, t_mode = T_ANALOG_TV could actually
762 * be used to represent a Digital TV too. 764 * be used to represent a Digital TV too.
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index a7c3464976f2..e5acfab470a5 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -558,8 +558,7 @@ int v4l2_async_register_subdev(struct v4l2_subdev *sd)
558 if (!asd) 558 if (!asd)
559 continue; 559 continue;
560 560
561 ret = v4l2_async_match_notify(notifier, notifier->v4l2_dev, sd, 561 ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
562 asd);
563 if (ret) 562 if (ret)
564 goto err_unbind; 563 goto err_unbind;
565 564
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index 5c8c49d240d1..930f9c53a64e 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -245,11 +245,11 @@ EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cea861_vic);
245 245
246/** 246/**
247 * v4l2_match_dv_timings - check if two timings match 247 * v4l2_match_dv_timings - check if two timings match
248 * @t1 - compare this v4l2_dv_timings struct... 248 * @t1: compare this v4l2_dv_timings struct...
249 * @t2 - with this struct. 249 * @t2: with this struct.
250 * @pclock_delta - the allowed pixelclock deviation. 250 * @pclock_delta: the allowed pixelclock deviation.
251 * @match_reduced_fps - if true, then fail if V4L2_DV_FL_REDUCED_FPS does not 251 * @match_reduced_fps: if true, then fail if V4L2_DV_FL_REDUCED_FPS does not
252 * match. 252 * match.
253 * 253 *
254 * Compare t1 with t2 with a given margin of error for the pixelclock. 254 * Compare t1 with t2 with a given margin of error for the pixelclock.
255 */ 255 */
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 681b192420d9..fb72c7ac04d4 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -458,11 +458,6 @@ static int __v4l2_async_notifier_parse_fwnode_endpoints(
458 if (!is_available) 458 if (!is_available)
459 continue; 459 continue;
460 460
461 if (WARN_ON(notifier->num_subdevs >= notifier->max_subdevs)) {
462 ret = -EINVAL;
463 break;
464 }
465
466 if (has_port) { 461 if (has_port) {
467 struct fwnode_endpoint ep; 462 struct fwnode_endpoint ep;
468 463
@@ -474,6 +469,11 @@ static int __v4l2_async_notifier_parse_fwnode_endpoints(
474 continue; 469 continue;
475 } 470 }
476 471
472 if (WARN_ON(notifier->num_subdevs >= notifier->max_subdevs)) {
473 ret = -EINVAL;
474 break;
475 }
476
477 ret = v4l2_async_notifier_fwnode_parse_endpoint( 477 ret = v4l2_async_notifier_fwnode_parse_endpoint(
478 dev, notifier, fwnode, asd_struct_size, parse_endpoint); 478 dev, notifier, fwnode, asd_struct_size, parse_endpoint);
479 if (ret < 0) 479 if (ret < 0)
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index f62e68aa04c4..bc580fbe18fa 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -183,6 +183,7 @@ EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
183 183
184/** 184/**
185 * v4l2_m2m_try_run() - select next job to perform and run it if possible 185 * v4l2_m2m_try_run() - select next job to perform and run it if possible
186 * @m2m_dev: per-device context
186 * 187 *
187 * Get next transaction (if present) from the waiting jobs list and run it. 188 * Get next transaction (if present) from the waiting jobs list and run it.
188 */ 189 */
@@ -281,6 +282,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
281 282
282/** 283/**
283 * v4l2_m2m_cancel_job() - cancel pending jobs for the context 284 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
285 * @m2m_ctx: m2m context with jobs to be canceled
284 * 286 *
285 * In case of streamoff or release called on any context, 287 * In case of streamoff or release called on any context,
286 * 1] If the context is currently running, then abort job will be called 288 * 1] If the context is currently running, then abort job will be called
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index 1dbf6f7785bb..e87fb13b22dc 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -222,7 +222,7 @@ int videobuf_queue_is_busy(struct videobuf_queue *q)
222} 222}
223EXPORT_SYMBOL_GPL(videobuf_queue_is_busy); 223EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);
224 224
225/** 225/*
226 * __videobuf_free() - free all the buffers and their control structures 226 * __videobuf_free() - free all the buffers and their control structures
227 * 227 *
228 * This function can only be called if streaming/reading is off, i.e. no buffers 228 * This function can only be called if streaming/reading is off, i.e. no buffers
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index cb115ba6a1d2..a8589d96ef72 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -188,7 +188,7 @@ module_param(debug, int, 0644);
188static void __vb2_queue_cancel(struct vb2_queue *q); 188static void __vb2_queue_cancel(struct vb2_queue *q);
189static void __enqueue_in_driver(struct vb2_buffer *vb); 189static void __enqueue_in_driver(struct vb2_buffer *vb);
190 190
191/** 191/*
192 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer 192 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
193 */ 193 */
194static int __vb2_buf_mem_alloc(struct vb2_buffer *vb) 194static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
@@ -229,7 +229,7 @@ free:
229 return ret; 229 return ret;
230} 230}
231 231
232/** 232/*
233 * __vb2_buf_mem_free() - free memory of the given buffer 233 * __vb2_buf_mem_free() - free memory of the given buffer
234 */ 234 */
235static void __vb2_buf_mem_free(struct vb2_buffer *vb) 235static void __vb2_buf_mem_free(struct vb2_buffer *vb)
@@ -243,7 +243,7 @@ static void __vb2_buf_mem_free(struct vb2_buffer *vb)
243 } 243 }
244} 244}
245 245
246/** 246/*
247 * __vb2_buf_userptr_put() - release userspace memory associated with 247 * __vb2_buf_userptr_put() - release userspace memory associated with
248 * a USERPTR buffer 248 * a USERPTR buffer
249 */ 249 */
@@ -258,7 +258,7 @@ static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
258 } 258 }
259} 259}
260 260
261/** 261/*
262 * __vb2_plane_dmabuf_put() - release memory associated with 262 * __vb2_plane_dmabuf_put() - release memory associated with
263 * a DMABUF shared plane 263 * a DMABUF shared plane
264 */ 264 */
@@ -277,7 +277,7 @@ static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
277 p->dbuf_mapped = 0; 277 p->dbuf_mapped = 0;
278} 278}
279 279
280/** 280/*
281 * __vb2_buf_dmabuf_put() - release memory associated with 281 * __vb2_buf_dmabuf_put() - release memory associated with
282 * a DMABUF shared buffer 282 * a DMABUF shared buffer
283 */ 283 */
@@ -289,7 +289,7 @@ static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
289 __vb2_plane_dmabuf_put(vb, &vb->planes[plane]); 289 __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
290} 290}
291 291
292/** 292/*
293 * __setup_offsets() - setup unique offsets ("cookies") for every plane in 293 * __setup_offsets() - setup unique offsets ("cookies") for every plane in
294 * the buffer. 294 * the buffer.
295 */ 295 */
@@ -317,7 +317,7 @@ static void __setup_offsets(struct vb2_buffer *vb)
317 } 317 }
318} 318}
319 319
320/** 320/*
321 * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type) 321 * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
322 * video buffer memory for all buffers/planes on the queue and initializes the 322 * video buffer memory for all buffers/planes on the queue and initializes the
323 * queue 323 * queue
@@ -386,7 +386,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
386 return buffer; 386 return buffer;
387} 387}
388 388
389/** 389/*
390 * __vb2_free_mem() - release all video buffer memory for a given queue 390 * __vb2_free_mem() - release all video buffer memory for a given queue
391 */ 391 */
392static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers) 392static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
@@ -410,7 +410,7 @@ static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
410 } 410 }
411} 411}
412 412
413/** 413/*
414 * __vb2_queue_free() - free buffers at the end of the queue - video memory and 414 * __vb2_queue_free() - free buffers at the end of the queue - video memory and
415 * related information, if no buffers are left return the queue to an 415 * related information, if no buffers are left return the queue to an
416 * uninitialized state. Might be called even if the queue has already been freed. 416 * uninitialized state. Might be called even if the queue has already been freed.
@@ -544,7 +544,7 @@ bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
544} 544}
545EXPORT_SYMBOL(vb2_buffer_in_use); 545EXPORT_SYMBOL(vb2_buffer_in_use);
546 546
547/** 547/*
548 * __buffers_in_use() - return true if any buffers on the queue are in use and 548 * __buffers_in_use() - return true if any buffers on the queue are in use and
549 * the queue cannot be freed (by the means of REQBUFS(0)) call 549 * the queue cannot be freed (by the means of REQBUFS(0)) call
550 */ 550 */
@@ -564,7 +564,7 @@ void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb)
564} 564}
565EXPORT_SYMBOL_GPL(vb2_core_querybuf); 565EXPORT_SYMBOL_GPL(vb2_core_querybuf);
566 566
567/** 567/*
568 * __verify_userptr_ops() - verify that all memory operations required for 568 * __verify_userptr_ops() - verify that all memory operations required for
569 * USERPTR queue type have been provided 569 * USERPTR queue type have been provided
570 */ 570 */
@@ -577,7 +577,7 @@ static int __verify_userptr_ops(struct vb2_queue *q)
577 return 0; 577 return 0;
578} 578}
579 579
580/** 580/*
581 * __verify_mmap_ops() - verify that all memory operations required for 581 * __verify_mmap_ops() - verify that all memory operations required for
582 * MMAP queue type have been provided 582 * MMAP queue type have been provided
583 */ 583 */
@@ -590,7 +590,7 @@ static int __verify_mmap_ops(struct vb2_queue *q)
590 return 0; 590 return 0;
591} 591}
592 592
593/** 593/*
594 * __verify_dmabuf_ops() - verify that all memory operations required for 594 * __verify_dmabuf_ops() - verify that all memory operations required for
595 * DMABUF queue type have been provided 595 * DMABUF queue type have been provided
596 */ 596 */
@@ -953,7 +953,7 @@ void vb2_discard_done(struct vb2_queue *q)
953} 953}
954EXPORT_SYMBOL_GPL(vb2_discard_done); 954EXPORT_SYMBOL_GPL(vb2_discard_done);
955 955
956/** 956/*
957 * __prepare_mmap() - prepare an MMAP buffer 957 * __prepare_mmap() - prepare an MMAP buffer
958 */ 958 */
959static int __prepare_mmap(struct vb2_buffer *vb, const void *pb) 959static int __prepare_mmap(struct vb2_buffer *vb, const void *pb)
@@ -966,7 +966,7 @@ static int __prepare_mmap(struct vb2_buffer *vb, const void *pb)
966 return ret ? ret : call_vb_qop(vb, buf_prepare, vb); 966 return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
967} 967}
968 968
969/** 969/*
970 * __prepare_userptr() - prepare a USERPTR buffer 970 * __prepare_userptr() - prepare a USERPTR buffer
971 */ 971 */
972static int __prepare_userptr(struct vb2_buffer *vb, const void *pb) 972static int __prepare_userptr(struct vb2_buffer *vb, const void *pb)
@@ -1082,7 +1082,7 @@ err:
1082 return ret; 1082 return ret;
1083} 1083}
1084 1084
1085/** 1085/*
1086 * __prepare_dmabuf() - prepare a DMABUF buffer 1086 * __prepare_dmabuf() - prepare a DMABUF buffer
1087 */ 1087 */
1088static int __prepare_dmabuf(struct vb2_buffer *vb, const void *pb) 1088static int __prepare_dmabuf(struct vb2_buffer *vb, const void *pb)
@@ -1215,7 +1215,7 @@ err:
1215 return ret; 1215 return ret;
1216} 1216}
1217 1217
1218/** 1218/*
1219 * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing 1219 * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
1220 */ 1220 */
1221static void __enqueue_in_driver(struct vb2_buffer *vb) 1221static void __enqueue_in_driver(struct vb2_buffer *vb)
@@ -1298,7 +1298,7 @@ int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
1298} 1298}
1299EXPORT_SYMBOL_GPL(vb2_core_prepare_buf); 1299EXPORT_SYMBOL_GPL(vb2_core_prepare_buf);
1300 1300
1301/** 1301/*
1302 * vb2_start_streaming() - Attempt to start streaming. 1302 * vb2_start_streaming() - Attempt to start streaming.
1303 * @q: videobuf2 queue 1303 * @q: videobuf2 queue
1304 * 1304 *
@@ -1427,7 +1427,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
1427} 1427}
1428EXPORT_SYMBOL_GPL(vb2_core_qbuf); 1428EXPORT_SYMBOL_GPL(vb2_core_qbuf);
1429 1429
1430/** 1430/*
1431 * __vb2_wait_for_done_vb() - wait for a buffer to become available 1431 * __vb2_wait_for_done_vb() - wait for a buffer to become available
1432 * for dequeuing 1432 * for dequeuing
1433 * 1433 *
@@ -1502,7 +1502,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
1502 return 0; 1502 return 0;
1503} 1503}
1504 1504
1505/** 1505/*
1506 * __vb2_get_done_vb() - get a buffer ready for dequeuing 1506 * __vb2_get_done_vb() - get a buffer ready for dequeuing
1507 * 1507 *
1508 * Will sleep if required for nonblocking == false. 1508 * Will sleep if required for nonblocking == false.
@@ -1553,7 +1553,7 @@ int vb2_wait_for_all_buffers(struct vb2_queue *q)
1553} 1553}
1554EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers); 1554EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
1555 1555
1556/** 1556/*
1557 * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state 1557 * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
1558 */ 1558 */
1559static void __vb2_dqbuf(struct vb2_buffer *vb) 1559static void __vb2_dqbuf(struct vb2_buffer *vb)
@@ -1625,7 +1625,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
1625} 1625}
1626EXPORT_SYMBOL_GPL(vb2_core_dqbuf); 1626EXPORT_SYMBOL_GPL(vb2_core_dqbuf);
1627 1627
1628/** 1628/*
1629 * __vb2_queue_cancel() - cancel and stop (pause) streaming 1629 * __vb2_queue_cancel() - cancel and stop (pause) streaming
1630 * 1630 *
1631 * Removes all queued buffers from driver's queue and all buffers queued by 1631 * Removes all queued buffers from driver's queue and all buffers queued by
@@ -1773,7 +1773,7 @@ int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
1773} 1773}
1774EXPORT_SYMBOL_GPL(vb2_core_streamoff); 1774EXPORT_SYMBOL_GPL(vb2_core_streamoff);
1775 1775
1776/** 1776/*
1777 * __find_plane_by_offset() - find plane associated with the given offset off 1777 * __find_plane_by_offset() - find plane associated with the given offset off
1778 */ 1778 */
1779static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off, 1779static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
@@ -2104,7 +2104,7 @@ unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
2104} 2104}
2105EXPORT_SYMBOL_GPL(vb2_core_poll); 2105EXPORT_SYMBOL_GPL(vb2_core_poll);
2106 2106
2107/** 2107/*
2108 * struct vb2_fileio_buf - buffer context used by file io emulator 2108 * struct vb2_fileio_buf - buffer context used by file io emulator
2109 * 2109 *
2110 * vb2 provides a compatibility layer and emulator of file io (read and 2110 * vb2 provides a compatibility layer and emulator of file io (read and
@@ -2118,7 +2118,7 @@ struct vb2_fileio_buf {
2118 unsigned int queued:1; 2118 unsigned int queued:1;
2119}; 2119};
2120 2120
2121/** 2121/*
2122 * struct vb2_fileio_data - queue context used by file io emulator 2122 * struct vb2_fileio_data - queue context used by file io emulator
2123 * 2123 *
2124 * @cur_index: the index of the buffer currently being read from or 2124 * @cur_index: the index of the buffer currently being read from or
@@ -2155,7 +2155,7 @@ struct vb2_fileio_data {
2155 unsigned write_immediately:1; 2155 unsigned write_immediately:1;
2156}; 2156};
2157 2157
2158/** 2158/*
2159 * __vb2_init_fileio() - initialize file io emulator 2159 * __vb2_init_fileio() - initialize file io emulator
2160 * @q: videobuf2 queue 2160 * @q: videobuf2 queue
2161 * @read: mode selector (1 means read, 0 means write) 2161 * @read: mode selector (1 means read, 0 means write)
@@ -2274,7 +2274,7 @@ err_kfree:
2274 return ret; 2274 return ret;
2275} 2275}
2276 2276
2277/** 2277/*
2278 * __vb2_cleanup_fileio() - free resourced used by file io emulator 2278 * __vb2_cleanup_fileio() - free resourced used by file io emulator
2279 * @q: videobuf2 queue 2279 * @q: videobuf2 queue
2280 */ 2280 */
@@ -2293,7 +2293,7 @@ static int __vb2_cleanup_fileio(struct vb2_queue *q)
2293 return 0; 2293 return 0;
2294} 2294}
2295 2295
2296/** 2296/*
2297 * __vb2_perform_fileio() - perform a single file io (read or write) operation 2297 * __vb2_perform_fileio() - perform a single file io (read or write) operation
2298 * @q: videobuf2 queue 2298 * @q: videobuf2 queue
2299 * @data: pointed to target userspace buffer 2299 * @data: pointed to target userspace buffer
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index 4bb8424114ce..89e51989332b 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -120,7 +120,7 @@ static void vb2_common_vm_close(struct vm_area_struct *vma)
120 h->put(h->arg); 120 h->put(h->arg);
121} 121}
122 122
123/** 123/*
124 * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmaped 124 * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmaped
125 * video buffers 125 * video buffers
126 */ 126 */
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 0c0669976bdc..4075314a6989 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -49,7 +49,7 @@ module_param(debug, int, 0644);
49#define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \ 49#define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
50 V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE) 50 V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE)
51 51
52/** 52/*
53 * __verify_planes_array() - verify that the planes array passed in struct 53 * __verify_planes_array() - verify that the planes array passed in struct
54 * v4l2_buffer from userspace can be safely used 54 * v4l2_buffer from userspace can be safely used
55 */ 55 */
@@ -78,7 +78,7 @@ static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
78 return __verify_planes_array(vb, pb); 78 return __verify_planes_array(vb, pb);
79} 79}
80 80
81/** 81/*
82 * __verify_length() - Verify that the bytesused value for each plane fits in 82 * __verify_length() - Verify that the bytesused value for each plane fits in
83 * the plane length and that the data offset doesn't exceed the bytesused value. 83 * the plane length and that the data offset doesn't exceed the bytesused value.
84 */ 84 */
@@ -181,7 +181,7 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
181 return __verify_planes_array(q->bufs[b->index], b); 181 return __verify_planes_array(q->bufs[b->index], b);
182} 182}
183 183
184/** 184/*
185 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be 185 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
186 * returned to userspace 186 * returned to userspace
187 */ 187 */
@@ -286,7 +286,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
286 q->last_buffer_dequeued = true; 286 q->last_buffer_dequeued = true;
287} 287}
288 288
289/** 289/*
290 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a 290 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
291 * v4l2_buffer by the userspace. It also verifies that struct 291 * v4l2_buffer by the userspace. It also verifies that struct
292 * v4l2_buffer has a valid number of planes. 292 * v4l2_buffer has a valid number of planes.
@@ -446,7 +446,7 @@ static const struct vb2_buf_ops v4l2_buf_ops = {
446 .copy_timestamp = __copy_timestamp, 446 .copy_timestamp = __copy_timestamp,
447}; 447};
448 448
449/** 449/*
450 * vb2_querybuf() - query video buffer information 450 * vb2_querybuf() - query video buffer information
451 * @q: videobuf queue 451 * @q: videobuf queue
452 * @b: buffer struct passed from userspace to vidioc_querybuf handler 452 * @b: buffer struct passed from userspace to vidioc_querybuf handler
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
index 09cf3699e354..a307832d7e45 100644
--- a/drivers/mfd/arizona-irq.c
+++ b/drivers/mfd/arizona-irq.c
@@ -184,6 +184,7 @@ static struct irq_chip arizona_irq_chip = {
184}; 184};
185 185
186static struct lock_class_key arizona_irq_lock_class; 186static struct lock_class_key arizona_irq_lock_class;
187static struct lock_class_key arizona_irq_request_class;
187 188
188static int arizona_irq_map(struct irq_domain *h, unsigned int virq, 189static int arizona_irq_map(struct irq_domain *h, unsigned int virq,
189 irq_hw_number_t hw) 190 irq_hw_number_t hw)
@@ -191,7 +192,8 @@ static int arizona_irq_map(struct irq_domain *h, unsigned int virq,
191 struct arizona *data = h->host_data; 192 struct arizona *data = h->host_data;
192 193
193 irq_set_chip_data(virq, data); 194 irq_set_chip_data(virq, data);
194 irq_set_lockdep_class(virq, &arizona_irq_lock_class); 195 irq_set_lockdep_class(virq, &arizona_irq_lock_class,
196 &arizona_irq_request_class);
195 irq_set_chip_and_handler(virq, &arizona_irq_chip, handle_simple_irq); 197 irq_set_chip_and_handler(virq, &arizona_irq_chip, handle_simple_irq);
196 irq_set_nested_thread(virq, 1); 198 irq_set_nested_thread(virq, 1);
197 irq_set_noprobe(virq); 199 irq_set_noprobe(virq);
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
index c9714072e224..59c82cdcf48d 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
@@ -377,6 +377,7 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
377 u8 *ptr; 377 u8 *ptr;
378 u8 *rx_buf; 378 u8 *rx_buf;
379 u8 sum; 379 u8 sum;
380 u8 rx_byte;
380 int ret = 0, final_ret; 381 int ret = 0, final_ret;
381 382
382 len = cros_ec_prepare_tx(ec_dev, ec_msg); 383 len = cros_ec_prepare_tx(ec_dev, ec_msg);
@@ -421,25 +422,22 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
421 if (!ret) { 422 if (!ret) {
422 /* Verify that EC can process command */ 423 /* Verify that EC can process command */
423 for (i = 0; i < len; i++) { 424 for (i = 0; i < len; i++) {
424 switch (rx_buf[i]) { 425 rx_byte = rx_buf[i];
425 case EC_SPI_PAST_END: 426 if (rx_byte == EC_SPI_PAST_END ||
426 case EC_SPI_RX_BAD_DATA: 427 rx_byte == EC_SPI_RX_BAD_DATA ||
427 case EC_SPI_NOT_READY: 428 rx_byte == EC_SPI_NOT_READY) {
428 ret = -EAGAIN; 429 ret = -EREMOTEIO;
429 ec_msg->result = EC_RES_IN_PROGRESS;
430 default:
431 break; 430 break;
432 } 431 }
433 if (ret)
434 break;
435 } 432 }
436 if (!ret)
437 ret = cros_ec_spi_receive_packet(ec_dev,
438 ec_msg->insize + sizeof(*response));
439 } else {
440 dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
441 } 433 }
442 434
435 if (!ret)
436 ret = cros_ec_spi_receive_packet(ec_dev,
437 ec_msg->insize + sizeof(*response));
438 else
439 dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
440
443 final_ret = terminate_request(ec_dev); 441 final_ret = terminate_request(ec_dev);
444 442
445 spi_bus_unlock(ec_spi->spi->master); 443 spi_bus_unlock(ec_spi->spi->master);
@@ -508,6 +506,7 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
508 int i, len; 506 int i, len;
509 u8 *ptr; 507 u8 *ptr;
510 u8 *rx_buf; 508 u8 *rx_buf;
509 u8 rx_byte;
511 int sum; 510 int sum;
512 int ret = 0, final_ret; 511 int ret = 0, final_ret;
513 512
@@ -544,25 +543,22 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
544 if (!ret) { 543 if (!ret) {
545 /* Verify that EC can process command */ 544 /* Verify that EC can process command */
546 for (i = 0; i < len; i++) { 545 for (i = 0; i < len; i++) {
547 switch (rx_buf[i]) { 546 rx_byte = rx_buf[i];
548 case EC_SPI_PAST_END: 547 if (rx_byte == EC_SPI_PAST_END ||
549 case EC_SPI_RX_BAD_DATA: 548 rx_byte == EC_SPI_RX_BAD_DATA ||
550 case EC_SPI_NOT_READY: 549 rx_byte == EC_SPI_NOT_READY) {
551 ret = -EAGAIN; 550 ret = -EREMOTEIO;
552 ec_msg->result = EC_RES_IN_PROGRESS;
553 default:
554 break; 551 break;
555 } 552 }
556 if (ret)
557 break;
558 } 553 }
559 if (!ret)
560 ret = cros_ec_spi_receive_response(ec_dev,
561 ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
562 } else {
563 dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
564 } 554 }
565 555
556 if (!ret)
557 ret = cros_ec_spi_receive_response(ec_dev,
558 ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
559 else
560 dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
561
566 final_ret = terminate_request(ec_dev); 562 final_ret = terminate_request(ec_dev);
567 563
568 spi_bus_unlock(ec_spi->spi->master); 564 spi_bus_unlock(ec_spi->spi->master);
@@ -667,6 +663,7 @@ static int cros_ec_spi_probe(struct spi_device *spi)
667 sizeof(struct ec_response_get_protocol_info); 663 sizeof(struct ec_response_get_protocol_info);
668 ec_dev->dout_size = sizeof(struct ec_host_request); 664 ec_dev->dout_size = sizeof(struct ec_host_request);
669 665
666 ec_spi->last_transfer_ns = ktime_get_ns();
670 667
671 err = cros_ec_register(ec_dev); 668 err = cros_ec_register(ec_dev);
672 if (err) { 669 if (err) {
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index 590fb9aad77d..c3ed885c155c 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -1543,6 +1543,9 @@ static void rtsx_pci_shutdown(struct pci_dev *pcidev)
1543 rtsx_pci_power_off(pcr, HOST_ENTER_S1); 1543 rtsx_pci_power_off(pcr, HOST_ENTER_S1);
1544 1544
1545 pci_disable_device(pcidev); 1545 pci_disable_device(pcidev);
1546 free_irq(pcr->irq, (void *)pcr);
1547 if (pcr->msi_en)
1548 pci_disable_msi(pcr->pci);
1546} 1549}
1547 1550
1548#else /* CONFIG_PM */ 1551#else /* CONFIG_PM */
diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c
index da16bf45fab4..dc94ffc6321a 100644
--- a/drivers/mfd/twl4030-audio.c
+++ b/drivers/mfd/twl4030-audio.c
@@ -159,13 +159,18 @@ unsigned int twl4030_audio_get_mclk(void)
159EXPORT_SYMBOL_GPL(twl4030_audio_get_mclk); 159EXPORT_SYMBOL_GPL(twl4030_audio_get_mclk);
160 160
161static bool twl4030_audio_has_codec(struct twl4030_audio_data *pdata, 161static bool twl4030_audio_has_codec(struct twl4030_audio_data *pdata,
162 struct device_node *node) 162 struct device_node *parent)
163{ 163{
164 struct device_node *node;
165
164 if (pdata && pdata->codec) 166 if (pdata && pdata->codec)
165 return true; 167 return true;
166 168
167 if (of_find_node_by_name(node, "codec")) 169 node = of_get_child_by_name(parent, "codec");
170 if (node) {
171 of_node_put(node);
168 return true; 172 return true;
173 }
169 174
170 return false; 175 return false;
171} 176}
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index d66502d36ba0..dd19f17a1b63 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -97,12 +97,16 @@ static struct reg_sequence twl6040_patch[] = {
97}; 97};
98 98
99 99
100static bool twl6040_has_vibra(struct device_node *node) 100static bool twl6040_has_vibra(struct device_node *parent)
101{ 101{
102#ifdef CONFIG_OF 102 struct device_node *node;
103 if (of_find_node_by_name(node, "vibra")) 103
104 node = of_get_child_by_name(parent, "vibra");
105 if (node) {
106 of_node_put(node);
104 return true; 107 return true;
105#endif 108 }
109
106 return false; 110 return false;
107} 111}
108 112
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 305a7a464d09..4d63ac8a82e0 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -562,7 +562,7 @@ static ssize_t at24_eeprom_write_i2c(struct at24_data *at24, const char *buf,
562static int at24_read(void *priv, unsigned int off, void *val, size_t count) 562static int at24_read(void *priv, unsigned int off, void *val, size_t count)
563{ 563{
564 struct at24_data *at24 = priv; 564 struct at24_data *at24 = priv;
565 struct i2c_client *client; 565 struct device *dev = &at24->client[0]->dev;
566 char *buf = val; 566 char *buf = val;
567 int ret; 567 int ret;
568 568
@@ -572,11 +572,9 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
572 if (off + count > at24->chip.byte_len) 572 if (off + count > at24->chip.byte_len)
573 return -EINVAL; 573 return -EINVAL;
574 574
575 client = at24_translate_offset(at24, &off); 575 ret = pm_runtime_get_sync(dev);
576
577 ret = pm_runtime_get_sync(&client->dev);
578 if (ret < 0) { 576 if (ret < 0) {
579 pm_runtime_put_noidle(&client->dev); 577 pm_runtime_put_noidle(dev);
580 return ret; 578 return ret;
581 } 579 }
582 580
@@ -592,7 +590,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
592 status = at24->read_func(at24, buf, off, count); 590 status = at24->read_func(at24, buf, off, count);
593 if (status < 0) { 591 if (status < 0) {
594 mutex_unlock(&at24->lock); 592 mutex_unlock(&at24->lock);
595 pm_runtime_put(&client->dev); 593 pm_runtime_put(dev);
596 return status; 594 return status;
597 } 595 }
598 buf += status; 596 buf += status;
@@ -602,7 +600,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
602 600
603 mutex_unlock(&at24->lock); 601 mutex_unlock(&at24->lock);
604 602
605 pm_runtime_put(&client->dev); 603 pm_runtime_put(dev);
606 604
607 return 0; 605 return 0;
608} 606}
@@ -610,7 +608,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
610static int at24_write(void *priv, unsigned int off, void *val, size_t count) 608static int at24_write(void *priv, unsigned int off, void *val, size_t count)
611{ 609{
612 struct at24_data *at24 = priv; 610 struct at24_data *at24 = priv;
613 struct i2c_client *client; 611 struct device *dev = &at24->client[0]->dev;
614 char *buf = val; 612 char *buf = val;
615 int ret; 613 int ret;
616 614
@@ -620,11 +618,9 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
620 if (off + count > at24->chip.byte_len) 618 if (off + count > at24->chip.byte_len)
621 return -EINVAL; 619 return -EINVAL;
622 620
623 client = at24_translate_offset(at24, &off); 621 ret = pm_runtime_get_sync(dev);
624
625 ret = pm_runtime_get_sync(&client->dev);
626 if (ret < 0) { 622 if (ret < 0) {
627 pm_runtime_put_noidle(&client->dev); 623 pm_runtime_put_noidle(dev);
628 return ret; 624 return ret;
629 } 625 }
630 626
@@ -640,7 +636,7 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
640 status = at24->write_func(at24, buf, off, count); 636 status = at24->write_func(at24, buf, off, count);
641 if (status < 0) { 637 if (status < 0) {
642 mutex_unlock(&at24->lock); 638 mutex_unlock(&at24->lock);
643 pm_runtime_put(&client->dev); 639 pm_runtime_put(dev);
644 return status; 640 return status;
645 } 641 }
646 buf += status; 642 buf += status;
@@ -650,7 +646,7 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
650 646
651 mutex_unlock(&at24->lock); 647 mutex_unlock(&at24->lock);
652 648
653 pm_runtime_put(&client->dev); 649 pm_runtime_put(dev);
654 650
655 return 0; 651 return 0;
656} 652}
@@ -880,7 +876,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
880 at24->nvmem_config.reg_read = at24_read; 876 at24->nvmem_config.reg_read = at24_read;
881 at24->nvmem_config.reg_write = at24_write; 877 at24->nvmem_config.reg_write = at24_write;
882 at24->nvmem_config.priv = at24; 878 at24->nvmem_config.priv = at24;
883 at24->nvmem_config.stride = 4; 879 at24->nvmem_config.stride = 1;
884 at24->nvmem_config.word_size = 1; 880 at24->nvmem_config.word_size = 1;
885 at24->nvmem_config.size = chip.byte_len; 881 at24->nvmem_config.size = chip.byte_len;
886 882
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
index eda38cbe8530..41f2a9f6851d 100644
--- a/drivers/misc/pti.c
+++ b/drivers/misc/pti.c
@@ -32,7 +32,7 @@
32#include <linux/pci.h> 32#include <linux/pci.h>
33#include <linux/mutex.h> 33#include <linux/mutex.h>
34#include <linux/miscdevice.h> 34#include <linux/miscdevice.h>
35#include <linux/pti.h> 35#include <linux/intel-pti.h>
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include <linux/uaccess.h> 37#include <linux/uaccess.h>
38 38
diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
index f06cd91964ce..79a5b985ccf5 100644
--- a/drivers/mmc/core/card.h
+++ b/drivers/mmc/core/card.h
@@ -75,9 +75,11 @@ struct mmc_fixup {
75#define EXT_CSD_REV_ANY (-1u) 75#define EXT_CSD_REV_ANY (-1u)
76 76
77#define CID_MANFID_SANDISK 0x2 77#define CID_MANFID_SANDISK 0x2
78#define CID_MANFID_ATP 0x9
78#define CID_MANFID_TOSHIBA 0x11 79#define CID_MANFID_TOSHIBA 0x11
79#define CID_MANFID_MICRON 0x13 80#define CID_MANFID_MICRON 0x13
80#define CID_MANFID_SAMSUNG 0x15 81#define CID_MANFID_SAMSUNG 0x15
82#define CID_MANFID_APACER 0x27
81#define CID_MANFID_KINGSTON 0x70 83#define CID_MANFID_KINGSTON 0x70
82#define CID_MANFID_HYNIX 0x90 84#define CID_MANFID_HYNIX 0x90
83 85
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index d209fb466979..208a762b87ef 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1290,7 +1290,7 @@ out_err:
1290 1290
1291static void mmc_select_driver_type(struct mmc_card *card) 1291static void mmc_select_driver_type(struct mmc_card *card)
1292{ 1292{
1293 int card_drv_type, drive_strength, drv_type; 1293 int card_drv_type, drive_strength, drv_type = 0;
1294 int fixed_drv_type = card->host->fixed_drv_type; 1294 int fixed_drv_type = card->host->fixed_drv_type;
1295 1295
1296 card_drv_type = card->ext_csd.raw_driver_strength | 1296 card_drv_type = card->ext_csd.raw_driver_strength |
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index f664e9cbc9f8..75d317623852 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -53,6 +53,14 @@ static const struct mmc_fixup mmc_blk_fixups[] = {
53 MMC_QUIRK_BLK_NO_CMD23), 53 MMC_QUIRK_BLK_NO_CMD23),
54 54
55 /* 55 /*
56 * Some SD cards lockup while using CMD23 multiblock transfers.
57 */
58 MMC_FIXUP("AF SD", CID_MANFID_ATP, CID_OEMID_ANY, add_quirk_sd,
59 MMC_QUIRK_BLK_NO_CMD23),
60 MMC_FIXUP("APUSD", CID_MANFID_APACER, 0x5048, add_quirk_sd,
61 MMC_QUIRK_BLK_NO_CMD23),
62
63 /*
56 * Some MMC cards need longer data read timeout than indicated in CSD. 64 * Some MMC cards need longer data read timeout than indicated in CSD.
57 */ 65 */
58 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, 66 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index fcf7235d5742..157e1d9e7725 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -24,6 +24,7 @@
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/clk.h> 25#include <linux/clk.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/module.h>
27#include <linux/of_device.h> 28#include <linux/of_device.h>
28#include <linux/platform_device.h> 29#include <linux/platform_device.h>
29#include <linux/mmc/host.h> 30#include <linux/mmc/host.h>
@@ -667,3 +668,5 @@ int renesas_sdhi_remove(struct platform_device *pdev)
667 return 0; 668 return 0;
668} 669}
669EXPORT_SYMBOL_GPL(renesas_sdhi_remove); 670EXPORT_SYMBOL_GPL(renesas_sdhi_remove);
671
672MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index f7f157a62a4a..555c7f133eb8 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1424,7 +1424,9 @@ static const struct file_operations s3cmci_fops_state = {
1424struct s3cmci_reg { 1424struct s3cmci_reg {
1425 unsigned short addr; 1425 unsigned short addr;
1426 unsigned char *name; 1426 unsigned char *name;
1427} debug_regs[] = { 1427};
1428
1429static const struct s3cmci_reg debug_regs[] = {
1428 DBG_REG(CON), 1430 DBG_REG(CON),
1429 DBG_REG(PRE), 1431 DBG_REG(PRE),
1430 DBG_REG(CMDARG), 1432 DBG_REG(CMDARG),
@@ -1446,7 +1448,7 @@ struct s3cmci_reg {
1446static int s3cmci_regs_show(struct seq_file *seq, void *v) 1448static int s3cmci_regs_show(struct seq_file *seq, void *v)
1447{ 1449{
1448 struct s3cmci_host *host = seq->private; 1450 struct s3cmci_host *host = seq->private;
1449 struct s3cmci_reg *rptr = debug_regs; 1451 const struct s3cmci_reg *rptr = debug_regs;
1450 1452
1451 for (; rptr->name; rptr++) 1453 for (; rptr->name; rptr++)
1452 seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name, 1454 seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name,
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 85140c9af581..8b941f814472 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -687,6 +687,20 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
687 return; 687 return;
688 } 688 }
689 689
690 /* For i.MX53 eSDHCv3, SYSCTL.SDCLKFS may not be set to 0. */
691 if (is_imx53_esdhc(imx_data)) {
692 /*
693 * According to the i.MX53 reference manual, if DLLCTRL[10] can
694 * be set, then the controller is eSDHCv3, else it is eSDHCv2.
695 */
696 val = readl(host->ioaddr + ESDHC_DLL_CTRL);
697 writel(val | BIT(10), host->ioaddr + ESDHC_DLL_CTRL);
698 temp = readl(host->ioaddr + ESDHC_DLL_CTRL);
699 writel(val, host->ioaddr + ESDHC_DLL_CTRL);
700 if (temp & BIT(10))
701 pre_div = 2;
702 }
703
690 temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); 704 temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
691 temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN 705 temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
692 | ESDHC_CLOCK_MASK); 706 | ESDHC_CLOCK_MASK);
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index f80e911b8843..73b605577447 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -1114,7 +1114,7 @@ static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
1114 if (!ops->oobbuf) 1114 if (!ops->oobbuf)
1115 ops->ooblen = 0; 1115 ops->ooblen = 0;
1116 1116
1117 if (offs < 0 || offs + ops->len >= mtd->size) 1117 if (offs < 0 || offs + ops->len > mtd->size)
1118 return -EINVAL; 1118 return -EINVAL;
1119 1119
1120 if (ops->ooblen) { 1120 if (ops->ooblen) {
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index e0eb51d8c012..dd56a671ea42 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -1763,7 +1763,7 @@ try_dmaread:
1763 err = brcmstb_nand_verify_erased_page(mtd, chip, buf, 1763 err = brcmstb_nand_verify_erased_page(mtd, chip, buf,
1764 addr); 1764 addr);
1765 /* erased page bitflips corrected */ 1765 /* erased page bitflips corrected */
1766 if (err > 0) 1766 if (err >= 0)
1767 return err; 1767 return err;
1768 } 1768 }
1769 1769
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 484f7fbc3f7d..a8bde6665c24 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -253,9 +253,9 @@ static int gpio_nand_probe(struct platform_device *pdev)
253 goto out_ce; 253 goto out_ce;
254 } 254 }
255 255
256 gpiomtd->nwp = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW); 256 gpiomtd->ale = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
257 if (IS_ERR(gpiomtd->nwp)) { 257 if (IS_ERR(gpiomtd->ale)) {
258 ret = PTR_ERR(gpiomtd->nwp); 258 ret = PTR_ERR(gpiomtd->ale);
259 goto out_ce; 259 goto out_ce;
260 } 260 }
261 261
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 50f8d4a1b983..d4d824ef64e9 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -1067,9 +1067,6 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1067 return ret; 1067 return ret;
1068 } 1068 }
1069 1069
1070 /* handle the block mark swapping */
1071 block_mark_swapping(this, payload_virt, auxiliary_virt);
1072
1073 /* Loop over status bytes, accumulating ECC status. */ 1070 /* Loop over status bytes, accumulating ECC status. */
1074 status = auxiliary_virt + nfc_geo->auxiliary_status_offset; 1071 status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
1075 1072
@@ -1158,6 +1155,9 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1158 max_bitflips = max_t(unsigned int, max_bitflips, *status); 1155 max_bitflips = max_t(unsigned int, max_bitflips, *status);
1159 } 1156 }
1160 1157
1158 /* handle the block mark swapping */
1159 block_mark_swapping(this, buf, auxiliary_virt);
1160
1161 if (oob_required) { 1161 if (oob_required) {
1162 /* 1162 /*
1163 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob() 1163 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 90b9a9ccbe60..9285f60e5783 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -963,6 +963,7 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
963 963
964 switch (command) { 964 switch (command) {
965 case NAND_CMD_READ0: 965 case NAND_CMD_READ0:
966 case NAND_CMD_READOOB:
966 case NAND_CMD_PAGEPROG: 967 case NAND_CMD_PAGEPROG:
967 info->use_ecc = 1; 968 info->use_ecc = 1;
968 break; 969 break;
diff --git a/drivers/mux/core.c b/drivers/mux/core.c
index 2260063b0ea8..6e5cf9d9cd99 100644
--- a/drivers/mux/core.c
+++ b/drivers/mux/core.c
@@ -413,6 +413,7 @@ static int of_dev_node_match(struct device *dev, const void *data)
413 return dev->of_node == data; 413 return dev->of_node == data;
414} 414}
415 415
416/* Note this function returns a reference to the mux_chip dev. */
416static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np) 417static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np)
417{ 418{
418 struct device *dev; 419 struct device *dev;
@@ -466,6 +467,7 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
466 (!args.args_count && (mux_chip->controllers > 1))) { 467 (!args.args_count && (mux_chip->controllers > 1))) {
467 dev_err(dev, "%pOF: wrong #mux-control-cells for %pOF\n", 468 dev_err(dev, "%pOF: wrong #mux-control-cells for %pOF\n",
468 np, args.np); 469 np, args.np);
470 put_device(&mux_chip->dev);
469 return ERR_PTR(-EINVAL); 471 return ERR_PTR(-EINVAL);
470 } 472 }
471 473
@@ -476,10 +478,10 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
476 if (controller >= mux_chip->controllers) { 478 if (controller >= mux_chip->controllers) {
477 dev_err(dev, "%pOF: bad mux controller %u specified in %pOF\n", 479 dev_err(dev, "%pOF: bad mux controller %u specified in %pOF\n",
478 np, controller, args.np); 480 np, controller, args.np);
481 put_device(&mux_chip->dev);
479 return ERR_PTR(-EINVAL); 482 return ERR_PTR(-EINVAL);
480 } 483 }
481 484
482 get_device(&mux_chip->dev);
483 return &mux_chip->mux[controller]; 485 return &mux_chip->mux[controller];
484} 486}
485EXPORT_SYMBOL_GPL(mux_control_get); 487EXPORT_SYMBOL_GPL(mux_control_get);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index a13a4896a8bd..760d2c07e3a2 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -184,12 +184,12 @@
184 * Below is some version info we got: 184 * Below is some version info we got:
185 * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR re- 185 * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR re-
186 * Filter? connected? Passive detection ception in MB 186 * Filter? connected? Passive detection ception in MB
187 * MX25 FlexCAN2 03.00.00.00 no no ? no no 187 * MX25 FlexCAN2 03.00.00.00 no no no no no
188 * MX28 FlexCAN2 03.00.04.00 yes yes no no no 188 * MX28 FlexCAN2 03.00.04.00 yes yes no no no
189 * MX35 FlexCAN2 03.00.00.00 no no ? no no 189 * MX35 FlexCAN2 03.00.00.00 no no no no no
190 * MX53 FlexCAN2 03.00.00.00 yes no no no no 190 * MX53 FlexCAN2 03.00.00.00 yes no no no no
191 * MX6s FlexCAN3 10.00.12.00 yes yes no no yes 191 * MX6s FlexCAN3 10.00.12.00 yes yes no no yes
192 * VF610 FlexCAN3 ? no yes ? yes yes? 192 * VF610 FlexCAN3 ? no yes no yes yes?
193 * 193 *
194 * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected. 194 * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
195 */ 195 */
@@ -297,7 +297,8 @@ static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
297 297
298static const struct flexcan_devtype_data fsl_vf610_devtype_data = { 298static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
299 .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | 299 .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
300 FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP, 300 FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
301 FLEXCAN_QUIRK_BROKEN_PERR_STATE,
301}; 302};
302 303
303static const struct can_bittiming_const flexcan_bittiming_const = { 304static const struct can_bittiming_const flexcan_bittiming_const = {
@@ -525,7 +526,7 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
525 data = be32_to_cpup((__be32 *)&cf->data[0]); 526 data = be32_to_cpup((__be32 *)&cf->data[0]);
526 flexcan_write(data, &priv->tx_mb->data[0]); 527 flexcan_write(data, &priv->tx_mb->data[0]);
527 } 528 }
528 if (cf->can_dlc > 3) { 529 if (cf->can_dlc > 4) {
529 data = be32_to_cpup((__be32 *)&cf->data[4]); 530 data = be32_to_cpup((__be32 *)&cf->data[4]);
530 flexcan_write(data, &priv->tx_mb->data[1]); 531 flexcan_write(data, &priv->tx_mb->data[1]);
531 } 532 }
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 85268be0c913..55513411a82e 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -258,21 +258,18 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
258 /* if this frame is an echo, */ 258 /* if this frame is an echo, */
259 if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) && 259 if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) &&
260 !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) { 260 !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) {
261 int n;
262 unsigned long flags; 261 unsigned long flags;
263 262
264 spin_lock_irqsave(&priv->echo_lock, flags); 263 spin_lock_irqsave(&priv->echo_lock, flags);
265 n = can_get_echo_skb(priv->ndev, msg->client); 264 can_get_echo_skb(priv->ndev, msg->client);
266 spin_unlock_irqrestore(&priv->echo_lock, flags); 265 spin_unlock_irqrestore(&priv->echo_lock, flags);
267 266
268 /* count bytes of the echo instead of skb */ 267 /* count bytes of the echo instead of skb */
269 stats->tx_bytes += cf_len; 268 stats->tx_bytes += cf_len;
270 stats->tx_packets++; 269 stats->tx_packets++;
271 270
272 if (n) { 271 /* restart tx queue (a slot is free) */
273 /* restart tx queue only if a slot is free */ 272 netif_wake_queue(priv->ndev);
274 netif_wake_queue(priv->ndev);
275 }
276 273
277 return 0; 274 return 0;
278 } 275 }
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index b4efd711f824..788c3464a3b0 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -825,7 +825,10 @@ err_release_regions:
825err_disable_pci: 825err_disable_pci:
826 pci_disable_device(pdev); 826 pci_disable_device(pdev);
827 827
828 return err; 828 /* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while
829 * the probe() function must return a negative errno in case of failure
830 * (err is unchanged if negative) */
831 return pcibios_err_to_errno(err);
829} 832}
830 833
831/* free the board structure object, as well as its resources: */ 834/* free the board structure object, as well as its resources: */
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 131026fbc2d7..5adc95c922ee 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -717,7 +717,10 @@ failure_release_regions:
717failure_disable_pci: 717failure_disable_pci:
718 pci_disable_device(pdev); 718 pci_disable_device(pdev);
719 719
720 return err; 720 /* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while
721 * the probe() function must return a negative errno in case of failure
722 * (err is unchanged if negative) */
723 return pcibios_err_to_errno(err);
721} 724}
722 725
723static void peak_pci_remove(struct pci_dev *pdev) 726static void peak_pci_remove(struct pci_dev *pdev)
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 4d4941469cfc..db6ea936dc3f 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -637,6 +637,9 @@ static int ti_hecc_rx_poll(struct napi_struct *napi, int quota)
637 mbx_mask = hecc_read(priv, HECC_CANMIM); 637 mbx_mask = hecc_read(priv, HECC_CANMIM);
638 mbx_mask |= HECC_TX_MBOX_MASK; 638 mbx_mask |= HECC_TX_MBOX_MASK;
639 hecc_write(priv, HECC_CANMIM, mbx_mask); 639 hecc_write(priv, HECC_CANMIM, mbx_mask);
640 } else {
641 /* repoll is done only if whole budget is used */
642 num_pkts = quota;
640 } 643 }
641 644
642 return num_pkts; 645 return num_pkts;
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index b3d02759c226..12ff0020ecd6 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -288,6 +288,8 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
288 288
289 case -ECONNRESET: /* unlink */ 289 case -ECONNRESET: /* unlink */
290 case -ENOENT: 290 case -ENOENT:
291 case -EPIPE:
292 case -EPROTO:
291 case -ESHUTDOWN: 293 case -ESHUTDOWN:
292 return; 294 return;
293 295
@@ -393,6 +395,7 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
393 395
394 if (dev->can.state == CAN_STATE_ERROR_WARNING || 396 if (dev->can.state == CAN_STATE_ERROR_WARNING ||
395 dev->can.state == CAN_STATE_ERROR_PASSIVE) { 397 dev->can.state == CAN_STATE_ERROR_PASSIVE) {
398 cf->can_id |= CAN_ERR_CRTL;
396 cf->data[1] = (txerr > rxerr) ? 399 cf->data[1] = (txerr > rxerr) ?
397 CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE; 400 CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE;
398 } 401 }
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 9fdb0f0bfa06..c6dcf93675c0 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -393,6 +393,8 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
393 break; 393 break;
394 394
395 case -ENOENT: 395 case -ENOENT:
396 case -EPIPE:
397 case -EPROTO:
396 case -ESHUTDOWN: 398 case -ESHUTDOWN:
397 return; 399 return;
398 400
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 68ac3e88a8ce..8bf80ad9dc44 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -449,7 +449,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
449 dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)", 449 dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
450 rc); 450 rc);
451 451
452 return rc; 452 return (rc > 0) ? 0 : rc;
453} 453}
454 454
455static void gs_usb_xmit_callback(struct urb *urb) 455static void gs_usb_xmit_callback(struct urb *urb)
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 9b18d96ef526..63587b8e6825 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -609,8 +609,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
609 } 609 }
610 610
611 if (pos + tmp->len > actual_len) { 611 if (pos + tmp->len > actual_len) {
612 dev_err(dev->udev->dev.parent, 612 dev_err_ratelimited(dev->udev->dev.parent,
613 "Format error\n"); 613 "Format error\n");
614 break; 614 break;
615 } 615 }
616 616
@@ -813,6 +813,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
813 if (err) { 813 if (err) {
814 netdev_err(netdev, "Error transmitting URB\n"); 814 netdev_err(netdev, "Error transmitting URB\n");
815 usb_unanchor_urb(urb); 815 usb_unanchor_urb(urb);
816 kfree(buf);
816 usb_free_urb(urb); 817 usb_free_urb(urb);
817 return err; 818 return err;
818 } 819 }
@@ -1325,6 +1326,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
1325 case 0: 1326 case 0:
1326 break; 1327 break;
1327 case -ENOENT: 1328 case -ENOENT:
1329 case -EPIPE:
1330 case -EPROTO:
1328 case -ESHUTDOWN: 1331 case -ESHUTDOWN:
1329 return; 1332 return;
1330 default: 1333 default:
@@ -1333,7 +1336,7 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
1333 goto resubmit_urb; 1336 goto resubmit_urb;
1334 } 1337 }
1335 1338
1336 while (pos <= urb->actual_length - MSG_HEADER_LEN) { 1339 while (pos <= (int)(urb->actual_length - MSG_HEADER_LEN)) {
1337 msg = urb->transfer_buffer + pos; 1340 msg = urb->transfer_buffer + pos;
1338 1341
1339 /* The Kvaser firmware can only read and write messages that 1342 /* The Kvaser firmware can only read and write messages that
@@ -1352,7 +1355,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
1352 } 1355 }
1353 1356
1354 if (pos + msg->len > urb->actual_length) { 1357 if (pos + msg->len > urb->actual_length) {
1355 dev_err(dev->udev->dev.parent, "Format error\n"); 1358 dev_err_ratelimited(dev->udev->dev.parent,
1359 "Format error\n");
1356 break; 1360 break;
1357 } 1361 }
1358 1362
@@ -1768,6 +1772,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1768 spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); 1772 spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
1769 1773
1770 usb_unanchor_urb(urb); 1774 usb_unanchor_urb(urb);
1775 kfree(buf);
1771 1776
1772 stats->tx_dropped++; 1777 stats->tx_dropped++;
1773 1778
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 7f0272558bef..8d8c2086424d 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -592,6 +592,8 @@ static void mcba_usb_read_bulk_callback(struct urb *urb)
592 break; 592 break;
593 593
594 case -ENOENT: 594 case -ENOENT:
595 case -EPIPE:
596 case -EPROTO:
595 case -ESHUTDOWN: 597 case -ESHUTDOWN:
596 return; 598 return;
597 599
@@ -862,7 +864,7 @@ static int mcba_usb_probe(struct usb_interface *intf,
862 goto cleanup_unregister_candev; 864 goto cleanup_unregister_candev;
863 } 865 }
864 866
865 dev_info(&intf->dev, "Microchip CAN BUS analizer connected\n"); 867 dev_info(&intf->dev, "Microchip CAN BUS Analyzer connected\n");
866 868
867 return 0; 869 return 0;
868 870
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 7ccdc3e30c98..53d6bb045e9e 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -184,7 +184,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
184 void *cmd_head = pcan_usb_fd_cmd_buffer(dev); 184 void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
185 int err = 0; 185 int err = 0;
186 u8 *packet_ptr; 186 u8 *packet_ptr;
187 int i, n = 1, packet_len; 187 int packet_len;
188 ptrdiff_t cmd_len; 188 ptrdiff_t cmd_len;
189 189
190 /* usb device unregistered? */ 190 /* usb device unregistered? */
@@ -201,17 +201,13 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
201 } 201 }
202 202
203 packet_ptr = cmd_head; 203 packet_ptr = cmd_head;
204 packet_len = cmd_len;
204 205
205 /* firmware is not able to re-assemble 512 bytes buffer in full-speed */ 206 /* firmware is not able to re-assemble 512 bytes buffer in full-speed */
206 if ((dev->udev->speed != USB_SPEED_HIGH) && 207 if (unlikely(dev->udev->speed != USB_SPEED_HIGH))
207 (cmd_len > PCAN_UFD_LOSPD_PKT_SIZE)) { 208 packet_len = min(packet_len, PCAN_UFD_LOSPD_PKT_SIZE);
208 packet_len = PCAN_UFD_LOSPD_PKT_SIZE;
209 n += cmd_len / packet_len;
210 } else {
211 packet_len = cmd_len;
212 }
213 209
214 for (i = 0; i < n; i++) { 210 do {
215 err = usb_bulk_msg(dev->udev, 211 err = usb_bulk_msg(dev->udev,
216 usb_sndbulkpipe(dev->udev, 212 usb_sndbulkpipe(dev->udev,
217 PCAN_USBPRO_EP_CMDOUT), 213 PCAN_USBPRO_EP_CMDOUT),
@@ -224,7 +220,12 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
224 } 220 }
225 221
226 packet_ptr += packet_len; 222 packet_ptr += packet_len;
227 } 223 cmd_len -= packet_len;
224
225 if (cmd_len < PCAN_UFD_LOSPD_PKT_SIZE)
226 packet_len = cmd_len;
227
228 } while (packet_len > 0);
228 229
229 return err; 230 return err;
230} 231}
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index d000cb62d6ae..27861c417c94 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -524,6 +524,8 @@ static void usb_8dev_read_bulk_callback(struct urb *urb)
524 break; 524 break;
525 525
526 case -ENOENT: 526 case -ENOENT:
527 case -EPIPE:
528 case -EPROTO:
527 case -ESHUTDOWN: 529 case -ESHUTDOWN:
528 return; 530 return;
529 531
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 8404e8852a0f..b4c4a2c76437 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -194,7 +194,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
194 tbp = peer_tb; 194 tbp = peer_tb;
195 } 195 }
196 196
197 if (tbp[IFLA_IFNAME]) { 197 if (ifmp && tbp[IFLA_IFNAME]) {
198 nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); 198 nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
199 name_assign_type = NET_NAME_USER; 199 name_assign_type = NET_NAME_USER;
200 } else { 200 } else {
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index f5a8dd96fd75..4498ab897d94 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1500,10 +1500,13 @@ static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds,
1500{ 1500{
1501 struct b53_device *dev = ds->priv; 1501 struct b53_device *dev = ds->priv;
1502 1502
1503 /* Older models support a different tag format that we do not 1503 /* Older models (5325, 5365) support a different tag format that we do
1504 * support in net/dsa/tag_brcm.c yet. 1504 * not support in net/dsa/tag_brcm.c yet. 539x and 531x5 require managed
1505 * mode to be turned on which means we need to specifically manage ARL
1506 * misses on multicast addresses (TBD).
1505 */ 1507 */
1506 if (is5325(dev) || is5365(dev) || !b53_can_enable_brcm_tags(ds, port)) 1508 if (is5325(dev) || is5365(dev) || is539x(dev) || is531x5(dev) ||
1509 !b53_can_enable_brcm_tags(ds, port))
1507 return DSA_TAG_PROTO_NONE; 1510 return DSA_TAG_PROTO_NONE;
1508 1511
1509 /* Broadcom BCM58xx chips have a flow accelerator on Port 8 1512 /* Broadcom BCM58xx chips have a flow accelerator on Port 8
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index ea01f24f15e7..b62d47210db8 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -14,7 +14,6 @@
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include <linux/interrupt.h> 15#include <linux/interrupt.h>
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/of.h>
18#include <linux/phy.h> 17#include <linux/phy.h>
19#include <linux/phy_fixed.h> 18#include <linux/phy_fixed.h>
20#include <linux/mii.h> 19#include <linux/mii.h>
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index b721a2009b50..23b45da784cb 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -625,7 +625,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
625 bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc, 625 bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc,
626 slice_num, false); 626 slice_num, false);
627 bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc, 627 bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc,
628 slice_num, true); 628 SLICE_NUM_MASK, true);
629 629
630 /* Insert into TCAM now because we need to insert a second rule */ 630 /* Insert into TCAM now because we need to insert a second rule */
631 bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]); 631 bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);
@@ -699,7 +699,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
699 /* Insert into Action and policer RAMs now, set chain ID to 699 /* Insert into Action and policer RAMs now, set chain ID to
700 * the one we are chained to 700 * the one we are chained to
701 */ 701 */
702 ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num, 702 ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port_num,
703 queue_num, true); 703 queue_num, true);
704 if (ret) 704 if (ret)
705 goto out_err; 705 goto out_err;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 8171055fde7a..66d33e97cbc5 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -339,7 +339,7 @@ static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
339 u16 mask; 339 u16 mask;
340 340
341 mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &mask); 341 mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &mask);
342 mask |= GENMASK(chip->g1_irq.nirqs, 0); 342 mask &= ~GENMASK(chip->g1_irq.nirqs, 0);
343 mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask); 343 mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);
344 344
345 free_irq(chip->irq, chip); 345 free_irq(chip->irq, chip);
@@ -395,7 +395,7 @@ static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)
395 return 0; 395 return 0;
396 396
397out_disable: 397out_disable:
398 mask |= GENMASK(chip->g1_irq.nirqs, 0); 398 mask &= ~GENMASK(chip->g1_irq.nirqs, 0);
399 mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask); 399 mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);
400 400
401out_mapping: 401out_mapping:
@@ -2177,6 +2177,19 @@ static const struct of_device_id mv88e6xxx_mdio_external_match[] = {
2177 { }, 2177 { },
2178}; 2178};
2179 2179
2180static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
2181
2182{
2183 struct mv88e6xxx_mdio_bus *mdio_bus;
2184 struct mii_bus *bus;
2185
2186 list_for_each_entry(mdio_bus, &chip->mdios, list) {
2187 bus = mdio_bus->bus;
2188
2189 mdiobus_unregister(bus);
2190 }
2191}
2192
2180static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip, 2193static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
2181 struct device_node *np) 2194 struct device_node *np)
2182{ 2195{
@@ -2201,27 +2214,16 @@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
2201 match = of_match_node(mv88e6xxx_mdio_external_match, child); 2214 match = of_match_node(mv88e6xxx_mdio_external_match, child);
2202 if (match) { 2215 if (match) {
2203 err = mv88e6xxx_mdio_register(chip, child, true); 2216 err = mv88e6xxx_mdio_register(chip, child, true);
2204 if (err) 2217 if (err) {
2218 mv88e6xxx_mdios_unregister(chip);
2205 return err; 2219 return err;
2220 }
2206 } 2221 }
2207 } 2222 }
2208 2223
2209 return 0; 2224 return 0;
2210} 2225}
2211 2226
2212static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
2213
2214{
2215 struct mv88e6xxx_mdio_bus *mdio_bus;
2216 struct mii_bus *bus;
2217
2218 list_for_each_entry(mdio_bus, &chip->mdios, list) {
2219 bus = mdio_bus->bus;
2220
2221 mdiobus_unregister(bus);
2222 }
2223}
2224
2225static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds) 2227static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
2226{ 2228{
2227 struct mv88e6xxx_chip *chip = ds->priv; 2229 struct mv88e6xxx_chip *chip = ds->priv;
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index a7801f6668a5..6315774d72b3 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -338,6 +338,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
338 cmode = MV88E6XXX_PORT_STS_CMODE_2500BASEX; 338 cmode = MV88E6XXX_PORT_STS_CMODE_2500BASEX;
339 break; 339 break;
340 case PHY_INTERFACE_MODE_XGMII: 340 case PHY_INTERFACE_MODE_XGMII:
341 case PHY_INTERFACE_MODE_XAUI:
341 cmode = MV88E6XXX_PORT_STS_CMODE_XAUI; 342 cmode = MV88E6XXX_PORT_STS_CMODE_XAUI;
342 break; 343 break;
343 case PHY_INTERFACE_MODE_RXAUI: 344 case PHY_INTERFACE_MODE_RXAUI:
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index f4e13a7014bd..36c8950dbd2d 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -602,7 +602,7 @@ struct vortex_private {
602 struct sk_buff* rx_skbuff[RX_RING_SIZE]; 602 struct sk_buff* rx_skbuff[RX_RING_SIZE];
603 struct sk_buff* tx_skbuff[TX_RING_SIZE]; 603 struct sk_buff* tx_skbuff[TX_RING_SIZE];
604 unsigned int cur_rx, cur_tx; /* The next free ring entry */ 604 unsigned int cur_rx, cur_tx; /* The next free ring entry */
605 unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ 605 unsigned int dirty_tx; /* The ring entries to be free()ed. */
606 struct vortex_extra_stats xstats; /* NIC-specific extra stats */ 606 struct vortex_extra_stats xstats; /* NIC-specific extra stats */
607 struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */ 607 struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
608 dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */ 608 dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */
@@ -618,7 +618,6 @@ struct vortex_private {
618 618
619 /* The remainder are related to chip state, mostly media selection. */ 619 /* The remainder are related to chip state, mostly media selection. */
620 struct timer_list timer; /* Media selection timer. */ 620 struct timer_list timer; /* Media selection timer. */
621 struct timer_list rx_oom_timer; /* Rx skb allocation retry timer */
622 int options; /* User-settable misc. driver options. */ 621 int options; /* User-settable misc. driver options. */
623 unsigned int media_override:4, /* Passed-in media type. */ 622 unsigned int media_override:4, /* Passed-in media type. */
624 default_media:4, /* Read from the EEPROM/Wn3_Config. */ 623 default_media:4, /* Read from the EEPROM/Wn3_Config. */
@@ -760,7 +759,6 @@ static void mdio_sync(struct vortex_private *vp, int bits);
760static int mdio_read(struct net_device *dev, int phy_id, int location); 759static int mdio_read(struct net_device *dev, int phy_id, int location);
761static void mdio_write(struct net_device *vp, int phy_id, int location, int value); 760static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
762static void vortex_timer(struct timer_list *t); 761static void vortex_timer(struct timer_list *t);
763static void rx_oom_timer(struct timer_list *t);
764static netdev_tx_t vortex_start_xmit(struct sk_buff *skb, 762static netdev_tx_t vortex_start_xmit(struct sk_buff *skb,
765 struct net_device *dev); 763 struct net_device *dev);
766static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb, 764static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
@@ -1601,7 +1599,6 @@ vortex_up(struct net_device *dev)
1601 1599
1602 timer_setup(&vp->timer, vortex_timer, 0); 1600 timer_setup(&vp->timer, vortex_timer, 0);
1603 mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait)); 1601 mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait));
1604 timer_setup(&vp->rx_oom_timer, rx_oom_timer, 0);
1605 1602
1606 if (vortex_debug > 1) 1603 if (vortex_debug > 1)
1607 pr_debug("%s: Initial media type %s.\n", 1604 pr_debug("%s: Initial media type %s.\n",
@@ -1676,7 +1673,7 @@ vortex_up(struct net_device *dev)
1676 window_write16(vp, 0x0040, 4, Wn4_NetDiag); 1673 window_write16(vp, 0x0040, 4, Wn4_NetDiag);
1677 1674
1678 if (vp->full_bus_master_rx) { /* Boomerang bus master. */ 1675 if (vp->full_bus_master_rx) { /* Boomerang bus master. */
1679 vp->cur_rx = vp->dirty_rx = 0; 1676 vp->cur_rx = 0;
1680 /* Initialize the RxEarly register as recommended. */ 1677 /* Initialize the RxEarly register as recommended. */
1681 iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD); 1678 iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
1682 iowrite32(0x0020, ioaddr + PktStatus); 1679 iowrite32(0x0020, ioaddr + PktStatus);
@@ -1729,6 +1726,7 @@ vortex_open(struct net_device *dev)
1729 struct vortex_private *vp = netdev_priv(dev); 1726 struct vortex_private *vp = netdev_priv(dev);
1730 int i; 1727 int i;
1731 int retval; 1728 int retval;
1729 dma_addr_t dma;
1732 1730
1733 /* Use the now-standard shared IRQ implementation. */ 1731 /* Use the now-standard shared IRQ implementation. */
1734 if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ? 1732 if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
@@ -1753,7 +1751,11 @@ vortex_open(struct net_device *dev)
1753 break; /* Bad news! */ 1751 break; /* Bad news! */
1754 1752
1755 skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */ 1753 skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
1756 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 1754 dma = pci_map_single(VORTEX_PCI(vp), skb->data,
1755 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
1756 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
1757 break;
1758 vp->rx_ring[i].addr = cpu_to_le32(dma);
1757 } 1759 }
1758 if (i != RX_RING_SIZE) { 1760 if (i != RX_RING_SIZE) {
1759 pr_emerg("%s: no memory for rx ring\n", dev->name); 1761 pr_emerg("%s: no memory for rx ring\n", dev->name);
@@ -2067,6 +2069,12 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
2067 int len = (skb->len + 3) & ~3; 2069 int len = (skb->len + 3) & ~3;
2068 vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, 2070 vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
2069 PCI_DMA_TODEVICE); 2071 PCI_DMA_TODEVICE);
2072 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
2073 dev_kfree_skb_any(skb);
2074 dev->stats.tx_dropped++;
2075 return NETDEV_TX_OK;
2076 }
2077
2070 spin_lock_irq(&vp->window_lock); 2078 spin_lock_irq(&vp->window_lock);
2071 window_set(vp, 7); 2079 window_set(vp, 7);
2072 iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr); 2080 iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
@@ -2593,7 +2601,7 @@ boomerang_rx(struct net_device *dev)
2593 int entry = vp->cur_rx % RX_RING_SIZE; 2601 int entry = vp->cur_rx % RX_RING_SIZE;
2594 void __iomem *ioaddr = vp->ioaddr; 2602 void __iomem *ioaddr = vp->ioaddr;
2595 int rx_status; 2603 int rx_status;
2596 int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx; 2604 int rx_work_limit = RX_RING_SIZE;
2597 2605
2598 if (vortex_debug > 5) 2606 if (vortex_debug > 5)
2599 pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS)); 2607 pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));
@@ -2614,7 +2622,8 @@ boomerang_rx(struct net_device *dev)
2614 } else { 2622 } else {
2615 /* The packet length: up to 4.5K!. */ 2623 /* The packet length: up to 4.5K!. */
2616 int pkt_len = rx_status & 0x1fff; 2624 int pkt_len = rx_status & 0x1fff;
2617 struct sk_buff *skb; 2625 struct sk_buff *skb, *newskb;
2626 dma_addr_t newdma;
2618 dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr); 2627 dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
2619 2628
2620 if (vortex_debug > 4) 2629 if (vortex_debug > 4)
@@ -2633,9 +2642,27 @@ boomerang_rx(struct net_device *dev)
2633 pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 2642 pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2634 vp->rx_copy++; 2643 vp->rx_copy++;
2635 } else { 2644 } else {
2645 /* Pre-allocate the replacement skb. If it or its
2646 * mapping fails then recycle the buffer thats already
2647 * in place
2648 */
2649 newskb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
2650 if (!newskb) {
2651 dev->stats.rx_dropped++;
2652 goto clear_complete;
2653 }
2654 newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
2655 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2656 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
2657 dev->stats.rx_dropped++;
2658 consume_skb(newskb);
2659 goto clear_complete;
2660 }
2661
2636 /* Pass up the skbuff already on the Rx ring. */ 2662 /* Pass up the skbuff already on the Rx ring. */
2637 skb = vp->rx_skbuff[entry]; 2663 skb = vp->rx_skbuff[entry];
2638 vp->rx_skbuff[entry] = NULL; 2664 vp->rx_skbuff[entry] = newskb;
2665 vp->rx_ring[entry].addr = cpu_to_le32(newdma);
2639 skb_put(skb, pkt_len); 2666 skb_put(skb, pkt_len);
2640 pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 2667 pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2641 vp->rx_nocopy++; 2668 vp->rx_nocopy++;
@@ -2653,55 +2680,15 @@ boomerang_rx(struct net_device *dev)
2653 netif_rx(skb); 2680 netif_rx(skb);
2654 dev->stats.rx_packets++; 2681 dev->stats.rx_packets++;
2655 } 2682 }
2656 entry = (++vp->cur_rx) % RX_RING_SIZE;
2657 }
2658 /* Refill the Rx ring buffers. */
2659 for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
2660 struct sk_buff *skb;
2661 entry = vp->dirty_rx % RX_RING_SIZE;
2662 if (vp->rx_skbuff[entry] == NULL) {
2663 skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
2664 if (skb == NULL) {
2665 static unsigned long last_jif;
2666 if (time_after(jiffies, last_jif + 10 * HZ)) {
2667 pr_warn("%s: memory shortage\n",
2668 dev->name);
2669 last_jif = jiffies;
2670 }
2671 if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
2672 mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
2673 break; /* Bad news! */
2674 }
2675 2683
2676 vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 2684clear_complete:
2677 vp->rx_skbuff[entry] = skb;
2678 }
2679 vp->rx_ring[entry].status = 0; /* Clear complete bit. */ 2685 vp->rx_ring[entry].status = 0; /* Clear complete bit. */
2680 iowrite16(UpUnstall, ioaddr + EL3_CMD); 2686 iowrite16(UpUnstall, ioaddr + EL3_CMD);
2687 entry = (++vp->cur_rx) % RX_RING_SIZE;
2681 } 2688 }
2682 return 0; 2689 return 0;
2683} 2690}
2684 2691
2685/*
2686 * If we've hit a total OOM refilling the Rx ring we poll once a second
2687 * for some memory. Otherwise there is no way to restart the rx process.
2688 */
2689static void
2690rx_oom_timer(struct timer_list *t)
2691{
2692 struct vortex_private *vp = from_timer(vp, t, rx_oom_timer);
2693 struct net_device *dev = vp->mii.dev;
2694
2695 spin_lock_irq(&vp->lock);
2696 if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */
2697 boomerang_rx(dev);
2698 if (vortex_debug > 1) {
2699 pr_debug("%s: rx_oom_timer %s\n", dev->name,
2700 ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
2701 }
2702 spin_unlock_irq(&vp->lock);
2703}
2704
2705static void 2692static void
2706vortex_down(struct net_device *dev, int final_down) 2693vortex_down(struct net_device *dev, int final_down)
2707{ 2694{
@@ -2711,7 +2698,6 @@ vortex_down(struct net_device *dev, int final_down)
2711 netdev_reset_queue(dev); 2698 netdev_reset_queue(dev);
2712 netif_stop_queue(dev); 2699 netif_stop_queue(dev);
2713 2700
2714 del_timer_sync(&vp->rx_oom_timer);
2715 del_timer_sync(&vp->timer); 2701 del_timer_sync(&vp->timer);
2716 2702
2717 /* Turn off statistics ASAP. We update dev->stats below. */ 2703 /* Turn off statistics ASAP. We update dev->stats below. */
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 97c5a89a9cf7..fbe21a817bd8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -75,6 +75,9 @@ static struct workqueue_struct *ena_wq;
75MODULE_DEVICE_TABLE(pci, ena_pci_tbl); 75MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
76 76
77static int ena_rss_init_default(struct ena_adapter *adapter); 77static int ena_rss_init_default(struct ena_adapter *adapter);
78static void check_for_admin_com_state(struct ena_adapter *adapter);
79static void ena_destroy_device(struct ena_adapter *adapter);
80static int ena_restore_device(struct ena_adapter *adapter);
78 81
79static void ena_tx_timeout(struct net_device *dev) 82static void ena_tx_timeout(struct net_device *dev)
80{ 83{
@@ -1565,7 +1568,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
1565 1568
1566static int ena_up_complete(struct ena_adapter *adapter) 1569static int ena_up_complete(struct ena_adapter *adapter)
1567{ 1570{
1568 int rc, i; 1571 int rc;
1569 1572
1570 rc = ena_rss_configure(adapter); 1573 rc = ena_rss_configure(adapter);
1571 if (rc) 1574 if (rc)
@@ -1584,17 +1587,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
1584 1587
1585 ena_napi_enable_all(adapter); 1588 ena_napi_enable_all(adapter);
1586 1589
1587 /* Enable completion queues interrupt */
1588 for (i = 0; i < adapter->num_queues; i++)
1589 ena_unmask_interrupt(&adapter->tx_ring[i],
1590 &adapter->rx_ring[i]);
1591
1592 /* schedule napi in case we had pending packets
1593 * from the last time we disable napi
1594 */
1595 for (i = 0; i < adapter->num_queues; i++)
1596 napi_schedule(&adapter->ena_napi[i].napi);
1597
1598 return 0; 1590 return 0;
1599} 1591}
1600 1592
@@ -1731,7 +1723,7 @@ create_err:
1731 1723
1732static int ena_up(struct ena_adapter *adapter) 1724static int ena_up(struct ena_adapter *adapter)
1733{ 1725{
1734 int rc; 1726 int rc, i;
1735 1727
1736 netdev_dbg(adapter->netdev, "%s\n", __func__); 1728 netdev_dbg(adapter->netdev, "%s\n", __func__);
1737 1729
@@ -1774,6 +1766,17 @@ static int ena_up(struct ena_adapter *adapter)
1774 1766
1775 set_bit(ENA_FLAG_DEV_UP, &adapter->flags); 1767 set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
1776 1768
1769 /* Enable completion queues interrupt */
1770 for (i = 0; i < adapter->num_queues; i++)
1771 ena_unmask_interrupt(&adapter->tx_ring[i],
1772 &adapter->rx_ring[i]);
1773
1774 /* schedule napi in case we had pending packets
1775 * from the last time we disable napi
1776 */
1777 for (i = 0; i < adapter->num_queues; i++)
1778 napi_schedule(&adapter->ena_napi[i].napi);
1779
1777 return rc; 1780 return rc;
1778 1781
1779err_up: 1782err_up:
@@ -1884,6 +1887,17 @@ static int ena_close(struct net_device *netdev)
1884 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 1887 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
1885 ena_down(adapter); 1888 ena_down(adapter);
1886 1889
1890 /* Check for device status and issue reset if needed*/
1891 check_for_admin_com_state(adapter);
1892 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
1893 netif_err(adapter, ifdown, adapter->netdev,
1894 "Destroy failure, restarting device\n");
1895 ena_dump_stats_to_dmesg(adapter);
1896 /* rtnl lock already obtained in dev_ioctl() layer */
1897 ena_destroy_device(adapter);
1898 ena_restore_device(adapter);
1899 }
1900
1887 return 0; 1901 return 0;
1888} 1902}
1889 1903
@@ -2544,11 +2558,12 @@ static void ena_destroy_device(struct ena_adapter *adapter)
2544 2558
2545 ena_com_set_admin_running_state(ena_dev, false); 2559 ena_com_set_admin_running_state(ena_dev, false);
2546 2560
2547 ena_close(netdev); 2561 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2562 ena_down(adapter);
2548 2563
2549 /* Before releasing the ENA resources, a device reset is required. 2564 /* Before releasing the ENA resources, a device reset is required.
2550 * (to prevent the device from accessing them). 2565 * (to prevent the device from accessing them).
2551 * In case the reset flag is set and the device is up, ena_close 2566 * In case the reset flag is set and the device is up, ena_down()
2552 * already perform the reset, so it can be skipped. 2567 * already perform the reset, so it can be skipped.
2553 */ 2568 */
2554 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up)) 2569 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 57e796870595..105fdb958cef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -50,7 +50,7 @@
50#define AQ_CFG_PCI_FUNC_MSIX_IRQS 9U 50#define AQ_CFG_PCI_FUNC_MSIX_IRQS 9U
51#define AQ_CFG_PCI_FUNC_PORTS 2U 51#define AQ_CFG_PCI_FUNC_PORTS 2U
52 52
53#define AQ_CFG_SERVICE_TIMER_INTERVAL (2 * HZ) 53#define AQ_CFG_SERVICE_TIMER_INTERVAL (1 * HZ)
54#define AQ_CFG_POLLING_TIMER_INTERVAL ((unsigned int)(2 * HZ)) 54#define AQ_CFG_POLLING_TIMER_INTERVAL ((unsigned int)(2 * HZ))
55 55
56#define AQ_CFG_SKB_FRAGS_MAX 32U 56#define AQ_CFG_SKB_FRAGS_MAX 32U
@@ -80,6 +80,7 @@
80#define AQ_CFG_DRV_VERSION __stringify(NIC_MAJOR_DRIVER_VERSION)"."\ 80#define AQ_CFG_DRV_VERSION __stringify(NIC_MAJOR_DRIVER_VERSION)"."\
81 __stringify(NIC_MINOR_DRIVER_VERSION)"."\ 81 __stringify(NIC_MINOR_DRIVER_VERSION)"."\
82 __stringify(NIC_BUILD_DRIVER_VERSION)"."\ 82 __stringify(NIC_BUILD_DRIVER_VERSION)"."\
83 __stringify(NIC_REVISION_DRIVER_VERSION) 83 __stringify(NIC_REVISION_DRIVER_VERSION) \
84 AQ_CFG_DRV_VERSION_SUFFIX
84 85
85#endif /* AQ_CFG_H */ 86#endif /* AQ_CFG_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 70efb7467bf3..f2d8063a2cef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -66,14 +66,14 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
66 "OutUCast", 66 "OutUCast",
67 "OutMCast", 67 "OutMCast",
68 "OutBCast", 68 "OutBCast",
69 "InUCastOctects", 69 "InUCastOctets",
70 "OutUCastOctects", 70 "OutUCastOctets",
71 "InMCastOctects", 71 "InMCastOctets",
72 "OutMCastOctects", 72 "OutMCastOctets",
73 "InBCastOctects", 73 "InBCastOctets",
74 "OutBCastOctects", 74 "OutBCastOctets",
75 "InOctects", 75 "InOctets",
76 "OutOctects", 76 "OutOctets",
77 "InPacketsDma", 77 "InPacketsDma",
78 "OutPacketsDma", 78 "OutPacketsDma",
79 "InOctetsDma", 79 "InOctetsDma",
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 0207927dc8a6..b3825de6cdfb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -46,6 +46,28 @@ struct aq_hw_link_status_s {
46 unsigned int mbps; 46 unsigned int mbps;
47}; 47};
48 48
49struct aq_stats_s {
50 u64 uprc;
51 u64 mprc;
52 u64 bprc;
53 u64 erpt;
54 u64 uptc;
55 u64 mptc;
56 u64 bptc;
57 u64 erpr;
58 u64 mbtc;
59 u64 bbtc;
60 u64 mbrc;
61 u64 bbrc;
62 u64 ubrc;
63 u64 ubtc;
64 u64 dpc;
65 u64 dma_pkt_rc;
66 u64 dma_pkt_tc;
67 u64 dma_oct_rc;
68 u64 dma_oct_tc;
69};
70
49#define AQ_HW_IRQ_INVALID 0U 71#define AQ_HW_IRQ_INVALID 0U
50#define AQ_HW_IRQ_LEGACY 1U 72#define AQ_HW_IRQ_LEGACY 1U
51#define AQ_HW_IRQ_MSI 2U 73#define AQ_HW_IRQ_MSI 2U
@@ -85,7 +107,9 @@ struct aq_hw_ops {
85 void (*destroy)(struct aq_hw_s *self); 107 void (*destroy)(struct aq_hw_s *self);
86 108
87 int (*get_hw_caps)(struct aq_hw_s *self, 109 int (*get_hw_caps)(struct aq_hw_s *self,
88 struct aq_hw_caps_s *aq_hw_caps); 110 struct aq_hw_caps_s *aq_hw_caps,
111 unsigned short device,
112 unsigned short subsystem_device);
89 113
90 int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring, 114 int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
91 unsigned int frags); 115 unsigned int frags);
@@ -164,8 +188,7 @@ struct aq_hw_ops {
164 188
165 int (*hw_update_stats)(struct aq_hw_s *self); 189 int (*hw_update_stats)(struct aq_hw_s *self);
166 190
167 int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data, 191 struct aq_stats_s *(*hw_get_hw_stats)(struct aq_hw_s *self);
168 unsigned int *p_count);
169 192
170 int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version); 193 int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
171 194
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 78dfb2ab78ce..75a894a9251c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -37,6 +37,8 @@ static unsigned int aq_itr_rx;
37module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644); 37module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
38MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate"); 38MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");
39 39
40static void aq_nic_update_ndev_stats(struct aq_nic_s *self);
41
40static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues) 42static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
41{ 43{
42 struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; 44 struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
@@ -166,11 +168,8 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
166static void aq_nic_service_timer_cb(struct timer_list *t) 168static void aq_nic_service_timer_cb(struct timer_list *t)
167{ 169{
168 struct aq_nic_s *self = from_timer(self, t, service_timer); 170 struct aq_nic_s *self = from_timer(self, t, service_timer);
169 struct net_device *ndev = aq_nic_get_ndev(self); 171 int ctimer = AQ_CFG_SERVICE_TIMER_INTERVAL;
170 int err = 0; 172 int err = 0;
171 unsigned int i = 0U;
172 struct aq_ring_stats_rx_s stats_rx;
173 struct aq_ring_stats_tx_s stats_tx;
174 173
175 if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) 174 if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
176 goto err_exit; 175 goto err_exit;
@@ -182,23 +181,14 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
182 if (self->aq_hw_ops.hw_update_stats) 181 if (self->aq_hw_ops.hw_update_stats)
183 self->aq_hw_ops.hw_update_stats(self->aq_hw); 182 self->aq_hw_ops.hw_update_stats(self->aq_hw);
184 183
185 memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); 184 aq_nic_update_ndev_stats(self);
186 memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
187 for (i = AQ_DIMOF(self->aq_vec); i--;) {
188 if (self->aq_vec[i])
189 aq_vec_add_stats(self->aq_vec[i], &stats_rx, &stats_tx);
190 }
191 185
192 ndev->stats.rx_packets = stats_rx.packets; 186 /* If no link - use faster timer rate to detect link up asap */
193 ndev->stats.rx_bytes = stats_rx.bytes; 187 if (!netif_carrier_ok(self->ndev))
194 ndev->stats.rx_errors = stats_rx.errors; 188 ctimer = max(ctimer / 2, 1);
195 ndev->stats.tx_packets = stats_tx.packets;
196 ndev->stats.tx_bytes = stats_tx.bytes;
197 ndev->stats.tx_errors = stats_tx.errors;
198 189
199err_exit: 190err_exit:
200 mod_timer(&self->service_timer, 191 mod_timer(&self->service_timer, jiffies + ctimer);
201 jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
202} 192}
203 193
204static void aq_nic_polling_timer_cb(struct timer_list *t) 194static void aq_nic_polling_timer_cb(struct timer_list *t)
@@ -222,7 +212,7 @@ static struct net_device *aq_nic_ndev_alloc(void)
222 212
223struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, 213struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
224 const struct ethtool_ops *et_ops, 214 const struct ethtool_ops *et_ops,
225 struct device *dev, 215 struct pci_dev *pdev,
226 struct aq_pci_func_s *aq_pci_func, 216 struct aq_pci_func_s *aq_pci_func,
227 unsigned int port, 217 unsigned int port,
228 const struct aq_hw_ops *aq_hw_ops) 218 const struct aq_hw_ops *aq_hw_ops)
@@ -242,7 +232,7 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
242 ndev->netdev_ops = ndev_ops; 232 ndev->netdev_ops = ndev_ops;
243 ndev->ethtool_ops = et_ops; 233 ndev->ethtool_ops = et_ops;
244 234
245 SET_NETDEV_DEV(ndev, dev); 235 SET_NETDEV_DEV(ndev, &pdev->dev);
246 236
247 ndev->if_port = port; 237 ndev->if_port = port;
248 self->ndev = ndev; 238 self->ndev = ndev;
@@ -254,7 +244,8 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
254 244
255 self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port, 245 self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port,
256 &self->aq_hw_ops); 246 &self->aq_hw_ops);
257 err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps); 247 err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps,
248 pdev->device, pdev->subsystem_device);
258 if (err < 0) 249 if (err < 0)
259 goto err_exit; 250 goto err_exit;
260 251
@@ -749,16 +740,40 @@ int aq_nic_get_regs_count(struct aq_nic_s *self)
749 740
750void aq_nic_get_stats(struct aq_nic_s *self, u64 *data) 741void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
751{ 742{
752 struct aq_vec_s *aq_vec = NULL;
753 unsigned int i = 0U; 743 unsigned int i = 0U;
754 unsigned int count = 0U; 744 unsigned int count = 0U;
755 int err = 0; 745 struct aq_vec_s *aq_vec = NULL;
746 struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw);
756 747
757 err = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw, data, &count); 748 if (!stats)
758 if (err < 0)
759 goto err_exit; 749 goto err_exit;
760 750
761 data += count; 751 data[i] = stats->uprc + stats->mprc + stats->bprc;
752 data[++i] = stats->uprc;
753 data[++i] = stats->mprc;
754 data[++i] = stats->bprc;
755 data[++i] = stats->erpt;
756 data[++i] = stats->uptc + stats->mptc + stats->bptc;
757 data[++i] = stats->uptc;
758 data[++i] = stats->mptc;
759 data[++i] = stats->bptc;
760 data[++i] = stats->ubrc;
761 data[++i] = stats->ubtc;
762 data[++i] = stats->mbrc;
763 data[++i] = stats->mbtc;
764 data[++i] = stats->bbrc;
765 data[++i] = stats->bbtc;
766 data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
767 data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
768 data[++i] = stats->dma_pkt_rc;
769 data[++i] = stats->dma_pkt_tc;
770 data[++i] = stats->dma_oct_rc;
771 data[++i] = stats->dma_oct_tc;
772 data[++i] = stats->dpc;
773
774 i++;
775
776 data += i;
762 count = 0U; 777 count = 0U;
763 778
764 for (i = 0U, aq_vec = self->aq_vec[0]; 779 for (i = 0U, aq_vec = self->aq_vec[0];
@@ -768,7 +783,20 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
768 } 783 }
769 784
770err_exit:; 785err_exit:;
771 (void)err; 786}
787
788static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
789{
790 struct net_device *ndev = self->ndev;
791 struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw);
792
793 ndev->stats.rx_packets = stats->uprc + stats->mprc + stats->bprc;
794 ndev->stats.rx_bytes = stats->ubrc + stats->mbrc + stats->bbrc;
795 ndev->stats.rx_errors = stats->erpr;
796 ndev->stats.tx_packets = stats->uptc + stats->mptc + stats->bptc;
797 ndev->stats.tx_bytes = stats->ubtc + stats->mbtc + stats->bbtc;
798 ndev->stats.tx_errors = stats->erpt;
799 ndev->stats.multicast = stats->mprc;
772} 800}
773 801
774void aq_nic_get_link_ksettings(struct aq_nic_s *self, 802void aq_nic_get_link_ksettings(struct aq_nic_s *self,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 4309983acdd6..3c9f8db03d5f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -71,7 +71,7 @@ struct aq_nic_cfg_s {
71 71
72struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, 72struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
73 const struct ethtool_ops *et_ops, 73 const struct ethtool_ops *et_ops,
74 struct device *dev, 74 struct pci_dev *pdev,
75 struct aq_pci_func_s *aq_pci_func, 75 struct aq_pci_func_s *aq_pci_func,
76 unsigned int port, 76 unsigned int port,
77 const struct aq_hw_ops *aq_hw_ops); 77 const struct aq_hw_ops *aq_hw_ops);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index cadaa646c89f..58c29d04b186 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -51,7 +51,8 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
51 pci_set_drvdata(pdev, self); 51 pci_set_drvdata(pdev, self);
52 self->pdev = pdev; 52 self->pdev = pdev;
53 53
54 err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps); 54 err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps, pdev->device,
55 pdev->subsystem_device);
55 if (err < 0) 56 if (err < 0)
56 goto err_exit; 57 goto err_exit;
57 58
@@ -59,7 +60,7 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
59 60
60 for (port = 0; port < self->ports; ++port) { 61 for (port = 0; port < self->ports; ++port) {
61 struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops, 62 struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops,
62 &pdev->dev, self, 63 pdev, self,
63 port, aq_hw_ops); 64 port, aq_hw_ops);
64 65
65 if (!aq_nic) { 66 if (!aq_nic) {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 07b3c49a16a4..f18dce14c93c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -18,9 +18,20 @@
18#include "hw_atl_a0_internal.h" 18#include "hw_atl_a0_internal.h"
19 19
20static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self, 20static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self,
21 struct aq_hw_caps_s *aq_hw_caps) 21 struct aq_hw_caps_s *aq_hw_caps,
22 unsigned short device,
23 unsigned short subsystem_device)
22{ 24{
23 memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps)); 25 memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps));
26
27 if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001)
28 aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G;
29
30 if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) {
31 aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G;
32 aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_5G;
33 }
34
24 return 0; 35 return 0;
25} 36}
26 37
@@ -333,6 +344,10 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
333 hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss); 344 hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
334 hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); 345 hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
335 346
347 /* Reset link status and read out initial hardware counters */
348 self->aq_link_status.mbps = 0;
349 hw_atl_utils_update_stats(self);
350
336 err = aq_hw_err_from_flags(self); 351 err = aq_hw_err_from_flags(self);
337 if (err < 0) 352 if (err < 0)
338 goto err_exit; 353 goto err_exit;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index ec68c20efcbd..e4a22ce7bf09 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -16,11 +16,23 @@
16#include "hw_atl_utils.h" 16#include "hw_atl_utils.h"
17#include "hw_atl_llh.h" 17#include "hw_atl_llh.h"
18#include "hw_atl_b0_internal.h" 18#include "hw_atl_b0_internal.h"
19#include "hw_atl_llh_internal.h"
19 20
20static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self, 21static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self,
21 struct aq_hw_caps_s *aq_hw_caps) 22 struct aq_hw_caps_s *aq_hw_caps,
23 unsigned short device,
24 unsigned short subsystem_device)
22{ 25{
23 memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps)); 26 memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps));
27
28 if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001)
29 aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G;
30
31 if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) {
32 aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G;
33 aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_5G;
34 }
35
24 return 0; 36 return 0;
25} 37}
26 38
@@ -357,6 +369,7 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
357 }; 369 };
358 370
359 int err = 0; 371 int err = 0;
372 u32 val;
360 373
361 self->aq_nic_cfg = aq_nic_cfg; 374 self->aq_nic_cfg = aq_nic_cfg;
362 375
@@ -374,6 +387,20 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
374 hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss); 387 hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
375 hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); 388 hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
376 389
390 /* Force limit MRRS on RDM/TDM to 2K */
391 val = aq_hw_read_reg(self, pci_reg_control6_adr);
392 aq_hw_write_reg(self, pci_reg_control6_adr, (val & ~0x707) | 0x404);
393
394 /* TX DMA total request limit. B0 hardware is not capable to
395 * handle more than (8K-MRRS) incoming DMA data.
396 * Value 24 in 256byte units
397 */
398 aq_hw_write_reg(self, tx_dma_total_req_limit_adr, 24);
399
400 /* Reset link status and read out initial hardware counters */
401 self->aq_link_status.mbps = 0;
402 hw_atl_utils_update_stats(self);
403
377 err = aq_hw_err_from_flags(self); 404 err = aq_hw_err_from_flags(self);
378 if (err < 0) 405 if (err < 0)
379 goto err_exit; 406 goto err_exit;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 5527fc0e5942..93450ec930e8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -2343,6 +2343,9 @@
2343#define tx_dma_desc_base_addrmsw_adr(descriptor) \ 2343#define tx_dma_desc_base_addrmsw_adr(descriptor) \
2344 (0x00007c04u + (descriptor) * 0x40) 2344 (0x00007c04u + (descriptor) * 0x40)
2345 2345
2346/* tx dma total request limit */
2347#define tx_dma_total_req_limit_adr 0x00007b20u
2348
2346/* tx interrupt moderation control register definitions 2349/* tx interrupt moderation control register definitions
2347 * Preprocessor definitions for TX Interrupt Moderation Control Register 2350 * Preprocessor definitions for TX Interrupt Moderation Control Register
2348 * Base Address: 0x00008980 2351 * Base Address: 0x00008980
@@ -2369,6 +2372,9 @@
2369/* default value of bitfield reg_res_dsbl */ 2372/* default value of bitfield reg_res_dsbl */
2370#define pci_reg_res_dsbl_default 0x1 2373#define pci_reg_res_dsbl_default 0x1
2371 2374
2375/* PCI core control register */
2376#define pci_reg_control6_adr 0x1014u
2377
2372/* global microprocessor scratch pad definitions */ 2378/* global microprocessor scratch pad definitions */
2373#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4) 2379#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4)
2374 2380
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 1fe016fc4bc7..f2ce12ed4218 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -503,73 +503,43 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
503 struct hw_atl_s *hw_self = PHAL_ATLANTIC; 503 struct hw_atl_s *hw_self = PHAL_ATLANTIC;
504 struct hw_aq_atl_utils_mbox mbox; 504 struct hw_aq_atl_utils_mbox mbox;
505 505
506 if (!self->aq_link_status.mbps)
507 return 0;
508
509 hw_atl_utils_mpi_read_stats(self, &mbox); 506 hw_atl_utils_mpi_read_stats(self, &mbox);
510 507
511#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \ 508#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \
512 mbox.stats._N_ - hw_self->last_stats._N_) 509 mbox.stats._N_ - hw_self->last_stats._N_)
513 510 if (self->aq_link_status.mbps) {
514 AQ_SDELTA(uprc); 511 AQ_SDELTA(uprc);
515 AQ_SDELTA(mprc); 512 AQ_SDELTA(mprc);
516 AQ_SDELTA(bprc); 513 AQ_SDELTA(bprc);
517 AQ_SDELTA(erpt); 514 AQ_SDELTA(erpt);
518 515
519 AQ_SDELTA(uptc); 516 AQ_SDELTA(uptc);
520 AQ_SDELTA(mptc); 517 AQ_SDELTA(mptc);
521 AQ_SDELTA(bptc); 518 AQ_SDELTA(bptc);
522 AQ_SDELTA(erpr); 519 AQ_SDELTA(erpr);
523 520
524 AQ_SDELTA(ubrc); 521 AQ_SDELTA(ubrc);
525 AQ_SDELTA(ubtc); 522 AQ_SDELTA(ubtc);
526 AQ_SDELTA(mbrc); 523 AQ_SDELTA(mbrc);
527 AQ_SDELTA(mbtc); 524 AQ_SDELTA(mbtc);
528 AQ_SDELTA(bbrc); 525 AQ_SDELTA(bbrc);
529 AQ_SDELTA(bbtc); 526 AQ_SDELTA(bbtc);
530 AQ_SDELTA(dpc); 527 AQ_SDELTA(dpc);
531 528 }
532#undef AQ_SDELTA 529#undef AQ_SDELTA
530 hw_self->curr_stats.dma_pkt_rc = stats_rx_dma_good_pkt_counterlsw_get(self);
531 hw_self->curr_stats.dma_pkt_tc = stats_tx_dma_good_pkt_counterlsw_get(self);
532 hw_self->curr_stats.dma_oct_rc = stats_rx_dma_good_octet_counterlsw_get(self);
533 hw_self->curr_stats.dma_oct_tc = stats_tx_dma_good_octet_counterlsw_get(self);
533 534
534 memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats)); 535 memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats));
535 536
536 return 0; 537 return 0;
537} 538}
538 539
539int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, 540struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self)
540 u64 *data, unsigned int *p_count)
541{ 541{
542 struct hw_atl_s *hw_self = PHAL_ATLANTIC; 542 return &PHAL_ATLANTIC->curr_stats;
543 struct hw_atl_stats_s *stats = &hw_self->curr_stats;
544 int i = 0;
545
546 data[i] = stats->uprc + stats->mprc + stats->bprc;
547 data[++i] = stats->uprc;
548 data[++i] = stats->mprc;
549 data[++i] = stats->bprc;
550 data[++i] = stats->erpt;
551 data[++i] = stats->uptc + stats->mptc + stats->bptc;
552 data[++i] = stats->uptc;
553 data[++i] = stats->mptc;
554 data[++i] = stats->bptc;
555 data[++i] = stats->ubrc;
556 data[++i] = stats->ubtc;
557 data[++i] = stats->mbrc;
558 data[++i] = stats->mbtc;
559 data[++i] = stats->bbrc;
560 data[++i] = stats->bbtc;
561 data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
562 data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
563 data[++i] = stats_rx_dma_good_pkt_counterlsw_get(self);
564 data[++i] = stats_tx_dma_good_pkt_counterlsw_get(self);
565 data[++i] = stats_rx_dma_good_octet_counterlsw_get(self);
566 data[++i] = stats_tx_dma_good_octet_counterlsw_get(self);
567 data[++i] = stats->dpc;
568
569 if (p_count)
570 *p_count = ++i;
571
572 return 0;
573} 543}
574 544
575static const u32 hw_atl_utils_hw_mac_regs[] = { 545static const u32 hw_atl_utils_hw_mac_regs[] = {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index c99cc690e425..21aeca6908d3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -129,7 +129,7 @@ struct __packed hw_aq_atl_utils_mbox {
129struct __packed hw_atl_s { 129struct __packed hw_atl_s {
130 struct aq_hw_s base; 130 struct aq_hw_s base;
131 struct hw_atl_stats_s last_stats; 131 struct hw_atl_stats_s last_stats;
132 struct hw_atl_stats_s curr_stats; 132 struct aq_stats_s curr_stats;
133 u64 speed; 133 u64 speed;
134 unsigned int chip_features; 134 unsigned int chip_features;
135 u32 fw_ver_actual; 135 u32 fw_ver_actual;
@@ -207,8 +207,6 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
207 207
208int hw_atl_utils_update_stats(struct aq_hw_s *self); 208int hw_atl_utils_update_stats(struct aq_hw_s *self);
209 209
210int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, 210struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self);
211 u64 *data,
212 unsigned int *p_count);
213 211
214#endif /* HW_ATL_UTILS_H */ 212#endif /* HW_ATL_UTILS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h
index 0de858d215c2..9009f2651e70 100644
--- a/drivers/net/ethernet/aquantia/atlantic/ver.h
+++ b/drivers/net/ethernet/aquantia/atlantic/ver.h
@@ -11,8 +11,10 @@
11#define VER_H 11#define VER_H
12 12
13#define NIC_MAJOR_DRIVER_VERSION 1 13#define NIC_MAJOR_DRIVER_VERSION 1
14#define NIC_MINOR_DRIVER_VERSION 5 14#define NIC_MINOR_DRIVER_VERSION 6
15#define NIC_BUILD_DRIVER_VERSION 345 15#define NIC_BUILD_DRIVER_VERSION 13
16#define NIC_REVISION_DRIVER_VERSION 0 16#define NIC_REVISION_DRIVER_VERSION 0
17 17
18#define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
19
18#endif /* VER_H */ 20#endif /* VER_H */
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index 3c63b16d485f..d9efbc8d783b 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -159,6 +159,8 @@ struct arc_emac_priv {
159 unsigned int link; 159 unsigned int link;
160 unsigned int duplex; 160 unsigned int duplex;
161 unsigned int speed; 161 unsigned int speed;
162
163 unsigned int rx_missed_errors;
162}; 164};
163 165
164/** 166/**
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 3241af1ce718..bd277b0dc615 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -26,6 +26,8 @@
26 26
27#include "emac.h" 27#include "emac.h"
28 28
29static void arc_emac_restart(struct net_device *ndev);
30
29/** 31/**
30 * arc_emac_tx_avail - Return the number of available slots in the tx ring. 32 * arc_emac_tx_avail - Return the number of available slots in the tx ring.
31 * @priv: Pointer to ARC EMAC private data structure. 33 * @priv: Pointer to ARC EMAC private data structure.
@@ -210,39 +212,48 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
210 continue; 212 continue;
211 } 213 }
212 214
213 pktlen = info & LEN_MASK; 215 /* Prepare the BD for next cycle. netif_receive_skb()
214 stats->rx_packets++; 216 * only if new skb was allocated and mapped to avoid holes
215 stats->rx_bytes += pktlen; 217 * in the RX fifo.
216 skb = rx_buff->skb; 218 */
217 skb_put(skb, pktlen); 219 skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
218 skb->dev = ndev; 220 if (unlikely(!skb)) {
219 skb->protocol = eth_type_trans(skb, ndev); 221 if (net_ratelimit())
220 222 netdev_err(ndev, "cannot allocate skb\n");
221 dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr), 223 /* Return ownership to EMAC */
222 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE); 224 rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
223
224 /* Prepare the BD for next cycle */
225 rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
226 EMAC_BUFFER_SIZE);
227 if (unlikely(!rx_buff->skb)) {
228 stats->rx_errors++; 225 stats->rx_errors++;
229 /* Because receive_skb is below, increment rx_dropped */
230 stats->rx_dropped++; 226 stats->rx_dropped++;
231 continue; 227 continue;
232 } 228 }
233 229
234 /* receive_skb only if new skb was allocated to avoid holes */ 230 addr = dma_map_single(&ndev->dev, (void *)skb->data,
235 netif_receive_skb(skb);
236
237 addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
238 EMAC_BUFFER_SIZE, DMA_FROM_DEVICE); 231 EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
239 if (dma_mapping_error(&ndev->dev, addr)) { 232 if (dma_mapping_error(&ndev->dev, addr)) {
240 if (net_ratelimit()) 233 if (net_ratelimit())
241 netdev_err(ndev, "cannot dma map\n"); 234 netdev_err(ndev, "cannot map dma buffer\n");
242 dev_kfree_skb(rx_buff->skb); 235 dev_kfree_skb(skb);
236 /* Return ownership to EMAC */
237 rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
243 stats->rx_errors++; 238 stats->rx_errors++;
239 stats->rx_dropped++;
244 continue; 240 continue;
245 } 241 }
242
243 /* unmap previosly mapped skb */
244 dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
245 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
246
247 pktlen = info & LEN_MASK;
248 stats->rx_packets++;
249 stats->rx_bytes += pktlen;
250 skb_put(rx_buff->skb, pktlen);
251 rx_buff->skb->dev = ndev;
252 rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);
253
254 netif_receive_skb(rx_buff->skb);
255
256 rx_buff->skb = skb;
246 dma_unmap_addr_set(rx_buff, addr, addr); 257 dma_unmap_addr_set(rx_buff, addr, addr);
247 dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE); 258 dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);
248 259
@@ -259,6 +270,53 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
259} 270}
260 271
261/** 272/**
273 * arc_emac_rx_miss_handle - handle R_MISS register
274 * @ndev: Pointer to the net_device structure.
275 */
276static void arc_emac_rx_miss_handle(struct net_device *ndev)
277{
278 struct arc_emac_priv *priv = netdev_priv(ndev);
279 struct net_device_stats *stats = &ndev->stats;
280 unsigned int miss;
281
282 miss = arc_reg_get(priv, R_MISS);
283 if (miss) {
284 stats->rx_errors += miss;
285 stats->rx_missed_errors += miss;
286 priv->rx_missed_errors += miss;
287 }
288}
289
290/**
291 * arc_emac_rx_stall_check - check RX stall
292 * @ndev: Pointer to the net_device structure.
293 * @budget: How many BDs requested to process on 1 call.
294 * @work_done: How many BDs processed
295 *
296 * Under certain conditions EMAC stop reception of incoming packets and
297 * continuously increment R_MISS register instead of saving data into
298 * provided buffer. This function detect that condition and restart
299 * EMAC.
300 */
301static void arc_emac_rx_stall_check(struct net_device *ndev,
302 int budget, unsigned int work_done)
303{
304 struct arc_emac_priv *priv = netdev_priv(ndev);
305 struct arc_emac_bd *rxbd;
306
307 if (work_done)
308 priv->rx_missed_errors = 0;
309
310 if (priv->rx_missed_errors && budget) {
311 rxbd = &priv->rxbd[priv->last_rx_bd];
312 if (le32_to_cpu(rxbd->info) & FOR_EMAC) {
313 arc_emac_restart(ndev);
314 priv->rx_missed_errors = 0;
315 }
316 }
317}
318
319/**
262 * arc_emac_poll - NAPI poll handler. 320 * arc_emac_poll - NAPI poll handler.
263 * @napi: Pointer to napi_struct structure. 321 * @napi: Pointer to napi_struct structure.
264 * @budget: How many BDs to process on 1 call. 322 * @budget: How many BDs to process on 1 call.
@@ -272,6 +330,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
272 unsigned int work_done; 330 unsigned int work_done;
273 331
274 arc_emac_tx_clean(ndev); 332 arc_emac_tx_clean(ndev);
333 arc_emac_rx_miss_handle(ndev);
275 334
276 work_done = arc_emac_rx(ndev, budget); 335 work_done = arc_emac_rx(ndev, budget);
277 if (work_done < budget) { 336 if (work_done < budget) {
@@ -279,6 +338,8 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
279 arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK); 338 arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
280 } 339 }
281 340
341 arc_emac_rx_stall_check(ndev, budget, work_done);
342
282 return work_done; 343 return work_done;
283} 344}
284 345
@@ -320,6 +381,8 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
320 if (status & MSER_MASK) { 381 if (status & MSER_MASK) {
321 stats->rx_missed_errors += 0x100; 382 stats->rx_missed_errors += 0x100;
322 stats->rx_errors += 0x100; 383 stats->rx_errors += 0x100;
384 priv->rx_missed_errors += 0x100;
385 napi_schedule(&priv->napi);
323 } 386 }
324 387
325 if (status & RXCR_MASK) { 388 if (status & RXCR_MASK) {
@@ -732,6 +795,63 @@ static int arc_emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
732} 795}
733 796
734 797
798/**
799 * arc_emac_restart - Restart EMAC
800 * @ndev: Pointer to net_device structure.
801 *
802 * This function do hardware reset of EMAC in order to restore
803 * network packets reception.
804 */
805static void arc_emac_restart(struct net_device *ndev)
806{
807 struct arc_emac_priv *priv = netdev_priv(ndev);
808 struct net_device_stats *stats = &ndev->stats;
809 int i;
810
811 if (net_ratelimit())
812 netdev_warn(ndev, "restarting stalled EMAC\n");
813
814 netif_stop_queue(ndev);
815
816 /* Disable interrupts */
817 arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
818
819 /* Disable EMAC */
820 arc_reg_clr(priv, R_CTRL, EN_MASK);
821
822 /* Return the sk_buff to system */
823 arc_free_tx_queue(ndev);
824
825 /* Clean Tx BD's */
826 priv->txbd_curr = 0;
827 priv->txbd_dirty = 0;
828 memset(priv->txbd, 0, TX_RING_SZ);
829
830 for (i = 0; i < RX_BD_NUM; i++) {
831 struct arc_emac_bd *rxbd = &priv->rxbd[i];
832 unsigned int info = le32_to_cpu(rxbd->info);
833
834 if (!(info & FOR_EMAC)) {
835 stats->rx_errors++;
836 stats->rx_dropped++;
837 }
838 /* Return ownership to EMAC */
839 rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
840 }
841 priv->last_rx_bd = 0;
842
843 /* Make sure info is visible to EMAC before enable */
844 wmb();
845
846 /* Enable interrupts */
847 arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
848
849 /* Enable EMAC */
850 arc_reg_or(priv, R_CTRL, EN_MASK);
851
852 netif_start_queue(ndev);
853}
854
735static const struct net_device_ops arc_emac_netdev_ops = { 855static const struct net_device_ops arc_emac_netdev_ops = {
736 .ndo_open = arc_emac_open, 856 .ndo_open = arc_emac_open,
737 .ndo_stop = arc_emac_stop, 857 .ndo_stop = arc_emac_stop,
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index e278e3d96ee0..16f9bee992fe 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -199,9 +199,11 @@ static int emac_rockchip_probe(struct platform_device *pdev)
199 199
200 /* RMII interface needs always a rate of 50MHz */ 200 /* RMII interface needs always a rate of 50MHz */
201 err = clk_set_rate(priv->refclk, 50000000); 201 err = clk_set_rate(priv->refclk, 50000000);
202 if (err) 202 if (err) {
203 dev_err(dev, 203 dev_err(dev,
204 "failed to change reference clock rate (%d)\n", err); 204 "failed to change reference clock rate (%d)\n", err);
205 goto out_regulator_disable;
206 }
205 207
206 if (priv->soc_data->need_div_macclk) { 208 if (priv->soc_data->need_div_macclk) {
207 priv->macclk = devm_clk_get(dev, "macclk"); 209 priv->macclk = devm_clk_get(dev, "macclk");
@@ -220,19 +222,24 @@ static int emac_rockchip_probe(struct platform_device *pdev)
220 222
221 /* RMII TX/RX needs always a rate of 25MHz */ 223 /* RMII TX/RX needs always a rate of 25MHz */
222 err = clk_set_rate(priv->macclk, 25000000); 224 err = clk_set_rate(priv->macclk, 25000000);
223 if (err) 225 if (err) {
224 dev_err(dev, 226 dev_err(dev,
225 "failed to change mac clock rate (%d)\n", err); 227 "failed to change mac clock rate (%d)\n", err);
228 goto out_clk_disable_macclk;
229 }
226 } 230 }
227 231
228 err = arc_emac_probe(ndev, interface); 232 err = arc_emac_probe(ndev, interface);
229 if (err) { 233 if (err) {
230 dev_err(dev, "failed to probe arc emac (%d)\n", err); 234 dev_err(dev, "failed to probe arc emac (%d)\n", err);
231 goto out_regulator_disable; 235 goto out_clk_disable_macclk;
232 } 236 }
233 237
234 return 0; 238 return 0;
235 239
240out_clk_disable_macclk:
241 if (priv->soc_data->need_div_macclk)
242 clk_disable_unprepare(priv->macclk);
236out_regulator_disable: 243out_regulator_disable:
237 if (priv->regulator) 244 if (priv->regulator)
238 regulator_disable(priv->regulator); 245 regulator_disable(priv->regulator);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4c739d5355d2..8ae269ec17a1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3030,7 +3030,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
3030 3030
3031 del_timer_sync(&bp->timer); 3031 del_timer_sync(&bp->timer);
3032 3032
3033 if (IS_PF(bp)) { 3033 if (IS_PF(bp) && !BP_NOMCP(bp)) {
3034 /* Set ALWAYS_ALIVE bit in shmem */ 3034 /* Set ALWAYS_ALIVE bit in shmem */
3035 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; 3035 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3036 bnx2x_drv_pulse(bp); 3036 bnx2x_drv_pulse(bp);
@@ -3116,7 +3116,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
3116 bp->cnic_loaded = false; 3116 bp->cnic_loaded = false;
3117 3117
3118 /* Clear driver version indication in shmem */ 3118 /* Clear driver version indication in shmem */
3119 if (IS_PF(bp)) 3119 if (IS_PF(bp) && !BP_NOMCP(bp))
3120 bnx2x_update_mng_version(bp); 3120 bnx2x_update_mng_version(bp);
3121 3121
3122 /* Check if there are pending parity attentions. If there are - set 3122 /* Check if there are pending parity attentions. If there are - set
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 91e2a7560b48..ddd5d3ebd201 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9578,6 +9578,15 @@ static int bnx2x_init_shmem(struct bnx2x *bp)
9578 9578
9579 do { 9579 do {
9580 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); 9580 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9581
9582 /* If we read all 0xFFs, means we are in PCI error state and
9583 * should bail out to avoid crashes on adapter's FW reads.
9584 */
9585 if (bp->common.shmem_base == 0xFFFFFFFF) {
9586 bp->flags |= NO_MCP_FLAG;
9587 return -ENODEV;
9588 }
9589
9581 if (bp->common.shmem_base) { 9590 if (bp->common.shmem_base) {
9582 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); 9591 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9583 if (val & SHR_MEM_VALIDITY_MB) 9592 if (val & SHR_MEM_VALIDITY_MB)
@@ -14320,7 +14329,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
14320 BNX2X_ERR("IO slot reset --> driver unload\n"); 14329 BNX2X_ERR("IO slot reset --> driver unload\n");
14321 14330
14322 /* MCP should have been reset; Need to wait for validity */ 14331 /* MCP should have been reset; Need to wait for validity */
14323 bnx2x_init_shmem(bp); 14332 if (bnx2x_init_shmem(bp)) {
14333 rtnl_unlock();
14334 return PCI_ERS_RESULT_DISCONNECT;
14335 }
14324 14336
14325 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { 14337 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
14326 u32 v; 14338 u32 v;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c5c38d4b7d1c..61ca4eb7c6fa 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1883,7 +1883,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
1883 * here forever if we consistently cannot allocate 1883 * here forever if we consistently cannot allocate
1884 * buffers. 1884 * buffers.
1885 */ 1885 */
1886 else if (rc == -ENOMEM) 1886 else if (rc == -ENOMEM && budget)
1887 rx_pkts++; 1887 rx_pkts++;
1888 else if (rc == -EBUSY) /* partial completion */ 1888 else if (rc == -EBUSY) /* partial completion */
1889 break; 1889 break;
@@ -1969,7 +1969,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
1969 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 1969 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1970 1970
1971 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); 1971 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
1972 if (likely(rc == -EIO)) 1972 if (likely(rc == -EIO) && budget)
1973 rx_pkts++; 1973 rx_pkts++;
1974 else if (rc == -EBUSY) /* partial completion */ 1974 else if (rc == -EBUSY) /* partial completion */
1975 break; 1975 break;
@@ -3368,6 +3368,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3368 u16 cp_ring_id, len = 0; 3368 u16 cp_ring_id, len = 0;
3369 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; 3369 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
3370 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; 3370 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
3371 struct hwrm_short_input short_input = {0};
3371 3372
3372 req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++); 3373 req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
3373 memset(resp, 0, PAGE_SIZE); 3374 memset(resp, 0, PAGE_SIZE);
@@ -3376,7 +3377,6 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3376 3377
3377 if (bp->flags & BNXT_FLAG_SHORT_CMD) { 3378 if (bp->flags & BNXT_FLAG_SHORT_CMD) {
3378 void *short_cmd_req = bp->hwrm_short_cmd_req_addr; 3379 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
3379 struct hwrm_short_input short_input = {0};
3380 3380
3381 memcpy(short_cmd_req, req, msg_len); 3381 memcpy(short_cmd_req, req, msg_len);
3382 memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN - 3382 memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN -
@@ -8263,8 +8263,9 @@ static void bnxt_shutdown(struct pci_dev *pdev)
8263 if (netif_running(dev)) 8263 if (netif_running(dev))
8264 dev_close(dev); 8264 dev_close(dev);
8265 8265
8266 bnxt_ulp_shutdown(bp);
8267
8266 if (system_state == SYSTEM_POWER_OFF) { 8268 if (system_state == SYSTEM_POWER_OFF) {
8267 bnxt_ulp_shutdown(bp);
8268 bnxt_clear_int_mode(bp); 8269 bnxt_clear_int_mode(bp);
8269 pci_wake_from_d3(pdev, bp->wol); 8270 pci_wake_from_d3(pdev, bp->wol);
8270 pci_set_power_state(pdev, PCI_D3hot); 8271 pci_set_power_state(pdev, PCI_D3hot);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 5ee18660bc33..c9617675f934 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -70,7 +70,7 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
70 netdev_err(bp->dev, "vf ndo called though sriov is disabled\n"); 70 netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
71 return -EINVAL; 71 return -EINVAL;
72 } 72 }
73 if (vf_id >= bp->pf.max_vfs) { 73 if (vf_id >= bp->pf.active_vfs) {
74 netdev_err(bp->dev, "Invalid VF id %d\n", vf_id); 74 netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
75 return -EINVAL; 75 return -EINVAL;
76 } 76 }
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index d5031f436f83..d8fee26cd45e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -56,7 +56,6 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
56{ 56{
57 int ifindex = tcf_mirred_ifindex(tc_act); 57 int ifindex = tcf_mirred_ifindex(tc_act);
58 struct net_device *dev; 58 struct net_device *dev;
59 u16 dst_fid;
60 59
61 dev = __dev_get_by_index(dev_net(bp->dev), ifindex); 60 dev = __dev_get_by_index(dev_net(bp->dev), ifindex);
62 if (!dev) { 61 if (!dev) {
@@ -64,15 +63,7 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
64 return -EINVAL; 63 return -EINVAL;
65 } 64 }
66 65
67 /* find the FID from dev */
68 dst_fid = bnxt_flow_get_dst_fid(bp, dev);
69 if (dst_fid == BNXT_FID_INVALID) {
70 netdev_info(bp->dev, "can't get fid for ifindex=%d", ifindex);
71 return -EINVAL;
72 }
73
74 actions->flags |= BNXT_TC_ACTION_FLAG_FWD; 66 actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
75 actions->dst_fid = dst_fid;
76 actions->dst_dev = dev; 67 actions->dst_dev = dev;
77 return 0; 68 return 0;
78} 69}
@@ -160,13 +151,17 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
160 if (rc) 151 if (rc)
161 return rc; 152 return rc;
162 153
163 /* Tunnel encap/decap action must be accompanied by a redirect action */ 154 if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
164 if ((actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP || 155 if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
165 actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) && 156 /* dst_fid is PF's fid */
166 !(actions->flags & BNXT_TC_ACTION_FLAG_FWD)) { 157 actions->dst_fid = bp->pf.fw_fid;
167 netdev_info(bp->dev, 158 } else {
168 "error: no redir action along with encap/decap"); 159 /* find the FID from dst_dev */
169 return -EINVAL; 160 actions->dst_fid =
161 bnxt_flow_get_dst_fid(bp, actions->dst_dev);
162 if (actions->dst_fid == BNXT_FID_INVALID)
163 return -EINVAL;
164 }
170 } 165 }
171 166
172 return rc; 167 return rc;
@@ -426,7 +421,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
426 } 421 }
427 422
428 /* If all IP and L4 fields are wildcarded then this is an L2 flow */ 423 /* If all IP and L4 fields are wildcarded then this is an L2 flow */
429 if (is_wildcard(&l3_mask, sizeof(l3_mask)) && 424 if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
430 is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) { 425 is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
431 flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2; 426 flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
432 } else { 427 } else {
@@ -532,10 +527,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
532 } 527 }
533 528
534 if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) { 529 if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
535 enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR | 530 enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
536 CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR;
537 ether_addr_copy(req.dst_macaddr, l2_info->dmac); 531 ether_addr_copy(req.dst_macaddr, l2_info->dmac);
538 ether_addr_copy(req.src_macaddr, l2_info->smac);
539 } 532 }
540 if (l2_info->num_vlans) { 533 if (l2_info->num_vlans) {
541 enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID; 534 enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
@@ -901,10 +894,10 @@ static void bnxt_tc_put_decap_handle(struct bnxt *bp,
901 894
902static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp, 895static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
903 struct ip_tunnel_key *tun_key, 896 struct ip_tunnel_key *tun_key,
904 struct bnxt_tc_l2_key *l2_info, 897 struct bnxt_tc_l2_key *l2_info)
905 struct net_device *real_dst_dev)
906{ 898{
907#ifdef CONFIG_INET 899#ifdef CONFIG_INET
900 struct net_device *real_dst_dev = bp->dev;
908 struct flowi4 flow = { {0} }; 901 struct flowi4 flow = { {0} };
909 struct net_device *dst_dev; 902 struct net_device *dst_dev;
910 struct neighbour *nbr; 903 struct neighbour *nbr;
@@ -1008,14 +1001,13 @@ static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
1008 */ 1001 */
1009 tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src; 1002 tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
1010 tun_key.tp_dst = flow->tun_key.tp_dst; 1003 tun_key.tp_dst = flow->tun_key.tp_dst;
1011 rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info, bp->dev); 1004 rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info);
1012 if (rc) 1005 if (rc)
1013 goto put_decap; 1006 goto put_decap;
1014 1007
1015 decap_key->ttl = tun_key.ttl;
1016 decap_l2_info = &decap_node->l2_info; 1008 decap_l2_info = &decap_node->l2_info;
1009 /* decap smac is wildcarded */
1017 ether_addr_copy(decap_l2_info->dmac, l2_info.smac); 1010 ether_addr_copy(decap_l2_info->dmac, l2_info.smac);
1018 ether_addr_copy(decap_l2_info->smac, l2_info.dmac);
1019 if (l2_info.num_vlans) { 1011 if (l2_info.num_vlans) {
1020 decap_l2_info->num_vlans = l2_info.num_vlans; 1012 decap_l2_info->num_vlans = l2_info.num_vlans;
1021 decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid; 1013 decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid;
@@ -1095,8 +1087,7 @@ static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
1095 if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE) 1087 if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
1096 goto done; 1088 goto done;
1097 1089
1098 rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info, 1090 rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info);
1099 flow->actions.dst_dev);
1100 if (rc) 1091 if (rc)
1101 goto put_encap; 1092 goto put_encap;
1102 1093
@@ -1169,6 +1160,15 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
1169 return 0; 1160 return 0;
1170} 1161}
1171 1162
1163static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
1164 u16 src_fid)
1165{
1166 if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
1167 flow->src_fid = bp->pf.fw_fid;
1168 else
1169 flow->src_fid = src_fid;
1170}
1171
1172/* Add a new flow or replace an existing flow. 1172/* Add a new flow or replace an existing flow.
1173 * Notes on locking: 1173 * Notes on locking:
1174 * There are essentially two critical sections here. 1174 * There are essentially two critical sections here.
@@ -1204,7 +1204,8 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
1204 rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow); 1204 rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
1205 if (rc) 1205 if (rc)
1206 goto free_node; 1206 goto free_node;
1207 flow->src_fid = src_fid; 1207
1208 bnxt_tc_set_src_fid(bp, flow, src_fid);
1208 1209
1209 if (!bnxt_tc_can_offload(bp, flow)) { 1210 if (!bnxt_tc_can_offload(bp, flow)) {
1210 rc = -ENOSPC; 1211 rc = -ENOSPC;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index de51c2177d03..8995cfefbfcf 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -4,11 +4,13 @@
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) 4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) 5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc. 6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2014 Broadcom Corporation. 7 * Copyright (C) 2005-2016 Broadcom Corporation.
8 * Copyright (C) 2016-2017 Broadcom Limited.
8 * 9 *
9 * Firmware is: 10 * Firmware is:
10 * Derived from proprietary unpublished source code, 11 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation. 12 * Copyright (C) 2000-2016 Broadcom Corporation.
13 * Copyright (C) 2016-2017 Broadcom Ltd.
12 * 14 *
13 * Permission is hereby granted for the distribution of this firmware 15 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright 16 * data in hexadecimal or equivalent format, provided this copyright
@@ -10052,6 +10054,16 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
10052 10054
10053 tw32(GRC_MODE, tp->grc_mode | val); 10055 tw32(GRC_MODE, tp->grc_mode | val);
10054 10056
10057 /* On one of the AMD platform, MRRS is restricted to 4000 because of
10058 * south bridge limitation. As a workaround, Driver is setting MRRS
10059 * to 2048 instead of default 4096.
10060 */
10061 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10062 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10063 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10064 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10065 }
10066
10055 /* Setup the timer prescalar register. Clock is always 66Mhz. */ 10067 /* Setup the timer prescalar register. Clock is always 66Mhz. */
10056 val = tr32(GRC_MISC_CFG); 10068 val = tr32(GRC_MISC_CFG);
10057 val &= ~0xff; 10069 val &= ~0xff;
@@ -14225,7 +14237,10 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14225 /* Reset PHY, otherwise the read DMA engine will be in a mode that 14237 /* Reset PHY, otherwise the read DMA engine will be in a mode that
14226 * breaks all requests to 256 bytes. 14238 * breaks all requests to 256 bytes.
14227 */ 14239 */
14228 if (tg3_asic_rev(tp) == ASIC_REV_57766) 14240 if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14241 tg3_asic_rev(tp) == ASIC_REV_5717 ||
14242 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14243 tg3_asic_rev(tp) == ASIC_REV_5720)
14229 reset_phy = true; 14244 reset_phy = true;
14230 14245
14231 err = tg3_restart_hw(tp, reset_phy); 14246 err = tg3_restart_hw(tp, reset_phy);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index c2d02d02d1e6..1f0271fa7c74 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -5,7 +5,8 @@
5 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) 5 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
6 * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com) 6 * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
7 * Copyright (C) 2004 Sun Microsystems Inc. 7 * Copyright (C) 2004 Sun Microsystems Inc.
8 * Copyright (C) 2007-2014 Broadcom Corporation. 8 * Copyright (C) 2007-2016 Broadcom Corporation.
9 * Copyright (C) 2016-2017 Broadcom Limited.
9 */ 10 */
10 11
11#ifndef _T3_H 12#ifndef _T3_H
@@ -96,6 +97,7 @@
96#define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR 0x0106 97#define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR 0x0106
97#define TG3PCI_SUBDEVICE_ID_DELL_MERLOT 0x0109 98#define TG3PCI_SUBDEVICE_ID_DELL_MERLOT 0x0109
98#define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT 0x010a 99#define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT 0x010a
100#define TG3PCI_SUBDEVICE_ID_DELL_5762 0x07f0
99#define TG3PCI_SUBVENDOR_ID_COMPAQ PCI_VENDOR_ID_COMPAQ 101#define TG3PCI_SUBVENDOR_ID_COMPAQ PCI_VENDOR_ID_COMPAQ
100#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE 0x007c 102#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE 0x007c
101#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2 0x009a 103#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2 0x009a
@@ -281,6 +283,9 @@
281#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */ 283#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */
282#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */ 284#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */
283/* 0xa8 --> 0xb8 unused */ 285/* 0xa8 --> 0xb8 unused */
286#define TG3PCI_DEV_STATUS_CTRL 0x000000b4
287#define MAX_READ_REQ_SIZE_2048 0x00004000
288#define MAX_READ_REQ_MASK 0x00007000
284#define TG3PCI_DUAL_MAC_CTRL 0x000000b8 289#define TG3PCI_DUAL_MAC_CTRL 0x000000b8
285#define DUAL_MAC_CTRL_CH_MASK 0x00000003 290#define DUAL_MAC_CTRL_CH_MASK 0x00000003
286#define DUAL_MAC_CTRL_ID 0x00000004 291#define DUAL_MAC_CTRL_ID 0x00000004
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 6aa0eee88ea5..a5eecd895a82 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1113,7 +1113,7 @@ static int liquidio_watchdog(void *param)
1113 dev_err(&oct->pci_dev->dev, 1113 dev_err(&oct->pci_dev->dev,
1114 "ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n", 1114 "ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
1115 core); 1115 core);
1116 err_msg_was_printed[core] = true; 1116 err_msg_was_printed[core] = true;
1117 } 1117 }
1118 } 1118 }
1119 1119
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 8b2c31e2a2b0..a3d12dbde95b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1355,6 +1355,8 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
1355 1355
1356 /* Offload checksum calculation to HW */ 1356 /* Offload checksum calculation to HW */
1357 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1357 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1358 if (ip.v4->version == 4)
1359 hdr->csum_l3 = 1; /* Enable IP csum calculation */
1358 hdr->l3_offset = skb_network_offset(skb); 1360 hdr->l3_offset = skb_network_offset(skb);
1359 hdr->l4_offset = skb_transport_offset(skb); 1361 hdr->l4_offset = skb_transport_offset(skb);
1360 1362
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 6f9fa6e3c42a..d8424ed16c33 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -344,7 +344,6 @@ struct adapter_params {
344 344
345 unsigned int sf_size; /* serial flash size in bytes */ 345 unsigned int sf_size; /* serial flash size in bytes */
346 unsigned int sf_nsec; /* # of flash sectors */ 346 unsigned int sf_nsec; /* # of flash sectors */
347 unsigned int sf_fw_start; /* start of FW image in flash */
348 347
349 unsigned int fw_vers; /* firmware version */ 348 unsigned int fw_vers; /* firmware version */
350 unsigned int bs_vers; /* bootstrap version */ 349 unsigned int bs_vers; /* bootstrap version */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index d4a548a6a55c..a452d5a1b0f3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -111,6 +111,9 @@ static void cxgb4_process_flow_match(struct net_device *dev,
111 ethtype_mask = 0; 111 ethtype_mask = 0;
112 } 112 }
113 113
114 if (ethtype_key == ETH_P_IPV6)
115 fs->type = 1;
116
114 fs->val.ethtype = ethtype_key; 117 fs->val.ethtype = ethtype_key;
115 fs->mask.ethtype = ethtype_mask; 118 fs->mask.ethtype = ethtype_mask;
116 fs->val.proto = key->ip_proto; 119 fs->val.proto = key->ip_proto;
@@ -205,8 +208,8 @@ static void cxgb4_process_flow_match(struct net_device *dev,
205 VLAN_PRIO_SHIFT); 208 VLAN_PRIO_SHIFT);
206 vlan_tci_mask = mask->vlan_id | (mask->vlan_priority << 209 vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
207 VLAN_PRIO_SHIFT); 210 VLAN_PRIO_SHIFT);
208 fs->val.ivlan = cpu_to_be16(vlan_tci); 211 fs->val.ivlan = vlan_tci;
209 fs->mask.ivlan = cpu_to_be16(vlan_tci_mask); 212 fs->mask.ivlan = vlan_tci_mask;
210 213
211 /* Chelsio adapters use ivlan_vld bit to match vlan packets 214 /* Chelsio adapters use ivlan_vld bit to match vlan packets
212 * as 802.1Q. Also, when vlan tag is present in packets, 215 * as 802.1Q. Also, when vlan tag is present in packets,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index f63210f15579..375ef86a84da 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2844,8 +2844,6 @@ enum {
2844 SF_RD_DATA_FAST = 0xb, /* read flash */ 2844 SF_RD_DATA_FAST = 0xb, /* read flash */
2845 SF_RD_ID = 0x9f, /* read ID */ 2845 SF_RD_ID = 0x9f, /* read ID */
2846 SF_ERASE_SECTOR = 0xd8, /* erase sector */ 2846 SF_ERASE_SECTOR = 0xd8, /* erase sector */
2847
2848 FW_MAX_SIZE = 16 * SF_SEC_SIZE,
2849}; 2847};
2850 2848
2851/** 2849/**
@@ -3558,8 +3556,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3558 const __be32 *p = (const __be32 *)fw_data; 3556 const __be32 *p = (const __be32 *)fw_data;
3559 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data; 3557 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
3560 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; 3558 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3561 unsigned int fw_img_start = adap->params.sf_fw_start; 3559 unsigned int fw_start_sec = FLASH_FW_START_SEC;
3562 unsigned int fw_start_sec = fw_img_start / sf_sec_size; 3560 unsigned int fw_size = FLASH_FW_MAX_SIZE;
3561 unsigned int fw_start = FLASH_FW_START;
3563 3562
3564 if (!size) { 3563 if (!size) {
3565 dev_err(adap->pdev_dev, "FW image has no data\n"); 3564 dev_err(adap->pdev_dev, "FW image has no data\n");
@@ -3575,9 +3574,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3575 "FW image size differs from size in FW header\n"); 3574 "FW image size differs from size in FW header\n");
3576 return -EINVAL; 3575 return -EINVAL;
3577 } 3576 }
3578 if (size > FW_MAX_SIZE) { 3577 if (size > fw_size) {
3579 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n", 3578 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
3580 FW_MAX_SIZE); 3579 fw_size);
3581 return -EFBIG; 3580 return -EFBIG;
3582 } 3581 }
3583 if (!t4_fw_matches_chip(adap, hdr)) 3582 if (!t4_fw_matches_chip(adap, hdr))
@@ -3604,11 +3603,11 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3604 */ 3603 */
3605 memcpy(first_page, fw_data, SF_PAGE_SIZE); 3604 memcpy(first_page, fw_data, SF_PAGE_SIZE);
3606 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff); 3605 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
3607 ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page); 3606 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
3608 if (ret) 3607 if (ret)
3609 goto out; 3608 goto out;
3610 3609
3611 addr = fw_img_start; 3610 addr = fw_start;
3612 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { 3611 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3613 addr += SF_PAGE_SIZE; 3612 addr += SF_PAGE_SIZE;
3614 fw_data += SF_PAGE_SIZE; 3613 fw_data += SF_PAGE_SIZE;
@@ -3618,7 +3617,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3618 } 3617 }
3619 3618
3620 ret = t4_write_flash(adap, 3619 ret = t4_write_flash(adap,
3621 fw_img_start + offsetof(struct fw_hdr, fw_ver), 3620 fw_start + offsetof(struct fw_hdr, fw_ver),
3622 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver); 3621 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
3623out: 3622out:
3624 if (ret) 3623 if (ret)
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 410a0a95130b..b3e7fafee3df 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1913,3 +1913,7 @@ static struct platform_driver cs89x0_driver = {
1913module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe); 1913module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe);
1914 1914
1915#endif /* CONFIG_CS89x0_PLATFORM */ 1915#endif /* CONFIG_CS89x0_PLATFORM */
1916
1917MODULE_LICENSE("GPL");
1918MODULE_DESCRIPTION("Crystal Semiconductor (Now Cirrus Logic) CS89[02]0 network driver");
1919MODULE_AUTHOR("Russell Nelson <nelson@crynwr.com>");
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c6e859a27ee6..e180657a02ef 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4634,6 +4634,15 @@ int be_update_queues(struct be_adapter *adapter)
4634 4634
4635 be_schedule_worker(adapter); 4635 be_schedule_worker(adapter);
4636 4636
4637 /*
4638 * The IF was destroyed and re-created. We need to clear
4639 * all promiscuous flags valid for the destroyed IF.
4640 * Without this promisc mode is not restored during
4641 * be_open() because the driver thinks that it is
4642 * already enabled in HW.
4643 */
4644 adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
4645
4637 if (netif_running(netdev)) 4646 if (netif_running(netdev))
4638 status = be_open(netdev); 4647 status = be_open(netdev);
4639 4648
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 610573855213..a74300a4459c 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -818,6 +818,12 @@ static void fec_enet_bd_init(struct net_device *dev)
818 for (i = 0; i < txq->bd.ring_size; i++) { 818 for (i = 0; i < txq->bd.ring_size; i++) {
819 /* Initialize the BD for every fragment in the page. */ 819 /* Initialize the BD for every fragment in the page. */
820 bdp->cbd_sc = cpu_to_fec16(0); 820 bdp->cbd_sc = cpu_to_fec16(0);
821 if (bdp->cbd_bufaddr &&
822 !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
823 dma_unmap_single(&fep->pdev->dev,
824 fec32_to_cpu(bdp->cbd_bufaddr),
825 fec16_to_cpu(bdp->cbd_datlen),
826 DMA_TO_DEVICE);
821 if (txq->tx_skbuff[i]) { 827 if (txq->tx_skbuff[i]) {
822 dev_kfree_skb_any(txq->tx_skbuff[i]); 828 dev_kfree_skb_any(txq->tx_skbuff[i]);
823 txq->tx_skbuff[i] = NULL; 829 txq->tx_skbuff[i] = NULL;
@@ -3463,6 +3469,10 @@ fec_probe(struct platform_device *pdev)
3463 goto failed_regulator; 3469 goto failed_regulator;
3464 } 3470 }
3465 } else { 3471 } else {
3472 if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
3473 ret = -EPROBE_DEFER;
3474 goto failed_regulator;
3475 }
3466 fep->reg_phy = NULL; 3476 fep->reg_phy = NULL;
3467 } 3477 }
3468 3478
@@ -3546,8 +3556,9 @@ failed_clk_ipg:
3546failed_clk: 3556failed_clk:
3547 if (of_phy_is_fixed_link(np)) 3557 if (of_phy_is_fixed_link(np))
3548 of_phy_deregister_fixed_link(np); 3558 of_phy_deregister_fixed_link(np);
3549failed_phy:
3550 of_node_put(phy_node); 3559 of_node_put(phy_node);
3560failed_phy:
3561 dev_id--;
3551failed_ioremap: 3562failed_ioremap:
3552 free_netdev(ndev); 3563 free_netdev(ndev);
3553 3564
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 7892f2f0c6b5..2c2976a2dda6 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -613,9 +613,11 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
613 return NETDEV_TX_OK; 613 return NETDEV_TX_OK;
614} 614}
615 615
616static void fs_timeout(struct net_device *dev) 616static void fs_timeout_work(struct work_struct *work)
617{ 617{
618 struct fs_enet_private *fep = netdev_priv(dev); 618 struct fs_enet_private *fep = container_of(work, struct fs_enet_private,
619 timeout_work);
620 struct net_device *dev = fep->ndev;
619 unsigned long flags; 621 unsigned long flags;
620 int wake = 0; 622 int wake = 0;
621 623
@@ -627,7 +629,6 @@ static void fs_timeout(struct net_device *dev)
627 phy_stop(dev->phydev); 629 phy_stop(dev->phydev);
628 (*fep->ops->stop)(dev); 630 (*fep->ops->stop)(dev);
629 (*fep->ops->restart)(dev); 631 (*fep->ops->restart)(dev);
630 phy_start(dev->phydev);
631 } 632 }
632 633
633 phy_start(dev->phydev); 634 phy_start(dev->phydev);
@@ -639,6 +640,13 @@ static void fs_timeout(struct net_device *dev)
639 netif_wake_queue(dev); 640 netif_wake_queue(dev);
640} 641}
641 642
643static void fs_timeout(struct net_device *dev)
644{
645 struct fs_enet_private *fep = netdev_priv(dev);
646
647 schedule_work(&fep->timeout_work);
648}
649
642/*----------------------------------------------------------------------------- 650/*-----------------------------------------------------------------------------
643 * generic link-change handler - should be sufficient for most cases 651 * generic link-change handler - should be sufficient for most cases
644 *-----------------------------------------------------------------------------*/ 652 *-----------------------------------------------------------------------------*/
@@ -759,6 +767,7 @@ static int fs_enet_close(struct net_device *dev)
759 netif_stop_queue(dev); 767 netif_stop_queue(dev);
760 netif_carrier_off(dev); 768 netif_carrier_off(dev);
761 napi_disable(&fep->napi); 769 napi_disable(&fep->napi);
770 cancel_work_sync(&fep->timeout_work);
762 phy_stop(dev->phydev); 771 phy_stop(dev->phydev);
763 772
764 spin_lock_irqsave(&fep->lock, flags); 773 spin_lock_irqsave(&fep->lock, flags);
@@ -1019,6 +1028,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
1019 1028
1020 ndev->netdev_ops = &fs_enet_netdev_ops; 1029 ndev->netdev_ops = &fs_enet_netdev_ops;
1021 ndev->watchdog_timeo = 2 * HZ; 1030 ndev->watchdog_timeo = 2 * HZ;
1031 INIT_WORK(&fep->timeout_work, fs_timeout_work);
1022 netif_napi_add(ndev, &fep->napi, fs_enet_napi, fpi->napi_weight); 1032 netif_napi_add(ndev, &fep->napi, fs_enet_napi, fpi->napi_weight);
1023 1033
1024 ndev->ethtool_ops = &fs_ethtool_ops; 1034 ndev->ethtool_ops = &fs_ethtool_ops;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index 92e06b37a199..195fae6aec4a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -125,6 +125,7 @@ struct fs_enet_private {
125 spinlock_t lock; /* during all ops except TX pckt processing */ 125 spinlock_t lock; /* during all ops except TX pckt processing */
126 spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */ 126 spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */
127 struct fs_platform_info *fpi; 127 struct fs_platform_info *fpi;
128 struct work_struct timeout_work;
128 const struct fs_ops *ops; 129 const struct fs_ops *ops;
129 int rx_ring, tx_ring; 130 int rx_ring, tx_ring;
130 dma_addr_t ring_mem_addr; 131 dma_addr_t ring_mem_addr;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 5be52d89b182..7f837006bb6a 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1378,9 +1378,11 @@ static int gfar_probe(struct platform_device *ofdev)
1378 1378
1379 gfar_init_addr_hash_table(priv); 1379 gfar_init_addr_hash_table(priv);
1380 1380
1381 /* Insert receive time stamps into padding alignment bytes */ 1381 /* Insert receive time stamps into padding alignment bytes, and
1382 * plus 2 bytes padding to ensure the cpu alignment.
1383 */
1382 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) 1384 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1383 priv->padding = 8; 1385 priv->padding = 8 + DEFAULT_PADDING;
1384 1386
1385 if (dev->features & NETIF_F_IP_CSUM || 1387 if (dev->features & NETIF_F_IP_CSUM ||
1386 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) 1388 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
@@ -1790,6 +1792,7 @@ static int init_phy(struct net_device *dev)
1790 GFAR_SUPPORTED_GBIT : 0; 1792 GFAR_SUPPORTED_GBIT : 0;
1791 phy_interface_t interface; 1793 phy_interface_t interface;
1792 struct phy_device *phydev; 1794 struct phy_device *phydev;
1795 struct ethtool_eee edata;
1793 1796
1794 priv->oldlink = 0; 1797 priv->oldlink = 0;
1795 priv->oldspeed = 0; 1798 priv->oldspeed = 0;
@@ -1814,6 +1817,10 @@ static int init_phy(struct net_device *dev)
1814 /* Add support for flow control, but don't advertise it by default */ 1817 /* Add support for flow control, but don't advertise it by default */
1815 phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); 1818 phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
1816 1819
1820 /* disable EEE autoneg, EEE not supported by eTSEC */
1821 memset(&edata, 0, sizeof(struct ethtool_eee));
1822 phy_ethtool_set_eee(phydev, &edata);
1823
1817 return 0; 1824 return 0;
1818} 1825}
1819 1826
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 544114281ea7..9f8d4f8e57e3 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -319,11 +319,10 @@ static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
319 now = tmr_cnt_read(etsects); 319 now = tmr_cnt_read(etsects);
320 now += delta; 320 now += delta;
321 tmr_cnt_write(etsects, now); 321 tmr_cnt_write(etsects, now);
322 set_fipers(etsects);
322 323
323 spin_unlock_irqrestore(&etsects->lock, flags); 324 spin_unlock_irqrestore(&etsects->lock, flags);
324 325
325 set_fipers(etsects);
326
327 return 0; 326 return 0;
328} 327}
329 328
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 7feff2450ed6..241db3199b88 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -494,6 +494,9 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s
494 case 16384: 494 case 16384:
495 ret |= EMAC_MR1_RFS_16K; 495 ret |= EMAC_MR1_RFS_16K;
496 break; 496 break;
497 case 8192:
498 ret |= EMAC4_MR1_RFS_8K;
499 break;
497 case 4096: 500 case 4096:
498 ret |= EMAC_MR1_RFS_4K; 501 ret |= EMAC_MR1_RFS_4K;
499 break; 502 break;
@@ -516,6 +519,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
516 case 16384: 519 case 16384:
517 ret |= EMAC4_MR1_TFS_16K; 520 ret |= EMAC4_MR1_TFS_16K;
518 break; 521 break;
522 case 8192:
523 ret |= EMAC4_MR1_TFS_8K;
524 break;
519 case 4096: 525 case 4096:
520 ret |= EMAC4_MR1_TFS_4K; 526 ret |= EMAC4_MR1_TFS_4K;
521 break; 527 break;
diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h
index 5afcc27ceebb..c26d2631ca30 100644
--- a/drivers/net/ethernet/ibm/emac/emac.h
+++ b/drivers/net/ethernet/ibm/emac/emac.h
@@ -151,9 +151,11 @@ struct emac_regs {
151 151
152#define EMAC4_MR1_RFS_2K 0x00100000 152#define EMAC4_MR1_RFS_2K 0x00100000
153#define EMAC4_MR1_RFS_4K 0x00180000 153#define EMAC4_MR1_RFS_4K 0x00180000
154#define EMAC4_MR1_RFS_8K 0x00200000
154#define EMAC4_MR1_RFS_16K 0x00280000 155#define EMAC4_MR1_RFS_16K 0x00280000
155#define EMAC4_MR1_TFS_2K 0x00020000 156#define EMAC4_MR1_TFS_2K 0x00020000
156#define EMAC4_MR1_TFS_4K 0x00030000 157#define EMAC4_MR1_TFS_4K 0x00030000
158#define EMAC4_MR1_TFS_8K 0x00040000
157#define EMAC4_MR1_TFS_16K 0x00050000 159#define EMAC4_MR1_TFS_16K 0x00050000
158#define EMAC4_MR1_TR 0x00008000 160#define EMAC4_MR1_TR 0x00008000
159#define EMAC4_MR1_MWSW_001 0x00001000 161#define EMAC4_MR1_MWSW_001 0x00001000
@@ -242,7 +244,7 @@ struct emac_regs {
242#define EMAC_STACR_PHYE 0x00004000 244#define EMAC_STACR_PHYE 0x00004000
243#define EMAC_STACR_STAC_MASK 0x00003000 245#define EMAC_STACR_STAC_MASK 0x00003000
244#define EMAC_STACR_STAC_READ 0x00001000 246#define EMAC_STACR_STAC_READ 0x00001000
245#define EMAC_STACR_STAC_WRITE 0x00002000 247#define EMAC_STACR_STAC_WRITE 0x00000800
246#define EMAC_STACR_OPBC_MASK 0x00000C00 248#define EMAC_STACR_OPBC_MASK 0x00000C00
247#define EMAC_STACR_OPBC_50 0x00000000 249#define EMAC_STACR_OPBC_50 0x00000000
248#define EMAC_STACR_OPBC_66 0x00000400 250#define EMAC_STACR_OPBC_66 0x00000400
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 1dc4aef37d3a..b65f5f3ac034 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -410,6 +410,10 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
410 struct ibmvnic_rx_pool *rx_pool; 410 struct ibmvnic_rx_pool *rx_pool;
411 int rx_scrqs; 411 int rx_scrqs;
412 int i, j, rc; 412 int i, j, rc;
413 u64 *size_array;
414
415 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
416 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
413 417
414 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); 418 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
415 for (i = 0; i < rx_scrqs; i++) { 419 for (i = 0; i < rx_scrqs; i++) {
@@ -417,7 +421,17 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
417 421
418 netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i); 422 netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
419 423
420 rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff); 424 if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
425 free_long_term_buff(adapter, &rx_pool->long_term_buff);
426 rx_pool->buff_size = be64_to_cpu(size_array[i]);
427 alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
428 rx_pool->size *
429 rx_pool->buff_size);
430 } else {
431 rc = reset_long_term_buff(adapter,
432 &rx_pool->long_term_buff);
433 }
434
421 if (rc) 435 if (rc)
422 return rc; 436 return rc;
423 437
@@ -439,14 +453,12 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
439static void release_rx_pools(struct ibmvnic_adapter *adapter) 453static void release_rx_pools(struct ibmvnic_adapter *adapter)
440{ 454{
441 struct ibmvnic_rx_pool *rx_pool; 455 struct ibmvnic_rx_pool *rx_pool;
442 int rx_scrqs;
443 int i, j; 456 int i, j;
444 457
445 if (!adapter->rx_pool) 458 if (!adapter->rx_pool)
446 return; 459 return;
447 460
448 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); 461 for (i = 0; i < adapter->num_active_rx_pools; i++) {
449 for (i = 0; i < rx_scrqs; i++) {
450 rx_pool = &adapter->rx_pool[i]; 462 rx_pool = &adapter->rx_pool[i];
451 463
452 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i); 464 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
@@ -469,6 +481,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
469 481
470 kfree(adapter->rx_pool); 482 kfree(adapter->rx_pool);
471 adapter->rx_pool = NULL; 483 adapter->rx_pool = NULL;
484 adapter->num_active_rx_pools = 0;
472} 485}
473 486
474static int init_rx_pools(struct net_device *netdev) 487static int init_rx_pools(struct net_device *netdev)
@@ -493,6 +506,8 @@ static int init_rx_pools(struct net_device *netdev)
493 return -1; 506 return -1;
494 } 507 }
495 508
509 adapter->num_active_rx_pools = 0;
510
496 for (i = 0; i < rxadd_subcrqs; i++) { 511 for (i = 0; i < rxadd_subcrqs; i++) {
497 rx_pool = &adapter->rx_pool[i]; 512 rx_pool = &adapter->rx_pool[i];
498 513
@@ -536,6 +551,8 @@ static int init_rx_pools(struct net_device *netdev)
536 rx_pool->next_free = 0; 551 rx_pool->next_free = 0;
537 } 552 }
538 553
554 adapter->num_active_rx_pools = rxadd_subcrqs;
555
539 return 0; 556 return 0;
540} 557}
541 558
@@ -586,13 +603,12 @@ static void release_vpd_data(struct ibmvnic_adapter *adapter)
586static void release_tx_pools(struct ibmvnic_adapter *adapter) 603static void release_tx_pools(struct ibmvnic_adapter *adapter)
587{ 604{
588 struct ibmvnic_tx_pool *tx_pool; 605 struct ibmvnic_tx_pool *tx_pool;
589 int i, tx_scrqs; 606 int i;
590 607
591 if (!adapter->tx_pool) 608 if (!adapter->tx_pool)
592 return; 609 return;
593 610
594 tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); 611 for (i = 0; i < adapter->num_active_tx_pools; i++) {
595 for (i = 0; i < tx_scrqs; i++) {
596 netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i); 612 netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i);
597 tx_pool = &adapter->tx_pool[i]; 613 tx_pool = &adapter->tx_pool[i];
598 kfree(tx_pool->tx_buff); 614 kfree(tx_pool->tx_buff);
@@ -603,6 +619,7 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter)
603 619
604 kfree(adapter->tx_pool); 620 kfree(adapter->tx_pool);
605 adapter->tx_pool = NULL; 621 adapter->tx_pool = NULL;
622 adapter->num_active_tx_pools = 0;
606} 623}
607 624
608static int init_tx_pools(struct net_device *netdev) 625static int init_tx_pools(struct net_device *netdev)
@@ -619,6 +636,8 @@ static int init_tx_pools(struct net_device *netdev)
619 if (!adapter->tx_pool) 636 if (!adapter->tx_pool)
620 return -1; 637 return -1;
621 638
639 adapter->num_active_tx_pools = 0;
640
622 for (i = 0; i < tx_subcrqs; i++) { 641 for (i = 0; i < tx_subcrqs; i++) {
623 tx_pool = &adapter->tx_pool[i]; 642 tx_pool = &adapter->tx_pool[i];
624 643
@@ -666,6 +685,8 @@ static int init_tx_pools(struct net_device *netdev)
666 tx_pool->producer_index = 0; 685 tx_pool->producer_index = 0;
667 } 686 }
668 687
688 adapter->num_active_tx_pools = tx_subcrqs;
689
669 return 0; 690 return 0;
670} 691}
671 692
@@ -756,6 +777,12 @@ static int ibmvnic_login(struct net_device *netdev)
756 } 777 }
757 } while (adapter->renegotiate); 778 } while (adapter->renegotiate);
758 779
780 /* handle pending MAC address changes after successful login */
781 if (adapter->mac_change_pending) {
782 __ibmvnic_set_mac(netdev, &adapter->desired.mac);
783 adapter->mac_change_pending = false;
784 }
785
759 return 0; 786 return 0;
760} 787}
761 788
@@ -854,7 +881,7 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
854 if (adapter->vpd->buff) 881 if (adapter->vpd->buff)
855 len = adapter->vpd->len; 882 len = adapter->vpd->len;
856 883
857 reinit_completion(&adapter->fw_done); 884 init_completion(&adapter->fw_done);
858 crq.get_vpd_size.first = IBMVNIC_CRQ_CMD; 885 crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
859 crq.get_vpd_size.cmd = GET_VPD_SIZE; 886 crq.get_vpd_size.cmd = GET_VPD_SIZE;
860 ibmvnic_send_crq(adapter, &crq); 887 ibmvnic_send_crq(adapter, &crq);
@@ -916,6 +943,13 @@ static int init_resources(struct ibmvnic_adapter *adapter)
916 if (!adapter->vpd) 943 if (!adapter->vpd)
917 return -ENOMEM; 944 return -ENOMEM;
918 945
946 /* Vital Product Data (VPD) */
947 rc = ibmvnic_get_vpd(adapter);
948 if (rc) {
949 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
950 return rc;
951 }
952
919 adapter->map_id = 1; 953 adapter->map_id = 1;
920 adapter->napi = kcalloc(adapter->req_rx_queues, 954 adapter->napi = kcalloc(adapter->req_rx_queues,
921 sizeof(struct napi_struct), GFP_KERNEL); 955 sizeof(struct napi_struct), GFP_KERNEL);
@@ -989,15 +1023,10 @@ static int __ibmvnic_open(struct net_device *netdev)
989static int ibmvnic_open(struct net_device *netdev) 1023static int ibmvnic_open(struct net_device *netdev)
990{ 1024{
991 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1025 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
992 int rc, vpd; 1026 int rc;
993 1027
994 mutex_lock(&adapter->reset_lock); 1028 mutex_lock(&adapter->reset_lock);
995 1029
996 if (adapter->mac_change_pending) {
997 __ibmvnic_set_mac(netdev, &adapter->desired.mac);
998 adapter->mac_change_pending = false;
999 }
1000
1001 if (adapter->state != VNIC_CLOSED) { 1030 if (adapter->state != VNIC_CLOSED) {
1002 rc = ibmvnic_login(netdev); 1031 rc = ibmvnic_login(netdev);
1003 if (rc) { 1032 if (rc) {
@@ -1017,11 +1046,6 @@ static int ibmvnic_open(struct net_device *netdev)
1017 rc = __ibmvnic_open(netdev); 1046 rc = __ibmvnic_open(netdev);
1018 netif_carrier_on(netdev); 1047 netif_carrier_on(netdev);
1019 1048
1020 /* Vital Product Data (VPD) */
1021 vpd = ibmvnic_get_vpd(adapter);
1022 if (vpd)
1023 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
1024
1025 mutex_unlock(&adapter->reset_lock); 1049 mutex_unlock(&adapter->reset_lock);
1026 1050
1027 return rc; 1051 return rc;
@@ -1275,6 +1299,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1275 unsigned char *dst; 1299 unsigned char *dst;
1276 u64 *handle_array; 1300 u64 *handle_array;
1277 int index = 0; 1301 int index = 0;
1302 u8 proto = 0;
1278 int ret = 0; 1303 int ret = 0;
1279 1304
1280 if (adapter->resetting) { 1305 if (adapter->resetting) {
@@ -1363,17 +1388,18 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1363 } 1388 }
1364 1389
1365 if (skb->protocol == htons(ETH_P_IP)) { 1390 if (skb->protocol == htons(ETH_P_IP)) {
1366 if (ip_hdr(skb)->version == 4) 1391 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
1367 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4; 1392 proto = ip_hdr(skb)->protocol;
1368 else if (ip_hdr(skb)->version == 6) 1393 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1369 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6; 1394 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
1370 1395 proto = ipv6_hdr(skb)->nexthdr;
1371 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1372 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1373 else if (ip_hdr(skb)->protocol != IPPROTO_TCP)
1374 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1375 } 1396 }
1376 1397
1398 if (proto == IPPROTO_TCP)
1399 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1400 else if (proto == IPPROTO_UDP)
1401 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1402
1377 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1403 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1378 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD; 1404 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
1379 hdrs += 2; 1405 hdrs += 2;
@@ -1527,7 +1553,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1527 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1553 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1528 struct sockaddr *addr = p; 1554 struct sockaddr *addr = p;
1529 1555
1530 if (adapter->state != VNIC_OPEN) { 1556 if (adapter->state == VNIC_PROBED) {
1531 memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr)); 1557 memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
1532 adapter->mac_change_pending = true; 1558 adapter->mac_change_pending = true;
1533 return 0; 1559 return 0;
@@ -1545,6 +1571,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1545static int do_reset(struct ibmvnic_adapter *adapter, 1571static int do_reset(struct ibmvnic_adapter *adapter,
1546 struct ibmvnic_rwi *rwi, u32 reset_state) 1572 struct ibmvnic_rwi *rwi, u32 reset_state)
1547{ 1573{
1574 u64 old_num_rx_queues, old_num_tx_queues;
1548 struct net_device *netdev = adapter->netdev; 1575 struct net_device *netdev = adapter->netdev;
1549 int i, rc; 1576 int i, rc;
1550 1577
@@ -1554,6 +1581,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
1554 netif_carrier_off(netdev); 1581 netif_carrier_off(netdev);
1555 adapter->reset_reason = rwi->reset_reason; 1582 adapter->reset_reason = rwi->reset_reason;
1556 1583
1584 old_num_rx_queues = adapter->req_rx_queues;
1585 old_num_tx_queues = adapter->req_tx_queues;
1586
1557 if (rwi->reset_reason == VNIC_RESET_MOBILITY) { 1587 if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
1558 rc = ibmvnic_reenable_crq_queue(adapter); 1588 rc = ibmvnic_reenable_crq_queue(adapter);
1559 if (rc) 1589 if (rc)
@@ -1598,6 +1628,12 @@ static int do_reset(struct ibmvnic_adapter *adapter,
1598 rc = init_resources(adapter); 1628 rc = init_resources(adapter);
1599 if (rc) 1629 if (rc)
1600 return rc; 1630 return rc;
1631 } else if (adapter->req_rx_queues != old_num_rx_queues ||
1632 adapter->req_tx_queues != old_num_tx_queues) {
1633 release_rx_pools(adapter);
1634 release_tx_pools(adapter);
1635 init_rx_pools(netdev);
1636 init_tx_pools(netdev);
1601 } else { 1637 } else {
1602 rc = reset_tx_pools(adapter); 1638 rc = reset_tx_pools(adapter);
1603 if (rc) 1639 if (rc)
@@ -3345,7 +3381,11 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
3345 return; 3381 return;
3346 } 3382 }
3347 3383
3384 adapter->ip_offload_ctrl.len =
3385 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3348 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB); 3386 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
3387 adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
3388 adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
3349 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum; 3389 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
3350 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum; 3390 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
3351 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum; 3391 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
@@ -3585,7 +3625,17 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
3585 *req_value, 3625 *req_value,
3586 (long int)be64_to_cpu(crq->request_capability_rsp. 3626 (long int)be64_to_cpu(crq->request_capability_rsp.
3587 number), name); 3627 number), name);
3588 *req_value = be64_to_cpu(crq->request_capability_rsp.number); 3628
3629 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
3630 REQ_MTU) {
3631 pr_err("mtu of %llu is not supported. Reverting.\n",
3632 *req_value);
3633 *req_value = adapter->fallback.mtu;
3634 } else {
3635 *req_value =
3636 be64_to_cpu(crq->request_capability_rsp.number);
3637 }
3638
3589 ibmvnic_send_req_caps(adapter, 1); 3639 ibmvnic_send_req_caps(adapter, 1);
3590 return; 3640 return;
3591 default: 3641 default:
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 4487f1e2c266..3aec42118db2 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1091,6 +1091,8 @@ struct ibmvnic_adapter {
1091 u64 opt_rxba_entries_per_subcrq; 1091 u64 opt_rxba_entries_per_subcrq;
1092 __be64 tx_rx_desc_req; 1092 __be64 tx_rx_desc_req;
1093 u8 map_id; 1093 u8 map_id;
1094 u64 num_active_rx_pools;
1095 u64 num_active_tx_pools;
1094 1096
1095 struct tasklet_struct tasklet; 1097 struct tasklet_struct tasklet;
1096 enum vnic_state state; 1098 enum vnic_state state;
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index d7bdea79e9fa..8fd2458060a0 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -331,7 +331,8 @@ struct e1000_adapter {
331enum e1000_state_t { 331enum e1000_state_t {
332 __E1000_TESTING, 332 __E1000_TESTING,
333 __E1000_RESETTING, 333 __E1000_RESETTING,
334 __E1000_DOWN 334 __E1000_DOWN,
335 __E1000_DISABLED
335}; 336};
336 337
337#undef pr_fmt 338#undef pr_fmt
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 1982f7917a8d..3dd4aeb2706d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -945,7 +945,7 @@ static int e1000_init_hw_struct(struct e1000_adapter *adapter,
945static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 945static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
946{ 946{
947 struct net_device *netdev; 947 struct net_device *netdev;
948 struct e1000_adapter *adapter; 948 struct e1000_adapter *adapter = NULL;
949 struct e1000_hw *hw; 949 struct e1000_hw *hw;
950 950
951 static int cards_found; 951 static int cards_found;
@@ -955,6 +955,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
955 u16 tmp = 0; 955 u16 tmp = 0;
956 u16 eeprom_apme_mask = E1000_EEPROM_APME; 956 u16 eeprom_apme_mask = E1000_EEPROM_APME;
957 int bars, need_ioport; 957 int bars, need_ioport;
958 bool disable_dev = false;
958 959
959 /* do not allocate ioport bars when not needed */ 960 /* do not allocate ioport bars when not needed */
960 need_ioport = e1000_is_need_ioport(pdev); 961 need_ioport = e1000_is_need_ioport(pdev);
@@ -1259,11 +1260,13 @@ err_mdio_ioremap:
1259 iounmap(hw->ce4100_gbe_mdio_base_virt); 1260 iounmap(hw->ce4100_gbe_mdio_base_virt);
1260 iounmap(hw->hw_addr); 1261 iounmap(hw->hw_addr);
1261err_ioremap: 1262err_ioremap:
1263 disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1262 free_netdev(netdev); 1264 free_netdev(netdev);
1263err_alloc_etherdev: 1265err_alloc_etherdev:
1264 pci_release_selected_regions(pdev, bars); 1266 pci_release_selected_regions(pdev, bars);
1265err_pci_reg: 1267err_pci_reg:
1266 pci_disable_device(pdev); 1268 if (!adapter || disable_dev)
1269 pci_disable_device(pdev);
1267 return err; 1270 return err;
1268} 1271}
1269 1272
@@ -1281,6 +1284,7 @@ static void e1000_remove(struct pci_dev *pdev)
1281 struct net_device *netdev = pci_get_drvdata(pdev); 1284 struct net_device *netdev = pci_get_drvdata(pdev);
1282 struct e1000_adapter *adapter = netdev_priv(netdev); 1285 struct e1000_adapter *adapter = netdev_priv(netdev);
1283 struct e1000_hw *hw = &adapter->hw; 1286 struct e1000_hw *hw = &adapter->hw;
1287 bool disable_dev;
1284 1288
1285 e1000_down_and_stop(adapter); 1289 e1000_down_and_stop(adapter);
1286 e1000_release_manageability(adapter); 1290 e1000_release_manageability(adapter);
@@ -1299,9 +1303,11 @@ static void e1000_remove(struct pci_dev *pdev)
1299 iounmap(hw->flash_address); 1303 iounmap(hw->flash_address);
1300 pci_release_selected_regions(pdev, adapter->bars); 1304 pci_release_selected_regions(pdev, adapter->bars);
1301 1305
1306 disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1302 free_netdev(netdev); 1307 free_netdev(netdev);
1303 1308
1304 pci_disable_device(pdev); 1309 if (disable_dev)
1310 pci_disable_device(pdev);
1305} 1311}
1306 1312
1307/** 1313/**
@@ -5156,7 +5162,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5156 if (netif_running(netdev)) 5162 if (netif_running(netdev))
5157 e1000_free_irq(adapter); 5163 e1000_free_irq(adapter);
5158 5164
5159 pci_disable_device(pdev); 5165 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5166 pci_disable_device(pdev);
5160 5167
5161 return 0; 5168 return 0;
5162} 5169}
@@ -5200,6 +5207,10 @@ static int e1000_resume(struct pci_dev *pdev)
5200 pr_err("Cannot enable PCI device from suspend\n"); 5207 pr_err("Cannot enable PCI device from suspend\n");
5201 return err; 5208 return err;
5202 } 5209 }
5210
5211 /* flush memory to make sure state is correct */
5212 smp_mb__before_atomic();
5213 clear_bit(__E1000_DISABLED, &adapter->flags);
5203 pci_set_master(pdev); 5214 pci_set_master(pdev);
5204 5215
5205 pci_enable_wake(pdev, PCI_D3hot, 0); 5216 pci_enable_wake(pdev, PCI_D3hot, 0);
@@ -5274,7 +5285,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5274 5285
5275 if (netif_running(netdev)) 5286 if (netif_running(netdev))
5276 e1000_down(adapter); 5287 e1000_down(adapter);
5277 pci_disable_device(pdev); 5288
5289 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5290 pci_disable_device(pdev);
5278 5291
5279 /* Request a slot slot reset. */ 5292 /* Request a slot slot reset. */
5280 return PCI_ERS_RESULT_NEED_RESET; 5293 return PCI_ERS_RESULT_NEED_RESET;
@@ -5302,6 +5315,10 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5302 pr_err("Cannot re-enable PCI device after reset.\n"); 5315 pr_err("Cannot re-enable PCI device after reset.\n");
5303 return PCI_ERS_RESULT_DISCONNECT; 5316 return PCI_ERS_RESULT_DISCONNECT;
5304 } 5317 }
5318
5319 /* flush memory to make sure state is correct */
5320 smp_mb__before_atomic();
5321 clear_bit(__E1000_DISABLED, &adapter->flags);
5305 pci_set_master(pdev); 5322 pci_set_master(pdev);
5306 5323
5307 pci_enable_wake(pdev, PCI_D3hot, 0); 5324 pci_enable_wake(pdev, PCI_D3hot, 0);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index d6d4ed7acf03..31277d3bb7dc 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1367,6 +1367,9 @@ out:
1367 * Checks to see of the link status of the hardware has changed. If a 1367 * Checks to see of the link status of the hardware has changed. If a
1368 * change in link status has been detected, then we read the PHY registers 1368 * change in link status has been detected, then we read the PHY registers
1369 * to get the current speed/duplex if link exists. 1369 * to get the current speed/duplex if link exists.
1370 *
1371 * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
1372 * up).
1370 **/ 1373 **/
1371static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) 1374static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1372{ 1375{
@@ -1382,7 +1385,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1382 * Change or Rx Sequence Error interrupt. 1385 * Change or Rx Sequence Error interrupt.
1383 */ 1386 */
1384 if (!mac->get_link_status) 1387 if (!mac->get_link_status)
1385 return 0; 1388 return 1;
1386 1389
1387 /* First we want to see if the MII Status Register reports 1390 /* First we want to see if the MII Status Register reports
1388 * link. If so, then we want to get the current speed/duplex 1391 * link. If so, then we want to get the current speed/duplex
@@ -1613,10 +1616,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1613 * different link partner. 1616 * different link partner.
1614 */ 1617 */
1615 ret_val = e1000e_config_fc_after_link_up(hw); 1618 ret_val = e1000e_config_fc_after_link_up(hw);
1616 if (ret_val) 1619 if (ret_val) {
1617 e_dbg("Error configuring flow control\n"); 1620 e_dbg("Error configuring flow control\n");
1621 return ret_val;
1622 }
1618 1623
1619 return ret_val; 1624 return 1;
1620} 1625}
1621 1626
1622static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) 1627static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 7f605221a686..a434fecfdfeb 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -2463,7 +2463,6 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
2463 return err; 2463 return err;
2464} 2464}
2465 2465
2466#ifdef CONFIG_PM
2467/** 2466/**
2468 * fm10k_resume - Generic PM resume hook 2467 * fm10k_resume - Generic PM resume hook
2469 * @dev: generic device structure 2468 * @dev: generic device structure
@@ -2472,7 +2471,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
2472 * suspend or hibernation. This function does not need to handle lower PCIe 2471 * suspend or hibernation. This function does not need to handle lower PCIe
2473 * device state as the stack takes care of that for us. 2472 * device state as the stack takes care of that for us.
2474 **/ 2473 **/
2475static int fm10k_resume(struct device *dev) 2474static int __maybe_unused fm10k_resume(struct device *dev)
2476{ 2475{
2477 struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev)); 2476 struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
2478 struct net_device *netdev = interface->netdev; 2477 struct net_device *netdev = interface->netdev;
@@ -2499,7 +2498,7 @@ static int fm10k_resume(struct device *dev)
2499 * system suspend or hibernation. This function does not need to handle lower 2498 * system suspend or hibernation. This function does not need to handle lower
2500 * PCIe device state as the stack takes care of that for us. 2499 * PCIe device state as the stack takes care of that for us.
2501 **/ 2500 **/
2502static int fm10k_suspend(struct device *dev) 2501static int __maybe_unused fm10k_suspend(struct device *dev)
2503{ 2502{
2504 struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev)); 2503 struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
2505 struct net_device *netdev = interface->netdev; 2504 struct net_device *netdev = interface->netdev;
@@ -2511,8 +2510,6 @@ static int fm10k_suspend(struct device *dev)
2511 return 0; 2510 return 0;
2512} 2511}
2513 2512
2514#endif /* CONFIG_PM */
2515
2516/** 2513/**
2517 * fm10k_io_error_detected - called when PCI error is detected 2514 * fm10k_io_error_detected - called when PCI error is detected
2518 * @pdev: Pointer to PCI device 2515 * @pdev: Pointer to PCI device
@@ -2643,11 +2640,9 @@ static struct pci_driver fm10k_driver = {
2643 .id_table = fm10k_pci_tbl, 2640 .id_table = fm10k_pci_tbl,
2644 .probe = fm10k_probe, 2641 .probe = fm10k_probe,
2645 .remove = fm10k_remove, 2642 .remove = fm10k_remove,
2646#ifdef CONFIG_PM
2647 .driver = { 2643 .driver = {
2648 .pm = &fm10k_pm_ops, 2644 .pm = &fm10k_pm_ops,
2649 }, 2645 },
2650#endif /* CONFIG_PM */
2651 .sriov_configure = fm10k_iov_configure, 2646 .sriov_configure = fm10k_iov_configure,
2652 .err_handler = &fm10k_err_handler 2647 .err_handler = &fm10k_err_handler
2653}; 2648};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 321d8be80871..af792112a2d3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1573,11 +1573,18 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
1573 else 1573 else
1574 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); 1574 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1575 1575
1576 /* Copy the address first, so that we avoid a possible race with
1577 * .set_rx_mode(). If we copy after changing the address in the filter
1578 * list, we might open ourselves to a narrow race window where
1579 * .set_rx_mode could delete our dev_addr filter and prevent traffic
1580 * from passing.
1581 */
1582 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1583
1576 spin_lock_bh(&vsi->mac_filter_hash_lock); 1584 spin_lock_bh(&vsi->mac_filter_hash_lock);
1577 i40e_del_mac_filter(vsi, netdev->dev_addr); 1585 i40e_del_mac_filter(vsi, netdev->dev_addr);
1578 i40e_add_mac_filter(vsi, addr->sa_data); 1586 i40e_add_mac_filter(vsi, addr->sa_data);
1579 spin_unlock_bh(&vsi->mac_filter_hash_lock); 1587 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1580 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1581 if (vsi->type == I40E_VSI_MAIN) { 1588 if (vsi->type == I40E_VSI_MAIN) {
1582 i40e_status ret; 1589 i40e_status ret;
1583 1590
@@ -1923,6 +1930,14 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
1923 struct i40e_netdev_priv *np = netdev_priv(netdev); 1930 struct i40e_netdev_priv *np = netdev_priv(netdev);
1924 struct i40e_vsi *vsi = np->vsi; 1931 struct i40e_vsi *vsi = np->vsi;
1925 1932
1933 /* Under some circumstances, we might receive a request to delete
1934 * our own device address from our uc list. Because we store the
1935 * device address in the VSI's MAC/VLAN filter list, we need to ignore
1936 * such requests and not delete our device address from this list.
1937 */
1938 if (ether_addr_equal(addr, netdev->dev_addr))
1939 return 0;
1940
1926 i40e_del_mac_filter(vsi, addr); 1941 i40e_del_mac_filter(vsi, addr);
1927 1942
1928 return 0; 1943 return 0;
@@ -6038,8 +6053,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6038 /* Set Bit 7 to be valid */ 6053 /* Set Bit 7 to be valid */
6039 mode = I40E_AQ_SET_SWITCH_BIT7_VALID; 6054 mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6040 6055
6041 /* Set L4type to both TCP and UDP support */ 6056 /* Set L4type for TCP support */
6042 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_BOTH; 6057 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6043 6058
6044 /* Set cloud filter mode */ 6059 /* Set cloud filter mode */
6045 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL; 6060 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
@@ -6969,18 +6984,18 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
6969 is_valid_ether_addr(filter->src_mac)) || 6984 is_valid_ether_addr(filter->src_mac)) ||
6970 (is_multicast_ether_addr(filter->dst_mac) && 6985 (is_multicast_ether_addr(filter->dst_mac) &&
6971 is_multicast_ether_addr(filter->src_mac))) 6986 is_multicast_ether_addr(filter->src_mac)))
6972 return -EINVAL; 6987 return -EOPNOTSUPP;
6973 6988
6974 /* Make sure port is specified, otherwise bail out, for channel 6989 /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
6975 * specific cloud filter needs 'L4 port' to be non-zero 6990 * ports are not supported via big buffer now.
6976 */ 6991 */
6977 if (!filter->dst_port) 6992 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
6978 return -EINVAL; 6993 return -EOPNOTSUPP;
6979 6994
6980 /* adding filter using src_port/src_ip is not supported at this stage */ 6995 /* adding filter using src_port/src_ip is not supported at this stage */
6981 if (filter->src_port || filter->src_ipv4 || 6996 if (filter->src_port || filter->src_ipv4 ||
6982 !ipv6_addr_any(&filter->ip.v6.src_ip6)) 6997 !ipv6_addr_any(&filter->ip.v6.src_ip6))
6983 return -EINVAL; 6998 return -EOPNOTSUPP;
6984 6999
6985 /* copy element needed to add cloud filter from filter */ 7000 /* copy element needed to add cloud filter from filter */
6986 i40e_set_cld_element(filter, &cld_filter.element); 7001 i40e_set_cld_element(filter, &cld_filter.element);
@@ -6991,7 +7006,7 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
6991 is_multicast_ether_addr(filter->src_mac)) { 7006 is_multicast_ether_addr(filter->src_mac)) {
6992 /* MAC + IP : unsupported mode */ 7007 /* MAC + IP : unsupported mode */
6993 if (filter->dst_ipv4) 7008 if (filter->dst_ipv4)
6994 return -EINVAL; 7009 return -EOPNOTSUPP;
6995 7010
6996 /* since we validated that L4 port must be valid before 7011 /* since we validated that L4 port must be valid before
6997 * we get here, start with respective "flags" value 7012 * we get here, start with respective "flags" value
@@ -7356,7 +7371,7 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
7356 7371
7357 if (tc < 0) { 7372 if (tc < 0) {
7358 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n"); 7373 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
7359 return -EINVAL; 7374 return -EOPNOTSUPP;
7360 } 7375 }
7361 7376
7362 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || 7377 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
@@ -7490,6 +7505,8 @@ static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
7490{ 7505{
7491 struct i40e_vsi *vsi = np->vsi; 7506 struct i40e_vsi *vsi = np->vsi;
7492 7507
7508 if (!tc_can_offload(vsi->netdev))
7509 return -EOPNOTSUPP;
7493 if (cls_flower->common.chain_index) 7510 if (cls_flower->common.chain_index)
7494 return -EOPNOTSUPP; 7511 return -EOPNOTSUPP;
7495 7512
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 4566d66ffc7c..5bc2748ac468 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3047,10 +3047,30 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
3047 /* Walk through fragments adding latest fragment, testing it, and 3047 /* Walk through fragments adding latest fragment, testing it, and
3048 * then removing stale fragments from the sum. 3048 * then removing stale fragments from the sum.
3049 */ 3049 */
3050 stale = &skb_shinfo(skb)->frags[0]; 3050 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
3051 for (;;) { 3051 int stale_size = skb_frag_size(stale);
3052
3052 sum += skb_frag_size(frag++); 3053 sum += skb_frag_size(frag++);
3053 3054
3055 /* The stale fragment may present us with a smaller
3056 * descriptor than the actual fragment size. To account
3057 * for that we need to remove all the data on the front and
3058 * figure out what the remainder would be in the last
3059 * descriptor associated with the fragment.
3060 */
3061 if (stale_size > I40E_MAX_DATA_PER_TXD) {
3062 int align_pad = -(stale->page_offset) &
3063 (I40E_MAX_READ_REQ_SIZE - 1);
3064
3065 sum -= align_pad;
3066 stale_size -= align_pad;
3067
3068 do {
3069 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3070 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3071 } while (stale_size > I40E_MAX_DATA_PER_TXD);
3072 }
3073
3054 /* if sum is negative we failed to make sufficient progress */ 3074 /* if sum is negative we failed to make sufficient progress */
3055 if (sum < 0) 3075 if (sum < 0)
3056 return true; 3076 return true;
@@ -3058,7 +3078,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
3058 if (!nr_frags--) 3078 if (!nr_frags--)
3059 break; 3079 break;
3060 3080
3061 sum -= skb_frag_size(stale++); 3081 sum -= stale_size;
3062 } 3082 }
3063 3083
3064 return false; 3084 return false;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 50864f99446d..1ba29bb85b67 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -2012,10 +2012,30 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
2012 /* Walk through fragments adding latest fragment, testing it, and 2012 /* Walk through fragments adding latest fragment, testing it, and
2013 * then removing stale fragments from the sum. 2013 * then removing stale fragments from the sum.
2014 */ 2014 */
2015 stale = &skb_shinfo(skb)->frags[0]; 2015 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2016 for (;;) { 2016 int stale_size = skb_frag_size(stale);
2017
2017 sum += skb_frag_size(frag++); 2018 sum += skb_frag_size(frag++);
2018 2019
2020 /* The stale fragment may present us with a smaller
2021 * descriptor than the actual fragment size. To account
2022 * for that we need to remove all the data on the front and
2023 * figure out what the remainder would be in the last
2024 * descriptor associated with the fragment.
2025 */
2026 if (stale_size > I40E_MAX_DATA_PER_TXD) {
2027 int align_pad = -(stale->page_offset) &
2028 (I40E_MAX_READ_REQ_SIZE - 1);
2029
2030 sum -= align_pad;
2031 stale_size -= align_pad;
2032
2033 do {
2034 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
2035 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
2036 } while (stale_size > I40E_MAX_DATA_PER_TXD);
2037 }
2038
2019 /* if sum is negative we failed to make sufficient progress */ 2039 /* if sum is negative we failed to make sufficient progress */
2020 if (sum < 0) 2040 if (sum < 0)
2021 return true; 2041 return true;
@@ -2023,7 +2043,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
2023 if (!nr_frags--) 2043 if (!nr_frags--)
2024 break; 2044 break;
2025 2045
2026 sum -= skb_frag_size(stale++); 2046 sum -= stale_size;
2027 } 2047 }
2028 2048
2029 return false; 2049 return false;
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index c9798210fa0f..0495487f7b42 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -344,7 +344,8 @@ static int orion_mdio_probe(struct platform_device *pdev)
344 dev->regs + MVMDIO_ERR_INT_MASK); 344 dev->regs + MVMDIO_ERR_INT_MASK);
345 345
346 } else if (dev->err_interrupt == -EPROBE_DEFER) { 346 } else if (dev->err_interrupt == -EPROBE_DEFER) {
347 return -EPROBE_DEFER; 347 ret = -EPROBE_DEFER;
348 goto out_mdio;
348 } 349 }
349 350
350 if (pdev->dev.of_node) 351 if (pdev->dev.of_node)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index bc93b69cfd1e..a539263cd79c 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1214,6 +1214,10 @@ static void mvneta_port_disable(struct mvneta_port *pp)
1214 val &= ~MVNETA_GMAC0_PORT_ENABLE; 1214 val &= ~MVNETA_GMAC0_PORT_ENABLE;
1215 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); 1215 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1216 1216
1217 pp->link = 0;
1218 pp->duplex = -1;
1219 pp->speed = 0;
1220
1217 udelay(200); 1221 udelay(200);
1218} 1222}
1219 1223
@@ -1958,9 +1962,9 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
1958 1962
1959 if (!mvneta_rxq_desc_is_first_last(rx_status) || 1963 if (!mvneta_rxq_desc_is_first_last(rx_status) ||
1960 (rx_status & MVNETA_RXD_ERR_SUMMARY)) { 1964 (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
1965 mvneta_rx_error(pp, rx_desc);
1961err_drop_frame: 1966err_drop_frame:
1962 dev->stats.rx_errors++; 1967 dev->stats.rx_errors++;
1963 mvneta_rx_error(pp, rx_desc);
1964 /* leave the descriptor untouched */ 1968 /* leave the descriptor untouched */
1965 continue; 1969 continue;
1966 } 1970 }
@@ -3011,7 +3015,7 @@ static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
3011{ 3015{
3012 int queue; 3016 int queue;
3013 3017
3014 for (queue = 0; queue < txq_number; queue++) 3018 for (queue = 0; queue < rxq_number; queue++)
3015 mvneta_rxq_deinit(pp, &pp->rxqs[queue]); 3019 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
3016} 3020}
3017 3021
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index d83a78be98a2..634b2f41cc9e 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -85,7 +85,7 @@
85 85
86/* RSS Registers */ 86/* RSS Registers */
87#define MVPP22_RSS_INDEX 0x1500 87#define MVPP22_RSS_INDEX 0x1500
88#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) ((idx) << 8) 88#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx)
89#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8) 89#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8)
90#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16) 90#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16)
91#define MVPP22_RSS_TABLE_ENTRY 0x1508 91#define MVPP22_RSS_TABLE_ENTRY 0x1508
@@ -5598,7 +5598,7 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
5598 u32 txq_dma; 5598 u32 txq_dma;
5599 5599
5600 /* Allocate memory for TX descriptors */ 5600 /* Allocate memory for TX descriptors */
5601 aggr_txq->descs = dma_alloc_coherent(&pdev->dev, 5601 aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
5602 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, 5602 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
5603 &aggr_txq->descs_dma, GFP_KERNEL); 5603 &aggr_txq->descs_dma, GFP_KERNEL);
5604 if (!aggr_txq->descs) 5604 if (!aggr_txq->descs)
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 6e423f098a60..31efc47c847e 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -4081,7 +4081,6 @@ static void skge_remove(struct pci_dev *pdev)
4081 if (hw->ports > 1) { 4081 if (hw->ports > 1) {
4082 skge_write32(hw, B0_IMSK, 0); 4082 skge_write32(hw, B0_IMSK, 0);
4083 skge_read32(hw, B0_IMSK); 4083 skge_read32(hw, B0_IMSK);
4084 free_irq(pdev->irq, hw);
4085 } 4084 }
4086 spin_unlock_irq(&hw->hw_lock); 4085 spin_unlock_irq(&hw->hw_lock);
4087 4086
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 54adfd967858..fc67e35b253e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1961,11 +1961,12 @@ static int mtk_hw_init(struct mtk_eth *eth)
1961 /* set GE2 TUNE */ 1961 /* set GE2 TUNE */
1962 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0); 1962 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
1963 1963
1964 /* GE1, Force 1000M/FD, FC ON */ 1964 /* Set linkdown as the default for each GMAC. Its own MCR would be set
1965 mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0)); 1965 * up with the more appropriate value when mtk_phy_link_adjust call is
1966 1966 * being invoked.
1967 /* GE2, Force 1000M/FD, FC ON */ 1967 */
1968 mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1)); 1968 for (i = 0; i < MTK_MAC_COUNT; i++)
1969 mtk_w32(eth, 0, MTK_MAC_MCR(i));
1969 1970
1970 /* Indicates CDM to parse the MTK special tag from CPU 1971 /* Indicates CDM to parse the MTK special tag from CPU
1971 * which also is working out for untag packets. 1972 * which also is working out for untag packets.
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index e0eb695318e6..1fa4849a6f56 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -188,7 +188,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
188 struct net_device *dev = mdev->pndev[port]; 188 struct net_device *dev = mdev->pndev[port];
189 struct mlx4_en_priv *priv = netdev_priv(dev); 189 struct mlx4_en_priv *priv = netdev_priv(dev);
190 struct net_device_stats *stats = &dev->stats; 190 struct net_device_stats *stats = &dev->stats;
191 struct mlx4_cmd_mailbox *mailbox; 191 struct mlx4_cmd_mailbox *mailbox, *mailbox_priority;
192 u64 in_mod = reset << 8 | port; 192 u64 in_mod = reset << 8 | port;
193 int err; 193 int err;
194 int i, counter_index; 194 int i, counter_index;
@@ -198,6 +198,13 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
198 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); 198 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
199 if (IS_ERR(mailbox)) 199 if (IS_ERR(mailbox))
200 return PTR_ERR(mailbox); 200 return PTR_ERR(mailbox);
201
202 mailbox_priority = mlx4_alloc_cmd_mailbox(mdev->dev);
203 if (IS_ERR(mailbox_priority)) {
204 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
205 return PTR_ERR(mailbox_priority);
206 }
207
201 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0, 208 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
202 MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, 209 MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
203 MLX4_CMD_NATIVE); 210 MLX4_CMD_NATIVE);
@@ -206,6 +213,28 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
206 213
207 mlx4_en_stats = mailbox->buf; 214 mlx4_en_stats = mailbox->buf;
208 215
216 memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
217 counter_index = mlx4_get_default_counter_index(mdev->dev, port);
218 err = mlx4_get_counter_stats(mdev->dev, counter_index,
219 &tmp_counter_stats, reset);
220
221 /* 0xffs indicates invalid value */
222 memset(mailbox_priority->buf, 0xff,
223 sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
224
225 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
226 memset(mailbox_priority->buf, 0,
227 sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
228 err = mlx4_cmd_box(mdev->dev, 0, mailbox_priority->dma,
229 in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
230 0, MLX4_CMD_DUMP_ETH_STATS,
231 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
232 if (err)
233 goto out;
234 }
235
236 flowstats = mailbox_priority->buf;
237
209 spin_lock_bh(&priv->stats_lock); 238 spin_lock_bh(&priv->stats_lock);
210 239
211 mlx4_en_fold_software_stats(dev); 240 mlx4_en_fold_software_stats(dev);
@@ -345,31 +374,6 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
345 priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan); 374 priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan);
346 priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan); 375 priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan);
347 376
348 spin_unlock_bh(&priv->stats_lock);
349
350 memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
351 counter_index = mlx4_get_default_counter_index(mdev->dev, port);
352 err = mlx4_get_counter_stats(mdev->dev, counter_index,
353 &tmp_counter_stats, reset);
354
355 /* 0xffs indicates invalid value */
356 memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
357
358 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
359 memset(mailbox->buf, 0,
360 sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
361 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma,
362 in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
363 0, MLX4_CMD_DUMP_ETH_STATS,
364 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
365 if (err)
366 goto out;
367 }
368
369 flowstats = mailbox->buf;
370
371 spin_lock_bh(&priv->stats_lock);
372
373 if (tmp_counter_stats.counter_mode == 0) { 377 if (tmp_counter_stats.counter_mode == 0) {
374 priv->pf_stats.rx_bytes = be64_to_cpu(tmp_counter_stats.rx_bytes); 378 priv->pf_stats.rx_bytes = be64_to_cpu(tmp_counter_stats.rx_bytes);
375 priv->pf_stats.tx_bytes = be64_to_cpu(tmp_counter_stats.tx_bytes); 379 priv->pf_stats.tx_bytes = be64_to_cpu(tmp_counter_stats.tx_bytes);
@@ -410,6 +414,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
410 414
411out: 415out:
412 mlx4_free_cmd_mailbox(mdev->dev, mailbox); 416 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
417 mlx4_free_cmd_mailbox(mdev->dev, mailbox_priority);
413 return err; 418 return err;
414} 419}
415 420
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 88699b181946..946d9db7c8c2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -185,7 +185,7 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
185 if (priv->mdev->dev->caps.flags & 185 if (priv->mdev->dev->caps.flags &
186 MLX4_DEV_CAP_FLAG_UC_LOOPBACK) { 186 MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
187 buf[3] = mlx4_en_test_registers(priv); 187 buf[3] = mlx4_en_test_registers(priv);
188 if (priv->port_up) 188 if (priv->port_up && dev->mtu >= MLX4_SELFTEST_LB_MIN_MTU)
189 buf[4] = mlx4_en_test_loopback(priv); 189 buf[4] = mlx4_en_test_loopback(priv);
190 } 190 }
191 191
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 1856e279a7e0..2b72677eccd4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -153,6 +153,9 @@
153#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN) 153#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN)
154#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN) 154#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN)
155#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN) 155#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
156#define PREAMBLE_LEN 8
157#define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \
158 ETH_HLEN + PREAMBLE_LEN)
156 159
157#define MLX4_EN_MIN_MTU 46 160#define MLX4_EN_MIN_MTU 46
158/* VLAN_HLEN is added twice,to support skb vlan tagged with multiple 161/* VLAN_HLEN is added twice,to support skb vlan tagged with multiple
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 04304dd894c6..606a0e0beeae 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -611,7 +611,6 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
611 MLX4_MAX_PORTS; 611 MLX4_MAX_PORTS;
612 else 612 else
613 res_alloc->guaranteed[t] = 0; 613 res_alloc->guaranteed[t] = 0;
614 res_alloc->res_free -= res_alloc->guaranteed[t];
615 break; 614 break;
616 default: 615 default:
617 break; 616 break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 1fffdebbc9e8..e9a1fbcc4adf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -362,7 +362,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
362 case MLX5_CMD_OP_QUERY_VPORT_COUNTER: 362 case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
363 case MLX5_CMD_OP_ALLOC_Q_COUNTER: 363 case MLX5_CMD_OP_ALLOC_Q_COUNTER:
364 case MLX5_CMD_OP_QUERY_Q_COUNTER: 364 case MLX5_CMD_OP_QUERY_Q_COUNTER:
365 case MLX5_CMD_OP_SET_RATE_LIMIT: 365 case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
366 case MLX5_CMD_OP_QUERY_RATE_LIMIT: 366 case MLX5_CMD_OP_QUERY_RATE_LIMIT:
367 case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT: 367 case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
368 case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT: 368 case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
@@ -505,7 +505,7 @@ const char *mlx5_command_str(int command)
505 MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER); 505 MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
506 MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER); 506 MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
507 MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER); 507 MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
508 MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT); 508 MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
509 MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT); 509 MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
510 MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT); 510 MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
511 MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT); 511 MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index c0872b3284cb..c2d89bfa1a70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -82,6 +82,9 @@
82 max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req) 82 max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
83#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6) 83#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
84#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8) 84#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)
85#define MLX5E_MPWQE_STRIDE_SZ(mdev, cqe_cmprs) \
86 (cqe_cmprs ? MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) : \
87 MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev))
85 88
86#define MLX5_MPWRQ_LOG_WQE_SZ 18 89#define MLX5_MPWRQ_LOG_WQE_SZ 18
87#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \ 90#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
@@ -590,6 +593,7 @@ struct mlx5e_channel {
590 struct mlx5_core_dev *mdev; 593 struct mlx5_core_dev *mdev;
591 struct hwtstamp_config *tstamp; 594 struct hwtstamp_config *tstamp;
592 int ix; 595 int ix;
596 int cpu;
593}; 597};
594 598
595struct mlx5e_channels { 599struct mlx5e_channels {
@@ -891,7 +895,7 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
891 u16 vid); 895 u16 vid);
892void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv); 896void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
893void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv); 897void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
894void mlx5e_timestamp_set(struct mlx5e_priv *priv); 898void mlx5e_timestamp_init(struct mlx5e_priv *priv);
895 899
896struct mlx5e_redirect_rqt_param { 900struct mlx5e_redirect_rqt_param {
897 bool is_rss; 901 bool is_rss;
@@ -935,8 +939,9 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
935 u8 cq_period_mode); 939 u8 cq_period_mode);
936void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, 940void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
937 u8 cq_period_mode); 941 u8 cq_period_mode);
938void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev, 942void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
939 struct mlx5e_params *params, u8 rq_type); 943 struct mlx5e_params *params,
944 u8 rq_type);
940 945
941static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) 946static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
942{ 947{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index c6d90b6dd80e..3d46ef48d5b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -274,6 +274,7 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
274static int mlx5e_dbcnl_validate_ets(struct net_device *netdev, 274static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
275 struct ieee_ets *ets) 275 struct ieee_ets *ets)
276{ 276{
277 bool have_ets_tc = false;
277 int bw_sum = 0; 278 int bw_sum = 0;
278 int i; 279 int i;
279 280
@@ -288,11 +289,14 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
288 } 289 }
289 290
290 /* Validate Bandwidth Sum */ 291 /* Validate Bandwidth Sum */
291 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 292 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
292 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) 293 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
294 have_ets_tc = true;
293 bw_sum += ets->tc_tx_bw[i]; 295 bw_sum += ets->tc_tx_bw[i];
296 }
297 }
294 298
295 if (bw_sum != 0 && bw_sum != 100) { 299 if (have_ets_tc && bw_sum != 100) {
296 netdev_err(netdev, 300 netdev_err(netdev,
297 "Failed to validate ETS: BW sum is illegal\n"); 301 "Failed to validate ETS: BW sum is illegal\n");
298 return -EINVAL; 302 return -EINVAL;
@@ -918,8 +922,9 @@ static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
918 922
919static void mlx5e_ets_init(struct mlx5e_priv *priv) 923static void mlx5e_ets_init(struct mlx5e_priv *priv)
920{ 924{
921 int i;
922 struct ieee_ets ets; 925 struct ieee_ets ets;
926 int err;
927 int i;
923 928
924 if (!MLX5_CAP_GEN(priv->mdev, ets)) 929 if (!MLX5_CAP_GEN(priv->mdev, ets))
925 return; 930 return;
@@ -932,11 +937,16 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
932 ets.prio_tc[i] = i; 937 ets.prio_tc[i] = i;
933 } 938 }
934 939
935 /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */ 940 if (ets.ets_cap > 1) {
936 ets.prio_tc[0] = 1; 941 /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
937 ets.prio_tc[1] = 0; 942 ets.prio_tc[0] = 1;
943 ets.prio_tc[1] = 0;
944 }
938 945
939 mlx5e_dcbnl_ieee_setets_core(priv, &ets); 946 err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
947 if (err)
948 netdev_err(priv->netdev,
949 "%s, Failed to init ETS: %d\n", __func__, err);
940} 950}
941 951
942enum { 952enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 23425f028405..ea5fff2c3143 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -207,8 +207,7 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
207 return; 207 return;
208 208
209 mutex_lock(&priv->state_lock); 209 mutex_lock(&priv->state_lock);
210 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) 210 mlx5e_update_stats(priv, true);
211 mlx5e_update_stats(priv, true);
212 mutex_unlock(&priv->state_lock); 211 mutex_unlock(&priv->state_lock);
213 212
214 for (i = 0; i < mlx5e_num_stats_grps; i++) 213 for (i = 0; i < mlx5e_num_stats_grps; i++)
@@ -1523,8 +1522,10 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
1523 new_channels.params = priv->channels.params; 1522 new_channels.params = priv->channels.params;
1524 MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val); 1523 MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
1525 1524
1526 mlx5e_set_rq_type_params(priv->mdev, &new_channels.params, 1525 new_channels.params.mpwqe_log_stride_sz =
1527 new_channels.params.rq_wq_type); 1526 MLX5E_MPWQE_STRIDE_SZ(priv->mdev, new_val);
1527 new_channels.params.mpwqe_log_num_strides =
1528 MLX5_MPWRQ_LOG_WQE_SZ - new_channels.params.mpwqe_log_stride_sz;
1528 1529
1529 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { 1530 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
1530 priv->channels.params = new_channels.params; 1531 priv->channels.params = new_channels.params;
@@ -1536,6 +1537,10 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
1536 return err; 1537 return err;
1537 1538
1538 mlx5e_switch_priv_channels(priv, &new_channels, NULL); 1539 mlx5e_switch_priv_channels(priv, &new_channels, NULL);
1540 mlx5e_dbg(DRV, priv, "MLX5E: RxCqeCmprss was turned %s\n",
1541 MLX5E_GET_PFLAG(&priv->channels.params,
1542 MLX5E_PFLAG_RX_CQE_COMPRESS) ? "ON" : "OFF");
1543
1539 return 0; 1544 return 0;
1540} 1545}
1541 1546
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d2b057a3e512..d8aefeed124d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -71,11 +71,6 @@ struct mlx5e_channel_param {
71 struct mlx5e_cq_param icosq_cq; 71 struct mlx5e_cq_param icosq_cq;
72}; 72};
73 73
74static int mlx5e_get_node(struct mlx5e_priv *priv, int ix)
75{
76 return pci_irq_get_node(priv->mdev->pdev, MLX5_EQ_VEC_COMP_BASE + ix);
77}
78
79static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) 74static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
80{ 75{
81 return MLX5_CAP_GEN(mdev, striding_rq) && 76 return MLX5_CAP_GEN(mdev, striding_rq) &&
@@ -83,8 +78,8 @@ static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
83 MLX5_CAP_ETH(mdev, reg_umr_sq); 78 MLX5_CAP_ETH(mdev, reg_umr_sq);
84} 79}
85 80
86void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev, 81void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
87 struct mlx5e_params *params, u8 rq_type) 82 struct mlx5e_params *params, u8 rq_type)
88{ 83{
89 params->rq_wq_type = rq_type; 84 params->rq_wq_type = rq_type;
90 params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; 85 params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
@@ -93,10 +88,8 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
93 params->log_rq_size = is_kdump_kernel() ? 88 params->log_rq_size = is_kdump_kernel() ?
94 MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW : 89 MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
95 MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW; 90 MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
96 params->mpwqe_log_stride_sz = 91 params->mpwqe_log_stride_sz = MLX5E_MPWQE_STRIDE_SZ(mdev,
97 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ? 92 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
98 MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) :
99 MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
100 params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - 93 params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
101 params->mpwqe_log_stride_sz; 94 params->mpwqe_log_stride_sz;
102 break; 95 break;
@@ -120,13 +113,14 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
120 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)); 113 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
121} 114}
122 115
123static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params) 116static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev,
117 struct mlx5e_params *params)
124{ 118{
125 u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) && 119 u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
126 !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ? 120 !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ?
127 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ : 121 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
128 MLX5_WQ_TYPE_LINKED_LIST; 122 MLX5_WQ_TYPE_LINKED_LIST;
129 mlx5e_set_rq_type_params(mdev, params, rq_type); 123 mlx5e_init_rq_type_params(mdev, params, rq_type);
130} 124}
131 125
132static void mlx5e_update_carrier(struct mlx5e_priv *priv) 126static void mlx5e_update_carrier(struct mlx5e_priv *priv)
@@ -444,17 +438,16 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
444 int wq_sz = mlx5_wq_ll_get_size(&rq->wq); 438 int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
445 int mtt_sz = mlx5e_get_wqe_mtt_sz(); 439 int mtt_sz = mlx5e_get_wqe_mtt_sz();
446 int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1; 440 int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
447 int node = mlx5e_get_node(c->priv, c->ix);
448 int i; 441 int i;
449 442
450 rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info), 443 rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
451 GFP_KERNEL, node); 444 GFP_KERNEL, cpu_to_node(c->cpu));
452 if (!rq->mpwqe.info) 445 if (!rq->mpwqe.info)
453 goto err_out; 446 goto err_out;
454 447
455 /* We allocate more than mtt_sz as we will align the pointer */ 448 /* We allocate more than mtt_sz as we will align the pointer */
456 rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, 449 rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
457 GFP_KERNEL, node); 450 cpu_to_node(c->cpu));
458 if (unlikely(!rq->mpwqe.mtt_no_align)) 451 if (unlikely(!rq->mpwqe.mtt_no_align))
459 goto err_free_wqe_info; 452 goto err_free_wqe_info;
460 453
@@ -562,7 +555,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
562 int err; 555 int err;
563 int i; 556 int i;
564 557
565 rqp->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); 558 rqp->wq.db_numa_node = cpu_to_node(c->cpu);
566 559
567 err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq, 560 err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
568 &rq->wq_ctrl); 561 &rq->wq_ctrl);
@@ -629,8 +622,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
629 default: /* MLX5_WQ_TYPE_LINKED_LIST */ 622 default: /* MLX5_WQ_TYPE_LINKED_LIST */
630 rq->wqe.frag_info = 623 rq->wqe.frag_info =
631 kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info), 624 kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
632 GFP_KERNEL, 625 GFP_KERNEL, cpu_to_node(c->cpu));
633 mlx5e_get_node(c->priv, c->ix));
634 if (!rq->wqe.frag_info) { 626 if (!rq->wqe.frag_info) {
635 err = -ENOMEM; 627 err = -ENOMEM;
636 goto err_rq_wq_destroy; 628 goto err_rq_wq_destroy;
@@ -1000,13 +992,13 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
1000 sq->uar_map = mdev->mlx5e_res.bfreg.map; 992 sq->uar_map = mdev->mlx5e_res.bfreg.map;
1001 sq->min_inline_mode = params->tx_min_inline_mode; 993 sq->min_inline_mode = params->tx_min_inline_mode;
1002 994
1003 param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); 995 param->wq.db_numa_node = cpu_to_node(c->cpu);
1004 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); 996 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
1005 if (err) 997 if (err)
1006 return err; 998 return err;
1007 sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; 999 sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
1008 1000
1009 err = mlx5e_alloc_xdpsq_db(sq, mlx5e_get_node(c->priv, c->ix)); 1001 err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
1010 if (err) 1002 if (err)
1011 goto err_sq_wq_destroy; 1003 goto err_sq_wq_destroy;
1012 1004
@@ -1053,13 +1045,13 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
1053 sq->channel = c; 1045 sq->channel = c;
1054 sq->uar_map = mdev->mlx5e_res.bfreg.map; 1046 sq->uar_map = mdev->mlx5e_res.bfreg.map;
1055 1047
1056 param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); 1048 param->wq.db_numa_node = cpu_to_node(c->cpu);
1057 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); 1049 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
1058 if (err) 1050 if (err)
1059 return err; 1051 return err;
1060 sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; 1052 sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
1061 1053
1062 err = mlx5e_alloc_icosq_db(sq, mlx5e_get_node(c->priv, c->ix)); 1054 err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
1063 if (err) 1055 if (err)
1064 goto err_sq_wq_destroy; 1056 goto err_sq_wq_destroy;
1065 1057
@@ -1126,13 +1118,13 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
1126 if (MLX5_IPSEC_DEV(c->priv->mdev)) 1118 if (MLX5_IPSEC_DEV(c->priv->mdev))
1127 set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); 1119 set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
1128 1120
1129 param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); 1121 param->wq.db_numa_node = cpu_to_node(c->cpu);
1130 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); 1122 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
1131 if (err) 1123 if (err)
1132 return err; 1124 return err;
1133 sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; 1125 sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
1134 1126
1135 err = mlx5e_alloc_txqsq_db(sq, mlx5e_get_node(c->priv, c->ix)); 1127 err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
1136 if (err) 1128 if (err)
1137 goto err_sq_wq_destroy; 1129 goto err_sq_wq_destroy;
1138 1130
@@ -1504,8 +1496,8 @@ static int mlx5e_alloc_cq(struct mlx5e_channel *c,
1504 struct mlx5_core_dev *mdev = c->priv->mdev; 1496 struct mlx5_core_dev *mdev = c->priv->mdev;
1505 int err; 1497 int err;
1506 1498
1507 param->wq.buf_numa_node = mlx5e_get_node(c->priv, c->ix); 1499 param->wq.buf_numa_node = cpu_to_node(c->cpu);
1508 param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); 1500 param->wq.db_numa_node = cpu_to_node(c->cpu);
1509 param->eq_ix = c->ix; 1501 param->eq_ix = c->ix;
1510 1502
1511 err = mlx5e_alloc_cq_common(mdev, param, cq); 1503 err = mlx5e_alloc_cq_common(mdev, param, cq);
@@ -1604,6 +1596,11 @@ static void mlx5e_close_cq(struct mlx5e_cq *cq)
1604 mlx5e_free_cq(cq); 1596 mlx5e_free_cq(cq);
1605} 1597}
1606 1598
1599static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
1600{
1601 return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
1602}
1603
1607static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, 1604static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
1608 struct mlx5e_params *params, 1605 struct mlx5e_params *params,
1609 struct mlx5e_channel_param *cparam) 1606 struct mlx5e_channel_param *cparam)
@@ -1752,12 +1749,13 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1752{ 1749{
1753 struct mlx5e_cq_moder icocq_moder = {0, 0}; 1750 struct mlx5e_cq_moder icocq_moder = {0, 0};
1754 struct net_device *netdev = priv->netdev; 1751 struct net_device *netdev = priv->netdev;
1752 int cpu = mlx5e_get_cpu(priv, ix);
1755 struct mlx5e_channel *c; 1753 struct mlx5e_channel *c;
1756 unsigned int irq; 1754 unsigned int irq;
1757 int err; 1755 int err;
1758 int eqn; 1756 int eqn;
1759 1757
1760 c = kzalloc_node(sizeof(*c), GFP_KERNEL, mlx5e_get_node(priv, ix)); 1758 c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
1761 if (!c) 1759 if (!c)
1762 return -ENOMEM; 1760 return -ENOMEM;
1763 1761
@@ -1765,6 +1763,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1765 c->mdev = priv->mdev; 1763 c->mdev = priv->mdev;
1766 c->tstamp = &priv->tstamp; 1764 c->tstamp = &priv->tstamp;
1767 c->ix = ix; 1765 c->ix = ix;
1766 c->cpu = cpu;
1768 c->pdev = &priv->mdev->pdev->dev; 1767 c->pdev = &priv->mdev->pdev->dev;
1769 c->netdev = priv->netdev; 1768 c->netdev = priv->netdev;
1770 c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); 1769 c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
@@ -1853,8 +1852,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
1853 for (tc = 0; tc < c->num_tc; tc++) 1852 for (tc = 0; tc < c->num_tc; tc++)
1854 mlx5e_activate_txqsq(&c->sq[tc]); 1853 mlx5e_activate_txqsq(&c->sq[tc]);
1855 mlx5e_activate_rq(&c->rq); 1854 mlx5e_activate_rq(&c->rq);
1856 netif_set_xps_queue(c->netdev, 1855 netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
1857 mlx5_get_vector_affinity(c->priv->mdev, c->ix), c->ix);
1858} 1856}
1859 1857
1860static void mlx5e_deactivate_channel(struct mlx5e_channel *c) 1858static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
@@ -2671,7 +2669,7 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
2671 netif_carrier_on(netdev); 2669 netif_carrier_on(netdev);
2672} 2670}
2673 2671
2674void mlx5e_timestamp_set(struct mlx5e_priv *priv) 2672void mlx5e_timestamp_init(struct mlx5e_priv *priv)
2675{ 2673{
2676 priv->tstamp.tx_type = HWTSTAMP_TX_OFF; 2674 priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
2677 priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE; 2675 priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
@@ -2692,7 +2690,6 @@ int mlx5e_open_locked(struct net_device *netdev)
2692 mlx5e_activate_priv_channels(priv); 2690 mlx5e_activate_priv_channels(priv);
2693 if (priv->profile->update_carrier) 2691 if (priv->profile->update_carrier)
2694 priv->profile->update_carrier(priv); 2692 priv->profile->update_carrier(priv);
2695 mlx5e_timestamp_set(priv);
2696 2693
2697 if (priv->profile->update_stats) 2694 if (priv->profile->update_stats)
2698 queue_delayed_work(priv->wq, &priv->update_stats_work, 0); 2695 queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
@@ -3221,12 +3218,12 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
3221 return 0; 3218 return 0;
3222} 3219}
3223 3220
3224#define MLX5E_SET_FEATURE(netdev, feature, enable) \ 3221#define MLX5E_SET_FEATURE(features, feature, enable) \
3225 do { \ 3222 do { \
3226 if (enable) \ 3223 if (enable) \
3227 netdev->features |= feature; \ 3224 *features |= feature; \
3228 else \ 3225 else \
3229 netdev->features &= ~feature; \ 3226 *features &= ~feature; \
3230 } while (0) 3227 } while (0)
3231 3228
3232typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable); 3229typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
@@ -3349,6 +3346,7 @@ static int set_feature_arfs(struct net_device *netdev, bool enable)
3349#endif 3346#endif
3350 3347
3351static int mlx5e_handle_feature(struct net_device *netdev, 3348static int mlx5e_handle_feature(struct net_device *netdev,
3349 netdev_features_t *features,
3352 netdev_features_t wanted_features, 3350 netdev_features_t wanted_features,
3353 netdev_features_t feature, 3351 netdev_features_t feature,
3354 mlx5e_feature_handler feature_handler) 3352 mlx5e_feature_handler feature_handler)
@@ -3367,34 +3365,40 @@ static int mlx5e_handle_feature(struct net_device *netdev,
3367 return err; 3365 return err;
3368 } 3366 }
3369 3367
3370 MLX5E_SET_FEATURE(netdev, feature, enable); 3368 MLX5E_SET_FEATURE(features, feature, enable);
3371 return 0; 3369 return 0;
3372} 3370}
3373 3371
3374static int mlx5e_set_features(struct net_device *netdev, 3372static int mlx5e_set_features(struct net_device *netdev,
3375 netdev_features_t features) 3373 netdev_features_t features)
3376{ 3374{
3375 netdev_features_t oper_features = netdev->features;
3377 int err; 3376 int err;
3378 3377
3379 err = mlx5e_handle_feature(netdev, features, NETIF_F_LRO, 3378 err = mlx5e_handle_feature(netdev, &oper_features, features,
3380 set_feature_lro); 3379 NETIF_F_LRO, set_feature_lro);
3381 err |= mlx5e_handle_feature(netdev, features, 3380 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3382 NETIF_F_HW_VLAN_CTAG_FILTER, 3381 NETIF_F_HW_VLAN_CTAG_FILTER,
3383 set_feature_cvlan_filter); 3382 set_feature_cvlan_filter);
3384 err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC, 3383 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3385 set_feature_tc_num_filters); 3384 NETIF_F_HW_TC, set_feature_tc_num_filters);
3386 err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL, 3385 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3387 set_feature_rx_all); 3386 NETIF_F_RXALL, set_feature_rx_all);
3388 err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXFCS, 3387 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3389 set_feature_rx_fcs); 3388 NETIF_F_RXFCS, set_feature_rx_fcs);
3390 err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX, 3389 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3391 set_feature_rx_vlan); 3390 NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
3392#ifdef CONFIG_RFS_ACCEL 3391#ifdef CONFIG_RFS_ACCEL
3393 err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE, 3392 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3394 set_feature_arfs); 3393 NETIF_F_NTUPLE, set_feature_arfs);
3395#endif 3394#endif
3396 3395
3397 return err ? -EINVAL : 0; 3396 if (err) {
3397 netdev->features = oper_features;
3398 return -EINVAL;
3399 }
3400
3401 return 0;
3398} 3402}
3399 3403
3400static netdev_features_t mlx5e_fix_features(struct net_device *netdev, 3404static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
@@ -3679,6 +3683,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
3679 struct sk_buff *skb, 3683 struct sk_buff *skb,
3680 netdev_features_t features) 3684 netdev_features_t features)
3681{ 3685{
3686 unsigned int offset = 0;
3682 struct udphdr *udph; 3687 struct udphdr *udph;
3683 u8 proto; 3688 u8 proto;
3684 u16 port; 3689 u16 port;
@@ -3688,7 +3693,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
3688 proto = ip_hdr(skb)->protocol; 3693 proto = ip_hdr(skb)->protocol;
3689 break; 3694 break;
3690 case htons(ETH_P_IPV6): 3695 case htons(ETH_P_IPV6):
3691 proto = ipv6_hdr(skb)->nexthdr; 3696 proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
3692 break; 3697 break;
3693 default: 3698 default:
3694 goto out; 3699 goto out;
@@ -4140,6 +4145,8 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
4140 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); 4145 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
4141 INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work); 4146 INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
4142 INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work); 4147 INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
4148
4149 mlx5e_timestamp_init(priv);
4143} 4150}
4144 4151
4145static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) 4152static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 2c43606c26b5..3409d86eb06b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -877,6 +877,8 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
877 877
878 mlx5e_build_rep_params(mdev, &priv->channels.params); 878 mlx5e_build_rep_params(mdev, &priv->channels.params);
879 mlx5e_build_rep_netdev(netdev); 879 mlx5e_build_rep_netdev(netdev);
880
881 mlx5e_timestamp_init(priv);
880} 882}
881 883
882static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) 884static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
index e401d9d245f3..b69a705fd787 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -201,9 +201,15 @@ static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
201 return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER : 201 return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
202 MLX5E_AM_STATS_WORSE; 202 MLX5E_AM_STATS_WORSE;
203 203
204 if (!prev->ppms)
205 return curr->ppms ? MLX5E_AM_STATS_BETTER :
206 MLX5E_AM_STATS_SAME;
207
204 if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) 208 if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
205 return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER : 209 return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
206 MLX5E_AM_STATS_WORSE; 210 MLX5E_AM_STATS_WORSE;
211 if (!prev->epms)
212 return MLX5E_AM_STATS_SAME;
207 213
208 if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) 214 if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
209 return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER : 215 return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 1f1f8af87d4d..5a4608281f38 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -238,15 +238,19 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
238 int err = 0; 238 int err = 0;
239 239
240 /* Temporarily enable local_lb */ 240 /* Temporarily enable local_lb */
241 if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) { 241 err = mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb);
242 mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb); 242 if (err)
243 if (!lbtp->local_lb) 243 return err;
244 mlx5_nic_vport_update_local_lb(priv->mdev, true); 244
245 if (!lbtp->local_lb) {
246 err = mlx5_nic_vport_update_local_lb(priv->mdev, true);
247 if (err)
248 return err;
245 } 249 }
246 250
247 err = mlx5e_refresh_tirs(priv, true); 251 err = mlx5e_refresh_tirs(priv, true);
248 if (err) 252 if (err)
249 return err; 253 goto out;
250 254
251 lbtp->loopback_ok = false; 255 lbtp->loopback_ok = false;
252 init_completion(&lbtp->comp); 256 init_completion(&lbtp->comp);
@@ -256,16 +260,21 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
256 lbtp->pt.dev = priv->netdev; 260 lbtp->pt.dev = priv->netdev;
257 lbtp->pt.af_packet_priv = lbtp; 261 lbtp->pt.af_packet_priv = lbtp;
258 dev_add_pack(&lbtp->pt); 262 dev_add_pack(&lbtp->pt);
263
264 return 0;
265
266out:
267 if (!lbtp->local_lb)
268 mlx5_nic_vport_update_local_lb(priv->mdev, false);
269
259 return err; 270 return err;
260} 271}
261 272
262static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv, 273static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
263 struct mlx5e_lbt_priv *lbtp) 274 struct mlx5e_lbt_priv *lbtp)
264{ 275{
265 if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) { 276 if (!lbtp->local_lb)
266 if (!lbtp->local_lb) 277 mlx5_nic_vport_update_local_lb(priv->mdev, false);
267 mlx5_nic_vport_update_local_lb(priv->mdev, false);
268 }
269 278
270 dev_remove_pack(&lbtp->pt); 279 dev_remove_pack(&lbtp->pt);
271 mlx5e_refresh_tirs(priv, false); 280 mlx5e_refresh_tirs(priv, false);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 7d3d503fa675..14d57828945d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -470,7 +470,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
470 break; 470 break;
471 case MLX5_EVENT_TYPE_CQ_ERROR: 471 case MLX5_EVENT_TYPE_CQ_ERROR:
472 cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff; 472 cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
473 mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrom 0x%x\n", 473 mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
474 cqn, eqe->data.cq_err.syndrome); 474 cqn, eqe->data.cq_err.syndrome);
475 mlx5_cq_event(dev, cqn, eqe->type); 475 mlx5_cq_event(dev, cqn, eqe->type);
476 break; 476 break;
@@ -782,7 +782,7 @@ err1:
782 return err; 782 return err;
783} 783}
784 784
785int mlx5_stop_eqs(struct mlx5_core_dev *dev) 785void mlx5_stop_eqs(struct mlx5_core_dev *dev)
786{ 786{
787 struct mlx5_eq_table *table = &dev->priv.eq_table; 787 struct mlx5_eq_table *table = &dev->priv.eq_table;
788 int err; 788 int err;
@@ -791,22 +791,26 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev)
791 if (MLX5_CAP_GEN(dev, pg)) { 791 if (MLX5_CAP_GEN(dev, pg)) {
792 err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq); 792 err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
793 if (err) 793 if (err)
794 return err; 794 mlx5_core_err(dev, "failed to destroy page fault eq, err(%d)\n",
795 err);
795 } 796 }
796#endif 797#endif
797 798
798 err = mlx5_destroy_unmap_eq(dev, &table->pages_eq); 799 err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
799 if (err) 800 if (err)
800 return err; 801 mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
802 err);
801 803
802 mlx5_destroy_unmap_eq(dev, &table->async_eq); 804 err = mlx5_destroy_unmap_eq(dev, &table->async_eq);
805 if (err)
806 mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
807 err);
803 mlx5_cmd_use_polling(dev); 808 mlx5_cmd_use_polling(dev);
804 809
805 err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq); 810 err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
806 if (err) 811 if (err)
807 mlx5_cmd_use_events(dev); 812 mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
808 813 err);
809 return err;
810} 814}
811 815
812int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, 816int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c
index 3c11d6e2160a..14962969c5ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c
@@ -66,6 +66,9 @@ static int mlx5_fpga_mem_read_i2c(struct mlx5_fpga_device *fdev, size_t size,
66 u8 actual_size; 66 u8 actual_size;
67 int err; 67 int err;
68 68
69 if (!size)
70 return -EINVAL;
71
69 if (!fdev->mdev) 72 if (!fdev->mdev)
70 return -ENOTCONN; 73 return -ENOTCONN;
71 74
@@ -95,6 +98,9 @@ static int mlx5_fpga_mem_write_i2c(struct mlx5_fpga_device *fdev, size_t size,
95 u8 actual_size; 98 u8 actual_size;
96 int err; 99 int err;
97 100
101 if (!size)
102 return -EINVAL;
103
98 if (!fdev->mdev) 104 if (!fdev->mdev)
99 return -ENOTCONN; 105 return -ENOTCONN;
100 106
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index c70fd663a633..dfaad9ecb2b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -174,6 +174,8 @@ static void del_hw_fte(struct fs_node *node);
174static void del_sw_flow_table(struct fs_node *node); 174static void del_sw_flow_table(struct fs_node *node);
175static void del_sw_flow_group(struct fs_node *node); 175static void del_sw_flow_group(struct fs_node *node);
176static void del_sw_fte(struct fs_node *node); 176static void del_sw_fte(struct fs_node *node);
177static void del_sw_prio(struct fs_node *node);
178static void del_sw_ns(struct fs_node *node);
177/* Delete rule (destination) is special case that 179/* Delete rule (destination) is special case that
178 * requires to lock the FTE for all the deletion process. 180 * requires to lock the FTE for all the deletion process.
179 */ 181 */
@@ -408,6 +410,16 @@ static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
408 return NULL; 410 return NULL;
409} 411}
410 412
413static void del_sw_ns(struct fs_node *node)
414{
415 kfree(node);
416}
417
418static void del_sw_prio(struct fs_node *node)
419{
420 kfree(node);
421}
422
411static void del_hw_flow_table(struct fs_node *node) 423static void del_hw_flow_table(struct fs_node *node)
412{ 424{
413 struct mlx5_flow_table *ft; 425 struct mlx5_flow_table *ft;
@@ -2064,7 +2076,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
2064 return ERR_PTR(-ENOMEM); 2076 return ERR_PTR(-ENOMEM);
2065 2077
2066 fs_prio->node.type = FS_TYPE_PRIO; 2078 fs_prio->node.type = FS_TYPE_PRIO;
2067 tree_init_node(&fs_prio->node, NULL, NULL); 2079 tree_init_node(&fs_prio->node, NULL, del_sw_prio);
2068 tree_add_node(&fs_prio->node, &ns->node); 2080 tree_add_node(&fs_prio->node, &ns->node);
2069 fs_prio->num_levels = num_levels; 2081 fs_prio->num_levels = num_levels;
2070 fs_prio->prio = prio; 2082 fs_prio->prio = prio;
@@ -2090,7 +2102,7 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
2090 return ERR_PTR(-ENOMEM); 2102 return ERR_PTR(-ENOMEM);
2091 2103
2092 fs_init_namespace(ns); 2104 fs_init_namespace(ns);
2093 tree_init_node(&ns->node, NULL, NULL); 2105 tree_init_node(&ns->node, NULL, del_sw_ns);
2094 tree_add_node(&ns->node, &prio->node); 2106 tree_add_node(&ns->node, &prio->node);
2095 list_add_tail(&ns->node.list, &prio->node.children); 2107 list_add_tail(&ns->node.list, &prio->node.children);
2096 2108
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 1a0e797ad001..21d29f7936f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -241,7 +241,7 @@ static void print_health_info(struct mlx5_core_dev *dev)
241 u32 fw; 241 u32 fw;
242 int i; 242 int i;
243 243
244 /* If the syndrom is 0, the device is OK and no need to print buffer */ 244 /* If the syndrome is 0, the device is OK and no need to print buffer */
245 if (!ioread8(&h->synd)) 245 if (!ioread8(&h->synd))
246 return; 246 return;
247 247
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 261b95d014a0..a281d95ce17c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -57,7 +57,7 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
57 struct mlx5e_params *params) 57 struct mlx5e_params *params)
58{ 58{
59 /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */ 59 /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
60 mlx5e_set_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST); 60 mlx5e_init_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);
61 61
62 /* RQ size in ipoib by default is 512 */ 62 /* RQ size in ipoib by default is 512 */
63 params->log_rq_size = is_kdump_kernel() ? 63 params->log_rq_size = is_kdump_kernel() ?
@@ -86,6 +86,8 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
86 mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev)); 86 mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
87 mlx5i_build_nic_params(mdev, &priv->channels.params); 87 mlx5i_build_nic_params(mdev, &priv->channels.params);
88 88
89 mlx5e_timestamp_init(priv);
90
89 /* netdev init */ 91 /* netdev init */
90 netdev->hw_features |= NETIF_F_SG; 92 netdev->hw_features |= NETIF_F_SG;
91 netdev->hw_features |= NETIF_F_IP_CSUM; 93 netdev->hw_features |= NETIF_F_IP_CSUM;
@@ -450,7 +452,6 @@ static int mlx5i_open(struct net_device *netdev)
450 452
451 mlx5e_refresh_tirs(epriv, false); 453 mlx5e_refresh_tirs(epriv, false);
452 mlx5e_activate_priv_channels(epriv); 454 mlx5e_activate_priv_channels(epriv);
453 mlx5e_timestamp_set(epriv);
454 455
455 mutex_unlock(&epriv->state_lock); 456 mutex_unlock(&epriv->state_lock);
456 return 0; 457 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 4b6cb9b38686..e159243e0fcf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -451,9 +451,13 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
451 451
452 switch (clock->ptp_info.pin_config[pin].func) { 452 switch (clock->ptp_info.pin_config[pin].func) {
453 case PTP_PF_EXTTS: 453 case PTP_PF_EXTTS:
454 ptp_event.index = pin;
455 ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
456 be64_to_cpu(eqe->data.pps.time_stamp));
454 if (clock->pps_info.enabled) { 457 if (clock->pps_info.enabled) {
455 ptp_event.type = PTP_CLOCK_PPSUSR; 458 ptp_event.type = PTP_CLOCK_PPSUSR;
456 ptp_event.pps_times.ts_real = ns_to_timespec64(eqe->data.pps.time_stamp); 459 ptp_event.pps_times.ts_real =
460 ns_to_timespec64(ptp_event.timestamp);
457 } else { 461 } else {
458 ptp_event.type = PTP_CLOCK_EXTTS; 462 ptp_event.type = PTP_CLOCK_EXTTS;
459 } 463 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index d4a471a76d82..2ef641c91c26 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -319,11 +319,9 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
319{ 319{
320 struct mlx5_priv *priv = &dev->priv; 320 struct mlx5_priv *priv = &dev->priv;
321 struct mlx5_eq_table *table = &priv->eq_table; 321 struct mlx5_eq_table *table = &priv->eq_table;
322 struct irq_affinity irqdesc = {
323 .pre_vectors = MLX5_EQ_VEC_COMP_BASE,
324 };
325 int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq); 322 int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
326 int nvec; 323 int nvec;
324 int err;
327 325
328 nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + 326 nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
329 MLX5_EQ_VEC_COMP_BASE; 327 MLX5_EQ_VEC_COMP_BASE;
@@ -333,22 +331,23 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
333 331
334 priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL); 332 priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
335 if (!priv->irq_info) 333 if (!priv->irq_info)
336 goto err_free_msix; 334 return -ENOMEM;
337 335
338 nvec = pci_alloc_irq_vectors_affinity(dev->pdev, 336 nvec = pci_alloc_irq_vectors(dev->pdev,
339 MLX5_EQ_VEC_COMP_BASE + 1, nvec, 337 MLX5_EQ_VEC_COMP_BASE + 1, nvec,
340 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, 338 PCI_IRQ_MSIX);
341 &irqdesc); 339 if (nvec < 0) {
342 if (nvec < 0) 340 err = nvec;
343 return nvec; 341 goto err_free_irq_info;
342 }
344 343
345 table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE; 344 table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
346 345
347 return 0; 346 return 0;
348 347
349err_free_msix: 348err_free_irq_info:
350 kfree(priv->irq_info); 349 kfree(priv->irq_info);
351 return -ENOMEM; 350 return err;
352} 351}
353 352
354static void mlx5_free_irq_vectors(struct mlx5_core_dev *dev) 353static void mlx5_free_irq_vectors(struct mlx5_core_dev *dev)
@@ -593,8 +592,7 @@ static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
593 int ret = 0; 592 int ret = 0;
594 593
595 /* Disable local_lb by default */ 594 /* Disable local_lb by default */
596 if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && 595 if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
597 MLX5_CAP_GEN(dev, disable_local_lb))
598 ret = mlx5_nic_vport_update_local_lb(dev, false); 596 ret = mlx5_nic_vport_update_local_lb(dev, false);
599 597
600 return ret; 598 return ret;
@@ -633,6 +631,63 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)
633 return (u64)timer_l | (u64)timer_h1 << 32; 631 return (u64)timer_l | (u64)timer_h1 << 32;
634} 632}
635 633
634static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
635{
636 struct mlx5_priv *priv = &mdev->priv;
637 int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
638
639 if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
640 mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
641 return -ENOMEM;
642 }
643
644 cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
645 priv->irq_info[i].mask);
646
647 if (IS_ENABLED(CONFIG_SMP) &&
648 irq_set_affinity_hint(irq, priv->irq_info[i].mask))
649 mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
650
651 return 0;
652}
653
654static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
655{
656 struct mlx5_priv *priv = &mdev->priv;
657 int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
658
659 irq_set_affinity_hint(irq, NULL);
660 free_cpumask_var(priv->irq_info[i].mask);
661}
662
663static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
664{
665 int err;
666 int i;
667
668 for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
669 err = mlx5_irq_set_affinity_hint(mdev, i);
670 if (err)
671 goto err_out;
672 }
673
674 return 0;
675
676err_out:
677 for (i--; i >= 0; i--)
678 mlx5_irq_clear_affinity_hint(mdev, i);
679
680 return err;
681}
682
683static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
684{
685 int i;
686
687 for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
688 mlx5_irq_clear_affinity_hint(mdev, i);
689}
690
636int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, 691int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
637 unsigned int *irqn) 692 unsigned int *irqn)
638{ 693{
@@ -1079,9 +1134,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1079 goto err_stop_poll; 1134 goto err_stop_poll;
1080 } 1135 }
1081 1136
1082 if (boot && mlx5_init_once(dev, priv)) { 1137 if (boot) {
1083 dev_err(&pdev->dev, "sw objs init failed\n"); 1138 err = mlx5_init_once(dev, priv);
1084 goto err_stop_poll; 1139 if (err) {
1140 dev_err(&pdev->dev, "sw objs init failed\n");
1141 goto err_stop_poll;
1142 }
1085 } 1143 }
1086 1144
1087 err = mlx5_alloc_irq_vectors(dev); 1145 err = mlx5_alloc_irq_vectors(dev);
@@ -1091,8 +1149,9 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1091 } 1149 }
1092 1150
1093 dev->priv.uar = mlx5_get_uars_page(dev); 1151 dev->priv.uar = mlx5_get_uars_page(dev);
1094 if (!dev->priv.uar) { 1152 if (IS_ERR(dev->priv.uar)) {
1095 dev_err(&pdev->dev, "Failed allocating uar, aborting\n"); 1153 dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
1154 err = PTR_ERR(dev->priv.uar);
1096 goto err_disable_msix; 1155 goto err_disable_msix;
1097 } 1156 }
1098 1157
@@ -1108,6 +1167,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1108 goto err_stop_eqs; 1167 goto err_stop_eqs;
1109 } 1168 }
1110 1169
1170 err = mlx5_irq_set_affinity_hints(dev);
1171 if (err) {
1172 dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
1173 goto err_affinity_hints;
1174 }
1175
1111 err = mlx5_init_fs(dev); 1176 err = mlx5_init_fs(dev);
1112 if (err) { 1177 if (err) {
1113 dev_err(&pdev->dev, "Failed to init flow steering\n"); 1178 dev_err(&pdev->dev, "Failed to init flow steering\n");
@@ -1165,6 +1230,9 @@ err_sriov:
1165 mlx5_cleanup_fs(dev); 1230 mlx5_cleanup_fs(dev);
1166 1231
1167err_fs: 1232err_fs:
1233 mlx5_irq_clear_affinity_hints(dev);
1234
1235err_affinity_hints:
1168 free_comp_eqs(dev); 1236 free_comp_eqs(dev);
1169 1237
1170err_stop_eqs: 1238err_stop_eqs:
@@ -1233,6 +1301,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1233 1301
1234 mlx5_sriov_detach(dev); 1302 mlx5_sriov_detach(dev);
1235 mlx5_cleanup_fs(dev); 1303 mlx5_cleanup_fs(dev);
1304 mlx5_irq_clear_affinity_hints(dev);
1236 free_comp_eqs(dev); 1305 free_comp_eqs(dev);
1237 mlx5_stop_eqs(dev); 1306 mlx5_stop_eqs(dev);
1238 mlx5_put_uars_page(dev, priv->uar); 1307 mlx5_put_uars_page(dev, priv->uar);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 0f5ddd22927d..02d6c5b5d502 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -259,8 +259,8 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
259err_cmd: 259err_cmd:
260 memset(din, 0, sizeof(din)); 260 memset(din, 0, sizeof(din));
261 memset(dout, 0, sizeof(dout)); 261 memset(dout, 0, sizeof(dout));
262 MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP); 262 MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
263 MLX5_SET(destroy_qp_in, in, qpn, qp->qpn); 263 MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
264 mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout)); 264 mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
265 return err; 265 return err;
266} 266}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index e651e4c02867..d3c33e9eea72 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -125,16 +125,16 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
125 return ret_entry; 125 return ret_entry;
126} 126}
127 127
128static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev, 128static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
129 u32 rate, u16 index) 129 u32 rate, u16 index)
130{ 130{
131 u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)] = {0}; 131 u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {0};
132 u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0}; 132 u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0};
133 133
134 MLX5_SET(set_rate_limit_in, in, opcode, 134 MLX5_SET(set_pp_rate_limit_in, in, opcode,
135 MLX5_CMD_OP_SET_RATE_LIMIT); 135 MLX5_CMD_OP_SET_PP_RATE_LIMIT);
136 MLX5_SET(set_rate_limit_in, in, rate_limit_index, index); 136 MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index);
137 MLX5_SET(set_rate_limit_in, in, rate_limit, rate); 137 MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rate);
138 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 138 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
139} 139}
140 140
@@ -173,7 +173,7 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index)
173 entry->refcount++; 173 entry->refcount++;
174 } else { 174 } else {
175 /* new rate limit */ 175 /* new rate limit */
176 err = mlx5_set_rate_limit_cmd(dev, rate, entry->index); 176 err = mlx5_set_pp_rate_limit_cmd(dev, rate, entry->index);
177 if (err) { 177 if (err) {
178 mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n", 178 mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n",
179 rate, err); 179 rate, err);
@@ -209,7 +209,7 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate)
209 entry->refcount--; 209 entry->refcount--;
210 if (!entry->refcount) { 210 if (!entry->refcount) {
211 /* need to remove rate */ 211 /* need to remove rate */
212 mlx5_set_rate_limit_cmd(dev, 0, entry->index); 212 mlx5_set_pp_rate_limit_cmd(dev, 0, entry->index);
213 entry->rate = 0; 213 entry->rate = 0;
214 } 214 }
215 215
@@ -262,8 +262,8 @@ void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
262 /* Clear all configured rates */ 262 /* Clear all configured rates */
263 for (i = 0; i < table->max_size; i++) 263 for (i = 0; i < table->max_size; i++)
264 if (table->rl_entry[i].rate) 264 if (table->rl_entry[i].rate)
265 mlx5_set_rate_limit_cmd(dev, 0, 265 mlx5_set_pp_rate_limit_cmd(dev, 0,
266 table->rl_entry[i].index); 266 table->rl_entry[i].index);
267 267
268 kfree(dev->priv.rl_table.rl_entry); 268 kfree(dev->priv.rl_table.rl_entry);
269} 269}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 222b25908d01..8b97066dd1f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -168,18 +168,16 @@ struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
168 struct mlx5_uars_page *ret; 168 struct mlx5_uars_page *ret;
169 169
170 mutex_lock(&mdev->priv.bfregs.reg_head.lock); 170 mutex_lock(&mdev->priv.bfregs.reg_head.lock);
171 if (list_empty(&mdev->priv.bfregs.reg_head.list)) { 171 if (!list_empty(&mdev->priv.bfregs.reg_head.list)) {
172 ret = alloc_uars_page(mdev, false);
173 if (IS_ERR(ret)) {
174 ret = NULL;
175 goto out;
176 }
177 list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
178 } else {
179 ret = list_first_entry(&mdev->priv.bfregs.reg_head.list, 172 ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
180 struct mlx5_uars_page, list); 173 struct mlx5_uars_page, list);
181 kref_get(&ret->ref_count); 174 kref_get(&ret->ref_count);
175 goto out;
182 } 176 }
177 ret = alloc_uars_page(mdev, false);
178 if (IS_ERR(ret))
179 goto out;
180 list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
183out: 181out:
184 mutex_unlock(&mdev->priv.bfregs.reg_head.lock); 182 mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
185 183
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 9cb939b6a859..dfe36cf6fbea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -911,23 +911,33 @@ int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
911 void *in; 911 void *in;
912 int err; 912 int err;
913 913
914 mlx5_core_dbg(mdev, "%s local_lb\n", enable ? "enable" : "disable"); 914 if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
915 !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
916 return 0;
917
915 in = kvzalloc(inlen, GFP_KERNEL); 918 in = kvzalloc(inlen, GFP_KERNEL);
916 if (!in) 919 if (!in)
917 return -ENOMEM; 920 return -ENOMEM;
918 921
919 MLX5_SET(modify_nic_vport_context_in, in, 922 MLX5_SET(modify_nic_vport_context_in, in,
920 field_select.disable_mc_local_lb, 1);
921 MLX5_SET(modify_nic_vport_context_in, in,
922 nic_vport_context.disable_mc_local_lb, !enable); 923 nic_vport_context.disable_mc_local_lb, !enable);
923
924 MLX5_SET(modify_nic_vport_context_in, in,
925 field_select.disable_uc_local_lb, 1);
926 MLX5_SET(modify_nic_vport_context_in, in, 924 MLX5_SET(modify_nic_vport_context_in, in,
927 nic_vport_context.disable_uc_local_lb, !enable); 925 nic_vport_context.disable_uc_local_lb, !enable);
928 926
927 if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
928 MLX5_SET(modify_nic_vport_context_in, in,
929 field_select.disable_mc_local_lb, 1);
930
931 if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
932 MLX5_SET(modify_nic_vport_context_in, in,
933 field_select.disable_uc_local_lb, 1);
934
929 err = mlx5_modify_nic_vport_context(mdev, in, inlen); 935 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
930 936
937 if (!err)
938 mlx5_core_dbg(mdev, "%s local_lb\n",
939 enable ? "enable" : "disable");
940
931 kvfree(in); 941 kvfree(in);
932 return err; 942 return err;
933} 943}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
index 07a9ba6cfc70..2f74953e4561 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
@@ -71,9 +71,9 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
71 struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; 71 struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
72 struct mlx5e_vxlan *vxlan; 72 struct mlx5e_vxlan *vxlan;
73 73
74 spin_lock(&vxlan_db->lock); 74 spin_lock_bh(&vxlan_db->lock);
75 vxlan = radix_tree_lookup(&vxlan_db->tree, port); 75 vxlan = radix_tree_lookup(&vxlan_db->tree, port);
76 spin_unlock(&vxlan_db->lock); 76 spin_unlock_bh(&vxlan_db->lock);
77 77
78 return vxlan; 78 return vxlan;
79} 79}
@@ -88,8 +88,12 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
88 struct mlx5e_vxlan *vxlan; 88 struct mlx5e_vxlan *vxlan;
89 int err; 89 int err;
90 90
91 if (mlx5e_vxlan_lookup_port(priv, port)) 91 mutex_lock(&priv->state_lock);
92 vxlan = mlx5e_vxlan_lookup_port(priv, port);
93 if (vxlan) {
94 atomic_inc(&vxlan->refcount);
92 goto free_work; 95 goto free_work;
96 }
93 97
94 if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port)) 98 if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
95 goto free_work; 99 goto free_work;
@@ -99,10 +103,11 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
99 goto err_delete_port; 103 goto err_delete_port;
100 104
101 vxlan->udp_port = port; 105 vxlan->udp_port = port;
106 atomic_set(&vxlan->refcount, 1);
102 107
103 spin_lock_irq(&vxlan_db->lock); 108 spin_lock_bh(&vxlan_db->lock);
104 err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan); 109 err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan);
105 spin_unlock_irq(&vxlan_db->lock); 110 spin_unlock_bh(&vxlan_db->lock);
106 if (err) 111 if (err)
107 goto err_free; 112 goto err_free;
108 113
@@ -113,35 +118,39 @@ err_free:
113err_delete_port: 118err_delete_port:
114 mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); 119 mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
115free_work: 120free_work:
121 mutex_unlock(&priv->state_lock);
116 kfree(vxlan_work); 122 kfree(vxlan_work);
117} 123}
118 124
119static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port) 125static void mlx5e_vxlan_del_port(struct work_struct *work)
120{ 126{
127 struct mlx5e_vxlan_work *vxlan_work =
128 container_of(work, struct mlx5e_vxlan_work, work);
129 struct mlx5e_priv *priv = vxlan_work->priv;
121 struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; 130 struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
131 u16 port = vxlan_work->port;
122 struct mlx5e_vxlan *vxlan; 132 struct mlx5e_vxlan *vxlan;
133 bool remove = false;
123 134
124 spin_lock_irq(&vxlan_db->lock); 135 mutex_lock(&priv->state_lock);
125 vxlan = radix_tree_delete(&vxlan_db->tree, port); 136 spin_lock_bh(&vxlan_db->lock);
126 spin_unlock_irq(&vxlan_db->lock); 137 vxlan = radix_tree_lookup(&vxlan_db->tree, port);
127
128 if (!vxlan) 138 if (!vxlan)
129 return; 139 goto out_unlock;
130
131 mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port);
132
133 kfree(vxlan);
134}
135 140
136static void mlx5e_vxlan_del_port(struct work_struct *work) 141 if (atomic_dec_and_test(&vxlan->refcount)) {
137{ 142 radix_tree_delete(&vxlan_db->tree, port);
138 struct mlx5e_vxlan_work *vxlan_work = 143 remove = true;
139 container_of(work, struct mlx5e_vxlan_work, work); 144 }
140 struct mlx5e_priv *priv = vxlan_work->priv;
141 u16 port = vxlan_work->port;
142 145
143 __mlx5e_vxlan_core_del_port(priv, port); 146out_unlock:
147 spin_unlock_bh(&vxlan_db->lock);
144 148
149 if (remove) {
150 mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
151 kfree(vxlan);
152 }
153 mutex_unlock(&priv->state_lock);
145 kfree(vxlan_work); 154 kfree(vxlan_work);
146} 155}
147 156
@@ -171,12 +180,11 @@ void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
171 struct mlx5e_vxlan *vxlan; 180 struct mlx5e_vxlan *vxlan;
172 unsigned int port = 0; 181 unsigned int port = 0;
173 182
174 spin_lock_irq(&vxlan_db->lock); 183 /* Lockless since we are the only radix-tree consumers, wq is disabled */
175 while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) { 184 while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) {
176 port = vxlan->udp_port; 185 port = vxlan->udp_port;
177 spin_unlock_irq(&vxlan_db->lock); 186 radix_tree_delete(&vxlan_db->tree, port);
178 __mlx5e_vxlan_core_del_port(priv, (u16)port); 187 mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
179 spin_lock_irq(&vxlan_db->lock); 188 kfree(vxlan);
180 } 189 }
181 spin_unlock_irq(&vxlan_db->lock);
182} 190}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
index 5def12c048e3..5ef6ae7d568a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
@@ -36,6 +36,7 @@
36#include "en.h" 36#include "en.h"
37 37
38struct mlx5e_vxlan { 38struct mlx5e_vxlan {
39 atomic_t refcount;
39 u16 udp_port; 40 u16 udp_port;
40}; 41};
41 42
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 23f7d828cf67..6ef20e5cc77d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1643,7 +1643,12 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
1643 return 0; 1643 return 0;
1644 } 1644 }
1645 1645
1646 wmb(); /* reset needs to be written before we read control register */ 1646 /* Reset needs to be written before we read control register, and
1647 * we must wait for the HW to become responsive once again
1648 */
1649 wmb();
1650 msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
1651
1647 end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); 1652 end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
1648 do { 1653 do {
1649 u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); 1654 u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index a6441208e9d9..fb082ad21b00 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -59,6 +59,7 @@
59#define MLXSW_PCI_SW_RESET 0xF0010 59#define MLXSW_PCI_SW_RESET 0xF0010
60#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) 60#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
61#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000 61#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000
62#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
62#define MLXSW_PCI_FW_READY 0xA1844 63#define MLXSW_PCI_FW_READY 0xA1844
63#define MLXSW_PCI_FW_READY_MASK 0xFFFF 64#define MLXSW_PCI_FW_READY_MASK 0xFFFF
64#define MLXSW_PCI_FW_READY_MAGIC 0x5E 65#define MLXSW_PCI_FW_READY_MAGIC 0x5E
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 2d0897b7d860..c3837ca7a705 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -4300,6 +4300,7 @@ static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
4300 4300
4301static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 4301static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
4302{ 4302{
4303 u16 vid = 1;
4303 int err; 4304 int err;
4304 4305
4305 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 4306 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
@@ -4312,8 +4313,19 @@ static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
4312 true, false); 4313 true, false);
4313 if (err) 4314 if (err)
4314 goto err_port_vlan_set; 4315 goto err_port_vlan_set;
4316
4317 for (; vid <= VLAN_N_VID - 1; vid++) {
4318 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4319 vid, false);
4320 if (err)
4321 goto err_vid_learning_set;
4322 }
4323
4315 return 0; 4324 return 0;
4316 4325
4326err_vid_learning_set:
4327 for (vid--; vid >= 1; vid--)
4328 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
4317err_port_vlan_set: 4329err_port_vlan_set:
4318 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4330 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4319err_port_stp_set: 4331err_port_stp_set:
@@ -4323,6 +4335,12 @@ err_port_stp_set:
4323 4335
4324static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 4336static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
4325{ 4337{
4338 u16 vid;
4339
4340 for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
4341 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4342 vid, true);
4343
4326 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1, 4344 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
4327 false, false); 4345 false, false);
4328 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4346 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
@@ -4358,7 +4376,10 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
4358 } 4376 }
4359 if (!info->linking) 4377 if (!info->linking)
4360 break; 4378 break;
4361 if (netdev_has_any_upper_dev(upper_dev)) { 4379 if (netdev_has_any_upper_dev(upper_dev) &&
4380 (!netif_is_bridge_master(upper_dev) ||
4381 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
4382 upper_dev))) {
4362 NL_SET_ERR_MSG(extack, 4383 NL_SET_ERR_MSG(extack,
4363 "spectrum: Enslaving a port to a device that already has an upper device is not supported"); 4384 "spectrum: Enslaving a port to a device that already has an upper device is not supported");
4364 return -EINVAL; 4385 return -EINVAL;
@@ -4486,6 +4507,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
4486 u16 vid) 4507 u16 vid)
4487{ 4508{
4488 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 4509 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
4510 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4489 struct netdev_notifier_changeupper_info *info = ptr; 4511 struct netdev_notifier_changeupper_info *info = ptr;
4490 struct netlink_ext_ack *extack; 4512 struct netlink_ext_ack *extack;
4491 struct net_device *upper_dev; 4513 struct net_device *upper_dev;
@@ -4502,7 +4524,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
4502 } 4524 }
4503 if (!info->linking) 4525 if (!info->linking)
4504 break; 4526 break;
4505 if (netdev_has_any_upper_dev(upper_dev)) { 4527 if (netdev_has_any_upper_dev(upper_dev) &&
4528 (!netif_is_bridge_master(upper_dev) ||
4529 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
4530 upper_dev))) {
4506 NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported"); 4531 NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");
4507 return -EINVAL; 4532 return -EINVAL;
4508 } 4533 }
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 432ab9b12b7f..05ce1befd9b3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -365,6 +365,8 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
365void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, 365void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
366 struct net_device *brport_dev, 366 struct net_device *brport_dev,
367 struct net_device *br_dev); 367 struct net_device *br_dev);
368bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
369 const struct net_device *br_dev);
368 370
369/* spectrum.c */ 371/* spectrum.c */
370int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 372int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index c33beac5def0..b5397da94d7f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -46,7 +46,8 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
46 int tclass_num, u32 min, u32 max, 46 int tclass_num, u32 min, u32 max,
47 u32 probability, bool is_ecn) 47 u32 probability, bool is_ecn)
48{ 48{
49 char cwtp_cmd[max_t(u8, MLXSW_REG_CWTP_LEN, MLXSW_REG_CWTPM_LEN)]; 49 char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
50 char cwtp_cmd[MLXSW_REG_CWTP_LEN];
50 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 51 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
51 int err; 52 int err;
52 53
@@ -60,10 +61,10 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
60 if (err) 61 if (err)
61 return err; 62 return err;
62 63
63 mlxsw_reg_cwtpm_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num, 64 mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
64 MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn); 65 MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);
65 66
66 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtp_cmd); 67 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
67} 68}
68 69
69static int 70static int
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 72ef4f8025f0..7042c855a5d6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -821,13 +821,18 @@ static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
821 struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree; 821 struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
822 int err; 822 int err;
823 823
824 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
825 if (err)
826 return err;
827 fib->lpm_tree = new_tree; 824 fib->lpm_tree = new_tree;
828 mlxsw_sp_lpm_tree_hold(new_tree); 825 mlxsw_sp_lpm_tree_hold(new_tree);
826 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
827 if (err)
828 goto err_tree_bind;
829 mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree); 829 mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
830 return 0; 830 return 0;
831
832err_tree_bind:
833 mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
834 fib->lpm_tree = old_tree;
835 return err;
831} 836}
832 837
833static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp, 838static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
@@ -868,11 +873,14 @@ err_tree_replace:
868 return err; 873 return err;
869 874
870no_replace: 875no_replace:
871 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
872 if (err)
873 return err;
874 fib->lpm_tree = new_tree; 876 fib->lpm_tree = new_tree;
875 mlxsw_sp_lpm_tree_hold(new_tree); 877 mlxsw_sp_lpm_tree_hold(new_tree);
878 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
879 if (err) {
880 mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
881 fib->lpm_tree = NULL;
882 return err;
883 }
876 return 0; 884 return 0;
877} 885}
878 886
@@ -1934,11 +1942,8 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1934 dipn = htonl(dip); 1942 dipn = htonl(dip);
1935 dev = mlxsw_sp->router->rifs[rif]->dev; 1943 dev = mlxsw_sp->router->rifs[rif]->dev;
1936 n = neigh_lookup(&arp_tbl, &dipn, dev); 1944 n = neigh_lookup(&arp_tbl, &dipn, dev);
1937 if (!n) { 1945 if (!n)
1938 netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
1939 &dip);
1940 return; 1946 return;
1941 }
1942 1947
1943 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip); 1948 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
1944 neigh_event_send(n, NULL); 1949 neigh_event_send(n, NULL);
@@ -1965,11 +1970,8 @@ static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1965 1970
1966 dev = mlxsw_sp->router->rifs[rif]->dev; 1971 dev = mlxsw_sp->router->rifs[rif]->dev;
1967 n = neigh_lookup(&nd_tbl, &dip, dev); 1972 n = neigh_lookup(&nd_tbl, &dip, dev);
1968 if (!n) { 1973 if (!n)
1969 netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
1970 &dip);
1971 return; 1974 return;
1972 }
1973 1975
1974 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip); 1976 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
1975 neigh_event_send(n, NULL); 1977 neigh_event_send(n, NULL);
@@ -2436,25 +2438,16 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2436 rhashtable_destroy(&mlxsw_sp->router->neigh_ht); 2438 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2437} 2439}
2438 2440
2439static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
2440 const struct mlxsw_sp_rif *rif)
2441{
2442 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2443
2444 mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
2445 rif->rif_index, rif->addr);
2446 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2447}
2448
2449static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, 2441static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2450 struct mlxsw_sp_rif *rif) 2442 struct mlxsw_sp_rif *rif)
2451{ 2443{
2452 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp; 2444 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2453 2445
2454 mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
2455 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list, 2446 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2456 rif_list_node) 2447 rif_list_node) {
2448 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2457 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); 2449 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2450 }
2458} 2451}
2459 2452
2460enum mlxsw_sp_nexthop_type { 2453enum mlxsw_sp_nexthop_type {
@@ -3237,7 +3230,7 @@ static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3237{ 3230{
3238 if (!removing) 3231 if (!removing)
3239 nh->should_offload = 1; 3232 nh->should_offload = 1;
3240 else if (nh->offloaded) 3233 else
3241 nh->should_offload = 0; 3234 nh->should_offload = 0;
3242 nh->update = 1; 3235 nh->update = 1;
3243} 3236}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 7b8548e25ae7..593ad31be749 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -152,6 +152,12 @@ mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
152 return NULL; 152 return NULL;
153} 153}
154 154
155bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
156 const struct net_device *br_dev)
157{
158 return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
159}
160
155static struct mlxsw_sp_bridge_device * 161static struct mlxsw_sp_bridge_device *
156mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, 162mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
157 struct net_device *br_dev) 163 struct net_device *br_dev)
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index e379b78e86ef..13190aa09faf 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -82,10 +82,33 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
82 return nfp_net_ebpf_capable(nn) ? "BPF" : ""; 82 return nfp_net_ebpf_capable(nn) ? "BPF" : "";
83} 83}
84 84
85static int
86nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
87{
88 int err;
89
90 nn->app_priv = kzalloc(sizeof(struct nfp_bpf_vnic), GFP_KERNEL);
91 if (!nn->app_priv)
92 return -ENOMEM;
93
94 err = nfp_app_nic_vnic_alloc(app, nn, id);
95 if (err)
96 goto err_free_priv;
97
98 return 0;
99err_free_priv:
100 kfree(nn->app_priv);
101 return err;
102}
103
85static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn) 104static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)
86{ 105{
106 struct nfp_bpf_vnic *bv = nn->app_priv;
107
87 if (nn->dp.bpf_offload_xdp) 108 if (nn->dp.bpf_offload_xdp)
88 nfp_bpf_xdp_offload(app, nn, NULL); 109 nfp_bpf_xdp_offload(app, nn, NULL);
110 WARN_ON(bv->tc_prog);
111 kfree(bv);
89} 112}
90 113
91static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type, 114static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
@@ -93,6 +116,9 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
93{ 116{
94 struct tc_cls_bpf_offload *cls_bpf = type_data; 117 struct tc_cls_bpf_offload *cls_bpf = type_data;
95 struct nfp_net *nn = cb_priv; 118 struct nfp_net *nn = cb_priv;
119 struct bpf_prog *oldprog;
120 struct nfp_bpf_vnic *bv;
121 int err;
96 122
97 if (type != TC_SETUP_CLSBPF || 123 if (type != TC_SETUP_CLSBPF ||
98 !tc_can_offload(nn->dp.netdev) || 124 !tc_can_offload(nn->dp.netdev) ||
@@ -100,8 +126,6 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
100 cls_bpf->common.protocol != htons(ETH_P_ALL) || 126 cls_bpf->common.protocol != htons(ETH_P_ALL) ||
101 cls_bpf->common.chain_index) 127 cls_bpf->common.chain_index)
102 return -EOPNOTSUPP; 128 return -EOPNOTSUPP;
103 if (nn->dp.bpf_offload_xdp)
104 return -EBUSY;
105 129
106 /* Only support TC direct action */ 130 /* Only support TC direct action */
107 if (!cls_bpf->exts_integrated || 131 if (!cls_bpf->exts_integrated ||
@@ -110,16 +134,25 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
110 return -EOPNOTSUPP; 134 return -EOPNOTSUPP;
111 } 135 }
112 136
113 switch (cls_bpf->command) { 137 if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
114 case TC_CLSBPF_REPLACE:
115 return nfp_net_bpf_offload(nn, cls_bpf->prog, true);
116 case TC_CLSBPF_ADD:
117 return nfp_net_bpf_offload(nn, cls_bpf->prog, false);
118 case TC_CLSBPF_DESTROY:
119 return nfp_net_bpf_offload(nn, NULL, true);
120 default:
121 return -EOPNOTSUPP; 138 return -EOPNOTSUPP;
139
140 bv = nn->app_priv;
141 oldprog = cls_bpf->oldprog;
142
143 /* Don't remove if oldprog doesn't match driver's state */
144 if (bv->tc_prog != oldprog) {
145 oldprog = NULL;
146 if (!cls_bpf->prog)
147 return 0;
122 } 148 }
149
150 err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog);
151 if (err)
152 return err;
153
154 bv->tc_prog = cls_bpf->prog;
155 return 0;
123} 156}
124 157
125static int nfp_bpf_setup_tc_block(struct net_device *netdev, 158static int nfp_bpf_setup_tc_block(struct net_device *netdev,
@@ -167,7 +200,7 @@ const struct nfp_app_type app_bpf = {
167 200
168 .extra_cap = nfp_bpf_extra_cap, 201 .extra_cap = nfp_bpf_extra_cap,
169 202
170 .vnic_alloc = nfp_app_nic_vnic_alloc, 203 .vnic_alloc = nfp_bpf_vnic_alloc,
171 .vnic_free = nfp_bpf_vnic_free, 204 .vnic_free = nfp_bpf_vnic_free,
172 205
173 .setup_tc = nfp_bpf_setup_tc, 206 .setup_tc = nfp_bpf_setup_tc,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 082a15f6dfb5..57b6043177a3 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -172,6 +172,14 @@ struct nfp_prog {
172 struct list_head insns; 172 struct list_head insns;
173}; 173};
174 174
175/**
176 * struct nfp_bpf_vnic - per-vNIC BPF priv structure
177 * @tc_prog: currently loaded cls_bpf program
178 */
179struct nfp_bpf_vnic {
180 struct bpf_prog *tc_prog;
181};
182
175int nfp_bpf_jit(struct nfp_prog *prog); 183int nfp_bpf_jit(struct nfp_prog *prog);
176 184
177extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops; 185extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 1a603fdd9e80..99b0487b6d82 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -568,6 +568,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
568 return err; 568 return err;
569 } 569 }
570 nn_writeb(nn, ctrl_offset, entry->entry); 570 nn_writeb(nn, ctrl_offset, entry->entry);
571 nfp_net_irq_unmask(nn, entry->entry);
571 572
572 return 0; 573 return 0;
573} 574}
@@ -582,6 +583,7 @@ static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
582 unsigned int vector_idx) 583 unsigned int vector_idx)
583{ 584{
584 nn_writeb(nn, ctrl_offset, 0xff); 585 nn_writeb(nn, ctrl_offset, 0xff);
586 nn_pci_flush(nn);
585 free_irq(nn->irq_entries[vector_idx].vector, nn); 587 free_irq(nn->irq_entries[vector_idx].vector, nn);
586} 588}
587 589
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 2801ecd09eab..6c02b2d6ba06 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -333,7 +333,7 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
333 ls >= ARRAY_SIZE(ls_to_ethtool)) 333 ls >= ARRAY_SIZE(ls_to_ethtool))
334 return 0; 334 return 0;
335 335
336 cmd->base.speed = ls_to_ethtool[sts]; 336 cmd->base.speed = ls_to_ethtool[ls];
337 cmd->base.duplex = DUPLEX_FULL; 337 cmd->base.duplex = DUPLEX_FULL;
338 338
339 return 0; 339 return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 924a05e05da0..78b36c67c232 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -84,16 +84,13 @@ nfp_repr_phy_port_get_stats64(struct nfp_port *port,
84{ 84{
85 u8 __iomem *mem = port->eth_stats; 85 u8 __iomem *mem = port->eth_stats;
86 86
87 /* TX and RX stats are flipped as we are returning the stats as seen 87 stats->tx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK);
88 * at the switch port corresponding to the phys port. 88 stats->tx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS);
89 */ 89 stats->tx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS);
90 stats->tx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK);
91 stats->tx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS);
92 stats->tx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS);
93 90
94 stats->rx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK); 91 stats->rx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK);
95 stats->rx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS); 92 stats->rx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS);
96 stats->rx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS); 93 stats->rx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS);
97} 94}
98 95
99static void 96static void
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index c8c4b3940564..b7abb8205d3a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -358,10 +358,27 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
358 kfree(p_rdma_info); 358 kfree(p_rdma_info);
359} 359}
360 360
361static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
362{
363 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
364
365 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
366
367 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
368 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
369 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
370}
371
372static void qed_rdma_free_reserved_lkey(struct qed_hwfn *p_hwfn)
373{
374 qed_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey);
375}
376
361static void qed_rdma_free(struct qed_hwfn *p_hwfn) 377static void qed_rdma_free(struct qed_hwfn *p_hwfn)
362{ 378{
363 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n"); 379 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
364 380
381 qed_rdma_free_reserved_lkey(p_hwfn);
365 qed_rdma_resc_free(p_hwfn); 382 qed_rdma_resc_free(p_hwfn);
366} 383}
367 384
@@ -615,9 +632,6 @@ static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
615{ 632{
616 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; 633 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
617 634
618 /* The first DPI is reserved for the Kernel */
619 __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);
620
621 /* Tid 0 will be used as the key for "reserved MR". 635 /* Tid 0 will be used as the key for "reserved MR".
622 * The driver should allocate memory for it so it can be loaded but no 636 * The driver should allocate memory for it so it can be loaded but no
623 * ramrod should be passed on it. 637 * ramrod should be passed on it.
@@ -797,17 +811,6 @@ static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
797 return p_hwfn->p_rdma_info->dev; 811 return p_hwfn->p_rdma_info->dev;
798} 812}
799 813
800static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
801{
802 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
803
804 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
805
806 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
807 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
808 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
809}
810
811static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod) 814static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
812{ 815{
813 struct qed_hwfn *p_hwfn; 816 struct qed_hwfn *p_hwfn;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index be48d9abd001..3588081b2e27 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -776,6 +776,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
776 int rc = 0; 776 int rc = 0;
777 struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL; 777 struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
778 bool b_ret_ent = true; 778 bool b_ret_ent = true;
779 bool eblock;
779 780
780 if (!p_hwfn) 781 if (!p_hwfn)
781 return -EINVAL; 782 return -EINVAL;
@@ -794,6 +795,11 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
794 if (rc) 795 if (rc)
795 goto spq_post_fail; 796 goto spq_post_fail;
796 797
798 /* Check if entry is in block mode before qed_spq_add_entry,
799 * which might kfree p_ent.
800 */
801 eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);
802
797 /* Add the request to the pending queue */ 803 /* Add the request to the pending queue */
798 rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority); 804 rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
799 if (rc) 805 if (rc)
@@ -811,7 +817,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
811 817
812 spin_unlock_bh(&p_spq->lock); 818 spin_unlock_bh(&p_spq->lock);
813 819
814 if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) { 820 if (eblock) {
815 /* For entries in QED BLOCK mode, the completion code cannot 821 /* For entries in QED BLOCK mode, the completion code cannot
816 * perform the necessary cleanup - if it did, we couldn't 822 * perform the necessary cleanup - if it did, we couldn't
817 * access p_ent here to see whether it's successful or not. 823 * access p_ent here to see whether it's successful or not.
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
index 18461fcb9815..53dbf1e163a8 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
@@ -47,6 +47,7 @@
47#define MDIO_CLK_25_28 7 47#define MDIO_CLK_25_28 7
48 48
49#define MDIO_WAIT_TIMES 1000 49#define MDIO_WAIT_TIMES 1000
50#define MDIO_STATUS_DELAY_TIME 1
50 51
51static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum) 52static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
52{ 53{
@@ -65,7 +66,7 @@ static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
65 66
66 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, 67 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
67 !(reg & (MDIO_START | MDIO_BUSY)), 68 !(reg & (MDIO_START | MDIO_BUSY)),
68 100, MDIO_WAIT_TIMES * 100)) 69 MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100))
69 return -EIO; 70 return -EIO;
70 71
71 return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK; 72 return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
@@ -88,8 +89,8 @@ static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
88 writel(reg, adpt->base + EMAC_MDIO_CTRL); 89 writel(reg, adpt->base + EMAC_MDIO_CTRL);
89 90
90 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, 91 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
91 !(reg & (MDIO_START | MDIO_BUSY)), 100, 92 !(reg & (MDIO_START | MDIO_BUSY)),
92 MDIO_WAIT_TIMES * 100)) 93 MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100))
93 return -EIO; 94 return -EIO;
94 95
95 return 0; 96 return 0;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 70c92b649b29..38c924bdd32e 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -253,18 +253,18 @@ static int emac_open(struct net_device *netdev)
253 return ret; 253 return ret;
254 } 254 }
255 255
256 ret = emac_mac_up(adpt); 256 ret = adpt->phy.open(adpt);
257 if (ret) { 257 if (ret) {
258 emac_mac_rx_tx_rings_free_all(adpt); 258 emac_mac_rx_tx_rings_free_all(adpt);
259 free_irq(irq->irq, irq); 259 free_irq(irq->irq, irq);
260 return ret; 260 return ret;
261 } 261 }
262 262
263 ret = adpt->phy.open(adpt); 263 ret = emac_mac_up(adpt);
264 if (ret) { 264 if (ret) {
265 emac_mac_down(adpt);
266 emac_mac_rx_tx_rings_free_all(adpt); 265 emac_mac_rx_tx_rings_free_all(adpt);
267 free_irq(irq->irq, irq); 266 free_irq(irq->irq, irq);
267 adpt->phy.close(adpt);
268 return ret; 268 return ret;
269 } 269 }
270 270
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 71bee1af71ef..df21e900f874 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -195,6 +195,7 @@ err2:
195err1: 195err1:
196 rmnet_unregister_real_device(real_dev, port); 196 rmnet_unregister_real_device(real_dev, port);
197err0: 197err0:
198 kfree(ep);
198 return err; 199 return err;
199} 200}
200 201
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 29842ccc91a9..08e4afc0ab39 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -126,12 +126,12 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
126 126
127 if (skb_headroom(skb) < required_headroom) { 127 if (skb_headroom(skb) < required_headroom) {
128 if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL)) 128 if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
129 return RMNET_MAP_CONSUMED; 129 goto fail;
130 } 130 }
131 131
132 map_header = rmnet_map_add_map_header(skb, additional_header_len, 0); 132 map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
133 if (!map_header) 133 if (!map_header)
134 return RMNET_MAP_CONSUMED; 134 goto fail;
135 135
136 if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) { 136 if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {
137 if (mux_id == 0xff) 137 if (mux_id == 0xff)
@@ -143,6 +143,10 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
143 skb->protocol = htons(ETH_P_MAP); 143 skb->protocol = htons(ETH_P_MAP);
144 144
145 return RMNET_MAP_SUCCESS; 145 return RMNET_MAP_SUCCESS;
146
147fail:
148 kfree_skb(skb);
149 return RMNET_MAP_CONSUMED;
146} 150}
147 151
148static void 152static void
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index fc0d5fa65ad4..734286ebe5ef 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2244,19 +2244,14 @@ static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
2244 void __iomem *ioaddr = tp->mmio_addr; 2244 void __iomem *ioaddr = tp->mmio_addr;
2245 dma_addr_t paddr = tp->counters_phys_addr; 2245 dma_addr_t paddr = tp->counters_phys_addr;
2246 u32 cmd; 2246 u32 cmd;
2247 bool ret;
2248 2247
2249 RTL_W32(CounterAddrHigh, (u64)paddr >> 32); 2248 RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
2249 RTL_R32(CounterAddrHigh);
2250 cmd = (u64)paddr & DMA_BIT_MASK(32); 2250 cmd = (u64)paddr & DMA_BIT_MASK(32);
2251 RTL_W32(CounterAddrLow, cmd); 2251 RTL_W32(CounterAddrLow, cmd);
2252 RTL_W32(CounterAddrLow, cmd | counter_cmd); 2252 RTL_W32(CounterAddrLow, cmd | counter_cmd);
2253 2253
2254 ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); 2254 return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
2255
2256 RTL_W32(CounterAddrLow, 0);
2257 RTL_W32(CounterAddrHigh, 0);
2258
2259 return ret;
2260} 2255}
2261 2256
2262static bool rtl8169_reset_counters(struct net_device *dev) 2257static bool rtl8169_reset_counters(struct net_device *dev)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 2b962d349f5f..009780df664b 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2308,32 +2308,9 @@ static int __maybe_unused ravb_resume(struct device *dev)
2308 struct ravb_private *priv = netdev_priv(ndev); 2308 struct ravb_private *priv = netdev_priv(ndev);
2309 int ret = 0; 2309 int ret = 0;
2310 2310
2311 if (priv->wol_enabled) { 2311 /* If WoL is enabled set reset mode to rearm the WoL logic */
2312 /* Reduce the usecount of the clock to zero and then 2312 if (priv->wol_enabled)
2313 * restore it to its original value. This is done to force
2314 * the clock to be re-enabled which is a workaround
2315 * for renesas-cpg-mssr driver which do not enable clocks
2316 * when resuming from PSCI suspend/resume.
2317 *
2318 * Without this workaround the driver fails to communicate
2319 * with the hardware if WoL was enabled when the system
2320 * entered PSCI suspend. This is due to that if WoL is enabled
2321 * we explicitly keep the clock from being turned off when
2322 * suspending, but in PSCI sleep power is cut so the clock
2323 * is disabled anyhow, the clock driver is not aware of this
2324 * so the clock is not turned back on when resuming.
2325 *
2326 * TODO: once the renesas-cpg-mssr suspend/resume is working
2327 * this clock dance should be removed.
2328 */
2329 clk_disable(priv->clk);
2330 clk_disable(priv->clk);
2331 clk_enable(priv->clk);
2332 clk_enable(priv->clk);
2333
2334 /* Set reset mode to rearm the WoL logic */
2335 ravb_write(ndev, CCC_OPC_RESET, CCC); 2313 ravb_write(ndev, CCC_OPC_RESET, CCC);
2336 }
2337 2314
2338 /* All register have been reset to default values. 2315 /* All register have been reset to default values.
2339 * Restore all registers which where setup at probe time and 2316 * Restore all registers which where setup at probe time and
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 7e060aa9fbed..53924a4fc31c 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -147,7 +147,7 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
147 [FWNLCR0] = 0x0090, 147 [FWNLCR0] = 0x0090,
148 [FWALCR0] = 0x0094, 148 [FWALCR0] = 0x0094,
149 [TXNLCR1] = 0x00a0, 149 [TXNLCR1] = 0x00a0,
150 [TXALCR1] = 0x00a0, 150 [TXALCR1] = 0x00a4,
151 [RXNLCR1] = 0x00a8, 151 [RXNLCR1] = 0x00a8,
152 [RXALCR1] = 0x00ac, 152 [RXALCR1] = 0x00ac,
153 [FWNLCR1] = 0x00b0, 153 [FWNLCR1] = 0x00b0,
@@ -399,7 +399,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
399 [FWNLCR0] = 0x0090, 399 [FWNLCR0] = 0x0090,
400 [FWALCR0] = 0x0094, 400 [FWALCR0] = 0x0094,
401 [TXNLCR1] = 0x00a0, 401 [TXNLCR1] = 0x00a0,
402 [TXALCR1] = 0x00a0, 402 [TXALCR1] = 0x00a4,
403 [RXNLCR1] = 0x00a8, 403 [RXNLCR1] = 0x00a8,
404 [RXALCR1] = 0x00ac, 404 [RXALCR1] = 0x00ac,
405 [FWNLCR1] = 0x00b0, 405 [FWNLCR1] = 0x00b0,
@@ -1149,7 +1149,8 @@ static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
1149 entry, le32_to_cpu(txdesc->status)); 1149 entry, le32_to_cpu(txdesc->status));
1150 /* Free the original skb. */ 1150 /* Free the original skb. */
1151 if (mdp->tx_skbuff[entry]) { 1151 if (mdp->tx_skbuff[entry]) {
1152 dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr), 1152 dma_unmap_single(&mdp->pdev->dev,
1153 le32_to_cpu(txdesc->addr),
1153 le32_to_cpu(txdesc->len) >> 16, 1154 le32_to_cpu(txdesc->len) >> 16,
1154 DMA_TO_DEVICE); 1155 DMA_TO_DEVICE);
1155 dev_kfree_skb_irq(mdp->tx_skbuff[entry]); 1156 dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
@@ -1179,14 +1180,14 @@ static void sh_eth_ring_free(struct net_device *ndev)
1179 if (mdp->rx_skbuff[i]) { 1180 if (mdp->rx_skbuff[i]) {
1180 struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i]; 1181 struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
1181 1182
1182 dma_unmap_single(&ndev->dev, 1183 dma_unmap_single(&mdp->pdev->dev,
1183 le32_to_cpu(rxdesc->addr), 1184 le32_to_cpu(rxdesc->addr),
1184 ALIGN(mdp->rx_buf_sz, 32), 1185 ALIGN(mdp->rx_buf_sz, 32),
1185 DMA_FROM_DEVICE); 1186 DMA_FROM_DEVICE);
1186 } 1187 }
1187 } 1188 }
1188 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; 1189 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1189 dma_free_coherent(NULL, ringsize, mdp->rx_ring, 1190 dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring,
1190 mdp->rx_desc_dma); 1191 mdp->rx_desc_dma);
1191 mdp->rx_ring = NULL; 1192 mdp->rx_ring = NULL;
1192 } 1193 }
@@ -1203,7 +1204,7 @@ static void sh_eth_ring_free(struct net_device *ndev)
1203 sh_eth_tx_free(ndev, false); 1204 sh_eth_tx_free(ndev, false);
1204 1205
1205 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; 1206 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1206 dma_free_coherent(NULL, ringsize, mdp->tx_ring, 1207 dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring,
1207 mdp->tx_desc_dma); 1208 mdp->tx_desc_dma);
1208 mdp->tx_ring = NULL; 1209 mdp->tx_ring = NULL;
1209 } 1210 }
@@ -1245,9 +1246,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
1245 1246
1246 /* The size of the buffer is a multiple of 32 bytes. */ 1247 /* The size of the buffer is a multiple of 32 bytes. */
1247 buf_len = ALIGN(mdp->rx_buf_sz, 32); 1248 buf_len = ALIGN(mdp->rx_buf_sz, 32);
1248 dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len, 1249 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len,
1249 DMA_FROM_DEVICE); 1250 DMA_FROM_DEVICE);
1250 if (dma_mapping_error(&ndev->dev, dma_addr)) { 1251 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
1251 kfree_skb(skb); 1252 kfree_skb(skb);
1252 break; 1253 break;
1253 } 1254 }
@@ -1323,8 +1324,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
1323 1324
1324 /* Allocate all Rx descriptors. */ 1325 /* Allocate all Rx descriptors. */
1325 rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; 1326 rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1326 mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma, 1327 mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize,
1327 GFP_KERNEL); 1328 &mdp->rx_desc_dma, GFP_KERNEL);
1328 if (!mdp->rx_ring) 1329 if (!mdp->rx_ring)
1329 goto ring_free; 1330 goto ring_free;
1330 1331
@@ -1332,8 +1333,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
1332 1333
1333 /* Allocate all Tx descriptors. */ 1334 /* Allocate all Tx descriptors. */
1334 tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; 1335 tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1335 mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma, 1336 mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize,
1336 GFP_KERNEL); 1337 &mdp->tx_desc_dma, GFP_KERNEL);
1337 if (!mdp->tx_ring) 1338 if (!mdp->tx_ring)
1338 goto ring_free; 1339 goto ring_free;
1339 return 0; 1340 return 0;
@@ -1527,7 +1528,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1527 mdp->rx_skbuff[entry] = NULL; 1528 mdp->rx_skbuff[entry] = NULL;
1528 if (mdp->cd->rpadir) 1529 if (mdp->cd->rpadir)
1529 skb_reserve(skb, NET_IP_ALIGN); 1530 skb_reserve(skb, NET_IP_ALIGN);
1530 dma_unmap_single(&ndev->dev, dma_addr, 1531 dma_unmap_single(&mdp->pdev->dev, dma_addr,
1531 ALIGN(mdp->rx_buf_sz, 32), 1532 ALIGN(mdp->rx_buf_sz, 32),
1532 DMA_FROM_DEVICE); 1533 DMA_FROM_DEVICE);
1533 skb_put(skb, pkt_len); 1534 skb_put(skb, pkt_len);
@@ -1555,9 +1556,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1555 if (skb == NULL) 1556 if (skb == NULL)
1556 break; /* Better luck next round. */ 1557 break; /* Better luck next round. */
1557 sh_eth_set_receive_align(skb); 1558 sh_eth_set_receive_align(skb);
1558 dma_addr = dma_map_single(&ndev->dev, skb->data, 1559 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data,
1559 buf_len, DMA_FROM_DEVICE); 1560 buf_len, DMA_FROM_DEVICE);
1560 if (dma_mapping_error(&ndev->dev, dma_addr)) { 1561 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
1561 kfree_skb(skb); 1562 kfree_skb(skb);
1562 break; 1563 break;
1563 } 1564 }
@@ -1891,6 +1892,16 @@ static int sh_eth_phy_init(struct net_device *ndev)
1891 return PTR_ERR(phydev); 1892 return PTR_ERR(phydev);
1892 } 1893 }
1893 1894
1895 /* mask with MAC supported features */
1896 if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) {
1897 int err = phy_set_max_speed(phydev, SPEED_100);
1898 if (err) {
1899 netdev_err(ndev, "failed to limit PHY to 100 Mbit/s\n");
1900 phy_disconnect(phydev);
1901 return err;
1902 }
1903 }
1904
1894 phy_attached_info(phydev); 1905 phy_attached_info(phydev);
1895 1906
1896 return 0; 1907 return 0;
@@ -2078,8 +2089,8 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
2078 add_reg(CSMR); 2089 add_reg(CSMR);
2079 if (cd->select_mii) 2090 if (cd->select_mii)
2080 add_reg(RMII_MII); 2091 add_reg(RMII_MII);
2081 add_reg(ARSTR);
2082 if (cd->tsu) { 2092 if (cd->tsu) {
2093 add_tsu_reg(ARSTR);
2083 add_tsu_reg(TSU_CTRST); 2094 add_tsu_reg(TSU_CTRST);
2084 add_tsu_reg(TSU_FWEN0); 2095 add_tsu_reg(TSU_FWEN0);
2085 add_tsu_reg(TSU_FWEN1); 2096 add_tsu_reg(TSU_FWEN1);
@@ -2441,9 +2452,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2441 /* soft swap. */ 2452 /* soft swap. */
2442 if (!mdp->cd->hw_swap) 2453 if (!mdp->cd->hw_swap)
2443 sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2); 2454 sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
2444 dma_addr = dma_map_single(&ndev->dev, skb->data, skb->len, 2455 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len,
2445 DMA_TO_DEVICE); 2456 DMA_TO_DEVICE);
2446 if (dma_mapping_error(&ndev->dev, dma_addr)) { 2457 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
2447 kfree_skb(skb); 2458 kfree_skb(skb);
2448 return NETDEV_TX_OK; 2459 return NETDEV_TX_OK;
2449 } 2460 }
@@ -3214,18 +3225,37 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
3214 /* ioremap the TSU registers */ 3225 /* ioremap the TSU registers */
3215 if (mdp->cd->tsu) { 3226 if (mdp->cd->tsu) {
3216 struct resource *rtsu; 3227 struct resource *rtsu;
3228
3217 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); 3229 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
3218 mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu); 3230 if (!rtsu) {
3219 if (IS_ERR(mdp->tsu_addr)) { 3231 dev_err(&pdev->dev, "no TSU resource\n");
3220 ret = PTR_ERR(mdp->tsu_addr); 3232 ret = -ENODEV;
3233 goto out_release;
3234 }
3235 /* We can only request the TSU region for the first port
3236 * of the two sharing this TSU for the probe to succeed...
3237 */
3238 if (devno % 2 == 0 &&
3239 !devm_request_mem_region(&pdev->dev, rtsu->start,
3240 resource_size(rtsu),
3241 dev_name(&pdev->dev))) {
3242 dev_err(&pdev->dev, "can't request TSU resource.\n");
3243 ret = -EBUSY;
3244 goto out_release;
3245 }
3246 mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
3247 resource_size(rtsu));
3248 if (!mdp->tsu_addr) {
3249 dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
3250 ret = -ENOMEM;
3221 goto out_release; 3251 goto out_release;
3222 } 3252 }
3223 mdp->port = devno % 2; 3253 mdp->port = devno % 2;
3224 ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER; 3254 ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
3225 } 3255 }
3226 3256
3227 /* initialize first or needed device */ 3257 /* Need to init only the first port of the two sharing a TSU */
3228 if (!devno || pd->needs_init) { 3258 if (devno % 2 == 0) {
3229 if (mdp->cd->chip_reset) 3259 if (mdp->cd->chip_reset)
3230 mdp->cd->chip_reset(ndev); 3260 mdp->cd->chip_reset(ndev);
3231 3261
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 0ea7e16f2e6e..9937a2450e57 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -77,6 +77,7 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
77 } 77 }
78 78
79 if (buffer->flags & EFX_TX_BUF_SKB) { 79 if (buffer->flags & EFX_TX_BUF_SKB) {
80 EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
80 (*pkts_compl)++; 81 (*pkts_compl)++;
81 (*bytes_compl) += buffer->skb->len; 82 (*bytes_compl) += buffer->skb->len;
82 dev_consume_skb_any((struct sk_buff *)buffer->skb); 83 dev_consume_skb_any((struct sk_buff *)buffer->skb);
@@ -426,12 +427,14 @@ static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
426static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) 427static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
427{ 428{
428 struct efx_tx_buffer *buffer; 429 struct efx_tx_buffer *buffer;
430 unsigned int bytes_compl = 0;
431 unsigned int pkts_compl = 0;
429 432
430 /* Work backwards until we hit the original insert pointer value */ 433 /* Work backwards until we hit the original insert pointer value */
431 while (tx_queue->insert_count != tx_queue->write_count) { 434 while (tx_queue->insert_count != tx_queue->write_count) {
432 --tx_queue->insert_count; 435 --tx_queue->insert_count;
433 buffer = __efx_tx_queue_get_insert_buffer(tx_queue); 436 buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
434 efx_dequeue_buffer(tx_queue, buffer, NULL, NULL); 437 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
435 } 438 }
436} 439}
437 440
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index e1e5ac053760..ce2ea2d491ac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -409,7 +409,7 @@ struct stmmac_desc_ops {
409 /* get timestamp value */ 409 /* get timestamp value */
410 u64(*get_timestamp) (void *desc, u32 ats); 410 u64(*get_timestamp) (void *desc, u32 ats);
411 /* get rx timestamp status */ 411 /* get rx timestamp status */
412 int (*get_rx_timestamp_status) (void *desc, u32 ats); 412 int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats);
413 /* Display ring */ 413 /* Display ring */
414 void (*display_ring)(void *head, unsigned int size, bool rx); 414 void (*display_ring)(void *head, unsigned int size, bool rx);
415 /* set MSS via context descriptor */ 415 /* set MSS via context descriptor */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index e5ff734d4f9b..9eb7f65d8000 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -808,8 +808,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
808 val, reg); 808 val, reg);
809 809
810 if (gmac->variant->soc_has_internal_phy) { 810 if (gmac->variant->soc_has_internal_phy) {
811 if (of_property_read_bool(priv->plat->phy_node, 811 if (of_property_read_bool(node, "allwinner,leds-active-low"))
812 "allwinner,leds-active-low"))
813 reg |= H3_EPHY_LED_POL; 812 reg |= H3_EPHY_LED_POL;
814 else 813 else
815 reg &= ~H3_EPHY_LED_POL; 814 reg &= ~H3_EPHY_LED_POL;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 4b286e27c4ca..7e089bf906b4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -258,7 +258,8 @@ static int dwmac4_rx_check_timestamp(void *desc)
258 return ret; 258 return ret;
259} 259}
260 260
261static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats) 261static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc,
262 u32 ats)
262{ 263{
263 struct dma_desc *p = (struct dma_desc *)desc; 264 struct dma_desc *p = (struct dma_desc *)desc;
264 int ret = -EINVAL; 265 int ret = -EINVAL;
@@ -270,7 +271,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
270 271
271 /* Check if timestamp is OK from context descriptor */ 272 /* Check if timestamp is OK from context descriptor */
272 do { 273 do {
273 ret = dwmac4_rx_check_timestamp(desc); 274 ret = dwmac4_rx_check_timestamp(next_desc);
274 if (ret < 0) 275 if (ret < 0)
275 goto exit; 276 goto exit;
276 i++; 277 i++;
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 7546b3664113..2a828a312814 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -400,7 +400,8 @@ static u64 enh_desc_get_timestamp(void *desc, u32 ats)
400 return ns; 400 return ns;
401} 401}
402 402
403static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats) 403static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc,
404 u32 ats)
404{ 405{
405 if (ats) { 406 if (ats) {
406 struct dma_extended_desc *p = (struct dma_extended_desc *)desc; 407 struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index f817f8f36569..db4cee57bb24 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -265,7 +265,7 @@ static u64 ndesc_get_timestamp(void *desc, u32 ats)
265 return ns; 265 return ns;
266} 266}
267 267
268static int ndesc_get_rx_timestamp_status(void *desc, u32 ats) 268static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats)
269{ 269{
270 struct dma_desc *p = (struct dma_desc *)desc; 270 struct dma_desc *p = (struct dma_desc *)desc;
271 271
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 721b61655261..08c19ebd5306 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -34,6 +34,7 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
34{ 34{
35 u32 value = readl(ioaddr + PTP_TCR); 35 u32 value = readl(ioaddr + PTP_TCR);
36 unsigned long data; 36 unsigned long data;
37 u32 reg_value;
37 38
38 /* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second 39 /* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second
39 * formula = (1/ptp_clock) * 1000000000 40 * formula = (1/ptp_clock) * 1000000000
@@ -50,10 +51,11 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
50 51
51 data &= PTP_SSIR_SSINC_MASK; 52 data &= PTP_SSIR_SSINC_MASK;
52 53
54 reg_value = data;
53 if (gmac4) 55 if (gmac4)
54 data = data << GMAC4_PTP_SSIR_SSINC_SHIFT; 56 reg_value <<= GMAC4_PTP_SSIR_SSINC_SHIFT;
55 57
56 writel(data, ioaddr + PTP_SSIR); 58 writel(reg_value, ioaddr + PTP_SSIR);
57 59
58 return data; 60 return data;
59} 61}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index f63c2ddced3c..c0af0bc4e714 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -364,9 +364,15 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
364bool stmmac_eee_init(struct stmmac_priv *priv) 364bool stmmac_eee_init(struct stmmac_priv *priv)
365{ 365{
366 struct net_device *ndev = priv->dev; 366 struct net_device *ndev = priv->dev;
367 int interface = priv->plat->interface;
367 unsigned long flags; 368 unsigned long flags;
368 bool ret = false; 369 bool ret = false;
369 370
371 if ((interface != PHY_INTERFACE_MODE_MII) &&
372 (interface != PHY_INTERFACE_MODE_GMII) &&
373 !phy_interface_mode_is_rgmii(interface))
374 goto out;
375
370 /* Using PCS we cannot dial with the phy registers at this stage 376 /* Using PCS we cannot dial with the phy registers at this stage
371 * so we do not support extra feature like EEE. 377 * so we do not support extra feature like EEE.
372 */ 378 */
@@ -482,7 +488,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
482 desc = np; 488 desc = np;
483 489
484 /* Check if timestamp is available */ 490 /* Check if timestamp is available */
485 if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) { 491 if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) {
486 ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); 492 ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
487 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); 493 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
488 shhwtstamp = skb_hwtstamps(skb); 494 shhwtstamp = skb_hwtstamps(skb);
@@ -2588,6 +2594,7 @@ static int stmmac_open(struct net_device *dev)
2588 2594
2589 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); 2595 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2590 priv->rx_copybreak = STMMAC_RX_COPYBREAK; 2596 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2597 priv->mss = 0;
2591 2598
2592 ret = alloc_dma_desc_resources(priv); 2599 ret = alloc_dma_desc_resources(priv);
2593 if (ret < 0) { 2600 if (ret < 0) {
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index ed58c746e4af..f5a7eb22d0f5 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -715,7 +715,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
715 /* warning!!!! We are retrieving the virtual ptr in the sw_data 715 /* warning!!!! We are retrieving the virtual ptr in the sw_data
716 * field as a 32bit value. Will not work on 64bit machines 716 * field as a 32bit value. Will not work on 64bit machines
717 */ 717 */
718 page = (struct page *)GET_SW_DATA0(desc); 718 page = (struct page *)GET_SW_DATA0(ndesc);
719 719
720 if (likely(dma_buff && buf_len && page)) { 720 if (likely(dma_buff && buf_len && page)) {
721 dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE, 721 dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index b718a02a6bb6..64fda2e1040e 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -825,6 +825,13 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
825 if (IS_ERR(rt)) 825 if (IS_ERR(rt))
826 return PTR_ERR(rt); 826 return PTR_ERR(rt);
827 827
828 if (skb_dst(skb)) {
829 int mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr) -
830 GENEVE_BASE_HLEN - info->options_len - 14;
831
832 skb_dst_update_pmtu(skb, mtu);
833 }
834
828 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); 835 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
829 if (geneve->collect_md) { 836 if (geneve->collect_md) {
830 tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); 837 tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
@@ -864,6 +871,13 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
864 if (IS_ERR(dst)) 871 if (IS_ERR(dst))
865 return PTR_ERR(dst); 872 return PTR_ERR(dst);
866 873
874 if (skb_dst(skb)) {
875 int mtu = dst_mtu(dst) - sizeof(struct ipv6hdr) -
876 GENEVE_BASE_HLEN - info->options_len - 14;
877
878 skb_dst_update_pmtu(skb, mtu);
879 }
880
867 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); 881 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
868 if (geneve->collect_md) { 882 if (geneve->collect_md) {
869 prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); 883 prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 8483f03d5a41..1ab97d99b9ba 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1379,8 +1379,8 @@ static int rr_close(struct net_device *dev)
1379 rrpriv->info_dma); 1379 rrpriv->info_dma);
1380 rrpriv->info = NULL; 1380 rrpriv->info = NULL;
1381 1381
1382 free_irq(pdev->irq, dev);
1383 spin_unlock_irqrestore(&rrpriv->lock, flags); 1382 spin_unlock_irqrestore(&rrpriv->lock, flags);
1383 free_irq(pdev->irq, dev);
1384 1384
1385 return 0; 1385 return 0;
1386} 1386}
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 11c1e7950fe5..77cc4fbaeace 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -393,6 +393,7 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
393 .flowi4_oif = dev->ifindex, 393 .flowi4_oif = dev->ifindex,
394 .flowi4_tos = RT_TOS(ip4h->tos), 394 .flowi4_tos = RT_TOS(ip4h->tos),
395 .flowi4_flags = FLOWI_FLAG_ANYSRC, 395 .flowi4_flags = FLOWI_FLAG_ANYSRC,
396 .flowi4_mark = skb->mark,
396 .daddr = ip4h->daddr, 397 .daddr = ip4h->daddr,
397 .saddr = ip4h->saddr, 398 .saddr = ip4h->saddr,
398 }; 399 };
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index a178c5efd33e..a0f2be81d52e 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1444,9 +1444,14 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
1444 return 0; 1444 return 0;
1445 1445
1446unregister_netdev: 1446unregister_netdev:
1447 /* macvlan_uninit would free the macvlan port */
1447 unregister_netdevice(dev); 1448 unregister_netdevice(dev);
1449 return err;
1448destroy_macvlan_port: 1450destroy_macvlan_port:
1449 if (create) 1451 /* the macvlan port may be freed by macvlan_uninit when fail to register.
1452 * so we destroy the macvlan port only when it's valid.
1453 */
1454 if (create && macvlan_port_get_rtnl(dev))
1450 macvlan_port_destroy(port->dev); 1455 macvlan_port_destroy(port->dev);
1451 return err; 1456 return err;
1452} 1457}
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 5f93e6add563..e911e4990b20 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -239,14 +239,10 @@ static int at803x_resume(struct phy_device *phydev)
239{ 239{
240 int value; 240 int value;
241 241
242 mutex_lock(&phydev->lock);
243
244 value = phy_read(phydev, MII_BMCR); 242 value = phy_read(phydev, MII_BMCR);
245 value &= ~(BMCR_PDOWN | BMCR_ISOLATE); 243 value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
246 phy_write(phydev, MII_BMCR, value); 244 phy_write(phydev, MII_BMCR, value);
247 245
248 mutex_unlock(&phydev->lock);
249
250 return 0; 246 return 0;
251} 247}
252 248
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 4d02b27df044..82104edca393 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -637,6 +637,10 @@ static int m88e1510_config_aneg(struct phy_device *phydev)
637 if (err < 0) 637 if (err < 0)
638 goto error; 638 goto error;
639 639
640 /* Do not touch the fiber page if we're in copper->sgmii mode */
641 if (phydev->interface == PHY_INTERFACE_MODE_SGMII)
642 return 0;
643
640 /* Then the fiber link */ 644 /* Then the fiber link */
641 err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE); 645 err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
642 if (err < 0) 646 if (err < 0)
@@ -875,6 +879,8 @@ static int m88e1510_config_init(struct phy_device *phydev)
875 879
876 /* SGMII-to-Copper mode initialization */ 880 /* SGMII-to-Copper mode initialization */
877 if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { 881 if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
882 u32 pause;
883
878 /* Select page 18 */ 884 /* Select page 18 */
879 err = marvell_set_page(phydev, 18); 885 err = marvell_set_page(phydev, 18);
880 if (err < 0) 886 if (err < 0)
@@ -898,6 +904,16 @@ static int m88e1510_config_init(struct phy_device *phydev)
898 err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); 904 err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
899 if (err < 0) 905 if (err < 0)
900 return err; 906 return err;
907
908 /* There appears to be a bug in the 88e1512 when used in
909 * SGMII to copper mode, where the AN advertisment register
910 * clears the pause bits each time a negotiation occurs.
911 * This means we can never be truely sure what was advertised,
912 * so disable Pause support.
913 */
914 pause = SUPPORTED_Pause | SUPPORTED_Asym_Pause;
915 phydev->supported &= ~pause;
916 phydev->advertising &= ~pause;
901 } 917 }
902 918
903 return m88e1121_config_init(phydev); 919 return m88e1121_config_init(phydev);
@@ -2069,7 +2085,7 @@ static struct phy_driver marvell_drivers[] = {
2069 .flags = PHY_HAS_INTERRUPT, 2085 .flags = PHY_HAS_INTERRUPT,
2070 .probe = marvell_probe, 2086 .probe = marvell_probe,
2071 .config_init = &m88e1145_config_init, 2087 .config_init = &m88e1145_config_init,
2072 .config_aneg = &marvell_config_aneg, 2088 .config_aneg = &m88e1101_config_aneg,
2073 .read_status = &genphy_read_status, 2089 .read_status = &genphy_read_status,
2074 .ack_interrupt = &marvell_ack_interrupt, 2090 .ack_interrupt = &marvell_ack_interrupt,
2075 .config_intr = &marvell_config_intr, 2091 .config_intr = &marvell_config_intr,
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 135296508a7e..6425ce04d3f9 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -118,8 +118,10 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
118 118
119 data->regulator = devm_regulator_get(&pdev->dev, "phy"); 119 data->regulator = devm_regulator_get(&pdev->dev, "phy");
120 if (IS_ERR(data->regulator)) { 120 if (IS_ERR(data->regulator)) {
121 if (PTR_ERR(data->regulator) == -EPROBE_DEFER) 121 if (PTR_ERR(data->regulator) == -EPROBE_DEFER) {
122 return -EPROBE_DEFER; 122 ret = -EPROBE_DEFER;
123 goto err_out_free_mdiobus;
124 }
123 125
124 dev_info(&pdev->dev, "no regulator found\n"); 126 dev_info(&pdev->dev, "no regulator found\n");
125 data->regulator = NULL; 127 data->regulator = NULL;
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c
index bfd3090fb055..07c6048200c6 100644
--- a/drivers/net/phy/mdio-xgene.c
+++ b/drivers/net/phy/mdio-xgene.c
@@ -194,8 +194,11 @@ static int xgene_mdio_reset(struct xgene_mdio_pdata *pdata)
194 } 194 }
195 195
196 ret = xgene_enet_ecc_init(pdata); 196 ret = xgene_enet_ecc_init(pdata);
197 if (ret) 197 if (ret) {
198 if (pdata->dev->of_node)
199 clk_disable_unprepare(pdata->clk);
198 return ret; 200 return ret;
201 }
199 xgene_gmac_reset(pdata); 202 xgene_gmac_reset(pdata);
200 203
201 return 0; 204 return 0;
@@ -388,8 +391,10 @@ static int xgene_mdio_probe(struct platform_device *pdev)
388 return ret; 391 return ret;
389 392
390 mdio_bus = mdiobus_alloc(); 393 mdio_bus = mdiobus_alloc();
391 if (!mdio_bus) 394 if (!mdio_bus) {
392 return -ENOMEM; 395 ret = -ENOMEM;
396 goto out_clk;
397 }
393 398
394 mdio_bus->name = "APM X-Gene MDIO bus"; 399 mdio_bus->name = "APM X-Gene MDIO bus";
395 400
@@ -418,7 +423,7 @@ static int xgene_mdio_probe(struct platform_device *pdev)
418 mdio_bus->phy_mask = ~0; 423 mdio_bus->phy_mask = ~0;
419 ret = mdiobus_register(mdio_bus); 424 ret = mdiobus_register(mdio_bus);
420 if (ret) 425 if (ret)
421 goto out; 426 goto out_mdiobus;
422 427
423 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_HANDLE(dev), 1, 428 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_HANDLE(dev), 1,
424 acpi_register_phy, NULL, mdio_bus, NULL); 429 acpi_register_phy, NULL, mdio_bus, NULL);
@@ -426,16 +431,20 @@ static int xgene_mdio_probe(struct platform_device *pdev)
426 } 431 }
427 432
428 if (ret) 433 if (ret)
429 goto out; 434 goto out_mdiobus;
430 435
431 pdata->mdio_bus = mdio_bus; 436 pdata->mdio_bus = mdio_bus;
432 xgene_mdio_status = true; 437 xgene_mdio_status = true;
433 438
434 return 0; 439 return 0;
435 440
436out: 441out_mdiobus:
437 mdiobus_free(mdio_bus); 442 mdiobus_free(mdio_bus);
438 443
444out_clk:
445 if (dev->of_node)
446 clk_disable_unprepare(pdata->clk);
447
439 return ret; 448 return ret;
440} 449}
441 450
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 2df7b62c1a36..54d00a1d2bef 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -270,6 +270,7 @@ static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
270 270
271 if (addr == mdiodev->addr) { 271 if (addr == mdiodev->addr) {
272 dev->of_node = child; 272 dev->of_node = child;
273 dev->fwnode = of_fwnode_handle(child);
273 return; 274 return;
274 } 275 }
275 } 276 }
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index 1ea69b7585d9..842eb871a6e3 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -22,6 +22,7 @@
22#include <linux/ethtool.h> 22#include <linux/ethtool.h>
23#include <linux/phy.h> 23#include <linux/phy.h>
24#include <linux/netdevice.h> 24#include <linux/netdevice.h>
25#include <linux/bitfield.h>
25 26
26static int meson_gxl_config_init(struct phy_device *phydev) 27static int meson_gxl_config_init(struct phy_device *phydev)
27{ 28{
@@ -50,6 +51,77 @@ static int meson_gxl_config_init(struct phy_device *phydev)
50 return 0; 51 return 0;
51} 52}
52 53
54/* This function is provided to cope with the possible failures of this phy
55 * during aneg process. When aneg fails, the PHY reports that aneg is done
56 * but the value found in MII_LPA is wrong:
57 * - Early failures: MII_LPA is just 0x0001. if MII_EXPANSION reports that
58 * the link partner (LP) supports aneg but the LP never acked our base
59 * code word, it is likely that we never sent it to begin with.
60 * - Late failures: MII_LPA is filled with a value which seems to make sense
61 * but it actually is not what the LP is advertising. It seems that we
62 * can detect this using a magic bit in the WOL bank (reg 12 - bit 12).
63 * If this particular bit is not set when aneg is reported being done,
64 * it means MII_LPA is likely to be wrong.
65 *
66 * In both case, forcing a restart of the aneg process solve the problem.
67 * When this failure happens, the first retry is usually successful but,
68 * in some cases, it may take up to 6 retries to get a decent result
69 */
70static int meson_gxl_read_status(struct phy_device *phydev)
71{
72 int ret, wol, lpa, exp;
73
74 if (phydev->autoneg == AUTONEG_ENABLE) {
75 ret = genphy_aneg_done(phydev);
76 if (ret < 0)
77 return ret;
78 else if (!ret)
79 goto read_status_continue;
80
81 /* Need to access WOL bank, make sure the access is open */
82 ret = phy_write(phydev, 0x14, 0x0000);
83 if (ret)
84 return ret;
85 ret = phy_write(phydev, 0x14, 0x0400);
86 if (ret)
87 return ret;
88 ret = phy_write(phydev, 0x14, 0x0000);
89 if (ret)
90 return ret;
91 ret = phy_write(phydev, 0x14, 0x0400);
92 if (ret)
93 return ret;
94
95 /* Request LPI_STATUS WOL register */
96 ret = phy_write(phydev, 0x14, 0x8D80);
97 if (ret)
98 return ret;
99
100 /* Read LPI_STATUS value */
101 wol = phy_read(phydev, 0x15);
102 if (wol < 0)
103 return wol;
104
105 lpa = phy_read(phydev, MII_LPA);
106 if (lpa < 0)
107 return lpa;
108
109 exp = phy_read(phydev, MII_EXPANSION);
110 if (exp < 0)
111 return exp;
112
113 if (!(wol & BIT(12)) ||
114 ((exp & EXPANSION_NWAY) && !(lpa & LPA_LPACK))) {
115 /* Looks like aneg failed after all */
116 phydev_dbg(phydev, "LPA corruption - aneg restart\n");
117 return genphy_restart_aneg(phydev);
118 }
119 }
120
121read_status_continue:
122 return genphy_read_status(phydev);
123}
124
53static struct phy_driver meson_gxl_phy[] = { 125static struct phy_driver meson_gxl_phy[] = {
54 { 126 {
55 .phy_id = 0x01814400, 127 .phy_id = 0x01814400,
@@ -60,7 +132,7 @@ static struct phy_driver meson_gxl_phy[] = {
60 .config_init = meson_gxl_config_init, 132 .config_init = meson_gxl_config_init,
61 .config_aneg = genphy_config_aneg, 133 .config_aneg = genphy_config_aneg,
62 .aneg_done = genphy_aneg_done, 134 .aneg_done = genphy_aneg_done,
63 .read_status = genphy_read_status, 135 .read_status = meson_gxl_read_status,
64 .suspend = genphy_suspend, 136 .suspend = genphy_suspend,
65 .resume = genphy_resume, 137 .resume = genphy_resume,
66 }, 138 },
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index fdb43dd9b5cd..422ff6333c52 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -496,16 +496,18 @@ static int ksz9031_of_load_skew_values(struct phy_device *phydev,
496 return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval); 496 return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
497} 497}
498 498
499/* Center KSZ9031RNX FLP timing at 16ms. */
499static int ksz9031_center_flp_timing(struct phy_device *phydev) 500static int ksz9031_center_flp_timing(struct phy_device *phydev)
500{ 501{
501 int result; 502 int result;
502 503
503 /* Center KSZ9031RNX FLP timing at 16ms. */
504 result = ksz9031_extended_write(phydev, OP_DATA, 0, 504 result = ksz9031_extended_write(phydev, OP_DATA, 0,
505 MII_KSZ9031RN_FLP_BURST_TX_HI, 0x0006); 505 MII_KSZ9031RN_FLP_BURST_TX_HI, 0x0006);
506 if (result)
507 return result;
508
506 result = ksz9031_extended_write(phydev, OP_DATA, 0, 509 result = ksz9031_extended_write(phydev, OP_DATA, 0,
507 MII_KSZ9031RN_FLP_BURST_TX_LO, 0x1A80); 510 MII_KSZ9031RN_FLP_BURST_TX_LO, 0x1A80);
508
509 if (result) 511 if (result)
510 return result; 512 return result;
511 513
@@ -622,6 +624,7 @@ static int ksz9031_read_status(struct phy_device *phydev)
622 phydev->link = 0; 624 phydev->link = 0;
623 if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev)) 625 if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
624 phydev->drv->config_intr(phydev); 626 phydev->drv->config_intr(phydev);
627 return genphy_config_aneg(phydev);
625 } 628 }
626 629
627 return 0; 630 return 0;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 2b1e67bc1e73..ed10d1fc8f59 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -828,7 +828,6 @@ EXPORT_SYMBOL(phy_stop);
828 */ 828 */
829void phy_start(struct phy_device *phydev) 829void phy_start(struct phy_device *phydev)
830{ 830{
831 bool do_resume = false;
832 int err = 0; 831 int err = 0;
833 832
834 mutex_lock(&phydev->lock); 833 mutex_lock(&phydev->lock);
@@ -841,6 +840,9 @@ void phy_start(struct phy_device *phydev)
841 phydev->state = PHY_UP; 840 phydev->state = PHY_UP;
842 break; 841 break;
843 case PHY_HALTED: 842 case PHY_HALTED:
843 /* if phy was suspended, bring the physical link up again */
844 phy_resume(phydev);
845
844 /* make sure interrupts are re-enabled for the PHY */ 846 /* make sure interrupts are re-enabled for the PHY */
845 if (phydev->irq != PHY_POLL) { 847 if (phydev->irq != PHY_POLL) {
846 err = phy_enable_interrupts(phydev); 848 err = phy_enable_interrupts(phydev);
@@ -849,17 +851,12 @@ void phy_start(struct phy_device *phydev)
849 } 851 }
850 852
851 phydev->state = PHY_RESUMING; 853 phydev->state = PHY_RESUMING;
852 do_resume = true;
853 break; 854 break;
854 default: 855 default:
855 break; 856 break;
856 } 857 }
857 mutex_unlock(&phydev->lock); 858 mutex_unlock(&phydev->lock);
858 859
859 /* if phy was suspended, bring the physical link up again */
860 if (do_resume)
861 phy_resume(phydev);
862
863 phy_trigger_machine(phydev, true); 860 phy_trigger_machine(phydev, true);
864} 861}
865EXPORT_SYMBOL(phy_start); 862EXPORT_SYMBOL(phy_start);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 67f25ac29025..b15b31ca2618 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -135,7 +135,9 @@ static int mdio_bus_phy_resume(struct device *dev)
135 if (!mdio_bus_phy_may_suspend(phydev)) 135 if (!mdio_bus_phy_may_suspend(phydev))
136 goto no_resume; 136 goto no_resume;
137 137
138 mutex_lock(&phydev->lock);
138 ret = phy_resume(phydev); 139 ret = phy_resume(phydev);
140 mutex_unlock(&phydev->lock);
139 if (ret < 0) 141 if (ret < 0)
140 return ret; 142 return ret;
141 143
@@ -1026,7 +1028,9 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
1026 if (err) 1028 if (err)
1027 goto error; 1029 goto error;
1028 1030
1031 mutex_lock(&phydev->lock);
1029 phy_resume(phydev); 1032 phy_resume(phydev);
1033 mutex_unlock(&phydev->lock);
1030 phy_led_triggers_register(phydev); 1034 phy_led_triggers_register(phydev);
1031 1035
1032 return err; 1036 return err;
@@ -1157,6 +1161,8 @@ int phy_resume(struct phy_device *phydev)
1157 struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); 1161 struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
1158 int ret = 0; 1162 int ret = 0;
1159 1163
1164 WARN_ON(!mutex_is_locked(&phydev->lock));
1165
1160 if (phydev->drv && phydrv->resume) 1166 if (phydev->drv && phydrv->resume)
1161 ret = phydrv->resume(phydev); 1167 ret = phydrv->resume(phydev);
1162 1168
@@ -1639,13 +1645,9 @@ int genphy_resume(struct phy_device *phydev)
1639{ 1645{
1640 int value; 1646 int value;
1641 1647
1642 mutex_lock(&phydev->lock);
1643
1644 value = phy_read(phydev, MII_BMCR); 1648 value = phy_read(phydev, MII_BMCR);
1645 phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); 1649 phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
1646 1650
1647 mutex_unlock(&phydev->lock);
1648
1649 return 0; 1651 return 0;
1650} 1652}
1651EXPORT_SYMBOL(genphy_resume); 1653EXPORT_SYMBOL(genphy_resume);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index e3bbc70372d3..249ce5cbea22 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -526,6 +526,7 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
526 pl->link_config.pause = MLO_PAUSE_AN; 526 pl->link_config.pause = MLO_PAUSE_AN;
527 pl->link_config.speed = SPEED_UNKNOWN; 527 pl->link_config.speed = SPEED_UNKNOWN;
528 pl->link_config.duplex = DUPLEX_UNKNOWN; 528 pl->link_config.duplex = DUPLEX_UNKNOWN;
529 pl->link_config.an_enabled = true;
529 pl->ops = ops; 530 pl->ops = ops;
530 __set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); 531 __set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
531 532
@@ -773,6 +774,7 @@ void phylink_stop(struct phylink *pl)
773 sfp_upstream_stop(pl->sfp_bus); 774 sfp_upstream_stop(pl->sfp_bus);
774 775
775 set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); 776 set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
777 queue_work(system_power_efficient_wq, &pl->resolve);
776 flush_work(&pl->resolve); 778 flush_work(&pl->resolve);
777} 779}
778EXPORT_SYMBOL_GPL(phylink_stop); 780EXPORT_SYMBOL_GPL(phylink_stop);
@@ -950,6 +952,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
950 mutex_lock(&pl->state_mutex); 952 mutex_lock(&pl->state_mutex);
951 /* Configure the MAC to match the new settings */ 953 /* Configure the MAC to match the new settings */
952 linkmode_copy(pl->link_config.advertising, our_kset.link_modes.advertising); 954 linkmode_copy(pl->link_config.advertising, our_kset.link_modes.advertising);
955 pl->link_config.interface = config.interface;
953 pl->link_config.speed = our_kset.base.speed; 956 pl->link_config.speed = our_kset.base.speed;
954 pl->link_config.duplex = our_kset.base.duplex; 957 pl->link_config.duplex = our_kset.base.duplex;
955 pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE; 958 pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE;
@@ -1293,6 +1296,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
1293 switch (cmd) { 1296 switch (cmd) {
1294 case SIOCGMIIPHY: 1297 case SIOCGMIIPHY:
1295 mii->phy_id = pl->phydev->mdio.addr; 1298 mii->phy_id = pl->phydev->mdio.addr;
1299 /* fall through */
1296 1300
1297 case SIOCGMIIREG: 1301 case SIOCGMIIREG:
1298 ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num); 1302 ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num);
@@ -1315,6 +1319,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
1315 switch (cmd) { 1319 switch (cmd) {
1316 case SIOCGMIIPHY: 1320 case SIOCGMIIPHY:
1317 mii->phy_id = 0; 1321 mii->phy_id = 0;
1322 /* fall through */
1318 1323
1319 case SIOCGMIIREG: 1324 case SIOCGMIIREG:
1320 ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num); 1325 ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num);
@@ -1426,9 +1431,8 @@ static void phylink_sfp_link_down(void *upstream)
1426 WARN_ON(!lockdep_rtnl_is_held()); 1431 WARN_ON(!lockdep_rtnl_is_held());
1427 1432
1428 set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); 1433 set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
1434 queue_work(system_power_efficient_wq, &pl->resolve);
1429 flush_work(&pl->resolve); 1435 flush_work(&pl->resolve);
1430
1431 netif_carrier_off(pl->netdev);
1432} 1436}
1433 1437
1434static void phylink_sfp_link_up(void *upstream) 1438static void phylink_sfp_link_up(void *upstream)
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 8a1b1f4c1b7c..ab64a142b832 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -356,7 +356,8 @@ EXPORT_SYMBOL_GPL(sfp_register_upstream);
356void sfp_unregister_upstream(struct sfp_bus *bus) 356void sfp_unregister_upstream(struct sfp_bus *bus)
357{ 357{
358 rtnl_lock(); 358 rtnl_lock();
359 sfp_unregister_bus(bus); 359 if (bus->sfp)
360 sfp_unregister_bus(bus);
360 bus->upstream = NULL; 361 bus->upstream = NULL;
361 bus->netdev = NULL; 362 bus->netdev = NULL;
362 rtnl_unlock(); 363 rtnl_unlock();
@@ -459,7 +460,8 @@ EXPORT_SYMBOL_GPL(sfp_register_socket);
459void sfp_unregister_socket(struct sfp_bus *bus) 460void sfp_unregister_socket(struct sfp_bus *bus)
460{ 461{
461 rtnl_lock(); 462 rtnl_lock();
462 sfp_unregister_bus(bus); 463 if (bus->netdev)
464 sfp_unregister_bus(bus);
463 bus->sfp_dev = NULL; 465 bus->sfp_dev = NULL;
464 bus->sfp = NULL; 466 bus->sfp = NULL;
465 bus->socket_ops = NULL; 467 bus->socket_ops = NULL;
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index e381811e5f11..9dfc1c4c954f 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -351,12 +351,13 @@ static void sfp_sm_link_check_los(struct sfp *sfp)
351{ 351{
352 unsigned int los = sfp->state & SFP_F_LOS; 352 unsigned int los = sfp->state & SFP_F_LOS;
353 353
354 /* FIXME: what if neither SFP_OPTIONS_LOS_INVERTED nor 354 /* If neither SFP_OPTIONS_LOS_INVERTED nor SFP_OPTIONS_LOS_NORMAL
355 * SFP_OPTIONS_LOS_NORMAL are set? For now, we assume 355 * are set, we assume that no LOS signal is available.
356 * the same as SFP_OPTIONS_LOS_NORMAL set.
357 */ 356 */
358 if (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED) 357 if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED))
359 los ^= SFP_F_LOS; 358 los ^= SFP_F_LOS;
359 else if (!(sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL)))
360 los = 0;
360 361
361 if (los) 362 if (los)
362 sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0); 363 sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);
@@ -364,6 +365,22 @@ static void sfp_sm_link_check_los(struct sfp *sfp)
364 sfp_sm_link_up(sfp); 365 sfp_sm_link_up(sfp);
365} 366}
366 367
368static bool sfp_los_event_active(struct sfp *sfp, unsigned int event)
369{
370 return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
371 event == SFP_E_LOS_LOW) ||
372 (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
373 event == SFP_E_LOS_HIGH);
374}
375
376static bool sfp_los_event_inactive(struct sfp *sfp, unsigned int event)
377{
378 return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
379 event == SFP_E_LOS_HIGH) ||
380 (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
381 event == SFP_E_LOS_LOW);
382}
383
367static void sfp_sm_fault(struct sfp *sfp, bool warn) 384static void sfp_sm_fault(struct sfp *sfp, bool warn)
368{ 385{
369 if (sfp->sm_retries && !--sfp->sm_retries) { 386 if (sfp->sm_retries && !--sfp->sm_retries) {
@@ -470,6 +487,11 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
470 return -EINVAL; 487 return -EINVAL;
471 } 488 }
472 489
490 /* If the module requires address swap mode, warn about it */
491 if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)
492 dev_warn(sfp->dev,
493 "module address swap to access page 0xA2 is not supported.\n");
494
473 return sfp_module_insert(sfp->sfp_bus, &sfp->id); 495 return sfp_module_insert(sfp->sfp_bus, &sfp->id);
474} 496}
475 497
@@ -581,9 +603,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
581 case SFP_S_WAIT_LOS: 603 case SFP_S_WAIT_LOS:
582 if (event == SFP_E_TX_FAULT) 604 if (event == SFP_E_TX_FAULT)
583 sfp_sm_fault(sfp, true); 605 sfp_sm_fault(sfp, true);
584 else if (event == 606 else if (sfp_los_event_inactive(sfp, event))
585 (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ?
586 SFP_E_LOS_HIGH : SFP_E_LOS_LOW))
587 sfp_sm_link_up(sfp); 607 sfp_sm_link_up(sfp);
588 break; 608 break;
589 609
@@ -591,9 +611,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
591 if (event == SFP_E_TX_FAULT) { 611 if (event == SFP_E_TX_FAULT) {
592 sfp_sm_link_down(sfp); 612 sfp_sm_link_down(sfp);
593 sfp_sm_fault(sfp, true); 613 sfp_sm_fault(sfp, true);
594 } else if (event == 614 } else if (sfp_los_event_active(sfp, event)) {
595 (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ?
596 SFP_E_LOS_LOW : SFP_E_LOS_HIGH)) {
597 sfp_sm_link_down(sfp); 615 sfp_sm_link_down(sfp);
598 sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0); 616 sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);
599 } 617 }
@@ -639,7 +657,8 @@ static int sfp_module_info(struct sfp *sfp, struct ethtool_modinfo *modinfo)
639{ 657{
640 /* locking... and check module is present */ 658 /* locking... and check module is present */
641 659
642 if (sfp->id.ext.sff8472_compliance) { 660 if (sfp->id.ext.sff8472_compliance &&
661 !(sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)) {
643 modinfo->type = ETH_MODULE_SFF_8472; 662 modinfo->type = ETH_MODULE_SFF_8472;
644 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 663 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
645 } else { 664 } else {
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index d8e5747ff4e3..264d4af0bf69 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1006,17 +1006,18 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
1006 if (!ifname_is_set) 1006 if (!ifname_is_set)
1007 snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index); 1007 snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index);
1008 1008
1009 mutex_unlock(&pn->all_ppp_mutex);
1010
1009 ret = register_netdevice(ppp->dev); 1011 ret = register_netdevice(ppp->dev);
1010 if (ret < 0) 1012 if (ret < 0)
1011 goto err_unit; 1013 goto err_unit;
1012 1014
1013 atomic_inc(&ppp_unit_count); 1015 atomic_inc(&ppp_unit_count);
1014 1016
1015 mutex_unlock(&pn->all_ppp_mutex);
1016
1017 return 0; 1017 return 0;
1018 1018
1019err_unit: 1019err_unit:
1020 mutex_lock(&pn->all_ppp_mutex);
1020 unit_put(&pn->units_idr, ppp->file.index); 1021 unit_put(&pn->units_idr, ppp->file.index);
1021err: 1022err:
1022 mutex_unlock(&pn->all_ppp_mutex); 1023 mutex_unlock(&pn->all_ppp_mutex);
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 4e1da1645b15..5aa59f41bf8c 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -842,6 +842,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
842 struct pppoe_hdr *ph; 842 struct pppoe_hdr *ph;
843 struct net_device *dev; 843 struct net_device *dev;
844 char *start; 844 char *start;
845 int hlen;
845 846
846 lock_sock(sk); 847 lock_sock(sk);
847 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) { 848 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
@@ -860,16 +861,16 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
860 if (total_len > (dev->mtu + dev->hard_header_len)) 861 if (total_len > (dev->mtu + dev->hard_header_len))
861 goto end; 862 goto end;
862 863
863 864 hlen = LL_RESERVED_SPACE(dev);
864 skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32, 865 skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len +
865 0, GFP_KERNEL); 866 dev->needed_tailroom, 0, GFP_KERNEL);
866 if (!skb) { 867 if (!skb) {
867 error = -ENOMEM; 868 error = -ENOMEM;
868 goto end; 869 goto end;
869 } 870 }
870 871
871 /* Reserve space for headers. */ 872 /* Reserve space for headers. */
872 skb_reserve(skb, dev->hard_header_len); 873 skb_reserve(skb, hlen);
873 skb_reset_network_header(skb); 874 skb_reset_network_header(skb);
874 875
875 skb->dev = dev; 876 skb->dev = dev;
@@ -930,7 +931,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
930 /* Copy the data if there is no space for the header or if it's 931 /* Copy the data if there is no space for the header or if it's
931 * read-only. 932 * read-only.
932 */ 933 */
933 if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len)) 934 if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph)))
934 goto abort; 935 goto abort;
935 936
936 __skb_push(skb, sizeof(*ph)); 937 __skb_push(skb, sizeof(*ph));
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index e9489b88407c..0a886fda0129 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -829,8 +829,11 @@ static ssize_t tap_do_read(struct tap_queue *q,
829 DEFINE_WAIT(wait); 829 DEFINE_WAIT(wait);
830 ssize_t ret = 0; 830 ssize_t ret = 0;
831 831
832 if (!iov_iter_count(to)) 832 if (!iov_iter_count(to)) {
833 if (skb)
834 kfree_skb(skb);
833 return 0; 835 return 0;
836 }
834 837
835 if (skb) 838 if (skb)
836 goto put; 839 goto put;
@@ -1154,11 +1157,14 @@ static int tap_recvmsg(struct socket *sock, struct msghdr *m,
1154 size_t total_len, int flags) 1157 size_t total_len, int flags)
1155{ 1158{
1156 struct tap_queue *q = container_of(sock, struct tap_queue, sock); 1159 struct tap_queue *q = container_of(sock, struct tap_queue, sock);
1160 struct sk_buff *skb = m->msg_control;
1157 int ret; 1161 int ret;
1158 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) 1162 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
1163 if (skb)
1164 kfree_skb(skb);
1159 return -EINVAL; 1165 return -EINVAL;
1160 ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, 1166 }
1161 m->msg_control); 1167 ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb);
1162 if (ret > total_len) { 1168 if (ret > total_len) {
1163 m->msg_flags |= MSG_TRUNC; 1169 m->msg_flags |= MSG_TRUNC;
1164 ret = flags & MSG_TRUNC ? ret : total_len; 1170 ret = flags & MSG_TRUNC ? ret : total_len;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 95749006d687..a8ec589d1359 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -611,6 +611,14 @@ static void tun_queue_purge(struct tun_file *tfile)
611 skb_queue_purge(&tfile->sk.sk_error_queue); 611 skb_queue_purge(&tfile->sk.sk_error_queue);
612} 612}
613 613
614static void tun_cleanup_tx_array(struct tun_file *tfile)
615{
616 if (tfile->tx_array.ring.queue) {
617 skb_array_cleanup(&tfile->tx_array);
618 memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
619 }
620}
621
614static void __tun_detach(struct tun_file *tfile, bool clean) 622static void __tun_detach(struct tun_file *tfile, bool clean)
615{ 623{
616 struct tun_file *ntfile; 624 struct tun_file *ntfile;
@@ -657,8 +665,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
657 tun->dev->reg_state == NETREG_REGISTERED) 665 tun->dev->reg_state == NETREG_REGISTERED)
658 unregister_netdevice(tun->dev); 666 unregister_netdevice(tun->dev);
659 } 667 }
660 if (tun) 668 tun_cleanup_tx_array(tfile);
661 skb_array_cleanup(&tfile->tx_array);
662 sock_put(&tfile->sk); 669 sock_put(&tfile->sk);
663 } 670 }
664} 671}
@@ -700,11 +707,13 @@ static void tun_detach_all(struct net_device *dev)
700 /* Drop read queue */ 707 /* Drop read queue */
701 tun_queue_purge(tfile); 708 tun_queue_purge(tfile);
702 sock_put(&tfile->sk); 709 sock_put(&tfile->sk);
710 tun_cleanup_tx_array(tfile);
703 } 711 }
704 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { 712 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
705 tun_enable_queue(tfile); 713 tun_enable_queue(tfile);
706 tun_queue_purge(tfile); 714 tun_queue_purge(tfile);
707 sock_put(&tfile->sk); 715 sock_put(&tfile->sk);
716 tun_cleanup_tx_array(tfile);
708 } 717 }
709 BUG_ON(tun->numdisabled != 0); 718 BUG_ON(tun->numdisabled != 0);
710 719
@@ -1952,8 +1961,11 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
1952 1961
1953 tun_debug(KERN_INFO, tun, "tun_do_read\n"); 1962 tun_debug(KERN_INFO, tun, "tun_do_read\n");
1954 1963
1955 if (!iov_iter_count(to)) 1964 if (!iov_iter_count(to)) {
1965 if (skb)
1966 kfree_skb(skb);
1956 return 0; 1967 return 0;
1968 }
1957 1969
1958 if (!skb) { 1970 if (!skb) {
1959 /* Read frames from ring */ 1971 /* Read frames from ring */
@@ -2069,22 +2081,24 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
2069{ 2081{
2070 struct tun_file *tfile = container_of(sock, struct tun_file, socket); 2082 struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2071 struct tun_struct *tun = tun_get(tfile); 2083 struct tun_struct *tun = tun_get(tfile);
2084 struct sk_buff *skb = m->msg_control;
2072 int ret; 2085 int ret;
2073 2086
2074 if (!tun) 2087 if (!tun) {
2075 return -EBADFD; 2088 ret = -EBADFD;
2089 goto out_free_skb;
2090 }
2076 2091
2077 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { 2092 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
2078 ret = -EINVAL; 2093 ret = -EINVAL;
2079 goto out; 2094 goto out_put_tun;
2080 } 2095 }
2081 if (flags & MSG_ERRQUEUE) { 2096 if (flags & MSG_ERRQUEUE) {
2082 ret = sock_recv_errqueue(sock->sk, m, total_len, 2097 ret = sock_recv_errqueue(sock->sk, m, total_len,
2083 SOL_PACKET, TUN_TX_TIMESTAMP); 2098 SOL_PACKET, TUN_TX_TIMESTAMP);
2084 goto out; 2099 goto out;
2085 } 2100 }
2086 ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, 2101 ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, skb);
2087 m->msg_control);
2088 if (ret > (ssize_t)total_len) { 2102 if (ret > (ssize_t)total_len) {
2089 m->msg_flags |= MSG_TRUNC; 2103 m->msg_flags |= MSG_TRUNC;
2090 ret = flags & MSG_TRUNC ? ret : total_len; 2104 ret = flags & MSG_TRUNC ? ret : total_len;
@@ -2092,6 +2106,13 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
2092out: 2106out:
2093 tun_put(tun); 2107 tun_put(tun);
2094 return ret; 2108 return ret;
2109
2110out_put_tun:
2111 tun_put(tun);
2112out_free_skb:
2113 if (skb)
2114 kfree_skb(skb);
2115 return ret;
2095} 2116}
2096 2117
2097static int tun_peek_len(struct socket *sock) 2118static int tun_peek_len(struct socket *sock)
@@ -2839,6 +2860,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
2839 2860
2840 sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); 2861 sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
2841 2862
2863 memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
2864
2842 return 0; 2865 return 0;
2843} 2866}
2844 2867
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 94c7804903c4..ec56ff29aac4 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2396,6 +2396,7 @@ static int lan78xx_reset(struct lan78xx_net *dev)
2396 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE; 2396 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2397 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; 2397 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2398 dev->rx_qlen = 4; 2398 dev->rx_qlen = 4;
2399 dev->tx_qlen = 4;
2399 } 2400 }
2400 2401
2401 ret = lan78xx_write_reg(dev, BURST_CAP, buf); 2402 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index c750cf7c042b..728819feab44 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -261,9 +261,11 @@ static void qmi_wwan_netdev_setup(struct net_device *net)
261 net->hard_header_len = 0; 261 net->hard_header_len = 0;
262 net->addr_len = 0; 262 net->addr_len = 0;
263 net->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 263 net->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
264 set_bit(EVENT_NO_IP_ALIGN, &dev->flags);
264 netdev_dbg(net, "mode: raw IP\n"); 265 netdev_dbg(net, "mode: raw IP\n");
265 } else if (!net->header_ops) { /* don't bother if already set */ 266 } else if (!net->header_ops) { /* don't bother if already set */
266 ether_setup(net); 267 ether_setup(net);
268 clear_bit(EVENT_NO_IP_ALIGN, &dev->flags);
267 netdev_dbg(net, "mode: Ethernet\n"); 269 netdev_dbg(net, "mode: Ethernet\n");
268 } 270 }
269 271
@@ -1098,6 +1100,7 @@ static const struct usb_device_id products[] = {
1098 {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, 1100 {QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
1099 {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, 1101 {QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
1100 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, 1102 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
1103 {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */
1101 {QMI_FIXED_INTF(0x0846, 0x68a2, 8)}, 1104 {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
1102 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ 1105 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
1103 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ 1106 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
@@ -1202,12 +1205,14 @@ static const struct usb_device_id products[] = {
1202 {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ 1205 {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
1203 {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ 1206 {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
1204 {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */ 1207 {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */
1208 {QMI_FIXED_INTF(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */
1205 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 1209 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
1206 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ 1210 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
1207 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 1211 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
1208 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 1212 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
1209 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ 1213 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
1210 {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ 1214 {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
1215 {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
1211 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 1216 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
1212 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ 1217 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
1213 {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */ 1218 {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d51d9abf7986..0657203ffb91 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -606,6 +606,7 @@ enum rtl8152_flags {
606 PHY_RESET, 606 PHY_RESET,
607 SCHEDULE_NAPI, 607 SCHEDULE_NAPI,
608 GREEN_ETHERNET, 608 GREEN_ETHERNET,
609 DELL_TB_RX_AGG_BUG,
609}; 610};
610 611
611/* Define these values to match your device */ 612/* Define these values to match your device */
@@ -1798,6 +1799,9 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
1798 dev_kfree_skb_any(skb); 1799 dev_kfree_skb_any(skb);
1799 1800
1800 remain = agg_buf_sz - (int)(tx_agg_align(tx_data) - agg->head); 1801 remain = agg_buf_sz - (int)(tx_agg_align(tx_data) - agg->head);
1802
1803 if (test_bit(DELL_TB_RX_AGG_BUG, &tp->flags))
1804 break;
1801 } 1805 }
1802 1806
1803 if (!skb_queue_empty(&skb_head)) { 1807 if (!skb_queue_empty(&skb_head)) {
@@ -4133,6 +4137,9 @@ static void r8153_init(struct r8152 *tp)
4133 /* rx aggregation */ 4137 /* rx aggregation */
4134 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); 4138 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
4135 ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); 4139 ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
4140 if (test_bit(DELL_TB_RX_AGG_BUG, &tp->flags))
4141 ocp_data |= RX_AGG_DISABLE;
4142
4136 ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); 4143 ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
4137 4144
4138 rtl_tally_reset(tp); 4145 rtl_tally_reset(tp);
@@ -5207,6 +5214,12 @@ static int rtl8152_probe(struct usb_interface *intf,
5207 netdev->hw_features &= ~NETIF_F_RXCSUM; 5214 netdev->hw_features &= ~NETIF_F_RXCSUM;
5208 } 5215 }
5209 5216
5217 if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 &&
5218 udev->serial && !strcmp(udev->serial, "000001000000")) {
5219 dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation");
5220 set_bit(DELL_TB_RX_AGG_BUG, &tp->flags);
5221 }
5222
5210 netdev->ethtool_ops = &ops; 5223 netdev->ethtool_ops = &ops;
5211 netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE); 5224 netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
5212 5225
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 80348b6a8646..8a22ff67b026 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -457,12 +457,10 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
457void usbnet_defer_kevent (struct usbnet *dev, int work) 457void usbnet_defer_kevent (struct usbnet *dev, int work)
458{ 458{
459 set_bit (work, &dev->flags); 459 set_bit (work, &dev->flags);
460 if (!schedule_work (&dev->kevent)) { 460 if (!schedule_work (&dev->kevent))
461 if (net_ratelimit()) 461 netdev_dbg(dev->net, "kevent %d may have been dropped\n", work);
462 netdev_err(dev->net, "kevent %d may have been dropped\n", work); 462 else
463 } else {
464 netdev_dbg(dev->net, "kevent %d scheduled\n", work); 463 netdev_dbg(dev->net, "kevent %d scheduled\n", work);
465 }
466} 464}
467EXPORT_SYMBOL_GPL(usbnet_defer_kevent); 465EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
468 466
@@ -484,7 +482,10 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
484 return -ENOLINK; 482 return -ENOLINK;
485 } 483 }
486 484
487 skb = __netdev_alloc_skb_ip_align(dev->net, size, flags); 485 if (test_bit(EVENT_NO_IP_ALIGN, &dev->flags))
486 skb = __netdev_alloc_skb(dev->net, size, flags);
487 else
488 skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
488 if (!skb) { 489 if (!skb) {
489 netif_dbg(dev, rx_err, dev->net, "no rx skb\n"); 490 netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
490 usbnet_defer_kevent (dev, EVENT_RX_MEMORY); 491 usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 19a985ef9104..559b215c0169 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -756,7 +756,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
756 int num_skb_frags; 756 int num_skb_frags;
757 757
758 buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx); 758 buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
759 if (unlikely(!ctx)) { 759 if (unlikely(!buf)) {
760 pr_debug("%s: rx error: %d buffers out of %d missing\n", 760 pr_debug("%s: rx error: %d buffers out of %d missing\n",
761 dev->name, num_buf, 761 dev->name, num_buf,
762 virtio16_to_cpu(vi->vdev, 762 virtio16_to_cpu(vi->vdev,
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d1c7029ded7c..cf95290b160c 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1616,7 +1616,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1616 rq->rx_ring[i].basePA); 1616 rq->rx_ring[i].basePA);
1617 rq->rx_ring[i].base = NULL; 1617 rq->rx_ring[i].base = NULL;
1618 } 1618 }
1619 rq->buf_info[i] = NULL;
1620 } 1619 }
1621 1620
1622 if (rq->data_ring.base) { 1621 if (rq->data_ring.base) {
@@ -1638,6 +1637,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1638 (rq->rx_ring[0].size + rq->rx_ring[1].size); 1637 (rq->rx_ring[0].size + rq->rx_ring[1].size);
1639 dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0], 1638 dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
1640 rq->buf_info_pa); 1639 rq->buf_info_pa);
1640 rq->buf_info[0] = rq->buf_info[1] = NULL;
1641 } 1641 }
1642} 1642}
1643 1643
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index feb1b2e15c2e..139c61c8244a 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -673,8 +673,9 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
673 struct sock *sk, 673 struct sock *sk,
674 struct sk_buff *skb) 674 struct sk_buff *skb)
675{ 675{
676 /* don't divert multicast */ 676 /* don't divert multicast or local broadcast */
677 if (ipv4_is_multicast(ip_hdr(skb)->daddr)) 677 if (ipv4_is_multicast(ip_hdr(skb)->daddr) ||
678 ipv4_is_lbcast(ip_hdr(skb)->daddr))
678 return skb; 679 return skb;
679 680
680 if (qdisc_tx_is_default(vrf_dev)) 681 if (qdisc_tx_is_default(vrf_dev))
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 19b9cc51079e..c3e34e3c82a7 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2155,6 +2155,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2155 } 2155 }
2156 2156
2157 ndst = &rt->dst; 2157 ndst = &rt->dst;
2158 if (skb_dst(skb)) {
2159 int mtu = dst_mtu(ndst) - VXLAN_HEADROOM;
2160
2161 skb_dst_update_pmtu(skb, mtu);
2162 }
2163
2158 tos = ip_tunnel_ecn_encap(tos, old_iph, skb); 2164 tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
2159 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); 2165 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
2160 err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr), 2166 err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
@@ -2190,6 +2196,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2190 goto out_unlock; 2196 goto out_unlock;
2191 } 2197 }
2192 2198
2199 if (skb_dst(skb)) {
2200 int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM;
2201
2202 skb_dst_update_pmtu(skb, mtu);
2203 }
2204
2193 tos = ip_tunnel_ecn_encap(tos, old_iph, skb); 2205 tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
2194 ttl = ttl ? : ip6_dst_hoplimit(ndst); 2206 ttl = ttl ? : ip6_dst_hoplimit(ndst);
2195 skb_scrub_packet(skb, xnet); 2207 skb_scrub_packet(skb, xnet);
@@ -3103,6 +3115,11 @@ static void vxlan_config_apply(struct net_device *dev,
3103 3115
3104 max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : 3116 max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
3105 VXLAN_HEADROOM); 3117 VXLAN_HEADROOM);
3118 if (max_mtu < ETH_MIN_MTU)
3119 max_mtu = ETH_MIN_MTU;
3120
3121 if (!changelink && !conf->mtu)
3122 dev->mtu = max_mtu;
3106 } 3123 }
3107 3124
3108 if (dev->mtu > max_mtu) 3125 if (dev->mtu > max_mtu)
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index f7d228b5ba93..987f1252a3cf 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -384,6 +384,18 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
384 } 384 }
385 } 385 }
386 386
387 if (changed & IEEE80211_CONF_CHANGE_PS) {
388 list_for_each_entry(tmp, &wcn->vif_list, list) {
389 vif = wcn36xx_priv_to_vif(tmp);
390 if (hw->conf.flags & IEEE80211_CONF_PS) {
391 if (vif->bss_conf.ps) /* ps allowed ? */
392 wcn36xx_pmc_enter_bmps_state(wcn, vif);
393 } else {
394 wcn36xx_pmc_exit_bmps_state(wcn, vif);
395 }
396 }
397 }
398
387 mutex_unlock(&wcn->conf_mutex); 399 mutex_unlock(&wcn->conf_mutex);
388 400
389 return 0; 401 return 0;
@@ -747,17 +759,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
747 vif_priv->dtim_period = bss_conf->dtim_period; 759 vif_priv->dtim_period = bss_conf->dtim_period;
748 } 760 }
749 761
750 if (changed & BSS_CHANGED_PS) {
751 wcn36xx_dbg(WCN36XX_DBG_MAC,
752 "mac bss PS set %d\n",
753 bss_conf->ps);
754 if (bss_conf->ps) {
755 wcn36xx_pmc_enter_bmps_state(wcn, vif);
756 } else {
757 wcn36xx_pmc_exit_bmps_state(wcn, vif);
758 }
759 }
760
761 if (changed & BSS_CHANGED_BSSID) { 762 if (changed & BSS_CHANGED_BSSID) {
762 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n", 763 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n",
763 bss_conf->bssid); 764 bss_conf->bssid);
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c
index 589fe5f70971..1976b80c235f 100644
--- a/drivers/net/wireless/ath/wcn36xx/pmc.c
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.c
@@ -45,8 +45,10 @@ int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
45 struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); 45 struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
46 46
47 if (WCN36XX_BMPS != vif_priv->pw_state) { 47 if (WCN36XX_BMPS != vif_priv->pw_state) {
48 wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n"); 48 /* Unbalanced call or last BMPS enter failed */
49 return -EINVAL; 49 wcn36xx_dbg(WCN36XX_DBG_PMC,
50 "Not in BMPS mode, no need to exit\n");
51 return -EALREADY;
50 } 52 }
51 wcn36xx_smd_exit_bmps(wcn, vif); 53 wcn36xx_smd_exit_bmps(wcn, vif);
52 vif_priv->pw_state = WCN36XX_FULL_POWER; 54 vif_priv->pw_state = WCN36XX_FULL_POWER;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 6a59d0609d30..9be0b051066a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -182,12 +182,9 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
182 182
183 err = request_firmware(&clm, clm_name, dev); 183 err = request_firmware(&clm, clm_name, dev);
184 if (err) { 184 if (err) {
185 if (err == -ENOENT) { 185 brcmf_info("no clm_blob available(err=%d), device may have limited channels available\n",
186 brcmf_dbg(INFO, "continue with CLM data currently present in firmware\n"); 186 err);
187 return 0; 187 return 0;
188 }
189 brcmf_err("request CLM blob file failed (%d)\n", err);
190 return err;
191 } 188 }
192 189
193 chunk_buf = kzalloc(sizeof(*chunk_buf) + MAX_CHUNK_LEN - 1, GFP_KERNEL); 190 chunk_buf = kzalloc(sizeof(*chunk_buf) + MAX_CHUNK_LEN - 1, GFP_KERNEL);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 310c4e2746aa..cdf9e4161592 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -2070,7 +2070,7 @@ static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)
2070 return head_pad; 2070 return head_pad;
2071} 2071}
2072 2072
2073/** 2073/*
2074 * struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for 2074 * struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for
2075 * bus layer usage. 2075 * bus layer usage.
2076 */ 2076 */
@@ -4121,8 +4121,8 @@ release:
4121 sdio_release_host(sdiodev->func[1]); 4121 sdio_release_host(sdiodev->func[1]);
4122fail: 4122fail:
4123 brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err); 4123 brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
4124 device_release_driver(dev);
4125 device_release_driver(&sdiodev->func[2]->dev); 4124 device_release_driver(&sdiodev->func[2]->dev);
4125 device_release_driver(dev);
4126} 4126}
4127 4127
4128struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) 4128struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
index 87b4434224a1..dfa111bb411e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
@@ -68,6 +68,9 @@
68 * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW 68 * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW
69 * @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames 69 * @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames
70 * @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames 70 * @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames
71 * @IWL_MVM_DQA_INJECT_MONITOR_QUEUE: a queue reserved for injection using
72 * monitor mode. Note this queue is the same as the queue for P2P device
73 * but we can't have active monitor mode along with P2P device anyway.
71 * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames 74 * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames
72 * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure 75 * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure
73 * that we are never left without the possibility to connect to an AP. 76 * that we are never left without the possibility to connect to an AP.
@@ -87,6 +90,7 @@ enum iwl_mvm_dqa_txq {
87 IWL_MVM_DQA_CMD_QUEUE = 0, 90 IWL_MVM_DQA_CMD_QUEUE = 0,
88 IWL_MVM_DQA_AUX_QUEUE = 1, 91 IWL_MVM_DQA_AUX_QUEUE = 1,
89 IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2, 92 IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2,
93 IWL_MVM_DQA_INJECT_MONITOR_QUEUE = 2,
90 IWL_MVM_DQA_GCAST_QUEUE = 3, 94 IWL_MVM_DQA_GCAST_QUEUE = 3,
91 IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4, 95 IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
92 IWL_MVM_DQA_MIN_MGMT_QUEUE = 5, 96 IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 9c889a32fe24..223fb77a3aa9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -209,8 +209,6 @@ static inline void iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt)
209 209
210static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt) 210static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
211{ 211{
212 iwl_fw_dbg_stop_recording(fwrt);
213
214 fwrt->dump.conf = FW_DBG_INVALID; 212 fwrt->dump.conf = FW_DBG_INVALID;
215} 213}
216 214
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index ca0b5536a8a6..921cab9e2d73 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -117,6 +117,7 @@
117#define FH_RSCSR_FRAME_INVALID 0x55550000 117#define FH_RSCSR_FRAME_INVALID 0x55550000
118#define FH_RSCSR_FRAME_ALIGN 0x40 118#define FH_RSCSR_FRAME_ALIGN 0x40
119#define FH_RSCSR_RPA_EN BIT(25) 119#define FH_RSCSR_RPA_EN BIT(25)
120#define FH_RSCSR_RADA_EN BIT(26)
120#define FH_RSCSR_RXQ_POS 16 121#define FH_RSCSR_RXQ_POS 16
121#define FH_RSCSR_RXQ_MASK 0x3F0000 122#define FH_RSCSR_RXQ_MASK 0x3F0000
122 123
@@ -128,7 +129,8 @@ struct iwl_rx_packet {
128 * 31: flag flush RB request 129 * 31: flag flush RB request
129 * 30: flag ignore TC (terminal counter) request 130 * 30: flag ignore TC (terminal counter) request
130 * 29: flag fast IRQ request 131 * 29: flag fast IRQ request
131 * 28-26: Reserved 132 * 28-27: Reserved
133 * 26: RADA enabled
132 * 25: Offload enabled 134 * 25: Offload enabled
133 * 24: RPF enabled 135 * 24: RPF enabled
134 * 23: RSS enabled 136 * 23: RSS enabled
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index a2bf530eeae4..2f22e14e00fe 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -787,7 +787,7 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
787 u32 action) 787 u32 action)
788{ 788{
789 struct iwl_mac_ctx_cmd cmd = {}; 789 struct iwl_mac_ctx_cmd cmd = {};
790 u32 tfd_queue_msk = 0; 790 u32 tfd_queue_msk = BIT(mvm->snif_queue);
791 int ret; 791 int ret;
792 792
793 WARN_ON(vif->type != NL80211_IFTYPE_MONITOR); 793 WARN_ON(vif->type != NL80211_IFTYPE_MONITOR);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 4575595ab022..55ab5349dd40 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -972,6 +972,7 @@ struct iwl_mvm {
972 972
973 /* Tx queues */ 973 /* Tx queues */
974 u16 aux_queue; 974 u16 aux_queue;
975 u16 snif_queue;
975 u16 probe_queue; 976 u16 probe_queue;
976 u16 p2p_dev_queue; 977 u16 p2p_dev_queue;
977 978
@@ -1060,6 +1061,7 @@ struct iwl_mvm {
1060 * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running 1061 * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
1061 * @IWL_MVM_STATUS_D3_RECONFIG: D3 reconfiguration is being done 1062 * @IWL_MVM_STATUS_D3_RECONFIG: D3 reconfiguration is being done
1062 * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running 1063 * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
1064 * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
1063 */ 1065 */
1064enum iwl_mvm_status { 1066enum iwl_mvm_status {
1065 IWL_MVM_STATUS_HW_RFKILL, 1067 IWL_MVM_STATUS_HW_RFKILL,
@@ -1071,6 +1073,7 @@ enum iwl_mvm_status {
1071 IWL_MVM_STATUS_ROC_AUX_RUNNING, 1073 IWL_MVM_STATUS_ROC_AUX_RUNNING,
1072 IWL_MVM_STATUS_D3_RECONFIG, 1074 IWL_MVM_STATUS_D3_RECONFIG,
1073 IWL_MVM_STATUS_FIRMWARE_RUNNING, 1075 IWL_MVM_STATUS_FIRMWARE_RUNNING,
1076 IWL_MVM_STATUS_NEED_FLUSH_P2P,
1074}; 1077};
1075 1078
1076/* Keep track of completed init configuration */ 1079/* Keep track of completed init configuration */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 7078b7e458be..45470b6b351a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -624,6 +624,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
624 mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0; 624 mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
625 625
626 mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; 626 mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
627 mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
627 mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; 628 mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
628 mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; 629 mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
629 630
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 76dc58381e1c..3b8d44361380 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -213,6 +213,7 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
213 struct ieee80211_rx_status *rx_status) 213 struct ieee80211_rx_status *rx_status)
214{ 214{
215 int energy_a, energy_b, max_energy; 215 int energy_a, energy_b, max_energy;
216 u32 rate_flags = le32_to_cpu(desc->rate_n_flags);
216 217
217 energy_a = desc->energy_a; 218 energy_a = desc->energy_a;
218 energy_a = energy_a ? -energy_a : S8_MIN; 219 energy_a = energy_a ? -energy_a : S8_MIN;
@@ -224,7 +225,8 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
224 energy_a, energy_b, max_energy); 225 energy_a, energy_b, max_energy);
225 226
226 rx_status->signal = max_energy; 227 rx_status->signal = max_energy;
227 rx_status->chains = 0; /* TODO: phy info */ 228 rx_status->chains =
229 (rate_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS;
228 rx_status->chain_signal[0] = energy_a; 230 rx_status->chain_signal[0] = energy_a;
229 rx_status->chain_signal[1] = energy_b; 231 rx_status->chain_signal[1] = energy_b;
230 rx_status->chain_signal[2] = S8_MIN; 232 rx_status->chain_signal[2] = S8_MIN;
@@ -232,8 +234,8 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
232 234
233static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, 235static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
234 struct ieee80211_rx_status *stats, 236 struct ieee80211_rx_status *stats,
235 struct iwl_rx_mpdu_desc *desc, int queue, 237 struct iwl_rx_mpdu_desc *desc, u32 pkt_flags,
236 u8 *crypt_len) 238 int queue, u8 *crypt_len)
237{ 239{
238 u16 status = le16_to_cpu(desc->status); 240 u16 status = le16_to_cpu(desc->status);
239 241
@@ -253,6 +255,8 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
253 return -1; 255 return -1;
254 256
255 stats->flag |= RX_FLAG_DECRYPTED; 257 stats->flag |= RX_FLAG_DECRYPTED;
258 if (pkt_flags & FH_RSCSR_RADA_EN)
259 stats->flag |= RX_FLAG_MIC_STRIPPED;
256 *crypt_len = IEEE80211_CCMP_HDR_LEN; 260 *crypt_len = IEEE80211_CCMP_HDR_LEN;
257 return 0; 261 return 0;
258 case IWL_RX_MPDU_STATUS_SEC_TKIP: 262 case IWL_RX_MPDU_STATUS_SEC_TKIP:
@@ -270,6 +274,10 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
270 if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) == 274 if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
271 IWL_RX_MPDU_STATUS_SEC_WEP) 275 IWL_RX_MPDU_STATUS_SEC_WEP)
272 *crypt_len = IEEE80211_WEP_IV_LEN; 276 *crypt_len = IEEE80211_WEP_IV_LEN;
277
278 if (pkt_flags & FH_RSCSR_RADA_EN)
279 stats->flag |= RX_FLAG_ICV_STRIPPED;
280
273 return 0; 281 return 0;
274 case IWL_RX_MPDU_STATUS_SEC_EXT_ENC: 282 case IWL_RX_MPDU_STATUS_SEC_EXT_ENC:
275 if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) 283 if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
@@ -848,7 +856,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
848 856
849 rx_status = IEEE80211_SKB_RXCB(skb); 857 rx_status = IEEE80211_SKB_RXCB(skb);
850 858
851 if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc, queue, &crypt_len)) { 859 if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc,
860 le32_to_cpu(pkt->len_n_flags), queue,
861 &crypt_len)) {
852 kfree_skb(skb); 862 kfree_skb(skb);
853 return; 863 return;
854 } 864 }
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index c19f98489d4e..1add5615fc3a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1709,29 +1709,29 @@ void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
1709 sta->sta_id = IWL_MVM_INVALID_STA; 1709 sta->sta_id = IWL_MVM_INVALID_STA;
1710} 1710}
1711 1711
1712static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm) 1712static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
1713 u8 sta_id, u8 fifo)
1713{ 1714{
1714 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ? 1715 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
1715 mvm->cfg->base_params->wd_timeout : 1716 mvm->cfg->base_params->wd_timeout :
1716 IWL_WATCHDOG_DISABLED; 1717 IWL_WATCHDOG_DISABLED;
1717 1718
1718 if (iwl_mvm_has_new_tx_api(mvm)) { 1719 if (iwl_mvm_has_new_tx_api(mvm)) {
1719 int queue = iwl_mvm_tvqm_enable_txq(mvm, mvm->aux_queue, 1720 int tvqm_queue =
1720 mvm->aux_sta.sta_id, 1721 iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
1721 IWL_MAX_TID_COUNT, 1722 IWL_MAX_TID_COUNT,
1722 wdg_timeout); 1723 wdg_timeout);
1723 mvm->aux_queue = queue; 1724 *queue = tvqm_queue;
1724 } else { 1725 } else {
1725 struct iwl_trans_txq_scd_cfg cfg = { 1726 struct iwl_trans_txq_scd_cfg cfg = {
1726 .fifo = IWL_MVM_TX_FIFO_MCAST, 1727 .fifo = fifo,
1727 .sta_id = mvm->aux_sta.sta_id, 1728 .sta_id = sta_id,
1728 .tid = IWL_MAX_TID_COUNT, 1729 .tid = IWL_MAX_TID_COUNT,
1729 .aggregate = false, 1730 .aggregate = false,
1730 .frame_limit = IWL_FRAME_LIMIT, 1731 .frame_limit = IWL_FRAME_LIMIT,
1731 }; 1732 };
1732 1733
1733 iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg, 1734 iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
1734 wdg_timeout);
1735 } 1735 }
1736} 1736}
1737 1737
@@ -1750,7 +1750,9 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
1750 1750
1751 /* Map Aux queue to fifo - needs to happen before adding Aux station */ 1751 /* Map Aux queue to fifo - needs to happen before adding Aux station */
1752 if (!iwl_mvm_has_new_tx_api(mvm)) 1752 if (!iwl_mvm_has_new_tx_api(mvm))
1753 iwl_mvm_enable_aux_queue(mvm); 1753 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
1754 mvm->aux_sta.sta_id,
1755 IWL_MVM_TX_FIFO_MCAST);
1754 1756
1755 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL, 1757 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
1756 MAC_INDEX_AUX, 0); 1758 MAC_INDEX_AUX, 0);
@@ -1764,7 +1766,9 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
1764 * to firmware so enable queue here - after the station was added 1766 * to firmware so enable queue here - after the station was added
1765 */ 1767 */
1766 if (iwl_mvm_has_new_tx_api(mvm)) 1768 if (iwl_mvm_has_new_tx_api(mvm))
1767 iwl_mvm_enable_aux_queue(mvm); 1769 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
1770 mvm->aux_sta.sta_id,
1771 IWL_MVM_TX_FIFO_MCAST);
1768 1772
1769 return 0; 1773 return 0;
1770} 1774}
@@ -1772,10 +1776,31 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
1772int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 1776int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1773{ 1777{
1774 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1778 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1779 int ret;
1775 1780
1776 lockdep_assert_held(&mvm->mutex); 1781 lockdep_assert_held(&mvm->mutex);
1777 return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr, 1782
1783 /* Map snif queue to fifo - must happen before adding snif station */
1784 if (!iwl_mvm_has_new_tx_api(mvm))
1785 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
1786 mvm->snif_sta.sta_id,
1787 IWL_MVM_TX_FIFO_BE);
1788
1789 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
1778 mvmvif->id, 0); 1790 mvmvif->id, 0);
1791 if (ret)
1792 return ret;
1793
1794 /*
1795 * For 22000 firmware and on we cannot add queue to a station unknown
1796 * to firmware so enable queue here - after the station was added
1797 */
1798 if (iwl_mvm_has_new_tx_api(mvm))
1799 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
1800 mvm->snif_sta.sta_id,
1801 IWL_MVM_TX_FIFO_BE);
1802
1803 return 0;
1779} 1804}
1780 1805
1781int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 1806int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
@@ -1784,6 +1809,8 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1784 1809
1785 lockdep_assert_held(&mvm->mutex); 1810 lockdep_assert_held(&mvm->mutex);
1786 1811
1812 iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
1813 IWL_MAX_TID_COUNT, 0);
1787 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); 1814 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
1788 if (ret) 1815 if (ret)
1789 IWL_WARN(mvm, "Failed sending remove station\n"); 1816 IWL_WARN(mvm, "Failed sending remove station\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 4d0314912e94..e25cda9fbf6c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -132,6 +132,24 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
132 * executed, and a new time event means a new command. 132 * executed, and a new time event means a new command.
133 */ 133 */
134 iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC); 134 iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);
135
136 /* Do the same for the P2P device queue (STA) */
137 if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
138 struct iwl_mvm_vif *mvmvif;
139
140 /*
141 * NB: access to this pointer would be racy, but the flush bit
142 * can only be set when we had a P2P-Device VIF, and we have a
143 * flush of this work in iwl_mvm_prepare_mac_removal() so it's
144 * not really racy.
145 */
146
147 if (!WARN_ON(!mvm->p2p_device_vif)) {
148 mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
149 iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true,
150 CMD_ASYNC);
151 }
152 }
135} 153}
136 154
137static void iwl_mvm_roc_finished(struct iwl_mvm *mvm) 155static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
@@ -855,10 +873,12 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
855 873
856 mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); 874 mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
857 875
858 if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) 876 if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
859 iwl_mvm_remove_time_event(mvm, mvmvif, te_data); 877 iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
860 else 878 set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
879 } else {
861 iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data); 880 iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
881 }
862 882
863 iwl_mvm_roc_finished(mvm); 883 iwl_mvm_roc_finished(mvm);
864} 884}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 593b7f97b29c..333bcb75b8af 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -657,7 +657,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
657 if (ap_sta_id != IWL_MVM_INVALID_STA) 657 if (ap_sta_id != IWL_MVM_INVALID_STA)
658 sta_id = ap_sta_id; 658 sta_id = ap_sta_id;
659 } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) { 659 } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
660 queue = mvm->aux_queue; 660 queue = mvm->snif_queue;
661 sta_id = mvm->snif_sta.sta_id;
661 } 662 }
662 } 663 }
663 664
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index d46115e2d69e..03ffd84786ca 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -1134,9 +1134,18 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
1134 unsigned int default_timeout = 1134 unsigned int default_timeout =
1135 cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout; 1135 cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout;
1136 1136
1137 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) 1137 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) {
1138 /*
1139 * We can't know when the station is asleep or awake, so we
1140 * must disable the queue hang detection.
1141 */
1142 if (fw_has_capa(&mvm->fw->ucode_capa,
1143 IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
1144 vif && vif->type == NL80211_IFTYPE_AP)
1145 return IWL_WATCHDOG_DISABLED;
1138 return iwlmvm_mod_params.tfd_q_hang_detect ? 1146 return iwlmvm_mod_params.tfd_q_hang_detect ?
1139 default_timeout : IWL_WATCHDOG_DISABLED; 1147 default_timeout : IWL_WATCHDOG_DISABLED;
1148 }
1140 1149
1141 trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS); 1150 trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
1142 txq_timer = (void *)trigger->data; 1151 txq_timer = (void *)trigger->data;
@@ -1163,6 +1172,8 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
1163 return le32_to_cpu(txq_timer->p2p_go); 1172 return le32_to_cpu(txq_timer->p2p_go);
1164 case NL80211_IFTYPE_P2P_DEVICE: 1173 case NL80211_IFTYPE_P2P_DEVICE:
1165 return le32_to_cpu(txq_timer->p2p_device); 1174 return le32_to_cpu(txq_timer->p2p_device);
1175 case NL80211_IFTYPE_MONITOR:
1176 return default_timeout;
1166 default: 1177 default:
1167 WARN_ON(1); 1178 WARN_ON(1);
1168 return mvm->cfg->base_params->wd_timeout; 1179 return mvm->cfg->base_params->wd_timeout;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index f21fe59faccf..ccd7c33c4c28 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -553,6 +553,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
553 {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, 553 {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
554 {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)}, 554 {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
555 {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)}, 555 {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
556 {IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)},
556 {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)}, 557 {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)},
557 {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)}, 558 {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)},
558 {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)}, 559 {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)},
@@ -664,6 +665,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
664 {IWL_PCI_DEVICE(0x2720, 0x0310, iwla000_2ac_cfg_hr_cdb)}, 665 {IWL_PCI_DEVICE(0x2720, 0x0310, iwla000_2ac_cfg_hr_cdb)},
665 {IWL_PCI_DEVICE(0x40C0, 0x0000, iwla000_2ax_cfg_hr)}, 666 {IWL_PCI_DEVICE(0x40C0, 0x0000, iwla000_2ax_cfg_hr)},
666 {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwla000_2ax_cfg_hr)}, 667 {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwla000_2ax_cfg_hr)},
668 {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwla000_2ax_cfg_hr)},
667 669
668#endif /* CONFIG_IWLMVM */ 670#endif /* CONFIG_IWLMVM */
669 671
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index d749abeca3ae..403e65c309d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -670,11 +670,15 @@ static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index)
670 return index & (q->n_window - 1); 670 return index & (q->n_window - 1);
671} 671}
672 672
673static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie, 673static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
674 struct iwl_txq *txq, int idx) 674 struct iwl_txq *txq, int idx)
675{ 675{
676 return txq->tfds + trans_pcie->tfd_size * iwl_pcie_get_cmd_index(txq, 676 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
677 idx); 677
678 if (trans->cfg->use_tfh)
679 idx = iwl_pcie_get_cmd_index(txq, idx);
680
681 return txq->tfds + trans_pcie->tfd_size * idx;
678} 682}
679 683
680static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) 684static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index c59f4581e972..ac05fd1e74c4 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -49,6 +49,7 @@
49 * 49 *
50 *****************************************************************************/ 50 *****************************************************************************/
51#include "iwl-trans.h" 51#include "iwl-trans.h"
52#include "iwl-prph.h"
52#include "iwl-context-info.h" 53#include "iwl-context-info.h"
53#include "internal.h" 54#include "internal.h"
54 55
@@ -156,6 +157,11 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
156 157
157 trans_pcie->is_down = true; 158 trans_pcie->is_down = true;
158 159
160 /* Stop dbgc before stopping device */
161 iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
162 udelay(100);
163 iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
164
159 /* tell the device to stop sending interrupts */ 165 /* tell the device to stop sending interrupts */
160 iwl_disable_interrupts(trans); 166 iwl_disable_interrupts(trans);
161 167
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index b7a51603465b..4541c86881d6 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -166,6 +166,7 @@ static void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
166 print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 166 print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
167 4, buf, i, 0); 167 4, buf, i, 0);
168 } 168 }
169 goto out;
169 170
170err_read: 171err_read:
171 print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); 172 print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
@@ -1226,6 +1227,15 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1226 1227
1227 trans_pcie->is_down = true; 1228 trans_pcie->is_down = true;
1228 1229
1230 /* Stop dbgc before stopping device */
1231 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
1232 iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100);
1233 } else {
1234 iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
1235 udelay(100);
1236 iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
1237 }
1238
1229 /* tell the device to stop sending interrupts */ 1239 /* tell the device to stop sending interrupts */
1230 iwl_disable_interrupts(trans); 1240 iwl_disable_interrupts(trans);
1231 1241
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 16b345f54ff0..6d0a907d5ba5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -171,8 +171,6 @@ static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
171 171
172static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) 172static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
173{ 173{
174 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
175
176 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and 174 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
177 * idx is bounded by n_window 175 * idx is bounded by n_window
178 */ 176 */
@@ -181,7 +179,7 @@ static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
181 lockdep_assert_held(&txq->lock); 179 lockdep_assert_held(&txq->lock);
182 180
183 iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta, 181 iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
184 iwl_pcie_get_tfd(trans_pcie, txq, idx)); 182 iwl_pcie_get_tfd(trans, txq, idx));
185 183
186 /* free SKB */ 184 /* free SKB */
187 if (txq->entries) { 185 if (txq->entries) {
@@ -364,11 +362,9 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
364 struct sk_buff *skb, 362 struct sk_buff *skb,
365 struct iwl_cmd_meta *out_meta) 363 struct iwl_cmd_meta *out_meta)
366{ 364{
367 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
368 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 365 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
369 int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); 366 int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
370 struct iwl_tfh_tfd *tfd = 367 struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
371 iwl_pcie_get_tfd(trans_pcie, txq, idx);
372 dma_addr_t tb_phys; 368 dma_addr_t tb_phys;
373 bool amsdu; 369 bool amsdu;
374 int i, len, tb1_len, tb2_len, hdr_len; 370 int i, len, tb1_len, tb2_len, hdr_len;
@@ -565,8 +561,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
565 u8 group_id = iwl_cmd_groupid(cmd->id); 561 u8 group_id = iwl_cmd_groupid(cmd->id);
566 const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; 562 const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
567 u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; 563 u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
568 struct iwl_tfh_tfd *tfd = 564 struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
569 iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
570 565
571 memset(tfd, 0, sizeof(*tfd)); 566 memset(tfd, 0, sizeof(*tfd));
572 567
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index fed6d842a5e1..3f85713c41dc 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -373,7 +373,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
373{ 373{
374 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 374 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
375 int i, num_tbs; 375 int i, num_tbs;
376 void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index); 376 void *tfd = iwl_pcie_get_tfd(trans, txq, index);
377 377
378 /* Sanity check on number of chunks */ 378 /* Sanity check on number of chunks */
379 num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); 379 num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
@@ -2018,7 +2018,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
2018 } 2018 }
2019 2019
2020 trace_iwlwifi_dev_tx(trans->dev, skb, 2020 trace_iwlwifi_dev_tx(trans->dev, skb,
2021 iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr), 2021 iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
2022 trans_pcie->tfd_size, 2022 trans_pcie->tfd_size,
2023 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 2023 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
2024 hdr_len); 2024 hdr_len);
@@ -2092,7 +2092,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
2092 IEEE80211_CCMP_HDR_LEN : 0; 2092 IEEE80211_CCMP_HDR_LEN : 0;
2093 2093
2094 trace_iwlwifi_dev_tx(trans->dev, skb, 2094 trace_iwlwifi_dev_tx(trans->dev, skb,
2095 iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr), 2095 iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
2096 trans_pcie->tfd_size, 2096 trans_pcie->tfd_size,
2097 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0); 2097 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
2098 2098
@@ -2425,7 +2425,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
2425 memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr, 2425 memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
2426 IWL_FIRST_TB_SIZE); 2426 IWL_FIRST_TB_SIZE);
2427 2427
2428 tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr); 2428 tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
2429 /* Set up entry for this TFD in Tx byte-count array */ 2429 /* Set up entry for this TFD in Tx byte-count array */
2430 iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), 2430 iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
2431 iwl_pcie_tfd_get_num_tbs(trans, tfd)); 2431 iwl_pcie_tfd_get_num_tbs(trans, tfd));
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 10b075a46b26..f6d4a50f1bdb 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -489,6 +489,7 @@ static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
489 489
490static spinlock_t hwsim_radio_lock; 490static spinlock_t hwsim_radio_lock;
491static LIST_HEAD(hwsim_radios); 491static LIST_HEAD(hwsim_radios);
492static struct workqueue_struct *hwsim_wq;
492static int hwsim_radio_idx; 493static int hwsim_radio_idx;
493 494
494static struct platform_driver mac80211_hwsim_driver = { 495static struct platform_driver mac80211_hwsim_driver = {
@@ -684,6 +685,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
684 hdr = skb_put(skb, sizeof(*hdr) - ETH_ALEN); 685 hdr = skb_put(skb, sizeof(*hdr) - ETH_ALEN);
685 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | 686 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
686 IEEE80211_STYPE_NULLFUNC | 687 IEEE80211_STYPE_NULLFUNC |
688 IEEE80211_FCTL_TODS |
687 (ps ? IEEE80211_FCTL_PM : 0)); 689 (ps ? IEEE80211_FCTL_PM : 0));
688 hdr->duration_id = cpu_to_le16(0); 690 hdr->duration_id = cpu_to_le16(0);
689 memcpy(hdr->addr1, vp->bssid, ETH_ALEN); 691 memcpy(hdr->addr1, vp->bssid, ETH_ALEN);
@@ -3119,6 +3121,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
3119 if (info->attrs[HWSIM_ATTR_CHANNELS]) 3121 if (info->attrs[HWSIM_ATTR_CHANNELS])
3120 param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); 3122 param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
3121 3123
3124 if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) {
3125 GENL_SET_ERR_MSG(info, "too many channels specified");
3126 return -EINVAL;
3127 }
3128
3122 if (info->attrs[HWSIM_ATTR_NO_VIF]) 3129 if (info->attrs[HWSIM_ATTR_NO_VIF])
3123 param.no_vif = true; 3130 param.no_vif = true;
3124 3131
@@ -3215,7 +3222,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
3215 if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info))) 3222 if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info)))
3216 continue; 3223 continue;
3217 3224
3218 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 3225 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
3219 if (!skb) { 3226 if (!skb) {
3220 res = -ENOMEM; 3227 res = -ENOMEM;
3221 goto out_err; 3228 goto out_err;
@@ -3341,7 +3348,7 @@ static void remove_user_radios(u32 portid)
3341 if (entry->destroy_on_close && entry->portid == portid) { 3348 if (entry->destroy_on_close && entry->portid == portid) {
3342 list_del(&entry->list); 3349 list_del(&entry->list);
3343 INIT_WORK(&entry->destroy_work, destroy_radio); 3350 INIT_WORK(&entry->destroy_work, destroy_radio);
3344 schedule_work(&entry->destroy_work); 3351 queue_work(hwsim_wq, &entry->destroy_work);
3345 } 3352 }
3346 } 3353 }
3347 spin_unlock_bh(&hwsim_radio_lock); 3354 spin_unlock_bh(&hwsim_radio_lock);
@@ -3416,7 +3423,7 @@ static void __net_exit hwsim_exit_net(struct net *net)
3416 3423
3417 list_del(&data->list); 3424 list_del(&data->list);
3418 INIT_WORK(&data->destroy_work, destroy_radio); 3425 INIT_WORK(&data->destroy_work, destroy_radio);
3419 schedule_work(&data->destroy_work); 3426 queue_work(hwsim_wq, &data->destroy_work);
3420 } 3427 }
3421 spin_unlock_bh(&hwsim_radio_lock); 3428 spin_unlock_bh(&hwsim_radio_lock);
3422} 3429}
@@ -3448,6 +3455,10 @@ static int __init init_mac80211_hwsim(void)
3448 3455
3449 spin_lock_init(&hwsim_radio_lock); 3456 spin_lock_init(&hwsim_radio_lock);
3450 3457
3458 hwsim_wq = alloc_workqueue("hwsim_wq",WQ_MEM_RECLAIM,0);
3459 if (!hwsim_wq)
3460 return -ENOMEM;
3461
3451 err = register_pernet_device(&hwsim_net_ops); 3462 err = register_pernet_device(&hwsim_net_ops);
3452 if (err) 3463 if (err)
3453 return err; 3464 return err;
@@ -3586,8 +3597,11 @@ static void __exit exit_mac80211_hwsim(void)
3586 hwsim_exit_netlink(); 3597 hwsim_exit_netlink();
3587 3598
3588 mac80211_hwsim_free(); 3599 mac80211_hwsim_free();
3600 flush_workqueue(hwsim_wq);
3601
3589 unregister_netdev(hwsim_mon); 3602 unregister_netdev(hwsim_mon);
3590 platform_driver_unregister(&mac80211_hwsim_driver); 3603 platform_driver_unregister(&mac80211_hwsim_driver);
3591 unregister_pernet_device(&hwsim_net_ops); 3604 unregister_pernet_device(&hwsim_net_ops);
3605 destroy_workqueue(hwsim_wq);
3592} 3606}
3593module_exit(exit_mac80211_hwsim); 3607module_exit(exit_mac80211_hwsim);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index d6dff347f896..78ebe494fef0 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -186,7 +186,7 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
186 /* Obtain the queue to be used to transmit this packet */ 186 /* Obtain the queue to be used to transmit this packet */
187 index = skb_get_queue_mapping(skb); 187 index = skb_get_queue_mapping(skb);
188 if (index >= num_queues) { 188 if (index >= num_queues) {
189 pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n.", 189 pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
190 index, vif->dev->name); 190 index, vif->dev->name);
191 index %= num_queues; 191 index %= num_queues;
192 } 192 }
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index c5a34671abda..9bd7ddeeb6a5 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1326,6 +1326,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1326 1326
1327 netif_carrier_off(netdev); 1327 netif_carrier_off(netdev);
1328 1328
1329 xenbus_switch_state(dev, XenbusStateInitialising);
1329 return netdev; 1330 return netdev;
1330 1331
1331 exit: 1332 exit:
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index e949e3302af4..c586bcdb5190 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -211,12 +211,12 @@ static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
211 return ret; 211 return ret;
212} 212}
213 213
214static int btt_log_read_pair(struct arena_info *arena, u32 lane, 214static int btt_log_group_read(struct arena_info *arena, u32 lane,
215 struct log_entry *ent) 215 struct log_group *log)
216{ 216{
217 return arena_read_bytes(arena, 217 return arena_read_bytes(arena,
218 arena->logoff + (2 * lane * LOG_ENT_SIZE), ent, 218 arena->logoff + (lane * LOG_GRP_SIZE), log,
219 2 * LOG_ENT_SIZE, 0); 219 LOG_GRP_SIZE, 0);
220} 220}
221 221
222static struct dentry *debugfs_root; 222static struct dentry *debugfs_root;
@@ -256,6 +256,8 @@ static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
256 debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff); 256 debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
257 debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off); 257 debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
258 debugfs_create_x32("flags", S_IRUGO, d, &a->flags); 258 debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
259 debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
260 debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
259} 261}
260 262
261static void btt_debugfs_init(struct btt *btt) 263static void btt_debugfs_init(struct btt *btt)
@@ -274,6 +276,11 @@ static void btt_debugfs_init(struct btt *btt)
274 } 276 }
275} 277}
276 278
279static u32 log_seq(struct log_group *log, int log_idx)
280{
281 return le32_to_cpu(log->ent[log_idx].seq);
282}
283
277/* 284/*
278 * This function accepts two log entries, and uses the 285 * This function accepts two log entries, and uses the
279 * sequence number to find the 'older' entry. 286 * sequence number to find the 'older' entry.
@@ -283,8 +290,10 @@ static void btt_debugfs_init(struct btt *btt)
283 * 290 *
284 * TODO The logic feels a bit kludge-y. make it better.. 291 * TODO The logic feels a bit kludge-y. make it better..
285 */ 292 */
286static int btt_log_get_old(struct log_entry *ent) 293static int btt_log_get_old(struct arena_info *a, struct log_group *log)
287{ 294{
295 int idx0 = a->log_index[0];
296 int idx1 = a->log_index[1];
288 int old; 297 int old;
289 298
290 /* 299 /*
@@ -292,23 +301,23 @@ static int btt_log_get_old(struct log_entry *ent)
292 * the next time, the following logic works out to put this 301 * the next time, the following logic works out to put this
293 * (next) entry into [1] 302 * (next) entry into [1]
294 */ 303 */
295 if (ent[0].seq == 0) { 304 if (log_seq(log, idx0) == 0) {
296 ent[0].seq = cpu_to_le32(1); 305 log->ent[idx0].seq = cpu_to_le32(1);
297 return 0; 306 return 0;
298 } 307 }
299 308
300 if (ent[0].seq == ent[1].seq) 309 if (log_seq(log, idx0) == log_seq(log, idx1))
301 return -EINVAL; 310 return -EINVAL;
302 if (le32_to_cpu(ent[0].seq) + le32_to_cpu(ent[1].seq) > 5) 311 if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
303 return -EINVAL; 312 return -EINVAL;
304 313
305 if (le32_to_cpu(ent[0].seq) < le32_to_cpu(ent[1].seq)) { 314 if (log_seq(log, idx0) < log_seq(log, idx1)) {
306 if (le32_to_cpu(ent[1].seq) - le32_to_cpu(ent[0].seq) == 1) 315 if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
307 old = 0; 316 old = 0;
308 else 317 else
309 old = 1; 318 old = 1;
310 } else { 319 } else {
311 if (le32_to_cpu(ent[0].seq) - le32_to_cpu(ent[1].seq) == 1) 320 if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
312 old = 1; 321 old = 1;
313 else 322 else
314 old = 0; 323 old = 0;
@@ -328,17 +337,18 @@ static int btt_log_read(struct arena_info *arena, u32 lane,
328{ 337{
329 int ret; 338 int ret;
330 int old_ent, ret_ent; 339 int old_ent, ret_ent;
331 struct log_entry log[2]; 340 struct log_group log;
332 341
333 ret = btt_log_read_pair(arena, lane, log); 342 ret = btt_log_group_read(arena, lane, &log);
334 if (ret) 343 if (ret)
335 return -EIO; 344 return -EIO;
336 345
337 old_ent = btt_log_get_old(log); 346 old_ent = btt_log_get_old(arena, &log);
338 if (old_ent < 0 || old_ent > 1) { 347 if (old_ent < 0 || old_ent > 1) {
339 dev_err(to_dev(arena), 348 dev_err(to_dev(arena),
340 "log corruption (%d): lane %d seq [%d, %d]\n", 349 "log corruption (%d): lane %d seq [%d, %d]\n",
341 old_ent, lane, log[0].seq, log[1].seq); 350 old_ent, lane, log.ent[arena->log_index[0]].seq,
351 log.ent[arena->log_index[1]].seq);
342 /* TODO set error state? */ 352 /* TODO set error state? */
343 return -EIO; 353 return -EIO;
344 } 354 }
@@ -346,7 +356,7 @@ static int btt_log_read(struct arena_info *arena, u32 lane,
346 ret_ent = (old_flag ? old_ent : (1 - old_ent)); 356 ret_ent = (old_flag ? old_ent : (1 - old_ent));
347 357
348 if (ent != NULL) 358 if (ent != NULL)
349 memcpy(ent, &log[ret_ent], LOG_ENT_SIZE); 359 memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);
350 360
351 return ret_ent; 361 return ret_ent;
352} 362}
@@ -360,17 +370,13 @@ static int __btt_log_write(struct arena_info *arena, u32 lane,
360 u32 sub, struct log_entry *ent, unsigned long flags) 370 u32 sub, struct log_entry *ent, unsigned long flags)
361{ 371{
362 int ret; 372 int ret;
363 /* 373 u32 group_slot = arena->log_index[sub];
364 * Ignore the padding in log_entry for calculating log_half. 374 unsigned int log_half = LOG_ENT_SIZE / 2;
365 * The entry is 'committed' when we write the sequence number,
366 * and we want to ensure that that is the last thing written.
367 * We don't bother writing the padding as that would be extra
368 * media wear and write amplification
369 */
370 unsigned int log_half = (LOG_ENT_SIZE - 2 * sizeof(u64)) / 2;
371 u64 ns_off = arena->logoff + (((2 * lane) + sub) * LOG_ENT_SIZE);
372 void *src = ent; 375 void *src = ent;
376 u64 ns_off;
373 377
378 ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
379 (group_slot * LOG_ENT_SIZE);
374 /* split the 16B write into atomic, durable halves */ 380 /* split the 16B write into atomic, durable halves */
375 ret = arena_write_bytes(arena, ns_off, src, log_half, flags); 381 ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
376 if (ret) 382 if (ret)
@@ -453,7 +459,7 @@ static int btt_log_init(struct arena_info *arena)
453{ 459{
454 size_t logsize = arena->info2off - arena->logoff; 460 size_t logsize = arena->info2off - arena->logoff;
455 size_t chunk_size = SZ_4K, offset = 0; 461 size_t chunk_size = SZ_4K, offset = 0;
456 struct log_entry log; 462 struct log_entry ent;
457 void *zerobuf; 463 void *zerobuf;
458 int ret; 464 int ret;
459 u32 i; 465 u32 i;
@@ -485,11 +491,11 @@ static int btt_log_init(struct arena_info *arena)
485 } 491 }
486 492
487 for (i = 0; i < arena->nfree; i++) { 493 for (i = 0; i < arena->nfree; i++) {
488 log.lba = cpu_to_le32(i); 494 ent.lba = cpu_to_le32(i);
489 log.old_map = cpu_to_le32(arena->external_nlba + i); 495 ent.old_map = cpu_to_le32(arena->external_nlba + i);
490 log.new_map = cpu_to_le32(arena->external_nlba + i); 496 ent.new_map = cpu_to_le32(arena->external_nlba + i);
491 log.seq = cpu_to_le32(LOG_SEQ_INIT); 497 ent.seq = cpu_to_le32(LOG_SEQ_INIT);
492 ret = __btt_log_write(arena, i, 0, &log, 0); 498 ret = __btt_log_write(arena, i, 0, &ent, 0);
493 if (ret) 499 if (ret)
494 goto free; 500 goto free;
495 } 501 }
@@ -594,6 +600,123 @@ static int btt_freelist_init(struct arena_info *arena)
594 return 0; 600 return 0;
595} 601}
596 602
603static bool ent_is_padding(struct log_entry *ent)
604{
605 return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
606 && (ent->seq == 0);
607}
608
609/*
610 * Detecting valid log indices: We read a log group (see the comments in btt.h
611 * for a description of a 'log_group' and its 'slots'), and iterate over its
612 * four slots. We expect that a padding slot will be all-zeroes, and use this
613 * to detect a padding slot vs. an actual entry.
614 *
615 * If a log_group is in the initial state, i.e. hasn't been used since the
616 * creation of this BTT layout, it will have three of the four slots with
617 * zeroes. We skip over these log_groups for the detection of log_index. If
618 * all log_groups are in the initial state (i.e. the BTT has never been
619 * written to), it is safe to assume the 'new format' of log entries in slots
620 * (0, 1).
621 */
622static int log_set_indices(struct arena_info *arena)
623{
624 bool idx_set = false, initial_state = true;
625 int ret, log_index[2] = {-1, -1};
626 u32 i, j, next_idx = 0;
627 struct log_group log;
628 u32 pad_count = 0;
629
630 for (i = 0; i < arena->nfree; i++) {
631 ret = btt_log_group_read(arena, i, &log);
632 if (ret < 0)
633 return ret;
634
635 for (j = 0; j < 4; j++) {
636 if (!idx_set) {
637 if (ent_is_padding(&log.ent[j])) {
638 pad_count++;
639 continue;
640 } else {
641 /* Skip if index has been recorded */
642 if ((next_idx == 1) &&
643 (j == log_index[0]))
644 continue;
645 /* valid entry, record index */
646 log_index[next_idx] = j;
647 next_idx++;
648 }
649 if (next_idx == 2) {
650 /* two valid entries found */
651 idx_set = true;
652 } else if (next_idx > 2) {
653 /* too many valid indices */
654 return -ENXIO;
655 }
656 } else {
657 /*
658 * once the indices have been set, just verify
659 * that all subsequent log groups are either in
660 * their initial state or follow the same
661 * indices.
662 */
663 if (j == log_index[0]) {
664 /* entry must be 'valid' */
665 if (ent_is_padding(&log.ent[j]))
666 return -ENXIO;
667 } else if (j == log_index[1]) {
668 ;
669 /*
670 * log_index[1] can be padding if the
671 * lane never got used and it is still
672 * in the initial state (three 'padding'
673 * entries)
674 */
675 } else {
676 /* entry must be invalid (padding) */
677 if (!ent_is_padding(&log.ent[j]))
678 return -ENXIO;
679 }
680 }
681 }
682 /*
683 * If any of the log_groups have more than one valid,
684 * non-padding entry, then the we are no longer in the
685 * initial_state
686 */
687 if (pad_count < 3)
688 initial_state = false;
689 pad_count = 0;
690 }
691
692 if (!initial_state && !idx_set)
693 return -ENXIO;
694
695 /*
696 * If all the entries in the log were in the initial state,
697 * assume new padding scheme
698 */
699 if (initial_state)
700 log_index[1] = 1;
701
702 /*
703 * Only allow the known permutations of log/padding indices,
704 * i.e. (0, 1), and (0, 2)
705 */
706 if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
707 ; /* known index possibilities */
708 else {
709 dev_err(to_dev(arena), "Found an unknown padding scheme\n");
710 return -ENXIO;
711 }
712
713 arena->log_index[0] = log_index[0];
714 arena->log_index[1] = log_index[1];
715 dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
716 dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
717 return 0;
718}
719
597static int btt_rtt_init(struct arena_info *arena) 720static int btt_rtt_init(struct arena_info *arena)
598{ 721{
599 arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL); 722 arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
@@ -650,8 +773,7 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size,
650 available -= 2 * BTT_PG_SIZE; 773 available -= 2 * BTT_PG_SIZE;
651 774
652 /* The log takes a fixed amount of space based on nfree */ 775 /* The log takes a fixed amount of space based on nfree */
653 logsize = roundup(2 * arena->nfree * sizeof(struct log_entry), 776 logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
654 BTT_PG_SIZE);
655 available -= logsize; 777 available -= logsize;
656 778
657 /* Calculate optimal split between map and data area */ 779 /* Calculate optimal split between map and data area */
@@ -668,6 +790,10 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size,
668 arena->mapoff = arena->dataoff + datasize; 790 arena->mapoff = arena->dataoff + datasize;
669 arena->logoff = arena->mapoff + mapsize; 791 arena->logoff = arena->mapoff + mapsize;
670 arena->info2off = arena->logoff + logsize; 792 arena->info2off = arena->logoff + logsize;
793
794 /* Default log indices are (0,1) */
795 arena->log_index[0] = 0;
796 arena->log_index[1] = 1;
671 return arena; 797 return arena;
672} 798}
673 799
@@ -758,6 +884,13 @@ static int discover_arenas(struct btt *btt)
758 arena->external_lba_start = cur_nlba; 884 arena->external_lba_start = cur_nlba;
759 parse_arena_meta(arena, super, cur_off); 885 parse_arena_meta(arena, super, cur_off);
760 886
887 ret = log_set_indices(arena);
888 if (ret) {
889 dev_err(to_dev(arena),
890 "Unable to deduce log/padding indices\n");
891 goto out;
892 }
893
761 mutex_init(&arena->err_lock); 894 mutex_init(&arena->err_lock);
762 ret = btt_freelist_init(arena); 895 ret = btt_freelist_init(arena);
763 if (ret) 896 if (ret)
diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h
index 578c2057524d..db3cb6d4d0d4 100644
--- a/drivers/nvdimm/btt.h
+++ b/drivers/nvdimm/btt.h
@@ -27,6 +27,7 @@
27#define MAP_ERR_MASK (1 << MAP_ERR_SHIFT) 27#define MAP_ERR_MASK (1 << MAP_ERR_SHIFT)
28#define MAP_LBA_MASK (~((1 << MAP_TRIM_SHIFT) | (1 << MAP_ERR_SHIFT))) 28#define MAP_LBA_MASK (~((1 << MAP_TRIM_SHIFT) | (1 << MAP_ERR_SHIFT)))
29#define MAP_ENT_NORMAL 0xC0000000 29#define MAP_ENT_NORMAL 0xC0000000
30#define LOG_GRP_SIZE sizeof(struct log_group)
30#define LOG_ENT_SIZE sizeof(struct log_entry) 31#define LOG_ENT_SIZE sizeof(struct log_entry)
31#define ARENA_MIN_SIZE (1UL << 24) /* 16 MB */ 32#define ARENA_MIN_SIZE (1UL << 24) /* 16 MB */
32#define ARENA_MAX_SIZE (1ULL << 39) /* 512 GB */ 33#define ARENA_MAX_SIZE (1ULL << 39) /* 512 GB */
@@ -50,12 +51,52 @@ enum btt_init_state {
50 INIT_READY 51 INIT_READY
51}; 52};
52 53
54/*
55 * A log group represents one log 'lane', and consists of four log entries.
56 * Two of the four entries are valid entries, and the remaining two are
57 * padding. Due to an old bug in the padding location, we need to perform a
58 * test to determine the padding scheme being used, and use that scheme
59 * thereafter.
60 *
61 * In kernels prior to 4.15, 'log group' would have actual log entries at
62 * indices (0, 2) and padding at indices (1, 3), where as the correct/updated
63 * format has log entries at indices (0, 1) and padding at indices (2, 3).
64 *
65 * Old (pre 4.15) format:
66 * +-----------------+-----------------+
67 * | ent[0] | ent[1] |
68 * | 16B | 16B |
69 * | lba/old/new/seq | pad |
70 * +-----------------------------------+
71 * | ent[2] | ent[3] |
72 * | 16B | 16B |
73 * | lba/old/new/seq | pad |
74 * +-----------------+-----------------+
75 *
76 * New format:
77 * +-----------------+-----------------+
78 * | ent[0] | ent[1] |
79 * | 16B | 16B |
80 * | lba/old/new/seq | lba/old/new/seq |
81 * +-----------------------------------+
82 * | ent[2] | ent[3] |
83 * | 16B | 16B |
84 * | pad | pad |
85 * +-----------------+-----------------+
86 *
87 * We detect during start-up which format is in use, and set
88 * arena->log_index[(0, 1)] with the detected format.
89 */
90
53struct log_entry { 91struct log_entry {
54 __le32 lba; 92 __le32 lba;
55 __le32 old_map; 93 __le32 old_map;
56 __le32 new_map; 94 __le32 new_map;
57 __le32 seq; 95 __le32 seq;
58 __le64 padding[2]; 96};
97
98struct log_group {
99 struct log_entry ent[4];
59}; 100};
60 101
61struct btt_sb { 102struct btt_sb {
@@ -125,6 +166,8 @@ struct aligned_lock {
125 * @list: List head for list of arenas 166 * @list: List head for list of arenas
126 * @debugfs_dir: Debugfs dentry 167 * @debugfs_dir: Debugfs dentry
127 * @flags: Arena flags - may signify error states. 168 * @flags: Arena flags - may signify error states.
169 * @err_lock: Mutex for synchronizing error clearing.
170 * @log_index: Indices of the valid log entries in a log_group
128 * 171 *
129 * arena_info is a per-arena handle. Once an arena is narrowed down for an 172 * arena_info is a per-arena handle. Once an arena is narrowed down for an
130 * IO, this struct is passed around for the duration of the IO. 173 * IO, this struct is passed around for the duration of the IO.
@@ -157,6 +200,7 @@ struct arena_info {
157 /* Arena flags */ 200 /* Arena flags */
158 u32 flags; 201 u32 flags;
159 struct mutex err_lock; 202 struct mutex err_lock;
203 int log_index[2];
160}; 204};
161 205
162/** 206/**
@@ -176,6 +220,7 @@ struct arena_info {
176 * @init_lock: Mutex used for the BTT initialization 220 * @init_lock: Mutex used for the BTT initialization
177 * @init_state: Flag describing the initialization state for the BTT 221 * @init_state: Flag describing the initialization state for the BTT
178 * @num_arenas: Number of arenas in the BTT instance 222 * @num_arenas: Number of arenas in the BTT instance
223 * @phys_bb: Pointer to the namespace's badblocks structure
179 */ 224 */
180struct btt { 225struct btt {
181 struct gendisk *btt_disk; 226 struct gendisk *btt_disk;
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 65cc171c721d..2adada1a5855 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -364,9 +364,9 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
364int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) 364int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
365{ 365{
366 u64 checksum, offset; 366 u64 checksum, offset;
367 unsigned long align;
368 enum nd_pfn_mode mode; 367 enum nd_pfn_mode mode;
369 struct nd_namespace_io *nsio; 368 struct nd_namespace_io *nsio;
369 unsigned long align, start_pad;
370 struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; 370 struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
371 struct nd_namespace_common *ndns = nd_pfn->ndns; 371 struct nd_namespace_common *ndns = nd_pfn->ndns;
372 const u8 *parent_uuid = nd_dev_to_uuid(&ndns->dev); 372 const u8 *parent_uuid = nd_dev_to_uuid(&ndns->dev);
@@ -410,6 +410,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
410 410
411 align = le32_to_cpu(pfn_sb->align); 411 align = le32_to_cpu(pfn_sb->align);
412 offset = le64_to_cpu(pfn_sb->dataoff); 412 offset = le64_to_cpu(pfn_sb->dataoff);
413 start_pad = le32_to_cpu(pfn_sb->start_pad);
413 if (align == 0) 414 if (align == 0)
414 align = 1UL << ilog2(offset); 415 align = 1UL << ilog2(offset);
415 mode = le32_to_cpu(pfn_sb->mode); 416 mode = le32_to_cpu(pfn_sb->mode);
@@ -468,7 +469,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
468 return -EBUSY; 469 return -EBUSY;
469 } 470 }
470 471
471 if ((align && !IS_ALIGNED(offset, align)) 472 if ((align && !IS_ALIGNED(nsio->res.start + offset + start_pad, align))
472 || !IS_ALIGNED(offset, PAGE_SIZE)) { 473 || !IS_ALIGNED(offset, PAGE_SIZE)) {
473 dev_err(&nd_pfn->dev, 474 dev_err(&nd_pfn->dev,
474 "bad offset: %#llx dax disabled align: %#lx\n", 475 "bad offset: %#llx dax disabled align: %#lx\n",
@@ -582,6 +583,12 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
582 return altmap; 583 return altmap;
583} 584}
584 585
586static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys)
587{
588 return min_t(u64, PHYS_SECTION_ALIGN_DOWN(phys),
589 ALIGN_DOWN(phys, nd_pfn->align));
590}
591
585static int nd_pfn_init(struct nd_pfn *nd_pfn) 592static int nd_pfn_init(struct nd_pfn *nd_pfn)
586{ 593{
587 u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0; 594 u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
@@ -637,13 +644,16 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
637 start = nsio->res.start; 644 start = nsio->res.start;
638 size = PHYS_SECTION_ALIGN_UP(start + size) - start; 645 size = PHYS_SECTION_ALIGN_UP(start + size) - start;
639 if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM, 646 if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
640 IORES_DESC_NONE) == REGION_MIXED) { 647 IORES_DESC_NONE) == REGION_MIXED
648 || !IS_ALIGNED(start + resource_size(&nsio->res),
649 nd_pfn->align)) {
641 size = resource_size(&nsio->res); 650 size = resource_size(&nsio->res);
642 end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size); 651 end_trunc = start + size - phys_pmem_align_down(nd_pfn,
652 start + size);
643 } 653 }
644 654
645 if (start_pad + end_trunc) 655 if (start_pad + end_trunc)
646 dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n", 656 dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n",
647 dev_name(&ndns->dev), start_pad + end_trunc); 657 dev_name(&ndns->dev), start_pad + end_trunc);
648 658
649 /* 659 /*
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f837d666cbd4..839650e0926a 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1287,7 +1287,7 @@ static void nvme_config_discard(struct nvme_ctrl *ctrl,
1287 BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) < 1287 BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
1288 NVME_DSM_MAX_RANGES); 1288 NVME_DSM_MAX_RANGES);
1289 1289
1290 queue->limits.discard_alignment = size; 1290 queue->limits.discard_alignment = 0;
1291 queue->limits.discard_granularity = size; 1291 queue->limits.discard_granularity = size;
1292 1292
1293 blk_queue_max_discard_sectors(queue, UINT_MAX); 1293 blk_queue_max_discard_sectors(queue, UINT_MAX);
@@ -1335,6 +1335,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
1335 struct nvme_ns *ns, struct nvme_id_ns *id) 1335 struct nvme_ns *ns, struct nvme_id_ns *id)
1336{ 1336{
1337 sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9); 1337 sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9);
1338 unsigned short bs = 1 << ns->lba_shift;
1338 unsigned stream_alignment = 0; 1339 unsigned stream_alignment = 0;
1339 1340
1340 if (ns->ctrl->nr_streams && ns->sws && ns->sgs) 1341 if (ns->ctrl->nr_streams && ns->sws && ns->sgs)
@@ -1343,7 +1344,10 @@ static void nvme_update_disk_info(struct gendisk *disk,
1343 blk_mq_freeze_queue(disk->queue); 1344 blk_mq_freeze_queue(disk->queue);
1344 blk_integrity_unregister(disk); 1345 blk_integrity_unregister(disk);
1345 1346
1346 blk_queue_logical_block_size(disk->queue, 1 << ns->lba_shift); 1347 blk_queue_logical_block_size(disk->queue, bs);
1348 blk_queue_physical_block_size(disk->queue, bs);
1349 blk_queue_io_min(disk->queue, bs);
1350
1347 if (ns->ms && !ns->ext && 1351 if (ns->ms && !ns->ext &&
1348 (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) 1352 (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
1349 nvme_init_integrity(disk, ns->ms, ns->pi_type); 1353 nvme_init_integrity(disk, ns->ms, ns->pi_type);
@@ -1705,7 +1709,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
1705 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); 1709 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
1706 blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); 1710 blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
1707 } 1711 }
1708 if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) 1712 if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
1713 is_power_of_2(ctrl->max_hw_sectors))
1709 blk_queue_chunk_sectors(q, ctrl->max_hw_sectors); 1714 blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
1710 blk_queue_virt_boundary(q, ctrl->page_size - 1); 1715 blk_queue_virt_boundary(q, ctrl->page_size - 1);
1711 if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) 1716 if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
@@ -2869,7 +2874,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
2869 2874
2870 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); 2875 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
2871 nvme_set_queue_limits(ctrl, ns->queue); 2876 nvme_set_queue_limits(ctrl, ns->queue);
2872 nvme_setup_streams_ns(ctrl, ns);
2873 2877
2874 id = nvme_identify_ns(ctrl, nsid); 2878 id = nvme_identify_ns(ctrl, nsid);
2875 if (!id) 2879 if (!id)
@@ -2880,6 +2884,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
2880 2884
2881 if (nvme_init_ns_head(ns, nsid, id, &new)) 2885 if (nvme_init_ns_head(ns, nsid, id, &new))
2882 goto out_free_id; 2886 goto out_free_id;
2887 nvme_setup_streams_ns(ctrl, ns);
2883 2888
2884#ifdef CONFIG_NVME_MULTIPATH 2889#ifdef CONFIG_NVME_MULTIPATH
2885 /* 2890 /*
@@ -2965,8 +2970,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
2965 return; 2970 return;
2966 2971
2967 if (ns->disk && ns->disk->flags & GENHD_FL_UP) { 2972 if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
2968 if (blk_get_integrity(ns->disk))
2969 blk_integrity_unregister(ns->disk);
2970 nvme_mpath_remove_disk_links(ns); 2973 nvme_mpath_remove_disk_links(ns);
2971 sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, 2974 sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
2972 &nvme_ns_id_attr_group); 2975 &nvme_ns_id_attr_group);
@@ -2974,6 +2977,8 @@ static void nvme_ns_remove(struct nvme_ns *ns)
2974 nvme_nvm_unregister_sysfs(ns); 2977 nvme_nvm_unregister_sysfs(ns);
2975 del_gendisk(ns->disk); 2978 del_gendisk(ns->disk);
2976 blk_cleanup_queue(ns->queue); 2979 blk_cleanup_queue(ns->queue);
2980 if (blk_get_integrity(ns->disk))
2981 blk_integrity_unregister(ns->disk);
2977 } 2982 }
2978 2983
2979 mutex_lock(&ns->ctrl->subsys->lock); 2984 mutex_lock(&ns->ctrl->subsys->lock);
@@ -2986,6 +2991,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
2986 mutex_unlock(&ns->ctrl->namespaces_mutex); 2991 mutex_unlock(&ns->ctrl->namespaces_mutex);
2987 2992
2988 synchronize_srcu(&ns->head->srcu); 2993 synchronize_srcu(&ns->head->srcu);
2994 nvme_mpath_check_last_path(ns);
2989 nvme_put_ns(ns); 2995 nvme_put_ns(ns);
2990} 2996}
2991 2997
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 76b4fe6816a0..894c2ccb3891 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -74,6 +74,7 @@ static struct nvmf_host *nvmf_host_default(void)
74 return NULL; 74 return NULL;
75 75
76 kref_init(&host->ref); 76 kref_init(&host->ref);
77 uuid_gen(&host->id);
77 snprintf(host->nqn, NVMF_NQN_SIZE, 78 snprintf(host->nqn, NVMF_NQN_SIZE,
78 "nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id); 79 "nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);
79 80
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 0a8af4daef89..794e66e4aa20 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -3221,7 +3221,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
3221 3221
3222 /* initiate nvme ctrl ref counting teardown */ 3222 /* initiate nvme ctrl ref counting teardown */
3223 nvme_uninit_ctrl(&ctrl->ctrl); 3223 nvme_uninit_ctrl(&ctrl->ctrl);
3224 nvme_put_ctrl(&ctrl->ctrl);
3225 3224
3226 /* Remove core ctrl ref. */ 3225 /* Remove core ctrl ref. */
3227 nvme_put_ctrl(&ctrl->ctrl); 3226 nvme_put_ctrl(&ctrl->ctrl);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index ea1aa5283e8e..a00eabd06427 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -417,6 +417,15 @@ static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
417 rcu_assign_pointer(head->current_path, NULL); 417 rcu_assign_pointer(head->current_path, NULL);
418} 418}
419struct nvme_ns *nvme_find_path(struct nvme_ns_head *head); 419struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
420
421static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
422{
423 struct nvme_ns_head *head = ns->head;
424
425 if (head->disk && list_empty(&head->list))
426 kblockd_schedule_work(&head->requeue_work);
427}
428
420#else 429#else
421static inline void nvme_failover_req(struct request *req) 430static inline void nvme_failover_req(struct request *req)
422{ 431{
@@ -448,6 +457,9 @@ static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
448static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns) 457static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
449{ 458{
450} 459}
460static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
461{
462}
451#endif /* CONFIG_NVME_MULTIPATH */ 463#endif /* CONFIG_NVME_MULTIPATH */
452 464
453#ifdef CONFIG_NVM 465#ifdef CONFIG_NVM
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index f5800c3c9082..4276ebfff22b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -448,12 +448,34 @@ static void **nvme_pci_iod_list(struct request *req)
448 return (void **)(iod->sg + blk_rq_nr_phys_segments(req)); 448 return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
449} 449}
450 450
451static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
452{
453 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
454 int nseg = blk_rq_nr_phys_segments(req);
455 unsigned int avg_seg_size;
456
457 if (nseg == 0)
458 return false;
459
460 avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
461
462 if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
463 return false;
464 if (!iod->nvmeq->qid)
465 return false;
466 if (!sgl_threshold || avg_seg_size < sgl_threshold)
467 return false;
468 return true;
469}
470
451static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev) 471static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
452{ 472{
453 struct nvme_iod *iod = blk_mq_rq_to_pdu(rq); 473 struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
454 int nseg = blk_rq_nr_phys_segments(rq); 474 int nseg = blk_rq_nr_phys_segments(rq);
455 unsigned int size = blk_rq_payload_bytes(rq); 475 unsigned int size = blk_rq_payload_bytes(rq);
456 476
477 iod->use_sgl = nvme_pci_use_sgls(dev, rq);
478
457 if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) { 479 if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
458 size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg, 480 size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg,
459 iod->use_sgl); 481 iod->use_sgl);
@@ -604,8 +626,6 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
604 dma_addr_t prp_dma; 626 dma_addr_t prp_dma;
605 int nprps, i; 627 int nprps, i;
606 628
607 iod->use_sgl = false;
608
609 length -= (page_size - offset); 629 length -= (page_size - offset);
610 if (length <= 0) { 630 if (length <= 0) {
611 iod->first_dma = 0; 631 iod->first_dma = 0;
@@ -705,22 +725,19 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
705} 725}
706 726
707static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, 727static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
708 struct request *req, struct nvme_rw_command *cmd) 728 struct request *req, struct nvme_rw_command *cmd, int entries)
709{ 729{
710 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 730 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
711 int length = blk_rq_payload_bytes(req);
712 struct dma_pool *pool; 731 struct dma_pool *pool;
713 struct nvme_sgl_desc *sg_list; 732 struct nvme_sgl_desc *sg_list;
714 struct scatterlist *sg = iod->sg; 733 struct scatterlist *sg = iod->sg;
715 int entries = iod->nents, i = 0;
716 dma_addr_t sgl_dma; 734 dma_addr_t sgl_dma;
717 735 int i = 0;
718 iod->use_sgl = true;
719 736
720 /* setting the transfer type as SGL */ 737 /* setting the transfer type as SGL */
721 cmd->flags = NVME_CMD_SGL_METABUF; 738 cmd->flags = NVME_CMD_SGL_METABUF;
722 739
723 if (length == sg_dma_len(sg)) { 740 if (entries == 1) {
724 nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg); 741 nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
725 return BLK_STS_OK; 742 return BLK_STS_OK;
726 } 743 }
@@ -760,33 +777,12 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
760 } 777 }
761 778
762 nvme_pci_sgl_set_data(&sg_list[i++], sg); 779 nvme_pci_sgl_set_data(&sg_list[i++], sg);
763
764 length -= sg_dma_len(sg);
765 sg = sg_next(sg); 780 sg = sg_next(sg);
766 entries--; 781 } while (--entries > 0);
767 } while (length > 0);
768 782
769 WARN_ON(entries > 0);
770 return BLK_STS_OK; 783 return BLK_STS_OK;
771} 784}
772 785
773static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
774{
775 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
776 unsigned int avg_seg_size;
777
778 avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req),
779 blk_rq_nr_phys_segments(req));
780
781 if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
782 return false;
783 if (!iod->nvmeq->qid)
784 return false;
785 if (!sgl_threshold || avg_seg_size < sgl_threshold)
786 return false;
787 return true;
788}
789
790static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, 786static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
791 struct nvme_command *cmnd) 787 struct nvme_command *cmnd)
792{ 788{
@@ -795,6 +791,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
795 enum dma_data_direction dma_dir = rq_data_dir(req) ? 791 enum dma_data_direction dma_dir = rq_data_dir(req) ?
796 DMA_TO_DEVICE : DMA_FROM_DEVICE; 792 DMA_TO_DEVICE : DMA_FROM_DEVICE;
797 blk_status_t ret = BLK_STS_IOERR; 793 blk_status_t ret = BLK_STS_IOERR;
794 int nr_mapped;
798 795
799 sg_init_table(iod->sg, blk_rq_nr_phys_segments(req)); 796 sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
800 iod->nents = blk_rq_map_sg(q, req, iod->sg); 797 iod->nents = blk_rq_map_sg(q, req, iod->sg);
@@ -802,12 +799,13 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
802 goto out; 799 goto out;
803 800
804 ret = BLK_STS_RESOURCE; 801 ret = BLK_STS_RESOURCE;
805 if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir, 802 nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
806 DMA_ATTR_NO_WARN)) 803 DMA_ATTR_NO_WARN);
804 if (!nr_mapped)
807 goto out; 805 goto out;
808 806
809 if (nvme_pci_use_sgls(dev, req)) 807 if (iod->use_sgl)
810 ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw); 808 ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
811 else 809 else
812 ret = nvme_pci_setup_prps(dev, req, &cmnd->rw); 810 ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
813 811
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 37af56596be6..2a0bba7f50cf 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -974,12 +974,18 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
974 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); 974 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
975 nvme_start_queues(&ctrl->ctrl); 975 nvme_start_queues(&ctrl->ctrl);
976 976
977 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
978 /* state change failure should never happen */
979 WARN_ON_ONCE(1);
980 return;
981 }
982
977 nvme_rdma_reconnect_or_remove(ctrl); 983 nvme_rdma_reconnect_or_remove(ctrl);
978} 984}
979 985
980static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl) 986static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
981{ 987{
982 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) 988 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
983 return; 989 return;
984 990
985 queue_work(nvme_wq, &ctrl->err_work); 991 queue_work(nvme_wq, &ctrl->err_work);
@@ -1753,6 +1759,12 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
1753 nvme_stop_ctrl(&ctrl->ctrl); 1759 nvme_stop_ctrl(&ctrl->ctrl);
1754 nvme_rdma_shutdown_ctrl(ctrl, false); 1760 nvme_rdma_shutdown_ctrl(ctrl, false);
1755 1761
1762 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
1763 /* state change failure should never happen */
1764 WARN_ON_ONCE(1);
1765 return;
1766 }
1767
1756 ret = nvme_rdma_configure_admin_queue(ctrl, false); 1768 ret = nvme_rdma_configure_admin_queue(ctrl, false);
1757 if (ret) 1769 if (ret)
1758 goto out_fail; 1770 goto out_fail;
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 7b75d9de55ab..6a018a0bd6ce 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -1085,7 +1085,7 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
1085 const char *buf, size_t count) 1085 const char *buf, size_t count)
1086{ 1086{
1087 struct fcloop_nport *nport = NULL, *tmpport; 1087 struct fcloop_nport *nport = NULL, *tmpport;
1088 struct fcloop_tport *tport; 1088 struct fcloop_tport *tport = NULL;
1089 u64 nodename, portname; 1089 u64 nodename, portname;
1090 unsigned long flags; 1090 unsigned long flags;
1091 int ret; 1091 int ret;
diff --git a/drivers/nvmem/meson-mx-efuse.c b/drivers/nvmem/meson-mx-efuse.c
index a346b4923550..41d3a3c1104e 100644
--- a/drivers/nvmem/meson-mx-efuse.c
+++ b/drivers/nvmem/meson-mx-efuse.c
@@ -156,8 +156,8 @@ static int meson_mx_efuse_read(void *context, unsigned int offset,
156 MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE, 156 MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE,
157 MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE); 157 MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE);
158 158
159 for (i = offset; i < offset + bytes; i += efuse->config.word_size) { 159 for (i = 0; i < bytes; i += efuse->config.word_size) {
160 addr = i / efuse->config.word_size; 160 addr = (offset + i) / efuse->config.word_size;
161 161
162 err = meson_mx_efuse_read_addr(efuse, addr, &tmp); 162 err = meson_mx_efuse_read_addr(efuse, addr, &tmp);
163 if (err) 163 if (err)
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index c454941b34ec..ab988d88704d 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -695,7 +695,7 @@ int __of_changeset_apply_entries(struct of_changeset *ocs, int *ret_revert)
695/* 695/*
696 * Returns 0 on success, a negative error value in case of an error. 696 * Returns 0 on success, a negative error value in case of an error.
697 * 697 *
698 * If multiple changset entry notification errors occur then only the 698 * If multiple changeset entry notification errors occur then only the
699 * final notification error is reported. 699 * final notification error is reported.
700 */ 700 */
701int __of_changeset_apply_notify(struct of_changeset *ocs) 701int __of_changeset_apply_notify(struct of_changeset *ocs)
@@ -795,7 +795,7 @@ int __of_changeset_revert_entries(struct of_changeset *ocs, int *ret_apply)
795} 795}
796 796
797/* 797/*
798 * If multiple changset entry notification errors occur then only the 798 * If multiple changeset entry notification errors occur then only the
799 * final notification error is reported. 799 * final notification error is reported.
800 */ 800 */
801int __of_changeset_revert_notify(struct of_changeset *ocs) 801int __of_changeset_revert_notify(struct of_changeset *ocs)
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 98258583abb0..a327be1d264b 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -81,6 +81,7 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
81 * can be looked up later */ 81 * can be looked up later */
82 of_node_get(child); 82 of_node_get(child);
83 phy->mdio.dev.of_node = child; 83 phy->mdio.dev.of_node = child;
84 phy->mdio.dev.fwnode = of_fwnode_handle(child);
84 85
85 /* All data is now stored in the phy struct; 86 /* All data is now stored in the phy struct;
86 * register it */ 87 * register it */
@@ -111,6 +112,7 @@ static int of_mdiobus_register_device(struct mii_bus *mdio,
111 */ 112 */
112 of_node_get(child); 113 of_node_get(child);
113 mdiodev->dev.of_node = child; 114 mdiodev->dev.of_node = child;
115 mdiodev->dev.fwnode = of_fwnode_handle(child);
114 116
115 /* All data is now stored in the mdiodev struct; register it. */ 117 /* All data is now stored in the mdiodev struct; register it. */
116 rc = mdio_device_register(mdiodev); 118 rc = mdio_device_register(mdiodev);
@@ -206,6 +208,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
206 mdio->phy_mask = ~0; 208 mdio->phy_mask = ~0;
207 209
208 mdio->dev.of_node = np; 210 mdio->dev.of_node = np;
211 mdio->dev.fwnode = of_fwnode_handle(np);
209 212
210 /* Get bus level PHY reset GPIO details */ 213 /* Get bus level PHY reset GPIO details */
211 mdio->reset_delay_us = DEFAULT_GPIO_RESET_DELAY; 214 mdio->reset_delay_us = DEFAULT_GPIO_RESET_DELAY;
@@ -228,7 +231,12 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
228 rc = of_mdiobus_register_phy(mdio, child, addr); 231 rc = of_mdiobus_register_phy(mdio, child, addr);
229 else 232 else
230 rc = of_mdiobus_register_device(mdio, child, addr); 233 rc = of_mdiobus_register_device(mdio, child, addr);
231 if (rc) 234
235 if (rc == -ENODEV)
236 dev_err(&mdio->dev,
237 "MDIO device at address %d is missing.\n",
238 addr);
239 else if (rc)
232 goto unregister; 240 goto unregister;
233 } 241 }
234 242
@@ -252,7 +260,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
252 260
253 if (of_mdiobus_child_is_phy(child)) { 261 if (of_mdiobus_child_is_phy(child)) {
254 rc = of_mdiobus_register_phy(mdio, child, addr); 262 rc = of_mdiobus_register_phy(mdio, child, addr);
255 if (rc) 263 if (rc && rc != -ENODEV)
256 goto unregister; 264 goto unregister;
257 } 265 }
258 } 266 }
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index c150abb9049d..3981b7da4fa9 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -522,7 +522,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
522 struct device_node *node, *overlay_node; 522 struct device_node *node, *overlay_node;
523 struct fragment *fragment; 523 struct fragment *fragment;
524 struct fragment *fragments; 524 struct fragment *fragments;
525 int cnt, ret; 525 int cnt, id, ret;
526 526
527 /* 527 /*
528 * Warn for some issues. Can not return -EINVAL for these until 528 * Warn for some issues. Can not return -EINVAL for these until
@@ -543,9 +543,9 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
543 543
544 of_changeset_init(&ovcs->cset); 544 of_changeset_init(&ovcs->cset);
545 545
546 ovcs->id = idr_alloc(&ovcs_idr, ovcs, 1, 0, GFP_KERNEL); 546 id = idr_alloc(&ovcs_idr, ovcs, 1, 0, GFP_KERNEL);
547 if (ovcs->id <= 0) 547 if (id <= 0)
548 return ovcs->id; 548 return id;
549 549
550 cnt = 0; 550 cnt = 0;
551 551
@@ -572,18 +572,20 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
572 572
573 cnt = 0; 573 cnt = 0;
574 for_each_child_of_node(tree, node) { 574 for_each_child_of_node(tree, node) {
575 overlay_node = of_get_child_by_name(node, "__overlay__");
576 if (!overlay_node)
577 continue;
578
575 fragment = &fragments[cnt]; 579 fragment = &fragments[cnt];
576 fragment->overlay = of_get_child_by_name(node, "__overlay__"); 580 fragment->overlay = overlay_node;
577 if (fragment->overlay) { 581 fragment->target = find_target_node(node);
578 fragment->target = find_target_node(node); 582 if (!fragment->target) {
579 if (!fragment->target) { 583 of_node_put(fragment->overlay);
580 of_node_put(fragment->overlay); 584 ret = -EINVAL;
581 ret = -EINVAL; 585 goto err_free_fragments;
582 goto err_free_fragments;
583 } else {
584 cnt++;
585 }
586 } 586 }
587
588 cnt++;
587 } 589 }
588 590
589 /* 591 /*
@@ -611,6 +613,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
611 goto err_free_fragments; 613 goto err_free_fragments;
612 } 614 }
613 615
616 ovcs->id = id;
614 ovcs->count = cnt; 617 ovcs->count = cnt;
615 ovcs->fragments = fragments; 618 ovcs->fragments = fragments;
616 619
@@ -619,7 +622,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
619err_free_fragments: 622err_free_fragments:
620 kfree(fragments); 623 kfree(fragments);
621err_free_idr: 624err_free_idr:
622 idr_remove(&ovcs_idr, ovcs->id); 625 idr_remove(&ovcs_idr, id);
623 626
624 pr_err("%s() failed, ret = %d\n", __func__, ret); 627 pr_err("%s() failed, ret = %d\n", __func__, ret);
625 628
@@ -630,9 +633,8 @@ static void free_overlay_changeset(struct overlay_changeset *ovcs)
630{ 633{
631 int i; 634 int i;
632 635
633 if (!ovcs->cset.entries.next) 636 if (ovcs->cset.entries.next)
634 return; 637 of_changeset_destroy(&ovcs->cset);
635 of_changeset_destroy(&ovcs->cset);
636 638
637 if (ovcs->id) 639 if (ovcs->id)
638 idr_remove(&ovcs_idr, ovcs->id); 640 idr_remove(&ovcs_idr, ovcs->id);
@@ -660,14 +662,14 @@ static void free_overlay_changeset(struct overlay_changeset *ovcs)
660 * A non-zero return value will not have created the changeset if error is from: 662 * A non-zero return value will not have created the changeset if error is from:
661 * - parameter checks 663 * - parameter checks
662 * - building the changeset 664 * - building the changeset
663 * - overlay changset pre-apply notifier 665 * - overlay changeset pre-apply notifier
664 * 666 *
665 * If an error is returned by an overlay changeset pre-apply notifier 667 * If an error is returned by an overlay changeset pre-apply notifier
666 * then no further overlay changeset pre-apply notifier will be called. 668 * then no further overlay changeset pre-apply notifier will be called.
667 * 669 *
668 * A non-zero return value will have created the changeset if error is from: 670 * A non-zero return value will have created the changeset if error is from:
669 * - overlay changeset entry notifier 671 * - overlay changeset entry notifier
670 * - overlay changset post-apply notifier 672 * - overlay changeset post-apply notifier
671 * 673 *
672 * If an error is returned by an overlay changeset post-apply notifier 674 * If an error is returned by an overlay changeset post-apply notifier
673 * then no further overlay changeset post-apply notifier will be called. 675 * then no further overlay changeset post-apply notifier will be called.
@@ -706,12 +708,11 @@ int of_overlay_apply(struct device_node *tree, int *ovcs_id)
706 } 708 }
707 709
708 of_overlay_mutex_lock(); 710 of_overlay_mutex_lock();
711 mutex_lock(&of_mutex);
709 712
710 ret = of_resolve_phandles(tree); 713 ret = of_resolve_phandles(tree);
711 if (ret) 714 if (ret)
712 goto err_overlay_unlock; 715 goto err_free_overlay_changeset;
713
714 mutex_lock(&of_mutex);
715 716
716 ret = init_overlay_changeset(ovcs, tree); 717 ret = init_overlay_changeset(ovcs, tree);
717 if (ret) 718 if (ret)
@@ -736,14 +737,13 @@ int of_overlay_apply(struct device_node *tree, int *ovcs_id)
736 devicetree_state_flags |= DTSF_APPLY_FAIL; 737 devicetree_state_flags |= DTSF_APPLY_FAIL;
737 } 738 }
738 goto err_free_overlay_changeset; 739 goto err_free_overlay_changeset;
739 } else {
740 ret = __of_changeset_apply_notify(&ovcs->cset);
741 if (ret)
742 pr_err("overlay changeset entry notify error %d\n",
743 ret);
744 /* fall through */
745 } 740 }
746 741
742 ret = __of_changeset_apply_notify(&ovcs->cset);
743 if (ret)
744 pr_err("overlay changeset entry notify error %d\n", ret);
745 /* notify failure is not fatal, continue */
746
747 list_add_tail(&ovcs->ovcs_list, &ovcs_list); 747 list_add_tail(&ovcs->ovcs_list, &ovcs_list);
748 *ovcs_id = ovcs->id; 748 *ovcs_id = ovcs->id;
749 749
@@ -755,18 +755,14 @@ int of_overlay_apply(struct device_node *tree, int *ovcs_id)
755 ret = ret_tmp; 755 ret = ret_tmp;
756 } 756 }
757 757
758 mutex_unlock(&of_mutex); 758 goto out_unlock;
759 of_overlay_mutex_unlock();
760
761 goto out;
762
763err_overlay_unlock:
764 of_overlay_mutex_unlock();
765 759
766err_free_overlay_changeset: 760err_free_overlay_changeset:
767 free_overlay_changeset(ovcs); 761 free_overlay_changeset(ovcs);
768 762
763out_unlock:
769 mutex_unlock(&of_mutex); 764 mutex_unlock(&of_mutex);
765 of_overlay_mutex_unlock();
770 766
771out: 767out:
772 pr_debug("%s() err=%d\n", __func__, ret); 768 pr_debug("%s() err=%d\n", __func__, ret);
@@ -871,7 +867,7 @@ static int overlay_removal_is_ok(struct overlay_changeset *remove_ovcs)
871 * 867 *
872 * A non-zero return value will not revert the changeset if error is from: 868 * A non-zero return value will not revert the changeset if error is from:
873 * - parameter checks 869 * - parameter checks
874 * - overlay changset pre-remove notifier 870 * - overlay changeset pre-remove notifier
875 * - overlay changeset entry revert 871 * - overlay changeset entry revert
876 * 872 *
877 * If an error is returned by an overlay changeset pre-remove notifier 873 * If an error is returned by an overlay changeset pre-remove notifier
@@ -882,7 +878,7 @@ static int overlay_removal_is_ok(struct overlay_changeset *remove_ovcs)
882 * 878 *
883 * A non-zero return value will revert the changeset if error is from: 879 * A non-zero return value will revert the changeset if error is from:
884 * - overlay changeset entry notifier 880 * - overlay changeset entry notifier
885 * - overlay changset post-remove notifier 881 * - overlay changeset post-remove notifier
886 * 882 *
887 * If an error is returned by an overlay changeset post-remove notifier 883 * If an error is returned by an overlay changeset post-remove notifier
888 * then no further overlay changeset post-remove notifier will be called. 884 * then no further overlay changeset post-remove notifier will be called.
@@ -931,15 +927,13 @@ int of_overlay_remove(int *ovcs_id)
931 if (ret_apply) 927 if (ret_apply)
932 devicetree_state_flags |= DTSF_REVERT_FAIL; 928 devicetree_state_flags |= DTSF_REVERT_FAIL;
933 goto out_unlock; 929 goto out_unlock;
934 } else {
935 ret = __of_changeset_revert_notify(&ovcs->cset);
936 if (ret) {
937 pr_err("overlay changeset entry notify error %d\n",
938 ret);
939 /* fall through - changeset was reverted */
940 }
941 } 930 }
942 931
932 ret = __of_changeset_revert_notify(&ovcs->cset);
933 if (ret)
934 pr_err("overlay changeset entry notify error %d\n", ret);
935 /* notify failure is not fatal, continue */
936
943 *ovcs_id = 0; 937 *ovcs_id = 0;
944 938
945 ret_tmp = overlay_notify(ovcs, OF_OVERLAY_POST_REMOVE); 939 ret_tmp = overlay_notify(ovcs, OF_OVERLAY_POST_REMOVE);
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index e568b1e82501..0f8052f1355c 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -2165,7 +2165,6 @@ static int __init overlay_data_add(int onum)
2165 ret = of_overlay_apply(info->np_overlay, &info->overlay_id); 2165 ret = of_overlay_apply(info->np_overlay, &info->overlay_id);
2166 if (ret < 0) { 2166 if (ret < 0) {
2167 pr_err("of_overlay_apply() (ret=%d), %d\n", ret, onum); 2167 pr_err("of_overlay_apply() (ret=%d), %d\n", ret, onum);
2168 of_overlay_mutex_unlock();
2169 goto out_free_np_overlay; 2168 goto out_free_np_overlay;
2170 } 2169 }
2171 2170
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 0b3fb99d9b89..7390fb8ca9d1 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -303,7 +303,7 @@ static void dino_mask_irq(struct irq_data *d)
303 struct dino_device *dino_dev = irq_data_get_irq_chip_data(d); 303 struct dino_device *dino_dev = irq_data_get_irq_chip_data(d);
304 int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS); 304 int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
305 305
306 DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq); 306 DBG(KERN_WARNING "%s(0x%px, %d)\n", __func__, dino_dev, d->irq);
307 307
308 /* Clear the matching bit in the IMR register */ 308 /* Clear the matching bit in the IMR register */
309 dino_dev->imr &= ~(DINO_MASK_IRQ(local_irq)); 309 dino_dev->imr &= ~(DINO_MASK_IRQ(local_irq));
@@ -316,7 +316,7 @@ static void dino_unmask_irq(struct irq_data *d)
316 int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS); 316 int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
317 u32 tmp; 317 u32 tmp;
318 318
319 DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq); 319 DBG(KERN_WARNING "%s(0x%px, %d)\n", __func__, dino_dev, d->irq);
320 320
321 /* 321 /*
322 ** clear pending IRQ bits 322 ** clear pending IRQ bits
@@ -396,7 +396,7 @@ ilr_again:
396 if (mask) { 396 if (mask) {
397 if (--ilr_loop > 0) 397 if (--ilr_loop > 0)
398 goto ilr_again; 398 goto ilr_again;
399 printk(KERN_ERR "Dino 0x%p: stuck interrupt %d\n", 399 printk(KERN_ERR "Dino 0x%px: stuck interrupt %d\n",
400 dino_dev->hba.base_addr, mask); 400 dino_dev->hba.base_addr, mask);
401 return IRQ_NONE; 401 return IRQ_NONE;
402 } 402 }
@@ -553,7 +553,7 @@ dino_fixup_bus(struct pci_bus *bus)
553 struct pci_dev *dev; 553 struct pci_dev *dev;
554 struct dino_device *dino_dev = DINO_DEV(parisc_walk_tree(bus->bridge)); 554 struct dino_device *dino_dev = DINO_DEV(parisc_walk_tree(bus->bridge));
555 555
556 DBG(KERN_WARNING "%s(0x%p) bus %d platform_data 0x%p\n", 556 DBG(KERN_WARNING "%s(0x%px) bus %d platform_data 0x%px\n",
557 __func__, bus, bus->busn_res.start, 557 __func__, bus, bus->busn_res.start,
558 bus->bridge->platform_data); 558 bus->bridge->platform_data);
559 559
@@ -854,7 +854,7 @@ static int __init dino_common_init(struct parisc_device *dev,
854 res->flags = IORESOURCE_IO; /* do not mark it busy ! */ 854 res->flags = IORESOURCE_IO; /* do not mark it busy ! */
855 if (request_resource(&ioport_resource, res) < 0) { 855 if (request_resource(&ioport_resource, res) < 0) {
856 printk(KERN_ERR "%s: request I/O Port region failed " 856 printk(KERN_ERR "%s: request I/O Port region failed "
857 "0x%lx/%lx (hpa 0x%p)\n", 857 "0x%lx/%lx (hpa 0x%px)\n",
858 name, (unsigned long)res->start, (unsigned long)res->end, 858 name, (unsigned long)res->start, (unsigned long)res->end,
859 dino_dev->hba.base_addr); 859 dino_dev->hba.base_addr);
860 return 1; 860 return 1;
diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c
index 4dd9b1308128..99a80da6fd2e 100644
--- a/drivers/parisc/eisa_eeprom.c
+++ b/drivers/parisc/eisa_eeprom.c
@@ -106,7 +106,7 @@ static int __init eisa_eeprom_init(void)
106 return retval; 106 return retval;
107 } 107 }
108 108
109 printk(KERN_INFO "EISA EEPROM at 0x%p\n", eisa_eeprom_addr); 109 printk(KERN_INFO "EISA EEPROM at 0x%px\n", eisa_eeprom_addr);
110 return 0; 110 return 0;
111} 111}
112 112
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index a25fed52f7e9..41b740aed3a3 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -1692,3 +1692,36 @@ void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
1692 iounmap(base_addr); 1692 iounmap(base_addr);
1693} 1693}
1694 1694
1695
1696/*
1697 * The design of the Diva management card in rp34x0 machines (rp3410, rp3440)
1698 * seems rushed, so that many built-in components simply don't work.
1699 * The following quirks disable the serial AUX port and the built-in ATI RV100
1700 * Radeon 7000 graphics card which both don't have any external connectors and
1701 * thus are useless, and even worse, e.g. the AUX port occupies ttyS0 and as
1702 * such makes those machines the only PARISC machines on which we can't use
1703 * ttyS0 as boot console.
1704 */
1705static void quirk_diva_ati_card(struct pci_dev *dev)
1706{
1707 if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
1708 dev->subsystem_device != 0x1292)
1709 return;
1710
1711 dev_info(&dev->dev, "Hiding Diva built-in ATI card");
1712 dev->device = 0;
1713}
1714DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QY,
1715 quirk_diva_ati_card);
1716
1717static void quirk_diva_aux_disable(struct pci_dev *dev)
1718{
1719 if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
1720 dev->subsystem_device != 0x1291)
1721 return;
1722
1723 dev_info(&dev->dev, "Hiding Diva built-in AUX serial device");
1724 dev->device = 0;
1725}
1726DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX,
1727 quirk_diva_aux_disable);
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 04dac6a42c9f..6b8d060d07de 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -985,9 +985,7 @@ static u32 hv_compose_msi_req_v1(
985 int_pkt->wslot.slot = slot; 985 int_pkt->wslot.slot = slot;
986 int_pkt->int_desc.vector = vector; 986 int_pkt->int_desc.vector = vector;
987 int_pkt->int_desc.vector_count = 1; 987 int_pkt->int_desc.vector_count = 1;
988 int_pkt->int_desc.delivery_mode = 988 int_pkt->int_desc.delivery_mode = dest_Fixed;
989 (apic->irq_delivery_mode == dest_LowestPrio) ?
990 dest_LowestPrio : dest_Fixed;
991 989
992 /* 990 /*
993 * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in 991 * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
@@ -1008,9 +1006,7 @@ static u32 hv_compose_msi_req_v2(
1008 int_pkt->wslot.slot = slot; 1006 int_pkt->wslot.slot = slot;
1009 int_pkt->int_desc.vector = vector; 1007 int_pkt->int_desc.vector = vector;
1010 int_pkt->int_desc.vector_count = 1; 1008 int_pkt->int_desc.vector_count = 1;
1011 int_pkt->int_desc.delivery_mode = 1009 int_pkt->int_desc.delivery_mode = dest_Fixed;
1012 (apic->irq_delivery_mode == dest_LowestPrio) ?
1013 dest_LowestPrio : dest_Fixed;
1014 1010
1015 /* 1011 /*
1016 * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten 1012 * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 12796eccb2be..52ab3cb0a0bf 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -1128,12 +1128,12 @@ static int rcar_pcie_probe(struct platform_device *pdev)
1128 err = rcar_pcie_get_resources(pcie); 1128 err = rcar_pcie_get_resources(pcie);
1129 if (err < 0) { 1129 if (err < 0) {
1130 dev_err(dev, "failed to request resources: %d\n", err); 1130 dev_err(dev, "failed to request resources: %d\n", err);
1131 goto err_free_bridge; 1131 goto err_free_resource_list;
1132 } 1132 }
1133 1133
1134 err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node); 1134 err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
1135 if (err) 1135 if (err)
1136 goto err_free_bridge; 1136 goto err_free_resource_list;
1137 1137
1138 pm_runtime_enable(dev); 1138 pm_runtime_enable(dev);
1139 err = pm_runtime_get_sync(dev); 1139 err = pm_runtime_get_sync(dev);
@@ -1176,9 +1176,9 @@ err_pm_put:
1176err_pm_disable: 1176err_pm_disable:
1177 pm_runtime_disable(dev); 1177 pm_runtime_disable(dev);
1178 1178
1179err_free_bridge: 1179err_free_resource_list:
1180 pci_free_host_bridge(bridge);
1181 pci_free_resource_list(&pcie->resources); 1180 pci_free_resource_list(&pcie->resources);
1181 pci_free_host_bridge(bridge);
1182 1182
1183 return err; 1183 return err;
1184} 1184}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 7f47bb72bf30..14fd865a5120 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -999,7 +999,7 @@ static int pci_pm_thaw_noirq(struct device *dev)
999 * the subsequent "thaw" callbacks for the device. 999 * the subsequent "thaw" callbacks for the device.
1000 */ 1000 */
1001 if (dev_pm_smart_suspend_and_suspended(dev)) { 1001 if (dev_pm_smart_suspend_and_suspended(dev)) {
1002 dev->power.direct_complete = true; 1002 dev_pm_skip_next_resume_phases(dev);
1003 return 0; 1003 return 0;
1004 } 1004 }
1005 1005
@@ -1012,7 +1012,12 @@ static int pci_pm_thaw_noirq(struct device *dev)
1012 if (pci_has_legacy_pm_support(pci_dev)) 1012 if (pci_has_legacy_pm_support(pci_dev))
1013 return pci_legacy_resume_early(dev); 1013 return pci_legacy_resume_early(dev);
1014 1014
1015 pci_update_current_state(pci_dev, PCI_D0); 1015 /*
1016 * pci_restore_state() requires the device to be in D0 (because of MSI
1017 * restoration among other things), so force it into D0 in case the
1018 * driver's "freeze" callbacks put it into a low-power state directly.
1019 */
1020 pci_set_power_state(pci_dev, PCI_D0);
1016 pci_restore_state(pci_dev); 1021 pci_restore_state(pci_dev);
1017 1022
1018 if (drv && drv->pm && drv->pm->thaw_noirq) 1023 if (drv && drv->pm && drv->pm->thaw_noirq)
diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c
index accaaaccb662..6601ad0dfb3a 100644
--- a/drivers/phy/motorola/phy-cpcap-usb.c
+++ b/drivers/phy/motorola/phy-cpcap-usb.c
@@ -310,7 +310,7 @@ static int cpcap_usb_init_irq(struct platform_device *pdev,
310 int irq, error; 310 int irq, error;
311 311
312 irq = platform_get_irq_byname(pdev, name); 312 irq = platform_get_irq_byname(pdev, name);
313 if (!irq) 313 if (irq < 0)
314 return -ENODEV; 314 return -ENODEV;
315 315
316 error = devm_request_threaded_irq(ddata->dev, irq, NULL, 316 error = devm_request_threaded_irq(ddata->dev, irq, NULL,
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index b4964b067aec..8f6e8e28996d 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -410,6 +410,10 @@ static struct phy *_of_phy_get(struct device_node *np, int index)
410 if (ret) 410 if (ret)
411 return ERR_PTR(-ENODEV); 411 return ERR_PTR(-ENODEV);
412 412
413 /* This phy type handled by the usb-phy subsystem for now */
414 if (of_device_is_compatible(args.np, "usb-nop-xceiv"))
415 return ERR_PTR(-ENODEV);
416
413 mutex_lock(&phy_provider_mutex); 417 mutex_lock(&phy_provider_mutex);
414 phy_provider = of_phy_provider_lookup(args.np); 418 phy_provider = of_phy_provider_lookup(args.np);
415 if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) { 419 if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
diff --git a/drivers/phy/renesas/Kconfig b/drivers/phy/renesas/Kconfig
index cb09245e9b4c..c845facacb06 100644
--- a/drivers/phy/renesas/Kconfig
+++ b/drivers/phy/renesas/Kconfig
@@ -12,7 +12,9 @@ config PHY_RCAR_GEN3_USB2
12 tristate "Renesas R-Car generation 3 USB 2.0 PHY driver" 12 tristate "Renesas R-Car generation 3 USB 2.0 PHY driver"
13 depends on ARCH_RENESAS 13 depends on ARCH_RENESAS
14 depends on EXTCON 14 depends on EXTCON
15 depends on USB_SUPPORT
15 select GENERIC_PHY 16 select GENERIC_PHY
17 select USB_COMMON
16 help 18 help
17 Support for USB 2.0 PHY found on Renesas R-Car generation 3 SoCs. 19 Support for USB 2.0 PHY found on Renesas R-Car generation 3 SoCs.
18 20
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index ee85fa0ca4b0..7492c8978217 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -1137,6 +1137,7 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
1137 if (IS_ERR(phy)) { 1137 if (IS_ERR(phy)) {
1138 dev_err(dev, "failed to create phy: %s\n", 1138 dev_err(dev, "failed to create phy: %s\n",
1139 child_np->name); 1139 child_np->name);
1140 pm_runtime_disable(dev);
1140 return PTR_ERR(phy); 1141 return PTR_ERR(phy);
1141 } 1142 }
1142 1143
@@ -1146,6 +1147,7 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
1146 phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); 1147 phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
1147 if (IS_ERR(phy_provider)) { 1148 if (IS_ERR(phy_provider)) {
1148 dev_err(dev, "Failed to register phy provider\n"); 1149 dev_err(dev, "Failed to register phy provider\n");
1150 pm_runtime_disable(dev);
1149 return PTR_ERR(phy_provider); 1151 return PTR_ERR(phy_provider);
1150 } 1152 }
1151 1153
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 4307bf0013e1..63e916d4d069 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -75,14 +75,14 @@ MODULE_DEVICE_TABLE(of, tegra_xusb_padctl_of_match);
75static struct device_node * 75static struct device_node *
76tegra_xusb_find_pad_node(struct tegra_xusb_padctl *padctl, const char *name) 76tegra_xusb_find_pad_node(struct tegra_xusb_padctl *padctl, const char *name)
77{ 77{
78 /* 78 struct device_node *pads, *np;
79 * of_find_node_by_name() drops a reference, so make sure to grab one. 79
80 */ 80 pads = of_get_child_by_name(padctl->dev->of_node, "pads");
81 struct device_node *np = of_node_get(padctl->dev->of_node); 81 if (!pads)
82 return NULL;
82 83
83 np = of_find_node_by_name(np, "pads"); 84 np = of_get_child_by_name(pads, name);
84 if (np) 85 of_node_put(pads);
85 np = of_find_node_by_name(np, name);
86 86
87 return np; 87 return np;
88} 88}
@@ -90,16 +90,16 @@ tegra_xusb_find_pad_node(struct tegra_xusb_padctl *padctl, const char *name)
90static struct device_node * 90static struct device_node *
91tegra_xusb_pad_find_phy_node(struct tegra_xusb_pad *pad, unsigned int index) 91tegra_xusb_pad_find_phy_node(struct tegra_xusb_pad *pad, unsigned int index)
92{ 92{
93 /* 93 struct device_node *np, *lanes;
94 * of_find_node_by_name() drops a reference, so make sure to grab one.
95 */
96 struct device_node *np = of_node_get(pad->dev.of_node);
97 94
98 np = of_find_node_by_name(np, "lanes"); 95 lanes = of_get_child_by_name(pad->dev.of_node, "lanes");
99 if (!np) 96 if (!lanes)
100 return NULL; 97 return NULL;
101 98
102 return of_find_node_by_name(np, pad->soc->lanes[index].name); 99 np = of_get_child_by_name(lanes, pad->soc->lanes[index].name);
100 of_node_put(lanes);
101
102 return np;
103} 103}
104 104
105static int 105static int
@@ -195,7 +195,7 @@ int tegra_xusb_pad_register(struct tegra_xusb_pad *pad,
195 unsigned int i; 195 unsigned int i;
196 int err; 196 int err;
197 197
198 children = of_find_node_by_name(pad->dev.of_node, "lanes"); 198 children = of_get_child_by_name(pad->dev.of_node, "lanes");
199 if (!children) 199 if (!children)
200 return -ENODEV; 200 return -ENODEV;
201 201
@@ -444,21 +444,21 @@ static struct device_node *
444tegra_xusb_find_port_node(struct tegra_xusb_padctl *padctl, const char *type, 444tegra_xusb_find_port_node(struct tegra_xusb_padctl *padctl, const char *type,
445 unsigned int index) 445 unsigned int index)
446{ 446{
447 /* 447 struct device_node *ports, *np;
448 * of_find_node_by_name() drops a reference, so make sure to grab one. 448 char *name;
449 */
450 struct device_node *np = of_node_get(padctl->dev->of_node);
451 449
452 np = of_find_node_by_name(np, "ports"); 450 ports = of_get_child_by_name(padctl->dev->of_node, "ports");
453 if (np) { 451 if (!ports)
454 char *name; 452 return NULL;
455 453
456 name = kasprintf(GFP_KERNEL, "%s-%u", type, index); 454 name = kasprintf(GFP_KERNEL, "%s-%u", type, index);
457 if (!name) 455 if (!name) {
458 return ERR_PTR(-ENOMEM); 456 of_node_put(ports);
459 np = of_find_node_by_name(np, name); 457 return ERR_PTR(-ENOMEM);
460 kfree(name);
461 } 458 }
459 np = of_get_child_by_name(ports, name);
460 kfree(name);
461 of_node_put(ports);
462 462
463 return np; 463 return np;
464} 464}
@@ -847,7 +847,7 @@ static void tegra_xusb_remove_ports(struct tegra_xusb_padctl *padctl)
847 847
848static int tegra_xusb_padctl_probe(struct platform_device *pdev) 848static int tegra_xusb_padctl_probe(struct platform_device *pdev)
849{ 849{
850 struct device_node *np = of_node_get(pdev->dev.of_node); 850 struct device_node *np = pdev->dev.of_node;
851 const struct tegra_xusb_padctl_soc *soc; 851 const struct tegra_xusb_padctl_soc *soc;
852 struct tegra_xusb_padctl *padctl; 852 struct tegra_xusb_padctl *padctl;
853 const struct of_device_id *match; 853 const struct of_device_id *match;
@@ -855,7 +855,7 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
855 int err; 855 int err;
856 856
857 /* for backwards compatibility with old device trees */ 857 /* for backwards compatibility with old device trees */
858 np = of_find_node_by_name(np, "pads"); 858 np = of_get_child_by_name(np, "pads");
859 if (!np) { 859 if (!np) {
860 dev_warn(&pdev->dev, "deprecated DT, using legacy driver\n"); 860 dev_warn(&pdev->dev, "deprecated DT, using legacy driver\n");
861 return tegra_xusb_padctl_legacy_probe(pdev); 861 return tegra_xusb_padctl_legacy_probe(pdev);
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index bdedb6325c72..4471fd94e1fe 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1620,6 +1620,22 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
1620 clear_bit(i, chip->irq.valid_mask); 1620 clear_bit(i, chip->irq.valid_mask);
1621 } 1621 }
1622 1622
1623 /*
1624 * The same set of machines in chv_no_valid_mask[] have incorrectly
1625 * configured GPIOs that generate spurious interrupts so we use
1626 * this same list to apply another quirk for them.
1627 *
1628 * See also https://bugzilla.kernel.org/show_bug.cgi?id=197953.
1629 */
1630 if (!need_valid_mask) {
1631 /*
1632 * Mask all interrupts the community is able to generate
1633 * but leave the ones that can only generate GPEs unmasked.
1634 */
1635 chv_writel(GENMASK(31, pctrl->community->nirqs),
1636 pctrl->regs + CHV_INTMASK);
1637 }
1638
1623 /* Clear all interrupts */ 1639 /* Clear all interrupts */
1624 chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); 1640 chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
1625 1641
diff --git a/drivers/pinctrl/intel/pinctrl-denverton.c b/drivers/pinctrl/intel/pinctrl-denverton.c
index 4500880240f2..6572550cfe78 100644
--- a/drivers/pinctrl/intel/pinctrl-denverton.c
+++ b/drivers/pinctrl/intel/pinctrl-denverton.c
@@ -207,7 +207,7 @@ static const unsigned int dnv_uart0_pins[] = { 60, 61, 64, 65 };
207static const unsigned int dnv_uart0_modes[] = { 2, 3, 1, 1 }; 207static const unsigned int dnv_uart0_modes[] = { 2, 3, 1, 1 };
208static const unsigned int dnv_uart1_pins[] = { 94, 95, 96, 97 }; 208static const unsigned int dnv_uart1_pins[] = { 94, 95, 96, 97 };
209static const unsigned int dnv_uart2_pins[] = { 60, 61, 62, 63 }; 209static const unsigned int dnv_uart2_pins[] = { 60, 61, 62, 63 };
210static const unsigned int dnv_uart2_modes[] = { 1, 1, 2, 2 }; 210static const unsigned int dnv_uart2_modes[] = { 1, 2, 2, 2 };
211static const unsigned int dnv_emmc_pins[] = { 211static const unsigned int dnv_emmc_pins[] = {
212 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 212 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152,
213}; 213};
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index d45af31b86b4..bdb8d174efef 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -408,12 +408,21 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip,
408{ 408{
409 struct armada_37xx_pinctrl *info = gpiochip_get_data(chip); 409 struct armada_37xx_pinctrl *info = gpiochip_get_data(chip);
410 unsigned int reg = OUTPUT_EN; 410 unsigned int reg = OUTPUT_EN;
411 unsigned int mask; 411 unsigned int mask, val, ret;
412 412
413 armada_37xx_update_reg(&reg, offset); 413 armada_37xx_update_reg(&reg, offset);
414 mask = BIT(offset); 414 mask = BIT(offset);
415 415
416 return regmap_update_bits(info->regmap, reg, mask, mask); 416 ret = regmap_update_bits(info->regmap, reg, mask, mask);
417
418 if (ret)
419 return ret;
420
421 reg = OUTPUT_VAL;
422 val = value ? mask : 0;
423 regmap_update_bits(info->regmap, reg, mask, val);
424
425 return 0;
417} 426}
418 427
419static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset) 428static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/pinctrl/pinctrl-gemini.c b/drivers/pinctrl/pinctrl-gemini.c
index e9b83e291edf..c11b8f14d841 100644
--- a/drivers/pinctrl/pinctrl-gemini.c
+++ b/drivers/pinctrl/pinctrl-gemini.c
@@ -2322,7 +2322,7 @@ static const struct gemini_pin_conf *gemini_get_pin_conf(struct gemini_pmx *pmx,
2322 int i; 2322 int i;
2323 2323
2324 for (i = 0; i < pmx->nconfs; i++) { 2324 for (i = 0; i < pmx->nconfs; i++) {
2325 retconf = &gemini_confs_3516[i]; 2325 retconf = &pmx->confs[i];
2326 if (retconf->pin == pin) 2326 if (retconf->pin == pin)
2327 return retconf; 2327 return retconf;
2328 } 2328 }
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index e6cd8de793e2..3501491e5bfc 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -222,6 +222,9 @@ static enum pin_config_param pcs_bias[] = {
222 */ 222 */
223static struct lock_class_key pcs_lock_class; 223static struct lock_class_key pcs_lock_class;
224 224
225/* Class for the IRQ request mutex */
226static struct lock_class_key pcs_request_class;
227
225/* 228/*
226 * REVISIT: Reads and writes could eventually use regmap or something 229 * REVISIT: Reads and writes could eventually use regmap or something
227 * generic. But at least on omaps, some mux registers are performance 230 * generic. But at least on omaps, some mux registers are performance
@@ -1486,7 +1489,7 @@ static int pcs_irqdomain_map(struct irq_domain *d, unsigned int irq,
1486 irq_set_chip_data(irq, pcs_soc); 1489 irq_set_chip_data(irq, pcs_soc);
1487 irq_set_chip_and_handler(irq, &pcs->chip, 1490 irq_set_chip_and_handler(irq, &pcs->chip,
1488 handle_level_irq); 1491 handle_level_irq);
1489 irq_set_lockdep_class(irq, &pcs_lock_class); 1492 irq_set_lockdep_class(irq, &pcs_lock_class, &pcs_request_class);
1490 irq_set_noprobe(irq); 1493 irq_set_noprobe(irq);
1491 1494
1492 return 0; 1495 return 0;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index a276c61be217..e62ab087bfd8 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -290,7 +290,7 @@ static int stm32_gpio_domain_translate(struct irq_domain *d,
290} 290}
291 291
292static int stm32_gpio_domain_activate(struct irq_domain *d, 292static int stm32_gpio_domain_activate(struct irq_domain *d,
293 struct irq_data *irq_data, bool early) 293 struct irq_data *irq_data, bool reserve)
294{ 294{
295 struct stm32_gpio_bank *bank = d->host_data; 295 struct stm32_gpio_bank *bank = d->host_data;
296 struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent); 296 struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c
index 4f2a726bbaeb..f5f77432ce6f 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c
@@ -428,7 +428,7 @@ static const struct sunxi_desc_pin a64_pins[] = {
428 SUNXI_FUNCTION(0x0, "gpio_in"), 428 SUNXI_FUNCTION(0x0, "gpio_in"),
429 SUNXI_FUNCTION(0x1, "gpio_out"), 429 SUNXI_FUNCTION(0x1, "gpio_out"),
430 SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */ 430 SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */
431 SUNXI_FUNCTION(0x4, "uart0")), /* RX */ 431 SUNXI_FUNCTION(0x3, "uart0")), /* RX */
432 SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5), 432 SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
433 SUNXI_FUNCTION(0x0, "gpio_in"), 433 SUNXI_FUNCTION(0x0, "gpio_in"),
434 SUNXI_FUNCTION(0x1, "gpio_out"), 434 SUNXI_FUNCTION(0x1, "gpio_out"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
index 97b48336f84a..a78d7b922ef4 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
@@ -535,14 +535,16 @@ static const struct sunxi_pinctrl_desc sun50i_h5_pinctrl_data_broken = {
535 .pins = sun50i_h5_pins, 535 .pins = sun50i_h5_pins,
536 .npins = ARRAY_SIZE(sun50i_h5_pins), 536 .npins = ARRAY_SIZE(sun50i_h5_pins),
537 .irq_banks = 2, 537 .irq_banks = 2,
538 .irq_read_needs_mux = true 538 .irq_read_needs_mux = true,
539 .disable_strict_mode = true,
539}; 540};
540 541
541static const struct sunxi_pinctrl_desc sun50i_h5_pinctrl_data = { 542static const struct sunxi_pinctrl_desc sun50i_h5_pinctrl_data = {
542 .pins = sun50i_h5_pins, 543 .pins = sun50i_h5_pins,
543 .npins = ARRAY_SIZE(sun50i_h5_pins), 544 .npins = ARRAY_SIZE(sun50i_h5_pins),
544 .irq_banks = 3, 545 .irq_banks = 3,
545 .irq_read_needs_mux = true 546 .irq_read_needs_mux = true,
547 .disable_strict_mode = true,
546}; 548};
547 549
548static int sun50i_h5_pinctrl_probe(struct platform_device *pdev) 550static int sun50i_h5_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
index 472ef0d91b99..5553c0eb0f41 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
@@ -145,19 +145,19 @@ static const struct sunxi_desc_pin sun9i_a80_pins[] = {
145 SUNXI_FUNCTION(0x0, "gpio_in"), 145 SUNXI_FUNCTION(0x0, "gpio_in"),
146 SUNXI_FUNCTION(0x1, "gpio_out"), 146 SUNXI_FUNCTION(0x1, "gpio_out"),
147 SUNXI_FUNCTION(0x3, "mcsi"), /* MCLK */ 147 SUNXI_FUNCTION(0x3, "mcsi"), /* MCLK */
148 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)), /* PB_EINT14 */ 148 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 14)), /* PB_EINT14 */
149 SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 15), 149 SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 15),
150 SUNXI_FUNCTION(0x0, "gpio_in"), 150 SUNXI_FUNCTION(0x0, "gpio_in"),
151 SUNXI_FUNCTION(0x1, "gpio_out"), 151 SUNXI_FUNCTION(0x1, "gpio_out"),
152 SUNXI_FUNCTION(0x3, "mcsi"), /* SCK */ 152 SUNXI_FUNCTION(0x3, "mcsi"), /* SCK */
153 SUNXI_FUNCTION(0x4, "i2c4"), /* SCK */ 153 SUNXI_FUNCTION(0x4, "i2c4"), /* SCK */
154 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 15)), /* PB_EINT15 */ 154 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 15)), /* PB_EINT15 */
155 SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 16), 155 SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 16),
156 SUNXI_FUNCTION(0x0, "gpio_in"), 156 SUNXI_FUNCTION(0x0, "gpio_in"),
157 SUNXI_FUNCTION(0x1, "gpio_out"), 157 SUNXI_FUNCTION(0x1, "gpio_out"),
158 SUNXI_FUNCTION(0x3, "mcsi"), /* SDA */ 158 SUNXI_FUNCTION(0x3, "mcsi"), /* SDA */
159 SUNXI_FUNCTION(0x4, "i2c4"), /* SDA */ 159 SUNXI_FUNCTION(0x4, "i2c4"), /* SDA */
160 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 16)), /* PB_EINT16 */ 160 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 16)), /* PB_EINT16 */
161 161
162 /* Hole */ 162 /* Hole */
163 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0), 163 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
diff --git a/drivers/platform/x86/asus-wireless.c b/drivers/platform/x86/asus-wireless.c
index f3796164329e..d4aeac3477f5 100644
--- a/drivers/platform/x86/asus-wireless.c
+++ b/drivers/platform/x86/asus-wireless.c
@@ -118,6 +118,7 @@ static void asus_wireless_notify(struct acpi_device *adev, u32 event)
118 return; 118 return;
119 } 119 }
120 input_report_key(data->idev, KEY_RFKILL, 1); 120 input_report_key(data->idev, KEY_RFKILL, 1);
121 input_sync(data->idev);
121 input_report_key(data->idev, KEY_RFKILL, 0); 122 input_report_key(data->idev, KEY_RFKILL, 0);
122 input_sync(data->idev); 123 input_sync(data->idev);
123} 124}
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index bf897b1832b1..cd4725e7e0b5 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -37,6 +37,7 @@
37 37
38struct quirk_entry { 38struct quirk_entry {
39 u8 touchpad_led; 39 u8 touchpad_led;
40 u8 kbd_led_levels_off_1;
40 41
41 int needs_kbd_timeouts; 42 int needs_kbd_timeouts;
42 /* 43 /*
@@ -67,6 +68,10 @@ static struct quirk_entry quirk_dell_xps13_9333 = {
67 .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 }, 68 .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 },
68}; 69};
69 70
71static struct quirk_entry quirk_dell_latitude_e6410 = {
72 .kbd_led_levels_off_1 = 1,
73};
74
70static struct platform_driver platform_driver = { 75static struct platform_driver platform_driver = {
71 .driver = { 76 .driver = {
72 .name = "dell-laptop", 77 .name = "dell-laptop",
@@ -269,6 +274,15 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
269 }, 274 },
270 .driver_data = &quirk_dell_xps13_9333, 275 .driver_data = &quirk_dell_xps13_9333,
271 }, 276 },
277 {
278 .callback = dmi_matched,
279 .ident = "Dell Latitude E6410",
280 .matches = {
281 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
282 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6410"),
283 },
284 .driver_data = &quirk_dell_latitude_e6410,
285 },
272 { } 286 { }
273}; 287};
274 288
@@ -1149,6 +1163,9 @@ static int kbd_get_info(struct kbd_info *info)
1149 units = (buffer->output[2] >> 8) & 0xFF; 1163 units = (buffer->output[2] >> 8) & 0xFF;
1150 info->levels = (buffer->output[2] >> 16) & 0xFF; 1164 info->levels = (buffer->output[2] >> 16) & 0xFF;
1151 1165
1166 if (quirks && quirks->kbd_led_levels_off_1 && info->levels)
1167 info->levels--;
1168
1152 if (units & BIT(0)) 1169 if (units & BIT(0))
1153 info->seconds = (buffer->output[3] >> 0) & 0xFF; 1170 info->seconds = (buffer->output[3] >> 0) & 0xFF;
1154 if (units & BIT(1)) 1171 if (units & BIT(1))
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 39d2f4518483..fb25b20df316 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -639,6 +639,8 @@ static int dell_wmi_events_set_enabled(bool enable)
639 int ret; 639 int ret;
640 640
641 buffer = kzalloc(sizeof(struct calling_interface_buffer), GFP_KERNEL); 641 buffer = kzalloc(sizeof(struct calling_interface_buffer), GFP_KERNEL);
642 if (!buffer)
643 return -ENOMEM;
642 buffer->cmd_class = CLASS_INFO; 644 buffer->cmd_class = CLASS_INFO;
643 buffer->cmd_select = SELECT_APP_REGISTRATION; 645 buffer->cmd_select = SELECT_APP_REGISTRATION;
644 buffer->input[0] = 0x10000; 646 buffer->input[0] = 0x10000;
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 791449a2370f..daa68acbc900 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -1458,5 +1458,5 @@ static void __exit acpi_wmi_exit(void)
1458 class_unregister(&wmi_bus_class); 1458 class_unregister(&wmi_bus_class);
1459} 1459}
1460 1460
1461subsys_initcall(acpi_wmi_init); 1461subsys_initcall_sync(acpi_wmi_init);
1462module_exit(acpi_wmi_exit); 1462module_exit(acpi_wmi_exit);
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index e5225ad9c5b1..2fdab400c1fe 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -1,3 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0
1# 2#
2# Makefile for the S/390 specific device drivers 3# Makefile for the S/390 specific device drivers
3# 4#
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 31f014b57bfc..bc27d716aa6b 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -1,3 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0
1comment "S/390 block device drivers" 2comment "S/390 block device drivers"
2 depends on S390 && BLOCK 3 depends on S390 && BLOCK
3 4
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 66e008f7adb6..d4e8dff673cc 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -759,7 +759,7 @@ static void dasd_profile_end_add_data(struct dasd_profile_info *data,
759 /* in case of an overflow, reset the whole profile */ 759 /* in case of an overflow, reset the whole profile */
760 if (data->dasd_io_reqs == UINT_MAX) { 760 if (data->dasd_io_reqs == UINT_MAX) {
761 memset(data, 0, sizeof(*data)); 761 memset(data, 0, sizeof(*data));
762 getnstimeofday(&data->starttod); 762 ktime_get_real_ts64(&data->starttod);
763 } 763 }
764 data->dasd_io_reqs++; 764 data->dasd_io_reqs++;
765 data->dasd_io_sects += sectors; 765 data->dasd_io_sects += sectors;
@@ -894,7 +894,7 @@ void dasd_profile_reset(struct dasd_profile *profile)
894 return; 894 return;
895 } 895 }
896 memset(data, 0, sizeof(*data)); 896 memset(data, 0, sizeof(*data));
897 getnstimeofday(&data->starttod); 897 ktime_get_real_ts64(&data->starttod);
898 spin_unlock_bh(&profile->lock); 898 spin_unlock_bh(&profile->lock);
899} 899}
900 900
@@ -911,7 +911,7 @@ int dasd_profile_on(struct dasd_profile *profile)
911 kfree(data); 911 kfree(data);
912 return 0; 912 return 0;
913 } 913 }
914 getnstimeofday(&data->starttod); 914 ktime_get_real_ts64(&data->starttod);
915 profile->data = data; 915 profile->data = data;
916 spin_unlock_bh(&profile->lock); 916 spin_unlock_bh(&profile->lock);
917 return 0; 917 return 0;
@@ -995,8 +995,8 @@ static void dasd_stats_array(struct seq_file *m, unsigned int *array)
995static void dasd_stats_seq_print(struct seq_file *m, 995static void dasd_stats_seq_print(struct seq_file *m,
996 struct dasd_profile_info *data) 996 struct dasd_profile_info *data)
997{ 997{
998 seq_printf(m, "start_time %ld.%09ld\n", 998 seq_printf(m, "start_time %lld.%09ld\n",
999 data->starttod.tv_sec, data->starttod.tv_nsec); 999 (s64)data->starttod.tv_sec, data->starttod.tv_nsec);
1000 seq_printf(m, "total_requests %u\n", data->dasd_io_reqs); 1000 seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
1001 seq_printf(m, "total_sectors %u\n", data->dasd_io_sects); 1001 seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
1002 seq_printf(m, "total_pav %u\n", data->dasd_io_alias); 1002 seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index c94b606e0df8..ee14d8e45c97 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -2803,6 +2803,16 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
2803 erp = dasd_3990_erp_handle_match_erp(cqr, erp); 2803 erp = dasd_3990_erp_handle_match_erp(cqr, erp);
2804 } 2804 }
2805 2805
2806
2807 /*
2808 * For path verification work we need to stick with the path that was
2809 * originally chosen so that the per path configuration data is
2810 * assigned correctly.
2811 */
2812 if (test_bit(DASD_CQR_VERIFY_PATH, &erp->flags) && cqr->lpm) {
2813 erp->lpm = cqr->lpm;
2814 }
2815
2806 if (device->features & DASD_FEATURE_ERPLOG) { 2816 if (device->features & DASD_FEATURE_ERPLOG) {
2807 /* print current erp_chain */ 2817 /* print current erp_chain */
2808 dev_err(&device->cdev->dev, 2818 dev_err(&device->cdev->dev,
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 1a41ef496338..a2edf2a7ace9 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -531,10 +531,12 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
531 pfxdata->validity.define_extent = 1; 531 pfxdata->validity.define_extent = 1;
532 532
533 /* private uid is kept up to date, conf_data may be outdated */ 533 /* private uid is kept up to date, conf_data may be outdated */
534 if (startpriv->uid.type != UA_BASE_DEVICE) { 534 if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
535 pfxdata->validity.verify_base = 1; 535 pfxdata->validity.verify_base = 1;
536 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) 536
537 pfxdata->validity.hyper_pav = 1; 537 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
538 pfxdata->validity.verify_base = 1;
539 pfxdata->validity.hyper_pav = 1;
538 } 540 }
539 541
540 rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize); 542 rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);
@@ -3415,10 +3417,12 @@ static int prepare_itcw(struct itcw *itcw,
3415 pfxdata.validity.define_extent = 1; 3417 pfxdata.validity.define_extent = 1;
3416 3418
3417 /* private uid is kept up to date, conf_data may be outdated */ 3419 /* private uid is kept up to date, conf_data may be outdated */
3418 if (startpriv->uid.type != UA_BASE_DEVICE) { 3420 if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
3421 pfxdata.validity.verify_base = 1;
3422
3423 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
3419 pfxdata.validity.verify_base = 1; 3424 pfxdata.validity.verify_base = 1;
3420 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) 3425 pfxdata.validity.hyper_pav = 1;
3421 pfxdata.validity.hyper_pav = 1;
3422 } 3426 }
3423 3427
3424 switch (cmd) { 3428 switch (cmd) {
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index b095a23bcc0c..96709b1a7bf8 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -441,7 +441,7 @@ struct dasd_profile_info {
441 unsigned int dasd_io_nr_req[32]; /* hist. of # of requests in chanq */ 441 unsigned int dasd_io_nr_req[32]; /* hist. of # of requests in chanq */
442 442
443 /* new data */ 443 /* new data */
444 struct timespec starttod; /* time of start or last reset */ 444 struct timespec64 starttod; /* time of start or last reset */
445 unsigned int dasd_io_alias; /* requests using an alias */ 445 unsigned int dasd_io_alias; /* requests using an alias */
446 unsigned int dasd_io_tpm; /* requests using transport mode */ 446 unsigned int dasd_io_tpm; /* requests using transport mode */
447 unsigned int dasd_read_reqs; /* total number of read requests */ 447 unsigned int dasd_read_reqs; /* total number of read requests */
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 97c4c9fdd53d..ab0b243a947d 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -1,3 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0
1comment "S/390 character device drivers" 2comment "S/390 character device drivers"
2 depends on S390 3 depends on S390
3 4
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 05ac6ba15a53..614b44e70a28 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -17,6 +17,8 @@ CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH)
17CFLAGS_sclp_early_core.o += -march=z900 17CFLAGS_sclp_early_core.o += -march=z900
18endif 18endif
19 19
20CFLAGS_sclp_early_core.o += -D__NO_FORTIFY
21
20obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ 22obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
21 sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \ 23 sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
22 sclp_early.o sclp_early_core.o 24 sclp_early.o sclp_early_core.o
diff --git a/drivers/s390/char/defkeymap.map b/drivers/s390/char/defkeymap.map
index 353b3f268824..f4c095612a02 100644
--- a/drivers/s390/char/defkeymap.map
+++ b/drivers/s390/char/defkeymap.map
@@ -1,3 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0
1# Default keymap for 3270 (ebcdic codepage 037). 2# Default keymap for 3270 (ebcdic codepage 037).
2keymaps 0-1,4-5 3keymaps 0-1,4-5
3 4
diff --git a/drivers/s390/cio/blacklist.h b/drivers/s390/cio/blacklist.h
index 95e25c1df922..140e3e4ee2fd 100644
--- a/drivers/s390/cio/blacklist.h
+++ b/drivers/s390/cio/blacklist.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1#ifndef S390_BLACKLIST_H 2#ifndef S390_BLACKLIST_H
2#define S390_BLACKLIST_H 3#define S390_BLACKLIST_H
3 4
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 59b4a3370cd5..95b0efe28afb 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -431,8 +431,8 @@ static void process_buffer_error(struct qdio_q *q, int count)
431 q->qdio_error = QDIO_ERROR_SLSB_STATE; 431 q->qdio_error = QDIO_ERROR_SLSB_STATE;
432 432
433 /* special handling for no target buffer empty */ 433 /* special handling for no target buffer empty */
434 if ((!q->is_input_q && 434 if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
435 (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) { 435 q->sbal[q->first_to_check]->element[15].sflags == 0x10) {
436 qperf_inc(q, target_full); 436 qperf_inc(q, target_full);
437 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", 437 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
438 q->first_to_check); 438 q->first_to_check);
@@ -536,7 +536,8 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
536 case SLSB_P_INPUT_ERROR: 536 case SLSB_P_INPUT_ERROR:
537 process_buffer_error(q, count); 537 process_buffer_error(q, count);
538 q->first_to_check = add_buf(q->first_to_check, count); 538 q->first_to_check = add_buf(q->first_to_check, count);
539 atomic_sub(count, &q->nr_buf_used); 539 if (atomic_sub_return(count, &q->nr_buf_used) == 0)
540 qperf_inc(q, inbound_queue_full);
540 if (q->irq_ptr->perf_stat_enabled) 541 if (q->irq_ptr->perf_stat_enabled)
541 account_sbals_error(q, count); 542 account_sbals_error(q, count);
542 break; 543 break;
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index b2837b1c70b7..a782a207ad31 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -1,3 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0
1menu "S/390 network device drivers" 2menu "S/390 network device drivers"
2 depends on NETDEVICES && S390 3 depends on NETDEVICES && S390
3 4
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 9cd569ef43ec..badf42acbf95 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -565,9 +565,9 @@ enum qeth_cq {
565}; 565};
566 566
567struct qeth_ipato { 567struct qeth_ipato {
568 int enabled; 568 bool enabled;
569 int invert4; 569 bool invert4;
570 int invert6; 570 bool invert6;
571 struct list_head entries; 571 struct list_head entries;
572}; 572};
573 573
@@ -987,6 +987,9 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
987int qeth_set_features(struct net_device *, netdev_features_t); 987int qeth_set_features(struct net_device *, netdev_features_t);
988void qeth_recover_features(struct net_device *dev); 988void qeth_recover_features(struct net_device *dev);
989netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t); 989netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
990netdev_features_t qeth_features_check(struct sk_buff *skb,
991 struct net_device *dev,
992 netdev_features_t features);
990int qeth_vm_request_mac(struct qeth_card *card); 993int qeth_vm_request_mac(struct qeth_card *card);
991int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len); 994int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len);
992 995
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 98a7f84540ab..3614df68830f 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -20,6 +20,11 @@
20#include <linux/mii.h> 20#include <linux/mii.h>
21#include <linux/kthread.h> 21#include <linux/kthread.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/if_vlan.h>
24#include <linux/netdevice.h>
25#include <linux/netdev_features.h>
26#include <linux/skbuff.h>
27
23#include <net/iucv/af_iucv.h> 28#include <net/iucv/af_iucv.h>
24#include <net/dsfield.h> 29#include <net/dsfield.h>
25 30
@@ -1475,9 +1480,9 @@ static int qeth_setup_card(struct qeth_card *card)
1475 qeth_set_intial_options(card); 1480 qeth_set_intial_options(card);
1476 /* IP address takeover */ 1481 /* IP address takeover */
1477 INIT_LIST_HEAD(&card->ipato.entries); 1482 INIT_LIST_HEAD(&card->ipato.entries);
1478 card->ipato.enabled = 0; 1483 card->ipato.enabled = false;
1479 card->ipato.invert4 = 0; 1484 card->ipato.invert4 = false;
1480 card->ipato.invert6 = 0; 1485 card->ipato.invert6 = false;
1481 /* init QDIO stuff */ 1486 /* init QDIO stuff */
1482 qeth_init_qdio_info(card); 1487 qeth_init_qdio_info(card);
1483 INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work); 1488 INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
@@ -5381,6 +5386,13 @@ out:
5381} 5386}
5382EXPORT_SYMBOL_GPL(qeth_poll); 5387EXPORT_SYMBOL_GPL(qeth_poll);
5383 5388
5389static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
5390{
5391 if (!cmd->hdr.return_code)
5392 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5393 return cmd->hdr.return_code;
5394}
5395
5384int qeth_setassparms_cb(struct qeth_card *card, 5396int qeth_setassparms_cb(struct qeth_card *card,
5385 struct qeth_reply *reply, unsigned long data) 5397 struct qeth_reply *reply, unsigned long data)
5386{ 5398{
@@ -6237,7 +6249,7 @@ static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card,
6237 (struct qeth_checksum_cmd *)reply->param; 6249 (struct qeth_checksum_cmd *)reply->param;
6238 6250
6239 QETH_CARD_TEXT(card, 4, "chkdoccb"); 6251 QETH_CARD_TEXT(card, 4, "chkdoccb");
6240 if (cmd->hdr.return_code) 6252 if (qeth_setassparms_inspect_rc(cmd))
6241 return 0; 6253 return 0;
6242 6254
6243 memset(chksum_cb, 0, sizeof(*chksum_cb)); 6255 memset(chksum_cb, 0, sizeof(*chksum_cb));
@@ -6439,6 +6451,32 @@ netdev_features_t qeth_fix_features(struct net_device *dev,
6439} 6451}
6440EXPORT_SYMBOL_GPL(qeth_fix_features); 6452EXPORT_SYMBOL_GPL(qeth_fix_features);
6441 6453
6454netdev_features_t qeth_features_check(struct sk_buff *skb,
6455 struct net_device *dev,
6456 netdev_features_t features)
6457{
6458 /* GSO segmentation builds skbs with
6459 * a (small) linear part for the headers, and
6460 * page frags for the data.
6461 * Compared to a linear skb, the header-only part consumes an
6462 * additional buffer element. This reduces buffer utilization, and
6463 * hurts throughput. So compress small segments into one element.
6464 */
6465 if (netif_needs_gso(skb, features)) {
6466 /* match skb_segment(): */
6467 unsigned int doffset = skb->data - skb_mac_header(skb);
6468 unsigned int hsize = skb_shinfo(skb)->gso_size;
6469 unsigned int hroom = skb_headroom(skb);
6470
6471 /* linearize only if resulting skb allocations are order-0: */
6472 if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
6473 features &= ~NETIF_F_SG;
6474 }
6475
6476 return vlan_features_check(skb, features);
6477}
6478EXPORT_SYMBOL_GPL(qeth_features_check);
6479
6442static int __init qeth_core_init(void) 6480static int __init qeth_core_init(void)
6443{ 6481{
6444 int rc; 6482 int rc;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 93d7e345d180..5863ea170ff2 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -961,6 +961,7 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
961 .ndo_stop = qeth_l2_stop, 961 .ndo_stop = qeth_l2_stop,
962 .ndo_get_stats = qeth_get_stats, 962 .ndo_get_stats = qeth_get_stats,
963 .ndo_start_xmit = qeth_l2_hard_start_xmit, 963 .ndo_start_xmit = qeth_l2_hard_start_xmit,
964 .ndo_features_check = qeth_features_check,
964 .ndo_validate_addr = eth_validate_addr, 965 .ndo_validate_addr = eth_validate_addr,
965 .ndo_set_rx_mode = qeth_l2_set_rx_mode, 966 .ndo_set_rx_mode = qeth_l2_set_rx_mode,
966 .ndo_do_ioctl = qeth_do_ioctl, 967 .ndo_do_ioctl = qeth_do_ioctl,
@@ -1011,6 +1012,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
1011 if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) { 1012 if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
1012 card->dev->hw_features = NETIF_F_SG; 1013 card->dev->hw_features = NETIF_F_SG;
1013 card->dev->vlan_features = NETIF_F_SG; 1014 card->dev->vlan_features = NETIF_F_SG;
1015 card->dev->features |= NETIF_F_SG;
1014 /* OSA 3S and earlier has no RX/TX support */ 1016 /* OSA 3S and earlier has no RX/TX support */
1015 if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) { 1017 if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) {
1016 card->dev->hw_features |= NETIF_F_IP_CSUM; 1018 card->dev->hw_features |= NETIF_F_IP_CSUM;
@@ -1029,8 +1031,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
1029 1031
1030 card->info.broadcast_capable = 1; 1032 card->info.broadcast_capable = 1;
1031 qeth_l2_request_initial_mac(card); 1033 qeth_l2_request_initial_mac(card);
1032 card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
1033 PAGE_SIZE;
1034 SET_NETDEV_DEV(card->dev, &card->gdev->dev); 1034 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
1035 netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); 1035 netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
1036 netif_carrier_off(card->dev); 1036 netif_carrier_off(card->dev);
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 194ae9b577cc..e5833837b799 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -82,7 +82,7 @@ void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
82int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *); 82int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
83void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions, 83void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
84 const u8 *); 84 const u8 *);
85int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *); 85void qeth_l3_update_ipato(struct qeth_card *card);
86struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions); 86struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions);
87int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *); 87int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *);
88int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *); 88int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 0f8c12738b06..ef0961e18686 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -164,8 +164,8 @@ static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
164 } 164 }
165} 165}
166 166
167int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, 167static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
168 struct qeth_ipaddr *addr) 168 struct qeth_ipaddr *addr)
169{ 169{
170 struct qeth_ipato_entry *ipatoe; 170 struct qeth_ipato_entry *ipatoe;
171 u8 addr_bits[128] = {0, }; 171 u8 addr_bits[128] = {0, };
@@ -174,6 +174,8 @@ int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
174 174
175 if (!card->ipato.enabled) 175 if (!card->ipato.enabled)
176 return 0; 176 return 0;
177 if (addr->type != QETH_IP_TYPE_NORMAL)
178 return 0;
177 179
178 qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits, 180 qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
179 (addr->proto == QETH_PROT_IPV4)? 4:16); 181 (addr->proto == QETH_PROT_IPV4)? 4:16);
@@ -290,8 +292,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
290 memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr)); 292 memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr));
291 addr->ref_counter = 1; 293 addr->ref_counter = 1;
292 294
293 if (addr->type == QETH_IP_TYPE_NORMAL && 295 if (qeth_l3_is_addr_covered_by_ipato(card, addr)) {
294 qeth_l3_is_addr_covered_by_ipato(card, addr)) {
295 QETH_CARD_TEXT(card, 2, "tkovaddr"); 296 QETH_CARD_TEXT(card, 2, "tkovaddr");
296 addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; 297 addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
297 } 298 }
@@ -605,6 +606,27 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
605/* 606/*
606 * IP address takeover related functions 607 * IP address takeover related functions
607 */ 608 */
609
610/**
611 * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs.
612 *
613 * Caller must hold ip_lock.
614 */
615void qeth_l3_update_ipato(struct qeth_card *card)
616{
617 struct qeth_ipaddr *addr;
618 unsigned int i;
619
620 hash_for_each(card->ip_htable, i, addr, hnode) {
621 if (addr->type != QETH_IP_TYPE_NORMAL)
622 continue;
623 if (qeth_l3_is_addr_covered_by_ipato(card, addr))
624 addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
625 else
626 addr->set_flags &= ~QETH_IPA_SETIP_TAKEOVER_FLAG;
627 }
628}
629
608static void qeth_l3_clear_ipato_list(struct qeth_card *card) 630static void qeth_l3_clear_ipato_list(struct qeth_card *card)
609{ 631{
610 struct qeth_ipato_entry *ipatoe, *tmp; 632 struct qeth_ipato_entry *ipatoe, *tmp;
@@ -616,6 +638,7 @@ static void qeth_l3_clear_ipato_list(struct qeth_card *card)
616 kfree(ipatoe); 638 kfree(ipatoe);
617 } 639 }
618 640
641 qeth_l3_update_ipato(card);
619 spin_unlock_bh(&card->ip_lock); 642 spin_unlock_bh(&card->ip_lock);
620} 643}
621 644
@@ -640,8 +663,10 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
640 } 663 }
641 } 664 }
642 665
643 if (!rc) 666 if (!rc) {
644 list_add_tail(&new->entry, &card->ipato.entries); 667 list_add_tail(&new->entry, &card->ipato.entries);
668 qeth_l3_update_ipato(card);
669 }
645 670
646 spin_unlock_bh(&card->ip_lock); 671 spin_unlock_bh(&card->ip_lock);
647 672
@@ -664,6 +689,7 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card,
664 (proto == QETH_PROT_IPV4)? 4:16) && 689 (proto == QETH_PROT_IPV4)? 4:16) &&
665 (ipatoe->mask_bits == mask_bits)) { 690 (ipatoe->mask_bits == mask_bits)) {
666 list_del(&ipatoe->entry); 691 list_del(&ipatoe->entry);
692 qeth_l3_update_ipato(card);
667 kfree(ipatoe); 693 kfree(ipatoe);
668 } 694 }
669 } 695 }
@@ -1377,6 +1403,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
1377 1403
1378 tmp->u.a4.addr = be32_to_cpu(im4->multiaddr); 1404 tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
1379 memcpy(tmp->mac, buf, sizeof(tmp->mac)); 1405 memcpy(tmp->mac, buf, sizeof(tmp->mac));
1406 tmp->is_multicast = 1;
1380 1407
1381 ipm = qeth_l3_ip_from_hash(card, tmp); 1408 ipm = qeth_l3_ip_from_hash(card, tmp);
1382 if (ipm) { 1409 if (ipm) {
@@ -2918,6 +2945,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
2918 .ndo_stop = qeth_l3_stop, 2945 .ndo_stop = qeth_l3_stop,
2919 .ndo_get_stats = qeth_get_stats, 2946 .ndo_get_stats = qeth_get_stats,
2920 .ndo_start_xmit = qeth_l3_hard_start_xmit, 2947 .ndo_start_xmit = qeth_l3_hard_start_xmit,
2948 .ndo_features_check = qeth_features_check,
2921 .ndo_validate_addr = eth_validate_addr, 2949 .ndo_validate_addr = eth_validate_addr,
2922 .ndo_set_rx_mode = qeth_l3_set_multicast_list, 2950 .ndo_set_rx_mode = qeth_l3_set_multicast_list,
2923 .ndo_do_ioctl = qeth_do_ioctl, 2951 .ndo_do_ioctl = qeth_do_ioctl,
@@ -2958,6 +2986,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
2958 card->dev->vlan_features = NETIF_F_SG | 2986 card->dev->vlan_features = NETIF_F_SG |
2959 NETIF_F_RXCSUM | NETIF_F_IP_CSUM | 2987 NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
2960 NETIF_F_TSO; 2988 NETIF_F_TSO;
2989 card->dev->features |= NETIF_F_SG;
2961 } 2990 }
2962 } 2991 }
2963 } else if (card->info.type == QETH_CARD_TYPE_IQD) { 2992 } else if (card->info.type == QETH_CARD_TYPE_IQD) {
@@ -2985,8 +3014,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
2985 NETIF_F_HW_VLAN_CTAG_RX | 3014 NETIF_F_HW_VLAN_CTAG_RX |
2986 NETIF_F_HW_VLAN_CTAG_FILTER; 3015 NETIF_F_HW_VLAN_CTAG_FILTER;
2987 netif_keep_dst(card->dev); 3016 netif_keep_dst(card->dev);
2988 card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * 3017 netif_set_gso_max_size(card->dev, (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
2989 PAGE_SIZE; 3018 PAGE_SIZE);
2990 3019
2991 SET_NETDEV_DEV(card->dev, &card->gdev->dev); 3020 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
2992 netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); 3021 netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index bd12fdf678be..6ea2b528a64e 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -370,8 +370,8 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
370 struct device_attribute *attr, const char *buf, size_t count) 370 struct device_attribute *attr, const char *buf, size_t count)
371{ 371{
372 struct qeth_card *card = dev_get_drvdata(dev); 372 struct qeth_card *card = dev_get_drvdata(dev);
373 struct qeth_ipaddr *addr; 373 bool enable;
374 int i, rc = 0; 374 int rc = 0;
375 375
376 if (!card) 376 if (!card)
377 return -EINVAL; 377 return -EINVAL;
@@ -384,25 +384,18 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
384 } 384 }
385 385
386 if (sysfs_streq(buf, "toggle")) { 386 if (sysfs_streq(buf, "toggle")) {
387 card->ipato.enabled = (card->ipato.enabled)? 0 : 1; 387 enable = !card->ipato.enabled;
388 } else if (sysfs_streq(buf, "1")) { 388 } else if (kstrtobool(buf, &enable)) {
389 card->ipato.enabled = 1;
390 hash_for_each(card->ip_htable, i, addr, hnode) {
391 if ((addr->type == QETH_IP_TYPE_NORMAL) &&
392 qeth_l3_is_addr_covered_by_ipato(card, addr))
393 addr->set_flags |=
394 QETH_IPA_SETIP_TAKEOVER_FLAG;
395 }
396 } else if (sysfs_streq(buf, "0")) {
397 card->ipato.enabled = 0;
398 hash_for_each(card->ip_htable, i, addr, hnode) {
399 if (addr->set_flags &
400 QETH_IPA_SETIP_TAKEOVER_FLAG)
401 addr->set_flags &=
402 ~QETH_IPA_SETIP_TAKEOVER_FLAG;
403 }
404 } else
405 rc = -EINVAL; 389 rc = -EINVAL;
390 goto out;
391 }
392
393 if (card->ipato.enabled != enable) {
394 card->ipato.enabled = enable;
395 spin_lock_bh(&card->ip_lock);
396 qeth_l3_update_ipato(card);
397 spin_unlock_bh(&card->ip_lock);
398 }
406out: 399out:
407 mutex_unlock(&card->conf_mutex); 400 mutex_unlock(&card->conf_mutex);
408 return rc ? rc : count; 401 return rc ? rc : count;
@@ -428,20 +421,27 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
428 const char *buf, size_t count) 421 const char *buf, size_t count)
429{ 422{
430 struct qeth_card *card = dev_get_drvdata(dev); 423 struct qeth_card *card = dev_get_drvdata(dev);
424 bool invert;
431 int rc = 0; 425 int rc = 0;
432 426
433 if (!card) 427 if (!card)
434 return -EINVAL; 428 return -EINVAL;
435 429
436 mutex_lock(&card->conf_mutex); 430 mutex_lock(&card->conf_mutex);
437 if (sysfs_streq(buf, "toggle")) 431 if (sysfs_streq(buf, "toggle")) {
438 card->ipato.invert4 = (card->ipato.invert4)? 0 : 1; 432 invert = !card->ipato.invert4;
439 else if (sysfs_streq(buf, "1")) 433 } else if (kstrtobool(buf, &invert)) {
440 card->ipato.invert4 = 1;
441 else if (sysfs_streq(buf, "0"))
442 card->ipato.invert4 = 0;
443 else
444 rc = -EINVAL; 434 rc = -EINVAL;
435 goto out;
436 }
437
438 if (card->ipato.invert4 != invert) {
439 card->ipato.invert4 = invert;
440 spin_lock_bh(&card->ip_lock);
441 qeth_l3_update_ipato(card);
442 spin_unlock_bh(&card->ip_lock);
443 }
444out:
445 mutex_unlock(&card->conf_mutex); 445 mutex_unlock(&card->conf_mutex);
446 return rc ? rc : count; 446 return rc ? rc : count;
447} 447}
@@ -607,20 +607,27 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
607 struct device_attribute *attr, const char *buf, size_t count) 607 struct device_attribute *attr, const char *buf, size_t count)
608{ 608{
609 struct qeth_card *card = dev_get_drvdata(dev); 609 struct qeth_card *card = dev_get_drvdata(dev);
610 bool invert;
610 int rc = 0; 611 int rc = 0;
611 612
612 if (!card) 613 if (!card)
613 return -EINVAL; 614 return -EINVAL;
614 615
615 mutex_lock(&card->conf_mutex); 616 mutex_lock(&card->conf_mutex);
616 if (sysfs_streq(buf, "toggle")) 617 if (sysfs_streq(buf, "toggle")) {
617 card->ipato.invert6 = (card->ipato.invert6)? 0 : 1; 618 invert = !card->ipato.invert6;
618 else if (sysfs_streq(buf, "1")) 619 } else if (kstrtobool(buf, &invert)) {
619 card->ipato.invert6 = 1;
620 else if (sysfs_streq(buf, "0"))
621 card->ipato.invert6 = 0;
622 else
623 rc = -EINVAL; 620 rc = -EINVAL;
621 goto out;
622 }
623
624 if (card->ipato.invert6 != invert) {
625 card->ipato.invert6 = invert;
626 spin_lock_bh(&card->ip_lock);
627 qeth_l3_update_ipato(card);
628 spin_unlock_bh(&card->ip_lock);
629 }
630out:
624 mutex_unlock(&card->conf_mutex); 631 mutex_unlock(&card->conf_mutex);
625 return rc ? rc : count; 632 return rc ? rc : count;
626} 633}
diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile
index 9259039e886d..9dda431ec8f3 100644
--- a/drivers/s390/scsi/Makefile
+++ b/drivers/s390/scsi/Makefile
@@ -1,3 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0
1# 2#
2# Makefile for the S/390 specific device drivers 3# Makefile for the S/390 specific device drivers
3# 4#
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 403a639574e5..d52265416da2 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1673,6 +1673,7 @@ struct aac_dev
1673 struct aac_hba_map_info hba_map[AAC_MAX_BUSES][AAC_MAX_TARGETS]; 1673 struct aac_hba_map_info hba_map[AAC_MAX_BUSES][AAC_MAX_TARGETS];
1674 u8 adapter_shutdown; 1674 u8 adapter_shutdown;
1675 u32 handle_pci_error; 1675 u32 handle_pci_error;
1676 bool init_reset;
1676}; 1677};
1677 1678
1678#define aac_adapter_interrupt(dev) \ 1679#define aac_adapter_interrupt(dev) \
@@ -1724,6 +1725,7 @@ struct aac_dev
1724#define FIB_CONTEXT_FLAG_NATIVE_HBA (0x00000010) 1725#define FIB_CONTEXT_FLAG_NATIVE_HBA (0x00000010)
1725#define FIB_CONTEXT_FLAG_NATIVE_HBA_TMF (0x00000020) 1726#define FIB_CONTEXT_FLAG_NATIVE_HBA_TMF (0x00000020)
1726#define FIB_CONTEXT_FLAG_SCSI_CMD (0x00000040) 1727#define FIB_CONTEXT_FLAG_SCSI_CMD (0x00000040)
1728#define FIB_CONTEXT_FLAG_EH_RESET (0x00000080)
1727 1729
1728/* 1730/*
1729 * Define the command values 1731 * Define the command values
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 525a652dab48..80a8cb26cdea 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -467,35 +467,6 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw
467 return 0; 467 return 0;
468} 468}
469 469
470#ifdef CONFIG_EEH
471static inline int aac_check_eeh_failure(struct aac_dev *dev)
472{
473 /* Check for an EEH failure for the given
474 * device node. Function eeh_dev_check_failure()
475 * returns 0 if there has not been an EEH error
476 * otherwise returns a non-zero value.
477 *
478 * Need to be called before any PCI operation,
479 * i.e.,before aac_adapter_check_health()
480 */
481 struct eeh_dev *edev = pci_dev_to_eeh_dev(dev->pdev);
482
483 if (eeh_dev_check_failure(edev)) {
484 /* The EEH mechanisms will handle this
485 * error and reset the device if
486 * necessary.
487 */
488 return 1;
489 }
490 return 0;
491}
492#else
493static inline int aac_check_eeh_failure(struct aac_dev *dev)
494{
495 return 0;
496}
497#endif
498
499/* 470/*
500 * Define the highest level of host to adapter communication routines. 471 * Define the highest level of host to adapter communication routines.
501 * These routines will support host to adapter FS commuication. These 472 * These routines will support host to adapter FS commuication. These
@@ -701,7 +672,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
701 return -ETIMEDOUT; 672 return -ETIMEDOUT;
702 } 673 }
703 674
704 if (aac_check_eeh_failure(dev)) 675 if (unlikely(pci_channel_offline(dev->pdev)))
705 return -EFAULT; 676 return -EFAULT;
706 677
707 if ((blink = aac_adapter_check_health(dev)) > 0) { 678 if ((blink = aac_adapter_check_health(dev)) > 0) {
@@ -801,7 +772,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
801 772
802 spin_unlock_irqrestore(&fibptr->event_lock, flags); 773 spin_unlock_irqrestore(&fibptr->event_lock, flags);
803 774
804 if (aac_check_eeh_failure(dev)) 775 if (unlikely(pci_channel_offline(dev->pdev)))
805 return -EFAULT; 776 return -EFAULT;
806 777
807 fibptr->flags |= FIB_CONTEXT_FLAG_WAIT; 778 fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
@@ -1583,6 +1554,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
1583 * will ensure that i/o is queisced and the card is flushed in that 1554 * will ensure that i/o is queisced and the card is flushed in that
1584 * case. 1555 * case.
1585 */ 1556 */
1557 aac_free_irq(aac);
1586 aac_fib_map_free(aac); 1558 aac_fib_map_free(aac);
1587 dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr, 1559 dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
1588 aac->comm_phys); 1560 aac->comm_phys);
@@ -1590,7 +1562,6 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
1590 aac->comm_phys = 0; 1562 aac->comm_phys = 0;
1591 kfree(aac->queues); 1563 kfree(aac->queues);
1592 aac->queues = NULL; 1564 aac->queues = NULL;
1593 aac_free_irq(aac);
1594 kfree(aac->fsa_dev); 1565 kfree(aac->fsa_dev);
1595 aac->fsa_dev = NULL; 1566 aac->fsa_dev = NULL;
1596 1567
@@ -2511,8 +2482,8 @@ int aac_command_thread(void *data)
2511 /* Synchronize our watches */ 2482 /* Synchronize our watches */
2512 if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec) 2483 if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec)
2513 && (now.tv_nsec > (NSEC_PER_SEC / HZ))) 2484 && (now.tv_nsec > (NSEC_PER_SEC / HZ)))
2514 difference = (((NSEC_PER_SEC - now.tv_nsec) * HZ) 2485 difference = HZ + HZ / 2 -
2515 + NSEC_PER_SEC / 2) / NSEC_PER_SEC; 2486 now.tv_nsec / (NSEC_PER_SEC / HZ);
2516 else { 2487 else {
2517 if (now.tv_nsec > NSEC_PER_SEC / 2) 2488 if (now.tv_nsec > NSEC_PER_SEC / 2)
2518 ++now.tv_sec; 2489 ++now.tv_sec;
@@ -2536,6 +2507,10 @@ int aac_command_thread(void *data)
2536 if (kthread_should_stop()) 2507 if (kthread_should_stop())
2537 break; 2508 break;
2538 2509
2510 /*
2511 * we probably want usleep_range() here instead of the
2512 * jiffies computation
2513 */
2539 schedule_timeout(difference); 2514 schedule_timeout(difference);
2540 2515
2541 if (kthread_should_stop()) 2516 if (kthread_should_stop())
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index c9252b138c1f..d55332de08f9 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1037,7 +1037,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
1037 info = &aac->hba_map[bus][cid]; 1037 info = &aac->hba_map[bus][cid];
1038 if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS || 1038 if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
1039 info->devtype != AAC_DEVTYPE_NATIVE_RAW) { 1039 info->devtype != AAC_DEVTYPE_NATIVE_RAW) {
1040 fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT; 1040 fib->flags |= FIB_CONTEXT_FLAG_EH_RESET;
1041 cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER; 1041 cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
1042 } 1042 }
1043 } 1043 }
@@ -1680,6 +1680,9 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1680 aac->cardtype = index; 1680 aac->cardtype = index;
1681 INIT_LIST_HEAD(&aac->entry); 1681 INIT_LIST_HEAD(&aac->entry);
1682 1682
1683 if (aac_reset_devices || reset_devices)
1684 aac->init_reset = true;
1685
1683 aac->fibs = kzalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL); 1686 aac->fibs = kzalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL);
1684 if (!aac->fibs) 1687 if (!aac->fibs)
1685 goto out_free_host; 1688 goto out_free_host;
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 93ef7c37e568..620166694171 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -561,11 +561,16 @@ int _aac_rx_init(struct aac_dev *dev)
561 dev->a_ops.adapter_sync_cmd = rx_sync_cmd; 561 dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
562 dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt; 562 dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
563 dev->OIMR = status = rx_readb (dev, MUnit.OIMR); 563 dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
564 if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) && 564
565 !aac_rx_restart_adapter(dev, 0, IOP_HWSOFT_RESET)) 565 if (((status & 0x0c) != 0x0c) || dev->init_reset) {
566 /* Make sure the Hardware FIFO is empty */ 566 dev->init_reset = false;
567 while ((++restart < 512) && 567 if (!aac_rx_restart_adapter(dev, 0, IOP_HWSOFT_RESET)) {
568 (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL)); 568 /* Make sure the Hardware FIFO is empty */
569 while ((++restart < 512) &&
570 (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
571 }
572 }
573
569 /* 574 /*
570 * Check to see if the board panic'd while booting. 575 * Check to see if the board panic'd while booting.
571 */ 576 */
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 0c9361c87ec8..fde6b6aa86e3 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -868,9 +868,13 @@ int aac_src_init(struct aac_dev *dev)
868 /* Failure to reset here is an option ... */ 868 /* Failure to reset here is an option ... */
869 dev->a_ops.adapter_sync_cmd = src_sync_cmd; 869 dev->a_ops.adapter_sync_cmd = src_sync_cmd;
870 dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; 870 dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
871 if ((aac_reset_devices || reset_devices) && 871
872 !aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET)) 872 if (dev->init_reset) {
873 ++restart; 873 dev->init_reset = false;
874 if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
875 ++restart;
876 }
877
874 /* 878 /*
875 * Check to see if the board panic'd while booting. 879 * Check to see if the board panic'd while booting.
876 */ 880 */
@@ -1014,9 +1018,13 @@ int aac_srcv_init(struct aac_dev *dev)
1014 /* Failure to reset here is an option ... */ 1018 /* Failure to reset here is an option ... */
1015 dev->a_ops.adapter_sync_cmd = src_sync_cmd; 1019 dev->a_ops.adapter_sync_cmd = src_sync_cmd;
1016 dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; 1020 dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
1017 if ((aac_reset_devices || reset_devices) && 1021
1018 !aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET)) 1022 if (dev->init_reset) {
1019 ++restart; 1023 dev->init_reset = false;
1024 if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
1025 ++restart;
1026 }
1027
1020 /* 1028 /*
1021 * Check to see if flash update is running. 1029 * Check to see if flash update is running.
1022 * Wait for the adapter to be up and running. Wait up to 5 minutes 1030 * Wait for the adapter to be up and running. Wait up to 5 minutes
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 72ca2a2e08e2..b2fa195adc7a 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3135,7 +3135,8 @@ bfad_im_bsg_vendor_request(struct bsg_job *job)
3135 struct fc_bsg_request *bsg_request = job->request; 3135 struct fc_bsg_request *bsg_request = job->request;
3136 struct fc_bsg_reply *bsg_reply = job->reply; 3136 struct fc_bsg_reply *bsg_reply = job->reply;
3137 uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0]; 3137 uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
3138 struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job)); 3138 struct Scsi_Host *shost = fc_bsg_to_shost(job);
3139 struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
3139 struct bfad_s *bfad = im_port->bfad; 3140 struct bfad_s *bfad = im_port->bfad;
3140 void *payload_kbuf; 3141 void *payload_kbuf;
3141 int rc = -EINVAL; 3142 int rc = -EINVAL;
@@ -3350,7 +3351,8 @@ int
3350bfad_im_bsg_els_ct_request(struct bsg_job *job) 3351bfad_im_bsg_els_ct_request(struct bsg_job *job)
3351{ 3352{
3352 struct bfa_bsg_data *bsg_data; 3353 struct bfa_bsg_data *bsg_data;
3353 struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job)); 3354 struct Scsi_Host *shost = fc_bsg_to_shost(job);
3355 struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
3354 struct bfad_s *bfad = im_port->bfad; 3356 struct bfad_s *bfad = im_port->bfad;
3355 bfa_bsg_fcpt_t *bsg_fcpt; 3357 bfa_bsg_fcpt_t *bsg_fcpt;
3356 struct bfad_fcxp *drv_fcxp; 3358 struct bfad_fcxp *drv_fcxp;
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 24e657a4ec80..c05d6e91e4bd 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -546,6 +546,7 @@ int
546bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port, 546bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
547 struct device *dev) 547 struct device *dev)
548{ 548{
549 struct bfad_im_port_pointer *im_portp;
549 int error = 1; 550 int error = 1;
550 551
551 mutex_lock(&bfad_mutex); 552 mutex_lock(&bfad_mutex);
@@ -564,7 +565,8 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
564 goto out_free_idr; 565 goto out_free_idr;
565 } 566 }
566 567
567 im_port->shost->hostdata[0] = (unsigned long)im_port; 568 im_portp = shost_priv(im_port->shost);
569 im_portp->p = im_port;
568 im_port->shost->unique_id = im_port->idr_id; 570 im_port->shost->unique_id = im_port->idr_id;
569 im_port->shost->this_id = -1; 571 im_port->shost->this_id = -1;
570 im_port->shost->max_id = MAX_FCP_TARGET; 572 im_port->shost->max_id = MAX_FCP_TARGET;
@@ -748,7 +750,7 @@ bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
748 750
749 sht->sg_tablesize = bfad->cfg_data.io_max_sge; 751 sht->sg_tablesize = bfad->cfg_data.io_max_sge;
750 752
751 return scsi_host_alloc(sht, sizeof(unsigned long)); 753 return scsi_host_alloc(sht, sizeof(struct bfad_im_port_pointer));
752} 754}
753 755
754void 756void
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index c81ec2a77ef5..06ce4ba2b7bc 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -69,6 +69,16 @@ struct bfad_im_port_s {
69 struct fc_vport *fc_vport; 69 struct fc_vport *fc_vport;
70}; 70};
71 71
72struct bfad_im_port_pointer {
73 struct bfad_im_port_s *p;
74};
75
76static inline struct bfad_im_port_s *bfad_get_im_port(struct Scsi_Host *host)
77{
78 struct bfad_im_port_pointer *im_portp = shost_priv(host);
79 return im_portp->p;
80}
81
72enum bfad_itnim_state { 82enum bfad_itnim_state {
73 ITNIM_STATE_NONE, 83 ITNIM_STATE_NONE,
74 ITNIM_STATE_ONLINE, 84 ITNIM_STATE_ONLINE,
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 5da46052e179..21be672679fb 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -904,10 +904,14 @@ static void fc_lport_recv_els_req(struct fc_lport *lport,
904 case ELS_FLOGI: 904 case ELS_FLOGI:
905 if (!lport->point_to_multipoint) 905 if (!lport->point_to_multipoint)
906 fc_lport_recv_flogi_req(lport, fp); 906 fc_lport_recv_flogi_req(lport, fp);
907 else
908 fc_rport_recv_req(lport, fp);
907 break; 909 break;
908 case ELS_LOGO: 910 case ELS_LOGO:
909 if (fc_frame_sid(fp) == FC_FID_FLOGI) 911 if (fc_frame_sid(fp) == FC_FID_FLOGI)
910 fc_lport_recv_logo_req(lport, fp); 912 fc_lport_recv_logo_req(lport, fp);
913 else
914 fc_rport_recv_req(lport, fp);
911 break; 915 break;
912 case ELS_RSCN: 916 case ELS_RSCN:
913 lport->tt.disc_recv_req(lport, fp); 917 lport->tt.disc_recv_req(lport, fp);
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index ca1566237ae7..3183d63de4da 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -2145,7 +2145,7 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
2145 struct sas_rphy *rphy) 2145 struct sas_rphy *rphy)
2146{ 2146{
2147 struct domain_device *dev; 2147 struct domain_device *dev;
2148 unsigned int reslen = 0; 2148 unsigned int rcvlen = 0;
2149 int ret = -EINVAL; 2149 int ret = -EINVAL;
2150 2150
2151 /* no rphy means no smp target support (ie aic94xx host) */ 2151 /* no rphy means no smp target support (ie aic94xx host) */
@@ -2179,12 +2179,12 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
2179 2179
2180 ret = smp_execute_task_sg(dev, job->request_payload.sg_list, 2180 ret = smp_execute_task_sg(dev, job->request_payload.sg_list,
2181 job->reply_payload.sg_list); 2181 job->reply_payload.sg_list);
2182 if (ret > 0) { 2182 if (ret >= 0) {
2183 /* positive number is the untransferred residual */ 2183 /* bsg_job_done() requires the length received */
2184 reslen = ret; 2184 rcvlen = job->reply_payload.payload_len - ret;
2185 ret = 0; 2185 ret = 0;
2186 } 2186 }
2187 2187
2188out: 2188out:
2189 bsg_job_done(job, ret, reslen); 2189 bsg_job_done(job, ret, rcvlen);
2190} 2190}
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 58476b728c57..c9406852c3e9 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -486,15 +486,28 @@ static int sas_queue_reset(struct domain_device *dev, int reset_type,
486 486
487int sas_eh_abort_handler(struct scsi_cmnd *cmd) 487int sas_eh_abort_handler(struct scsi_cmnd *cmd)
488{ 488{
489 int res; 489 int res = TMF_RESP_FUNC_FAILED;
490 struct sas_task *task = TO_SAS_TASK(cmd); 490 struct sas_task *task = TO_SAS_TASK(cmd);
491 struct Scsi_Host *host = cmd->device->host; 491 struct Scsi_Host *host = cmd->device->host;
492 struct domain_device *dev = cmd_to_domain_dev(cmd);
492 struct sas_internal *i = to_sas_internal(host->transportt); 493 struct sas_internal *i = to_sas_internal(host->transportt);
494 unsigned long flags;
493 495
494 if (!i->dft->lldd_abort_task) 496 if (!i->dft->lldd_abort_task)
495 return FAILED; 497 return FAILED;
496 498
497 res = i->dft->lldd_abort_task(task); 499 spin_lock_irqsave(host->host_lock, flags);
500 /* We cannot do async aborts for SATA devices */
501 if (dev_is_sata(dev) && !host->host_eh_scheduled) {
502 spin_unlock_irqrestore(host->host_lock, flags);
503 return FAILED;
504 }
505 spin_unlock_irqrestore(host->host_lock, flags);
506
507 if (task)
508 res = i->dft->lldd_abort_task(task);
509 else
510 SAS_DPRINTK("no task to abort\n");
498 if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE) 511 if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
499 return SUCCESS; 512 return SUCCESS;
500 513
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 56faeb049b4a..87c08ff37ddd 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -753,12 +753,12 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
753 drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys); 753 drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
754 rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe); 754 rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
755 if (rc < 0) { 755 if (rc < 0) {
756 (rqbp->rqb_free_buffer)(phba, rqb_entry);
757 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 756 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
758 "6409 Cannot post to RQ %d: %x %x\n", 757 "6409 Cannot post to RQ %d: %x %x\n",
759 rqb_entry->hrq->queue_id, 758 rqb_entry->hrq->queue_id,
760 rqb_entry->hrq->host_index, 759 rqb_entry->hrq->host_index,
761 rqb_entry->hrq->hba_index); 760 rqb_entry->hrq->hba_index);
761 (rqbp->rqb_free_buffer)(phba, rqb_entry);
762 } else { 762 } else {
763 list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list); 763 list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
764 rqbp->buffer_count++; 764 rqbp->buffer_count++;
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index a4f28b7e4c65..e18877177f1b 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -1576,7 +1576,9 @@ static struct request *_make_request(struct request_queue *q, bool has_write,
1576 return req; 1576 return req;
1577 1577
1578 for_each_bio(bio) { 1578 for_each_bio(bio) {
1579 ret = blk_rq_append_bio(req, bio); 1579 struct bio *bounce_bio = bio;
1580
1581 ret = blk_rq_append_bio(req, &bounce_bio);
1580 if (ret) 1582 if (ret)
1581 return ERR_PTR(ret); 1583 return ERR_PTR(ret);
1582 } 1584 }
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c
index 01f08c03f2c1..c3765d29fd3f 100644
--- a/drivers/scsi/scsi_debugfs.c
+++ b/drivers/scsi/scsi_debugfs.c
@@ -8,9 +8,11 @@ void scsi_show_rq(struct seq_file *m, struct request *rq)
8{ 8{
9 struct scsi_cmnd *cmd = container_of(scsi_req(rq), typeof(*cmd), req); 9 struct scsi_cmnd *cmd = container_of(scsi_req(rq), typeof(*cmd), req);
10 int msecs = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc); 10 int msecs = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc);
11 char buf[80]; 11 const u8 *const cdb = READ_ONCE(cmd->cmnd);
12 char buf[80] = "(?)";
12 13
13 __scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len); 14 if (cdb)
15 __scsi_format_command(buf, sizeof(buf), cdb, cmd->cmd_len);
14 seq_printf(m, ", .cmd=%s, .retries=%d, allocated %d.%03d s ago", buf, 16 seq_printf(m, ", .cmd=%s, .retries=%d, allocated %d.%03d s ago", buf,
15 cmd->retries, msecs / 1000, msecs % 1000); 17 cmd->retries, msecs / 1000, msecs % 1000);
16} 18}
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 78d4aa8df675..dfb8da83fa50 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -34,7 +34,6 @@ struct scsi_dev_info_list_table {
34}; 34};
35 35
36 36
37static const char spaces[] = " "; /* 16 of them */
38static blist_flags_t scsi_default_dev_flags; 37static blist_flags_t scsi_default_dev_flags;
39static LIST_HEAD(scsi_dev_info_list); 38static LIST_HEAD(scsi_dev_info_list);
40static char scsi_dev_flags[256]; 39static char scsi_dev_flags[256];
@@ -298,20 +297,13 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
298 size_t from_length; 297 size_t from_length;
299 298
300 from_length = strlen(from); 299 from_length = strlen(from);
301 strncpy(to, from, min(to_length, from_length)); 300 /* This zero-pads the destination */
302 if (from_length < to_length) { 301 strncpy(to, from, to_length);
303 if (compatible) { 302 if (from_length < to_length && !compatible) {
304 /* 303 /*
305 * NUL terminate the string if it is short. 304 * space pad the string if it is short.
306 */ 305 */
307 to[from_length] = '\0'; 306 memset(&to[from_length], ' ', to_length - from_length);
308 } else {
309 /*
310 * space pad the string if it is short.
311 */
312 strncpy(&to[from_length], spaces,
313 to_length - from_length);
314 }
315 } 307 }
316 if (from_length > to_length) 308 if (from_length > to_length)
317 printk(KERN_WARNING "%s: %s string '%s' is too long\n", 309 printk(KERN_WARNING "%s: %s string '%s' is too long\n",
@@ -382,10 +374,8 @@ int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
382 model, compatible); 374 model, compatible);
383 375
384 if (strflags) 376 if (strflags)
385 devinfo->flags = simple_strtoul(strflags, NULL, 0); 377 flags = (__force blist_flags_t)simple_strtoul(strflags, NULL, 0);
386 else 378 devinfo->flags = flags;
387 devinfo->flags = flags;
388
389 devinfo->compatible = compatible; 379 devinfo->compatible = compatible;
390 380
391 if (compatible) 381 if (compatible)
@@ -458,7 +448,8 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
458 /* 448 /*
459 * vendor strings must be an exact match 449 * vendor strings must be an exact match
460 */ 450 */
461 if (vmax != strlen(devinfo->vendor) || 451 if (vmax != strnlen(devinfo->vendor,
452 sizeof(devinfo->vendor)) ||
462 memcmp(devinfo->vendor, vskip, vmax)) 453 memcmp(devinfo->vendor, vskip, vmax))
463 continue; 454 continue;
464 455
@@ -466,7 +457,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
466 * @model specifies the full string, and 457 * @model specifies the full string, and
467 * must be larger or equal to devinfo->model 458 * must be larger or equal to devinfo->model
468 */ 459 */
469 mlen = strlen(devinfo->model); 460 mlen = strnlen(devinfo->model, sizeof(devinfo->model));
470 if (mmax < mlen || memcmp(devinfo->model, mskip, mlen)) 461 if (mmax < mlen || memcmp(devinfo->model, mskip, mlen))
471 continue; 462 continue;
472 return devinfo; 463 return devinfo;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 1cbc497e00bd..d9ca1dfab154 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1967,6 +1967,8 @@ static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
1967out_put_device: 1967out_put_device:
1968 put_device(&sdev->sdev_gendev); 1968 put_device(&sdev->sdev_gendev);
1969out: 1969out:
1970 if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev))
1971 blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
1970 return false; 1972 return false;
1971} 1973}
1972 1974
@@ -2148,11 +2150,13 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
2148 q->limits.cluster = 0; 2150 q->limits.cluster = 0;
2149 2151
2150 /* 2152 /*
2151 * set a reasonable default alignment on word boundaries: the 2153 * Set a reasonable default alignment: The larger of 32-byte (dword),
2152 * host and device may alter it using 2154 * which is a common minimum for HBAs, and the minimum DMA alignment,
2153 * blk_queue_update_dma_alignment() later. 2155 * which is set by the platform.
2156 *
2157 * Devices that require a bigger alignment can increase it later.
2154 */ 2158 */
2155 blk_queue_dma_alignment(q, 0x03); 2159 blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
2156} 2160}
2157EXPORT_SYMBOL_GPL(__scsi_init_queue); 2161EXPORT_SYMBOL_GPL(__scsi_init_queue);
2158 2162
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index be5e919db0e8..0880d975eed3 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -770,7 +770,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
770 * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized 770 * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
771 **/ 771 **/
772static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, 772static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
773 int *bflags, int async) 773 blist_flags_t *bflags, int async)
774{ 774{
775 int ret; 775 int ret;
776 776
@@ -1049,14 +1049,15 @@ static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
1049 * - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized 1049 * - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
1050 **/ 1050 **/
1051static int scsi_probe_and_add_lun(struct scsi_target *starget, 1051static int scsi_probe_and_add_lun(struct scsi_target *starget,
1052 u64 lun, int *bflagsp, 1052 u64 lun, blist_flags_t *bflagsp,
1053 struct scsi_device **sdevp, 1053 struct scsi_device **sdevp,
1054 enum scsi_scan_mode rescan, 1054 enum scsi_scan_mode rescan,
1055 void *hostdata) 1055 void *hostdata)
1056{ 1056{
1057 struct scsi_device *sdev; 1057 struct scsi_device *sdev;
1058 unsigned char *result; 1058 unsigned char *result;
1059 int bflags, res = SCSI_SCAN_NO_RESPONSE, result_len = 256; 1059 blist_flags_t bflags;
1060 int res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
1060 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1061 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1061 1062
1062 /* 1063 /*
@@ -1201,7 +1202,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
1201 * Modifies sdevscan->lun. 1202 * Modifies sdevscan->lun.
1202 **/ 1203 **/
1203static void scsi_sequential_lun_scan(struct scsi_target *starget, 1204static void scsi_sequential_lun_scan(struct scsi_target *starget,
1204 int bflags, int scsi_level, 1205 blist_flags_t bflags, int scsi_level,
1205 enum scsi_scan_mode rescan) 1206 enum scsi_scan_mode rescan)
1206{ 1207{
1207 uint max_dev_lun; 1208 uint max_dev_lun;
@@ -1292,7 +1293,7 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
1292 * 0: scan completed (or no memory, so further scanning is futile) 1293 * 0: scan completed (or no memory, so further scanning is futile)
1293 * 1: could not scan with REPORT LUN 1294 * 1: could not scan with REPORT LUN
1294 **/ 1295 **/
1295static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, 1296static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflags,
1296 enum scsi_scan_mode rescan) 1297 enum scsi_scan_mode rescan)
1297{ 1298{
1298 unsigned char scsi_cmd[MAX_COMMAND_SIZE]; 1299 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
@@ -1538,7 +1539,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
1538 unsigned int id, u64 lun, enum scsi_scan_mode rescan) 1539 unsigned int id, u64 lun, enum scsi_scan_mode rescan)
1539{ 1540{
1540 struct Scsi_Host *shost = dev_to_shost(parent); 1541 struct Scsi_Host *shost = dev_to_shost(parent);
1541 int bflags = 0; 1542 blist_flags_t bflags = 0;
1542 int res; 1543 int res;
1543 struct scsi_target *starget; 1544 struct scsi_target *starget;
1544 1545
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 50e7d7e4a861..26ce17178401 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -967,7 +967,8 @@ sdev_show_wwid(struct device *dev, struct device_attribute *attr,
967} 967}
968static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL); 968static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL);
969 969
970#define BLIST_FLAG_NAME(name) [ilog2(BLIST_##name)] = #name 970#define BLIST_FLAG_NAME(name) \
971 [ilog2((__force unsigned int)BLIST_##name)] = #name
971static const char *const sdev_bflags_name[] = { 972static const char *const sdev_bflags_name[] = {
972#include "scsi_devinfo_tbl.c" 973#include "scsi_devinfo_tbl.c"
973}; 974};
@@ -984,7 +985,7 @@ sdev_show_blacklist(struct device *dev, struct device_attribute *attr,
984 for (i = 0; i < sizeof(sdev->sdev_bflags) * BITS_PER_BYTE; i++) { 985 for (i = 0; i < sizeof(sdev->sdev_bflags) * BITS_PER_BYTE; i++) {
985 const char *name = NULL; 986 const char *name = NULL;
986 987
987 if (!(sdev->sdev_bflags & BIT(i))) 988 if (!(sdev->sdev_bflags & (__force blist_flags_t)BIT(i)))
988 continue; 989 continue;
989 if (i < ARRAY_SIZE(sdev_bflags_name) && sdev_bflags_name[i]) 990 if (i < ARRAY_SIZE(sdev_bflags_name) && sdev_bflags_name[i])
990 name = sdev_bflags_name[i]; 991 name = sdev_bflags_name[i];
@@ -1414,7 +1415,10 @@ static void __scsi_remove_target(struct scsi_target *starget)
1414 * check. 1415 * check.
1415 */ 1416 */
1416 if (sdev->channel != starget->channel || 1417 if (sdev->channel != starget->channel ||
1417 sdev->id != starget->id || 1418 sdev->id != starget->id)
1419 continue;
1420 if (sdev->sdev_state == SDEV_DEL ||
1421 sdev->sdev_state == SDEV_CANCEL ||
1418 !get_device(&sdev->sdev_gendev)) 1422 !get_device(&sdev->sdev_gendev))
1419 continue; 1423 continue;
1420 spin_unlock_irqrestore(shost->host_lock, flags); 1424 spin_unlock_irqrestore(shost->host_lock, flags);
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index d0219e36080c..10ebb213ddb3 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -50,14 +50,14 @@
50 50
51/* Our blacklist flags */ 51/* Our blacklist flags */
52enum { 52enum {
53 SPI_BLIST_NOIUS = 0x1, 53 SPI_BLIST_NOIUS = (__force blist_flags_t)0x1,
54}; 54};
55 55
56/* blacklist table, modelled on scsi_devinfo.c */ 56/* blacklist table, modelled on scsi_devinfo.c */
57static struct { 57static struct {
58 char *vendor; 58 char *vendor;
59 char *model; 59 char *model;
60 unsigned flags; 60 blist_flags_t flags;
61} spi_static_device_list[] __initdata = { 61} spi_static_device_list[] __initdata = {
62 {"HP", "Ultrium 3-SCSI", SPI_BLIST_NOIUS }, 62 {"HP", "Ultrium 3-SCSI", SPI_BLIST_NOIUS },
63 {"IBM", "ULTRIUM-TD3", SPI_BLIST_NOIUS }, 63 {"IBM", "ULTRIUM-TD3", SPI_BLIST_NOIUS },
@@ -221,9 +221,11 @@ static int spi_device_configure(struct transport_container *tc,
221{ 221{
222 struct scsi_device *sdev = to_scsi_device(dev); 222 struct scsi_device *sdev = to_scsi_device(dev);
223 struct scsi_target *starget = sdev->sdev_target; 223 struct scsi_target *starget = sdev->sdev_target;
224 unsigned bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8], 224 blist_flags_t bflags;
225 &sdev->inquiry[16], 225
226 SCSI_DEVINFO_SPI); 226 bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8],
227 &sdev->inquiry[16],
228 SCSI_DEVINFO_SPI);
227 229
228 /* Populate the target capability fields with the values 230 /* Populate the target capability fields with the values
229 * gleaned from the device inquiry */ 231 * gleaned from the device inquiry */
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 24fe68522716..a028ab3322a9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1312,6 +1312,7 @@ static int sd_init_command(struct scsi_cmnd *cmd)
1312static void sd_uninit_command(struct scsi_cmnd *SCpnt) 1312static void sd_uninit_command(struct scsi_cmnd *SCpnt)
1313{ 1313{
1314 struct request *rq = SCpnt->request; 1314 struct request *rq = SCpnt->request;
1315 u8 *cmnd;
1315 1316
1316 if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK) 1317 if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK)
1317 sd_zbc_write_unlock_zone(SCpnt); 1318 sd_zbc_write_unlock_zone(SCpnt);
@@ -1320,9 +1321,10 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
1320 __free_page(rq->special_vec.bv_page); 1321 __free_page(rq->special_vec.bv_page);
1321 1322
1322 if (SCpnt->cmnd != scsi_req(rq)->cmd) { 1323 if (SCpnt->cmnd != scsi_req(rq)->cmd) {
1323 mempool_free(SCpnt->cmnd, sd_cdb_pool); 1324 cmnd = SCpnt->cmnd;
1324 SCpnt->cmnd = NULL; 1325 SCpnt->cmnd = NULL;
1325 SCpnt->cmd_len = 0; 1326 SCpnt->cmd_len = 0;
1327 mempool_free(cmnd, sd_cdb_pool);
1326 } 1328 }
1327} 1329}
1328 1330
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 1b06cf0375dc..3b3d1d050cac 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -953,10 +953,11 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
953 case TEST_UNIT_READY: 953 case TEST_UNIT_READY:
954 break; 954 break;
955 default: 955 default:
956 set_host_byte(scmnd, DID_TARGET_FAILURE); 956 set_host_byte(scmnd, DID_ERROR);
957 } 957 }
958 break; 958 break;
959 case SRB_STATUS_INVALID_LUN: 959 case SRB_STATUS_INVALID_LUN:
960 set_host_byte(scmnd, DID_NO_CONNECT);
960 do_work = true; 961 do_work = true;
961 process_err_fn = storvsc_remove_lun; 962 process_err_fn = storvsc_remove_lun;
962 break; 963 break;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 011c3369082c..a355d989b414 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -6559,12 +6559,15 @@ static int ufshcd_config_vreg(struct device *dev,
6559 struct ufs_vreg *vreg, bool on) 6559 struct ufs_vreg *vreg, bool on)
6560{ 6560{
6561 int ret = 0; 6561 int ret = 0;
6562 struct regulator *reg = vreg->reg; 6562 struct regulator *reg;
6563 const char *name = vreg->name; 6563 const char *name;
6564 int min_uV, uA_load; 6564 int min_uV, uA_load;
6565 6565
6566 BUG_ON(!vreg); 6566 BUG_ON(!vreg);
6567 6567
6568 reg = vreg->reg;
6569 name = vreg->name;
6570
6568 if (regulator_count_voltages(reg) > 0) { 6571 if (regulator_count_voltages(reg) > 0) {
6569 min_uV = on ? vreg->min_uV : 0; 6572 min_uV = on ? vreg->min_uV : 0;
6570 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV); 6573 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c
index 89f4cf507be6..f2d8c3c53ea4 100644
--- a/drivers/soc/amlogic/meson-gx-socinfo.c
+++ b/drivers/soc/amlogic/meson-gx-socinfo.c
@@ -20,8 +20,8 @@
20#define AO_SEC_SOCINFO_OFFSET AO_SEC_SD_CFG8 20#define AO_SEC_SOCINFO_OFFSET AO_SEC_SD_CFG8
21 21
22#define SOCINFO_MAJOR GENMASK(31, 24) 22#define SOCINFO_MAJOR GENMASK(31, 24)
23#define SOCINFO_MINOR GENMASK(23, 16) 23#define SOCINFO_PACK GENMASK(23, 16)
24#define SOCINFO_PACK GENMASK(15, 8) 24#define SOCINFO_MINOR GENMASK(15, 8)
25#define SOCINFO_MISC GENMASK(7, 0) 25#define SOCINFO_MISC GENMASK(7, 0)
26 26
27static const struct meson_gx_soc_id { 27static const struct meson_gx_soc_id {
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index 77fe55ce790c..d65345312527 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -79,6 +79,7 @@
79#define A3700_SPI_BYTE_LEN BIT(5) 79#define A3700_SPI_BYTE_LEN BIT(5)
80#define A3700_SPI_CLK_PRESCALE BIT(0) 80#define A3700_SPI_CLK_PRESCALE BIT(0)
81#define A3700_SPI_CLK_PRESCALE_MASK (0x1f) 81#define A3700_SPI_CLK_PRESCALE_MASK (0x1f)
82#define A3700_SPI_CLK_EVEN_OFFS (0x10)
82 83
83#define A3700_SPI_WFIFO_THRS_BIT 28 84#define A3700_SPI_WFIFO_THRS_BIT 28
84#define A3700_SPI_RFIFO_THRS_BIT 24 85#define A3700_SPI_RFIFO_THRS_BIT 24
@@ -220,6 +221,13 @@ static void a3700_spi_clock_set(struct a3700_spi *a3700_spi,
220 221
221 prescale = DIV_ROUND_UP(clk_get_rate(a3700_spi->clk), speed_hz); 222 prescale = DIV_ROUND_UP(clk_get_rate(a3700_spi->clk), speed_hz);
222 223
224 /* For prescaler values over 15, we can only set it by steps of 2.
225 * Starting from A3700_SPI_CLK_EVEN_OFFS, we set values from 0 up to
226 * 30. We only use this range from 16 to 30.
227 */
228 if (prescale > 15)
229 prescale = A3700_SPI_CLK_EVEN_OFFS + DIV_ROUND_UP(prescale, 2);
230
223 val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); 231 val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
224 val = val & ~A3700_SPI_CLK_PRESCALE_MASK; 232 val = val & ~A3700_SPI_CLK_PRESCALE_MASK;
225 233
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index f95da364c283..669470971023 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1661,12 +1661,12 @@ static int atmel_spi_remove(struct platform_device *pdev)
1661 pm_runtime_get_sync(&pdev->dev); 1661 pm_runtime_get_sync(&pdev->dev);
1662 1662
1663 /* reset the hardware and block queue progress */ 1663 /* reset the hardware and block queue progress */
1664 spin_lock_irq(&as->lock);
1665 if (as->use_dma) { 1664 if (as->use_dma) {
1666 atmel_spi_stop_dma(master); 1665 atmel_spi_stop_dma(master);
1667 atmel_spi_release_dma(master); 1666 atmel_spi_release_dma(master);
1668 } 1667 }
1669 1668
1669 spin_lock_irq(&as->lock);
1670 spi_writel(as, CR, SPI_BIT(SWRST)); 1670 spi_writel(as, CR, SPI_BIT(SWRST));
1671 spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ 1671 spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
1672 spi_readl(as, SR); 1672 spi_readl(as, SR);
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 2ce875764ca6..0835a8d88fb8 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -377,8 +377,8 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
377 /* Sets SPCMD */ 377 /* Sets SPCMD */
378 rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0); 378 rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
379 379
380 /* Enables SPI function in master mode */ 380 /* Sets RSPI mode */
381 rspi_write8(rspi, SPCR_SPE | SPCR_MSTR, RSPI_SPCR); 381 rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
382 382
383 return 0; 383 return 0;
384} 384}
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index c5cd635c28f3..41410031f8e9 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -525,7 +525,7 @@ err_free_master:
525 525
526static int sun4i_spi_remove(struct platform_device *pdev) 526static int sun4i_spi_remove(struct platform_device *pdev)
527{ 527{
528 pm_runtime_disable(&pdev->dev); 528 pm_runtime_force_suspend(&pdev->dev);
529 529
530 return 0; 530 return 0;
531} 531}
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index bc7100b93dfc..e0b9fe1d0e37 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -271,6 +271,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
271 while (remaining_words) { 271 while (remaining_words) {
272 int n_words, tx_words, rx_words; 272 int n_words, tx_words, rx_words;
273 u32 sr; 273 u32 sr;
274 int stalled;
274 275
275 n_words = min(remaining_words, xspi->buffer_size); 276 n_words = min(remaining_words, xspi->buffer_size);
276 277
@@ -299,7 +300,17 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
299 300
300 /* Read out all the data from the Rx FIFO */ 301 /* Read out all the data from the Rx FIFO */
301 rx_words = n_words; 302 rx_words = n_words;
303 stalled = 10;
302 while (rx_words) { 304 while (rx_words) {
305 if (rx_words == n_words && !(stalled--) &&
306 !(sr & XSPI_SR_TX_EMPTY_MASK) &&
307 (sr & XSPI_SR_RX_EMPTY_MASK)) {
308 dev_err(&spi->dev,
309 "Detected stall. Check C_SPI_MODE and C_SPI_MEMORY\n");
310 xspi_init_hw(xspi);
311 return -EIO;
312 }
313
303 if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) { 314 if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) {
304 xilinx_spi_rx(xspi); 315 xilinx_spi_rx(xspi);
305 rx_words--; 316 rx_words--;
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index d8e4219c2324..71c73766ee22 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -32,7 +32,7 @@ config SSB_BLOCKIO
32 32
33config SSB_PCIHOST_POSSIBLE 33config SSB_PCIHOST_POSSIBLE
34 bool 34 bool
35 depends on SSB && (PCI = y || PCI = SSB) 35 depends on SSB && (PCI = y || PCI = SSB) && PCI_DRIVERS_LEGACY
36 default y 36 default y
37 37
38config SSB_PCIHOST 38config SSB_PCIHOST
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 0f695df14c9d..372ce9913e6d 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -765,10 +765,12 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
765 break; 765 break;
766 case ASHMEM_SET_SIZE: 766 case ASHMEM_SET_SIZE:
767 ret = -EINVAL; 767 ret = -EINVAL;
768 mutex_lock(&ashmem_mutex);
768 if (!asma->file) { 769 if (!asma->file) {
769 ret = 0; 770 ret = 0;
770 asma->size = (size_t)arg; 771 asma->size = (size_t)arg;
771 } 772 }
773 mutex_unlock(&ashmem_mutex);
772 break; 774 break;
773 case ASHMEM_GET_SIZE: 775 case ASHMEM_GET_SIZE:
774 ret = asma->size; 776 ret = asma->size;
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index a517b2d29f1b..8f6494158d3d 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -37,7 +37,7 @@ config ION_CHUNK_HEAP
37 37
38config ION_CMA_HEAP 38config ION_CMA_HEAP
39 bool "Ion CMA heap support" 39 bool "Ion CMA heap support"
40 depends on ION && CMA 40 depends on ION && DMA_CMA
41 help 41 help
42 Choose this option to enable CMA heaps with Ion. This heap is backed 42 Choose this option to enable CMA heaps with Ion. This heap is backed
43 by the Contiguous Memory Allocator (CMA). If your system has these 43 by the Contiguous Memory Allocator (CMA). If your system has these
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index a7d9b0e98572..f480885e346b 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -346,7 +346,7 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
346 mutex_lock(&buffer->lock); 346 mutex_lock(&buffer->lock);
347 list_for_each_entry(a, &buffer->attachments, list) { 347 list_for_each_entry(a, &buffer->attachments, list) {
348 dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents, 348 dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
349 DMA_BIDIRECTIONAL); 349 direction);
350 } 350 }
351 mutex_unlock(&buffer->lock); 351 mutex_unlock(&buffer->lock);
352 352
@@ -368,7 +368,7 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
368 mutex_lock(&buffer->lock); 368 mutex_lock(&buffer->lock);
369 list_for_each_entry(a, &buffer->attachments, list) { 369 list_for_each_entry(a, &buffer->attachments, list) {
370 dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents, 370 dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
371 DMA_BIDIRECTIONAL); 371 direction);
372 } 372 }
373 mutex_unlock(&buffer->lock); 373 mutex_unlock(&buffer->lock);
374 374
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index dd5545d9990a..86196ffd2faf 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -39,9 +39,15 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
39 struct ion_cma_heap *cma_heap = to_cma_heap(heap); 39 struct ion_cma_heap *cma_heap = to_cma_heap(heap);
40 struct sg_table *table; 40 struct sg_table *table;
41 struct page *pages; 41 struct page *pages;
42 unsigned long size = PAGE_ALIGN(len);
43 unsigned long nr_pages = size >> PAGE_SHIFT;
44 unsigned long align = get_order(size);
42 int ret; 45 int ret;
43 46
44 pages = cma_alloc(cma_heap->cma, len, 0, GFP_KERNEL); 47 if (align > CONFIG_CMA_ALIGNMENT)
48 align = CONFIG_CMA_ALIGNMENT;
49
50 pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL);
45 if (!pages) 51 if (!pages)
46 return -ENOMEM; 52 return -ENOMEM;
47 53
@@ -53,7 +59,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
53 if (ret) 59 if (ret)
54 goto free_mem; 60 goto free_mem;
55 61
56 sg_set_page(table->sgl, pages, len, 0); 62 sg_set_page(table->sgl, pages, size, 0);
57 63
58 buffer->priv_virt = pages; 64 buffer->priv_virt = pages;
59 buffer->sg_table = table; 65 buffer->sg_table = table;
@@ -62,7 +68,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
62free_mem: 68free_mem:
63 kfree(table); 69 kfree(table);
64err: 70err:
65 cma_release(cma_heap->cma, pages, buffer->size); 71 cma_release(cma_heap->cma, pages, nr_pages);
66 return -ENOMEM; 72 return -ENOMEM;
67} 73}
68 74
@@ -70,9 +76,10 @@ static void ion_cma_free(struct ion_buffer *buffer)
70{ 76{
71 struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); 77 struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
72 struct page *pages = buffer->priv_virt; 78 struct page *pages = buffer->priv_virt;
79 unsigned long nr_pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
73 80
74 /* release memory */ 81 /* release memory */
75 cma_release(cma_heap->cma, pages, buffer->size); 82 cma_release(cma_heap->cma, pages, nr_pages);
76 /* release sg table */ 83 /* release sg table */
77 sg_free_table(buffer->sg_table); 84 sg_free_table(buffer->sg_table);
78 kfree(buffer->sg_table); 85 kfree(buffer->sg_table);
diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/ssi_hash.c
index d79090ed7f9c..2035835b62dc 100644
--- a/drivers/staging/ccree/ssi_hash.c
+++ b/drivers/staging/ccree/ssi_hash.c
@@ -1769,7 +1769,7 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in)
1769 struct device *dev = drvdata_to_dev(ctx->drvdata); 1769 struct device *dev = drvdata_to_dev(ctx->drvdata);
1770 struct ahash_req_ctx *state = ahash_request_ctx(req); 1770 struct ahash_req_ctx *state = ahash_request_ctx(req);
1771 u32 tmp; 1771 u32 tmp;
1772 int rc; 1772 int rc = 0;
1773 1773
1774 memcpy(&tmp, in, sizeof(u32)); 1774 memcpy(&tmp, in, sizeof(u32));
1775 if (tmp != CC_EXPORT_MAGIC) { 1775 if (tmp != CC_EXPORT_MAGIC) {
@@ -1778,9 +1778,12 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in)
1778 } 1778 }
1779 in += sizeof(u32); 1779 in += sizeof(u32);
1780 1780
1781 rc = ssi_hash_init(state, ctx); 1781 /* call init() to allocate bufs if the user hasn't */
1782 if (rc) 1782 if (!state->digest_buff) {
1783 goto out; 1783 rc = ssi_hash_init(state, ctx);
1784 if (rc)
1785 goto out;
1786 }
1784 1787
1785 dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr, 1788 dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
1786 ctx->inter_digestsize, DMA_BIDIRECTIONAL); 1789 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
diff --git a/drivers/staging/comedi/drivers/ni_atmio.c b/drivers/staging/comedi/drivers/ni_atmio.c
index 2d62a8c57332..ae6ed96d7874 100644
--- a/drivers/staging/comedi/drivers/ni_atmio.c
+++ b/drivers/staging/comedi/drivers/ni_atmio.c
@@ -361,3 +361,8 @@ static struct comedi_driver ni_atmio_driver = {
361 .detach = ni_atmio_detach, 361 .detach = ni_atmio_detach,
362}; 362};
363module_comedi_driver(ni_atmio_driver); 363module_comedi_driver(ni_atmio_driver);
364
365MODULE_AUTHOR("Comedi http://www.comedi.org");
366MODULE_DESCRIPTION("Comedi low-level driver");
367MODULE_LICENSE("GPL");
368
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 986c2a40d978..8267119ccc8e 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -487,21 +487,18 @@ ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
487 ksocknal_nid2peerlist(id.nid)); 487 ksocknal_nid2peerlist(id.nid));
488 } 488 }
489 489
490 route2 = NULL;
491 list_for_each_entry(route2, &peer->ksnp_routes, ksnr_list) { 490 list_for_each_entry(route2, &peer->ksnp_routes, ksnr_list) {
492 if (route2->ksnr_ipaddr == ipaddr) 491 if (route2->ksnr_ipaddr == ipaddr) {
493 break; 492 /* Route already exists, use the old one */
494 493 ksocknal_route_decref(route);
495 route2 = NULL; 494 route2->ksnr_share_count++;
496 } 495 goto out;
497 if (!route2) { 496 }
498 ksocknal_add_route_locked(peer, route);
499 route->ksnr_share_count++;
500 } else {
501 ksocknal_route_decref(route);
502 route2->ksnr_share_count++;
503 } 497 }
504 498 /* Route doesn't already exist, add the new one */
499 ksocknal_add_route_locked(peer, route);
500 route->ksnr_share_count++;
501out:
505 write_unlock_bh(&ksocknal_data.ksnd_global_lock); 502 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
506 503
507 return 0; 504 return 0;
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
index 539a26444f31..7d49d4865298 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c
@@ -71,16 +71,12 @@ lnet_sock_ioctl(int cmd, unsigned long arg)
71 } 71 }
72 72
73 sock_filp = sock_alloc_file(sock, 0, NULL); 73 sock_filp = sock_alloc_file(sock, 0, NULL);
74 if (IS_ERR(sock_filp)) { 74 if (IS_ERR(sock_filp))
75 sock_release(sock); 75 return PTR_ERR(sock_filp);
76 rc = PTR_ERR(sock_filp);
77 goto out;
78 }
79 76
80 rc = kernel_sock_unlocked_ioctl(sock_filp, cmd, arg); 77 rc = kernel_sock_unlocked_ioctl(sock_filp, cmd, arg);
81 78
82 fput(sock_filp); 79 fput(sock_filp);
83out:
84 return rc; 80 return rc;
85} 81}
86 82
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp.h b/drivers/staging/media/atomisp/include/linux/atomisp.h
index b5533197226d..15fa5679bae7 100644
--- a/drivers/staging/media/atomisp/include/linux/atomisp.h
+++ b/drivers/staging/media/atomisp/include/linux/atomisp.h
@@ -208,14 +208,14 @@ struct atomisp_dis_vector {
208}; 208};
209 209
210 210
211/** DVS 2.0 Coefficient types. This structure contains 4 pointers to 211/* DVS 2.0 Coefficient types. This structure contains 4 pointers to
212 * arrays that contain the coeffients for each type. 212 * arrays that contain the coeffients for each type.
213 */ 213 */
214struct atomisp_dvs2_coef_types { 214struct atomisp_dvs2_coef_types {
215 short __user *odd_real; /**< real part of the odd coefficients*/ 215 short __user *odd_real; /** real part of the odd coefficients*/
216 short __user *odd_imag; /**< imaginary part of the odd coefficients*/ 216 short __user *odd_imag; /** imaginary part of the odd coefficients*/
217 short __user *even_real;/**< real part of the even coefficients*/ 217 short __user *even_real;/** real part of the even coefficients*/
218 short __user *even_imag;/**< imaginary part of the even coefficients*/ 218 short __user *even_imag;/** imaginary part of the even coefficients*/
219}; 219};
220 220
221/* 221/*
@@ -223,10 +223,10 @@ struct atomisp_dvs2_coef_types {
223 * arrays that contain the statistics for each type. 223 * arrays that contain the statistics for each type.
224 */ 224 */
225struct atomisp_dvs2_stat_types { 225struct atomisp_dvs2_stat_types {
226 int __user *odd_real; /**< real part of the odd statistics*/ 226 int __user *odd_real; /** real part of the odd statistics*/
227 int __user *odd_imag; /**< imaginary part of the odd statistics*/ 227 int __user *odd_imag; /** imaginary part of the odd statistics*/
228 int __user *even_real;/**< real part of the even statistics*/ 228 int __user *even_real;/** real part of the even statistics*/
229 int __user *even_imag;/**< imaginary part of the even statistics*/ 229 int __user *even_imag;/** imaginary part of the even statistics*/
230}; 230};
231 231
232struct atomisp_dis_coefficients { 232struct atomisp_dis_coefficients {
@@ -390,16 +390,16 @@ struct atomisp_metadata_config {
390 * Generic resolution structure. 390 * Generic resolution structure.
391 */ 391 */
392struct atomisp_resolution { 392struct atomisp_resolution {
393 uint32_t width; /**< Width */ 393 uint32_t width; /** Width */
394 uint32_t height; /**< Height */ 394 uint32_t height; /** Height */
395}; 395};
396 396
397/* 397/*
398 * This specifies the coordinates (x,y) 398 * This specifies the coordinates (x,y)
399 */ 399 */
400struct atomisp_zoom_point { 400struct atomisp_zoom_point {
401 int32_t x; /**< x coordinate */ 401 int32_t x; /** x coordinate */
402 int32_t y; /**< y coordinate */ 402 int32_t y; /** y coordinate */
403}; 403};
404 404
405/* 405/*
@@ -411,9 +411,9 @@ struct atomisp_zoom_region {
411}; 411};
412 412
413struct atomisp_dz_config { 413struct atomisp_dz_config {
414 uint32_t dx; /**< Horizontal zoom factor */ 414 uint32_t dx; /** Horizontal zoom factor */
415 uint32_t dy; /**< Vertical zoom factor */ 415 uint32_t dy; /** Vertical zoom factor */
416 struct atomisp_zoom_region zoom_region; /**< region for zoom */ 416 struct atomisp_zoom_region zoom_region; /** region for zoom */
417}; 417};
418 418
419struct atomisp_parm { 419struct atomisp_parm {
@@ -758,7 +758,7 @@ enum atomisp_acc_arg_type {
758 ATOMISP_ACC_ARG_FRAME /* Frame argument */ 758 ATOMISP_ACC_ARG_FRAME /* Frame argument */
759}; 759};
760 760
761/** ISP memories, isp2400 */ 761/* ISP memories, isp2400 */
762enum atomisp_acc_memory { 762enum atomisp_acc_memory {
763 ATOMISP_ACC_MEMORY_PMEM0 = 0, 763 ATOMISP_ACC_MEMORY_PMEM0 = 0,
764 ATOMISP_ACC_MEMORY_DMEM0, 764 ATOMISP_ACC_MEMORY_DMEM0,
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c
index 8a18c528cad4..debf0e3853ff 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c
@@ -5187,7 +5187,7 @@ int get_frame_info_nop(struct atomisp_sub_device *asd,
5187 return 0; 5187 return 0;
5188} 5188}
5189 5189
5190/** 5190/*
5191 * Resets CSS parameters that depend on input resolution. 5191 * Resets CSS parameters that depend on input resolution.
5192 * 5192 *
5193 * Update params like CSS RAW binning, 2ppc mode and pp_input 5193 * Update params like CSS RAW binning, 2ppc mode and pp_input
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c
index 6e87aa5aab4c..b7f9da014641 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c
@@ -4051,7 +4051,7 @@ int atomisp_css_get_formats_config(struct atomisp_sub_device *asd,
4051int atomisp_css_get_zoom_factor(struct atomisp_sub_device *asd, 4051int atomisp_css_get_zoom_factor(struct atomisp_sub_device *asd,
4052 unsigned int *zoom) 4052 unsigned int *zoom)
4053{ 4053{
4054 struct ia_css_dz_config dz_config; /**< Digital Zoom */ 4054 struct ia_css_dz_config dz_config; /** Digital Zoom */
4055 struct ia_css_isp_config isp_config; 4055 struct ia_css_isp_config isp_config;
4056 struct atomisp_device *isp = asd->isp; 4056 struct atomisp_device *isp = asd->isp;
4057 4057
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h
index 685da0f48bab..95669eedaad1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h
@@ -28,17 +28,17 @@ struct atomisp_histogram32 {
28}; 28};
29 29
30struct atomisp_dvs2_stat_types32 { 30struct atomisp_dvs2_stat_types32 {
31 compat_uptr_t odd_real; /**< real part of the odd statistics*/ 31 compat_uptr_t odd_real; /** real part of the odd statistics*/
32 compat_uptr_t odd_imag; /**< imaginary part of the odd statistics*/ 32 compat_uptr_t odd_imag; /** imaginary part of the odd statistics*/
33 compat_uptr_t even_real;/**< real part of the even statistics*/ 33 compat_uptr_t even_real;/** real part of the even statistics*/
34 compat_uptr_t even_imag;/**< imaginary part of the even statistics*/ 34 compat_uptr_t even_imag;/** imaginary part of the even statistics*/
35}; 35};
36 36
37struct atomisp_dvs2_coef_types32 { 37struct atomisp_dvs2_coef_types32 {
38 compat_uptr_t odd_real; /**< real part of the odd coefficients*/ 38 compat_uptr_t odd_real; /** real part of the odd coefficients*/
39 compat_uptr_t odd_imag; /**< imaginary part of the odd coefficients*/ 39 compat_uptr_t odd_imag; /** imaginary part of the odd coefficients*/
40 compat_uptr_t even_real;/**< real part of the even coefficients*/ 40 compat_uptr_t even_real;/** real part of the even coefficients*/
41 compat_uptr_t even_imag;/**< imaginary part of the even coefficients*/ 41 compat_uptr_t even_imag;/** imaginary part of the even coefficients*/
42}; 42};
43 43
44struct atomisp_dvs2_statistics32 { 44struct atomisp_dvs2_statistics32 {
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h
index f3d61827ae8c..c3eba675da06 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h
@@ -223,7 +223,7 @@ struct atomisp_subdev_params {
223 223
224 bool dis_proj_data_valid; 224 bool dis_proj_data_valid;
225 225
226 struct ia_css_dz_config dz_config; /**< Digital Zoom */ 226 struct ia_css_dz_config dz_config; /** Digital Zoom */
227 struct ia_css_capture_config capture_config; 227 struct ia_css_capture_config capture_config;
228 228
229 struct atomisp_css_isp_config config; 229 struct atomisp_css_isp_config config;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/src/circbuf.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/src/circbuf.c
index 19bae1610fb6..050d60f0894f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/src/circbuf.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/src/circbuf.c
@@ -21,7 +21,7 @@
21 * Forward declarations. 21 * Forward declarations.
22 * 22 *
23 **********************************************************************/ 23 **********************************************************************/
24/** 24/*
25 * @brief Read the oldest element from the circular buffer. 25 * @brief Read the oldest element from the circular buffer.
26 * Read the oldest element WITHOUT checking whehter the 26 * Read the oldest element WITHOUT checking whehter the
27 * circular buffer is empty or not. The oldest element is 27 * circular buffer is empty or not. The oldest element is
@@ -34,7 +34,7 @@
34static inline ia_css_circbuf_elem_t 34static inline ia_css_circbuf_elem_t
35ia_css_circbuf_read(ia_css_circbuf_t *cb); 35ia_css_circbuf_read(ia_css_circbuf_t *cb);
36 36
37/** 37/*
38 * @brief Shift a chunk of elements in the circular buffer. 38 * @brief Shift a chunk of elements in the circular buffer.
39 * A chunk of elements (i.e. the ones from the "start" position 39 * A chunk of elements (i.e. the ones from the "start" position
40 * to the "chunk_src" position) are shifted in the circular buffer, 40 * to the "chunk_src" position) are shifted in the circular buffer,
@@ -48,7 +48,7 @@ static inline void ia_css_circbuf_shift_chunk(ia_css_circbuf_t *cb,
48 uint32_t chunk_src, 48 uint32_t chunk_src,
49 uint32_t chunk_dest); 49 uint32_t chunk_dest);
50 50
51/** 51/*
52 * @brief Get the "val" field in the element. 52 * @brief Get the "val" field in the element.
53 * 53 *
54 * @param elem The pointer to the element. 54 * @param elem The pointer to the element.
@@ -63,7 +63,7 @@ ia_css_circbuf_elem_get_val(ia_css_circbuf_elem_t *elem);
63 * Non-inline functions. 63 * Non-inline functions.
64 * 64 *
65 **********************************************************************/ 65 **********************************************************************/
66/** 66/*
67 * @brief Create the circular buffer. 67 * @brief Create the circular buffer.
68 * Refer to "ia_css_circbuf.h" for details. 68 * Refer to "ia_css_circbuf.h" for details.
69 */ 69 */
@@ -88,7 +88,7 @@ ia_css_circbuf_create(ia_css_circbuf_t *cb,
88 cb->elems = elems; 88 cb->elems = elems;
89} 89}
90 90
91/** 91/*
92 * @brief Destroy the circular buffer. 92 * @brief Destroy the circular buffer.
93 * Refer to "ia_css_circbuf.h" for details. 93 * Refer to "ia_css_circbuf.h" for details.
94 */ 94 */
@@ -99,7 +99,7 @@ void ia_css_circbuf_destroy(ia_css_circbuf_t *cb)
99 cb->elems = NULL; 99 cb->elems = NULL;
100} 100}
101 101
102/** 102/*
103 * @brief Pop a value out of the circular buffer. 103 * @brief Pop a value out of the circular buffer.
104 * Refer to "ia_css_circbuf.h" for details. 104 * Refer to "ia_css_circbuf.h" for details.
105 */ 105 */
@@ -116,7 +116,7 @@ uint32_t ia_css_circbuf_pop(ia_css_circbuf_t *cb)
116 return ret; 116 return ret;
117} 117}
118 118
119/** 119/*
120 * @brief Extract a value out of the circular buffer. 120 * @brief Extract a value out of the circular buffer.
121 * Refer to "ia_css_circbuf.h" for details. 121 * Refer to "ia_css_circbuf.h" for details.
122 */ 122 */
@@ -166,7 +166,7 @@ uint32_t ia_css_circbuf_extract(ia_css_circbuf_t *cb, int offset)
166 return val; 166 return val;
167} 167}
168 168
169/** 169/*
170 * @brief Peek an element from the circular buffer. 170 * @brief Peek an element from the circular buffer.
171 * Refer to "ia_css_circbuf.h" for details. 171 * Refer to "ia_css_circbuf.h" for details.
172 */ 172 */
@@ -180,7 +180,7 @@ uint32_t ia_css_circbuf_peek(ia_css_circbuf_t *cb, int offset)
180 return cb->elems[pos].val; 180 return cb->elems[pos].val;
181} 181}
182 182
183/** 183/*
184 * @brief Get the value of an element from the circular buffer. 184 * @brief Get the value of an element from the circular buffer.
185 * Refer to "ia_css_circbuf.h" for details. 185 * Refer to "ia_css_circbuf.h" for details.
186 */ 186 */
@@ -194,7 +194,7 @@ uint32_t ia_css_circbuf_peek_from_start(ia_css_circbuf_t *cb, int offset)
194 return cb->elems[pos].val; 194 return cb->elems[pos].val;
195} 195}
196 196
197/** @brief increase size of a circular buffer. 197/* @brief increase size of a circular buffer.
198 * Use 'CAUTION' before using this function. This was added to 198 * Use 'CAUTION' before using this function. This was added to
199 * support / fix issue with increasing size for tagger only 199 * support / fix issue with increasing size for tagger only
200 * Please refer to "ia_css_circbuf.h" for details. 200 * Please refer to "ia_css_circbuf.h" for details.
@@ -252,7 +252,7 @@ bool ia_css_circbuf_increase_size(
252 * Inline functions. 252 * Inline functions.
253 * 253 *
254 ****************************************************************/ 254 ****************************************************************/
255/** 255/*
256 * @brief Get the "val" field in the element. 256 * @brief Get the "val" field in the element.
257 * Refer to "Forward declarations" for details. 257 * Refer to "Forward declarations" for details.
258 */ 258 */
@@ -262,7 +262,7 @@ ia_css_circbuf_elem_get_val(ia_css_circbuf_elem_t *elem)
262 return elem->val; 262 return elem->val;
263} 263}
264 264
265/** 265/*
266 * @brief Read the oldest element from the circular buffer. 266 * @brief Read the oldest element from the circular buffer.
267 * Refer to "Forward declarations" for details. 267 * Refer to "Forward declarations" for details.
268 */ 268 */
@@ -282,7 +282,7 @@ ia_css_circbuf_read(ia_css_circbuf_t *cb)
282 return elem; 282 return elem;
283} 283}
284 284
285/** 285/*
286 * @brief Shift a chunk of elements in the circular buffer. 286 * @brief Shift a chunk of elements in the circular buffer.
287 * Refer to "Forward declarations" for details. 287 * Refer to "Forward declarations" for details.
288 */ 288 */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_binarydesc.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_binarydesc.h
index 616789d9b3f6..a6d650a9a1f4 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_binarydesc.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_binarydesc.h
@@ -19,7 +19,7 @@
19#include <ia_css_frame_public.h> /* ia_css_frame_info */ 19#include <ia_css_frame_public.h> /* ia_css_frame_info */
20#include <ia_css_binary.h> /* ia_css_binary_descr */ 20#include <ia_css_binary.h> /* ia_css_binary_descr */
21 21
22/** @brief Get a binary descriptor for copy. 22/* @brief Get a binary descriptor for copy.
23 * 23 *
24 * @param[in] pipe 24 * @param[in] pipe
25 * @param[out] copy_desc 25 * @param[out] copy_desc
@@ -36,7 +36,7 @@ extern void ia_css_pipe_get_copy_binarydesc(
36 struct ia_css_frame_info *out_info, 36 struct ia_css_frame_info *out_info,
37 struct ia_css_frame_info *vf_info); 37 struct ia_css_frame_info *vf_info);
38 38
39/** @brief Get a binary descriptor for vfpp. 39/* @brief Get a binary descriptor for vfpp.
40 * 40 *
41 * @param[in] pipe 41 * @param[in] pipe
42 * @param[out] vfpp_descr 42 * @param[out] vfpp_descr
@@ -51,7 +51,7 @@ extern void ia_css_pipe_get_vfpp_binarydesc(
51 struct ia_css_frame_info *in_info, 51 struct ia_css_frame_info *in_info,
52 struct ia_css_frame_info *out_info); 52 struct ia_css_frame_info *out_info);
53 53
54/** @brief Get numerator and denominator of bayer downscaling factor. 54/* @brief Get numerator and denominator of bayer downscaling factor.
55 * 55 *
56 * @param[in] bds_factor: The bayer downscaling factor. 56 * @param[in] bds_factor: The bayer downscaling factor.
57 * (= The bds_factor member in the sh_css_bds_factor structure.) 57 * (= The bds_factor member in the sh_css_bds_factor structure.)
@@ -67,7 +67,7 @@ extern enum ia_css_err sh_css_bds_factor_get_numerator_denominator(
67 unsigned int *bds_factor_numerator, 67 unsigned int *bds_factor_numerator,
68 unsigned int *bds_factor_denominator); 68 unsigned int *bds_factor_denominator);
69 69
70/** @brief Get a binary descriptor for preview stage. 70/* @brief Get a binary descriptor for preview stage.
71 * 71 *
72 * @param[in] pipe 72 * @param[in] pipe
73 * @param[out] preview_descr 73 * @param[out] preview_descr
@@ -86,7 +86,7 @@ extern enum ia_css_err ia_css_pipe_get_preview_binarydesc(
86 struct ia_css_frame_info *out_info, 86 struct ia_css_frame_info *out_info,
87 struct ia_css_frame_info *vf_info); 87 struct ia_css_frame_info *vf_info);
88 88
89/** @brief Get a binary descriptor for video stage. 89/* @brief Get a binary descriptor for video stage.
90 * 90 *
91 * @param[in/out] pipe 91 * @param[in/out] pipe
92 * @param[out] video_descr 92 * @param[out] video_descr
@@ -105,7 +105,7 @@ extern enum ia_css_err ia_css_pipe_get_video_binarydesc(
105 struct ia_css_frame_info *vf_info, 105 struct ia_css_frame_info *vf_info,
106 int stream_config_left_padding); 106 int stream_config_left_padding);
107 107
108/** @brief Get a binary descriptor for yuv scaler stage. 108/* @brief Get a binary descriptor for yuv scaler stage.
109 * 109 *
110 * @param[in/out] pipe 110 * @param[in/out] pipe
111 * @param[out] yuv_scaler_descr 111 * @param[out] yuv_scaler_descr
@@ -124,7 +124,7 @@ void ia_css_pipe_get_yuvscaler_binarydesc(
124 struct ia_css_frame_info *internal_out_info, 124 struct ia_css_frame_info *internal_out_info,
125 struct ia_css_frame_info *vf_info); 125 struct ia_css_frame_info *vf_info);
126 126
127/** @brief Get a binary descriptor for capture pp stage. 127/* @brief Get a binary descriptor for capture pp stage.
128 * 128 *
129 * @param[in/out] pipe 129 * @param[in/out] pipe
130 * @param[out] capture_pp_descr 130 * @param[out] capture_pp_descr
@@ -140,7 +140,7 @@ extern void ia_css_pipe_get_capturepp_binarydesc(
140 struct ia_css_frame_info *out_info, 140 struct ia_css_frame_info *out_info,
141 struct ia_css_frame_info *vf_info); 141 struct ia_css_frame_info *vf_info);
142 142
143/** @brief Get a binary descriptor for primary capture. 143/* @brief Get a binary descriptor for primary capture.
144 * 144 *
145 * @param[in] pipe 145 * @param[in] pipe
146 * @param[out] prim_descr 146 * @param[out] prim_descr
@@ -158,7 +158,7 @@ extern void ia_css_pipe_get_primary_binarydesc(
158 struct ia_css_frame_info *vf_info, 158 struct ia_css_frame_info *vf_info,
159 unsigned int stage_idx); 159 unsigned int stage_idx);
160 160
161/** @brief Get a binary descriptor for pre gdc stage. 161/* @brief Get a binary descriptor for pre gdc stage.
162 * 162 *
163 * @param[in] pipe 163 * @param[in] pipe
164 * @param[out] pre_gdc_descr 164 * @param[out] pre_gdc_descr
@@ -173,7 +173,7 @@ extern void ia_css_pipe_get_pre_gdc_binarydesc(
173 struct ia_css_frame_info *in_info, 173 struct ia_css_frame_info *in_info,
174 struct ia_css_frame_info *out_info); 174 struct ia_css_frame_info *out_info);
175 175
176/** @brief Get a binary descriptor for gdc stage. 176/* @brief Get a binary descriptor for gdc stage.
177 * 177 *
178 * @param[in] pipe 178 * @param[in] pipe
179 * @param[out] gdc_descr 179 * @param[out] gdc_descr
@@ -188,7 +188,7 @@ extern void ia_css_pipe_get_gdc_binarydesc(
188 struct ia_css_frame_info *in_info, 188 struct ia_css_frame_info *in_info,
189 struct ia_css_frame_info *out_info); 189 struct ia_css_frame_info *out_info);
190 190
191/** @brief Get a binary descriptor for post gdc. 191/* @brief Get a binary descriptor for post gdc.
192 * 192 *
193 * @param[in] pipe 193 * @param[in] pipe
194 * @param[out] post_gdc_descr 194 * @param[out] post_gdc_descr
@@ -205,7 +205,7 @@ extern void ia_css_pipe_get_post_gdc_binarydesc(
205 struct ia_css_frame_info *out_info, 205 struct ia_css_frame_info *out_info,
206 struct ia_css_frame_info *vf_info); 206 struct ia_css_frame_info *vf_info);
207 207
208/** @brief Get a binary descriptor for de. 208/* @brief Get a binary descriptor for de.
209 * 209 *
210 * @param[in] pipe 210 * @param[in] pipe
211 * @param[out] pre_de_descr 211 * @param[out] pre_de_descr
@@ -220,7 +220,7 @@ extern void ia_css_pipe_get_pre_de_binarydesc(
220 struct ia_css_frame_info *in_info, 220 struct ia_css_frame_info *in_info,
221 struct ia_css_frame_info *out_info); 221 struct ia_css_frame_info *out_info);
222 222
223/** @brief Get a binary descriptor for pre anr stage. 223/* @brief Get a binary descriptor for pre anr stage.
224 * 224 *
225 * @param[in] pipe 225 * @param[in] pipe
226 * @param[out] pre_anr_descr 226 * @param[out] pre_anr_descr
@@ -235,7 +235,7 @@ extern void ia_css_pipe_get_pre_anr_binarydesc(
235 struct ia_css_frame_info *in_info, 235 struct ia_css_frame_info *in_info,
236 struct ia_css_frame_info *out_info); 236 struct ia_css_frame_info *out_info);
237 237
238/** @brief Get a binary descriptor for ANR stage. 238/* @brief Get a binary descriptor for ANR stage.
239 * 239 *
240 * @param[in] pipe 240 * @param[in] pipe
241 * @param[out] anr_descr 241 * @param[out] anr_descr
@@ -250,7 +250,7 @@ extern void ia_css_pipe_get_anr_binarydesc(
250 struct ia_css_frame_info *in_info, 250 struct ia_css_frame_info *in_info,
251 struct ia_css_frame_info *out_info); 251 struct ia_css_frame_info *out_info);
252 252
253/** @brief Get a binary descriptor for post anr stage. 253/* @brief Get a binary descriptor for post anr stage.
254 * 254 *
255 * @param[in] pipe 255 * @param[in] pipe
256 * @param[out] post_anr_descr 256 * @param[out] post_anr_descr
@@ -267,7 +267,7 @@ extern void ia_css_pipe_get_post_anr_binarydesc(
267 struct ia_css_frame_info *out_info, 267 struct ia_css_frame_info *out_info,
268 struct ia_css_frame_info *vf_info); 268 struct ia_css_frame_info *vf_info);
269 269
270/** @brief Get a binary descriptor for ldc stage. 270/* @brief Get a binary descriptor for ldc stage.
271 * 271 *
272 * @param[in/out] pipe 272 * @param[in/out] pipe
273 * @param[out] capture_pp_descr 273 * @param[out] capture_pp_descr
@@ -282,7 +282,7 @@ extern void ia_css_pipe_get_ldc_binarydesc(
282 struct ia_css_frame_info *in_info, 282 struct ia_css_frame_info *in_info,
283 struct ia_css_frame_info *out_info); 283 struct ia_css_frame_info *out_info);
284 284
285/** @brief Calculates the required BDS factor 285/* @brief Calculates the required BDS factor
286 * 286 *
287 * @param[in] input_res 287 * @param[in] input_res
288 * @param[in] output_res 288 * @param[in] output_res
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_util.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_util.h
index ba8858759b30..155b6fb4722b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_util.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_util.h
@@ -18,7 +18,7 @@
18#include <ia_css_types.h> 18#include <ia_css_types.h>
19#include <ia_css_frame_public.h> 19#include <ia_css_frame_public.h>
20 20
21/** @brief Get Input format bits per pixel based on stream configuration of this 21/* @brief Get Input format bits per pixel based on stream configuration of this
22 * pipe. 22 * pipe.
23 * 23 *
24 * @param[in] pipe 24 * @param[in] pipe
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/interface/ia_css_util.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/interface/ia_css_util.h
index f8b2e458f876..a8c27676a38b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/interface/ia_css_util.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/interface/ia_css_util.h
@@ -22,7 +22,7 @@
22#include <ia_css_stream_public.h> 22#include <ia_css_stream_public.h>
23#include <ia_css_stream_format.h> 23#include <ia_css_stream_format.h>
24 24
25/** @brief convert "errno" error code to "ia_css_err" error code 25/* @brief convert "errno" error code to "ia_css_err" error code
26 * 26 *
27 * @param[in] "errno" error code 27 * @param[in] "errno" error code
28 * @return "ia_css_err" error code 28 * @return "ia_css_err" error code
@@ -31,7 +31,7 @@
31enum ia_css_err ia_css_convert_errno( 31enum ia_css_err ia_css_convert_errno(
32 int in_err); 32 int in_err);
33 33
34/** @brief check vf frame info. 34/* @brief check vf frame info.
35 * 35 *
36 * @param[in] info 36 * @param[in] info
37 * @return IA_CSS_SUCCESS or error code upon error. 37 * @return IA_CSS_SUCCESS or error code upon error.
@@ -40,7 +40,7 @@ enum ia_css_err ia_css_convert_errno(
40extern enum ia_css_err ia_css_util_check_vf_info( 40extern enum ia_css_err ia_css_util_check_vf_info(
41 const struct ia_css_frame_info * const info); 41 const struct ia_css_frame_info * const info);
42 42
43/** @brief check input configuration. 43/* @brief check input configuration.
44 * 44 *
45 * @param[in] stream_config 45 * @param[in] stream_config
46 * @param[in] must_be_raw 46 * @param[in] must_be_raw
@@ -52,7 +52,7 @@ extern enum ia_css_err ia_css_util_check_input(
52 bool must_be_raw, 52 bool must_be_raw,
53 bool must_be_yuv); 53 bool must_be_yuv);
54 54
55/** @brief check vf and out frame info. 55/* @brief check vf and out frame info.
56 * 56 *
57 * @param[in] out_info 57 * @param[in] out_info
58 * @param[in] vf_info 58 * @param[in] vf_info
@@ -63,7 +63,7 @@ extern enum ia_css_err ia_css_util_check_vf_out_info(
63 const struct ia_css_frame_info * const out_info, 63 const struct ia_css_frame_info * const out_info,
64 const struct ia_css_frame_info * const vf_info); 64 const struct ia_css_frame_info * const vf_info);
65 65
66/** @brief check width and height 66/* @brief check width and height
67 * 67 *
68 * @param[in] width 68 * @param[in] width
69 * @param[in] height 69 * @param[in] height
@@ -75,7 +75,7 @@ extern enum ia_css_err ia_css_util_check_res(
75 unsigned int height); 75 unsigned int height);
76 76
77#ifdef ISP2401 77#ifdef ISP2401
78/** @brief compare resolutions (less or equal) 78/* @brief compare resolutions (less or equal)
79 * 79 *
80 * @param[in] a resolution 80 * @param[in] a resolution
81 * @param[in] b resolution 81 * @param[in] b resolution
@@ -108,7 +108,7 @@ extern bool ia_css_util_resolution_is_even(
108 const struct ia_css_resolution resolution); 108 const struct ia_css_resolution resolution);
109 109
110#endif 110#endif
111/** @brief check width and height 111/* @brief check width and height
112 * 112 *
113 * @param[in] stream_format 113 * @param[in] stream_format
114 * @param[in] two_ppc 114 * @param[in] two_ppc
@@ -119,7 +119,7 @@ extern unsigned int ia_css_util_input_format_bpp(
119 enum ia_css_stream_format stream_format, 119 enum ia_css_stream_format stream_format,
120 bool two_ppc); 120 bool two_ppc);
121 121
122/** @brief check if input format it raw 122/* @brief check if input format it raw
123 * 123 *
124 * @param[in] stream_format 124 * @param[in] stream_format
125 * @return true if the input format is raw or false otherwise 125 * @return true if the input format is raw or false otherwise
@@ -128,7 +128,7 @@ extern unsigned int ia_css_util_input_format_bpp(
128extern bool ia_css_util_is_input_format_raw( 128extern bool ia_css_util_is_input_format_raw(
129 enum ia_css_stream_format stream_format); 129 enum ia_css_stream_format stream_format);
130 130
131/** @brief check if input format it yuv 131/* @brief check if input format it yuv
132 * 132 *
133 * @param[in] stream_format 133 * @param[in] stream_format
134 * @return true if the input format is yuv or false otherwise 134 * @return true if the input format is yuv or false otherwise
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h
index 6720ab55d6f5..9c0cb4a63862 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h
@@ -277,6 +277,6 @@ static inline void csi_rx_be_ctrl_reg_store(
277 277
278 ia_css_device_store_uint32(CSI_RX_BE_CTRL_BASE[ID] + reg*sizeof(hrt_data), value); 278 ia_css_device_store_uint32(CSI_RX_BE_CTRL_BASE[ID] + reg*sizeof(hrt_data), value);
279} 279}
280/** end of DLI */ 280/* end of DLI */
281 281
282#endif /* __CSI_RX_PRIVATE_H_INCLUDED__ */ 282#endif /* __CSI_RX_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_private.h
index 470c92d287fe..4d07c2fe1469 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_private.h
@@ -192,7 +192,7 @@ STORAGE_CLASS_IBUF_CTRL_C void ibuf_ctrl_dump_state(
192 ia_css_print("IBUF controller ID %d Process ID %d isp_sync_state 0x%x\n", ID, i, state->proc_state[i].isp_sync_state); 192 ia_css_print("IBUF controller ID %d Process ID %d isp_sync_state 0x%x\n", ID, i, state->proc_state[i].isp_sync_state);
193 } 193 }
194} 194}
195/** end of NCI */ 195/* end of NCI */
196 196
197/***************************************************** 197/*****************************************************
198 * 198 *
@@ -227,7 +227,7 @@ STORAGE_CLASS_IBUF_CTRL_C void ibuf_ctrl_reg_store(
227 227
228 ia_css_device_store_uint32(IBUF_CTRL_BASE[ID] + reg*sizeof(hrt_data), value); 228 ia_css_device_store_uint32(IBUF_CTRL_BASE[ID] + reg*sizeof(hrt_data), value);
229} 229}
230/** end of DLI */ 230/* end of DLI */
231 231
232 232
233#endif /* __IBUF_CTRL_PRIVATE_H_INCLUDED__ */ 233#endif /* __IBUF_CTRL_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq.c
index 14d1d3b627a9..842ae340ae13 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq.c
@@ -26,7 +26,7 @@
26#include "isys_irq_private.h" 26#include "isys_irq_private.h"
27#endif 27#endif
28 28
29/** Public interface */ 29/* Public interface */
30STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_status_enable( 30STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_status_enable(
31 const isys_irq_ID_t isys_irqc_id) 31 const isys_irq_ID_t isys_irqc_id)
32{ 32{
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_private.h
index c17ce131c9e1..e69f39893bd2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_private.h
@@ -59,7 +59,7 @@ STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_state_dump(
59 state->status, state->edge, state->mask, state->enable, state->level_no); 59 state->status, state->edge, state->mask, state->enable, state->level_no);
60} 60}
61 61
62/** end of NCI */ 62/* end of NCI */
63 63
64/* -------------------------------------------------------+ 64/* -------------------------------------------------------+
65 | Device level interface (DLI) | 65 | Device level interface (DLI) |
@@ -101,7 +101,7 @@ STORAGE_CLASS_ISYS2401_IRQ_C hrt_data isys_irqc_reg_load(
101 return value; 101 return value;
102} 102}
103 103
104/** end of DLI */ 104/* end of DLI */
105 105
106#endif /* defined(USE_INPUT_SYSTEM_VERSION_2401) */ 106#endif /* defined(USE_INPUT_SYSTEM_VERSION_2401) */
107 107
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_private.h
index 1603a09b621a..f946105ddf43 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_private.h
@@ -122,7 +122,7 @@ STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_dump_state(
122 stream2mmio_print_sid_state(&(state->sid_state[i])); 122 stream2mmio_print_sid_state(&(state->sid_state[i]));
123 } 123 }
124} 124}
125/** end of NCI */ 125/* end of NCI */
126 126
127/***************************************************** 127/*****************************************************
128 * 128 *
@@ -163,6 +163,6 @@ STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_reg_store(
163 ia_css_device_store_uint32(STREAM2MMIO_CTRL_BASE[ID] + 163 ia_css_device_store_uint32(STREAM2MMIO_CTRL_BASE[ID] +
164 reg * sizeof(hrt_data), value); 164 reg * sizeof(hrt_data), value);
165} 165}
166/** end of DLI */ 166/* end of DLI */
167 167
168#endif /* __ISYS_STREAM2MMIO_PRIVATE_H_INCLUDED__ */ 168#endif /* __ISYS_STREAM2MMIO_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_private.h
index 3f34b508f0bf..c5bf540eadf1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_private.h
@@ -160,5 +160,5 @@ STORAGE_CLASS_PIXELGEN_C void pixelgen_ctrl_reg_store(
160 160
161 ia_css_device_store_uint32(PIXELGEN_CTRL_BASE[ID] + reg*sizeof(hrt_data), value); 161 ia_css_device_store_uint32(PIXELGEN_CTRL_BASE[ID] + reg*sizeof(hrt_data), value);
162} 162}
163/** end of DLI */ 163/* end of DLI */
164#endif /* __PIXELGEN_PRIVATE_H_INCLUDED__ */ 164#endif /* __PIXELGEN_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_dma_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_dma_global.h
index e7a734a9fc43..1be5c6956d65 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_dma_global.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_dma_global.h
@@ -46,7 +46,7 @@ struct isys2401_dma_port_cfg_s {
46 uint32_t cropping; 46 uint32_t cropping;
47 uint32_t width; 47 uint32_t width;
48 }; 48 };
49/** end of DMA Port */ 49/* end of DMA Port */
50 50
51/************************************************ 51/************************************************
52 * 52 *
@@ -79,7 +79,7 @@ struct isys2401_dma_cfg_s {
79 isys2401_dma_extension extension; 79 isys2401_dma_extension extension;
80 uint32_t height; 80 uint32_t height;
81}; 81};
82/** end of DMA Device */ 82/* end of DMA Device */
83 83
84/* isys2401_dma_channel limits per DMA ID */ 84/* isys2401_dma_channel limits per DMA ID */
85extern const isys2401_dma_channel N_ISYS2401_DMA_CHANNEL_PROCS[N_ISYS2401_DMA_ID]; 85extern const isys2401_dma_channel N_ISYS2401_DMA_CHANNEL_PROCS[N_ISYS2401_DMA_ID];
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/pixelgen_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/pixelgen_global.h
index 216813e42a0a..0bf2feb8bbfb 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/pixelgen_global.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/pixelgen_global.h
@@ -86,6 +86,6 @@ struct pixelgen_prbs_cfg_s {
86 sync_generator_cfg_t sync_gen_cfg; 86 sync_generator_cfg_t sync_gen_cfg;
87}; 87};
88 88
89/** end of Pixel-generator: TPG. ("pixelgen_global.h") */ 89/* end of Pixel-generator: TPG. ("pixelgen_global.h") */
90#endif /* __PIXELGEN_GLOBAL_H_INCLUDED__ */ 90#endif /* __PIXELGEN_GLOBAL_H_INCLUDED__ */
91 91
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/system_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/system_global.h
index 9f7ecac46273..d2e3a2deea2e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/system_global.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/system_global.h
@@ -331,7 +331,7 @@ typedef enum {
331 IBUF_CTRL2_ID, /* map ISYS2401_IBUF_CNTRL_C */ 331 IBUF_CTRL2_ID, /* map ISYS2401_IBUF_CNTRL_C */
332 N_IBUF_CTRL_ID 332 N_IBUF_CTRL_ID
333} ibuf_ctrl_ID_t; 333} ibuf_ctrl_ID_t;
334/** end of Input-buffer Controller */ 334/* end of Input-buffer Controller */
335 335
336/* 336/*
337 * Stream2MMIO. 337 * Stream2MMIO.
@@ -364,7 +364,7 @@ typedef enum {
364 STREAM2MMIO_SID7_ID, 364 STREAM2MMIO_SID7_ID,
365 N_STREAM2MMIO_SID_ID 365 N_STREAM2MMIO_SID_ID
366} stream2mmio_sid_ID_t; 366} stream2mmio_sid_ID_t;
367/** end of Stream2MMIO */ 367/* end of Stream2MMIO */
368 368
369/** 369/**
370 * Input System 2401: CSI-MIPI recevier. 370 * Input System 2401: CSI-MIPI recevier.
@@ -390,7 +390,7 @@ typedef enum {
390 CSI_RX_DLANE3_ID, /* map to DLANE3 in CSI RX */ 390 CSI_RX_DLANE3_ID, /* map to DLANE3 in CSI RX */
391 N_CSI_RX_DLANE_ID 391 N_CSI_RX_DLANE_ID
392} csi_rx_fe_dlane_ID_t; 392} csi_rx_fe_dlane_ID_t;
393/** end of CSI-MIPI receiver */ 393/* end of CSI-MIPI receiver */
394 394
395typedef enum { 395typedef enum {
396 ISYS2401_DMA0_ID = 0, 396 ISYS2401_DMA0_ID = 0,
@@ -406,7 +406,7 @@ typedef enum {
406 PIXELGEN2_ID, 406 PIXELGEN2_ID,
407 N_PIXELGEN_ID 407 N_PIXELGEN_ID
408} pixelgen_ID_t; 408} pixelgen_ID_t;
409/** end of pixel-generator. ("system_global.h") */ 409/* end of pixel-generator. ("system_global.h") */
410 410
411typedef enum { 411typedef enum {
412 INPUT_SYSTEM_CSI_PORT0_ID = 0, 412 INPUT_SYSTEM_CSI_PORT0_ID = 0,
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_api_version.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_api_version.h
index 1f6a55ff5db8..efcd6e1679e8 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_api_version.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_api_version.h
@@ -31,7 +31,7 @@ more details.
31#ifndef __CSS_API_VERSION_H 31#ifndef __CSS_API_VERSION_H
32#define __CSS_API_VERSION_H 32#define __CSS_API_VERSION_H
33 33
34/** @file 34/* @file
35 * CSS API version file. This file contains the version number of the CSS-API. 35 * CSS API version file. This file contains the version number of the CSS-API.
36 * 36 *
37 * This file is generated from a set of input files describing the CSS-API 37 * This file is generated from a set of input files describing the CSS-API
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer.c
index 5a4eabf79ee2..bcfd443f5202 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer.c
@@ -21,7 +21,7 @@
21#endif /* __INLINE_GP_TIMER__ */ 21#endif /* __INLINE_GP_TIMER__ */
22#include "system_local.h" 22#include "system_local.h"
23 23
24/** FIXME: not sure if reg_load(), reg_store() should be API. 24/* FIXME: not sure if reg_load(), reg_store() should be API.
25 */ 25 */
26static uint32_t 26static uint32_t
27gp_timer_reg_load(uint32_t reg); 27gp_timer_reg_load(uint32_t reg);
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h
index 3b5df85fc510..426d022d3a26 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h
@@ -73,7 +73,7 @@ extern void csi_rx_be_ctrl_get_state(
73extern void csi_rx_be_ctrl_dump_state( 73extern void csi_rx_be_ctrl_dump_state(
74 const csi_rx_backend_ID_t ID, 74 const csi_rx_backend_ID_t ID,
75 csi_rx_be_ctrl_state_t *state); 75 csi_rx_be_ctrl_state_t *state);
76/** end of NCI */ 76/* end of NCI */
77 77
78/***************************************************** 78/*****************************************************
79 * 79 *
@@ -130,6 +130,6 @@ extern void csi_rx_be_ctrl_reg_store(
130 const csi_rx_backend_ID_t ID, 130 const csi_rx_backend_ID_t ID,
131 const hrt_address reg, 131 const hrt_address reg,
132 const hrt_data value); 132 const hrt_data value);
133/** end of DLI */ 133/* end of DLI */
134#endif /* USE_INPUT_SYSTEM_VERSION_2401 */ 134#endif /* USE_INPUT_SYSTEM_VERSION_2401 */
135#endif /* __CSI_RX_PUBLIC_H_INCLUDED__ */ 135#endif /* __CSI_RX_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ibuf_ctrl_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ibuf_ctrl_public.h
index 1ac0e64e539c..98ee9947fb8e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ibuf_ctrl_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ibuf_ctrl_public.h
@@ -54,7 +54,7 @@ STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_get_proc_state(
54STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_dump_state( 54STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_dump_state(
55 const ibuf_ctrl_ID_t ID, 55 const ibuf_ctrl_ID_t ID,
56 ibuf_ctrl_state_t *state); 56 ibuf_ctrl_state_t *state);
57/** end of NCI */ 57/* end of NCI */
58 58
59/***************************************************** 59/*****************************************************
60 * 60 *
@@ -87,7 +87,7 @@ STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_reg_store(
87 const ibuf_ctrl_ID_t ID, 87 const ibuf_ctrl_ID_t ID,
88 const hrt_address reg, 88 const hrt_address reg,
89 const hrt_data value); 89 const hrt_data value);
90/** end of DLI */ 90/* end of DLI */
91 91
92#endif /* USE_INPUT_SYSTEM_VERSION_2401 */ 92#endif /* USE_INPUT_SYSTEM_VERSION_2401 */
93#endif /* __IBUF_CTRL_PUBLIC_H_INCLUDED__ */ 93#endif /* __IBUF_CTRL_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h
index a025ad562bd2..0d978e5911c0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h
@@ -49,7 +49,7 @@
49 49
50/* Arithmetic */ 50/* Arithmetic */
51 51
52/** @brief bitwise AND 52/* @brief bitwise AND
53 * 53 *
54 * @param[in] _a first argument 54 * @param[in] _a first argument
55 * @param[in] _b second argument 55 * @param[in] _b second argument
@@ -63,7 +63,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_and(
63 const tvector1w _a, 63 const tvector1w _a,
64 const tvector1w _b); 64 const tvector1w _b);
65 65
66/** @brief bitwise OR 66/* @brief bitwise OR
67 * 67 *
68 * @param[in] _a first argument 68 * @param[in] _a first argument
69 * @param[in] _b second argument 69 * @param[in] _b second argument
@@ -77,7 +77,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_or(
77 const tvector1w _a, 77 const tvector1w _a,
78 const tvector1w _b); 78 const tvector1w _b);
79 79
80/** @brief bitwise XOR 80/* @brief bitwise XOR
81 * 81 *
82 * @param[in] _a first argument 82 * @param[in] _a first argument
83 * @param[in] _b second argument 83 * @param[in] _b second argument
@@ -91,7 +91,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_xor(
91 const tvector1w _a, 91 const tvector1w _a,
92 const tvector1w _b); 92 const tvector1w _b);
93 93
94/** @brief bitwise inverse 94/* @brief bitwise inverse
95 * 95 *
96 * @param[in] _a first argument 96 * @param[in] _a first argument
97 * 97 *
@@ -105,7 +105,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_inv(
105 105
106/* Additive */ 106/* Additive */
107 107
108/** @brief addition 108/* @brief addition
109 * 109 *
110 * @param[in] _a first argument 110 * @param[in] _a first argument
111 * @param[in] _b second argument 111 * @param[in] _b second argument
@@ -120,7 +120,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_add(
120 const tvector1w _a, 120 const tvector1w _a,
121 const tvector1w _b); 121 const tvector1w _b);
122 122
123/** @brief subtraction 123/* @brief subtraction
124 * 124 *
125 * @param[in] _a first argument 125 * @param[in] _a first argument
126 * @param[in] _b second argument 126 * @param[in] _b second argument
@@ -135,7 +135,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_sub(
135 const tvector1w _a, 135 const tvector1w _a,
136 const tvector1w _b); 136 const tvector1w _b);
137 137
138/** @brief saturated addition 138/* @brief saturated addition
139 * 139 *
140 * @param[in] _a first argument 140 * @param[in] _a first argument
141 * @param[in] _b second argument 141 * @param[in] _b second argument
@@ -150,7 +150,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_addsat(
150 const tvector1w _a, 150 const tvector1w _a,
151 const tvector1w _b); 151 const tvector1w _b);
152 152
153/** @brief saturated subtraction 153/* @brief saturated subtraction
154 * 154 *
155 * @param[in] _a first argument 155 * @param[in] _a first argument
156 * @param[in] _b second argument 156 * @param[in] _b second argument
@@ -166,7 +166,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subsat(
166 const tvector1w _b); 166 const tvector1w _b);
167 167
168#ifdef ISP2401 168#ifdef ISP2401
169/** @brief Unsigned saturated subtraction 169/* @brief Unsigned saturated subtraction
170 * 170 *
171 * @param[in] _a first argument 171 * @param[in] _a first argument
172 * @param[in] _b second argument 172 * @param[in] _b second argument
@@ -182,7 +182,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w_unsigned OP_1w_subsat_u(
182 const tvector1w_unsigned _b); 182 const tvector1w_unsigned _b);
183 183
184#endif 184#endif
185/** @brief subtraction with shift right and rounding 185/* @brief subtraction with shift right and rounding
186 * 186 *
187 * @param[in] _a first argument 187 * @param[in] _a first argument
188 * @param[in] _b second argument 188 * @param[in] _b second argument
@@ -202,7 +202,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subasr1(
202 const tvector1w _a, 202 const tvector1w _a,
203 const tvector1w _b); 203 const tvector1w _b);
204 204
205/** @brief Subtraction with shift right and rounding 205/* @brief Subtraction with shift right and rounding
206 * 206 *
207 * @param[in] _a first operand 207 * @param[in] _a first operand
208 * @param[in] _b second operand 208 * @param[in] _b second operand
@@ -217,7 +217,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subhalfrnd(
217 const tvector1w _a, 217 const tvector1w _a,
218 const tvector1w _b); 218 const tvector1w _b);
219 219
220/** @brief Subtraction with shift right and no rounding 220/* @brief Subtraction with shift right and no rounding
221 * 221 *
222 * @param[in] _a first operand 222 * @param[in] _a first operand
223 * @param[in] _b second operand 223 * @param[in] _b second operand
@@ -233,7 +233,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subhalf(
233 const tvector1w _b); 233 const tvector1w _b);
234 234
235 235
236/** @brief saturated absolute value 236/* @brief saturated absolute value
237 * 237 *
238 * @param[in] _a input 238 * @param[in] _a input
239 * 239 *
@@ -247,7 +247,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subhalf(
247STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_abs( 247STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_abs(
248 const tvector1w _a); 248 const tvector1w _a);
249 249
250/** @brief saturated absolute difference 250/* @brief saturated absolute difference
251 * 251 *
252 * @param[in] _a first argument 252 * @param[in] _a first argument
253 * @param[in] _b second argument 253 * @param[in] _b second argument
@@ -264,7 +264,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subabssat(
264 264
265/* Multiplicative */ 265/* Multiplicative */
266 266
267/** @brief doubling multiply 267/* @brief doubling multiply
268 * 268 *
269 * @param[in] _a first argument 269 * @param[in] _a first argument
270 * @param[in] _b second argument 270 * @param[in] _b second argument
@@ -281,7 +281,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector2w OP_1w_muld(
281 const tvector1w _a, 281 const tvector1w _a,
282 const tvector1w _b); 282 const tvector1w _b);
283 283
284/** @brief integer multiply 284/* @brief integer multiply
285 * 285 *
286 * @param[in] _a first argument 286 * @param[in] _a first argument
287 * @param[in] _b second argument 287 * @param[in] _b second argument
@@ -298,7 +298,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_mul(
298 const tvector1w _a, 298 const tvector1w _a,
299 const tvector1w _b); 299 const tvector1w _b);
300 300
301/** @brief fractional saturating multiply 301/* @brief fractional saturating multiply
302 * 302 *
303 * @param[in] _a first argument 303 * @param[in] _a first argument
304 * @param[in] _b second argument 304 * @param[in] _b second argument
@@ -316,7 +316,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_qmul(
316 const tvector1w _a, 316 const tvector1w _a,
317 const tvector1w _b); 317 const tvector1w _b);
318 318
319/** @brief fractional saturating multiply with rounding 319/* @brief fractional saturating multiply with rounding
320 * 320 *
321 * @param[in] _a first argument 321 * @param[in] _a first argument
322 * @param[in] _b second argument 322 * @param[in] _b second argument
@@ -337,7 +337,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_qrmul(
337 337
338/* Comparative */ 338/* Comparative */
339 339
340/** @brief equal 340/* @brief equal
341 * 341 *
342 * @param[in] _a first argument 342 * @param[in] _a first argument
343 * @param[in] _b second argument 343 * @param[in] _b second argument
@@ -351,7 +351,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_eq(
351 const tvector1w _a, 351 const tvector1w _a,
352 const tvector1w _b); 352 const tvector1w _b);
353 353
354/** @brief not equal 354/* @brief not equal
355 * 355 *
356 * @param[in] _a first argument 356 * @param[in] _a first argument
357 * @param[in] _b second argument 357 * @param[in] _b second argument
@@ -365,7 +365,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_ne(
365 const tvector1w _a, 365 const tvector1w _a,
366 const tvector1w _b); 366 const tvector1w _b);
367 367
368/** @brief less or equal 368/* @brief less or equal
369 * 369 *
370 * @param[in] _a first argument 370 * @param[in] _a first argument
371 * @param[in] _b second argument 371 * @param[in] _b second argument
@@ -379,7 +379,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_le(
379 const tvector1w _a, 379 const tvector1w _a,
380 const tvector1w _b); 380 const tvector1w _b);
381 381
382/** @brief less then 382/* @brief less then
383 * 383 *
384 * @param[in] _a first argument 384 * @param[in] _a first argument
385 * @param[in] _b second argument 385 * @param[in] _b second argument
@@ -393,7 +393,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_lt(
393 const tvector1w _a, 393 const tvector1w _a,
394 const tvector1w _b); 394 const tvector1w _b);
395 395
396/** @brief greater or equal 396/* @brief greater or equal
397 * 397 *
398 * @param[in] _a first argument 398 * @param[in] _a first argument
399 * @param[in] _b second argument 399 * @param[in] _b second argument
@@ -407,7 +407,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_ge(
407 const tvector1w _a, 407 const tvector1w _a,
408 const tvector1w _b); 408 const tvector1w _b);
409 409
410/** @brief greater than 410/* @brief greater than
411 * 411 *
412 * @param[in] _a first argument 412 * @param[in] _a first argument
413 * @param[in] _b second argument 413 * @param[in] _b second argument
@@ -423,7 +423,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_gt(
423 423
424/* Shift */ 424/* Shift */
425 425
426/** @brief aritmetic shift right 426/* @brief aritmetic shift right
427 * 427 *
428 * @param[in] _a input 428 * @param[in] _a input
429 * @param[in] _b shift amount 429 * @param[in] _b shift amount
@@ -441,7 +441,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_asr(
441 const tvector1w _a, 441 const tvector1w _a,
442 const tvector1w _b); 442 const tvector1w _b);
443 443
444/** @brief aritmetic shift right with rounding 444/* @brief aritmetic shift right with rounding
445 * 445 *
446 * @param[in] _a input 446 * @param[in] _a input
447 * @param[in] _b shift amount 447 * @param[in] _b shift amount
@@ -460,7 +460,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_asrrnd(
460 const tvector1w _a, 460 const tvector1w _a,
461 const tvector1w _b); 461 const tvector1w _b);
462 462
463/** @brief saturating arithmetic shift left 463/* @brief saturating arithmetic shift left
464 * 464 *
465 * @param[in] _a input 465 * @param[in] _a input
466 * @param[in] _b shift amount 466 * @param[in] _b shift amount
@@ -480,7 +480,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_asl(
480 const tvector1w _a, 480 const tvector1w _a,
481 const tvector1w _b); 481 const tvector1w _b);
482 482
483/** @brief saturating aritmetic shift left 483/* @brief saturating aritmetic shift left
484 * 484 *
485 * @param[in] _a input 485 * @param[in] _a input
486 * @param[in] _b shift amount 486 * @param[in] _b shift amount
@@ -493,7 +493,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_aslsat(
493 const tvector1w _a, 493 const tvector1w _a,
494 const tvector1w _b); 494 const tvector1w _b);
495 495
496/** @brief logical shift left 496/* @brief logical shift left
497 * 497 *
498 * @param[in] _a input 498 * @param[in] _a input
499 * @param[in] _b shift amount 499 * @param[in] _b shift amount
@@ -510,7 +510,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_lsl(
510 const tvector1w _a, 510 const tvector1w _a,
511 const tvector1w _b); 511 const tvector1w _b);
512 512
513/** @brief logical shift right 513/* @brief logical shift right
514 * 514 *
515 * @param[in] _a input 515 * @param[in] _a input
516 * @param[in] _b shift amount 516 * @param[in] _b shift amount
@@ -528,7 +528,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_lsr(
528 const tvector1w _b); 528 const tvector1w _b);
529 529
530#ifdef ISP2401 530#ifdef ISP2401
531/** @brief bidirectional saturating arithmetic shift 531/* @brief bidirectional saturating arithmetic shift
532 * 532 *
533 * @param[in] _a input 533 * @param[in] _a input
534 * @param[in] _b shift amount 534 * @param[in] _b shift amount
@@ -546,7 +546,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_ashift_sat(
546 const tvector1w _a, 546 const tvector1w _a,
547 const tvector1w _b); 547 const tvector1w _b);
548 548
549/** @brief bidirectional non-saturating arithmetic shift 549/* @brief bidirectional non-saturating arithmetic shift
550 * 550 *
551 * @param[in] _a input 551 * @param[in] _a input
552 * @param[in] _b shift amount 552 * @param[in] _b shift amount
@@ -565,7 +565,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_ashift(
565 const tvector1w _b); 565 const tvector1w _b);
566 566
567 567
568/** @brief bidirectional logical shift 568/* @brief bidirectional logical shift
569 * 569 *
570 * @param[in] _a input 570 * @param[in] _a input
571 * @param[in] _b shift amount 571 * @param[in] _b shift amount
@@ -588,7 +588,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_lshift(
588#endif 588#endif
589/* Cast */ 589/* Cast */
590 590
591/** @brief Cast from int to 1w 591/* @brief Cast from int to 1w
592 * 592 *
593 * @param[in] _a input 593 * @param[in] _a input
594 * 594 *
@@ -601,7 +601,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_lshift(
601STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_int_cast_to_1w( 601STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_int_cast_to_1w(
602 const int _a); 602 const int _a);
603 603
604/** @brief Cast from 1w to int 604/* @brief Cast from 1w to int
605 * 605 *
606 * @param[in] _a input 606 * @param[in] _a input
607 * 607 *
@@ -614,7 +614,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_int_cast_to_1w(
614STORAGE_CLASS_ISP_OP1W_FUNC_H int OP_1w_cast_to_int( 614STORAGE_CLASS_ISP_OP1W_FUNC_H int OP_1w_cast_to_int(
615 const tvector1w _a); 615 const tvector1w _a);
616 616
617/** @brief Cast from 1w to 2w 617/* @brief Cast from 1w to 2w
618 * 618 *
619 * @param[in] _a input 619 * @param[in] _a input
620 * 620 *
@@ -627,7 +627,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H int OP_1w_cast_to_int(
627STORAGE_CLASS_ISP_OP1W_FUNC_H tvector2w OP_1w_cast_to_2w( 627STORAGE_CLASS_ISP_OP1W_FUNC_H tvector2w OP_1w_cast_to_2w(
628 const tvector1w _a); 628 const tvector1w _a);
629 629
630/** @brief Cast from 2w to 1w 630/* @brief Cast from 2w to 1w
631 * 631 *
632 * @param[in] _a input 632 * @param[in] _a input
633 * 633 *
@@ -641,7 +641,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_2w_cast_to_1w(
641 const tvector2w _a); 641 const tvector2w _a);
642 642
643 643
644/** @brief Cast from 2w to 1w with saturation 644/* @brief Cast from 2w to 1w with saturation
645 * 645 *
646 * @param[in] _a input 646 * @param[in] _a input
647 * 647 *
@@ -657,7 +657,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_2w_sat_cast_to_1w(
657 657
658/* clipping */ 658/* clipping */
659 659
660/** @brief Clip asymmetrical 660/* @brief Clip asymmetrical
661 * 661 *
662 * @param[in] _a first argument 662 * @param[in] _a first argument
663 * @param[in] _b second argument 663 * @param[in] _b second argument
@@ -673,7 +673,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_clip_asym(
673 const tvector1w _a, 673 const tvector1w _a,
674 const tvector1w _b); 674 const tvector1w _b);
675 675
676/** @brief Clip zero 676/* @brief Clip zero
677 * 677 *
678 * @param[in] _a first argument 678 * @param[in] _a first argument
679 * @param[in] _b second argument 679 * @param[in] _b second argument
@@ -691,7 +691,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_clipz(
691 691
692/* division */ 692/* division */
693 693
694/** @brief Truncated division 694/* @brief Truncated division
695 * 695 *
696 * @param[in] _a first argument 696 * @param[in] _a first argument
697 * @param[in] _b second argument 697 * @param[in] _b second argument
@@ -708,7 +708,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_div(
708 const tvector1w _a, 708 const tvector1w _a,
709 const tvector1w _b); 709 const tvector1w _b);
710 710
711/** @brief Fractional saturating divide 711/* @brief Fractional saturating divide
712 * 712 *
713 * @param[in] _a first argument 713 * @param[in] _a first argument
714 * @param[in] _b second argument 714 * @param[in] _b second argument
@@ -726,7 +726,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_qdiv(
726 const tvector1w _a, 726 const tvector1w _a,
727 const tvector1w _b); 727 const tvector1w _b);
728 728
729/** @brief Modulo 729/* @brief Modulo
730 * 730 *
731 * @param[in] _a first argument 731 * @param[in] _a first argument
732 * @param[in] _b second argument 732 * @param[in] _b second argument
@@ -741,7 +741,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_mod(
741 const tvector1w _a, 741 const tvector1w _a,
742 const tvector1w _b); 742 const tvector1w _b);
743 743
744/** @brief Unsigned integer Square root 744/* @brief Unsigned integer Square root
745 * 745 *
746 * @param[in] _a input 746 * @param[in] _a input
747 * 747 *
@@ -754,7 +754,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w_unsigned OP_1w_sqrt_u(
754 754
755/* Miscellaneous */ 755/* Miscellaneous */
756 756
757/** @brief Multiplexer 757/* @brief Multiplexer
758 * 758 *
759 * @param[in] _a first argument 759 * @param[in] _a first argument
760 * @param[in] _b second argument 760 * @param[in] _b second argument
@@ -770,7 +770,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_mux(
770 const tvector1w _b, 770 const tvector1w _b,
771 const tflags _c); 771 const tflags _c);
772 772
773/** @brief Average without rounding 773/* @brief Average without rounding
774 * 774 *
775 * @param[in] _a first operand 775 * @param[in] _a first operand
776 * @param[in] _b second operand 776 * @param[in] _b second operand
@@ -786,7 +786,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_avg(
786 const tvector1w _a, 786 const tvector1w _a,
787 const tvector1w _b); 787 const tvector1w _b);
788 788
789/** @brief Average with rounding 789/* @brief Average with rounding
790 * 790 *
791 * @param[in] _a first argument 791 * @param[in] _a first argument
792 * @param[in] _b second argument 792 * @param[in] _b second argument
@@ -802,7 +802,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_avgrnd(
802 const tvector1w _a, 802 const tvector1w _a,
803 const tvector1w _b); 803 const tvector1w _b);
804 804
805/** @brief Minimum 805/* @brief Minimum
806 * 806 *
807 * @param[in] _a first argument 807 * @param[in] _a first argument
808 * @param[in] _b second argument 808 * @param[in] _b second argument
@@ -816,7 +816,7 @@ STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_min(
816 const tvector1w _a, 816 const tvector1w _a,
817 const tvector1w _b); 817 const tvector1w _b);
818 818
819/** @brief Maximum 819/* @brief Maximum
820 * 820 *
821 * @param[in] _a first argument 821 * @param[in] _a first argument
822 * @param[in] _b second argument 822 * @param[in] _b second argument
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h
index cf7e7314842d..7575d260b837 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h
@@ -48,7 +48,7 @@
48 48
49/* Arithmetic */ 49/* Arithmetic */
50 50
51/** @brief bitwise AND 51/* @brief bitwise AND
52 * 52 *
53 * @param[in] _a first argument 53 * @param[in] _a first argument
54 * @param[in] _b second argument 54 * @param[in] _b second argument
@@ -62,7 +62,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_and(
62 const tvector2w _a, 62 const tvector2w _a,
63 const tvector2w _b); 63 const tvector2w _b);
64 64
65/** @brief bitwise OR 65/* @brief bitwise OR
66 * 66 *
67 * @param[in] _a first argument 67 * @param[in] _a first argument
68 * @param[in] _b second argument 68 * @param[in] _b second argument
@@ -76,7 +76,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_or(
76 const tvector2w _a, 76 const tvector2w _a,
77 const tvector2w _b); 77 const tvector2w _b);
78 78
79/** @brief bitwise XOR 79/* @brief bitwise XOR
80 * 80 *
81 * @param[in] _a first argument 81 * @param[in] _a first argument
82 * @param[in] _b second argument 82 * @param[in] _b second argument
@@ -90,7 +90,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_xor(
90 const tvector2w _a, 90 const tvector2w _a,
91 const tvector2w _b); 91 const tvector2w _b);
92 92
93/** @brief bitwise inverse 93/* @brief bitwise inverse
94 * 94 *
95 * @param[in] _a first argument 95 * @param[in] _a first argument
96 * 96 *
@@ -104,7 +104,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_inv(
104 104
105/* Additive */ 105/* Additive */
106 106
107/** @brief addition 107/* @brief addition
108 * 108 *
109 * @param[in] _a first argument 109 * @param[in] _a first argument
110 * @param[in] _b second argument 110 * @param[in] _b second argument
@@ -119,7 +119,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_add(
119 const tvector2w _a, 119 const tvector2w _a,
120 const tvector2w _b); 120 const tvector2w _b);
121 121
122/** @brief subtraction 122/* @brief subtraction
123 * 123 *
124 * @param[in] _a first argument 124 * @param[in] _a first argument
125 * @param[in] _b second argument 125 * @param[in] _b second argument
@@ -134,7 +134,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_sub(
134 const tvector2w _a, 134 const tvector2w _a,
135 const tvector2w _b); 135 const tvector2w _b);
136 136
137/** @brief saturated addition 137/* @brief saturated addition
138 * 138 *
139 * @param[in] _a first argument 139 * @param[in] _a first argument
140 * @param[in] _b second argument 140 * @param[in] _b second argument
@@ -149,7 +149,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_addsat(
149 const tvector2w _a, 149 const tvector2w _a,
150 const tvector2w _b); 150 const tvector2w _b);
151 151
152/** @brief saturated subtraction 152/* @brief saturated subtraction
153 * 153 *
154 * @param[in] _a first argument 154 * @param[in] _a first argument
155 * @param[in] _b second argument 155 * @param[in] _b second argument
@@ -164,7 +164,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subsat(
164 const tvector2w _a, 164 const tvector2w _a,
165 const tvector2w _b); 165 const tvector2w _b);
166 166
167/** @brief subtraction with shift right and rounding 167/* @brief subtraction with shift right and rounding
168 * 168 *
169 * @param[in] _a first argument 169 * @param[in] _a first argument
170 * @param[in] _b second argument 170 * @param[in] _b second argument
@@ -184,7 +184,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subasr1(
184 const tvector2w _a, 184 const tvector2w _a,
185 const tvector2w _b); 185 const tvector2w _b);
186 186
187/** @brief Subtraction with shift right and rounding 187/* @brief Subtraction with shift right and rounding
188 * 188 *
189 * @param[in] _a first operand 189 * @param[in] _a first operand
190 * @param[in] _b second operand 190 * @param[in] _b second operand
@@ -199,7 +199,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subhalfrnd(
199 const tvector2w _a, 199 const tvector2w _a,
200 const tvector2w _b); 200 const tvector2w _b);
201 201
202/** @brief Subtraction with shift right and no rounding 202/* @brief Subtraction with shift right and no rounding
203 * 203 *
204 * @param[in] _a first operand 204 * @param[in] _a first operand
205 * @param[in] _b second operand 205 * @param[in] _b second operand
@@ -214,7 +214,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subhalf(
214 const tvector2w _a, 214 const tvector2w _a,
215 const tvector2w _b); 215 const tvector2w _b);
216 216
217/** @brief saturated absolute value 217/* @brief saturated absolute value
218 * 218 *
219 * @param[in] _a input 219 * @param[in] _a input
220 * 220 *
@@ -228,7 +228,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subhalf(
228STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_abs( 228STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_abs(
229 const tvector2w _a); 229 const tvector2w _a);
230 230
231/** @brief saturated absolute difference 231/* @brief saturated absolute difference
232 * 232 *
233 * @param[in] _a first argument 233 * @param[in] _a first argument
234 * @param[in] _b second argument 234 * @param[in] _b second argument
@@ -245,7 +245,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subabssat(
245 245
246/* Multiplicative */ 246/* Multiplicative */
247 247
248/** @brief integer multiply 248/* @brief integer multiply
249 * 249 *
250 * @param[in] _a first argument 250 * @param[in] _a first argument
251 * @param[in] _b second argument 251 * @param[in] _b second argument
@@ -262,7 +262,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_mul(
262 const tvector2w _a, 262 const tvector2w _a,
263 const tvector2w _b); 263 const tvector2w _b);
264 264
265/** @brief fractional saturating multiply 265/* @brief fractional saturating multiply
266 * 266 *
267 * @param[in] _a first argument 267 * @param[in] _a first argument
268 * @param[in] _b second argument 268 * @param[in] _b second argument
@@ -279,7 +279,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_qmul(
279 const tvector2w _a, 279 const tvector2w _a,
280 const tvector2w _b); 280 const tvector2w _b);
281 281
282/** @brief fractional saturating multiply with rounding 282/* @brief fractional saturating multiply with rounding
283 * 283 *
284 * @param[in] _a first argument 284 * @param[in] _a first argument
285 * @param[in] _b second argument 285 * @param[in] _b second argument
@@ -301,7 +301,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_qrmul(
301 301
302/* Comparative */ 302/* Comparative */
303 303
304/** @brief equal 304/* @brief equal
305 * 305 *
306 * @param[in] _a first argument 306 * @param[in] _a first argument
307 * @param[in] _b second argument 307 * @param[in] _b second argument
@@ -315,7 +315,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_eq(
315 const tvector2w _a, 315 const tvector2w _a,
316 const tvector2w _b); 316 const tvector2w _b);
317 317
318/** @brief not equal 318/* @brief not equal
319 * 319 *
320 * @param[in] _a first argument 320 * @param[in] _a first argument
321 * @param[in] _b second argument 321 * @param[in] _b second argument
@@ -329,7 +329,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_ne(
329 const tvector2w _a, 329 const tvector2w _a,
330 const tvector2w _b); 330 const tvector2w _b);
331 331
332/** @brief less or equal 332/* @brief less or equal
333 * 333 *
334 * @param[in] _a first argument 334 * @param[in] _a first argument
335 * @param[in] _b second argument 335 * @param[in] _b second argument
@@ -343,7 +343,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_le(
343 const tvector2w _a, 343 const tvector2w _a,
344 const tvector2w _b); 344 const tvector2w _b);
345 345
346/** @brief less then 346/* @brief less then
347 * 347 *
348 * @param[in] _a first argument 348 * @param[in] _a first argument
349 * @param[in] _b second argument 349 * @param[in] _b second argument
@@ -357,7 +357,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_lt(
357 const tvector2w _a, 357 const tvector2w _a,
358 const tvector2w _b); 358 const tvector2w _b);
359 359
360/** @brief greater or equal 360/* @brief greater or equal
361 * 361 *
362 * @param[in] _a first argument 362 * @param[in] _a first argument
363 * @param[in] _b second argument 363 * @param[in] _b second argument
@@ -371,7 +371,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_ge(
371 const tvector2w _a, 371 const tvector2w _a,
372 const tvector2w _b); 372 const tvector2w _b);
373 373
374/** @brief greater than 374/* @brief greater than
375 * 375 *
376 * @param[in] _a first argument 376 * @param[in] _a first argument
377 * @param[in] _b second argument 377 * @param[in] _b second argument
@@ -387,7 +387,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_gt(
387 387
388/* Shift */ 388/* Shift */
389 389
390/** @brief aritmetic shift right 390/* @brief aritmetic shift right
391 * 391 *
392 * @param[in] _a input 392 * @param[in] _a input
393 * @param[in] _b shift amount 393 * @param[in] _b shift amount
@@ -404,7 +404,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_asr(
404 const tvector2w _a, 404 const tvector2w _a,
405 const tvector2w _b); 405 const tvector2w _b);
406 406
407/** @brief aritmetic shift right with rounding 407/* @brief aritmetic shift right with rounding
408 * 408 *
409 * @param[in] _a input 409 * @param[in] _a input
410 * @param[in] _b shift amount 410 * @param[in] _b shift amount
@@ -423,7 +423,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_asrrnd(
423 const tvector2w _a, 423 const tvector2w _a,
424 const tvector2w _b); 424 const tvector2w _b);
425 425
426/** @brief saturating aritmetic shift left 426/* @brief saturating aritmetic shift left
427 * 427 *
428 * @param[in] _a input 428 * @param[in] _a input
429 * @param[in] _b shift amount 429 * @param[in] _b shift amount
@@ -443,7 +443,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_asl(
443 const tvector2w _a, 443 const tvector2w _a,
444 const tvector2w _b); 444 const tvector2w _b);
445 445
446/** @brief saturating aritmetic shift left 446/* @brief saturating aritmetic shift left
447 * 447 *
448 * @param[in] _a input 448 * @param[in] _a input
449 * @param[in] _b shift amount 449 * @param[in] _b shift amount
@@ -456,7 +456,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_aslsat(
456 const tvector2w _a, 456 const tvector2w _a,
457 const tvector2w _b); 457 const tvector2w _b);
458 458
459/** @brief logical shift left 459/* @brief logical shift left
460 * 460 *
461 * @param[in] _a input 461 * @param[in] _a input
462 * @param[in] _b shift amount 462 * @param[in] _b shift amount
@@ -473,7 +473,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_lsl(
473 const tvector2w _a, 473 const tvector2w _a,
474 const tvector2w _b); 474 const tvector2w _b);
475 475
476/** @brief logical shift right 476/* @brief logical shift right
477 * 477 *
478 * @param[in] _a input 478 * @param[in] _a input
479 * @param[in] _b shift amount 479 * @param[in] _b shift amount
@@ -492,7 +492,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_lsr(
492 492
493/* clipping */ 493/* clipping */
494 494
495/** @brief Clip asymmetrical 495/* @brief Clip asymmetrical
496 * 496 *
497 * @param[in] _a first argument 497 * @param[in] _a first argument
498 * @param[in] _b second argument 498 * @param[in] _b second argument
@@ -507,7 +507,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_clip_asym(
507 const tvector2w _a, 507 const tvector2w _a,
508 const tvector2w _b); 508 const tvector2w _b);
509 509
510/** @brief Clip zero 510/* @brief Clip zero
511 * 511 *
512 * @param[in] _a first argument 512 * @param[in] _a first argument
513 * @param[in] _b second argument 513 * @param[in] _b second argument
@@ -524,7 +524,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_clipz(
524 524
525/* division */ 525/* division */
526 526
527/** @brief Truncated division 527/* @brief Truncated division
528 * 528 *
529 * @param[in] _a first argument 529 * @param[in] _a first argument
530 * @param[in] _b second argument 530 * @param[in] _b second argument
@@ -541,7 +541,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_div(
541 const tvector2w _a, 541 const tvector2w _a,
542 const tvector2w _b); 542 const tvector2w _b);
543 543
544/** @brief Saturating truncated division 544/* @brief Saturating truncated division
545 * 545 *
546 * @param[in] _a first argument 546 * @param[in] _a first argument
547 * @param[in] _b second argument 547 * @param[in] _b second argument
@@ -559,7 +559,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector1w OP_2w_divh(
559 const tvector2w _a, 559 const tvector2w _a,
560 const tvector1w _b); 560 const tvector1w _b);
561 561
562/** @brief Modulo 562/* @brief Modulo
563 * 563 *
564 * @param[in] _a first argument 564 * @param[in] _a first argument
565 * @param[in] _b second argument 565 * @param[in] _b second argument
@@ -572,7 +572,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_mod(
572 const tvector2w _a, 572 const tvector2w _a,
573 const tvector2w _b); 573 const tvector2w _b);
574 574
575/** @brief Unsigned Integer Square root 575/* @brief Unsigned Integer Square root
576 * 576 *
577 * @param[in] _a input 577 * @param[in] _a input
578 * 578 *
@@ -585,7 +585,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector1w_unsigned OP_2w_sqrt_u(
585 585
586/* Miscellaneous */ 586/* Miscellaneous */
587 587
588/** @brief Multiplexer 588/* @brief Multiplexer
589 * 589 *
590 * @param[in] _a first argument 590 * @param[in] _a first argument
591 * @param[in] _b second argument 591 * @param[in] _b second argument
@@ -601,7 +601,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_mux(
601 const tvector2w _b, 601 const tvector2w _b,
602 const tflags _c); 602 const tflags _c);
603 603
604/** @brief Average without rounding 604/* @brief Average without rounding
605 * 605 *
606 * @param[in] _a first operand 606 * @param[in] _a first operand
607 * @param[in] _b second operand 607 * @param[in] _b second operand
@@ -617,7 +617,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_avg(
617 const tvector2w _a, 617 const tvector2w _a,
618 const tvector2w _b); 618 const tvector2w _b);
619 619
620/** @brief Average with rounding 620/* @brief Average with rounding
621 * 621 *
622 * @param[in] _a first argument 622 * @param[in] _a first argument
623 * @param[in] _b second argument 623 * @param[in] _b second argument
@@ -633,7 +633,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_avgrnd(
633 const tvector2w _a, 633 const tvector2w _a,
634 const tvector2w _b); 634 const tvector2w _b);
635 635
636/** @brief Minimum 636/* @brief Minimum
637 * 637 *
638 * @param[in] _a first argument 638 * @param[in] _a first argument
639 * @param[in] _b second argument 639 * @param[in] _b second argument
@@ -647,7 +647,7 @@ STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_min(
647 const tvector2w _a, 647 const tvector2w _a,
648 const tvector2w _b); 648 const tvector2w _b);
649 649
650/** @brief Maximum 650/* @brief Maximum
651 * 651 *
652 * @param[in] _a first argument 652 * @param[in] _a first argument
653 * @param[in] _b second argument 653 * @param[in] _b second argument
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_stream2mmio_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_stream2mmio_public.h
index 5624cfcfa015..6c53ca9df96c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_stream2mmio_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_stream2mmio_public.h
@@ -43,7 +43,7 @@ STORAGE_CLASS_STREAM2MMIO_H void stream2mmio_get_sid_state(
43 const stream2mmio_ID_t ID, 43 const stream2mmio_ID_t ID,
44 const stream2mmio_sid_ID_t sid_id, 44 const stream2mmio_sid_ID_t sid_id,
45 stream2mmio_sid_state_t *state); 45 stream2mmio_sid_state_t *state);
46/** end of NCI */ 46/* end of NCI */
47 47
48/***************************************************** 48/*****************************************************
49 * 49 *
@@ -96,6 +96,6 @@ STORAGE_CLASS_STREAM2MMIO_H void stream2mmio_reg_store(
96 const stream2mmio_ID_t ID, 96 const stream2mmio_ID_t ID,
97 const hrt_address reg, 97 const hrt_address reg,
98 const hrt_data value); 98 const hrt_data value);
99/** end of DLI */ 99/* end of DLI */
100 100
101#endif /* __ISYS_STREAM2MMIO_PUBLIC_H_INCLUDED__ */ 101#endif /* __ISYS_STREAM2MMIO_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/pixelgen_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/pixelgen_public.h
index c0f3f3ea32d7..f597e07d7c4f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/pixelgen_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/pixelgen_public.h
@@ -41,7 +41,7 @@ STORAGE_CLASS_PIXELGEN_H void pixelgen_ctrl_get_state(
41STORAGE_CLASS_PIXELGEN_H void pixelgen_ctrl_dump_state( 41STORAGE_CLASS_PIXELGEN_H void pixelgen_ctrl_dump_state(
42 const pixelgen_ID_t ID, 42 const pixelgen_ID_t ID,
43 pixelgen_ctrl_state_t *state); 43 pixelgen_ctrl_state_t *state);
44/** end of NCI */ 44/* end of NCI */
45 45
46/***************************************************** 46/*****************************************************
47 * 47 *
@@ -73,7 +73,7 @@ STORAGE_CLASS_PIXELGEN_H void pixelgen_ctrl_reg_store(
73 const pixelgen_ID_t ID, 73 const pixelgen_ID_t ID,
74 const hrt_address reg, 74 const hrt_address reg,
75 const hrt_data value); 75 const hrt_data value);
76/** end of DLI */ 76/* end of DLI */
77 77
78#endif /* USE_INPUT_SYSTEM_VERSION_2401 */ 78#endif /* USE_INPUT_SYSTEM_VERSION_2401 */
79#endif /* __PIXELGEN_PUBLIC_H_INCLUDED__ */ 79#endif /* __PIXELGEN_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h
index a202d6dce106..c1638c06407d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h
@@ -27,7 +27,7 @@
27 27
28#include "ref_vector_func_types.h" 28#include "ref_vector_func_types.h"
29 29
30/** @brief Doubling multiply accumulate with saturation 30/* @brief Doubling multiply accumulate with saturation
31 * 31 *
32 * @param[in] acc accumulator 32 * @param[in] acc accumulator
33 * @param[in] a multiply input 33 * @param[in] a multiply input
@@ -44,7 +44,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector2w OP_1w_maccd_sat(
44 tvector1w a, 44 tvector1w a,
45 tvector1w b ); 45 tvector1w b );
46 46
47/** @brief Doubling multiply accumulate 47/* @brief Doubling multiply accumulate
48 * 48 *
49 * @param[in] acc accumulator 49 * @param[in] acc accumulator
50 * @param[in] a multiply input 50 * @param[in] a multiply input
@@ -61,7 +61,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector2w OP_1w_maccd(
61 tvector1w a, 61 tvector1w a,
62 tvector1w b ); 62 tvector1w b );
63 63
64/** @brief Re-aligning multiply 64/* @brief Re-aligning multiply
65 * 65 *
66 * @param[in] a multiply input 66 * @param[in] a multiply input
67 * @param[in] b multiply input 67 * @param[in] b multiply input
@@ -78,7 +78,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_mul_realigning(
78 tvector1w b, 78 tvector1w b,
79 tscalar1w shift ); 79 tscalar1w shift );
80 80
81/** @brief Leading bit index 81/* @brief Leading bit index
82 * 82 *
83 * @param[in] a input 83 * @param[in] a input
84 * 84 *
@@ -92,7 +92,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_mul_realigning(
92STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_lod( 92STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_lod(
93 tvector1w a); 93 tvector1w a);
94 94
95/** @brief Config Unit Input Processing 95/* @brief Config Unit Input Processing
96 * 96 *
97 * @param[in] a input 97 * @param[in] a input
98 * @param[in] input_scale input scaling factor 98 * @param[in] input_scale input scaling factor
@@ -111,7 +111,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_input_scaling_offset_clamping(
111 tscalar1w_5bit_signed input_scale, 111 tscalar1w_5bit_signed input_scale,
112 tscalar1w_5bit_signed input_offset); 112 tscalar1w_5bit_signed input_offset);
113 113
114/** @brief Config Unit Output Processing 114/* @brief Config Unit Output Processing
115 * 115 *
116 * @param[in] a output 116 * @param[in] a output
117 * @param[in] output_scale output scaling factor 117 * @param[in] output_scale output scaling factor
@@ -127,7 +127,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_output_scaling_clamping(
127 tvector1w a, 127 tvector1w a,
128 tscalar1w_5bit_signed output_scale); 128 tscalar1w_5bit_signed output_scale);
129 129
130/** @brief Config Unit Piecewiselinear estimation 130/* @brief Config Unit Piecewiselinear estimation
131 * 131 *
132 * @param[in] a input 132 * @param[in] a input
133 * @param[in] config_points config parameter structure 133 * @param[in] config_points config parameter structure
@@ -143,7 +143,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_piecewise_estimation(
143 tvector1w a, 143 tvector1w a,
144 ref_config_points config_points); 144 ref_config_points config_points);
145 145
146/** @brief Fast Config Unit 146/* @brief Fast Config Unit
147 * 147 *
148 * @param[in] x input 148 * @param[in] x input
149 * @param[in] init_vectors LUT data structure 149 * @param[in] init_vectors LUT data structure
@@ -161,7 +161,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_XCU(
161 xcu_ref_init_vectors init_vectors); 161 xcu_ref_init_vectors init_vectors);
162 162
163 163
164/** @brief LXCU 164/* @brief LXCU
165 * 165 *
166 * @param[in] x input 166 * @param[in] x input
167 * @param[in] init_vectors LUT data structure 167 * @param[in] init_vectors LUT data structure
@@ -180,7 +180,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_LXCU(
180 tvector1w x, 180 tvector1w x,
181 xcu_ref_init_vectors init_vectors); 181 xcu_ref_init_vectors init_vectors);
182 182
183/** @brief Coring 183/* @brief Coring
184 * 184 *
185 * @param[in] coring_vec Amount of coring based on brightness level 185 * @param[in] coring_vec Amount of coring based on brightness level
186 * @param[in] filt_input Vector of input pixels on which Coring is applied 186 * @param[in] filt_input Vector of input pixels on which Coring is applied
@@ -196,7 +196,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w coring(
196 tvector1w filt_input, 196 tvector1w filt_input,
197 tscalar1w m_CnrCoring0 ); 197 tscalar1w m_CnrCoring0 );
198 198
199/** @brief Normalised FIR with coefficients [3,4,1] 199/* @brief Normalised FIR with coefficients [3,4,1]
200 * 200 *
201 * @param[in] m 1x3 matrix with pixels 201 * @param[in] m 1x3 matrix with pixels
202 * 202 *
@@ -209,7 +209,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w coring(
209STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_5dB_m90_nrm ( 209STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_5dB_m90_nrm (
210 const s_1w_1x3_matrix m); 210 const s_1w_1x3_matrix m);
211 211
212/** @brief Normalised FIR with coefficients [1,4,3] 212/* @brief Normalised FIR with coefficients [1,4,3]
213 * 213 *
214 * @param[in] m 1x3 matrix with pixels 214 * @param[in] m 1x3 matrix with pixels
215 * 215 *
@@ -222,7 +222,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_5dB_m90_nrm (
222STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_5dB_p90_nrm ( 222STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_5dB_p90_nrm (
223 const s_1w_1x3_matrix m); 223 const s_1w_1x3_matrix m);
224 224
225/** @brief Normalised FIR with coefficients [1,2,1] 225/* @brief Normalised FIR with coefficients [1,2,1]
226 * 226 *
227 * @param[in] m 1x3 matrix with pixels 227 * @param[in] m 1x3 matrix with pixels
228 * 228 *
@@ -234,7 +234,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_5dB_p90_nrm (
234STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm ( 234STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm (
235 const s_1w_1x3_matrix m); 235 const s_1w_1x3_matrix m);
236 236
237/** @brief Normalised FIR with coefficients [13,16,3] 237/* @brief Normalised FIR with coefficients [13,16,3]
238 * 238 *
239 * @param[in] m 1x3 matrix with pixels 239 * @param[in] m 1x3 matrix with pixels
240 * 240 *
@@ -246,7 +246,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm (
246STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph0 ( 246STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph0 (
247 const s_1w_1x3_matrix m); 247 const s_1w_1x3_matrix m);
248 248
249/** @brief Normalised FIR with coefficients [9,16,7] 249/* @brief Normalised FIR with coefficients [9,16,7]
250 * 250 *
251 * @param[in] m 1x3 matrix with pixels 251 * @param[in] m 1x3 matrix with pixels
252 * 252 *
@@ -258,7 +258,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph0 (
258STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph1 ( 258STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph1 (
259 const s_1w_1x3_matrix m); 259 const s_1w_1x3_matrix m);
260 260
261/** @brief Normalised FIR with coefficients [5,16,11] 261/* @brief Normalised FIR with coefficients [5,16,11]
262 * 262 *
263 * @param[in] m 1x3 matrix with pixels 263 * @param[in] m 1x3 matrix with pixels
264 * 264 *
@@ -270,7 +270,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph1 (
270STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph2 ( 270STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph2 (
271 const s_1w_1x3_matrix m); 271 const s_1w_1x3_matrix m);
272 272
273/** @brief Normalised FIR with coefficients [1,16,15] 273/* @brief Normalised FIR with coefficients [1,16,15]
274 * 274 *
275 * @param[in] m 1x3 matrix with pixels 275 * @param[in] m 1x3 matrix with pixels
276 * 276 *
@@ -282,7 +282,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph2 (
282STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph3 ( 282STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph3 (
283 const s_1w_1x3_matrix m); 283 const s_1w_1x3_matrix m);
284 284
285/** @brief Normalised FIR with programable phase shift 285/* @brief Normalised FIR with programable phase shift
286 * 286 *
287 * @param[in] m 1x3 matrix with pixels 287 * @param[in] m 1x3 matrix with pixels
288 * @param[in] coeff phase shift 288 * @param[in] coeff phase shift
@@ -295,7 +295,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph3 (
295STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_calc_coeff ( 295STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_calc_coeff (
296 const s_1w_1x3_matrix m, tscalar1w_3bit coeff); 296 const s_1w_1x3_matrix m, tscalar1w_3bit coeff);
297 297
298/** @brief 3 tap FIR with coefficients [1,1,1] 298/* @brief 3 tap FIR with coefficients [1,1,1]
299 * 299 *
300 * @param[in] m 1x3 matrix with pixels 300 * @param[in] m 1x3 matrix with pixels
301 * 301 *
@@ -308,7 +308,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_9dB_nrm (
308 const s_1w_1x3_matrix m); 308 const s_1w_1x3_matrix m);
309 309
310#ifdef ISP2401 310#ifdef ISP2401
311/** @brief symmetric 3 tap FIR acts as LPF or BSF 311/* @brief symmetric 3 tap FIR acts as LPF or BSF
312 * 312 *
313 * @param[in] m 1x3 matrix with pixels 313 * @param[in] m 1x3 matrix with pixels
314 * @param[in] k filter coefficient shift 314 * @param[in] k filter coefficient shift
@@ -336,7 +336,7 @@ sym_fir1x3m_lpf_bsf(s_1w_1x3_matrix m,
336 tscalar_bool bsf_flag); 336 tscalar_bool bsf_flag);
337#endif 337#endif
338 338
339/** @brief Normalised 2D FIR with coefficients [1;2;1] * [1,2,1] 339/* @brief Normalised 2D FIR with coefficients [1;2;1] * [1,2,1]
340 * 340 *
341 * @param[in] m 3x3 matrix with pixels 341 * @param[in] m 3x3 matrix with pixels
342 * 342 *
@@ -353,7 +353,7 @@ sym_fir1x3m_lpf_bsf(s_1w_1x3_matrix m,
353STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir3x3m_6dB_nrm ( 353STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir3x3m_6dB_nrm (
354 const s_1w_3x3_matrix m); 354 const s_1w_3x3_matrix m);
355 355
356/** @brief Normalised 2D FIR with coefficients [1;1;1] * [1,1,1] 356/* @brief Normalised 2D FIR with coefficients [1;1;1] * [1,1,1]
357 * 357 *
358 * @param[in] m 3x3 matrix with pixels 358 * @param[in] m 3x3 matrix with pixels
359 * 359 *
@@ -371,7 +371,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir3x3m_6dB_nrm (
371STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir3x3m_9dB_nrm ( 371STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir3x3m_9dB_nrm (
372 const s_1w_3x3_matrix m); 372 const s_1w_3x3_matrix m);
373 373
374/** @brief Normalised dual output 2D FIR with coefficients [1;2;1] * [1,2,1] 374/* @brief Normalised dual output 2D FIR with coefficients [1;2;1] * [1,2,1]
375 * 375 *
376 * @param[in] m 4x3 matrix with pixels 376 * @param[in] m 4x3 matrix with pixels
377 * 377 *
@@ -391,7 +391,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir3x3m_9dB_nrm (
391 STORAGE_CLASS_REF_VECTOR_FUNC_H s_1w_2x1_matrix fir3x3m_6dB_out2x1_nrm ( 391 STORAGE_CLASS_REF_VECTOR_FUNC_H s_1w_2x1_matrix fir3x3m_6dB_out2x1_nrm (
392 const s_1w_4x3_matrix m); 392 const s_1w_4x3_matrix m);
393 393
394/** @brief Normalised dual output 2D FIR with coefficients [1;1;1] * [1,1,1] 394/* @brief Normalised dual output 2D FIR with coefficients [1;1;1] * [1,1,1]
395 * 395 *
396 * @param[in] m 4x3 matrix with pixels 396 * @param[in] m 4x3 matrix with pixels
397 * 397 *
@@ -411,7 +411,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir3x3m_9dB_nrm (
411STORAGE_CLASS_REF_VECTOR_FUNC_H s_1w_2x1_matrix fir3x3m_9dB_out2x1_nrm ( 411STORAGE_CLASS_REF_VECTOR_FUNC_H s_1w_2x1_matrix fir3x3m_9dB_out2x1_nrm (
412 const s_1w_4x3_matrix m); 412 const s_1w_4x3_matrix m);
413 413
414/** @brief Normalised 2D FIR 5x5 414/* @brief Normalised 2D FIR 5x5
415 * 415 *
416 * @param[in] m 5x5 matrix with pixels 416 * @param[in] m 5x5 matrix with pixels
417 * 417 *
@@ -429,7 +429,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H s_1w_2x1_matrix fir3x3m_9dB_out2x1_nrm (
429STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir5x5m_15dB_nrm ( 429STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir5x5m_15dB_nrm (
430 const s_1w_5x5_matrix m); 430 const s_1w_5x5_matrix m);
431 431
432/** @brief Normalised FIR 1x5 432/* @brief Normalised FIR 1x5
433 * 433 *
434 * @param[in] m 1x5 matrix with pixels 434 * @param[in] m 1x5 matrix with pixels
435 * 435 *
@@ -447,7 +447,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir5x5m_15dB_nrm (
447STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x5m_12dB_nrm ( 447STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x5m_12dB_nrm (
448 const s_1w_1x5_matrix m); 448 const s_1w_1x5_matrix m);
449 449
450/** @brief Normalised 2D FIR 5x5 450/* @brief Normalised 2D FIR 5x5
451 * 451 *
452 * @param[in] m 5x5 matrix with pixels 452 * @param[in] m 5x5 matrix with pixels
453 * 453 *
@@ -465,7 +465,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x5m_12dB_nrm (
465STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir5x5m_12dB_nrm ( 465STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir5x5m_12dB_nrm (
466 const s_1w_5x5_matrix m); 466 const s_1w_5x5_matrix m);
467 467
468/** @brief Approximate averaging FIR 1x5 468/* @brief Approximate averaging FIR 1x5
469 * 469 *
470 * @param[in] m 1x5 matrix with pixels 470 * @param[in] m 1x5 matrix with pixels
471 * 471 *
@@ -479,7 +479,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir5x5m_12dB_nrm (
479STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x5m_box ( 479STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x5m_box (
480 s_1w_1x5_matrix m); 480 s_1w_1x5_matrix m);
481 481
482/** @brief Approximate averaging FIR 1x9 482/* @brief Approximate averaging FIR 1x9
483 * 483 *
484 * @param[in] m 1x9 matrix with pixels 484 * @param[in] m 1x9 matrix with pixels
485 * 485 *
@@ -493,7 +493,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x5m_box (
493STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x9m_box ( 493STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x9m_box (
494 s_1w_1x9_matrix m); 494 s_1w_1x9_matrix m);
495 495
496/** @brief Approximate averaging FIR 1x11 496/* @brief Approximate averaging FIR 1x11
497 * 497 *
498 * @param[in] m 1x11 matrix with pixels 498 * @param[in] m 1x11 matrix with pixels
499 * 499 *
@@ -507,7 +507,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x9m_box (
507STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x11m_box ( 507STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x11m_box (
508 s_1w_1x11_matrix m); 508 s_1w_1x11_matrix m);
509 509
510/** @brief Symmetric 7 tap filter with normalization 510/* @brief Symmetric 7 tap filter with normalization
511 * 511 *
512 * @param[in] in 1x7 matrix with pixels 512 * @param[in] in 1x7 matrix with pixels
513 * @param[in] coeff 1x4 matrix with coefficients 513 * @param[in] coeff 1x4 matrix with coefficients
@@ -528,7 +528,7 @@ fir1x7m_sym_nrm(s_1w_1x7_matrix in,
528 s_1w_1x4_matrix coeff, 528 s_1w_1x4_matrix coeff,
529 tvector1w out_shift); 529 tvector1w out_shift);
530 530
531/** @brief Symmetric 7 tap filter with normalization at input side 531/* @brief Symmetric 7 tap filter with normalization at input side
532 * 532 *
533 * @param[in] in 1x7 matrix with pixels 533 * @param[in] in 1x7 matrix with pixels
534 * @param[in] coeff 1x4 matrix with coefficients 534 * @param[in] coeff 1x4 matrix with coefficients
@@ -549,7 +549,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
549fir1x7m_sym_innrm_approx(s_1w_1x7_matrix in, 549fir1x7m_sym_innrm_approx(s_1w_1x7_matrix in,
550 s_1w_1x4_matrix coeff); 550 s_1w_1x4_matrix coeff);
551 551
552/** @brief Symmetric 7 tap filter with normalization at output side 552/* @brief Symmetric 7 tap filter with normalization at output side
553 * 553 *
554 * @param[in] in 1x7 matrix with pixels 554 * @param[in] in 1x7 matrix with pixels
555 * @param[in] coeff 1x4 matrix with coefficients 555 * @param[in] coeff 1x4 matrix with coefficients
@@ -571,7 +571,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
571fir1x7m_sym_outnrm_approx(s_1w_1x7_matrix in, 571fir1x7m_sym_outnrm_approx(s_1w_1x7_matrix in,
572 s_1w_1x4_matrix coeff); 572 s_1w_1x4_matrix coeff);
573 573
574/** @brief 4 tap filter with normalization 574/* @brief 4 tap filter with normalization
575 * 575 *
576 * @param[in] in 1x4 matrix with pixels 576 * @param[in] in 1x4 matrix with pixels
577 * @param[in] coeff 1x4 matrix with coefficients 577 * @param[in] coeff 1x4 matrix with coefficients
@@ -588,7 +588,7 @@ fir1x4m_nrm(s_1w_1x4_matrix in,
588 s_1w_1x4_matrix coeff, 588 s_1w_1x4_matrix coeff,
589 tvector1w out_shift); 589 tvector1w out_shift);
590 590
591/** @brief 4 tap filter with normalization for half pixel interpolation 591/* @brief 4 tap filter with normalization for half pixel interpolation
592 * 592 *
593 * @param[in] in 1x4 matrix with pixels 593 * @param[in] in 1x4 matrix with pixels
594 * 594 *
@@ -604,7 +604,7 @@ fir1x4m_nrm(s_1w_1x4_matrix in,
604STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w 604STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
605fir1x4m_bicubic_bezier_half(s_1w_1x4_matrix in); 605fir1x4m_bicubic_bezier_half(s_1w_1x4_matrix in);
606 606
607/** @brief 4 tap filter with normalization for quarter pixel interpolation 607/* @brief 4 tap filter with normalization for quarter pixel interpolation
608 * 608 *
609 * @param[in] in 1x4 matrix with pixels 609 * @param[in] in 1x4 matrix with pixels
610 * @param[in] coeff 1x4 matrix with coefficients 610 * @param[in] coeff 1x4 matrix with coefficients
@@ -626,7 +626,7 @@ fir1x4m_bicubic_bezier_quarter(s_1w_1x4_matrix in,
626 s_1w_1x4_matrix coeff); 626 s_1w_1x4_matrix coeff);
627 627
628 628
629/** @brief Symmetric 3 tap filter with normalization 629/* @brief Symmetric 3 tap filter with normalization
630 * 630 *
631 * @param[in] in 1x3 matrix with pixels 631 * @param[in] in 1x3 matrix with pixels
632 * @param[in] coeff 1x2 matrix with coefficients 632 * @param[in] coeff 1x2 matrix with coefficients
@@ -646,7 +646,7 @@ fir1x3m_sym_nrm(s_1w_1x3_matrix in,
646 s_1w_1x2_matrix coeff, 646 s_1w_1x2_matrix coeff,
647 tvector1w out_shift); 647 tvector1w out_shift);
648 648
649/** @brief Symmetric 3 tap filter with normalization 649/* @brief Symmetric 3 tap filter with normalization
650 * 650 *
651 * @param[in] in 1x3 matrix with pixels 651 * @param[in] in 1x3 matrix with pixels
652 * @param[in] coeff 1x2 matrix with coefficients 652 * @param[in] coeff 1x2 matrix with coefficients
@@ -666,7 +666,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
666fir1x3m_sym_nrm_approx(s_1w_1x3_matrix in, 666fir1x3m_sym_nrm_approx(s_1w_1x3_matrix in,
667 s_1w_1x2_matrix coeff); 667 s_1w_1x2_matrix coeff);
668 668
669/** @brief Mean of 1x3 matrix 669/* @brief Mean of 1x3 matrix
670 * 670 *
671 * @param[in] m 1x3 matrix with pixels 671 * @param[in] m 1x3 matrix with pixels
672 * 672 *
@@ -678,7 +678,7 @@ fir1x3m_sym_nrm_approx(s_1w_1x3_matrix in,
678STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x3m( 678STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x3m(
679 s_1w_1x3_matrix m); 679 s_1w_1x3_matrix m);
680 680
681/** @brief Mean of 3x3 matrix 681/* @brief Mean of 3x3 matrix
682 * 682 *
683 * @param[in] m 3x3 matrix with pixels 683 * @param[in] m 3x3 matrix with pixels
684 * 684 *
@@ -690,7 +690,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x3m(
690STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean3x3m( 690STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean3x3m(
691 s_1w_3x3_matrix m); 691 s_1w_3x3_matrix m);
692 692
693/** @brief Mean of 1x4 matrix 693/* @brief Mean of 1x4 matrix
694 * 694 *
695 * @param[in] m 1x4 matrix with pixels 695 * @param[in] m 1x4 matrix with pixels
696 * 696 *
@@ -701,7 +701,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean3x3m(
701STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x4m( 701STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x4m(
702 s_1w_1x4_matrix m); 702 s_1w_1x4_matrix m);
703 703
704/** @brief Mean of 4x4 matrix 704/* @brief Mean of 4x4 matrix
705 * 705 *
706 * @param[in] m 4x4 matrix with pixels 706 * @param[in] m 4x4 matrix with pixels
707 * 707 *
@@ -712,7 +712,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x4m(
712STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean4x4m( 712STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean4x4m(
713 s_1w_4x4_matrix m); 713 s_1w_4x4_matrix m);
714 714
715/** @brief Mean of 2x3 matrix 715/* @brief Mean of 2x3 matrix
716 * 716 *
717 * @param[in] m 2x3 matrix with pixels 717 * @param[in] m 2x3 matrix with pixels
718 * 718 *
@@ -724,7 +724,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean4x4m(
724STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean2x3m( 724STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean2x3m(
725 s_1w_2x3_matrix m); 725 s_1w_2x3_matrix m);
726 726
727/** @brief Mean of 1x5 matrix 727/* @brief Mean of 1x5 matrix
728 * 728 *
729 * @param[in] m 1x5 matrix with pixels 729 * @param[in] m 1x5 matrix with pixels
730 * 730 *
@@ -735,7 +735,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean2x3m(
735*/ 735*/
736STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x5m(s_1w_1x5_matrix m); 736STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x5m(s_1w_1x5_matrix m);
737 737
738/** @brief Mean of 1x6 matrix 738/* @brief Mean of 1x6 matrix
739 * 739 *
740 * @param[in] m 1x6 matrix with pixels 740 * @param[in] m 1x6 matrix with pixels
741 * 741 *
@@ -747,7 +747,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x5m(s_1w_1x5_matrix m);
747STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x6m( 747STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x6m(
748 s_1w_1x6_matrix m); 748 s_1w_1x6_matrix m);
749 749
750/** @brief Mean of 5x5 matrix 750/* @brief Mean of 5x5 matrix
751 * 751 *
752 * @param[in] m 5x5 matrix with pixels 752 * @param[in] m 5x5 matrix with pixels
753 * 753 *
@@ -759,7 +759,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x6m(
759STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean5x5m( 759STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean5x5m(
760 s_1w_5x5_matrix m); 760 s_1w_5x5_matrix m);
761 761
762/** @brief Mean of 6x6 matrix 762/* @brief Mean of 6x6 matrix
763 * 763 *
764 * @param[in] m 6x6 matrix with pixels 764 * @param[in] m 6x6 matrix with pixels
765 * 765 *
@@ -771,7 +771,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean5x5m(
771STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean6x6m( 771STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean6x6m(
772 s_1w_6x6_matrix m); 772 s_1w_6x6_matrix m);
773 773
774/** @brief Minimum of 4x4 matrix 774/* @brief Minimum of 4x4 matrix
775 * 775 *
776 * @param[in] m 4x4 matrix with pixels 776 * @param[in] m 4x4 matrix with pixels
777 * 777 *
@@ -783,7 +783,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean6x6m(
783STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w min4x4m( 783STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w min4x4m(
784 s_1w_4x4_matrix m); 784 s_1w_4x4_matrix m);
785 785
786/** @brief Maximum of 4x4 matrix 786/* @brief Maximum of 4x4 matrix
787 * 787 *
788 * @param[in] m 4x4 matrix with pixels 788 * @param[in] m 4x4 matrix with pixels
789 * 789 *
@@ -795,7 +795,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w min4x4m(
795STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w max4x4m( 795STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w max4x4m(
796 s_1w_4x4_matrix m); 796 s_1w_4x4_matrix m);
797 797
798/** @brief SAD between two 3x3 matrices 798/* @brief SAD between two 3x3 matrices
799 * 799 *
800 * @param[in] a 3x3 matrix with pixels 800 * @param[in] a 3x3 matrix with pixels
801 * 801 *
@@ -813,7 +813,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w sad3x3m_precise(
813 s_1w_3x3_matrix a, 813 s_1w_3x3_matrix a,
814 s_1w_3x3_matrix b); 814 s_1w_3x3_matrix b);
815 815
816/** @brief SAD between two 3x3 matrices 816/* @brief SAD between two 3x3 matrices
817 * 817 *
818 * @param[in] a 3x3 matrix with pixels 818 * @param[in] a 3x3 matrix with pixels
819 * 819 *
@@ -833,7 +833,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w sad3x3m(
833 s_1w_3x3_matrix a, 833 s_1w_3x3_matrix a,
834 s_1w_3x3_matrix b); 834 s_1w_3x3_matrix b);
835 835
836/** @brief SAD between two 5x5 matrices 836/* @brief SAD between two 5x5 matrices
837 * 837 *
838 * @param[in] a 5x5 matrix with pixels 838 * @param[in] a 5x5 matrix with pixels
839 * 839 *
@@ -847,7 +847,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w sad5x5m(
847 s_1w_5x5_matrix a, 847 s_1w_5x5_matrix a,
848 s_1w_5x5_matrix b); 848 s_1w_5x5_matrix b);
849 849
850/** @brief Absolute gradient between two sets of 1x5 matrices 850/* @brief Absolute gradient between two sets of 1x5 matrices
851 * 851 *
852 * @param[in] m0 first set of 1x5 matrix with pixels 852 * @param[in] m0 first set of 1x5 matrix with pixels
853 * @param[in] m1 second set of 1x5 matrix with pixels 853 * @param[in] m1 second set of 1x5 matrix with pixels
@@ -860,7 +860,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w sad5x5m(
860STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w 860STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
861absgrad1x5m(s_1w_1x5_matrix m0, s_1w_1x5_matrix m1); 861absgrad1x5m(s_1w_1x5_matrix m0, s_1w_1x5_matrix m1);
862 862
863/** @brief Bi-linear Interpolation optimized(approximate) 863/* @brief Bi-linear Interpolation optimized(approximate)
864 * 864 *
865 * @param[in] a input0 865 * @param[in] a input0
866 * @param[in] b input1 866 * @param[in] b input1
@@ -882,7 +882,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_bilinear_interpol_approx_c(
882 tvector1w b, 882 tvector1w b,
883 tscalar1w_weight c); 883 tscalar1w_weight c);
884 884
885/** @brief Bi-linear Interpolation optimized(approximate) 885/* @brief Bi-linear Interpolation optimized(approximate)
886 * 886 *
887 * @param[in] a input0 887 * @param[in] a input0
888 * @param[in] b input1 888 * @param[in] b input1
@@ -904,7 +904,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_bilinear_interpol_approx(
904 tvector1w b, 904 tvector1w b,
905 tvector1w_weight c); 905 tvector1w_weight c);
906 906
907/** @brief Bi-linear Interpolation 907/* @brief Bi-linear Interpolation
908 * 908 *
909 * @param[in] a input0 909 * @param[in] a input0
910 * @param[in] b input1 910 * @param[in] b input1
@@ -925,7 +925,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_bilinear_interpol(
925 tvector1w b, 925 tvector1w b,
926 tscalar1w_weight c); 926 tscalar1w_weight c);
927 927
928/** @brief Generic Block Matching Algorithm 928/* @brief Generic Block Matching Algorithm
929 * @param[in] search_window pointer to input search window of 16x16 pixels 929 * @param[in] search_window pointer to input search window of 16x16 pixels
930 * @param[in] ref_block pointer to input reference block of 8x8 pixels, where N<=M 930 * @param[in] ref_block pointer to input reference block of 8x8 pixels, where N<=M
931 * @param[in] output pointer to output sads 931 * @param[in] output pointer to output sads
@@ -954,9 +954,9 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H int generic_block_matching_algorithm(
954 tscalar1w_4bit_bma_shift shift); 954 tscalar1w_4bit_bma_shift shift);
955 955
956#ifndef ISP2401 956#ifndef ISP2401
957/** @brief OP_1w_asp_bma_16_1_32way 957/* @brief OP_1w_asp_bma_16_1_32way
958#else 958#else
959/** @brief OP_1w_asp_bma_16_1_32way_nomask 959/* @brief OP_1w_asp_bma_16_1_32way_nomask
960#endif 960#endif
961 * 961 *
962 * @param[in] search_area input search window of 16x16 pixels 962 * @param[in] search_area input search window of 16x16 pixels
@@ -984,9 +984,9 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H bma_output_16_1 OP_1w_asp_bma_16_1_32way_nomask(
984 tscalar1w_4bit_bma_shift shift); 984 tscalar1w_4bit_bma_shift shift);
985 985
986#ifndef ISP2401 986#ifndef ISP2401
987/** @brief OP_1w_asp_bma_16_2_32way 987/* @brief OP_1w_asp_bma_16_2_32way
988#else 988#else
989/** @brief OP_1w_asp_bma_16_2_32way_nomask 989/* @brief OP_1w_asp_bma_16_2_32way_nomask
990#endif 990#endif
991 * 991 *
992 * @param[in] search_area input search window of 16x16 pixels 992 * @param[in] search_area input search window of 16x16 pixels
@@ -1011,9 +1011,9 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H bma_output_16_2 OP_1w_asp_bma_16_2_32way_nomask(
1011 ref_block_8x8 input_block, 1011 ref_block_8x8 input_block,
1012 tscalar1w_4bit_bma_shift shift); 1012 tscalar1w_4bit_bma_shift shift);
1013#ifndef ISP2401 1013#ifndef ISP2401
1014/** @brief OP_1w_asp_bma_14_1_32way 1014/* @brief OP_1w_asp_bma_14_1_32way
1015#else 1015#else
1016/** @brief OP_1w_asp_bma_14_1_32way_nomask 1016/* @brief OP_1w_asp_bma_14_1_32way_nomask
1017#endif 1017#endif
1018 * 1018 *
1019 * @param[in] search_area input search block of 16x16 pixels with search window of 14x14 pixels 1019 * @param[in] search_area input search block of 16x16 pixels with search window of 14x14 pixels
@@ -1041,9 +1041,9 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H bma_output_14_1 OP_1w_asp_bma_14_1_32way_nomask(
1041 tscalar1w_4bit_bma_shift shift); 1041 tscalar1w_4bit_bma_shift shift);
1042 1042
1043#ifndef ISP2401 1043#ifndef ISP2401
1044/** @brief OP_1w_asp_bma_14_2_32way 1044/* @brief OP_1w_asp_bma_14_2_32way
1045#else 1045#else
1046/** @brief OP_1w_asp_bma_14_2_32way_nomask 1046/* @brief OP_1w_asp_bma_14_2_32way_nomask
1047#endif 1047#endif
1048 * 1048 *
1049 * @param[in] search_area input search block of 16x16 pixels with search window of 14x14 pixels 1049 * @param[in] search_area input search block of 16x16 pixels with search window of 14x14 pixels
@@ -1069,7 +1069,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H bma_output_14_2 OP_1w_asp_bma_14_2_32way_nomask(
1069 tscalar1w_4bit_bma_shift shift); 1069 tscalar1w_4bit_bma_shift shift);
1070 1070
1071#ifdef ISP2401 1071#ifdef ISP2401
1072/** @brief multiplex addition and passing 1072/* @brief multiplex addition and passing
1073 * 1073 *
1074 * @param[in] _a first pixel 1074 * @param[in] _a first pixel
1075 * @param[in] _b second pixel 1075 * @param[in] _b second pixel
@@ -1087,7 +1087,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_cond_add(
1087 1087
1088#endif 1088#endif
1089#ifdef HAS_bfa_unit 1089#ifdef HAS_bfa_unit
1090/** @brief OP_1w_single_bfa_7x7 1090/* @brief OP_1w_single_bfa_7x7
1091 * 1091 *
1092 * @param[in] weights - spatial and range weight lut 1092 * @param[in] weights - spatial and range weight lut
1093 * @param[in] threshold - threshold plane, for range weight scaling 1093 * @param[in] threshold - threshold plane, for range weight scaling
@@ -1115,7 +1115,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H bfa_7x7_output OP_1w_single_bfa_7x7(
1115 tvector1w central_pix, 1115 tvector1w central_pix,
1116 s_1w_7x7_matrix src_plane); 1116 s_1w_7x7_matrix src_plane);
1117 1117
1118/** @brief OP_1w_joint_bfa_7x7 1118/* @brief OP_1w_joint_bfa_7x7
1119 * 1119 *
1120 * @param[in] weights - spatial and range weight lut 1120 * @param[in] weights - spatial and range weight lut
1121 * @param[in] threshold0 - 1st threshold plane, for range weight scaling 1121 * @param[in] threshold0 - 1st threshold plane, for range weight scaling
@@ -1149,7 +1149,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H bfa_7x7_output OP_1w_joint_bfa_7x7(
1149 tvector1w central_pix1, 1149 tvector1w central_pix1,
1150 s_1w_7x7_matrix src1_plane); 1150 s_1w_7x7_matrix src1_plane);
1151 1151
1152/** @brief bbb_bfa_gen_spatial_weight_lut 1152/* @brief bbb_bfa_gen_spatial_weight_lut
1153 * 1153 *
1154 * @param[in] in - 7x7 matrix of spatial weights 1154 * @param[in] in - 7x7 matrix of spatial weights
1155 * @param[in] out - generated LUT 1155 * @param[in] out - generated LUT
@@ -1163,7 +1163,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H void bbb_bfa_gen_spatial_weight_lut(
1163 s_1w_7x7_matrix in, 1163 s_1w_7x7_matrix in,
1164 tvector1w out[BFA_MAX_KWAY]); 1164 tvector1w out[BFA_MAX_KWAY]);
1165 1165
1166/** @brief bbb_bfa_gen_range_weight_lut 1166/* @brief bbb_bfa_gen_range_weight_lut
1167 * 1167 *
1168 * @param[in] in - input range weight, 1168 * @param[in] in - input range weight,
1169 * @param[in] out - generated LUT 1169 * @param[in] out - generated LUT
@@ -1184,7 +1184,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H void bbb_bfa_gen_range_weight_lut(
1184#endif 1184#endif
1185 1185
1186#ifdef ISP2401 1186#ifdef ISP2401
1187/** @brief OP_1w_imax32 1187/* @brief OP_1w_imax32
1188 * 1188 *
1189 * @param[in] src - structure that holds an array of 32 elements. 1189 * @param[in] src - structure that holds an array of 32 elements.
1190 * 1190 *
@@ -1195,7 +1195,7 @@ STORAGE_CLASS_REF_VECTOR_FUNC_H void bbb_bfa_gen_range_weight_lut(
1195STORAGE_CLASS_REF_VECTOR_FUNC_H int OP_1w_imax32( 1195STORAGE_CLASS_REF_VECTOR_FUNC_H int OP_1w_imax32(
1196 imax32_ref_in_vector src); 1196 imax32_ref_in_vector src);
1197 1197
1198/** @brief OP_1w_imaxidx32 1198/* @brief OP_1w_imaxidx32
1199 * 1199 *
1200 * @param[in] src - structure that holds a vector of elements. 1200 * @param[in] src - structure that holds a vector of elements.
1201 * 1201 *
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h
index e85e5c889c15..6436dae0007e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h
@@ -168,7 +168,7 @@ static inline unsigned int round_half_down_mul(unsigned int a, unsigned int b)
168} 168}
169#endif 169#endif
170 170
171/** @brief Next Power of Two 171/* @brief Next Power of Two
172 * 172 *
173 * @param[in] unsigned number 173 * @param[in] unsigned number
174 * 174 *
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h
index d80437c58bde..f4d9674cdab6 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h
@@ -23,7 +23,7 @@
23 */ 23 */
24 24
25 25
26/** @brief Copy from src_buf to dest_buf. 26/* @brief Copy from src_buf to dest_buf.
27 * 27 *
28 * @param[out] dest_buf. Destination buffer to copy to 28 * @param[out] dest_buf. Destination buffer to copy to
29 * @param[in] dest_size. The size of the destination buffer in bytes 29 * @param[in] dest_size. The size of the destination buffer in bytes
@@ -53,7 +53,7 @@ static inline int memcpy_s(
53 return 0; 53 return 0;
54} 54}
55 55
56/** @brief Get the length of the string, excluding the null terminator 56/* @brief Get the length of the string, excluding the null terminator
57 * 57 *
58 * @param[in] src_str. The source string 58 * @param[in] src_str. The source string
59 * @param[in] max_len. Look only for max_len bytes in the string 59 * @param[in] max_len. Look only for max_len bytes in the string
@@ -78,7 +78,7 @@ static size_t strnlen_s(
78 return ix; 78 return ix;
79} 79}
80 80
81/** @brief Copy string from src_str to dest_str 81/* @brief Copy string from src_str to dest_str
82 * 82 *
83 * @param[out] dest_str. Destination buffer to copy to 83 * @param[out] dest_str. Destination buffer to copy to
84 * @param[in] dest_size. The size of the destination buffer in bytes 84 * @param[in] dest_size. The size of the destination buffer in bytes
@@ -120,7 +120,7 @@ static inline int strncpy_s(
120 return 0; 120 return 0;
121} 121}
122 122
123/** @brief Copy string from src_str to dest_str 123/* @brief Copy string from src_str to dest_str
124 * 124 *
125 * @param[out] dest_str. Destination buffer to copy to 125 * @param[out] dest_str. Destination buffer to copy to
126 * @param[in] dest_size. The size of the destination buffer in bytes 126 * @param[in] dest_size. The size of the destination buffer in bytes
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag.c
index 9aa8c168a803..2cf1d58941bf 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag.c
@@ -17,7 +17,7 @@
17#include <assert_support.h> 17#include <assert_support.h>
18#include "tag_local.h" 18#include "tag_local.h"
19 19
20/** 20/*
21 * @brief Creates the tag description from the given parameters. 21 * @brief Creates the tag description from the given parameters.
22 * @param[in] num_captures 22 * @param[in] num_captures
23 * @param[in] skip 23 * @param[in] skip
@@ -39,7 +39,7 @@ sh_css_create_tag_descr(int num_captures,
39 tag_descr->exp_id = exp_id; 39 tag_descr->exp_id = exp_id;
40} 40}
41 41
42/** 42/*
43 * @brief Encodes the members of tag description into a 32-bit value. 43 * @brief Encodes the members of tag description into a 32-bit value.
44 * @param[in] tag Pointer to the tag description 44 * @param[in] tag Pointer to the tag description
45 * @return (unsigned int) Encoded 32-bit tag-info 45 * @return (unsigned int) Encoded 32-bit tag-info
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css.h
index 2458b3767c90..e44df6916d90 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css.h
@@ -16,7 +16,7 @@
16#ifndef _IA_CSS_H_ 16#ifndef _IA_CSS_H_
17#define _IA_CSS_H_ 17#define _IA_CSS_H_
18 18
19/** @file 19/* @file
20 * This file is the starting point of the CSS-API. It includes all CSS-API 20 * This file is the starting point of the CSS-API. It includes all CSS-API
21 * header files. 21 * header files.
22 */ 22 */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_3a.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_3a.h
index a80a7dbaf712..080198796ad0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_3a.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_3a.h
@@ -15,7 +15,7 @@
15#ifndef __IA_CSS_3A_H 15#ifndef __IA_CSS_3A_H
16#define __IA_CSS_3A_H 16#define __IA_CSS_3A_H
17 17
18/** @file 18/* @file
19 * This file contains types used for 3A statistics 19 * This file contains types used for 3A statistics
20 */ 20 */
21 21
@@ -31,7 +31,7 @@ enum ia_css_3a_tables {
31 IA_CSS_NUM_3A_TABLES 31 IA_CSS_NUM_3A_TABLES
32}; 32};
33 33
34/** Structure that holds 3A statistics in the ISP internal 34/* Structure that holds 3A statistics in the ISP internal
35 * format. Use ia_css_get_3a_statistics() to translate 35 * format. Use ia_css_get_3a_statistics() to translate
36 * this to the format used on the host (3A library). 36 * this to the format used on the host (3A library).
37 * */ 37 * */
@@ -48,13 +48,13 @@ struct ia_css_isp_3a_statistics {
48 struct { 48 struct {
49 ia_css_ptr rgby_tbl; 49 ia_css_ptr rgby_tbl;
50 } data_hmem; 50 } data_hmem;
51 uint32_t exp_id; /**< exposure id, to match statistics to a frame, 51 uint32_t exp_id; /** exposure id, to match statistics to a frame,
52 see ia_css_event_public.h for more detail. */ 52 see ia_css_event_public.h for more detail. */
53 uint32_t isp_config_id;/**< Unique ID to track which config was actually applied to a particular frame */ 53 uint32_t isp_config_id;/** Unique ID to track which config was actually applied to a particular frame */
54 ia_css_ptr data_ptr; /**< pointer to base of all data */ 54 ia_css_ptr data_ptr; /** pointer to base of all data */
55 uint32_t size; /**< total size of all data */ 55 uint32_t size; /** total size of all data */
56 uint32_t dmem_size; 56 uint32_t dmem_size;
57 uint32_t vmem_size; /**< both lo and hi have this size */ 57 uint32_t vmem_size; /** both lo and hi have this size */
58 uint32_t hmem_size; 58 uint32_t hmem_size;
59}; 59};
60#define SIZE_OF_DMEM_STRUCT \ 60#define SIZE_OF_DMEM_STRUCT \
@@ -77,7 +77,7 @@ struct ia_css_isp_3a_statistics {
77 SIZE_OF_IA_CSS_PTR + \ 77 SIZE_OF_IA_CSS_PTR + \
78 4 * sizeof(uint32_t)) 78 4 * sizeof(uint32_t))
79 79
80/** Map with host-side pointers to ISP-format statistics. 80/* Map with host-side pointers to ISP-format statistics.
81 * These pointers can either be copies of ISP data or memory mapped 81 * These pointers can either be copies of ISP data or memory mapped
82 * ISP pointers. 82 * ISP pointers.
83 * All of the data behind these pointers is allocated contiguously, the 83 * All of the data behind these pointers is allocated contiguously, the
@@ -85,17 +85,17 @@ struct ia_css_isp_3a_statistics {
85 * point into this one block of data. 85 * point into this one block of data.
86 */ 86 */
87struct ia_css_isp_3a_statistics_map { 87struct ia_css_isp_3a_statistics_map {
88 void *data_ptr; /**< Pointer to start of memory */ 88 void *data_ptr; /** Pointer to start of memory */
89 struct ia_css_3a_output *dmem_stats; 89 struct ia_css_3a_output *dmem_stats;
90 uint16_t *vmem_stats_hi; 90 uint16_t *vmem_stats_hi;
91 uint16_t *vmem_stats_lo; 91 uint16_t *vmem_stats_lo;
92 struct ia_css_bh_table *hmem_stats; 92 struct ia_css_bh_table *hmem_stats;
93 uint32_t size; /**< total size in bytes of data_ptr */ 93 uint32_t size; /** total size in bytes of data_ptr */
94 uint32_t data_allocated; /**< indicate whether data_ptr 94 uint32_t data_allocated; /** indicate whether data_ptr
95 was allocated or not. */ 95 was allocated or not. */
96}; 96};
97 97
98/** @brief Copy and translate 3A statistics from an ISP buffer to a host buffer 98/* @brief Copy and translate 3A statistics from an ISP buffer to a host buffer
99 * @param[out] host_stats Host buffer. 99 * @param[out] host_stats Host buffer.
100 * @param[in] isp_stats ISP buffer. 100 * @param[in] isp_stats ISP buffer.
101 * @return error value if temporary memory cannot be allocated 101 * @return error value if temporary memory cannot be allocated
@@ -109,7 +109,7 @@ enum ia_css_err
109ia_css_get_3a_statistics(struct ia_css_3a_statistics *host_stats, 109ia_css_get_3a_statistics(struct ia_css_3a_statistics *host_stats,
110 const struct ia_css_isp_3a_statistics *isp_stats); 110 const struct ia_css_isp_3a_statistics *isp_stats);
111 111
112/** @brief Translate 3A statistics from ISP format to host format. 112/* @brief Translate 3A statistics from ISP format to host format.
113 * @param[out] host_stats host-format statistics 113 * @param[out] host_stats host-format statistics
114 * @param[in] isp_stats ISP-format statistics 114 * @param[in] isp_stats ISP-format statistics
115 * @return None 115 * @return None
@@ -125,35 +125,35 @@ ia_css_translate_3a_statistics(
125 125
126/* Convenience functions for alloc/free of certain datatypes */ 126/* Convenience functions for alloc/free of certain datatypes */
127 127
128/** @brief Allocate memory for the 3a statistics on the ISP 128/* @brief Allocate memory for the 3a statistics on the ISP
129 * @param[in] grid The grid. 129 * @param[in] grid The grid.
130 * @return Pointer to the allocated 3a statistics buffer on the ISP 130 * @return Pointer to the allocated 3a statistics buffer on the ISP
131*/ 131*/
132struct ia_css_isp_3a_statistics * 132struct ia_css_isp_3a_statistics *
133ia_css_isp_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid); 133ia_css_isp_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid);
134 134
135/** @brief Free the 3a statistics memory on the isp 135/* @brief Free the 3a statistics memory on the isp
136 * @param[in] me Pointer to the 3a statistics buffer on the ISP. 136 * @param[in] me Pointer to the 3a statistics buffer on the ISP.
137 * @return None 137 * @return None
138*/ 138*/
139void 139void
140ia_css_isp_3a_statistics_free(struct ia_css_isp_3a_statistics *me); 140ia_css_isp_3a_statistics_free(struct ia_css_isp_3a_statistics *me);
141 141
142/** @brief Allocate memory for the 3a statistics on the host 142/* @brief Allocate memory for the 3a statistics on the host
143 * @param[in] grid The grid. 143 * @param[in] grid The grid.
144 * @return Pointer to the allocated 3a statistics buffer on the host 144 * @return Pointer to the allocated 3a statistics buffer on the host
145*/ 145*/
146struct ia_css_3a_statistics * 146struct ia_css_3a_statistics *
147ia_css_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid); 147ia_css_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid);
148 148
149/** @brief Free the 3a statistics memory on the host 149/* @brief Free the 3a statistics memory on the host
150 * @param[in] me Pointer to the 3a statistics buffer on the host. 150 * @param[in] me Pointer to the 3a statistics buffer on the host.
151 * @return None 151 * @return None
152 */ 152 */
153void 153void
154ia_css_3a_statistics_free(struct ia_css_3a_statistics *me); 154ia_css_3a_statistics_free(struct ia_css_3a_statistics *me);
155 155
156/** @brief Allocate a 3a statistics map structure 156/* @brief Allocate a 3a statistics map structure
157 * @param[in] isp_stats pointer to ISP 3a statistis struct 157 * @param[in] isp_stats pointer to ISP 3a statistis struct
158 * @param[in] data_ptr host-side pointer to ISP 3a statistics. 158 * @param[in] data_ptr host-side pointer to ISP 3a statistics.
159 * @return Pointer to the allocated 3a statistics map 159 * @return Pointer to the allocated 3a statistics map
@@ -174,7 +174,7 @@ ia_css_isp_3a_statistics_map_allocate(
174 const struct ia_css_isp_3a_statistics *isp_stats, 174 const struct ia_css_isp_3a_statistics *isp_stats,
175 void *data_ptr); 175 void *data_ptr);
176 176
177/** @brief Free the 3a statistics map 177/* @brief Free the 3a statistics map
178 * @param[in] me Pointer to the 3a statistics map 178 * @param[in] me Pointer to the 3a statistics map
179 * @return None 179 * @return None
180 * 180 *
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_acc_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_acc_types.h
index a2a1873aca83..138bc3bb4627 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_acc_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_acc_types.h
@@ -15,7 +15,7 @@
15#ifndef _IA_CSS_ACC_TYPES_H 15#ifndef _IA_CSS_ACC_TYPES_H
16#define _IA_CSS_ACC_TYPES_H 16#define _IA_CSS_ACC_TYPES_H
17 17
18/** @file 18/* @file
19 * This file contains types used for acceleration 19 * This file contains types used for acceleration
20 */ 20 */
21 21
@@ -40,16 +40,16 @@
40 * in the kernel and HAL. 40 * in the kernel and HAL.
41*/ 41*/
42 42
43/** Type of acceleration. 43/* Type of acceleration.
44 */ 44 */
45enum ia_css_acc_type { 45enum ia_css_acc_type {
46 IA_CSS_ACC_NONE, /**< Normal binary */ 46 IA_CSS_ACC_NONE, /** Normal binary */
47 IA_CSS_ACC_OUTPUT, /**< Accelerator stage on output frame */ 47 IA_CSS_ACC_OUTPUT, /** Accelerator stage on output frame */
48 IA_CSS_ACC_VIEWFINDER, /**< Accelerator stage on viewfinder frame */ 48 IA_CSS_ACC_VIEWFINDER, /** Accelerator stage on viewfinder frame */
49 IA_CSS_ACC_STANDALONE, /**< Stand-alone acceleration */ 49 IA_CSS_ACC_STANDALONE, /** Stand-alone acceleration */
50}; 50};
51 51
52/** Cells types 52/* Cells types
53 */ 53 */
54enum ia_css_cell_type { 54enum ia_css_cell_type {
55 IA_CSS_SP0 = 0, 55 IA_CSS_SP0 = 0,
@@ -58,45 +58,45 @@ enum ia_css_cell_type {
58 MAX_NUM_OF_CELLS 58 MAX_NUM_OF_CELLS
59}; 59};
60 60
61/** Firmware types. 61/* Firmware types.
62 */ 62 */
63enum ia_css_fw_type { 63enum ia_css_fw_type {
64 ia_css_sp_firmware, /**< Firmware for the SP */ 64 ia_css_sp_firmware, /** Firmware for the SP */
65 ia_css_isp_firmware, /**< Firmware for the ISP */ 65 ia_css_isp_firmware, /** Firmware for the ISP */
66 ia_css_bootloader_firmware, /**< Firmware for the BootLoader */ 66 ia_css_bootloader_firmware, /** Firmware for the BootLoader */
67 ia_css_acc_firmware /**< Firmware for accelrations */ 67 ia_css_acc_firmware /** Firmware for accelrations */
68}; 68};
69 69
70struct ia_css_blob_descr; 70struct ia_css_blob_descr;
71 71
72/** Blob descriptor. 72/* Blob descriptor.
73 * This structure describes an SP or ISP blob. 73 * This structure describes an SP or ISP blob.
74 * It describes the test, data and bss sections as well as position in a 74 * It describes the test, data and bss sections as well as position in a
75 * firmware file. 75 * firmware file.
76 * For convenience, it contains dynamic data after loading. 76 * For convenience, it contains dynamic data after loading.
77 */ 77 */
78struct ia_css_blob_info { 78struct ia_css_blob_info {
79 /**< Static blob data */ 79 /** Static blob data */
80 uint32_t offset; /**< Blob offset in fw file */ 80 uint32_t offset; /** Blob offset in fw file */
81 struct ia_css_isp_param_memory_offsets memory_offsets; /**< offset wrt hdr in bytes */ 81 struct ia_css_isp_param_memory_offsets memory_offsets; /** offset wrt hdr in bytes */
82 uint32_t prog_name_offset; /**< offset wrt hdr in bytes */ 82 uint32_t prog_name_offset; /** offset wrt hdr in bytes */
83 uint32_t size; /**< Size of blob */ 83 uint32_t size; /** Size of blob */
84 uint32_t padding_size; /**< total cummulative of bytes added due to section alignment */ 84 uint32_t padding_size; /** total cummulative of bytes added due to section alignment */
85 uint32_t icache_source; /**< Position of icache in blob */ 85 uint32_t icache_source; /** Position of icache in blob */
86 uint32_t icache_size; /**< Size of icache section */ 86 uint32_t icache_size; /** Size of icache section */
87 uint32_t icache_padding;/**< bytes added due to icache section alignment */ 87 uint32_t icache_padding;/** bytes added due to icache section alignment */
88 uint32_t text_source; /**< Position of text in blob */ 88 uint32_t text_source; /** Position of text in blob */
89 uint32_t text_size; /**< Size of text section */ 89 uint32_t text_size; /** Size of text section */
90 uint32_t text_padding; /**< bytes added due to text section alignment */ 90 uint32_t text_padding; /** bytes added due to text section alignment */
91 uint32_t data_source; /**< Position of data in blob */ 91 uint32_t data_source; /** Position of data in blob */
92 uint32_t data_target; /**< Start of data in SP dmem */ 92 uint32_t data_target; /** Start of data in SP dmem */
93 uint32_t data_size; /**< Size of text section */ 93 uint32_t data_size; /** Size of text section */
94 uint32_t data_padding; /**< bytes added due to data section alignment */ 94 uint32_t data_padding; /** bytes added due to data section alignment */
95 uint32_t bss_target; /**< Start position of bss in SP dmem */ 95 uint32_t bss_target; /** Start position of bss in SP dmem */
96 uint32_t bss_size; /**< Size of bss section */ 96 uint32_t bss_size; /** Size of bss section */
97 /**< Dynamic data filled by loader */ 97 /** Dynamic data filled by loader */
98 CSS_ALIGN(const void *code, 8); /**< Code section absolute pointer within fw, code = icache + text */ 98 CSS_ALIGN(const void *code, 8); /** Code section absolute pointer within fw, code = icache + text */
99 CSS_ALIGN(const void *data, 8); /**< Data section absolute pointer within fw, data = data + bss */ 99 CSS_ALIGN(const void *data, 8); /** Data section absolute pointer within fw, data = data + bss */
100}; 100};
101 101
102struct ia_css_binary_input_info { 102struct ia_css_binary_input_info {
@@ -140,9 +140,9 @@ struct ia_css_binary_s3a_info {
140 uint32_t fixed_s3a_deci_log; 140 uint32_t fixed_s3a_deci_log;
141}; 141};
142 142
143/** DPC related binary info */ 143/* DPC related binary info */
144struct ia_css_binary_dpc_info { 144struct ia_css_binary_dpc_info {
145 uint32_t bnr_lite; /**< bnr lite enable flag */ 145 uint32_t bnr_lite; /** bnr lite enable flag */
146}; 146};
147 147
148struct ia_css_binary_iterator_info { 148struct ia_css_binary_iterator_info {
@@ -193,7 +193,7 @@ struct ia_css_binary_block_info {
193 uint32_t output_block_height; 193 uint32_t output_block_height;
194}; 194};
195 195
196/** Structure describing an ISP binary. 196/* Structure describing an ISP binary.
197 * It describes the capabilities of a binary, like the maximum resolution, 197 * It describes the capabilities of a binary, like the maximum resolution,
198 * support features, dma channels, uds features, etc. 198 * support features, dma channels, uds features, etc.
199 * This part is to be used by the SP. 199 * This part is to be used by the SP.
@@ -210,7 +210,7 @@ struct ia_css_binary_info {
210 struct ia_css_binary_dvs_info dvs; 210 struct ia_css_binary_dvs_info dvs;
211 struct ia_css_binary_vf_dec_info vf_dec; 211 struct ia_css_binary_vf_dec_info vf_dec;
212 struct ia_css_binary_s3a_info s3a; 212 struct ia_css_binary_s3a_info s3a;
213 struct ia_css_binary_dpc_info dpc_bnr; /**< DPC related binary info */ 213 struct ia_css_binary_dpc_info dpc_bnr; /** DPC related binary info */
214 struct ia_css_binary_iterator_info iterator; 214 struct ia_css_binary_iterator_info iterator;
215 struct ia_css_binary_address_info addresses; 215 struct ia_css_binary_address_info addresses;
216 struct ia_css_binary_uds_info uds; 216 struct ia_css_binary_uds_info uds;
@@ -269,7 +269,7 @@ struct ia_css_binary_info {
269 } dma; 269 } dma;
270}; 270};
271 271
272/** Structure describing an ISP binary. 272/* Structure describing an ISP binary.
273 * It describes the capabilities of a binary, like the maximum resolution, 273 * It describes the capabilities of a binary, like the maximum resolution,
274 * support features, dma channels, uds features, etc. 274 * support features, dma channels, uds features, etc.
275 */ 275 */
@@ -281,8 +281,8 @@ struct ia_css_binary_xinfo {
281 enum ia_css_acc_type type; 281 enum ia_css_acc_type type;
282 CSS_ALIGN(int32_t num_output_formats, 8); 282 CSS_ALIGN(int32_t num_output_formats, 8);
283 enum ia_css_frame_format output_formats[IA_CSS_FRAME_FORMAT_NUM]; 283 enum ia_css_frame_format output_formats[IA_CSS_FRAME_FORMAT_NUM];
284 CSS_ALIGN(int32_t num_vf_formats, 8); /**< number of supported vf formats */ 284 CSS_ALIGN(int32_t num_vf_formats, 8); /** number of supported vf formats */
285 enum ia_css_frame_format vf_formats[IA_CSS_FRAME_FORMAT_NUM]; /**< types of supported vf formats */ 285 enum ia_css_frame_format vf_formats[IA_CSS_FRAME_FORMAT_NUM]; /** types of supported vf formats */
286 uint8_t num_output_pins; 286 uint8_t num_output_pins;
287 ia_css_ptr xmem_addr; 287 ia_css_ptr xmem_addr;
288 CSS_ALIGN(const struct ia_css_blob_descr *blob, 8); 288 CSS_ALIGN(const struct ia_css_blob_descr *blob, 8);
@@ -291,55 +291,55 @@ struct ia_css_binary_xinfo {
291 CSS_ALIGN(struct ia_css_binary_xinfo *next, 8); 291 CSS_ALIGN(struct ia_css_binary_xinfo *next, 8);
292}; 292};
293 293
294/** Structure describing the Bootloader (an ISP binary). 294/* Structure describing the Bootloader (an ISP binary).
295 * It contains several address, either in ddr, isp_dmem or 295 * It contains several address, either in ddr, isp_dmem or
296 * the entry function in icache. 296 * the entry function in icache.
297 */ 297 */
298struct ia_css_bl_info { 298struct ia_css_bl_info {
299 uint32_t num_dma_cmds; /**< Number of cmds sent by CSS */ 299 uint32_t num_dma_cmds; /** Number of cmds sent by CSS */
300 uint32_t dma_cmd_list; /**< Dma command list sent by CSS */ 300 uint32_t dma_cmd_list; /** Dma command list sent by CSS */
301 uint32_t sw_state; /**< Polled from css */ 301 uint32_t sw_state; /** Polled from css */
302 /* Entry functions */ 302 /* Entry functions */
303 uint32_t bl_entry; /**< The SP entry function */ 303 uint32_t bl_entry; /** The SP entry function */
304}; 304};
305 305
306/** Structure describing the SP binary. 306/* Structure describing the SP binary.
307 * It contains several address, either in ddr, sp_dmem or 307 * It contains several address, either in ddr, sp_dmem or
308 * the entry function in pmem. 308 * the entry function in pmem.
309 */ 309 */
310struct ia_css_sp_info { 310struct ia_css_sp_info {
311 uint32_t init_dmem_data; /**< data sect config, stored to dmem */ 311 uint32_t init_dmem_data; /** data sect config, stored to dmem */
312 uint32_t per_frame_data; /**< Per frame data, stored to dmem */ 312 uint32_t per_frame_data; /** Per frame data, stored to dmem */
313 uint32_t group; /**< Per pipeline data, loaded by dma */ 313 uint32_t group; /** Per pipeline data, loaded by dma */
314 uint32_t output; /**< SP output data, loaded by dmem */ 314 uint32_t output; /** SP output data, loaded by dmem */
315 uint32_t host_sp_queue; /**< Host <-> SP queues */ 315 uint32_t host_sp_queue; /** Host <-> SP queues */
316 uint32_t host_sp_com;/**< Host <-> SP commands */ 316 uint32_t host_sp_com;/** Host <-> SP commands */
317 uint32_t isp_started; /**< Polled from sensor thread, csim only */ 317 uint32_t isp_started; /** Polled from sensor thread, csim only */
318 uint32_t sw_state; /**< Polled from css */ 318 uint32_t sw_state; /** Polled from css */
319 uint32_t host_sp_queues_initialized; /**< Polled from the SP */ 319 uint32_t host_sp_queues_initialized; /** Polled from the SP */
320 uint32_t sleep_mode; /**< different mode to halt SP */ 320 uint32_t sleep_mode; /** different mode to halt SP */
321 uint32_t invalidate_tlb; /**< inform SP to invalidate mmu TLB */ 321 uint32_t invalidate_tlb; /** inform SP to invalidate mmu TLB */
322#ifndef ISP2401 322#ifndef ISP2401
323 uint32_t stop_copy_preview; /**< suspend copy and preview pipe when capture */ 323 uint32_t stop_copy_preview; /** suspend copy and preview pipe when capture */
324#endif 324#endif
325 uint32_t debug_buffer_ddr_address; /**< inform SP the address 325 uint32_t debug_buffer_ddr_address; /** inform SP the address
326 of DDR debug queue */ 326 of DDR debug queue */
327 uint32_t perf_counter_input_system_error; /**< input system perf 327 uint32_t perf_counter_input_system_error; /** input system perf
328 counter array */ 328 counter array */
329#ifdef HAS_WATCHDOG_SP_THREAD_DEBUG 329#ifdef HAS_WATCHDOG_SP_THREAD_DEBUG
330 uint32_t debug_wait; /**< thread/pipe post mortem debug */ 330 uint32_t debug_wait; /** thread/pipe post mortem debug */
331 uint32_t debug_stage; /**< thread/pipe post mortem debug */ 331 uint32_t debug_stage; /** thread/pipe post mortem debug */
332 uint32_t debug_stripe; /**< thread/pipe post mortem debug */ 332 uint32_t debug_stripe; /** thread/pipe post mortem debug */
333#endif 333#endif
334 uint32_t threads_stack; /**< sp thread's stack pointers */ 334 uint32_t threads_stack; /** sp thread's stack pointers */
335 uint32_t threads_stack_size; /**< sp thread's stack sizes */ 335 uint32_t threads_stack_size; /** sp thread's stack sizes */
336 uint32_t curr_binary_id; /**< current binary id */ 336 uint32_t curr_binary_id; /** current binary id */
337 uint32_t raw_copy_line_count; /**< raw copy line counter */ 337 uint32_t raw_copy_line_count; /** raw copy line counter */
338 uint32_t ddr_parameter_address; /**< acc param ddrptr, sp dmem */ 338 uint32_t ddr_parameter_address; /** acc param ddrptr, sp dmem */
339 uint32_t ddr_parameter_size; /**< acc param size, sp dmem */ 339 uint32_t ddr_parameter_size; /** acc param size, sp dmem */
340 /* Entry functions */ 340 /* Entry functions */
341 uint32_t sp_entry; /**< The SP entry function */ 341 uint32_t sp_entry; /** The SP entry function */
342 uint32_t tagger_frames_addr; /**< Base address of tagger state */ 342 uint32_t tagger_frames_addr; /** Base address of tagger state */
343}; 343};
344 344
345/* The following #if is there because this header file is also included 345/* The following #if is there because this header file is also included
@@ -348,37 +348,37 @@ struct ia_css_sp_info {
348 More permanent solution will be to refactor this include. 348 More permanent solution will be to refactor this include.
349*/ 349*/
350#if !defined(__ISP) 350#if !defined(__ISP)
351/** Accelerator firmware information. 351/* Accelerator firmware information.
352 */ 352 */
353struct ia_css_acc_info { 353struct ia_css_acc_info {
354 uint32_t per_frame_data; /**< Dummy for now */ 354 uint32_t per_frame_data; /** Dummy for now */
355}; 355};
356 356
357/** Firmware information. 357/* Firmware information.
358 */ 358 */
359union ia_css_fw_union { 359union ia_css_fw_union {
360 struct ia_css_binary_xinfo isp; /**< ISP info */ 360 struct ia_css_binary_xinfo isp; /** ISP info */
361 struct ia_css_sp_info sp; /**< SP info */ 361 struct ia_css_sp_info sp; /** SP info */
362 struct ia_css_bl_info bl; /**< Bootloader info */ 362 struct ia_css_bl_info bl; /** Bootloader info */
363 struct ia_css_acc_info acc; /**< Accelerator info */ 363 struct ia_css_acc_info acc; /** Accelerator info */
364}; 364};
365 365
366/** Firmware information. 366/* Firmware information.
367 */ 367 */
368struct ia_css_fw_info { 368struct ia_css_fw_info {
369 size_t header_size; /**< size of fw header */ 369 size_t header_size; /** size of fw header */
370 CSS_ALIGN(uint32_t type, 8); 370 CSS_ALIGN(uint32_t type, 8);
371 union ia_css_fw_union info; /**< Binary info */ 371 union ia_css_fw_union info; /** Binary info */
372 struct ia_css_blob_info blob; /**< Blob info */ 372 struct ia_css_blob_info blob; /** Blob info */
373 /* Dynamic part */ 373 /* Dynamic part */
374 struct ia_css_fw_info *next; 374 struct ia_css_fw_info *next;
375 CSS_ALIGN(uint32_t loaded, 8); /**< Firmware has been loaded */ 375 CSS_ALIGN(uint32_t loaded, 8); /** Firmware has been loaded */
376 CSS_ALIGN(const uint8_t *isp_code, 8); /**< ISP pointer to code */ 376 CSS_ALIGN(const uint8_t *isp_code, 8); /** ISP pointer to code */
377 /**< Firmware handle between user space and kernel */ 377 /** Firmware handle between user space and kernel */
378 CSS_ALIGN(uint32_t handle, 8); 378 CSS_ALIGN(uint32_t handle, 8);
379 /**< Sections to copy from/to ISP */ 379 /** Sections to copy from/to ISP */
380 struct ia_css_isp_param_css_segments mem_initializers; 380 struct ia_css_isp_param_css_segments mem_initializers;
381 /**< Initializer for local ISP memories */ 381 /** Initializer for local ISP memories */
382}; 382};
383 383
384struct ia_css_blob_descr { 384struct ia_css_blob_descr {
@@ -390,39 +390,39 @@ struct ia_css_blob_descr {
390 390
391struct ia_css_acc_fw; 391struct ia_css_acc_fw;
392 392
393/** Structure describing the SP binary of a stand-alone accelerator. 393/* Structure describing the SP binary of a stand-alone accelerator.
394 */ 394 */
395struct ia_css_acc_sp { 395struct ia_css_acc_sp {
396 void (*init)(struct ia_css_acc_fw *); /**< init for crun */ 396 void (*init)(struct ia_css_acc_fw *); /** init for crun */
397 uint32_t sp_prog_name_offset; /**< program name offset wrt hdr in bytes */ 397 uint32_t sp_prog_name_offset; /** program name offset wrt hdr in bytes */
398 uint32_t sp_blob_offset; /**< blob offset wrt hdr in bytes */ 398 uint32_t sp_blob_offset; /** blob offset wrt hdr in bytes */
399 void *entry; /**< Address of sp entry point */ 399 void *entry; /** Address of sp entry point */
400 uint32_t *css_abort; /**< SP dmem abort flag */ 400 uint32_t *css_abort; /** SP dmem abort flag */
401 void *isp_code; /**< SP dmem address holding xmem 401 void *isp_code; /** SP dmem address holding xmem
402 address of isp code */ 402 address of isp code */
403 struct ia_css_fw_info fw; /**< SP fw descriptor */ 403 struct ia_css_fw_info fw; /** SP fw descriptor */
404 const uint8_t *code; /**< ISP pointer of allocated SP code */ 404 const uint8_t *code; /** ISP pointer of allocated SP code */
405}; 405};
406 406
407/** Acceleration firmware descriptor. 407/* Acceleration firmware descriptor.
408 * This descriptor descibes either SP code (stand-alone), or 408 * This descriptor descibes either SP code (stand-alone), or
409 * ISP code (a separate pipeline stage). 409 * ISP code (a separate pipeline stage).
410 */ 410 */
411struct ia_css_acc_fw_hdr { 411struct ia_css_acc_fw_hdr {
412 enum ia_css_acc_type type; /**< Type of accelerator */ 412 enum ia_css_acc_type type; /** Type of accelerator */
413 uint32_t isp_prog_name_offset; /**< program name offset wrt 413 uint32_t isp_prog_name_offset; /** program name offset wrt
414 header in bytes */ 414 header in bytes */
415 uint32_t isp_blob_offset; /**< blob offset wrt header 415 uint32_t isp_blob_offset; /** blob offset wrt header
416 in bytes */ 416 in bytes */
417 uint32_t isp_size; /**< Size of isp blob */ 417 uint32_t isp_size; /** Size of isp blob */
418 const uint8_t *isp_code; /**< ISP pointer to code */ 418 const uint8_t *isp_code; /** ISP pointer to code */
419 struct ia_css_acc_sp sp; /**< Standalone sp code */ 419 struct ia_css_acc_sp sp; /** Standalone sp code */
420 /**< Firmware handle between user space and kernel */ 420 /** Firmware handle between user space and kernel */
421 uint32_t handle; 421 uint32_t handle;
422 struct ia_css_data parameters; /**< Current SP parameters */ 422 struct ia_css_data parameters; /** Current SP parameters */
423}; 423};
424 424
425/** Firmware structure. 425/* Firmware structure.
426 * This contains the header and actual blobs. 426 * This contains the header and actual blobs.
427 * For standalone, it contains SP and ISP blob. 427 * For standalone, it contains SP and ISP blob.
428 * For a pipeline stage accelerator, it contains ISP code only. 428 * For a pipeline stage accelerator, it contains ISP code only.
@@ -430,7 +430,7 @@ struct ia_css_acc_fw_hdr {
430 * header and computed using the access macros below. 430 * header and computed using the access macros below.
431 */ 431 */
432struct ia_css_acc_fw { 432struct ia_css_acc_fw {
433 struct ia_css_acc_fw_hdr header; /**< firmware header */ 433 struct ia_css_acc_fw_hdr header; /** firmware header */
434 /* 434 /*
435 int8_t isp_progname[]; **< ISP program name 435 int8_t isp_progname[]; **< ISP program name
436 int8_t sp_progname[]; **< SP program name, stand-alone only 436 int8_t sp_progname[]; **< SP program name, stand-alone only
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_buffer.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_buffer.h
index b2ecf3618c15..a0058eac7d5a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_buffer.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_buffer.h
@@ -15,7 +15,7 @@
15#ifndef __IA_CSS_BUFFER_H 15#ifndef __IA_CSS_BUFFER_H
16#define __IA_CSS_BUFFER_H 16#define __IA_CSS_BUFFER_H
17 17
18/** @file 18/* @file
19 * This file contains datastructures and types for buffers used in CSS 19 * This file contains datastructures and types for buffers used in CSS
20 */ 20 */
21 21
@@ -23,7 +23,7 @@
23#include "ia_css_types.h" 23#include "ia_css_types.h"
24#include "ia_css_timer.h" 24#include "ia_css_timer.h"
25 25
26/** Enumeration of buffer types. Buffers can be queued and de-queued 26/* Enumeration of buffer types. Buffers can be queued and de-queued
27 * to hand them over between IA and ISP. 27 * to hand them over between IA and ISP.
28 */ 28 */
29enum ia_css_buffer_type { 29enum ia_css_buffer_type {
@@ -48,28 +48,28 @@ enum ia_css_buffer_type {
48 48
49/* Driver API is not SP/ISP visible, 64 bit types not supported on hivecc */ 49/* Driver API is not SP/ISP visible, 64 bit types not supported on hivecc */
50#if !defined(__ISP) 50#if !defined(__ISP)
51/** Buffer structure. This is a container structure that enables content 51/* Buffer structure. This is a container structure that enables content
52 * independent buffer queues and access functions. 52 * independent buffer queues and access functions.
53 */ 53 */
54struct ia_css_buffer { 54struct ia_css_buffer {
55 enum ia_css_buffer_type type; /**< Buffer type. */ 55 enum ia_css_buffer_type type; /** Buffer type. */
56 unsigned int exp_id; 56 unsigned int exp_id;
57 /**< exposure id for this buffer; 0 = not available 57 /** exposure id for this buffer; 0 = not available
58 see ia_css_event_public.h for more detail. */ 58 see ia_css_event_public.h for more detail. */
59 union { 59 union {
60 struct ia_css_isp_3a_statistics *stats_3a; /**< 3A statistics & optionally RGBY statistics. */ 60 struct ia_css_isp_3a_statistics *stats_3a; /** 3A statistics & optionally RGBY statistics. */
61 struct ia_css_isp_dvs_statistics *stats_dvs; /**< DVS statistics. */ 61 struct ia_css_isp_dvs_statistics *stats_dvs; /** DVS statistics. */
62 struct ia_css_isp_skc_dvs_statistics *stats_skc_dvs; /**< SKC DVS statistics. */ 62 struct ia_css_isp_skc_dvs_statistics *stats_skc_dvs; /** SKC DVS statistics. */
63 struct ia_css_frame *frame; /**< Frame buffer. */ 63 struct ia_css_frame *frame; /** Frame buffer. */
64 struct ia_css_acc_param *custom_data; /**< Custom buffer. */ 64 struct ia_css_acc_param *custom_data; /** Custom buffer. */
65 struct ia_css_metadata *metadata; /**< Sensor metadata. */ 65 struct ia_css_metadata *metadata; /** Sensor metadata. */
66 } data; /**< Buffer data pointer. */ 66 } data; /** Buffer data pointer. */
67 uint64_t driver_cookie; /**< cookie for the driver */ 67 uint64_t driver_cookie; /** cookie for the driver */
68 struct ia_css_time_meas timing_data; /**< timing data (readings from the timer) */ 68 struct ia_css_time_meas timing_data; /** timing data (readings from the timer) */
69 struct ia_css_clock_tick isys_eof_clock_tick; /**< ISYS's end of frame timer tick*/ 69 struct ia_css_clock_tick isys_eof_clock_tick; /** ISYS's end of frame timer tick*/
70}; 70};
71 71
72/** @brief Dequeue param buffers from sp2host_queue 72/* @brief Dequeue param buffers from sp2host_queue
73 * 73 *
74 * @return None 74 * @return None
75 * 75 *
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_control.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_control.h
index a15d3e368341..021a313fab85 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_control.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_control.h
@@ -15,7 +15,7 @@
15#ifndef __IA_CSS_CONTROL_H 15#ifndef __IA_CSS_CONTROL_H
16#define __IA_CSS_CONTROL_H 16#define __IA_CSS_CONTROL_H
17 17
18/** @file 18/* @file
19 * This file contains functionality for starting and controlling CSS 19 * This file contains functionality for starting and controlling CSS
20 */ 20 */
21 21
@@ -24,7 +24,7 @@
24#include <ia_css_firmware.h> 24#include <ia_css_firmware.h>
25#include <ia_css_irq.h> 25#include <ia_css_irq.h>
26 26
27/** @brief Initialize the CSS API. 27/* @brief Initialize the CSS API.
28 * @param[in] env Environment, provides functions to access the 28 * @param[in] env Environment, provides functions to access the
29 * environment in which the CSS code runs. This is 29 * environment in which the CSS code runs. This is
30 * used for host side memory access and message 30 * used for host side memory access and message
@@ -51,7 +51,7 @@ enum ia_css_err ia_css_init(
51 uint32_t l1_base, 51 uint32_t l1_base,
52 enum ia_css_irq_type irq_type); 52 enum ia_css_irq_type irq_type);
53 53
54/** @brief Un-initialize the CSS API. 54/* @brief Un-initialize the CSS API.
55 * @return None 55 * @return None
56 * 56 *
57 * This function deallocates all memory that has been allocated by the CSS API 57 * This function deallocates all memory that has been allocated by the CSS API
@@ -66,7 +66,7 @@ enum ia_css_err ia_css_init(
66void 66void
67ia_css_uninit(void); 67ia_css_uninit(void);
68 68
69/** @brief Suspend CSS API for power down 69/* @brief Suspend CSS API for power down
70 * @return success or faulure code 70 * @return success or faulure code
71 * 71 *
72 * suspend shuts down the system by: 72 * suspend shuts down the system by:
@@ -80,7 +80,7 @@ ia_css_uninit(void);
80enum ia_css_err 80enum ia_css_err
81ia_css_suspend(void); 81ia_css_suspend(void);
82 82
83/** @brief Resume CSS API from power down 83/* @brief Resume CSS API from power down
84 * @return success or failure code 84 * @return success or failure code
85 * 85 *
86 * After a power cycle, this function will bring the CSS API back into 86 * After a power cycle, this function will bring the CSS API back into
@@ -91,7 +91,7 @@ ia_css_suspend(void);
91enum ia_css_err 91enum ia_css_err
92ia_css_resume(void); 92ia_css_resume(void);
93 93
94/** @brief Enable use of a separate queue for ISYS events. 94/* @brief Enable use of a separate queue for ISYS events.
95 * 95 *
96 * @param[in] enable: enable or disable use of separate ISYS event queues. 96 * @param[in] enable: enable or disable use of separate ISYS event queues.
97 * @return error if called when SP is running. 97 * @return error if called when SP is running.
@@ -105,7 +105,7 @@ ia_css_resume(void);
105enum ia_css_err 105enum ia_css_err
106ia_css_enable_isys_event_queue(bool enable); 106ia_css_enable_isys_event_queue(bool enable);
107 107
108/** @brief Test whether the ISP has started. 108/* @brief Test whether the ISP has started.
109 * 109 *
110 * @return Boolean flag true if the ISP has started or false otherwise. 110 * @return Boolean flag true if the ISP has started or false otherwise.
111 * 111 *
@@ -114,7 +114,7 @@ ia_css_enable_isys_event_queue(bool enable);
114bool 114bool
115ia_css_isp_has_started(void); 115ia_css_isp_has_started(void);
116 116
117/** @brief Test whether the SP has initialized. 117/* @brief Test whether the SP has initialized.
118 * 118 *
119 * @return Boolean flag true if the SP has initialized or false otherwise. 119 * @return Boolean flag true if the SP has initialized or false otherwise.
120 * 120 *
@@ -123,7 +123,7 @@ ia_css_isp_has_started(void);
123bool 123bool
124ia_css_sp_has_initialized(void); 124ia_css_sp_has_initialized(void);
125 125
126/** @brief Test whether the SP has terminated. 126/* @brief Test whether the SP has terminated.
127 * 127 *
128 * @return Boolean flag true if the SP has terminated or false otherwise. 128 * @return Boolean flag true if the SP has terminated or false otherwise.
129 * 129 *
@@ -132,7 +132,7 @@ ia_css_sp_has_initialized(void);
132bool 132bool
133ia_css_sp_has_terminated(void); 133ia_css_sp_has_terminated(void);
134 134
135/** @brief start SP hardware 135/* @brief start SP hardware
136 * 136 *
137 * @return IA_CSS_SUCCESS or error code upon error. 137 * @return IA_CSS_SUCCESS or error code upon error.
138 * 138 *
@@ -144,7 +144,7 @@ enum ia_css_err
144ia_css_start_sp(void); 144ia_css_start_sp(void);
145 145
146 146
147/** @brief stop SP hardware 147/* @brief stop SP hardware
148 * 148 *
149 * @return IA_CSS_SUCCESS or error code upon error. 149 * @return IA_CSS_SUCCESS or error code upon error.
150 * 150 *
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.h
index 59459f7a9876..84a960b7abbc 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.h
@@ -15,7 +15,7 @@
15#ifndef _IA_CSS_DEVICE_ACCESS_H 15#ifndef _IA_CSS_DEVICE_ACCESS_H
16#define _IA_CSS_DEVICE_ACCESS_H 16#define _IA_CSS_DEVICE_ACCESS_H
17 17
18/** @file 18/* @file
19 * File containing internal functions for the CSS-API to access the CSS device. 19 * File containing internal functions for the CSS-API to access the CSS device.
20 */ 20 */
21 21
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_dvs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_dvs.h
index 147bf81959d3..1f01534964e3 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_dvs.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_dvs.h
@@ -15,7 +15,7 @@
15#ifndef __IA_CSS_DVS_H 15#ifndef __IA_CSS_DVS_H
16#define __IA_CSS_DVS_H 16#define __IA_CSS_DVS_H
17 17
18/** @file 18/* @file
19 * This file contains types for DVS statistics 19 * This file contains types for DVS statistics
20 */ 20 */
21 21
@@ -31,7 +31,7 @@ enum dvs_statistics_type {
31}; 31};
32 32
33 33
34/** Structure that holds DVS statistics in the ISP internal 34/* Structure that holds DVS statistics in the ISP internal
35 * format. Use ia_css_get_dvs_statistics() to translate 35 * format. Use ia_css_get_dvs_statistics() to translate
36 * this to the format used on the host (DVS engine). 36 * this to the format used on the host (DVS engine).
37 * */ 37 * */
@@ -40,12 +40,12 @@ struct ia_css_isp_dvs_statistics {
40 ia_css_ptr ver_proj; 40 ia_css_ptr ver_proj;
41 uint32_t hor_size; 41 uint32_t hor_size;
42 uint32_t ver_size; 42 uint32_t ver_size;
43 uint32_t exp_id; /**< see ia_css_event_public.h for more detail */ 43 uint32_t exp_id; /** see ia_css_event_public.h for more detail */
44 ia_css_ptr data_ptr; /* base pointer containing all memory */ 44 ia_css_ptr data_ptr; /* base pointer containing all memory */
45 uint32_t size; /* size of allocated memory in data_ptr */ 45 uint32_t size; /* size of allocated memory in data_ptr */
46}; 46};
47 47
48/** Structure that holds SKC DVS statistics in the ISP internal 48/* Structure that holds SKC DVS statistics in the ISP internal
49 * format. Use ia_css_dvs_statistics_get() to translate this to 49 * format. Use ia_css_dvs_statistics_get() to translate this to
50 * the format used on the host. 50 * the format used on the host.
51 * */ 51 * */
@@ -82,7 +82,7 @@ union ia_css_dvs_statistics_host {
82 struct ia_css_skc_dvs_statistics *p_skc_dvs_statistics_host; 82 struct ia_css_skc_dvs_statistics *p_skc_dvs_statistics_host;
83}; 83};
84 84
85/** @brief Copy DVS statistics from an ISP buffer to a host buffer. 85/* @brief Copy DVS statistics from an ISP buffer to a host buffer.
86 * @param[in] host_stats Host buffer 86 * @param[in] host_stats Host buffer
87 * @param[in] isp_stats ISP buffer 87 * @param[in] isp_stats ISP buffer
88 * @return error value if temporary memory cannot be allocated 88 * @return error value if temporary memory cannot be allocated
@@ -100,7 +100,7 @@ enum ia_css_err
100ia_css_get_dvs_statistics(struct ia_css_dvs_statistics *host_stats, 100ia_css_get_dvs_statistics(struct ia_css_dvs_statistics *host_stats,
101 const struct ia_css_isp_dvs_statistics *isp_stats); 101 const struct ia_css_isp_dvs_statistics *isp_stats);
102 102
103/** @brief Translate DVS statistics from ISP format to host format 103/* @brief Translate DVS statistics from ISP format to host format
104 * @param[in] host_stats Host buffer 104 * @param[in] host_stats Host buffer
105 * @param[in] isp_stats ISP buffer 105 * @param[in] isp_stats ISP buffer
106 * @return None 106 * @return None
@@ -116,7 +116,7 @@ ia_css_translate_dvs_statistics(
116 struct ia_css_dvs_statistics *host_stats, 116 struct ia_css_dvs_statistics *host_stats,
117 const struct ia_css_isp_dvs_statistics_map *isp_stats); 117 const struct ia_css_isp_dvs_statistics_map *isp_stats);
118 118
119/** @brief Copy DVS 2.0 statistics from an ISP buffer to a host buffer. 119/* @brief Copy DVS 2.0 statistics from an ISP buffer to a host buffer.
120 * @param[in] host_stats Host buffer 120 * @param[in] host_stats Host buffer
121 * @param[in] isp_stats ISP buffer 121 * @param[in] isp_stats ISP buffer
122 * @return error value if temporary memory cannot be allocated 122 * @return error value if temporary memory cannot be allocated
@@ -134,7 +134,7 @@ enum ia_css_err
134ia_css_get_dvs2_statistics(struct ia_css_dvs2_statistics *host_stats, 134ia_css_get_dvs2_statistics(struct ia_css_dvs2_statistics *host_stats,
135 const struct ia_css_isp_dvs_statistics *isp_stats); 135 const struct ia_css_isp_dvs_statistics *isp_stats);
136 136
137/** @brief Translate DVS2 statistics from ISP format to host format 137/* @brief Translate DVS2 statistics from ISP format to host format
138 * @param[in] host_stats Host buffer 138 * @param[in] host_stats Host buffer
139 * @param[in] isp_stats ISP buffer 139 * @param[in] isp_stats ISP buffer
140 * @return None 140 * @return None
@@ -150,7 +150,7 @@ ia_css_translate_dvs2_statistics(
150 struct ia_css_dvs2_statistics *host_stats, 150 struct ia_css_dvs2_statistics *host_stats,
151 const struct ia_css_isp_dvs_statistics_map *isp_stats); 151 const struct ia_css_isp_dvs_statistics_map *isp_stats);
152 152
153/** @brief Copy DVS statistics from an ISP buffer to a host buffer. 153/* @brief Copy DVS statistics from an ISP buffer to a host buffer.
154 * @param[in] type - DVS statistics type 154 * @param[in] type - DVS statistics type
155 * @param[in] host_stats Host buffer 155 * @param[in] host_stats Host buffer
156 * @param[in] isp_stats ISP buffer 156 * @param[in] isp_stats ISP buffer
@@ -161,105 +161,105 @@ ia_css_dvs_statistics_get(enum dvs_statistics_type type,
161 union ia_css_dvs_statistics_host *host_stats, 161 union ia_css_dvs_statistics_host *host_stats,
162 const union ia_css_dvs_statistics_isp *isp_stats); 162 const union ia_css_dvs_statistics_isp *isp_stats);
163 163
164/** @brief Allocate the DVS statistics memory on the ISP 164/* @brief Allocate the DVS statistics memory on the ISP
165 * @param[in] grid The grid. 165 * @param[in] grid The grid.
166 * @return Pointer to the allocated DVS statistics buffer on the ISP 166 * @return Pointer to the allocated DVS statistics buffer on the ISP
167*/ 167*/
168struct ia_css_isp_dvs_statistics * 168struct ia_css_isp_dvs_statistics *
169ia_css_isp_dvs_statistics_allocate(const struct ia_css_dvs_grid_info *grid); 169ia_css_isp_dvs_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
170 170
171/** @brief Free the DVS statistics memory on the ISP 171/* @brief Free the DVS statistics memory on the ISP
172 * @param[in] me Pointer to the DVS statistics buffer on the ISP. 172 * @param[in] me Pointer to the DVS statistics buffer on the ISP.
173 * @return None 173 * @return None
174*/ 174*/
175void 175void
176ia_css_isp_dvs_statistics_free(struct ia_css_isp_dvs_statistics *me); 176ia_css_isp_dvs_statistics_free(struct ia_css_isp_dvs_statistics *me);
177 177
178/** @brief Allocate the DVS 2.0 statistics memory 178/* @brief Allocate the DVS 2.0 statistics memory
179 * @param[in] grid The grid. 179 * @param[in] grid The grid.
180 * @return Pointer to the allocated DVS statistics buffer on the ISP 180 * @return Pointer to the allocated DVS statistics buffer on the ISP
181*/ 181*/
182struct ia_css_isp_dvs_statistics * 182struct ia_css_isp_dvs_statistics *
183ia_css_isp_dvs2_statistics_allocate(const struct ia_css_dvs_grid_info *grid); 183ia_css_isp_dvs2_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
184 184
185/** @brief Free the DVS 2.0 statistics memory 185/* @brief Free the DVS 2.0 statistics memory
186 * @param[in] me Pointer to the DVS statistics buffer on the ISP. 186 * @param[in] me Pointer to the DVS statistics buffer on the ISP.
187 * @return None 187 * @return None
188*/ 188*/
189void 189void
190ia_css_isp_dvs2_statistics_free(struct ia_css_isp_dvs_statistics *me); 190ia_css_isp_dvs2_statistics_free(struct ia_css_isp_dvs_statistics *me);
191 191
192/** @brief Allocate the DVS statistics memory on the host 192/* @brief Allocate the DVS statistics memory on the host
193 * @param[in] grid The grid. 193 * @param[in] grid The grid.
194 * @return Pointer to the allocated DVS statistics buffer on the host 194 * @return Pointer to the allocated DVS statistics buffer on the host
195*/ 195*/
196struct ia_css_dvs_statistics * 196struct ia_css_dvs_statistics *
197ia_css_dvs_statistics_allocate(const struct ia_css_dvs_grid_info *grid); 197ia_css_dvs_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
198 198
199/** @brief Free the DVS statistics memory on the host 199/* @brief Free the DVS statistics memory on the host
200 * @param[in] me Pointer to the DVS statistics buffer on the host. 200 * @param[in] me Pointer to the DVS statistics buffer on the host.
201 * @return None 201 * @return None
202*/ 202*/
203void 203void
204ia_css_dvs_statistics_free(struct ia_css_dvs_statistics *me); 204ia_css_dvs_statistics_free(struct ia_css_dvs_statistics *me);
205 205
206/** @brief Allocate the DVS coefficients memory 206/* @brief Allocate the DVS coefficients memory
207 * @param[in] grid The grid. 207 * @param[in] grid The grid.
208 * @return Pointer to the allocated DVS coefficients buffer 208 * @return Pointer to the allocated DVS coefficients buffer
209*/ 209*/
210struct ia_css_dvs_coefficients * 210struct ia_css_dvs_coefficients *
211ia_css_dvs_coefficients_allocate(const struct ia_css_dvs_grid_info *grid); 211ia_css_dvs_coefficients_allocate(const struct ia_css_dvs_grid_info *grid);
212 212
213/** @brief Free the DVS coefficients memory 213/* @brief Free the DVS coefficients memory
214 * @param[in] me Pointer to the DVS coefficients buffer. 214 * @param[in] me Pointer to the DVS coefficients buffer.
215 * @return None 215 * @return None
216 */ 216 */
217void 217void
218ia_css_dvs_coefficients_free(struct ia_css_dvs_coefficients *me); 218ia_css_dvs_coefficients_free(struct ia_css_dvs_coefficients *me);
219 219
220/** @brief Allocate the DVS 2.0 statistics memory on the host 220/* @brief Allocate the DVS 2.0 statistics memory on the host
221 * @param[in] grid The grid. 221 * @param[in] grid The grid.
222 * @return Pointer to the allocated DVS 2.0 statistics buffer on the host 222 * @return Pointer to the allocated DVS 2.0 statistics buffer on the host
223 */ 223 */
224struct ia_css_dvs2_statistics * 224struct ia_css_dvs2_statistics *
225ia_css_dvs2_statistics_allocate(const struct ia_css_dvs_grid_info *grid); 225ia_css_dvs2_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
226 226
227/** @brief Free the DVS 2.0 statistics memory 227/* @brief Free the DVS 2.0 statistics memory
228 * @param[in] me Pointer to the DVS 2.0 statistics buffer on the host. 228 * @param[in] me Pointer to the DVS 2.0 statistics buffer on the host.
229 * @return None 229 * @return None
230*/ 230*/
231void 231void
232ia_css_dvs2_statistics_free(struct ia_css_dvs2_statistics *me); 232ia_css_dvs2_statistics_free(struct ia_css_dvs2_statistics *me);
233 233
234/** @brief Allocate the DVS 2.0 coefficients memory 234/* @brief Allocate the DVS 2.0 coefficients memory
235 * @param[in] grid The grid. 235 * @param[in] grid The grid.
236 * @return Pointer to the allocated DVS 2.0 coefficients buffer 236 * @return Pointer to the allocated DVS 2.0 coefficients buffer
237*/ 237*/
238struct ia_css_dvs2_coefficients * 238struct ia_css_dvs2_coefficients *
239ia_css_dvs2_coefficients_allocate(const struct ia_css_dvs_grid_info *grid); 239ia_css_dvs2_coefficients_allocate(const struct ia_css_dvs_grid_info *grid);
240 240
241/** @brief Free the DVS 2.0 coefficients memory 241/* @brief Free the DVS 2.0 coefficients memory
242 * @param[in] me Pointer to the DVS 2.0 coefficients buffer. 242 * @param[in] me Pointer to the DVS 2.0 coefficients buffer.
243 * @return None 243 * @return None
244*/ 244*/
245void 245void
246ia_css_dvs2_coefficients_free(struct ia_css_dvs2_coefficients *me); 246ia_css_dvs2_coefficients_free(struct ia_css_dvs2_coefficients *me);
247 247
248/** @brief Allocate the DVS 2.0 6-axis config memory 248/* @brief Allocate the DVS 2.0 6-axis config memory
249 * @param[in] stream The stream. 249 * @param[in] stream The stream.
250 * @return Pointer to the allocated DVS 6axis configuration buffer 250 * @return Pointer to the allocated DVS 6axis configuration buffer
251*/ 251*/
252struct ia_css_dvs_6axis_config * 252struct ia_css_dvs_6axis_config *
253ia_css_dvs2_6axis_config_allocate(const struct ia_css_stream *stream); 253ia_css_dvs2_6axis_config_allocate(const struct ia_css_stream *stream);
254 254
255/** @brief Free the DVS 2.0 6-axis config memory 255/* @brief Free the DVS 2.0 6-axis config memory
256 * @param[in] dvs_6axis_config Pointer to the DVS 6axis configuration buffer 256 * @param[in] dvs_6axis_config Pointer to the DVS 6axis configuration buffer
257 * @return None 257 * @return None
258 */ 258 */
259void 259void
260ia_css_dvs2_6axis_config_free(struct ia_css_dvs_6axis_config *dvs_6axis_config); 260ia_css_dvs2_6axis_config_free(struct ia_css_dvs_6axis_config *dvs_6axis_config);
261 261
262/** @brief Allocate a dvs statistics map structure 262/* @brief Allocate a dvs statistics map structure
263 * @param[in] isp_stats pointer to ISP dvs statistis struct 263 * @param[in] isp_stats pointer to ISP dvs statistis struct
264 * @param[in] data_ptr host-side pointer to ISP dvs statistics. 264 * @param[in] data_ptr host-side pointer to ISP dvs statistics.
265 * @return Pointer to the allocated dvs statistics map 265 * @return Pointer to the allocated dvs statistics map
@@ -280,7 +280,7 @@ ia_css_isp_dvs_statistics_map_allocate(
280 const struct ia_css_isp_dvs_statistics *isp_stats, 280 const struct ia_css_isp_dvs_statistics *isp_stats,
281 void *data_ptr); 281 void *data_ptr);
282 282
283/** @brief Free the dvs statistics map 283/* @brief Free the dvs statistics map
284 * @param[in] me Pointer to the dvs statistics map 284 * @param[in] me Pointer to the dvs statistics map
285 * @return None 285 * @return None
286 * 286 *
@@ -291,7 +291,7 @@ ia_css_isp_dvs_statistics_map_allocate(
291void 291void
292ia_css_isp_dvs_statistics_map_free(struct ia_css_isp_dvs_statistics_map *me); 292ia_css_isp_dvs_statistics_map_free(struct ia_css_isp_dvs_statistics_map *me);
293 293
294/** @brief Allocate memory for the SKC DVS statistics on the ISP 294/* @brief Allocate memory for the SKC DVS statistics on the ISP
295 * @return Pointer to the allocated ACC DVS statistics buffer on the ISP 295 * @return Pointer to the allocated ACC DVS statistics buffer on the ISP
296*/ 296*/
297struct ia_css_isp_skc_dvs_statistics *ia_css_skc_dvs_statistics_allocate(void); 297struct ia_css_isp_skc_dvs_statistics *ia_css_skc_dvs_statistics_allocate(void);
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_env.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_env.h
index 1ae9daf0be76..8b0218ee658d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_env.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_env.h
@@ -20,12 +20,12 @@
20#include "ia_css_types.h" 20#include "ia_css_types.h"
21#include "ia_css_acc_types.h" 21#include "ia_css_acc_types.h"
22 22
23/** @file 23/* @file
24 * This file contains prototypes for functions that need to be provided to the 24 * This file contains prototypes for functions that need to be provided to the
25 * CSS-API host-code by the environment in which the CSS-API code runs. 25 * CSS-API host-code by the environment in which the CSS-API code runs.
26 */ 26 */
27 27
28/** Memory allocation attributes, for use in ia_css_css_mem_env. */ 28/* Memory allocation attributes, for use in ia_css_css_mem_env. */
29enum ia_css_mem_attr { 29enum ia_css_mem_attr {
30 IA_CSS_MEM_ATTR_CACHED = 1 << 0, 30 IA_CSS_MEM_ATTR_CACHED = 1 << 0,
31 IA_CSS_MEM_ATTR_ZEROED = 1 << 1, 31 IA_CSS_MEM_ATTR_ZEROED = 1 << 1,
@@ -33,62 +33,62 @@ enum ia_css_mem_attr {
33 IA_CSS_MEM_ATTR_CONTIGUOUS = 1 << 3, 33 IA_CSS_MEM_ATTR_CONTIGUOUS = 1 << 3,
34}; 34};
35 35
36/** Environment with function pointers for local IA memory allocation. 36/* Environment with function pointers for local IA memory allocation.
37 * This provides the CSS code with environment specific functionality 37 * This provides the CSS code with environment specific functionality
38 * for memory allocation of small local buffers such as local data structures. 38 * for memory allocation of small local buffers such as local data structures.
39 * This is never expected to allocate more than one page of memory (4K bytes). 39 * This is never expected to allocate more than one page of memory (4K bytes).
40 */ 40 */
41struct ia_css_cpu_mem_env { 41struct ia_css_cpu_mem_env {
42 void (*flush)(struct ia_css_acc_fw *fw); 42 void (*flush)(struct ia_css_acc_fw *fw);
43 /**< Flush function to flush the cache for given accelerator. */ 43 /** Flush function to flush the cache for given accelerator. */
44}; 44};
45 45
46/** Environment with function pointers to access the CSS hardware. This includes 46/* Environment with function pointers to access the CSS hardware. This includes
47 * registers and local memories. 47 * registers and local memories.
48 */ 48 */
49struct ia_css_hw_access_env { 49struct ia_css_hw_access_env {
50 void (*store_8)(hrt_address addr, uint8_t data); 50 void (*store_8)(hrt_address addr, uint8_t data);
51 /**< Store an 8 bit value into an address in the CSS HW address space. 51 /** Store an 8 bit value into an address in the CSS HW address space.
52 The address must be an 8 bit aligned address. */ 52 The address must be an 8 bit aligned address. */
53 void (*store_16)(hrt_address addr, uint16_t data); 53 void (*store_16)(hrt_address addr, uint16_t data);
54 /**< Store a 16 bit value into an address in the CSS HW address space. 54 /** Store a 16 bit value into an address in the CSS HW address space.
55 The address must be a 16 bit aligned address. */ 55 The address must be a 16 bit aligned address. */
56 void (*store_32)(hrt_address addr, uint32_t data); 56 void (*store_32)(hrt_address addr, uint32_t data);
57 /**< Store a 32 bit value into an address in the CSS HW address space. 57 /** Store a 32 bit value into an address in the CSS HW address space.
58 The address must be a 32 bit aligned address. */ 58 The address must be a 32 bit aligned address. */
59 uint8_t (*load_8)(hrt_address addr); 59 uint8_t (*load_8)(hrt_address addr);
60 /**< Load an 8 bit value from an address in the CSS HW address 60 /** Load an 8 bit value from an address in the CSS HW address
61 space. The address must be an 8 bit aligned address. */ 61 space. The address must be an 8 bit aligned address. */
62 uint16_t (*load_16)(hrt_address addr); 62 uint16_t (*load_16)(hrt_address addr);
63 /**< Load a 16 bit value from an address in the CSS HW address 63 /** Load a 16 bit value from an address in the CSS HW address
64 space. The address must be a 16 bit aligned address. */ 64 space. The address must be a 16 bit aligned address. */
65 uint32_t (*load_32)(hrt_address addr); 65 uint32_t (*load_32)(hrt_address addr);
66 /**< Load a 32 bit value from an address in the CSS HW address 66 /** Load a 32 bit value from an address in the CSS HW address
67 space. The address must be a 32 bit aligned address. */ 67 space. The address must be a 32 bit aligned address. */
68 void (*store)(hrt_address addr, const void *data, uint32_t bytes); 68 void (*store)(hrt_address addr, const void *data, uint32_t bytes);
69 /**< Store a number of bytes into a byte-aligned address in the CSS HW address space. */ 69 /** Store a number of bytes into a byte-aligned address in the CSS HW address space. */
70 void (*load)(hrt_address addr, void *data, uint32_t bytes); 70 void (*load)(hrt_address addr, void *data, uint32_t bytes);
71 /**< Load a number of bytes from a byte-aligned address in the CSS HW address space. */ 71 /** Load a number of bytes from a byte-aligned address in the CSS HW address space. */
72}; 72};
73 73
74/** Environment with function pointers to print error and debug messages. 74/* Environment with function pointers to print error and debug messages.
75 */ 75 */
76struct ia_css_print_env { 76struct ia_css_print_env {
77 int (*debug_print)(const char *fmt, va_list args); 77 int (*debug_print)(const char *fmt, va_list args);
78 /**< Print a debug message. */ 78 /** Print a debug message. */
79 int (*error_print)(const char *fmt, va_list args); 79 int (*error_print)(const char *fmt, va_list args);
80 /**< Print an error message.*/ 80 /** Print an error message.*/
81}; 81};
82 82
83/** Environment structure. This includes function pointers to access several 83/* Environment structure. This includes function pointers to access several
84 * features provided by the environment in which the CSS API is used. 84 * features provided by the environment in which the CSS API is used.
85 * This is used to run the camera IP in multiple platforms such as Linux, 85 * This is used to run the camera IP in multiple platforms such as Linux,
86 * Windows and several simulation environments. 86 * Windows and several simulation environments.
87 */ 87 */
88struct ia_css_env { 88struct ia_css_env {
89 struct ia_css_cpu_mem_env cpu_mem_env; /**< local flush. */ 89 struct ia_css_cpu_mem_env cpu_mem_env; /** local flush. */
90 struct ia_css_hw_access_env hw_access_env; /**< CSS HW access functions */ 90 struct ia_css_hw_access_env hw_access_env; /** CSS HW access functions */
91 struct ia_css_print_env print_env; /**< Message printing env. */ 91 struct ia_css_print_env print_env; /** Message printing env. */
92}; 92};
93 93
94#endif /* __IA_CSS_ENV_H */ 94#endif /* __IA_CSS_ENV_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_err.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_err.h
index 572e4e55c69e..cf895815ea31 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_err.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_err.h
@@ -15,12 +15,12 @@
15#ifndef __IA_CSS_ERR_H 15#ifndef __IA_CSS_ERR_H
16#define __IA_CSS_ERR_H 16#define __IA_CSS_ERR_H
17 17
18/** @file 18/* @file
19 * This file contains possible return values for most 19 * This file contains possible return values for most
20 * functions in the CSS-API. 20 * functions in the CSS-API.
21 */ 21 */
22 22
23/** Errors, these values are used as the return value for most 23/* Errors, these values are used as the return value for most
24 * functions in this API. 24 * functions in this API.
25 */ 25 */
26enum ia_css_err { 26enum ia_css_err {
@@ -41,22 +41,22 @@ enum ia_css_err {
41 IA_CSS_ERR_NOT_SUPPORTED 41 IA_CSS_ERR_NOT_SUPPORTED
42}; 42};
43 43
44/** FW warnings. This enum contains a value for each warning that 44/* FW warnings. This enum contains a value for each warning that
45 * the SP FW could indicate potential performance issue 45 * the SP FW could indicate potential performance issue
46 */ 46 */
47enum ia_css_fw_warning { 47enum ia_css_fw_warning {
48 IA_CSS_FW_WARNING_NONE, 48 IA_CSS_FW_WARNING_NONE,
49 IA_CSS_FW_WARNING_ISYS_QUEUE_FULL, /** < CSS system delayed because of insufficient space in the ISys queue. 49 IA_CSS_FW_WARNING_ISYS_QUEUE_FULL, /* < CSS system delayed because of insufficient space in the ISys queue.
50 This warning can be avoided by de-queing ISYS buffers more timely. */ 50 This warning can be avoided by de-queing ISYS buffers more timely. */
51 IA_CSS_FW_WARNING_PSYS_QUEUE_FULL, /** < CSS system delayed because of insufficient space in the PSys queue. 51 IA_CSS_FW_WARNING_PSYS_QUEUE_FULL, /* < CSS system delayed because of insufficient space in the PSys queue.
52 This warning can be avoided by de-queing PSYS buffers more timely. */ 52 This warning can be avoided by de-queing PSYS buffers more timely. */
53 IA_CSS_FW_WARNING_CIRCBUF_ALL_LOCKED, /** < CSS system delayed because of insufficient available buffers. 53 IA_CSS_FW_WARNING_CIRCBUF_ALL_LOCKED, /* < CSS system delayed because of insufficient available buffers.
54 This warning can be avoided by unlocking locked frame-buffers more timely. */ 54 This warning can be avoided by unlocking locked frame-buffers more timely. */
55 IA_CSS_FW_WARNING_EXP_ID_LOCKED, /** < Exposure ID skipped because the frame associated to it was still locked. 55 IA_CSS_FW_WARNING_EXP_ID_LOCKED, /* < Exposure ID skipped because the frame associated to it was still locked.
56 This warning can be avoided by unlocking locked frame-buffers more timely. */ 56 This warning can be avoided by unlocking locked frame-buffers more timely. */
57 IA_CSS_FW_WARNING_TAG_EXP_ID_FAILED, /** < Exposure ID cannot be found on the circular buffer. 57 IA_CSS_FW_WARNING_TAG_EXP_ID_FAILED, /* < Exposure ID cannot be found on the circular buffer.
58 This warning can be avoided by unlocking locked frame-buffers more timely. */ 58 This warning can be avoided by unlocking locked frame-buffers more timely. */
59 IA_CSS_FW_WARNING_FRAME_PARAM_MISMATCH, /** < Frame and param pair mismatched in tagger. 59 IA_CSS_FW_WARNING_FRAME_PARAM_MISMATCH, /* < Frame and param pair mismatched in tagger.
60 This warning can be avoided by providing a param set for each frame. */ 60 This warning can be avoided by providing a param set for each frame. */
61}; 61};
62 62
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_event_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_event_public.h
index aaf349772abe..036a2f03d3bd 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_event_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_event_public.h
@@ -15,7 +15,7 @@
15#ifndef __IA_CSS_EVENT_PUBLIC_H 15#ifndef __IA_CSS_EVENT_PUBLIC_H
16#define __IA_CSS_EVENT_PUBLIC_H 16#define __IA_CSS_EVENT_PUBLIC_H
17 17
18/** @file 18/* @file
19 * This file contains CSS-API events functionality 19 * This file contains CSS-API events functionality
20 */ 20 */
21 21
@@ -24,7 +24,7 @@
24#include <ia_css_types.h> /* ia_css_pipe */ 24#include <ia_css_types.h> /* ia_css_pipe */
25#include <ia_css_timer.h> /* ia_css_timer */ 25#include <ia_css_timer.h> /* ia_css_timer */
26 26
27/** The event type, distinguishes the kind of events that 27/* The event type, distinguishes the kind of events that
28 * can are generated by the CSS system. 28 * can are generated by the CSS system.
29 * 29 *
30 * !!!IMPORTANT!!! KEEP THE FOLLOWING IN SYNC: 30 * !!!IMPORTANT!!! KEEP THE FOLLOWING IN SYNC:
@@ -35,43 +35,43 @@
35 */ 35 */
36enum ia_css_event_type { 36enum ia_css_event_type {
37 IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE = 1 << 0, 37 IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE = 1 << 0,
38 /**< Output frame ready. */ 38 /** Output frame ready. */
39 IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE = 1 << 1, 39 IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE = 1 << 1,
40 /**< Second output frame ready. */ 40 /** Second output frame ready. */
41 IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE = 1 << 2, 41 IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE = 1 << 2,
42 /**< Viewfinder Output frame ready. */ 42 /** Viewfinder Output frame ready. */
43 IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE = 1 << 3, 43 IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE = 1 << 3,
44 /**< Second viewfinder Output frame ready. */ 44 /** Second viewfinder Output frame ready. */
45 IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE = 1 << 4, 45 IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE = 1 << 4,
46 /**< Indication that 3A statistics are available. */ 46 /** Indication that 3A statistics are available. */
47 IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE = 1 << 5, 47 IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE = 1 << 5,
48 /**< Indication that DIS statistics are available. */ 48 /** Indication that DIS statistics are available. */
49 IA_CSS_EVENT_TYPE_PIPELINE_DONE = 1 << 6, 49 IA_CSS_EVENT_TYPE_PIPELINE_DONE = 1 << 6,
50 /**< Pipeline Done event, sent after last pipeline stage. */ 50 /** Pipeline Done event, sent after last pipeline stage. */
51 IA_CSS_EVENT_TYPE_FRAME_TAGGED = 1 << 7, 51 IA_CSS_EVENT_TYPE_FRAME_TAGGED = 1 << 7,
52 /**< Frame tagged. */ 52 /** Frame tagged. */
53 IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE = 1 << 8, 53 IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE = 1 << 8,
54 /**< Input frame ready. */ 54 /** Input frame ready. */
55 IA_CSS_EVENT_TYPE_METADATA_DONE = 1 << 9, 55 IA_CSS_EVENT_TYPE_METADATA_DONE = 1 << 9,
56 /**< Metadata ready. */ 56 /** Metadata ready. */
57 IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE = 1 << 10, 57 IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE = 1 << 10,
58 /**< Indication that LACE statistics are available. */ 58 /** Indication that LACE statistics are available. */
59 IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE = 1 << 11, 59 IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE = 1 << 11,
60 /**< Extension stage complete. */ 60 /** Extension stage complete. */
61 IA_CSS_EVENT_TYPE_TIMER = 1 << 12, 61 IA_CSS_EVENT_TYPE_TIMER = 1 << 12,
62 /**< Timer event for measuring the SP side latencies. It contains the 62 /** Timer event for measuring the SP side latencies. It contains the
63 32-bit timer value from the SP */ 63 32-bit timer value from the SP */
64 IA_CSS_EVENT_TYPE_PORT_EOF = 1 << 13, 64 IA_CSS_EVENT_TYPE_PORT_EOF = 1 << 13,
65 /**< End Of Frame event, sent when in buffered sensor mode. */ 65 /** End Of Frame event, sent when in buffered sensor mode. */
66 IA_CSS_EVENT_TYPE_FW_WARNING = 1 << 14, 66 IA_CSS_EVENT_TYPE_FW_WARNING = 1 << 14,
67 /**< Performance warning encounter by FW */ 67 /** Performance warning encounter by FW */
68 IA_CSS_EVENT_TYPE_FW_ASSERT = 1 << 15, 68 IA_CSS_EVENT_TYPE_FW_ASSERT = 1 << 15,
69 /**< Assertion hit by FW */ 69 /** Assertion hit by FW */
70}; 70};
71 71
72#define IA_CSS_EVENT_TYPE_NONE 0 72#define IA_CSS_EVENT_TYPE_NONE 0
73 73
74/** IA_CSS_EVENT_TYPE_ALL is a mask for all pipe related events. 74/* IA_CSS_EVENT_TYPE_ALL is a mask for all pipe related events.
75 * The other events (such as PORT_EOF) cannot be enabled/disabled 75 * The other events (such as PORT_EOF) cannot be enabled/disabled
76 * and are hence excluded from this macro. 76 * and are hence excluded from this macro.
77 */ 77 */
@@ -89,7 +89,7 @@ enum ia_css_event_type {
89 IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE | \ 89 IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE | \
90 IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE) 90 IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE)
91 91
92/** The event struct, container for the event type and its related values. 92/* The event struct, container for the event type and its related values.
93 * Depending on the event type, either pipe or port will be filled. 93 * Depending on the event type, either pipe or port will be filled.
94 * Pipeline related events (like buffer/frame events) will return a valid and filled pipe handle. 94 * Pipeline related events (like buffer/frame events) will return a valid and filled pipe handle.
95 * For non pipeline related events (but i.e. stream specific, like EOF event), the port will be 95 * For non pipeline related events (but i.e. stream specific, like EOF event), the port will be
@@ -97,14 +97,14 @@ enum ia_css_event_type {
97 */ 97 */
98struct ia_css_event { 98struct ia_css_event {
99 struct ia_css_pipe *pipe; 99 struct ia_css_pipe *pipe;
100 /**< Pipe handle on which event happened, NULL for non pipe related 100 /** Pipe handle on which event happened, NULL for non pipe related
101 events. */ 101 events. */
102 enum ia_css_event_type type; 102 enum ia_css_event_type type;
103 /**< Type of Event, always valid/filled. */ 103 /** Type of Event, always valid/filled. */
104 uint8_t port; 104 uint8_t port;
105 /**< Port number for EOF event (not valid for other events). */ 105 /** Port number for EOF event (not valid for other events). */
106 uint8_t exp_id; 106 uint8_t exp_id;
107 /**< Exposure id for EOF/FRAME_TAGGED/FW_WARNING event (not valid for other events) 107 /** Exposure id for EOF/FRAME_TAGGED/FW_WARNING event (not valid for other events)
108 The exposure ID is unique only within a logical stream and it is 108 The exposure ID is unique only within a logical stream and it is
109 only generated on systems that have an input system (such as 2400 109 only generated on systems that have an input system (such as 2400
110 and 2401). 110 and 2401).
@@ -120,26 +120,26 @@ struct ia_css_event {
120 in the exposure IDs. Therefor applications should not use this 120 in the exposure IDs. Therefor applications should not use this
121 to detect frame drops. */ 121 to detect frame drops. */
122 uint32_t fw_handle; 122 uint32_t fw_handle;
123 /**< Firmware Handle for ACC_STAGE_COMPLETE event (not valid for other 123 /** Firmware Handle for ACC_STAGE_COMPLETE event (not valid for other
124 events). */ 124 events). */
125 enum ia_css_fw_warning fw_warning; 125 enum ia_css_fw_warning fw_warning;
126 /**< Firmware warning code, only for WARNING events. */ 126 /** Firmware warning code, only for WARNING events. */
127 uint8_t fw_assert_module_id; 127 uint8_t fw_assert_module_id;
128 /**< Firmware module id, only for ASSERT events, should be logged by driver. */ 128 /** Firmware module id, only for ASSERT events, should be logged by driver. */
129 uint16_t fw_assert_line_no; 129 uint16_t fw_assert_line_no;
130 /**< Firmware line number, only for ASSERT events, should be logged by driver. */ 130 /** Firmware line number, only for ASSERT events, should be logged by driver. */
131 clock_value_t timer_data; 131 clock_value_t timer_data;
132 /**< For storing the full 32-bit of the timer value. Valid only for TIMER 132 /** For storing the full 32-bit of the timer value. Valid only for TIMER
133 event */ 133 event */
134 uint8_t timer_code; 134 uint8_t timer_code;
135 /**< For storing the code of the TIMER event. Valid only for 135 /** For storing the code of the TIMER event. Valid only for
136 TIMER event */ 136 TIMER event */
137 uint8_t timer_subcode; 137 uint8_t timer_subcode;
138 /**< For storing the subcode of the TIMER event. Valid only 138 /** For storing the subcode of the TIMER event. Valid only
139 for TIMER event */ 139 for TIMER event */
140}; 140};
141 141
142/** @brief Dequeue a PSYS event from the CSS system. 142/* @brief Dequeue a PSYS event from the CSS system.
143 * 143 *
144 * @param[out] event Pointer to the event struct which will be filled by 144 * @param[out] event Pointer to the event struct which will be filled by
145 * this function if an event is available. 145 * this function if an event is available.
@@ -156,7 +156,7 @@ struct ia_css_event {
156enum ia_css_err 156enum ia_css_err
157ia_css_dequeue_psys_event(struct ia_css_event *event); 157ia_css_dequeue_psys_event(struct ia_css_event *event);
158 158
159/** @brief Dequeue an event from the CSS system. 159/* @brief Dequeue an event from the CSS system.
160 * 160 *
161 * @param[out] event Pointer to the event struct which will be filled by 161 * @param[out] event Pointer to the event struct which will be filled by
162 * this function if an event is available. 162 * this function if an event is available.
@@ -171,7 +171,7 @@ ia_css_dequeue_psys_event(struct ia_css_event *event);
171enum ia_css_err 171enum ia_css_err
172ia_css_dequeue_event(struct ia_css_event *event); 172ia_css_dequeue_event(struct ia_css_event *event);
173 173
174/** @brief Dequeue an ISYS event from the CSS system. 174/* @brief Dequeue an ISYS event from the CSS system.
175 * 175 *
176 * @param[out] event Pointer to the event struct which will be filled by 176 * @param[out] event Pointer to the event struct which will be filled by
177 * this function if an event is available. 177 * this function if an event is available.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_firmware.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_firmware.h
index 06d375a09be2..d7d7f0a995e5 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_firmware.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_firmware.h
@@ -15,21 +15,21 @@
15#ifndef __IA_CSS_FIRMWARE_H 15#ifndef __IA_CSS_FIRMWARE_H
16#define __IA_CSS_FIRMWARE_H 16#define __IA_CSS_FIRMWARE_H
17 17
18/** @file 18/* @file
19 * This file contains firmware loading/unloading support functionality 19 * This file contains firmware loading/unloading support functionality
20 */ 20 */
21 21
22#include "ia_css_err.h" 22#include "ia_css_err.h"
23#include "ia_css_env.h" 23#include "ia_css_env.h"
24 24
25/** CSS firmware package structure. 25/* CSS firmware package structure.
26 */ 26 */
27struct ia_css_fw { 27struct ia_css_fw {
28 void *data; /**< pointer to the firmware data */ 28 void *data; /** pointer to the firmware data */
29 unsigned int bytes; /**< length in bytes of firmware data */ 29 unsigned int bytes; /** length in bytes of firmware data */
30}; 30};
31 31
32/** @brief Loads the firmware 32/* @brief Loads the firmware
33 * @param[in] env Environment, provides functions to access the 33 * @param[in] env Environment, provides functions to access the
34 * environment in which the CSS code runs. This is 34 * environment in which the CSS code runs. This is
35 * used for host side memory access and message 35 * used for host side memory access and message
@@ -51,7 +51,7 @@ enum ia_css_err
51ia_css_load_firmware(const struct ia_css_env *env, 51ia_css_load_firmware(const struct ia_css_env *env,
52 const struct ia_css_fw *fw); 52 const struct ia_css_fw *fw);
53 53
54/** @brief Unloads the firmware 54/* @brief Unloads the firmware
55 * @return None 55 * @return None
56 * 56 *
57 * This function unloads the firmware loaded by ia_css_load_firmware. 57 * This function unloads the firmware loaded by ia_css_load_firmware.
@@ -61,7 +61,7 @@ ia_css_load_firmware(const struct ia_css_env *env,
61void 61void
62ia_css_unload_firmware(void); 62ia_css_unload_firmware(void);
63 63
64/** @brief Checks firmware version 64/* @brief Checks firmware version
65 * @param[in] fw Firmware package containing the firmware for all 65 * @param[in] fw Firmware package containing the firmware for all
66 * predefined ISP binaries. 66 * predefined ISP binaries.
67 * @return Returns true when the firmware version matches with the CSS 67 * @return Returns true when the firmware version matches with the CSS
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frac.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frac.h
index da9c60144c6d..e5ffc579aef1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frac.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frac.h
@@ -15,7 +15,7 @@
15#ifndef _IA_CSS_FRAC_H 15#ifndef _IA_CSS_FRAC_H
16#define _IA_CSS_FRAC_H 16#define _IA_CSS_FRAC_H
17 17
18/** @file 18/* @file
19 * This file contains typedefs used for fractional numbers 19 * This file contains typedefs used for fractional numbers
20 */ 20 */
21 21
@@ -25,13 +25,13 @@
25 * NOTE: the 16 bit fixed point types actually occupy 32 bits 25 * NOTE: the 16 bit fixed point types actually occupy 32 bits
26 * to save on extension operations in the ISP code. 26 * to save on extension operations in the ISP code.
27 */ 27 */
28/** Unsigned fixed point value, 0 integer bits, 16 fractional bits */ 28/* Unsigned fixed point value, 0 integer bits, 16 fractional bits */
29typedef uint32_t ia_css_u0_16; 29typedef uint32_t ia_css_u0_16;
30/** Unsigned fixed point value, 5 integer bits, 11 fractional bits */ 30/* Unsigned fixed point value, 5 integer bits, 11 fractional bits */
31typedef uint32_t ia_css_u5_11; 31typedef uint32_t ia_css_u5_11;
32/** Unsigned fixed point value, 8 integer bits, 8 fractional bits */ 32/* Unsigned fixed point value, 8 integer bits, 8 fractional bits */
33typedef uint32_t ia_css_u8_8; 33typedef uint32_t ia_css_u8_8;
34/** Signed fixed point value, 0 integer bits, 15 fractional bits */ 34/* Signed fixed point value, 0 integer bits, 15 fractional bits */
35typedef int32_t ia_css_s0_15; 35typedef int32_t ia_css_s0_15;
36 36
37#endif /* _IA_CSS_FRAC_H */ 37#endif /* _IA_CSS_FRAC_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_format.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_format.h
index d534fbd91380..2f177edc36ac 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_format.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_format.h
@@ -15,11 +15,11 @@
15#ifndef __IA_CSS_FRAME_FORMAT_H 15#ifndef __IA_CSS_FRAME_FORMAT_H
16#define __IA_CSS_FRAME_FORMAT_H 16#define __IA_CSS_FRAME_FORMAT_H
17 17
18/** @file 18/* @file
19 * This file contains information about formats supported in the ISP 19 * This file contains information about formats supported in the ISP
20 */ 20 */
21 21
22/** Frame formats, some of these come from fourcc.org, others are 22/* Frame formats, some of these come from fourcc.org, others are
23 better explained by video4linux2. The NV11 seems to be described only 23 better explained by video4linux2. The NV11 seems to be described only
24 on MSDN pages, but even those seem to be gone now. 24 on MSDN pages, but even those seem to be gone now.
25 Frames can come in many forms, the main categories are RAW, RGB and YUV 25 Frames can come in many forms, the main categories are RAW, RGB and YUV
@@ -48,45 +48,45 @@
48 - css/bxt_sandbox/isysapi/interface/ia_css_isysapi_fw_types.h 48 - css/bxt_sandbox/isysapi/interface/ia_css_isysapi_fw_types.h
49*/ 49*/
50enum ia_css_frame_format { 50enum ia_css_frame_format {
51 IA_CSS_FRAME_FORMAT_NV11 = 0, /**< 12 bit YUV 411, Y, UV plane */ 51 IA_CSS_FRAME_FORMAT_NV11 = 0, /** 12 bit YUV 411, Y, UV plane */
52 IA_CSS_FRAME_FORMAT_NV12, /**< 12 bit YUV 420, Y, UV plane */ 52 IA_CSS_FRAME_FORMAT_NV12, /** 12 bit YUV 420, Y, UV plane */
53 IA_CSS_FRAME_FORMAT_NV12_16, /**< 16 bit YUV 420, Y, UV plane */ 53 IA_CSS_FRAME_FORMAT_NV12_16, /** 16 bit YUV 420, Y, UV plane */
54 IA_CSS_FRAME_FORMAT_NV12_TILEY, /**< 12 bit YUV 420, Intel proprietary tiled format, TileY */ 54 IA_CSS_FRAME_FORMAT_NV12_TILEY, /** 12 bit YUV 420, Intel proprietary tiled format, TileY */
55 IA_CSS_FRAME_FORMAT_NV16, /**< 16 bit YUV 422, Y, UV plane */ 55 IA_CSS_FRAME_FORMAT_NV16, /** 16 bit YUV 422, Y, UV plane */
56 IA_CSS_FRAME_FORMAT_NV21, /**< 12 bit YUV 420, Y, VU plane */ 56 IA_CSS_FRAME_FORMAT_NV21, /** 12 bit YUV 420, Y, VU plane */
57 IA_CSS_FRAME_FORMAT_NV61, /**< 16 bit YUV 422, Y, VU plane */ 57 IA_CSS_FRAME_FORMAT_NV61, /** 16 bit YUV 422, Y, VU plane */
58 IA_CSS_FRAME_FORMAT_YV12, /**< 12 bit YUV 420, Y, V, U plane */ 58 IA_CSS_FRAME_FORMAT_YV12, /** 12 bit YUV 420, Y, V, U plane */
59 IA_CSS_FRAME_FORMAT_YV16, /**< 16 bit YUV 422, Y, V, U plane */ 59 IA_CSS_FRAME_FORMAT_YV16, /** 16 bit YUV 422, Y, V, U plane */
60 IA_CSS_FRAME_FORMAT_YUV420, /**< 12 bit YUV 420, Y, U, V plane */ 60 IA_CSS_FRAME_FORMAT_YUV420, /** 12 bit YUV 420, Y, U, V plane */
61 IA_CSS_FRAME_FORMAT_YUV420_16, /**< yuv420, 16 bits per subpixel */ 61 IA_CSS_FRAME_FORMAT_YUV420_16, /** yuv420, 16 bits per subpixel */
62 IA_CSS_FRAME_FORMAT_YUV422, /**< 16 bit YUV 422, Y, U, V plane */ 62 IA_CSS_FRAME_FORMAT_YUV422, /** 16 bit YUV 422, Y, U, V plane */
63 IA_CSS_FRAME_FORMAT_YUV422_16, /**< yuv422, 16 bits per subpixel */ 63 IA_CSS_FRAME_FORMAT_YUV422_16, /** yuv422, 16 bits per subpixel */
64 IA_CSS_FRAME_FORMAT_UYVY, /**< 16 bit YUV 422, UYVY interleaved */ 64 IA_CSS_FRAME_FORMAT_UYVY, /** 16 bit YUV 422, UYVY interleaved */
65 IA_CSS_FRAME_FORMAT_YUYV, /**< 16 bit YUV 422, YUYV interleaved */ 65 IA_CSS_FRAME_FORMAT_YUYV, /** 16 bit YUV 422, YUYV interleaved */
66 IA_CSS_FRAME_FORMAT_YUV444, /**< 24 bit YUV 444, Y, U, V plane */ 66 IA_CSS_FRAME_FORMAT_YUV444, /** 24 bit YUV 444, Y, U, V plane */
67 IA_CSS_FRAME_FORMAT_YUV_LINE, /**< Internal format, 2 y lines followed 67 IA_CSS_FRAME_FORMAT_YUV_LINE, /** Internal format, 2 y lines followed
68 by a uvinterleaved line */ 68 by a uvinterleaved line */
69 IA_CSS_FRAME_FORMAT_RAW, /**< RAW, 1 plane */ 69 IA_CSS_FRAME_FORMAT_RAW, /** RAW, 1 plane */
70 IA_CSS_FRAME_FORMAT_RGB565, /**< 16 bit RGB, 1 plane. Each 3 sub 70 IA_CSS_FRAME_FORMAT_RGB565, /** 16 bit RGB, 1 plane. Each 3 sub
71 pixels are packed into one 16 bit 71 pixels are packed into one 16 bit
72 value, 5 bits for R, 6 bits for G 72 value, 5 bits for R, 6 bits for G
73 and 5 bits for B. */ 73 and 5 bits for B. */
74 IA_CSS_FRAME_FORMAT_PLANAR_RGB888, /**< 24 bit RGB, 3 planes */ 74 IA_CSS_FRAME_FORMAT_PLANAR_RGB888, /** 24 bit RGB, 3 planes */
75 IA_CSS_FRAME_FORMAT_RGBA888, /**< 32 bit RGBA, 1 plane, A=Alpha 75 IA_CSS_FRAME_FORMAT_RGBA888, /** 32 bit RGBA, 1 plane, A=Alpha
76 (alpha is unused) */ 76 (alpha is unused) */
77 IA_CSS_FRAME_FORMAT_QPLANE6, /**< Internal, for advanced ISP */ 77 IA_CSS_FRAME_FORMAT_QPLANE6, /** Internal, for advanced ISP */
78 IA_CSS_FRAME_FORMAT_BINARY_8, /**< byte stream, used for jpeg. For 78 IA_CSS_FRAME_FORMAT_BINARY_8, /** byte stream, used for jpeg. For
79 frames of this type, we set the 79 frames of this type, we set the
80 height to 1 and the width to the 80 height to 1 and the width to the
81 number of allocated bytes. */ 81 number of allocated bytes. */
82 IA_CSS_FRAME_FORMAT_MIPI, /**< MIPI frame, 1 plane */ 82 IA_CSS_FRAME_FORMAT_MIPI, /** MIPI frame, 1 plane */
83 IA_CSS_FRAME_FORMAT_RAW_PACKED, /**< RAW, 1 plane, packed */ 83 IA_CSS_FRAME_FORMAT_RAW_PACKED, /** RAW, 1 plane, packed */
84 IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8, /**< 8 bit per Y/U/V. 84 IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8, /** 8 bit per Y/U/V.
85 Y odd line; UYVY 85 Y odd line; UYVY
86 interleaved even line */ 86 interleaved even line */
87 IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8, /**< Legacy YUV420. UY odd 87 IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8, /** Legacy YUV420. UY odd
88 line; VY even line */ 88 line; VY even line */
89 IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10 /**< 10 bit per Y/U/V. Y odd 89 IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10 /** 10 bit per Y/U/V. Y odd
90 line; UYVY interleaved 90 line; UYVY interleaved
91 even line */ 91 even line */
92}; 92};
@@ -95,7 +95,7 @@ enum ia_css_frame_format {
95/* because of issues this would cause with the Clockwork code checking tool. */ 95/* because of issues this would cause with the Clockwork code checking tool. */
96#define IA_CSS_FRAME_FORMAT_NUM (IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10 + 1) 96#define IA_CSS_FRAME_FORMAT_NUM (IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10 + 1)
97 97
98/** Number of valid output frame formats for ISP **/ 98/* Number of valid output frame formats for ISP **/
99#define IA_CSS_FRAME_OUT_FORMAT_NUM (IA_CSS_FRAME_FORMAT_RGBA888 + 1) 99#define IA_CSS_FRAME_OUT_FORMAT_NUM (IA_CSS_FRAME_FORMAT_RGBA888 + 1)
100 100
101#endif /* __IA_CSS_FRAME_FORMAT_H */ 101#endif /* __IA_CSS_FRAME_FORMAT_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_public.h
index 92f2389176b2..ba7a076c3afa 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_public.h
@@ -15,7 +15,7 @@
15#ifndef __IA_CSS_FRAME_PUBLIC_H 15#ifndef __IA_CSS_FRAME_PUBLIC_H
16#define __IA_CSS_FRAME_PUBLIC_H 16#define __IA_CSS_FRAME_PUBLIC_H
17 17
18/** @file 18/* @file
19 * This file contains structs to describe various frame-formats supported by the ISP. 19 * This file contains structs to describe various frame-formats supported by the ISP.
20 */ 20 */
21 21
@@ -25,73 +25,73 @@
25#include "ia_css_frame_format.h" 25#include "ia_css_frame_format.h"
26#include "ia_css_buffer.h" 26#include "ia_css_buffer.h"
27 27
28/** For RAW input, the bayer order needs to be specified separately. There 28/* For RAW input, the bayer order needs to be specified separately. There
29 * are 4 possible orders. The name is constructed by taking the first two 29 * are 4 possible orders. The name is constructed by taking the first two
30 * colors on the first line and the first two colors from the second line. 30 * colors on the first line and the first two colors from the second line.
31 */ 31 */
32enum ia_css_bayer_order { 32enum ia_css_bayer_order {
33 IA_CSS_BAYER_ORDER_GRBG, /**< GRGRGRGRGR .. BGBGBGBGBG */ 33 IA_CSS_BAYER_ORDER_GRBG, /** GRGRGRGRGR .. BGBGBGBGBG */
34 IA_CSS_BAYER_ORDER_RGGB, /**< RGRGRGRGRG .. GBGBGBGBGB */ 34 IA_CSS_BAYER_ORDER_RGGB, /** RGRGRGRGRG .. GBGBGBGBGB */
35 IA_CSS_BAYER_ORDER_BGGR, /**< BGBGBGBGBG .. GRGRGRGRGR */ 35 IA_CSS_BAYER_ORDER_BGGR, /** BGBGBGBGBG .. GRGRGRGRGR */
36 IA_CSS_BAYER_ORDER_GBRG, /**< GBGBGBGBGB .. RGRGRGRGRG */ 36 IA_CSS_BAYER_ORDER_GBRG, /** GBGBGBGBGB .. RGRGRGRGRG */
37}; 37};
38#define IA_CSS_BAYER_ORDER_NUM (IA_CSS_BAYER_ORDER_GBRG + 1) 38#define IA_CSS_BAYER_ORDER_NUM (IA_CSS_BAYER_ORDER_GBRG + 1)
39 39
40/** Frame plane structure. This describes one plane in an image 40/* Frame plane structure. This describes one plane in an image
41 * frame buffer. 41 * frame buffer.
42 */ 42 */
43struct ia_css_frame_plane { 43struct ia_css_frame_plane {
44 unsigned int height; /**< height of a plane in lines */ 44 unsigned int height; /** height of a plane in lines */
45 unsigned int width; /**< width of a line, in DMA elements, note that 45 unsigned int width; /** width of a line, in DMA elements, note that
46 for RGB565 the three subpixels are stored in 46 for RGB565 the three subpixels are stored in
47 one element. For all other formats this is 47 one element. For all other formats this is
48 the number of subpixels per line. */ 48 the number of subpixels per line. */
49 unsigned int stride; /**< stride of a line in bytes */ 49 unsigned int stride; /** stride of a line in bytes */
50 unsigned int offset; /**< offset in bytes to start of frame data. 50 unsigned int offset; /** offset in bytes to start of frame data.
51 offset is wrt data field in ia_css_frame */ 51 offset is wrt data field in ia_css_frame */
52}; 52};
53 53
54/** Binary "plane". This is used to story binary streams such as jpeg 54/* Binary "plane". This is used to story binary streams such as jpeg
55 * images. This is not actually a real plane. 55 * images. This is not actually a real plane.
56 */ 56 */
57struct ia_css_frame_binary_plane { 57struct ia_css_frame_binary_plane {
58 unsigned int size; /**< number of bytes in the stream */ 58 unsigned int size; /** number of bytes in the stream */
59 struct ia_css_frame_plane data; /**< plane */ 59 struct ia_css_frame_plane data; /** plane */
60}; 60};
61 61
62/** Container for planar YUV frames. This contains 3 planes. 62/* Container for planar YUV frames. This contains 3 planes.
63 */ 63 */
64struct ia_css_frame_yuv_planes { 64struct ia_css_frame_yuv_planes {
65 struct ia_css_frame_plane y; /**< Y plane */ 65 struct ia_css_frame_plane y; /** Y plane */
66 struct ia_css_frame_plane u; /**< U plane */ 66 struct ia_css_frame_plane u; /** U plane */
67 struct ia_css_frame_plane v; /**< V plane */ 67 struct ia_css_frame_plane v; /** V plane */
68}; 68};
69 69
70/** Container for semi-planar YUV frames. 70/* Container for semi-planar YUV frames.
71 */ 71 */
72struct ia_css_frame_nv_planes { 72struct ia_css_frame_nv_planes {
73 struct ia_css_frame_plane y; /**< Y plane */ 73 struct ia_css_frame_plane y; /** Y plane */
74 struct ia_css_frame_plane uv; /**< UV plane */ 74 struct ia_css_frame_plane uv; /** UV plane */
75}; 75};
76 76
77/** Container for planar RGB frames. Each color has its own plane. 77/* Container for planar RGB frames. Each color has its own plane.
78 */ 78 */
79struct ia_css_frame_rgb_planes { 79struct ia_css_frame_rgb_planes {
80 struct ia_css_frame_plane r; /**< Red plane */ 80 struct ia_css_frame_plane r; /** Red plane */
81 struct ia_css_frame_plane g; /**< Green plane */ 81 struct ia_css_frame_plane g; /** Green plane */
82 struct ia_css_frame_plane b; /**< Blue plane */ 82 struct ia_css_frame_plane b; /** Blue plane */
83}; 83};
84 84
85/** Container for 6-plane frames. These frames are used internally 85/* Container for 6-plane frames. These frames are used internally
86 * in the advanced ISP only. 86 * in the advanced ISP only.
87 */ 87 */
88struct ia_css_frame_plane6_planes { 88struct ia_css_frame_plane6_planes {
89 struct ia_css_frame_plane r; /**< Red plane */ 89 struct ia_css_frame_plane r; /** Red plane */
90 struct ia_css_frame_plane r_at_b; /**< Red at blue plane */ 90 struct ia_css_frame_plane r_at_b; /** Red at blue plane */
91 struct ia_css_frame_plane gr; /**< Red-green plane */ 91 struct ia_css_frame_plane gr; /** Red-green plane */
92 struct ia_css_frame_plane gb; /**< Blue-green plane */ 92 struct ia_css_frame_plane gb; /** Blue-green plane */
93 struct ia_css_frame_plane b; /**< Blue plane */ 93 struct ia_css_frame_plane b; /** Blue plane */
94 struct ia_css_frame_plane b_at_r; /**< Blue at red plane */ 94 struct ia_css_frame_plane b_at_r; /** Blue at red plane */
95}; 95};
96 96
97/* Crop info struct - stores the lines to be cropped in isp */ 97/* Crop info struct - stores the lines to be cropped in isp */
@@ -103,15 +103,15 @@ struct ia_css_crop_info {
103 unsigned int start_line; 103 unsigned int start_line;
104}; 104};
105 105
106/** Frame info struct. This describes the contents of an image frame buffer. 106/* Frame info struct. This describes the contents of an image frame buffer.
107 */ 107 */
108struct ia_css_frame_info { 108struct ia_css_frame_info {
109 struct ia_css_resolution res; /**< Frame resolution (valid data) */ 109 struct ia_css_resolution res; /** Frame resolution (valid data) */
110 unsigned int padded_width; /**< stride of line in memory (in pixels) */ 110 unsigned int padded_width; /** stride of line in memory (in pixels) */
111 enum ia_css_frame_format format; /**< format of the frame data */ 111 enum ia_css_frame_format format; /** format of the frame data */
112 unsigned int raw_bit_depth; /**< number of valid bits per pixel, 112 unsigned int raw_bit_depth; /** number of valid bits per pixel,
113 only valid for RAW bayer frames */ 113 only valid for RAW bayer frames */
114 enum ia_css_bayer_order raw_bayer_order; /**< bayer order, only valid 114 enum ia_css_bayer_order raw_bayer_order; /** bayer order, only valid
115 for RAW bayer frames */ 115 for RAW bayer frames */
116 /* the params below are computed based on bayer_order 116 /* the params below are computed based on bayer_order
117 * we can remove the raw_bayer_order if it is redundant 117 * we can remove the raw_bayer_order if it is redundant
@@ -136,9 +136,9 @@ struct ia_css_frame_info {
136 * Specifies the DVS loop delay in "frame periods" 136 * Specifies the DVS loop delay in "frame periods"
137 */ 137 */
138enum ia_css_frame_delay { 138enum ia_css_frame_delay {
139 IA_CSS_FRAME_DELAY_0, /**< Frame delay = 0 */ 139 IA_CSS_FRAME_DELAY_0, /** Frame delay = 0 */
140 IA_CSS_FRAME_DELAY_1, /**< Frame delay = 1 */ 140 IA_CSS_FRAME_DELAY_1, /** Frame delay = 1 */
141 IA_CSS_FRAME_DELAY_2 /**< Frame delay = 2 */ 141 IA_CSS_FRAME_DELAY_2 /** Frame delay = 2 */
142}; 142};
143 143
144enum ia_css_frame_flash_state { 144enum ia_css_frame_flash_state {
@@ -147,13 +147,13 @@ enum ia_css_frame_flash_state {
147 IA_CSS_FRAME_FLASH_STATE_FULL 147 IA_CSS_FRAME_FLASH_STATE_FULL
148}; 148};
149 149
150/** Frame structure. This structure describes an image buffer or frame. 150/* Frame structure. This structure describes an image buffer or frame.
151 * This is the main structure used for all input and output images. 151 * This is the main structure used for all input and output images.
152 */ 152 */
153struct ia_css_frame { 153struct ia_css_frame {
154 struct ia_css_frame_info info; /**< info struct describing the frame */ 154 struct ia_css_frame_info info; /** info struct describing the frame */
155 ia_css_ptr data; /**< pointer to start of image data */ 155 ia_css_ptr data; /** pointer to start of image data */
156 unsigned int data_bytes; /**< size of image data in bytes */ 156 unsigned int data_bytes; /** size of image data in bytes */
157 /* LA: move this to ia_css_buffer */ 157 /* LA: move this to ia_css_buffer */
158 /* 158 /*
159 * -1 if data address is static during life time of pipeline 159 * -1 if data address is static during life time of pipeline
@@ -171,10 +171,10 @@ struct ia_css_frame {
171 enum ia_css_buffer_type buf_type; 171 enum ia_css_buffer_type buf_type;
172 enum ia_css_frame_flash_state flash_state; 172 enum ia_css_frame_flash_state flash_state;
173 unsigned int exp_id; 173 unsigned int exp_id;
174 /**< exposure id, see ia_css_event_public.h for more detail */ 174 /** exposure id, see ia_css_event_public.h for more detail */
175 uint32_t isp_config_id; /**< Unique ID to track which config was actually applied to a particular frame */ 175 uint32_t isp_config_id; /** Unique ID to track which config was actually applied to a particular frame */
176 bool valid; /**< First video output frame is not valid */ 176 bool valid; /** First video output frame is not valid */
177 bool contiguous; /**< memory is allocated physically contiguously */ 177 bool contiguous; /** memory is allocated physically contiguously */
178 union { 178 union {
179 unsigned int _initialisation_dummy; 179 unsigned int _initialisation_dummy;
180 struct ia_css_frame_plane raw; 180 struct ia_css_frame_plane raw;
@@ -185,7 +185,7 @@ struct ia_css_frame {
185 struct ia_css_frame_nv_planes nv; 185 struct ia_css_frame_nv_planes nv;
186 struct ia_css_frame_plane6_planes plane6; 186 struct ia_css_frame_plane6_planes plane6;
187 struct ia_css_frame_binary_plane binary; 187 struct ia_css_frame_binary_plane binary;
188 } planes; /**< frame planes, select the right one based on 188 } planes; /** frame planes, select the right one based on
189 info.format */ 189 info.format */
190}; 190};
191 191
@@ -204,7 +204,7 @@ struct ia_css_frame {
204 { 0 } /* planes */ \ 204 { 0 } /* planes */ \
205} 205}
206 206
207/** @brief Fill a frame with zeros 207/* @brief Fill a frame with zeros
208 * 208 *
209 * @param frame The frame. 209 * @param frame The frame.
210 * @return None 210 * @return None
@@ -213,7 +213,7 @@ struct ia_css_frame {
213 */ 213 */
214void ia_css_frame_zero(struct ia_css_frame *frame); 214void ia_css_frame_zero(struct ia_css_frame *frame);
215 215
216/** @brief Allocate a CSS frame structure 216/* @brief Allocate a CSS frame structure
217 * 217 *
218 * @param frame The allocated frame. 218 * @param frame The allocated frame.
219 * @param width The width (in pixels) of the frame. 219 * @param width The width (in pixels) of the frame.
@@ -234,7 +234,7 @@ ia_css_frame_allocate(struct ia_css_frame **frame,
234 unsigned int stride, 234 unsigned int stride,
235 unsigned int raw_bit_depth); 235 unsigned int raw_bit_depth);
236 236
237/** @brief Allocate a CSS frame structure using a frame info structure. 237/* @brief Allocate a CSS frame structure using a frame info structure.
238 * 238 *
239 * @param frame The allocated frame. 239 * @param frame The allocated frame.
240 * @param[in] info The frame info structure. 240 * @param[in] info The frame info structure.
@@ -247,7 +247,7 @@ ia_css_frame_allocate(struct ia_css_frame **frame,
247enum ia_css_err 247enum ia_css_err
248ia_css_frame_allocate_from_info(struct ia_css_frame **frame, 248ia_css_frame_allocate_from_info(struct ia_css_frame **frame,
249 const struct ia_css_frame_info *info); 249 const struct ia_css_frame_info *info);
250/** @brief Free a CSS frame structure. 250/* @brief Free a CSS frame structure.
251 * 251 *
252 * @param[in] frame Pointer to the frame. 252 * @param[in] frame Pointer to the frame.
253 * @return None 253 * @return None
@@ -258,7 +258,7 @@ ia_css_frame_allocate_from_info(struct ia_css_frame **frame,
258void 258void
259ia_css_frame_free(struct ia_css_frame *frame); 259ia_css_frame_free(struct ia_css_frame *frame);
260 260
261/** @brief Allocate a contiguous CSS frame structure 261/* @brief Allocate a contiguous CSS frame structure
262 * 262 *
263 * @param frame The allocated frame. 263 * @param frame The allocated frame.
264 * @param width The width (in pixels) of the frame. 264 * @param width The width (in pixels) of the frame.
@@ -280,7 +280,7 @@ ia_css_frame_allocate_contiguous(struct ia_css_frame **frame,
280 unsigned int stride, 280 unsigned int stride,
281 unsigned int raw_bit_depth); 281 unsigned int raw_bit_depth);
282 282
283/** @brief Allocate a contiguous CSS frame from a frame info structure. 283/* @brief Allocate a contiguous CSS frame from a frame info structure.
284 * 284 *
285 * @param frame The allocated frame. 285 * @param frame The allocated frame.
286 * @param[in] info The frame info structure. 286 * @param[in] info The frame info structure.
@@ -296,7 +296,7 @@ enum ia_css_err
296ia_css_frame_allocate_contiguous_from_info(struct ia_css_frame **frame, 296ia_css_frame_allocate_contiguous_from_info(struct ia_css_frame **frame,
297 const struct ia_css_frame_info *info); 297 const struct ia_css_frame_info *info);
298 298
299/** @brief Allocate a CSS frame structure using a frame info structure. 299/* @brief Allocate a CSS frame structure using a frame info structure.
300 * 300 *
301 * @param frame The allocated frame. 301 * @param frame The allocated frame.
302 * @param[in] info The frame info structure. 302 * @param[in] info The frame info structure.
@@ -309,7 +309,7 @@ enum ia_css_err
309ia_css_frame_create_from_info(struct ia_css_frame **frame, 309ia_css_frame_create_from_info(struct ia_css_frame **frame,
310 const struct ia_css_frame_info *info); 310 const struct ia_css_frame_info *info);
311 311
312/** @brief Set a mapped data buffer to a CSS frame 312/* @brief Set a mapped data buffer to a CSS frame
313 * 313 *
314 * @param[in] frame Valid CSS frame pointer 314 * @param[in] frame Valid CSS frame pointer
315 * @param[in] mapped_data Mapped data buffer to be assigned to the CSS frame 315 * @param[in] mapped_data Mapped data buffer to be assigned to the CSS frame
@@ -327,7 +327,7 @@ ia_css_frame_set_data(struct ia_css_frame *frame,
327 const ia_css_ptr mapped_data, 327 const ia_css_ptr mapped_data,
328 size_t data_size_bytes); 328 size_t data_size_bytes);
329 329
330/** @brief Map an existing frame data pointer to a CSS frame. 330/* @brief Map an existing frame data pointer to a CSS frame.
331 * 331 *
332 * @param frame Pointer to the frame to be initialized 332 * @param frame Pointer to the frame to be initialized
333 * @param[in] info The frame info. 333 * @param[in] info The frame info.
@@ -350,7 +350,7 @@ ia_css_frame_map(struct ia_css_frame **frame,
350 uint16_t attribute, 350 uint16_t attribute,
351 void *context); 351 void *context);
352 352
353/** @brief Unmap a CSS frame structure. 353/* @brief Unmap a CSS frame structure.
354 * 354 *
355 * @param[in] frame Pointer to the CSS frame. 355 * @param[in] frame Pointer to the CSS frame.
356 * @return None 356 * @return None
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_input_port.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_input_port.h
index 8a17c3346caa..f415570a3da9 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_input_port.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_input_port.h
@@ -15,11 +15,11 @@
15#ifndef __IA_CSS_INPUT_PORT_H 15#ifndef __IA_CSS_INPUT_PORT_H
16#define __IA_CSS_INPUT_PORT_H 16#define __IA_CSS_INPUT_PORT_H
17 17
18/** @file 18/* @file
19 * This file contains information about the possible input ports for CSS 19 * This file contains information about the possible input ports for CSS
20 */ 20 */
21 21
22/** Enumeration of the physical input ports on the CSS hardware. 22/* Enumeration of the physical input ports on the CSS hardware.
23 * There are 3 MIPI CSI-2 ports. 23 * There are 3 MIPI CSI-2 ports.
24 */ 24 */
25enum ia_css_csi2_port { 25enum ia_css_csi2_port {
@@ -28,39 +28,39 @@ enum ia_css_csi2_port {
28 IA_CSS_CSI2_PORT2 /* Implicitly map to MIPI_PORT2_ID */ 28 IA_CSS_CSI2_PORT2 /* Implicitly map to MIPI_PORT2_ID */
29}; 29};
30 30
31/** Backward compatible for CSS API 2.0 only 31/* Backward compatible for CSS API 2.0 only
32 * TO BE REMOVED when all drivers move to CSS API 2.1 32 * TO BE REMOVED when all drivers move to CSS API 2.1
33 */ 33 */
34#define IA_CSS_CSI2_PORT_4LANE IA_CSS_CSI2_PORT0 34#define IA_CSS_CSI2_PORT_4LANE IA_CSS_CSI2_PORT0
35#define IA_CSS_CSI2_PORT_1LANE IA_CSS_CSI2_PORT1 35#define IA_CSS_CSI2_PORT_1LANE IA_CSS_CSI2_PORT1
36#define IA_CSS_CSI2_PORT_2LANE IA_CSS_CSI2_PORT2 36#define IA_CSS_CSI2_PORT_2LANE IA_CSS_CSI2_PORT2
37 37
38/** The CSI2 interface supports 2 types of compression or can 38/* The CSI2 interface supports 2 types of compression or can
39 * be run without compression. 39 * be run without compression.
40 */ 40 */
41enum ia_css_csi2_compression_type { 41enum ia_css_csi2_compression_type {
42 IA_CSS_CSI2_COMPRESSION_TYPE_NONE, /**< No compression */ 42 IA_CSS_CSI2_COMPRESSION_TYPE_NONE, /** No compression */
43 IA_CSS_CSI2_COMPRESSION_TYPE_1, /**< Compression scheme 1 */ 43 IA_CSS_CSI2_COMPRESSION_TYPE_1, /** Compression scheme 1 */
44 IA_CSS_CSI2_COMPRESSION_TYPE_2 /**< Compression scheme 2 */ 44 IA_CSS_CSI2_COMPRESSION_TYPE_2 /** Compression scheme 2 */
45}; 45};
46 46
47struct ia_css_csi2_compression { 47struct ia_css_csi2_compression {
48 enum ia_css_csi2_compression_type type; 48 enum ia_css_csi2_compression_type type;
49 /**< Compression used */ 49 /** Compression used */
50 unsigned int compressed_bits_per_pixel; 50 unsigned int compressed_bits_per_pixel;
51 /**< Compressed bits per pixel (only when compression is enabled) */ 51 /** Compressed bits per pixel (only when compression is enabled) */
52 unsigned int uncompressed_bits_per_pixel; 52 unsigned int uncompressed_bits_per_pixel;
53 /**< Uncompressed bits per pixel (only when compression is enabled) */ 53 /** Uncompressed bits per pixel (only when compression is enabled) */
54}; 54};
55 55
56/** Input port structure. 56/* Input port structure.
57 */ 57 */
58struct ia_css_input_port { 58struct ia_css_input_port {
59 enum ia_css_csi2_port port; /**< Physical CSI-2 port */ 59 enum ia_css_csi2_port port; /** Physical CSI-2 port */
60 unsigned int num_lanes; /**< Number of lanes used (4-lane port only) */ 60 unsigned int num_lanes; /** Number of lanes used (4-lane port only) */
61 unsigned int timeout; /**< Timeout value */ 61 unsigned int timeout; /** Timeout value */
62 unsigned int rxcount; /**< Register value, should include all lanes */ 62 unsigned int rxcount; /** Register value, should include all lanes */
63 struct ia_css_csi2_compression compression; /**< Compression used */ 63 struct ia_css_csi2_compression compression; /** Compression used */
64}; 64};
65 65
66#endif /* __IA_CSS_INPUT_PORT_H */ 66#endif /* __IA_CSS_INPUT_PORT_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_irq.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_irq.h
index 416ca4d28732..10ef61178bb2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_irq.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_irq.h
@@ -15,7 +15,7 @@
15#ifndef __IA_CSS_IRQ_H 15#ifndef __IA_CSS_IRQ_H
16#define __IA_CSS_IRQ_H 16#define __IA_CSS_IRQ_H
17 17
18/** @file 18/* @file
19 * This file contains information for Interrupts/IRQs from CSS 19 * This file contains information for Interrupts/IRQs from CSS
20 */ 20 */
21 21
@@ -23,14 +23,14 @@
23#include "ia_css_pipe_public.h" 23#include "ia_css_pipe_public.h"
24#include "ia_css_input_port.h" 24#include "ia_css_input_port.h"
25 25
26/** Interrupt types, these enumerate all supported interrupt types. 26/* Interrupt types, these enumerate all supported interrupt types.
27 */ 27 */
28enum ia_css_irq_type { 28enum ia_css_irq_type {
29 IA_CSS_IRQ_TYPE_EDGE, /**< Edge (level) sensitive interrupt */ 29 IA_CSS_IRQ_TYPE_EDGE, /** Edge (level) sensitive interrupt */
30 IA_CSS_IRQ_TYPE_PULSE /**< Pulse-shaped interrupt */ 30 IA_CSS_IRQ_TYPE_PULSE /** Pulse-shaped interrupt */
31}; 31};
32 32
33/** Interrupt request type. 33/* Interrupt request type.
34 * When the CSS hardware generates an interrupt, a function in this API 34 * When the CSS hardware generates an interrupt, a function in this API
35 * needs to be called to retrieve information about the interrupt. 35 * needs to be called to retrieve information about the interrupt.
36 * This interrupt type is part of this information and indicates what 36 * This interrupt type is part of this information and indicates what
@@ -46,55 +46,55 @@ enum ia_css_irq_type {
46 */ 46 */
47enum ia_css_irq_info { 47enum ia_css_irq_info {
48 IA_CSS_IRQ_INFO_CSS_RECEIVER_ERROR = 1 << 0, 48 IA_CSS_IRQ_INFO_CSS_RECEIVER_ERROR = 1 << 0,
49 /**< the css receiver has encountered an error */ 49 /** the css receiver has encountered an error */
50 IA_CSS_IRQ_INFO_CSS_RECEIVER_FIFO_OVERFLOW = 1 << 1, 50 IA_CSS_IRQ_INFO_CSS_RECEIVER_FIFO_OVERFLOW = 1 << 1,
51 /**< the FIFO in the csi receiver has overflown */ 51 /** the FIFO in the csi receiver has overflown */
52 IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF = 1 << 2, 52 IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF = 1 << 2,
53 /**< the css receiver received the start of frame */ 53 /** the css receiver received the start of frame */
54 IA_CSS_IRQ_INFO_CSS_RECEIVER_EOF = 1 << 3, 54 IA_CSS_IRQ_INFO_CSS_RECEIVER_EOF = 1 << 3,
55 /**< the css receiver received the end of frame */ 55 /** the css receiver received the end of frame */
56 IA_CSS_IRQ_INFO_CSS_RECEIVER_SOL = 1 << 4, 56 IA_CSS_IRQ_INFO_CSS_RECEIVER_SOL = 1 << 4,
57 /**< the css receiver received the start of line */ 57 /** the css receiver received the start of line */
58 IA_CSS_IRQ_INFO_PSYS_EVENTS_READY = 1 << 5, 58 IA_CSS_IRQ_INFO_PSYS_EVENTS_READY = 1 << 5,
59 /**< One or more events are available in the PSYS event queue */ 59 /** One or more events are available in the PSYS event queue */
60 IA_CSS_IRQ_INFO_EVENTS_READY = IA_CSS_IRQ_INFO_PSYS_EVENTS_READY, 60 IA_CSS_IRQ_INFO_EVENTS_READY = IA_CSS_IRQ_INFO_PSYS_EVENTS_READY,
61 /**< deprecated{obsolete version of IA_CSS_IRQ_INFO_PSYS_EVENTS_READY, 61 /** deprecated{obsolete version of IA_CSS_IRQ_INFO_PSYS_EVENTS_READY,
62 * same functionality.} */ 62 * same functionality.} */
63 IA_CSS_IRQ_INFO_CSS_RECEIVER_EOL = 1 << 6, 63 IA_CSS_IRQ_INFO_CSS_RECEIVER_EOL = 1 << 6,
64 /**< the css receiver received the end of line */ 64 /** the css receiver received the end of line */
65 IA_CSS_IRQ_INFO_CSS_RECEIVER_SIDEBAND_CHANGED = 1 << 7, 65 IA_CSS_IRQ_INFO_CSS_RECEIVER_SIDEBAND_CHANGED = 1 << 7,
66 /**< the css receiver received a change in side band signals */ 66 /** the css receiver received a change in side band signals */
67 IA_CSS_IRQ_INFO_CSS_RECEIVER_GEN_SHORT_0 = 1 << 8, 67 IA_CSS_IRQ_INFO_CSS_RECEIVER_GEN_SHORT_0 = 1 << 8,
68 /**< generic short packets (0) */ 68 /** generic short packets (0) */
69 IA_CSS_IRQ_INFO_CSS_RECEIVER_GEN_SHORT_1 = 1 << 9, 69 IA_CSS_IRQ_INFO_CSS_RECEIVER_GEN_SHORT_1 = 1 << 9,
70 /**< generic short packets (1) */ 70 /** generic short packets (1) */
71 IA_CSS_IRQ_INFO_IF_PRIM_ERROR = 1 << 10, 71 IA_CSS_IRQ_INFO_IF_PRIM_ERROR = 1 << 10,
72 /**< the primary input formatter (A) has encountered an error */ 72 /** the primary input formatter (A) has encountered an error */
73 IA_CSS_IRQ_INFO_IF_PRIM_B_ERROR = 1 << 11, 73 IA_CSS_IRQ_INFO_IF_PRIM_B_ERROR = 1 << 11,
74 /**< the primary input formatter (B) has encountered an error */ 74 /** the primary input formatter (B) has encountered an error */
75 IA_CSS_IRQ_INFO_IF_SEC_ERROR = 1 << 12, 75 IA_CSS_IRQ_INFO_IF_SEC_ERROR = 1 << 12,
76 /**< the secondary input formatter has encountered an error */ 76 /** the secondary input formatter has encountered an error */
77 IA_CSS_IRQ_INFO_STREAM_TO_MEM_ERROR = 1 << 13, 77 IA_CSS_IRQ_INFO_STREAM_TO_MEM_ERROR = 1 << 13,
78 /**< the stream-to-memory device has encountered an error */ 78 /** the stream-to-memory device has encountered an error */
79 IA_CSS_IRQ_INFO_SW_0 = 1 << 14, 79 IA_CSS_IRQ_INFO_SW_0 = 1 << 14,
80 /**< software interrupt 0 */ 80 /** software interrupt 0 */
81 IA_CSS_IRQ_INFO_SW_1 = 1 << 15, 81 IA_CSS_IRQ_INFO_SW_1 = 1 << 15,
82 /**< software interrupt 1 */ 82 /** software interrupt 1 */
83 IA_CSS_IRQ_INFO_SW_2 = 1 << 16, 83 IA_CSS_IRQ_INFO_SW_2 = 1 << 16,
84 /**< software interrupt 2 */ 84 /** software interrupt 2 */
85 IA_CSS_IRQ_INFO_ISP_BINARY_STATISTICS_READY = 1 << 17, 85 IA_CSS_IRQ_INFO_ISP_BINARY_STATISTICS_READY = 1 << 17,
86 /**< ISP binary statistics are ready */ 86 /** ISP binary statistics are ready */
87 IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR = 1 << 18, 87 IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR = 1 << 18,
88 /**< the input system in in error */ 88 /** the input system in in error */
89 IA_CSS_IRQ_INFO_IF_ERROR = 1 << 19, 89 IA_CSS_IRQ_INFO_IF_ERROR = 1 << 19,
90 /**< the input formatter in in error */ 90 /** the input formatter in in error */
91 IA_CSS_IRQ_INFO_DMA_ERROR = 1 << 20, 91 IA_CSS_IRQ_INFO_DMA_ERROR = 1 << 20,
92 /**< the dma in in error */ 92 /** the dma in in error */
93 IA_CSS_IRQ_INFO_ISYS_EVENTS_READY = 1 << 21, 93 IA_CSS_IRQ_INFO_ISYS_EVENTS_READY = 1 << 21,
94 /**< end-of-frame events are ready in the isys_event queue */ 94 /** end-of-frame events are ready in the isys_event queue */
95}; 95};
96 96
97/** CSS receiver error types. Whenever the CSS receiver has encountered 97/* CSS receiver error types. Whenever the CSS receiver has encountered
98 * an error, this enumeration is used to indicate which errors have occurred. 98 * an error, this enumeration is used to indicate which errors have occurred.
99 * 99 *
100 * Note that multiple error flags can be enabled at once and that this is in 100 * Note that multiple error flags can be enabled at once and that this is in
@@ -105,39 +105,39 @@ enum ia_css_irq_info {
105 * different receiver types, or possibly none in case of tests systems. 105 * different receiver types, or possibly none in case of tests systems.
106 */ 106 */
107enum ia_css_rx_irq_info { 107enum ia_css_rx_irq_info {
108 IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN = 1U << 0, /**< buffer overrun */ 108 IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN = 1U << 0, /** buffer overrun */
109 IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE = 1U << 1, /**< entering sleep mode */ 109 IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE = 1U << 1, /** entering sleep mode */
110 IA_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE = 1U << 2, /**< exited sleep mode */ 110 IA_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE = 1U << 2, /** exited sleep mode */
111 IA_CSS_RX_IRQ_INFO_ECC_CORRECTED = 1U << 3, /**< ECC corrected */ 111 IA_CSS_RX_IRQ_INFO_ECC_CORRECTED = 1U << 3, /** ECC corrected */
112 IA_CSS_RX_IRQ_INFO_ERR_SOT = 1U << 4, 112 IA_CSS_RX_IRQ_INFO_ERR_SOT = 1U << 4,
113 /**< Start of transmission */ 113 /** Start of transmission */
114 IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC = 1U << 5, /**< SOT sync (??) */ 114 IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC = 1U << 5, /** SOT sync (??) */
115 IA_CSS_RX_IRQ_INFO_ERR_CONTROL = 1U << 6, /**< Control (??) */ 115 IA_CSS_RX_IRQ_INFO_ERR_CONTROL = 1U << 6, /** Control (??) */
116 IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE = 1U << 7, /**< Double ECC */ 116 IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE = 1U << 7, /** Double ECC */
117 IA_CSS_RX_IRQ_INFO_ERR_CRC = 1U << 8, /**< CRC error */ 117 IA_CSS_RX_IRQ_INFO_ERR_CRC = 1U << 8, /** CRC error */
118 IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID = 1U << 9, /**< Unknown ID */ 118 IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID = 1U << 9, /** Unknown ID */
119 IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC = 1U << 10,/**< Frame sync error */ 119 IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC = 1U << 10,/** Frame sync error */
120 IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA = 1U << 11,/**< Frame data error */ 120 IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA = 1U << 11,/** Frame data error */
121 IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT = 1U << 12,/**< Timeout occurred */ 121 IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT = 1U << 12,/** Timeout occurred */
122 IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC = 1U << 13,/**< Unknown escape seq. */ 122 IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC = 1U << 13,/** Unknown escape seq. */
123 IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC = 1U << 14,/**< Line Sync error */ 123 IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC = 1U << 14,/** Line Sync error */
124 IA_CSS_RX_IRQ_INFO_INIT_TIMEOUT = 1U << 15, 124 IA_CSS_RX_IRQ_INFO_INIT_TIMEOUT = 1U << 15,
125}; 125};
126 126
127/** Interrupt info structure. This structure contains information about an 127/* Interrupt info structure. This structure contains information about an
128 * interrupt. This needs to be used after an interrupt is received on the IA 128 * interrupt. This needs to be used after an interrupt is received on the IA
129 * to perform the correct action. 129 * to perform the correct action.
130 */ 130 */
131struct ia_css_irq { 131struct ia_css_irq {
132 enum ia_css_irq_info type; /**< Interrupt type. */ 132 enum ia_css_irq_info type; /** Interrupt type. */
133 unsigned int sw_irq_0_val; /**< In case of SW interrupt 0, value. */ 133 unsigned int sw_irq_0_val; /** In case of SW interrupt 0, value. */
134 unsigned int sw_irq_1_val; /**< In case of SW interrupt 1, value. */ 134 unsigned int sw_irq_1_val; /** In case of SW interrupt 1, value. */
135 unsigned int sw_irq_2_val; /**< In case of SW interrupt 2, value. */ 135 unsigned int sw_irq_2_val; /** In case of SW interrupt 2, value. */
136 struct ia_css_pipe *pipe; 136 struct ia_css_pipe *pipe;
137 /**< The image pipe that generated the interrupt. */ 137 /** The image pipe that generated the interrupt. */
138}; 138};
139 139
140/** @brief Obtain interrupt information. 140/* @brief Obtain interrupt information.
141 * 141 *
142 * @param[out] info Pointer to the interrupt info. The interrupt 142 * @param[out] info Pointer to the interrupt info. The interrupt
143 * information wil be written to this info. 143 * information wil be written to this info.
@@ -154,7 +154,7 @@ struct ia_css_irq {
154enum ia_css_err 154enum ia_css_err
155ia_css_irq_translate(unsigned int *info); 155ia_css_irq_translate(unsigned int *info);
156 156
157/** @brief Get CSI receiver error info. 157/* @brief Get CSI receiver error info.
158 * 158 *
159 * @param[out] irq_bits Pointer to the interrupt bits. The interrupt 159 * @param[out] irq_bits Pointer to the interrupt bits. The interrupt
160 * bits will be written this info. 160 * bits will be written this info.
@@ -172,7 +172,7 @@ ia_css_irq_translate(unsigned int *info);
172void 172void
173ia_css_rx_get_irq_info(unsigned int *irq_bits); 173ia_css_rx_get_irq_info(unsigned int *irq_bits);
174 174
175/** @brief Get CSI receiver error info. 175/* @brief Get CSI receiver error info.
176 * 176 *
177 * @param[in] port Input port identifier. 177 * @param[in] port Input port identifier.
178 * @param[out] irq_bits Pointer to the interrupt bits. The interrupt 178 * @param[out] irq_bits Pointer to the interrupt bits. The interrupt
@@ -188,7 +188,7 @@ ia_css_rx_get_irq_info(unsigned int *irq_bits);
188void 188void
189ia_css_rx_port_get_irq_info(enum ia_css_csi2_port port, unsigned int *irq_bits); 189ia_css_rx_port_get_irq_info(enum ia_css_csi2_port port, unsigned int *irq_bits);
190 190
191/** @brief Clear CSI receiver error info. 191/* @brief Clear CSI receiver error info.
192 * 192 *
193 * @param[in] irq_bits The bits that should be cleared from the CSI receiver 193 * @param[in] irq_bits The bits that should be cleared from the CSI receiver
194 * interrupt bits register. 194 * interrupt bits register.
@@ -205,7 +205,7 @@ ia_css_rx_port_get_irq_info(enum ia_css_csi2_port port, unsigned int *irq_bits);
205void 205void
206ia_css_rx_clear_irq_info(unsigned int irq_bits); 206ia_css_rx_clear_irq_info(unsigned int irq_bits);
207 207
208/** @brief Clear CSI receiver error info. 208/* @brief Clear CSI receiver error info.
209 * 209 *
210 * @param[in] port Input port identifier. 210 * @param[in] port Input port identifier.
211 * @param[in] irq_bits The bits that should be cleared from the CSI receiver 211 * @param[in] irq_bits The bits that should be cleared from the CSI receiver
@@ -220,7 +220,7 @@ ia_css_rx_clear_irq_info(unsigned int irq_bits);
220void 220void
221ia_css_rx_port_clear_irq_info(enum ia_css_csi2_port port, unsigned int irq_bits); 221ia_css_rx_port_clear_irq_info(enum ia_css_csi2_port port, unsigned int irq_bits);
222 222
223/** @brief Enable or disable specific interrupts. 223/* @brief Enable or disable specific interrupts.
224 * 224 *
225 * @param[in] type The interrupt type that will be enabled/disabled. 225 * @param[in] type The interrupt type that will be enabled/disabled.
226 * @param[in] enable enable or disable. 226 * @param[in] enable enable or disable.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_metadata.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_metadata.h
index c40c5a19bfe1..8b674c98224c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_metadata.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_metadata.h
@@ -15,7 +15,7 @@
15#ifndef __IA_CSS_METADATA_H 15#ifndef __IA_CSS_METADATA_H
16#define __IA_CSS_METADATA_H 16#define __IA_CSS_METADATA_H
17 17
18/** @file 18/* @file
19 * This file contains structure for processing sensor metadata. 19 * This file contains structure for processing sensor metadata.
20 */ 20 */
21 21
@@ -23,32 +23,32 @@
23#include "ia_css_types.h" 23#include "ia_css_types.h"
24#include "ia_css_stream_format.h" 24#include "ia_css_stream_format.h"
25 25
26/** Metadata configuration. This data structure contains necessary info 26/* Metadata configuration. This data structure contains necessary info
27 * to process sensor metadata. 27 * to process sensor metadata.
28 */ 28 */
29struct ia_css_metadata_config { 29struct ia_css_metadata_config {
30 enum ia_css_stream_format data_type; /**< Data type of CSI-2 embedded 30 enum ia_css_stream_format data_type; /** Data type of CSI-2 embedded
31 data. The default value is IA_CSS_STREAM_FORMAT_EMBEDDED. For 31 data. The default value is IA_CSS_STREAM_FORMAT_EMBEDDED. For
32 certain sensors, user can choose non-default data type for embedded 32 certain sensors, user can choose non-default data type for embedded
33 data. */ 33 data. */
34 struct ia_css_resolution resolution; /**< Resolution */ 34 struct ia_css_resolution resolution; /** Resolution */
35}; 35};
36 36
37struct ia_css_metadata_info { 37struct ia_css_metadata_info {
38 struct ia_css_resolution resolution; /**< Resolution */ 38 struct ia_css_resolution resolution; /** Resolution */
39 uint32_t stride; /**< Stride in bytes */ 39 uint32_t stride; /** Stride in bytes */
40 uint32_t size; /**< Total size in bytes */ 40 uint32_t size; /** Total size in bytes */
41}; 41};
42 42
43struct ia_css_metadata { 43struct ia_css_metadata {
44 struct ia_css_metadata_info info; /**< Layout info */ 44 struct ia_css_metadata_info info; /** Layout info */
45 ia_css_ptr address; /**< CSS virtual address */ 45 ia_css_ptr address; /** CSS virtual address */
46 uint32_t exp_id; 46 uint32_t exp_id;
47 /**< Exposure ID, see ia_css_event_public.h for more detail */ 47 /** Exposure ID, see ia_css_event_public.h for more detail */
48}; 48};
49#define SIZE_OF_IA_CSS_METADATA_STRUCT sizeof(struct ia_css_metadata) 49#define SIZE_OF_IA_CSS_METADATA_STRUCT sizeof(struct ia_css_metadata)
50 50
51/** @brief Allocate a metadata buffer. 51/* @brief Allocate a metadata buffer.
52 * @param[in] metadata_info Metadata info struct, contains details on metadata buffers. 52 * @param[in] metadata_info Metadata info struct, contains details on metadata buffers.
53 * @return Pointer of metadata buffer or NULL (if error) 53 * @return Pointer of metadata buffer or NULL (if error)
54 * 54 *
@@ -58,7 +58,7 @@ struct ia_css_metadata {
58struct ia_css_metadata * 58struct ia_css_metadata *
59ia_css_metadata_allocate(const struct ia_css_metadata_info *metadata_info); 59ia_css_metadata_allocate(const struct ia_css_metadata_info *metadata_info);
60 60
61/** @brief Free a metadata buffer. 61/* @brief Free a metadata buffer.
62 * 62 *
63 * @param[in] metadata Pointer of metadata buffer. 63 * @param[in] metadata Pointer of metadata buffer.
64 * @return None 64 * @return None
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mipi.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mipi.h
index fd2c01b60b28..f9c9cd76be97 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mipi.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mipi.h
@@ -15,7 +15,7 @@
15#ifndef __IA_CSS_MIPI_H 15#ifndef __IA_CSS_MIPI_H
16#define __IA_CSS_MIPI_H 16#define __IA_CSS_MIPI_H
17 17
18/** @file 18/* @file
19 * This file contains MIPI support functionality 19 * This file contains MIPI support functionality
20 */ 20 */
21 21
@@ -24,10 +24,10 @@
24#include "ia_css_stream_format.h" 24#include "ia_css_stream_format.h"
25#include "ia_css_input_port.h" 25#include "ia_css_input_port.h"
26 26
27/** Backward compatible for CSS API 2.0 only 27/* Backward compatible for CSS API 2.0 only
28 * TO BE REMOVED when all drivers move to CSS API 2.1. 28 * TO BE REMOVED when all drivers move to CSS API 2.1.
29 */ 29 */
30/** @brief Specify a CSS MIPI frame buffer. 30/* @brief Specify a CSS MIPI frame buffer.
31 * 31 *
32 * @param[in] size_mem_words The frame size in memory words (32B). 32 * @param[in] size_mem_words The frame size in memory words (32B).
33 * @param[in] contiguous Allocate memory physically contiguously or not. 33 * @param[in] contiguous Allocate memory physically contiguously or not.
@@ -42,7 +42,7 @@ ia_css_mipi_frame_specify(const unsigned int size_mem_words,
42 const bool contiguous); 42 const bool contiguous);
43 43
44#if !defined(HAS_NO_INPUT_SYSTEM) 44#if !defined(HAS_NO_INPUT_SYSTEM)
45/** @brief Register size of a CSS MIPI frame for check during capturing. 45/* @brief Register size of a CSS MIPI frame for check during capturing.
46 * 46 *
47 * @param[in] port CSI-2 port this check is registered. 47 * @param[in] port CSI-2 port this check is registered.
48 * @param[in] size_mem_words The frame size in memory words (32B). 48 * @param[in] size_mem_words The frame size in memory words (32B).
@@ -59,7 +59,7 @@ ia_css_mipi_frame_enable_check_on_size(const enum ia_css_csi2_port port,
59 const unsigned int size_mem_words); 59 const unsigned int size_mem_words);
60#endif 60#endif
61 61
62/** @brief Calculate the size of a mipi frame. 62/* @brief Calculate the size of a mipi frame.
63 * 63 *
64 * @param[in] width The width (in pixels) of the frame. 64 * @param[in] width The width (in pixels) of the frame.
65 * @param[in] height The height (in lines) of the frame. 65 * @param[in] height The height (in lines) of the frame.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu.h
index 48f8855d61f6..13c21056bfbf 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu.h
@@ -15,11 +15,11 @@
15#ifndef __IA_CSS_MMU_H 15#ifndef __IA_CSS_MMU_H
16#define __IA_CSS_MMU_H 16#define __IA_CSS_MMU_H
17 17
18/** @file 18/* @file
19 * This file contains one support function for invalidating the CSS MMU cache 19 * This file contains one support function for invalidating the CSS MMU cache
20 */ 20 */
21 21
22/** @brief Invalidate the MMU internal cache. 22/* @brief Invalidate the MMU internal cache.
23 * @return None 23 * @return None
24 * 24 *
25 * This function triggers an invalidation of the translate-look-aside 25 * This function triggers an invalidation of the translate-look-aside
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_morph.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_morph.h
index 969840da52b2..de409638d009 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_morph.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_morph.h
@@ -15,13 +15,13 @@
15#ifndef __IA_CSS_MORPH_H 15#ifndef __IA_CSS_MORPH_H
16#define __IA_CSS_MORPH_H 16#define __IA_CSS_MORPH_H
17 17
18/** @file 18/* @file
19 * This file contains supporting for morphing table 19 * This file contains supporting for morphing table
20 */ 20 */
21 21
22#include <ia_css_types.h> 22#include <ia_css_types.h>
23 23
24/** @brief Morphing table 24/* @brief Morphing table
25 * @param[in] width Width of the morphing table. 25 * @param[in] width Width of the morphing table.
26 * @param[in] height Height of the morphing table. 26 * @param[in] height Height of the morphing table.
27 * @return Pointer to the morphing table 27 * @return Pointer to the morphing table
@@ -29,7 +29,7 @@
29struct ia_css_morph_table * 29struct ia_css_morph_table *
30ia_css_morph_table_allocate(unsigned int width, unsigned int height); 30ia_css_morph_table_allocate(unsigned int width, unsigned int height);
31 31
32/** @brief Free the morph table 32/* @brief Free the morph table
33 * @param[in] me Pointer to the morph table. 33 * @param[in] me Pointer to the morph table.
34 * @return None 34 * @return None
35*/ 35*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe_public.h
index 733e0ef3afe8..df0aad9a6ab9 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe_public.h
@@ -15,7 +15,7 @@
15#ifndef __IA_CSS_PIPE_PUBLIC_H 15#ifndef __IA_CSS_PIPE_PUBLIC_H
16#define __IA_CSS_PIPE_PUBLIC_H 16#define __IA_CSS_PIPE_PUBLIC_H
17 17
18/** @file 18/* @file
19 * This file contains the public interface for CSS pipes. 19 * This file contains the public interface for CSS pipes.
20 */ 20 */
21 21
@@ -34,7 +34,7 @@ enum {
34 IA_CSS_PIPE_MAX_OUTPUT_STAGE, 34 IA_CSS_PIPE_MAX_OUTPUT_STAGE,
35}; 35};
36 36
37/** Enumeration of pipe modes. This mode can be used to create 37/* Enumeration of pipe modes. This mode can be used to create
38 * an image pipe for this mode. These pipes can be combined 38 * an image pipe for this mode. These pipes can be combined
39 * to configure and run streams on the ISP. 39 * to configure and run streams on the ISP.
40 * 40 *
@@ -42,12 +42,12 @@ enum {
42 * create a continuous capture stream. 42 * create a continuous capture stream.
43 */ 43 */
44enum ia_css_pipe_mode { 44enum ia_css_pipe_mode {
45 IA_CSS_PIPE_MODE_PREVIEW, /**< Preview pipe */ 45 IA_CSS_PIPE_MODE_PREVIEW, /** Preview pipe */
46 IA_CSS_PIPE_MODE_VIDEO, /**< Video pipe */ 46 IA_CSS_PIPE_MODE_VIDEO, /** Video pipe */
47 IA_CSS_PIPE_MODE_CAPTURE, /**< Still capture pipe */ 47 IA_CSS_PIPE_MODE_CAPTURE, /** Still capture pipe */
48 IA_CSS_PIPE_MODE_ACC, /**< Accelerated pipe */ 48 IA_CSS_PIPE_MODE_ACC, /** Accelerated pipe */
49 IA_CSS_PIPE_MODE_COPY, /**< Copy pipe, only used for embedded/image data copying */ 49 IA_CSS_PIPE_MODE_COPY, /** Copy pipe, only used for embedded/image data copying */
50 IA_CSS_PIPE_MODE_YUVPP, /**< YUV post processing pipe, used for all use cases with YUV input, 50 IA_CSS_PIPE_MODE_YUVPP, /** YUV post processing pipe, used for all use cases with YUV input,
51 for SoC sensor and external ISP */ 51 for SoC sensor and external ISP */
52}; 52};
53/* Temporary define */ 53/* Temporary define */
@@ -58,10 +58,10 @@ enum ia_css_pipe_mode {
58 * the order should match with definition in sh_css_defs.h 58 * the order should match with definition in sh_css_defs.h
59 */ 59 */
60enum ia_css_pipe_version { 60enum ia_css_pipe_version {
61 IA_CSS_PIPE_VERSION_1 = 1, /**< ISP1.0 pipe */ 61 IA_CSS_PIPE_VERSION_1 = 1, /** ISP1.0 pipe */
62 IA_CSS_PIPE_VERSION_2_2 = 2, /**< ISP2.2 pipe */ 62 IA_CSS_PIPE_VERSION_2_2 = 2, /** ISP2.2 pipe */
63 IA_CSS_PIPE_VERSION_2_6_1 = 3, /**< ISP2.6.1 pipe */ 63 IA_CSS_PIPE_VERSION_2_6_1 = 3, /** ISP2.6.1 pipe */
64 IA_CSS_PIPE_VERSION_2_7 = 4 /**< ISP2.7 pipe */ 64 IA_CSS_PIPE_VERSION_2_7 = 4 /** ISP2.7 pipe */
65}; 65};
66 66
67/** 67/**
@@ -71,79 +71,79 @@ enum ia_css_pipe_version {
71 */ 71 */
72struct ia_css_pipe_config { 72struct ia_css_pipe_config {
73 enum ia_css_pipe_mode mode; 73 enum ia_css_pipe_mode mode;
74 /**< mode, indicates which mode the pipe should use. */ 74 /** mode, indicates which mode the pipe should use. */
75 enum ia_css_pipe_version isp_pipe_version; 75 enum ia_css_pipe_version isp_pipe_version;
76 /**< pipe version, indicates which imaging pipeline the pipe should use. */ 76 /** pipe version, indicates which imaging pipeline the pipe should use. */
77 struct ia_css_resolution input_effective_res; 77 struct ia_css_resolution input_effective_res;
78 /**< input effective resolution */ 78 /** input effective resolution */
79 struct ia_css_resolution bayer_ds_out_res; 79 struct ia_css_resolution bayer_ds_out_res;
80 /**< bayer down scaling */ 80 /** bayer down scaling */
81 struct ia_css_resolution capt_pp_in_res; 81 struct ia_css_resolution capt_pp_in_res;
82#ifndef ISP2401 82#ifndef ISP2401
83 /**< bayer down scaling */ 83 /** bayer down scaling */
84#else 84#else
85 /**< capture post processing input resolution */ 85 /** capture post processing input resolution */
86#endif 86#endif
87 struct ia_css_resolution vf_pp_in_res; 87 struct ia_css_resolution vf_pp_in_res;
88#ifndef ISP2401 88#ifndef ISP2401
89 /**< bayer down scaling */ 89 /** bayer down scaling */
90#else 90#else
91 /**< view finder post processing input resolution */ 91 /** view finder post processing input resolution */
92 struct ia_css_resolution output_system_in_res; 92 struct ia_css_resolution output_system_in_res;
93 /**< For IPU3 only: use output_system_in_res to specify what input resolution 93 /** For IPU3 only: use output_system_in_res to specify what input resolution
94 will OSYS receive, this resolution is equal to the output resolution of GDC 94 will OSYS receive, this resolution is equal to the output resolution of GDC
95 if not determined CSS will set output_system_in_res with main osys output pin resolution 95 if not determined CSS will set output_system_in_res with main osys output pin resolution
96 All other IPUs may ignore this property */ 96 All other IPUs may ignore this property */
97#endif 97#endif
98 struct ia_css_resolution dvs_crop_out_res; 98 struct ia_css_resolution dvs_crop_out_res;
99 /**< dvs crop, video only, not in use yet. Use dvs_envelope below. */ 99 /** dvs crop, video only, not in use yet. Use dvs_envelope below. */
100 struct ia_css_frame_info output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE]; 100 struct ia_css_frame_info output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
101 /**< output of YUV scaling */ 101 /** output of YUV scaling */
102 struct ia_css_frame_info vf_output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE]; 102 struct ia_css_frame_info vf_output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
103 /**< output of VF YUV scaling */ 103 /** output of VF YUV scaling */
104 struct ia_css_fw_info *acc_extension; 104 struct ia_css_fw_info *acc_extension;
105 /**< Pipeline extension accelerator */ 105 /** Pipeline extension accelerator */
106 struct ia_css_fw_info **acc_stages; 106 struct ia_css_fw_info **acc_stages;
107 /**< Standalone accelerator stages */ 107 /** Standalone accelerator stages */
108 uint32_t num_acc_stages; 108 uint32_t num_acc_stages;
109 /**< Number of standalone accelerator stages */ 109 /** Number of standalone accelerator stages */
110 struct ia_css_capture_config default_capture_config; 110 struct ia_css_capture_config default_capture_config;
111 /**< Default capture config for initial capture pipe configuration. */ 111 /** Default capture config for initial capture pipe configuration. */
112 struct ia_css_resolution dvs_envelope; /**< temporary */ 112 struct ia_css_resolution dvs_envelope; /** temporary */
113 enum ia_css_frame_delay dvs_frame_delay; 113 enum ia_css_frame_delay dvs_frame_delay;
114 /**< indicates the DVS loop delay in frame periods */ 114 /** indicates the DVS loop delay in frame periods */
115 int acc_num_execs; 115 int acc_num_execs;
116 /**< For acceleration pipes only: determine how many times the pipe 116 /** For acceleration pipes only: determine how many times the pipe
117 should be run. Setting this to -1 means it will run until 117 should be run. Setting this to -1 means it will run until
118 stopped. */ 118 stopped. */
119 bool enable_dz; 119 bool enable_dz;
120 /**< Disabling digital zoom for a pipeline, if this is set to false, 120 /** Disabling digital zoom for a pipeline, if this is set to false,
121 then setting a zoom factor will have no effect. 121 then setting a zoom factor will have no effect.
122 In some use cases this provides better performance. */ 122 In some use cases this provides better performance. */
123 bool enable_dpc; 123 bool enable_dpc;
124 /**< Disabling "Defect Pixel Correction" for a pipeline, if this is set 124 /** Disabling "Defect Pixel Correction" for a pipeline, if this is set
125 to false. In some use cases this provides better performance. */ 125 to false. In some use cases this provides better performance. */
126 bool enable_vfpp_bci; 126 bool enable_vfpp_bci;
127 /**< Enabling BCI mode will cause yuv_scale binary to be picked up 127 /** Enabling BCI mode will cause yuv_scale binary to be picked up
128 instead of vf_pp. This only applies to viewfinder post 128 instead of vf_pp. This only applies to viewfinder post
129 processing stages. */ 129 processing stages. */
130#ifdef ISP2401 130#ifdef ISP2401
131 bool enable_luma_only; 131 bool enable_luma_only;
132 /**< Enabling of monochrome mode for a pipeline. If enabled only luma processing 132 /** Enabling of monochrome mode for a pipeline. If enabled only luma processing
133 will be done. */ 133 will be done. */
134 bool enable_tnr; 134 bool enable_tnr;
135 /**< Enabling of TNR (temporal noise reduction). This is only applicable to video 135 /** Enabling of TNR (temporal noise reduction). This is only applicable to video
136 pipes. Non video-pipes should always set this parameter to false. */ 136 pipes. Non video-pipes should always set this parameter to false. */
137#endif 137#endif
138 struct ia_css_isp_config *p_isp_config; 138 struct ia_css_isp_config *p_isp_config;
139 /**< Pointer to ISP configuration */ 139 /** Pointer to ISP configuration */
140 struct ia_css_resolution gdc_in_buffer_res; 140 struct ia_css_resolution gdc_in_buffer_res;
141 /**< GDC in buffer resolution. */ 141 /** GDC in buffer resolution. */
142 struct ia_css_point gdc_in_buffer_offset; 142 struct ia_css_point gdc_in_buffer_offset;
143 /**< GDC in buffer offset - indicates the pixel coordinates of the first valid pixel inside the buffer */ 143 /** GDC in buffer offset - indicates the pixel coordinates of the first valid pixel inside the buffer */
144#ifdef ISP2401 144#ifdef ISP2401
145 struct ia_css_coordinate internal_frame_origin_bqs_on_sctbl; 145 struct ia_css_coordinate internal_frame_origin_bqs_on_sctbl;
146 /**< Origin of internal frame positioned on shading table at shading correction in ISP. 146 /** Origin of internal frame positioned on shading table at shading correction in ISP.
147 NOTE: Shading table is larger than or equal to internal frame. 147 NOTE: Shading table is larger than or equal to internal frame.
148 Shading table has shading gains and internal frame has bayer data. 148 Shading table has shading gains and internal frame has bayer data.
149 The origin of internal frame is used in shading correction in ISP 149 The origin of internal frame is used in shading correction in ISP
@@ -228,20 +228,20 @@ struct ia_css_pipe_config {
228 228
229#endif 229#endif
230 230
231/** Pipe info, this struct describes properties of a pipe after it's stream has 231/* Pipe info, this struct describes properties of a pipe after it's stream has
232 * been created. 232 * been created.
233 * ~~~** DO NOT ADD NEW FIELD **~~~ This structure will be deprecated. 233 * ~~~** DO NOT ADD NEW FIELD **~~~ This structure will be deprecated.
234 * - On the Behalf of CSS-API Committee. 234 * - On the Behalf of CSS-API Committee.
235 */ 235 */
236struct ia_css_pipe_info { 236struct ia_css_pipe_info {
237 struct ia_css_frame_info output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE]; 237 struct ia_css_frame_info output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
238 /**< Info about output resolution. This contains the stride which 238 /** Info about output resolution. This contains the stride which
239 should be used for memory allocation. */ 239 should be used for memory allocation. */
240 struct ia_css_frame_info vf_output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE]; 240 struct ia_css_frame_info vf_output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
241 /**< Info about viewfinder output resolution (optional). This contains 241 /** Info about viewfinder output resolution (optional). This contains
242 the stride that should be used for memory allocation. */ 242 the stride that should be used for memory allocation. */
243 struct ia_css_frame_info raw_output_info; 243 struct ia_css_frame_info raw_output_info;
244 /**< Raw output resolution. This indicates the resolution of the 244 /** Raw output resolution. This indicates the resolution of the
245 RAW bayer output for pipes that support this. Currently, only the 245 RAW bayer output for pipes that support this. Currently, only the
246 still capture pipes support this feature. When this resolution is 246 still capture pipes support this feature. When this resolution is
247 smaller than the input resolution, cropping will be performed by 247 smaller than the input resolution, cropping will be performed by
@@ -252,17 +252,17 @@ struct ia_css_pipe_info {
252 the input resolution - 8x8. */ 252 the input resolution - 8x8. */
253#ifdef ISP2401 253#ifdef ISP2401
254 struct ia_css_resolution output_system_in_res_info; 254 struct ia_css_resolution output_system_in_res_info;
255 /**< For IPU3 only. Info about output system in resolution which is considered 255 /** For IPU3 only. Info about output system in resolution which is considered
256 as gdc out resolution. */ 256 as gdc out resolution. */
257#endif 257#endif
258 struct ia_css_shading_info shading_info; 258 struct ia_css_shading_info shading_info;
259 /**< After an image pipe is created, this field will contain the info 259 /** After an image pipe is created, this field will contain the info
260 for the shading correction. */ 260 for the shading correction. */
261 struct ia_css_grid_info grid_info; 261 struct ia_css_grid_info grid_info;
262 /**< After an image pipe is created, this field will contain the grid 262 /** After an image pipe is created, this field will contain the grid
263 info for 3A and DVS. */ 263 info for 3A and DVS. */
264 int num_invalid_frames; 264 int num_invalid_frames;
265 /**< The very first frames in a started stream do not contain valid data. 265 /** The very first frames in a started stream do not contain valid data.
266 In this field, the CSS-firmware communicates to the host-driver how 266 In this field, the CSS-firmware communicates to the host-driver how
267 many initial frames will contain invalid data; this allows the 267 many initial frames will contain invalid data; this allows the
268 host-driver to discard those initial invalid frames and start it's 268 host-driver to discard those initial invalid frames and start it's
@@ -299,7 +299,7 @@ struct ia_css_pipe_info {
299 299
300#endif 300#endif
301 301
302/** @brief Load default pipe configuration 302/* @brief Load default pipe configuration
303 * @param[out] pipe_config The pipe configuration. 303 * @param[out] pipe_config The pipe configuration.
304 * @return None 304 * @return None
305 * 305 *
@@ -334,7 +334,7 @@ struct ia_css_pipe_info {
334 */ 334 */
335void ia_css_pipe_config_defaults(struct ia_css_pipe_config *pipe_config); 335void ia_css_pipe_config_defaults(struct ia_css_pipe_config *pipe_config);
336 336
337/** @brief Create a pipe 337/* @brief Create a pipe
338 * @param[in] config The pipe configuration. 338 * @param[in] config The pipe configuration.
339 * @param[out] pipe The pipe. 339 * @param[out] pipe The pipe.
340 * @return IA_CSS_SUCCESS or the error code. 340 * @return IA_CSS_SUCCESS or the error code.
@@ -346,7 +346,7 @@ enum ia_css_err
346ia_css_pipe_create(const struct ia_css_pipe_config *config, 346ia_css_pipe_create(const struct ia_css_pipe_config *config,
347 struct ia_css_pipe **pipe); 347 struct ia_css_pipe **pipe);
348 348
349/** @brief Destroy a pipe 349/* @brief Destroy a pipe
350 * @param[in] pipe The pipe. 350 * @param[in] pipe The pipe.
351 * @return IA_CSS_SUCCESS or the error code. 351 * @return IA_CSS_SUCCESS or the error code.
352 * 352 *
@@ -355,7 +355,7 @@ ia_css_pipe_create(const struct ia_css_pipe_config *config,
355enum ia_css_err 355enum ia_css_err
356ia_css_pipe_destroy(struct ia_css_pipe *pipe); 356ia_css_pipe_destroy(struct ia_css_pipe *pipe);
357 357
358/** @brief Provides information about a pipe 358/* @brief Provides information about a pipe
359 * @param[in] pipe The pipe. 359 * @param[in] pipe The pipe.
360 * @param[out] pipe_info The pipe information. 360 * @param[out] pipe_info The pipe information.
361 * @return IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS. 361 * @return IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS.
@@ -366,7 +366,7 @@ enum ia_css_err
366ia_css_pipe_get_info(const struct ia_css_pipe *pipe, 366ia_css_pipe_get_info(const struct ia_css_pipe *pipe,
367 struct ia_css_pipe_info *pipe_info); 367 struct ia_css_pipe_info *pipe_info);
368 368
369/** @brief Configure a pipe with filter coefficients. 369/* @brief Configure a pipe with filter coefficients.
370 * @param[in] pipe The pipe. 370 * @param[in] pipe The pipe.
371 * @param[in] config The pointer to ISP configuration. 371 * @param[in] config The pointer to ISP configuration.
372 * @return IA_CSS_SUCCESS or error code upon error. 372 * @return IA_CSS_SUCCESS or error code upon error.
@@ -378,7 +378,7 @@ enum ia_css_err
378ia_css_pipe_set_isp_config(struct ia_css_pipe *pipe, 378ia_css_pipe_set_isp_config(struct ia_css_pipe *pipe,
379 struct ia_css_isp_config *config); 379 struct ia_css_isp_config *config);
380 380
381/** @brief Controls when the Event generator raises an IRQ to the Host. 381/* @brief Controls when the Event generator raises an IRQ to the Host.
382 * 382 *
383 * @param[in] pipe The pipe. 383 * @param[in] pipe The pipe.
384 * @param[in] or_mask Binary or of enum ia_css_event_irq_mask_type. Each pipe 384 * @param[in] or_mask Binary or of enum ia_css_event_irq_mask_type. Each pipe
@@ -455,7 +455,7 @@ ia_css_pipe_set_irq_mask(struct ia_css_pipe *pipe,
455 unsigned int or_mask, 455 unsigned int or_mask,
456 unsigned int and_mask); 456 unsigned int and_mask);
457 457
458/** @brief Reads the current event IRQ mask from the CSS. 458/* @brief Reads the current event IRQ mask from the CSS.
459 * 459 *
460 * @param[in] pipe The pipe. 460 * @param[in] pipe The pipe.
461 * @param[out] or_mask Current or_mask. The bits in this mask are a binary or 461 * @param[out] or_mask Current or_mask. The bits in this mask are a binary or
@@ -476,7 +476,7 @@ ia_css_event_get_irq_mask(const struct ia_css_pipe *pipe,
476 unsigned int *or_mask, 476 unsigned int *or_mask,
477 unsigned int *and_mask); 477 unsigned int *and_mask);
478 478
479/** @brief Queue a buffer for an image pipe. 479/* @brief Queue a buffer for an image pipe.
480 * 480 *
481 * @param[in] pipe The pipe that will own the buffer. 481 * @param[in] pipe The pipe that will own the buffer.
482 * @param[in] buffer Pointer to the buffer. 482 * @param[in] buffer Pointer to the buffer.
@@ -498,7 +498,7 @@ enum ia_css_err
498ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe, 498ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
499 const struct ia_css_buffer *buffer); 499 const struct ia_css_buffer *buffer);
500 500
501/** @brief Dequeue a buffer from an image pipe. 501/* @brief Dequeue a buffer from an image pipe.
502 * 502 *
503 * @param[in] pipe The pipeline that the buffer queue belongs to. 503 * @param[in] pipe The pipeline that the buffer queue belongs to.
504 * @param[in,out] buffer The buffer is used to lookup the type which determines 504 * @param[in,out] buffer The buffer is used to lookup the type which determines
@@ -519,7 +519,7 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
519 struct ia_css_buffer *buffer); 519 struct ia_css_buffer *buffer);
520 520
521 521
522/** @brief Set the state (Enable or Disable) of the Extension stage in the 522/* @brief Set the state (Enable or Disable) of the Extension stage in the
523 * given pipe. 523 * given pipe.
524 * @param[in] pipe Pipe handle. 524 * @param[in] pipe Pipe handle.
525 * @param[in] fw_handle Extension firmware Handle (ia_css_fw_info.handle) 525 * @param[in] fw_handle Extension firmware Handle (ia_css_fw_info.handle)
@@ -546,7 +546,7 @@ ia_css_pipe_set_qos_ext_state (struct ia_css_pipe *pipe,
546 uint32_t fw_handle, 546 uint32_t fw_handle,
547 bool enable); 547 bool enable);
548 548
549/** @brief Get the state (Enable or Disable) of the Extension stage in the 549/* @brief Get the state (Enable or Disable) of the Extension stage in the
550 * given pipe. 550 * given pipe.
551 * @param[in] pipe Pipe handle. 551 * @param[in] pipe Pipe handle.
552 * @param[in] fw_handle Extension firmware Handle (ia_css_fw_info.handle) 552 * @param[in] fw_handle Extension firmware Handle (ia_css_fw_info.handle)
@@ -573,7 +573,7 @@ ia_css_pipe_get_qos_ext_state (struct ia_css_pipe *pipe,
573 bool * enable); 573 bool * enable);
574 574
575#ifdef ISP2401 575#ifdef ISP2401
576/** @brief Update mapped CSS and ISP arguments for QoS pipe during SP runtime. 576/* @brief Update mapped CSS and ISP arguments for QoS pipe during SP runtime.
577 * @param[in] pipe Pipe handle. 577 * @param[in] pipe Pipe handle.
578 * @param[in] fw_handle Extension firmware Handle (ia_css_fw_info.handle). 578 * @param[in] fw_handle Extension firmware Handle (ia_css_fw_info.handle).
579 * @param[in] css_seg Parameter memory descriptors for CSS segments. 579 * @param[in] css_seg Parameter memory descriptors for CSS segments.
@@ -595,7 +595,7 @@ ia_css_pipe_update_qos_ext_mapped_arg(struct ia_css_pipe *pipe, uint32_t fw_hand
595 struct ia_css_isp_param_isp_segments *isp_seg); 595 struct ia_css_isp_param_isp_segments *isp_seg);
596 596
597#endif 597#endif
598/** @brief Get selected configuration settings 598/* @brief Get selected configuration settings
599 * @param[in] pipe The pipe. 599 * @param[in] pipe The pipe.
600 * @param[out] config Configuration settings. 600 * @param[out] config Configuration settings.
601 * @return None 601 * @return None
@@ -604,7 +604,7 @@ void
604ia_css_pipe_get_isp_config(struct ia_css_pipe *pipe, 604ia_css_pipe_get_isp_config(struct ia_css_pipe *pipe,
605 struct ia_css_isp_config *config); 605 struct ia_css_isp_config *config);
606 606
607/** @brief Set the scaler lut on this pipe. A copy of lut is made in the inuit 607/* @brief Set the scaler lut on this pipe. A copy of lut is made in the inuit
608 * address space. So the LUT can be freed by caller. 608 * address space. So the LUT can be freed by caller.
609 * @param[in] pipe Pipe handle. 609 * @param[in] pipe Pipe handle.
610 * @param[in] lut Look up tabel 610 * @param[in] lut Look up tabel
@@ -623,7 +623,7 @@ ia_css_pipe_get_isp_config(struct ia_css_pipe *pipe,
623enum ia_css_err 623enum ia_css_err
624ia_css_pipe_set_bci_scaler_lut( struct ia_css_pipe *pipe, 624ia_css_pipe_set_bci_scaler_lut( struct ia_css_pipe *pipe,
625 const void *lut); 625 const void *lut);
626/** @brief Checking of DVS statistics ability 626/* @brief Checking of DVS statistics ability
627 * @param[in] pipe_info The pipe info. 627 * @param[in] pipe_info The pipe info.
628 * @return true - has DVS statistics ability 628 * @return true - has DVS statistics ability
629 * false - otherwise 629 * false - otherwise
@@ -631,7 +631,7 @@ ia_css_pipe_set_bci_scaler_lut( struct ia_css_pipe *pipe,
631bool ia_css_pipe_has_dvs_stats(struct ia_css_pipe_info *pipe_info); 631bool ia_css_pipe_has_dvs_stats(struct ia_css_pipe_info *pipe_info);
632 632
633#ifdef ISP2401 633#ifdef ISP2401
634/** @brief Override the frameformat set on the output pins. 634/* @brief Override the frameformat set on the output pins.
635 * @param[in] pipe Pipe handle. 635 * @param[in] pipe Pipe handle.
636 * @param[in] output_pin Pin index to set the format on 636 * @param[in] output_pin Pin index to set the format on
637 * 0 - main output pin 637 * 0 - main output pin
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_prbs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_prbs.h
index 9b0eeb08ca04..6f24656b6cb4 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_prbs.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_prbs.h
@@ -15,11 +15,11 @@
15#ifndef __IA_CSS_PRBS_H 15#ifndef __IA_CSS_PRBS_H
16#define __IA_CSS_PRBS_H 16#define __IA_CSS_PRBS_H
17 17
18/** @file 18/* @file
19 * This file contains support for Pseudo Random Bit Sequence (PRBS) inputs 19 * This file contains support for Pseudo Random Bit Sequence (PRBS) inputs
20 */ 20 */
21 21
22/** Enumerate the PRBS IDs. 22/* Enumerate the PRBS IDs.
23 */ 23 */
24enum ia_css_prbs_id { 24enum ia_css_prbs_id {
25 IA_CSS_PRBS_ID0, 25 IA_CSS_PRBS_ID0,
@@ -44,10 +44,10 @@ enum ia_css_prbs_id {
44 */ 44 */
45struct ia_css_prbs_config { 45struct ia_css_prbs_config {
46 enum ia_css_prbs_id id; 46 enum ia_css_prbs_id id;
47 unsigned int h_blank; /**< horizontal blank */ 47 unsigned int h_blank; /** horizontal blank */
48 unsigned int v_blank; /**< vertical blank */ 48 unsigned int v_blank; /** vertical blank */
49 int seed; /**< random seed for the 1st 2-pixel-components/clock */ 49 int seed; /** random seed for the 1st 2-pixel-components/clock */
50 int seed1; /**< random seed for the 2nd 2-pixel-components/clock */ 50 int seed1; /** random seed for the 2nd 2-pixel-components/clock */
51}; 51};
52 52
53#endif /* __IA_CSS_PRBS_H */ 53#endif /* __IA_CSS_PRBS_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_properties.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_properties.h
index 19af4021b24c..9a167306611c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_properties.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_properties.h
@@ -15,7 +15,7 @@
15#ifndef __IA_CSS_PROPERTIES_H 15#ifndef __IA_CSS_PROPERTIES_H
16#define __IA_CSS_PROPERTIES_H 16#define __IA_CSS_PROPERTIES_H
17 17
18/** @file 18/* @file
19 * This file contains support for retrieving properties of some hardware the CSS system 19 * This file contains support for retrieving properties of some hardware the CSS system
20 */ 20 */
21 21
@@ -24,12 +24,12 @@
24 24
25struct ia_css_properties { 25struct ia_css_properties {
26 int gdc_coord_one; 26 int gdc_coord_one;
27 bool l1_base_is_index; /**< Indicate whether the L1 page base 27 bool l1_base_is_index; /** Indicate whether the L1 page base
28 is a page index or a byte address. */ 28 is a page index or a byte address. */
29 enum ia_css_vamem_type vamem_type; 29 enum ia_css_vamem_type vamem_type;
30}; 30};
31 31
32/** @brief Get hardware properties 32/* @brief Get hardware properties
33 * @param[in,out] properties The hardware properties 33 * @param[in,out] properties The hardware properties
34 * @return None 34 * @return None
35 * 35 *
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_shading.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_shading.h
index cb0f249e98c8..588f53d32b72 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_shading.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_shading.h
@@ -15,13 +15,13 @@
15#ifndef __IA_CSS_SHADING_H 15#ifndef __IA_CSS_SHADING_H
16#define __IA_CSS_SHADING_H 16#define __IA_CSS_SHADING_H
17 17
18/** @file 18/* @file
19 * This file contains support for setting the shading table for CSS 19 * This file contains support for setting the shading table for CSS
20 */ 20 */
21 21
22#include <ia_css_types.h> 22#include <ia_css_types.h>
23 23
24/** @brief Shading table 24/* @brief Shading table
25 * @param[in] width Width of the shading table. 25 * @param[in] width Width of the shading table.
26 * @param[in] height Height of the shading table. 26 * @param[in] height Height of the shading table.
27 * @return Pointer to the shading table 27 * @return Pointer to the shading table
@@ -30,7 +30,7 @@ struct ia_css_shading_table *
30ia_css_shading_table_alloc(unsigned int width, 30ia_css_shading_table_alloc(unsigned int width,
31 unsigned int height); 31 unsigned int height);
32 32
33/** @brief Free shading table 33/* @brief Free shading table
34 * @param[in] table Pointer to the shading table. 34 * @param[in] table Pointer to the shading table.
35 * @return None 35 * @return None
36*/ 36*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream.h
index 453fe4db0133..fb6e8c2ca8bf 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream.h
@@ -48,7 +48,7 @@ struct ia_css_stream {
48 bool started; 48 bool started;
49}; 49};
50 50
51/** @brief Get a binary in the stream, which binary has the shading correction. 51/* @brief Get a binary in the stream, which binary has the shading correction.
52 * 52 *
53 * @param[in] stream: The stream. 53 * @param[in] stream: The stream.
54 * @return The binary which has the shading correction. 54 * @return The binary which has the shading correction.
@@ -76,7 +76,7 @@ sh_css_invalidate_params(struct ia_css_stream *stream);
76const struct ia_css_fpn_table * 76const struct ia_css_fpn_table *
77ia_css_get_fpn_table(struct ia_css_stream *stream); 77ia_css_get_fpn_table(struct ia_css_stream *stream);
78 78
79/** @brief Get a pointer to the shading table. 79/* @brief Get a pointer to the shading table.
80 * 80 *
81 * @param[in] stream: The stream. 81 * @param[in] stream: The stream.
82 * @return The pointer to the shading table. 82 * @return The pointer to the shading table.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_format.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_format.h
index ae608a9c9051..f7e9020a86e1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_format.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_format.h
@@ -15,74 +15,74 @@
15#ifndef __IA_CSS_STREAM_FORMAT_H 15#ifndef __IA_CSS_STREAM_FORMAT_H
16#define __IA_CSS_STREAM_FORMAT_H 16#define __IA_CSS_STREAM_FORMAT_H
17 17
18/** @file 18/* @file
19 * This file contains formats usable for ISP streaming input 19 * This file contains formats usable for ISP streaming input
20 */ 20 */
21 21
22#include <type_support.h> /* bool */ 22#include <type_support.h> /* bool */
23 23
24/** The ISP streaming input interface supports the following formats. 24/* The ISP streaming input interface supports the following formats.
25 * These match the corresponding MIPI formats. 25 * These match the corresponding MIPI formats.
26 */ 26 */
27enum ia_css_stream_format { 27enum ia_css_stream_format {
28 IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY, /**< 8 bits per subpixel */ 28 IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY, /** 8 bits per subpixel */
29 IA_CSS_STREAM_FORMAT_YUV420_8, /**< 8 bits per subpixel */ 29 IA_CSS_STREAM_FORMAT_YUV420_8, /** 8 bits per subpixel */
30 IA_CSS_STREAM_FORMAT_YUV420_10, /**< 10 bits per subpixel */ 30 IA_CSS_STREAM_FORMAT_YUV420_10, /** 10 bits per subpixel */
31 IA_CSS_STREAM_FORMAT_YUV420_16, /**< 16 bits per subpixel */ 31 IA_CSS_STREAM_FORMAT_YUV420_16, /** 16 bits per subpixel */
32 IA_CSS_STREAM_FORMAT_YUV422_8, /**< UYVY..UYVY, 8 bits per subpixel */ 32 IA_CSS_STREAM_FORMAT_YUV422_8, /** UYVY..UYVY, 8 bits per subpixel */
33 IA_CSS_STREAM_FORMAT_YUV422_10, /**< UYVY..UYVY, 10 bits per subpixel */ 33 IA_CSS_STREAM_FORMAT_YUV422_10, /** UYVY..UYVY, 10 bits per subpixel */
34 IA_CSS_STREAM_FORMAT_YUV422_16, /**< UYVY..UYVY, 16 bits per subpixel */ 34 IA_CSS_STREAM_FORMAT_YUV422_16, /** UYVY..UYVY, 16 bits per subpixel */
35 IA_CSS_STREAM_FORMAT_RGB_444, /**< BGR..BGR, 4 bits per subpixel */ 35 IA_CSS_STREAM_FORMAT_RGB_444, /** BGR..BGR, 4 bits per subpixel */
36 IA_CSS_STREAM_FORMAT_RGB_555, /**< BGR..BGR, 5 bits per subpixel */ 36 IA_CSS_STREAM_FORMAT_RGB_555, /** BGR..BGR, 5 bits per subpixel */
37 IA_CSS_STREAM_FORMAT_RGB_565, /**< BGR..BGR, 5 bits B and R, 6 bits G */ 37 IA_CSS_STREAM_FORMAT_RGB_565, /** BGR..BGR, 5 bits B and R, 6 bits G */
38 IA_CSS_STREAM_FORMAT_RGB_666, /**< BGR..BGR, 6 bits per subpixel */ 38 IA_CSS_STREAM_FORMAT_RGB_666, /** BGR..BGR, 6 bits per subpixel */
39 IA_CSS_STREAM_FORMAT_RGB_888, /**< BGR..BGR, 8 bits per subpixel */ 39 IA_CSS_STREAM_FORMAT_RGB_888, /** BGR..BGR, 8 bits per subpixel */
40 IA_CSS_STREAM_FORMAT_RAW_6, /**< RAW data, 6 bits per pixel */ 40 IA_CSS_STREAM_FORMAT_RAW_6, /** RAW data, 6 bits per pixel */
41 IA_CSS_STREAM_FORMAT_RAW_7, /**< RAW data, 7 bits per pixel */ 41 IA_CSS_STREAM_FORMAT_RAW_7, /** RAW data, 7 bits per pixel */
42 IA_CSS_STREAM_FORMAT_RAW_8, /**< RAW data, 8 bits per pixel */ 42 IA_CSS_STREAM_FORMAT_RAW_8, /** RAW data, 8 bits per pixel */
43 IA_CSS_STREAM_FORMAT_RAW_10, /**< RAW data, 10 bits per pixel */ 43 IA_CSS_STREAM_FORMAT_RAW_10, /** RAW data, 10 bits per pixel */
44 IA_CSS_STREAM_FORMAT_RAW_12, /**< RAW data, 12 bits per pixel */ 44 IA_CSS_STREAM_FORMAT_RAW_12, /** RAW data, 12 bits per pixel */
45 IA_CSS_STREAM_FORMAT_RAW_14, /**< RAW data, 14 bits per pixel */ 45 IA_CSS_STREAM_FORMAT_RAW_14, /** RAW data, 14 bits per pixel */
46 IA_CSS_STREAM_FORMAT_RAW_16, /**< RAW data, 16 bits per pixel, which is 46 IA_CSS_STREAM_FORMAT_RAW_16, /** RAW data, 16 bits per pixel, which is
47 not specified in CSI-MIPI standard*/ 47 not specified in CSI-MIPI standard*/
48 IA_CSS_STREAM_FORMAT_BINARY_8, /**< Binary byte stream, which is target at 48 IA_CSS_STREAM_FORMAT_BINARY_8, /** Binary byte stream, which is target at
49 JPEG. */ 49 JPEG. */
50 50
51 /** CSI2-MIPI specific format: Generic short packet data. It is used to 51 /* CSI2-MIPI specific format: Generic short packet data. It is used to
52 * keep the timing information for the opening/closing of shutters, 52 * keep the timing information for the opening/closing of shutters,
53 * triggering of flashes and etc. 53 * triggering of flashes and etc.
54 */ 54 */
55 IA_CSS_STREAM_FORMAT_GENERIC_SHORT1, /**< Generic Short Packet Code 1 */ 55 IA_CSS_STREAM_FORMAT_GENERIC_SHORT1, /** Generic Short Packet Code 1 */
56 IA_CSS_STREAM_FORMAT_GENERIC_SHORT2, /**< Generic Short Packet Code 2 */ 56 IA_CSS_STREAM_FORMAT_GENERIC_SHORT2, /** Generic Short Packet Code 2 */
57 IA_CSS_STREAM_FORMAT_GENERIC_SHORT3, /**< Generic Short Packet Code 3 */ 57 IA_CSS_STREAM_FORMAT_GENERIC_SHORT3, /** Generic Short Packet Code 3 */
58 IA_CSS_STREAM_FORMAT_GENERIC_SHORT4, /**< Generic Short Packet Code 4 */ 58 IA_CSS_STREAM_FORMAT_GENERIC_SHORT4, /** Generic Short Packet Code 4 */
59 IA_CSS_STREAM_FORMAT_GENERIC_SHORT5, /**< Generic Short Packet Code 5 */ 59 IA_CSS_STREAM_FORMAT_GENERIC_SHORT5, /** Generic Short Packet Code 5 */
60 IA_CSS_STREAM_FORMAT_GENERIC_SHORT6, /**< Generic Short Packet Code 6 */ 60 IA_CSS_STREAM_FORMAT_GENERIC_SHORT6, /** Generic Short Packet Code 6 */
61 IA_CSS_STREAM_FORMAT_GENERIC_SHORT7, /**< Generic Short Packet Code 7 */ 61 IA_CSS_STREAM_FORMAT_GENERIC_SHORT7, /** Generic Short Packet Code 7 */
62 IA_CSS_STREAM_FORMAT_GENERIC_SHORT8, /**< Generic Short Packet Code 8 */ 62 IA_CSS_STREAM_FORMAT_GENERIC_SHORT8, /** Generic Short Packet Code 8 */
63 63
64 /** CSI2-MIPI specific format: YUV data. 64 /* CSI2-MIPI specific format: YUV data.
65 */ 65 */
66 IA_CSS_STREAM_FORMAT_YUV420_8_SHIFT, /**< YUV420 8-bit (Chroma Shifted Pixel Sampling) */ 66 IA_CSS_STREAM_FORMAT_YUV420_8_SHIFT, /** YUV420 8-bit (Chroma Shifted Pixel Sampling) */
67 IA_CSS_STREAM_FORMAT_YUV420_10_SHIFT, /**< YUV420 8-bit (Chroma Shifted Pixel Sampling) */ 67 IA_CSS_STREAM_FORMAT_YUV420_10_SHIFT, /** YUV420 8-bit (Chroma Shifted Pixel Sampling) */
68 68
69 /** CSI2-MIPI specific format: Generic long packet data 69 /* CSI2-MIPI specific format: Generic long packet data
70 */ 70 */
71 IA_CSS_STREAM_FORMAT_EMBEDDED, /**< Embedded 8-bit non Image Data */ 71 IA_CSS_STREAM_FORMAT_EMBEDDED, /** Embedded 8-bit non Image Data */
72 72
73 /** CSI2-MIPI specific format: User defined byte-based data. For example, 73 /* CSI2-MIPI specific format: User defined byte-based data. For example,
74 * the data transmitter (e.g. the SoC sensor) can keep the JPEG data as 74 * the data transmitter (e.g. the SoC sensor) can keep the JPEG data as
75 * the User Defined Data Type 4 and the MPEG data as the 75 * the User Defined Data Type 4 and the MPEG data as the
76 * User Defined Data Type 7. 76 * User Defined Data Type 7.
77 */ 77 */
78 IA_CSS_STREAM_FORMAT_USER_DEF1, /**< User defined 8-bit data type 1 */ 78 IA_CSS_STREAM_FORMAT_USER_DEF1, /** User defined 8-bit data type 1 */
79 IA_CSS_STREAM_FORMAT_USER_DEF2, /**< User defined 8-bit data type 2 */ 79 IA_CSS_STREAM_FORMAT_USER_DEF2, /** User defined 8-bit data type 2 */
80 IA_CSS_STREAM_FORMAT_USER_DEF3, /**< User defined 8-bit data type 3 */ 80 IA_CSS_STREAM_FORMAT_USER_DEF3, /** User defined 8-bit data type 3 */
81 IA_CSS_STREAM_FORMAT_USER_DEF4, /**< User defined 8-bit data type 4 */ 81 IA_CSS_STREAM_FORMAT_USER_DEF4, /** User defined 8-bit data type 4 */
82 IA_CSS_STREAM_FORMAT_USER_DEF5, /**< User defined 8-bit data type 5 */ 82 IA_CSS_STREAM_FORMAT_USER_DEF5, /** User defined 8-bit data type 5 */
83 IA_CSS_STREAM_FORMAT_USER_DEF6, /**< User defined 8-bit data type 6 */ 83 IA_CSS_STREAM_FORMAT_USER_DEF6, /** User defined 8-bit data type 6 */
84 IA_CSS_STREAM_FORMAT_USER_DEF7, /**< User defined 8-bit data type 7 */ 84 IA_CSS_STREAM_FORMAT_USER_DEF7, /** User defined 8-bit data type 7 */
85 IA_CSS_STREAM_FORMAT_USER_DEF8, /**< User defined 8-bit data type 8 */ 85 IA_CSS_STREAM_FORMAT_USER_DEF8, /** User defined 8-bit data type 8 */
86}; 86};
87 87
88#define IA_CSS_STREAM_FORMAT_NUM IA_CSS_STREAM_FORMAT_USER_DEF8 88#define IA_CSS_STREAM_FORMAT_NUM IA_CSS_STREAM_FORMAT_USER_DEF8
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_public.h
index 2c8d9de10a59..ca3203357ff5 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_public.h
@@ -15,7 +15,7 @@
15#ifndef __IA_CSS_STREAM_PUBLIC_H 15#ifndef __IA_CSS_STREAM_PUBLIC_H
16#define __IA_CSS_STREAM_PUBLIC_H 16#define __IA_CSS_STREAM_PUBLIC_H
17 17
18/** @file 18/* @file
19 * This file contains support for configuring and controlling streams 19 * This file contains support for configuring and controlling streams
20 */ 20 */
21 21
@@ -27,26 +27,26 @@
27#include "ia_css_prbs.h" 27#include "ia_css_prbs.h"
28#include "ia_css_input_port.h" 28#include "ia_css_input_port.h"
29 29
30/** Input modes, these enumerate all supported input modes. 30/* Input modes, these enumerate all supported input modes.
31 * Note that not all ISP modes support all input modes. 31 * Note that not all ISP modes support all input modes.
32 */ 32 */
33enum ia_css_input_mode { 33enum ia_css_input_mode {
34 IA_CSS_INPUT_MODE_SENSOR, /**< data from sensor */ 34 IA_CSS_INPUT_MODE_SENSOR, /** data from sensor */
35 IA_CSS_INPUT_MODE_FIFO, /**< data from input-fifo */ 35 IA_CSS_INPUT_MODE_FIFO, /** data from input-fifo */
36 IA_CSS_INPUT_MODE_TPG, /**< data from test-pattern generator */ 36 IA_CSS_INPUT_MODE_TPG, /** data from test-pattern generator */
37 IA_CSS_INPUT_MODE_PRBS, /**< data from pseudo-random bit stream */ 37 IA_CSS_INPUT_MODE_PRBS, /** data from pseudo-random bit stream */
38 IA_CSS_INPUT_MODE_MEMORY, /**< data from a frame in memory */ 38 IA_CSS_INPUT_MODE_MEMORY, /** data from a frame in memory */
39 IA_CSS_INPUT_MODE_BUFFERED_SENSOR /**< data is sent through mipi buffer */ 39 IA_CSS_INPUT_MODE_BUFFERED_SENSOR /** data is sent through mipi buffer */
40}; 40};
41 41
42/** Structure of the MIPI buffer configuration 42/* Structure of the MIPI buffer configuration
43 */ 43 */
44struct ia_css_mipi_buffer_config { 44struct ia_css_mipi_buffer_config {
45 unsigned int size_mem_words; /**< The frame size in the system memory 45 unsigned int size_mem_words; /** The frame size in the system memory
46 words (32B) */ 46 words (32B) */
47 bool contiguous; /**< Allocated memory physically 47 bool contiguous; /** Allocated memory physically
48 contiguously or not. \deprecated{Will be false always.}*/ 48 contiguously or not. \deprecated{Will be false always.}*/
49 unsigned int nof_mipi_buffers; /**< The number of MIPI buffers required for this 49 unsigned int nof_mipi_buffers; /** The number of MIPI buffers required for this
50 stream */ 50 stream */
51}; 51};
52 52
@@ -57,44 +57,44 @@ enum {
57 IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH 57 IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH
58}; 58};
59 59
60/** This is input data configuration for one MIPI data type. We can have 60/* This is input data configuration for one MIPI data type. We can have
61 * multiple of this in one virtual channel. 61 * multiple of this in one virtual channel.
62 */ 62 */
63struct ia_css_stream_isys_stream_config { 63struct ia_css_stream_isys_stream_config {
64 struct ia_css_resolution input_res; /**< Resolution of input data */ 64 struct ia_css_resolution input_res; /** Resolution of input data */
65 enum ia_css_stream_format format; /**< Format of input stream. This data 65 enum ia_css_stream_format format; /** Format of input stream. This data
66 format will be mapped to MIPI data 66 format will be mapped to MIPI data
67 type internally. */ 67 type internally. */
68 int linked_isys_stream_id; /**< default value is -1, other value means 68 int linked_isys_stream_id; /** default value is -1, other value means
69 current isys_stream shares the same buffer with 69 current isys_stream shares the same buffer with
70 indicated isys_stream*/ 70 indicated isys_stream*/
71 bool valid; /**< indicate whether other fields have valid value */ 71 bool valid; /** indicate whether other fields have valid value */
72}; 72};
73 73
74struct ia_css_stream_input_config { 74struct ia_css_stream_input_config {
75 struct ia_css_resolution input_res; /**< Resolution of input data */ 75 struct ia_css_resolution input_res; /** Resolution of input data */
76 struct ia_css_resolution effective_res; /**< Resolution of input data. 76 struct ia_css_resolution effective_res; /** Resolution of input data.
77 Used for CSS 2400/1 System and deprecated for other 77 Used for CSS 2400/1 System and deprecated for other
78 systems (replaced by input_effective_res in 78 systems (replaced by input_effective_res in
79 ia_css_pipe_config) */ 79 ia_css_pipe_config) */
80 enum ia_css_stream_format format; /**< Format of input stream. This data 80 enum ia_css_stream_format format; /** Format of input stream. This data
81 format will be mapped to MIPI data 81 format will be mapped to MIPI data
82 type internally. */ 82 type internally. */
83 enum ia_css_bayer_order bayer_order; /**< Bayer order for RAW streams */ 83 enum ia_css_bayer_order bayer_order; /** Bayer order for RAW streams */
84}; 84};
85 85
86 86
87/** Input stream description. This describes how input will flow into the 87/* Input stream description. This describes how input will flow into the
88 * CSS. This is used to program the CSS hardware. 88 * CSS. This is used to program the CSS hardware.
89 */ 89 */
90struct ia_css_stream_config { 90struct ia_css_stream_config {
91 enum ia_css_input_mode mode; /**< Input mode */ 91 enum ia_css_input_mode mode; /** Input mode */
92 union { 92 union {
93 struct ia_css_input_port port; /**< Port, for sensor only. */ 93 struct ia_css_input_port port; /** Port, for sensor only. */
94 struct ia_css_tpg_config tpg; /**< TPG configuration */ 94 struct ia_css_tpg_config tpg; /** TPG configuration */
95 struct ia_css_prbs_config prbs; /**< PRBS configuration */ 95 struct ia_css_prbs_config prbs; /** PRBS configuration */
96 } source; /**< Source of input data */ 96 } source; /** Source of input data */
97 unsigned int channel_id; /**< Channel on which input data 97 unsigned int channel_id; /** Channel on which input data
98 will arrive. Use this field 98 will arrive. Use this field
99 to specify virtual channel id. 99 to specify virtual channel id.
100 Valid values are: 0, 1, 2, 3 */ 100 Valid values are: 0, 1, 2, 3 */
@@ -110,29 +110,29 @@ struct ia_css_stream_config {
110 * and will be deprecated. In the future,all platforms will use the N*N method 110 * and will be deprecated. In the future,all platforms will use the N*N method
111 */ 111 */
112#endif 112#endif
113 unsigned int sensor_binning_factor; /**< Binning factor used by sensor 113 unsigned int sensor_binning_factor; /** Binning factor used by sensor
114 to produce image data. This is 114 to produce image data. This is
115 used for shading correction. */ 115 used for shading correction. */
116 unsigned int pixels_per_clock; /**< Number of pixels per clock, which can be 116 unsigned int pixels_per_clock; /** Number of pixels per clock, which can be
117 1, 2 or 4. */ 117 1, 2 or 4. */
118 bool online; /**< offline will activate RAW copy on SP, use this for 118 bool online; /** offline will activate RAW copy on SP, use this for
119 continuous capture. */ 119 continuous capture. */
120 /* ISYS2401 usage: ISP receives data directly from sensor, no copy. */ 120 /* ISYS2401 usage: ISP receives data directly from sensor, no copy. */
121 unsigned init_num_cont_raw_buf; /**< initial number of raw buffers to 121 unsigned init_num_cont_raw_buf; /** initial number of raw buffers to
122 allocate */ 122 allocate */
123 unsigned target_num_cont_raw_buf; /**< total number of raw buffers to 123 unsigned target_num_cont_raw_buf; /** total number of raw buffers to
124 allocate */ 124 allocate */
125 bool pack_raw_pixels; /**< Pack pixels in the raw buffers */ 125 bool pack_raw_pixels; /** Pack pixels in the raw buffers */
126 bool continuous; /**< Use SP copy feature to continuously capture frames 126 bool continuous; /** Use SP copy feature to continuously capture frames
127 to system memory and run pipes in offline mode */ 127 to system memory and run pipes in offline mode */
128 bool disable_cont_viewfinder; /**< disable continous viewfinder for ZSL use case */ 128 bool disable_cont_viewfinder; /** disable continous viewfinder for ZSL use case */
129 int32_t flash_gpio_pin; /**< pin on which the flash is connected, -1 for no flash */ 129 int32_t flash_gpio_pin; /** pin on which the flash is connected, -1 for no flash */
130 int left_padding; /**< The number of input-formatter left-paddings, -1 for default from binary.*/ 130 int left_padding; /** The number of input-formatter left-paddings, -1 for default from binary.*/
131 struct ia_css_mipi_buffer_config mipi_buffer_config; /**< mipi buffer configuration */ 131 struct ia_css_mipi_buffer_config mipi_buffer_config; /** mipi buffer configuration */
132 struct ia_css_metadata_config metadata_config; /**< Metadata configuration. */ 132 struct ia_css_metadata_config metadata_config; /** Metadata configuration. */
133 bool ia_css_enable_raw_buffer_locking; /**< Enable Raw Buffer Locking for HALv3 Support */ 133 bool ia_css_enable_raw_buffer_locking; /** Enable Raw Buffer Locking for HALv3 Support */
134 bool lock_all; 134 bool lock_all;
135 /**< Lock all RAW buffers (true) or lock only buffers processed by 135 /** Lock all RAW buffers (true) or lock only buffers processed by
136 video or preview pipe (false). 136 video or preview pipe (false).
137 This setting needs to be enabled to allow raw buffer locking 137 This setting needs to be enabled to allow raw buffer locking
138 without continuous viewfinder. */ 138 without continuous viewfinder. */
@@ -140,15 +140,15 @@ struct ia_css_stream_config {
140 140
141struct ia_css_stream; 141struct ia_css_stream;
142 142
143/** Stream info, this struct describes properties of a stream after it has been 143/* Stream info, this struct describes properties of a stream after it has been
144 * created. 144 * created.
145 */ 145 */
146struct ia_css_stream_info { 146struct ia_css_stream_info {
147 struct ia_css_metadata_info metadata_info; 147 struct ia_css_metadata_info metadata_info;
148 /**< Info about the metadata layout, this contains the stride. */ 148 /** Info about the metadata layout, this contains the stride. */
149}; 149};
150 150
151/** @brief Load default stream configuration 151/* @brief Load default stream configuration
152 * @param[in,out] stream_config The stream configuration. 152 * @param[in,out] stream_config The stream configuration.
153 * @return None 153 * @return None
154 * 154 *
@@ -165,7 +165,7 @@ void ia_css_stream_config_defaults(struct ia_css_stream_config *stream_config);
165 * create the internal structures and fill in the configuration data and pipes 165 * create the internal structures and fill in the configuration data and pipes
166 */ 166 */
167 167
168 /** @brief Creates a stream 168 /* @brief Creates a stream
169 * @param[in] stream_config The stream configuration. 169 * @param[in] stream_config The stream configuration.
170 * @param[in] num_pipes The number of pipes to incorporate in the stream. 170 * @param[in] num_pipes The number of pipes to incorporate in the stream.
171 * @param[in] pipes The pipes. 171 * @param[in] pipes The pipes.
@@ -180,7 +180,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
180 struct ia_css_pipe *pipes[], 180 struct ia_css_pipe *pipes[],
181 struct ia_css_stream **stream); 181 struct ia_css_stream **stream);
182 182
183/** @brief Destroys a stream 183/* @brief Destroys a stream
184 * @param[in] stream The stream. 184 * @param[in] stream The stream.
185 * @return IA_CSS_SUCCESS or the error code. 185 * @return IA_CSS_SUCCESS or the error code.
186 * 186 *
@@ -189,7 +189,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
189enum ia_css_err 189enum ia_css_err
190ia_css_stream_destroy(struct ia_css_stream *stream); 190ia_css_stream_destroy(struct ia_css_stream *stream);
191 191
192/** @brief Provides information about a stream 192/* @brief Provides information about a stream
193 * @param[in] stream The stream. 193 * @param[in] stream The stream.
194 * @param[out] stream_info The information about the stream. 194 * @param[out] stream_info The information about the stream.
195 * @return IA_CSS_SUCCESS or the error code. 195 * @return IA_CSS_SUCCESS or the error code.
@@ -200,7 +200,7 @@ enum ia_css_err
200ia_css_stream_get_info(const struct ia_css_stream *stream, 200ia_css_stream_get_info(const struct ia_css_stream *stream,
201 struct ia_css_stream_info *stream_info); 201 struct ia_css_stream_info *stream_info);
202 202
203/** @brief load (rebuild) a stream that was unloaded. 203/* @brief load (rebuild) a stream that was unloaded.
204 * @param[in] stream The stream 204 * @param[in] stream The stream
205 * @return IA_CSS_SUCCESS or the error code 205 * @return IA_CSS_SUCCESS or the error code
206 * 206 *
@@ -210,7 +210,7 @@ ia_css_stream_get_info(const struct ia_css_stream *stream,
210enum ia_css_err 210enum ia_css_err
211ia_css_stream_load(struct ia_css_stream *stream); 211ia_css_stream_load(struct ia_css_stream *stream);
212 212
213/** @brief Starts the stream. 213/* @brief Starts the stream.
214 * @param[in] stream The stream. 214 * @param[in] stream The stream.
215 * @return IA_CSS_SUCCESS or the error code. 215 * @return IA_CSS_SUCCESS or the error code.
216 * 216 *
@@ -223,7 +223,7 @@ ia_css_stream_load(struct ia_css_stream *stream);
223enum ia_css_err 223enum ia_css_err
224ia_css_stream_start(struct ia_css_stream *stream); 224ia_css_stream_start(struct ia_css_stream *stream);
225 225
226/** @brief Stop the stream. 226/* @brief Stop the stream.
227 * @param[in] stream The stream. 227 * @param[in] stream The stream.
228 * @return IA_CSS_SUCCESS or the error code. 228 * @return IA_CSS_SUCCESS or the error code.
229 * 229 *
@@ -233,7 +233,7 @@ ia_css_stream_start(struct ia_css_stream *stream);
233enum ia_css_err 233enum ia_css_err
234ia_css_stream_stop(struct ia_css_stream *stream); 234ia_css_stream_stop(struct ia_css_stream *stream);
235 235
236/** @brief Check if a stream has stopped 236/* @brief Check if a stream has stopped
237 * @param[in] stream The stream. 237 * @param[in] stream The stream.
238 * @return boolean flag 238 * @return boolean flag
239 * 239 *
@@ -242,7 +242,7 @@ ia_css_stream_stop(struct ia_css_stream *stream);
242bool 242bool
243ia_css_stream_has_stopped(struct ia_css_stream *stream); 243ia_css_stream_has_stopped(struct ia_css_stream *stream);
244 244
245/** @brief destroy a stream according to the stream seed previosly saved in the seed array. 245/* @brief destroy a stream according to the stream seed previosly saved in the seed array.
246 * @param[in] stream The stream. 246 * @param[in] stream The stream.
247 * @return IA_CSS_SUCCESS (no other errors are generated now) 247 * @return IA_CSS_SUCCESS (no other errors are generated now)
248 * 248 *
@@ -251,7 +251,7 @@ ia_css_stream_has_stopped(struct ia_css_stream *stream);
251enum ia_css_err 251enum ia_css_err
252ia_css_stream_unload(struct ia_css_stream *stream); 252ia_css_stream_unload(struct ia_css_stream *stream);
253 253
254/** @brief Returns stream format 254/* @brief Returns stream format
255 * @param[in] stream The stream. 255 * @param[in] stream The stream.
256 * @return format of the string 256 * @return format of the string
257 * 257 *
@@ -260,7 +260,7 @@ ia_css_stream_unload(struct ia_css_stream *stream);
260enum ia_css_stream_format 260enum ia_css_stream_format
261ia_css_stream_get_format(const struct ia_css_stream *stream); 261ia_css_stream_get_format(const struct ia_css_stream *stream);
262 262
263/** @brief Check if the stream is configured for 2 pixels per clock 263/* @brief Check if the stream is configured for 2 pixels per clock
264 * @param[in] stream The stream. 264 * @param[in] stream The stream.
265 * @return boolean flag 265 * @return boolean flag
266 * 266 *
@@ -270,7 +270,7 @@ ia_css_stream_get_format(const struct ia_css_stream *stream);
270bool 270bool
271ia_css_stream_get_two_pixels_per_clock(const struct ia_css_stream *stream); 271ia_css_stream_get_two_pixels_per_clock(const struct ia_css_stream *stream);
272 272
273/** @brief Sets the output frame stride (at the last pipe) 273/* @brief Sets the output frame stride (at the last pipe)
274 * @param[in] stream The stream 274 * @param[in] stream The stream
275 * @param[in] output_padded_width - the output buffer stride. 275 * @param[in] output_padded_width - the output buffer stride.
276 * @return ia_css_err 276 * @return ia_css_err
@@ -280,7 +280,7 @@ ia_css_stream_get_two_pixels_per_clock(const struct ia_css_stream *stream);
280enum ia_css_err 280enum ia_css_err
281ia_css_stream_set_output_padded_width(struct ia_css_stream *stream, unsigned int output_padded_width); 281ia_css_stream_set_output_padded_width(struct ia_css_stream *stream, unsigned int output_padded_width);
282 282
283/** @brief Return max number of continuous RAW frames. 283/* @brief Return max number of continuous RAW frames.
284 * @param[in] stream The stream. 284 * @param[in] stream The stream.
285 * @param[out] buffer_depth The maximum number of continuous RAW frames. 285 * @param[out] buffer_depth The maximum number of continuous RAW frames.
286 * @return IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS 286 * @return IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS
@@ -291,7 +291,7 @@ ia_css_stream_set_output_padded_width(struct ia_css_stream *stream, unsigned int
291enum ia_css_err 291enum ia_css_err
292ia_css_stream_get_max_buffer_depth(struct ia_css_stream *stream, int *buffer_depth); 292ia_css_stream_get_max_buffer_depth(struct ia_css_stream *stream, int *buffer_depth);
293 293
294/** @brief Set nr of continuous RAW frames to use. 294/* @brief Set nr of continuous RAW frames to use.
295 * 295 *
296 * @param[in] stream The stream. 296 * @param[in] stream The stream.
297 * @param[in] buffer_depth Number of frames to set. 297 * @param[in] buffer_depth Number of frames to set.
@@ -302,7 +302,7 @@ ia_css_stream_get_max_buffer_depth(struct ia_css_stream *stream, int *buffer_dep
302enum ia_css_err 302enum ia_css_err
303ia_css_stream_set_buffer_depth(struct ia_css_stream *stream, int buffer_depth); 303ia_css_stream_set_buffer_depth(struct ia_css_stream *stream, int buffer_depth);
304 304
305/** @brief Get number of continuous RAW frames to use. 305/* @brief Get number of continuous RAW frames to use.
306 * @param[in] stream The stream. 306 * @param[in] stream The stream.
307 * @param[out] buffer_depth The number of frames to use 307 * @param[out] buffer_depth The number of frames to use
308 * @return IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS 308 * @return IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS
@@ -315,7 +315,7 @@ ia_css_stream_get_buffer_depth(struct ia_css_stream *stream, int *buffer_depth);
315 315
316/* ===== CAPTURE ===== */ 316/* ===== CAPTURE ===== */
317 317
318/** @brief Configure the continuous capture 318/* @brief Configure the continuous capture
319 * 319 *
320 * @param[in] stream The stream. 320 * @param[in] stream The stream.
321 * @param[in] num_captures The number of RAW frames to be processed to 321 * @param[in] num_captures The number of RAW frames to be processed to
@@ -347,7 +347,7 @@ ia_css_stream_capture(struct ia_css_stream *stream,
347 unsigned int skip, 347 unsigned int skip,
348 int offset); 348 int offset);
349 349
350/** @brief Specify which raw frame to tag based on exp_id found in frame info 350/* @brief Specify which raw frame to tag based on exp_id found in frame info
351 * 351 *
352 * @param[in] stream The stream. 352 * @param[in] stream The stream.
353 * @param[in] exp_id The exposure id of the raw frame to tag. 353 * @param[in] exp_id The exposure id of the raw frame to tag.
@@ -363,7 +363,7 @@ ia_css_stream_capture_frame(struct ia_css_stream *stream,
363 363
364/* ===== VIDEO ===== */ 364/* ===== VIDEO ===== */
365 365
366/** @brief Send streaming data into the css input FIFO 366/* @brief Send streaming data into the css input FIFO
367 * 367 *
368 * @param[in] stream The stream. 368 * @param[in] stream The stream.
369 * @param[in] data Pointer to the pixels to be send. 369 * @param[in] data Pointer to the pixels to be send.
@@ -395,7 +395,7 @@ ia_css_stream_send_input_frame(const struct ia_css_stream *stream,
395 unsigned int width, 395 unsigned int width,
396 unsigned int height); 396 unsigned int height);
397 397
398/** @brief Start an input frame on the CSS input FIFO. 398/* @brief Start an input frame on the CSS input FIFO.
399 * 399 *
400 * @param[in] stream The stream. 400 * @param[in] stream The stream.
401 * @return None 401 * @return None
@@ -411,7 +411,7 @@ ia_css_stream_send_input_frame(const struct ia_css_stream *stream,
411void 411void
412ia_css_stream_start_input_frame(const struct ia_css_stream *stream); 412ia_css_stream_start_input_frame(const struct ia_css_stream *stream);
413 413
414/** @brief Send a line of input data into the CSS input FIFO. 414/* @brief Send a line of input data into the CSS input FIFO.
415 * 415 *
416 * @param[in] stream The stream. 416 * @param[in] stream The stream.
417 * @param[in] data Array of the first line of image data. 417 * @param[in] data Array of the first line of image data.
@@ -435,7 +435,7 @@ ia_css_stream_send_input_line(const struct ia_css_stream *stream,
435 const unsigned short *data2, 435 const unsigned short *data2,
436 unsigned int width2); 436 unsigned int width2);
437 437
438/** @brief Send a line of input embedded data into the CSS input FIFO. 438/* @brief Send a line of input embedded data into the CSS input FIFO.
439 * 439 *
440 * @param[in] stream Pointer of the stream. 440 * @param[in] stream Pointer of the stream.
441 * @param[in] format Format of the embedded data. 441 * @param[in] format Format of the embedded data.
@@ -457,7 +457,7 @@ ia_css_stream_send_input_embedded_line(const struct ia_css_stream *stream,
457 const unsigned short *data, 457 const unsigned short *data,
458 unsigned int width); 458 unsigned int width);
459 459
460/** @brief End an input frame on the CSS input FIFO. 460/* @brief End an input frame on the CSS input FIFO.
461 * 461 *
462 * @param[in] stream The stream. 462 * @param[in] stream The stream.
463 * @return None 463 * @return None
@@ -467,7 +467,7 @@ ia_css_stream_send_input_embedded_line(const struct ia_css_stream *stream,
467void 467void
468ia_css_stream_end_input_frame(const struct ia_css_stream *stream); 468ia_css_stream_end_input_frame(const struct ia_css_stream *stream);
469 469
470/** @brief send a request flash command to SP 470/* @brief send a request flash command to SP
471 * 471 *
472 * @param[in] stream The stream. 472 * @param[in] stream The stream.
473 * @return None 473 * @return None
@@ -481,7 +481,7 @@ ia_css_stream_end_input_frame(const struct ia_css_stream *stream);
481void 481void
482ia_css_stream_request_flash(struct ia_css_stream *stream); 482ia_css_stream_request_flash(struct ia_css_stream *stream);
483 483
484/** @brief Configure a stream with filter coefficients. 484/* @brief Configure a stream with filter coefficients.
485 * @deprecated {Replaced by 485 * @deprecated {Replaced by
486 * ia_css_pipe_set_isp_config_on_pipe()} 486 * ia_css_pipe_set_isp_config_on_pipe()}
487 * 487 *
@@ -503,7 +503,7 @@ ia_css_stream_set_isp_config_on_pipe(struct ia_css_stream *stream,
503 const struct ia_css_isp_config *config, 503 const struct ia_css_isp_config *config,
504 struct ia_css_pipe *pipe); 504 struct ia_css_pipe *pipe);
505 505
506/** @brief Configure a stream with filter coefficients. 506/* @brief Configure a stream with filter coefficients.
507 * @deprecated {Replaced by 507 * @deprecated {Replaced by
508 * ia_css_pipe_set_isp_config()} 508 * ia_css_pipe_set_isp_config()}
509 * @param[in] stream The stream. 509 * @param[in] stream The stream.
@@ -523,7 +523,7 @@ ia_css_stream_set_isp_config(
523 struct ia_css_stream *stream, 523 struct ia_css_stream *stream,
524 const struct ia_css_isp_config *config); 524 const struct ia_css_isp_config *config);
525 525
526/** @brief Get selected configuration settings 526/* @brief Get selected configuration settings
527 * @param[in] stream The stream. 527 * @param[in] stream The stream.
528 * @param[out] config Configuration settings. 528 * @param[out] config Configuration settings.
529 * @return None 529 * @return None
@@ -532,7 +532,7 @@ void
532ia_css_stream_get_isp_config(const struct ia_css_stream *stream, 532ia_css_stream_get_isp_config(const struct ia_css_stream *stream,
533 struct ia_css_isp_config *config); 533 struct ia_css_isp_config *config);
534 534
535/** @brief allocate continuous raw frames for continuous capture 535/* @brief allocate continuous raw frames for continuous capture
536 * @param[in] stream The stream. 536 * @param[in] stream The stream.
537 * @return IA_CSS_SUCCESS or error code. 537 * @return IA_CSS_SUCCESS or error code.
538 * 538 *
@@ -544,7 +544,7 @@ ia_css_stream_get_isp_config(const struct ia_css_stream *stream,
544enum ia_css_err 544enum ia_css_err
545ia_css_alloc_continuous_frame_remain(struct ia_css_stream *stream); 545ia_css_alloc_continuous_frame_remain(struct ia_css_stream *stream);
546 546
547/** @brief allocate continuous raw frames for continuous capture 547/* @brief allocate continuous raw frames for continuous capture
548 * @param[in] stream The stream. 548 * @param[in] stream The stream.
549 * @return IA_CSS_SUCCESS or error code. 549 * @return IA_CSS_SUCCESS or error code.
550 * 550 *
@@ -555,7 +555,7 @@ ia_css_alloc_continuous_frame_remain(struct ia_css_stream *stream);
555enum ia_css_err 555enum ia_css_err
556ia_css_update_continuous_frames(struct ia_css_stream *stream); 556ia_css_update_continuous_frames(struct ia_css_stream *stream);
557 557
558/** @brief ia_css_unlock_raw_frame . unlock a raw frame (HALv3 Support) 558/* @brief ia_css_unlock_raw_frame . unlock a raw frame (HALv3 Support)
559 * @param[in] stream The stream. 559 * @param[in] stream The stream.
560 * @param[in] exp_id exposure id that uniquely identifies the locked Raw Frame Buffer 560 * @param[in] exp_id exposure id that uniquely identifies the locked Raw Frame Buffer
561 * @return ia_css_err IA_CSS_SUCCESS or error code 561 * @return ia_css_err IA_CSS_SUCCESS or error code
@@ -567,7 +567,7 @@ ia_css_update_continuous_frames(struct ia_css_stream *stream);
567enum ia_css_err 567enum ia_css_err
568ia_css_unlock_raw_frame(struct ia_css_stream *stream, uint32_t exp_id); 568ia_css_unlock_raw_frame(struct ia_css_stream *stream, uint32_t exp_id);
569 569
570/** @brief ia_css_en_dz_capt_pipe . Enable/Disable digital zoom for capture pipe 570/* @brief ia_css_en_dz_capt_pipe . Enable/Disable digital zoom for capture pipe
571 * @param[in] stream The stream. 571 * @param[in] stream The stream.
572 * @param[in] enable - true, disable - false 572 * @param[in] enable - true, disable - false
573 * @return None 573 * @return None
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_timer.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_timer.h
index 575bb28b4bec..b256d7c88716 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_timer.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_timer.h
@@ -31,47 +31,47 @@ more details.
31#ifndef __IA_CSS_TIMER_H 31#ifndef __IA_CSS_TIMER_H
32#define __IA_CSS_TIMER_H 32#define __IA_CSS_TIMER_H
33 33
34/** @file 34/* @file
35 * Timer interface definitions 35 * Timer interface definitions
36 */ 36 */
37#include <type_support.h> /* for uint32_t */ 37#include <type_support.h> /* for uint32_t */
38#include "ia_css_err.h" 38#include "ia_css_err.h"
39 39
40/** @brief timer reading definition */ 40/* @brief timer reading definition */
41typedef uint32_t clock_value_t; 41typedef uint32_t clock_value_t;
42 42
43/** @brief 32 bit clock tick,(timestamp based on timer-value of CSS-internal timer)*/ 43/* @brief 32 bit clock tick,(timestamp based on timer-value of CSS-internal timer)*/
44struct ia_css_clock_tick { 44struct ia_css_clock_tick {
45 clock_value_t ticks; /**< measured time in ticks.*/ 45 clock_value_t ticks; /** measured time in ticks.*/
46}; 46};
47 47
48/** @brief TIMER event codes */ 48/* @brief TIMER event codes */
49enum ia_css_tm_event { 49enum ia_css_tm_event {
50 IA_CSS_TM_EVENT_AFTER_INIT, 50 IA_CSS_TM_EVENT_AFTER_INIT,
51 /**< Timer Event after Initialization */ 51 /** Timer Event after Initialization */
52 IA_CSS_TM_EVENT_MAIN_END, 52 IA_CSS_TM_EVENT_MAIN_END,
53 /**< Timer Event after end of Main */ 53 /** Timer Event after end of Main */
54 IA_CSS_TM_EVENT_THREAD_START, 54 IA_CSS_TM_EVENT_THREAD_START,
55 /**< Timer Event after thread start */ 55 /** Timer Event after thread start */
56 IA_CSS_TM_EVENT_FRAME_PROC_START, 56 IA_CSS_TM_EVENT_FRAME_PROC_START,
57 /**< Timer Event after Frame Process Start */ 57 /** Timer Event after Frame Process Start */
58 IA_CSS_TM_EVENT_FRAME_PROC_END 58 IA_CSS_TM_EVENT_FRAME_PROC_END
59 /**< Timer Event after Frame Process End */ 59 /** Timer Event after Frame Process End */
60}; 60};
61 61
62/** @brief code measurement common struct */ 62/* @brief code measurement common struct */
63struct ia_css_time_meas { 63struct ia_css_time_meas {
64 clock_value_t start_timer_value; /**< measured time in ticks */ 64 clock_value_t start_timer_value; /** measured time in ticks */
65 clock_value_t end_timer_value; /**< measured time in ticks */ 65 clock_value_t end_timer_value; /** measured time in ticks */
66}; 66};
67 67
68/**@brief SIZE_OF_IA_CSS_CLOCK_TICK_STRUCT checks to ensure correct alignment for struct ia_css_clock_tick. */ 68/**@brief SIZE_OF_IA_CSS_CLOCK_TICK_STRUCT checks to ensure correct alignment for struct ia_css_clock_tick. */
69#define SIZE_OF_IA_CSS_CLOCK_TICK_STRUCT sizeof(clock_value_t) 69#define SIZE_OF_IA_CSS_CLOCK_TICK_STRUCT sizeof(clock_value_t)
70/** @brief checks to ensure correct alignment for ia_css_time_meas. */ 70/* @brief checks to ensure correct alignment for ia_css_time_meas. */
71#define SIZE_OF_IA_CSS_TIME_MEAS_STRUCT (sizeof(clock_value_t) \ 71#define SIZE_OF_IA_CSS_TIME_MEAS_STRUCT (sizeof(clock_value_t) \
72 + sizeof(clock_value_t)) 72 + sizeof(clock_value_t))
73 73
74/** @brief API to fetch timer count directly 74/* @brief API to fetch timer count directly
75* 75*
76* @param curr_ts [out] measured count value 76* @param curr_ts [out] measured count value
77* @return IA_CSS_SUCCESS if success 77* @return IA_CSS_SUCCESS if success
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_tpg.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_tpg.h
index 9238a3317a46..81498bd7485b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_tpg.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_tpg.h
@@ -15,11 +15,11 @@
15#ifndef __IA_CSS_TPG_H 15#ifndef __IA_CSS_TPG_H
16#define __IA_CSS_TPG_H 16#define __IA_CSS_TPG_H
17 17
18/** @file 18/* @file
19 * This file contains support for the test pattern generator (TPG) 19 * This file contains support for the test pattern generator (TPG)
20 */ 20 */
21 21
22/** Enumerate the TPG IDs. 22/* Enumerate the TPG IDs.
23 */ 23 */
24enum ia_css_tpg_id { 24enum ia_css_tpg_id {
25 IA_CSS_TPG_ID0, 25 IA_CSS_TPG_ID0,
@@ -35,7 +35,7 @@ enum ia_css_tpg_id {
35 */ 35 */
36#define N_CSS_TPG_IDS (IA_CSS_TPG_ID2+1) 36#define N_CSS_TPG_IDS (IA_CSS_TPG_ID2+1)
37 37
38/** Enumerate the TPG modes. 38/* Enumerate the TPG modes.
39 */ 39 */
40enum ia_css_tpg_mode { 40enum ia_css_tpg_mode {
41 IA_CSS_TPG_MODE_RAMP, 41 IA_CSS_TPG_MODE_RAMP,
@@ -44,7 +44,7 @@ enum ia_css_tpg_mode {
44 IA_CSS_TPG_MODE_MONO 44 IA_CSS_TPG_MODE_MONO
45}; 45};
46 46
47/** @brief Configure the test pattern generator. 47/* @brief Configure the test pattern generator.
48 * 48 *
49 * Configure the Test Pattern Generator, the way these values are used to 49 * Configure the Test Pattern Generator, the way these values are used to
50 * generate the pattern can be seen in the HRT extension for the test pattern 50 * generate the pattern can be seen in the HRT extension for the test pattern
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_types.h
index 5fec3d5c89d8..725b90072cfe 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_types.h
@@ -16,7 +16,7 @@
16#ifndef _IA_CSS_TYPES_H 16#ifndef _IA_CSS_TYPES_H
17#define _IA_CSS_TYPES_H 17#define _IA_CSS_TYPES_H
18 18
19/** @file 19/* @file
20 * This file contains types used for the ia_css parameters. 20 * This file contains types used for the ia_css parameters.
21 * These types are in a separate file because they are expected 21 * These types are in a separate file because they are expected
22 * to be used in software layers that do not access the CSS API 22 * to be used in software layers that do not access the CSS API
@@ -58,7 +58,7 @@
58#include "isp/kernels/output/output_1.0/ia_css_output_types.h" 58#include "isp/kernels/output/output_1.0/ia_css_output_types.h"
59 59
60#define IA_CSS_DVS_STAT_GRID_INFO_SUPPORTED 60#define IA_CSS_DVS_STAT_GRID_INFO_SUPPORTED
61/**< Should be removed after Driver adaptation will be done */ 61/** Should be removed after Driver adaptation will be done */
62 62
63#define IA_CSS_VERSION_MAJOR 2 63#define IA_CSS_VERSION_MAJOR 2
64#define IA_CSS_VERSION_MINOR 0 64#define IA_CSS_VERSION_MINOR 0
@@ -69,8 +69,8 @@
69/* Min and max exposure IDs. These macros are here to allow 69/* Min and max exposure IDs. These macros are here to allow
70 * the drivers to get this information. Changing these macros 70 * the drivers to get this information. Changing these macros
71 * constitutes a CSS API change. */ 71 * constitutes a CSS API change. */
72#define IA_CSS_ISYS_MIN_EXPOSURE_ID 1 /**< Minimum exposure ID */ 72#define IA_CSS_ISYS_MIN_EXPOSURE_ID 1 /** Minimum exposure ID */
73#define IA_CSS_ISYS_MAX_EXPOSURE_ID 250 /**< Maximum exposure ID */ 73#define IA_CSS_ISYS_MAX_EXPOSURE_ID 250 /** Maximum exposure ID */
74 74
75/* opaque types */ 75/* opaque types */
76struct ia_css_isp_parameters; 76struct ia_css_isp_parameters;
@@ -79,72 +79,72 @@ struct ia_css_memory_offsets;
79struct ia_css_config_memory_offsets; 79struct ia_css_config_memory_offsets;
80struct ia_css_state_memory_offsets; 80struct ia_css_state_memory_offsets;
81 81
82/** Virtual address within the CSS address space. */ 82/* Virtual address within the CSS address space. */
83typedef uint32_t ia_css_ptr; 83typedef uint32_t ia_css_ptr;
84 84
85/** Generic resolution structure. 85/* Generic resolution structure.
86 */ 86 */
87struct ia_css_resolution { 87struct ia_css_resolution {
88 uint32_t width; /**< Width */ 88 uint32_t width; /** Width */
89 uint32_t height; /**< Height */ 89 uint32_t height; /** Height */
90}; 90};
91 91
92/** Generic coordinate structure. 92/* Generic coordinate structure.
93 */ 93 */
94struct ia_css_coordinate { 94struct ia_css_coordinate {
95 int32_t x; /**< Value of a coordinate on the horizontal axis */ 95 int32_t x; /** Value of a coordinate on the horizontal axis */
96 int32_t y; /**< Value of a coordinate on the vertical axis */ 96 int32_t y; /** Value of a coordinate on the vertical axis */
97}; 97};
98 98
99/** Vector with signed values. This is used to indicate motion for 99/* Vector with signed values. This is used to indicate motion for
100 * Digital Image Stabilization. 100 * Digital Image Stabilization.
101 */ 101 */
102struct ia_css_vector { 102struct ia_css_vector {
103 int32_t x; /**< horizontal motion (in pixels) */ 103 int32_t x; /** horizontal motion (in pixels) */
104 int32_t y; /**< vertical motion (in pixels) */ 104 int32_t y; /** vertical motion (in pixels) */
105}; 105};
106 106
107/* Short hands */ 107/* Short hands */
108#define IA_CSS_ISP_DMEM IA_CSS_ISP_DMEM0 108#define IA_CSS_ISP_DMEM IA_CSS_ISP_DMEM0
109#define IA_CSS_ISP_VMEM IA_CSS_ISP_VMEM0 109#define IA_CSS_ISP_VMEM IA_CSS_ISP_VMEM0
110 110
111/** CSS data descriptor */ 111/* CSS data descriptor */
112struct ia_css_data { 112struct ia_css_data {
113 ia_css_ptr address; /**< CSS virtual address */ 113 ia_css_ptr address; /** CSS virtual address */
114 uint32_t size; /**< Disabled if 0 */ 114 uint32_t size; /** Disabled if 0 */
115}; 115};
116 116
117/** Host data descriptor */ 117/* Host data descriptor */
118struct ia_css_host_data { 118struct ia_css_host_data {
119 char *address; /**< Host address */ 119 char *address; /** Host address */
120 uint32_t size; /**< Disabled if 0 */ 120 uint32_t size; /** Disabled if 0 */
121}; 121};
122 122
123/** ISP data descriptor */ 123/* ISP data descriptor */
124struct ia_css_isp_data { 124struct ia_css_isp_data {
125 uint32_t address; /**< ISP address */ 125 uint32_t address; /** ISP address */
126 uint32_t size; /**< Disabled if 0 */ 126 uint32_t size; /** Disabled if 0 */
127}; 127};
128 128
129/** Shading Correction types. */ 129/* Shading Correction types. */
130enum ia_css_shading_correction_type { 130enum ia_css_shading_correction_type {
131#ifndef ISP2401 131#ifndef ISP2401
132 IA_CSS_SHADING_CORRECTION_TYPE_1 /**< Shading Correction 1.0 (pipe 1.0 on ISP2300, pipe 2.2 on ISP2400) */ 132 IA_CSS_SHADING_CORRECTION_TYPE_1 /** Shading Correction 1.0 (pipe 1.0 on ISP2300, pipe 2.2 on ISP2400) */
133#else 133#else
134 IA_CSS_SHADING_CORRECTION_NONE, /**< Shading Correction is not processed in the pipe. */ 134 IA_CSS_SHADING_CORRECTION_NONE, /** Shading Correction is not processed in the pipe. */
135 IA_CSS_SHADING_CORRECTION_TYPE_1 /**< Shading Correction 1.0 (pipe 1.0 on ISP2300, pipe 2.2 on ISP2400/2401) */ 135 IA_CSS_SHADING_CORRECTION_TYPE_1 /** Shading Correction 1.0 (pipe 1.0 on ISP2300, pipe 2.2 on ISP2400/2401) */
136#endif 136#endif
137 137
138 /**< More shading correction types can be added in the future. */ 138 /** More shading correction types can be added in the future. */
139}; 139};
140 140
141/** Shading Correction information. */ 141/* Shading Correction information. */
142struct ia_css_shading_info { 142struct ia_css_shading_info {
143 enum ia_css_shading_correction_type type; /**< Shading Correction type. */ 143 enum ia_css_shading_correction_type type; /** Shading Correction type. */
144 144
145 union { /** Shading Correction information of each Shading Correction types. */ 145 union { /* Shading Correction information of each Shading Correction types. */
146 146
147 /** Shading Correction information of IA_CSS_SHADING_CORRECTION_TYPE_1. 147 /* Shading Correction information of IA_CSS_SHADING_CORRECTION_TYPE_1.
148 * 148 *
149 * This structure contains the information necessary to generate 149 * This structure contains the information necessary to generate
150 * the shading table required in the isp. 150 * the shading table required in the isp.
@@ -288,20 +288,20 @@ struct ia_css_shading_info {
288 */ 288 */
289 struct { 289 struct {
290#ifndef ISP2401 290#ifndef ISP2401
291 uint32_t enable; /**< Shading correction enabled. 291 uint32_t enable; /** Shading correction enabled.
292 0:disabled, 1:enabled */ 292 0:disabled, 1:enabled */
293 uint32_t num_hor_grids; /**< Number of data points per line 293 uint32_t num_hor_grids; /** Number of data points per line
294 per color on shading table. */ 294 per color on shading table. */
295 uint32_t num_ver_grids; /**< Number of lines of data points 295 uint32_t num_ver_grids; /** Number of lines of data points
296 per color on shading table. */ 296 per color on shading table. */
297 uint32_t bqs_per_grid_cell; /**< Grid cell size 297 uint32_t bqs_per_grid_cell; /** Grid cell size
298 in BQ(Bayer Quad) unit. 298 in BQ(Bayer Quad) unit.
299 (1BQ means {Gr,R,B,Gb}(2x2 pixels).) 299 (1BQ means {Gr,R,B,Gb}(2x2 pixels).)
300 Valid values are 8,16,32,64. */ 300 Valid values are 8,16,32,64. */
301#else 301#else
302 uint32_t num_hor_grids; /**< Number of data points per line per color on shading table. */ 302 uint32_t num_hor_grids; /** Number of data points per line per color on shading table. */
303 uint32_t num_ver_grids; /**< Number of lines of data points per color on shading table. */ 303 uint32_t num_ver_grids; /** Number of lines of data points per color on shading table. */
304 uint32_t bqs_per_grid_cell; /**< Grid cell size in BQ unit. 304 uint32_t bqs_per_grid_cell; /** Grid cell size in BQ unit.
305 NOTE: bqs = size in BQ(Bayer Quad) unit. 305 NOTE: bqs = size in BQ(Bayer Quad) unit.
306 1BQ means {Gr,R,B,Gb} (2x2 pixels). 306 1BQ means {Gr,R,B,Gb} (2x2 pixels).
307 Horizontal 1 bqs corresponds to horizontal 2 pixels. 307 Horizontal 1 bqs corresponds to horizontal 2 pixels.
@@ -310,13 +310,13 @@ struct ia_css_shading_info {
310 uint32_t bayer_scale_hor_ratio_in; 310 uint32_t bayer_scale_hor_ratio_in;
311 uint32_t bayer_scale_hor_ratio_out; 311 uint32_t bayer_scale_hor_ratio_out;
312#ifndef ISP2401 312#ifndef ISP2401
313 /**< Horizontal ratio of bayer scaling 313 /** Horizontal ratio of bayer scaling
314 between input width and output width, for the scaling 314 between input width and output width, for the scaling
315 which should be done before shading correction. 315 which should be done before shading correction.
316 output_width = input_width * bayer_scale_hor_ratio_out 316 output_width = input_width * bayer_scale_hor_ratio_out
317 / bayer_scale_hor_ratio_in */ 317 / bayer_scale_hor_ratio_in */
318#else 318#else
319 /**< Horizontal ratio of bayer scaling between input width and output width, 319 /** Horizontal ratio of bayer scaling between input width and output width,
320 for the scaling which should be done before shading correction. 320 for the scaling which should be done before shading correction.
321 output_width = input_width * bayer_scale_hor_ratio_out 321 output_width = input_width * bayer_scale_hor_ratio_out
322 / bayer_scale_hor_ratio_in + 0.5 */ 322 / bayer_scale_hor_ratio_in + 0.5 */
@@ -324,30 +324,30 @@ struct ia_css_shading_info {
324 uint32_t bayer_scale_ver_ratio_in; 324 uint32_t bayer_scale_ver_ratio_in;
325 uint32_t bayer_scale_ver_ratio_out; 325 uint32_t bayer_scale_ver_ratio_out;
326#ifndef ISP2401 326#ifndef ISP2401
327 /**< Vertical ratio of bayer scaling 327 /** Vertical ratio of bayer scaling
328 between input height and output height, for the scaling 328 between input height and output height, for the scaling
329 which should be done before shading correction. 329 which should be done before shading correction.
330 output_height = input_height * bayer_scale_ver_ratio_out 330 output_height = input_height * bayer_scale_ver_ratio_out
331 / bayer_scale_ver_ratio_in */ 331 / bayer_scale_ver_ratio_in */
332 uint32_t sc_bayer_origin_x_bqs_on_shading_table; 332 uint32_t sc_bayer_origin_x_bqs_on_shading_table;
333 /**< X coordinate (in bqs) of bayer origin on shading table. 333 /** X coordinate (in bqs) of bayer origin on shading table.
334 This indicates the left-most pixel of bayer 334 This indicates the left-most pixel of bayer
335 (not include margin) inputted to the shading correction. 335 (not include margin) inputted to the shading correction.
336 This corresponds to the left-most pixel of bayer 336 This corresponds to the left-most pixel of bayer
337 inputted to isp from sensor. */ 337 inputted to isp from sensor. */
338 uint32_t sc_bayer_origin_y_bqs_on_shading_table; 338 uint32_t sc_bayer_origin_y_bqs_on_shading_table;
339 /**< Y coordinate (in bqs) of bayer origin on shading table. 339 /** Y coordinate (in bqs) of bayer origin on shading table.
340 This indicates the top pixel of bayer 340 This indicates the top pixel of bayer
341 (not include margin) inputted to the shading correction. 341 (not include margin) inputted to the shading correction.
342 This corresponds to the top pixel of bayer 342 This corresponds to the top pixel of bayer
343 inputted to isp from sensor. */ 343 inputted to isp from sensor. */
344#else 344#else
345 /**< Vertical ratio of bayer scaling between input height and output height, 345 /** Vertical ratio of bayer scaling between input height and output height,
346 for the scaling which should be done before shading correction. 346 for the scaling which should be done before shading correction.
347 output_height = input_height * bayer_scale_ver_ratio_out 347 output_height = input_height * bayer_scale_ver_ratio_out
348 / bayer_scale_ver_ratio_in + 0.5 */ 348 / bayer_scale_ver_ratio_in + 0.5 */
349 struct ia_css_resolution isp_input_sensor_data_res_bqs; 349 struct ia_css_resolution isp_input_sensor_data_res_bqs;
350 /**< Sensor data size (in bqs) inputted to ISP. This is the size BEFORE bayer scaling. 350 /** Sensor data size (in bqs) inputted to ISP. This is the size BEFORE bayer scaling.
351 NOTE: This is NOT the size of the physical sensor size. 351 NOTE: This is NOT the size of the physical sensor size.
352 CSS requests the driver that ISP inputs sensor data 352 CSS requests the driver that ISP inputs sensor data
353 by the size of isp_input_sensor_data_res_bqs. 353 by the size of isp_input_sensor_data_res_bqs.
@@ -357,22 +357,22 @@ struct ia_css_shading_info {
357 ISP assumes the area of isp_input_sensor_data_res_bqs 357 ISP assumes the area of isp_input_sensor_data_res_bqs
358 is centered on the physical sensor. */ 358 is centered on the physical sensor. */
359 struct ia_css_resolution sensor_data_res_bqs; 359 struct ia_css_resolution sensor_data_res_bqs;
360 /**< Sensor data size (in bqs) at shading correction. 360 /** Sensor data size (in bqs) at shading correction.
361 This is the size AFTER bayer scaling. */ 361 This is the size AFTER bayer scaling. */
362 struct ia_css_coordinate sensor_data_origin_bqs_on_sctbl; 362 struct ia_css_coordinate sensor_data_origin_bqs_on_sctbl;
363 /**< Origin of sensor data area positioned on shading table at shading correction. 363 /** Origin of sensor data area positioned on shading table at shading correction.
364 The coordinate x,y should be positive values. */ 364 The coordinate x,y should be positive values. */
365#endif 365#endif
366 } type_1; 366 } type_1;
367 367
368 /**< More structures can be added here when more shading correction types will be added 368 /** More structures can be added here when more shading correction types will be added
369 in the future. */ 369 in the future. */
370 } info; 370 } info;
371}; 371};
372 372
373#ifndef ISP2401 373#ifndef ISP2401
374 374
375/** Default Shading Correction information of Shading Correction Type 1. */ 375/* Default Shading Correction information of Shading Correction Type 1. */
376#define DEFAULT_SHADING_INFO_TYPE_1 \ 376#define DEFAULT_SHADING_INFO_TYPE_1 \
377{ \ 377{ \
378 IA_CSS_SHADING_CORRECTION_TYPE_1, /* type */ \ 378 IA_CSS_SHADING_CORRECTION_TYPE_1, /* type */ \
@@ -394,7 +394,7 @@ struct ia_css_shading_info {
394 394
395#else 395#else
396 396
397/** Default Shading Correction information of Shading Correction Type 1. */ 397/* Default Shading Correction information of Shading Correction Type 1. */
398#define DEFAULT_SHADING_INFO_TYPE_1 \ 398#define DEFAULT_SHADING_INFO_TYPE_1 \
399{ \ 399{ \
400 IA_CSS_SHADING_CORRECTION_TYPE_1, /* type */ \ 400 IA_CSS_SHADING_CORRECTION_TYPE_1, /* type */ \
@@ -416,27 +416,27 @@ struct ia_css_shading_info {
416 416
417#endif 417#endif
418 418
419/** Default Shading Correction information. */ 419/* Default Shading Correction information. */
420#define DEFAULT_SHADING_INFO DEFAULT_SHADING_INFO_TYPE_1 420#define DEFAULT_SHADING_INFO DEFAULT_SHADING_INFO_TYPE_1
421 421
422/** structure that describes the 3A and DIS grids */ 422/* structure that describes the 3A and DIS grids */
423struct ia_css_grid_info { 423struct ia_css_grid_info {
424 /** \name ISP input size 424 /* \name ISP input size
425 * that is visible for user 425 * that is visible for user
426 * @{ 426 * @{
427 */ 427 */
428 uint32_t isp_in_width; 428 uint32_t isp_in_width;
429 uint32_t isp_in_height; 429 uint32_t isp_in_height;
430 /** @}*/ 430 /* @}*/
431 431
432 struct ia_css_3a_grid_info s3a_grid; /**< 3A grid info */ 432 struct ia_css_3a_grid_info s3a_grid; /** 3A grid info */
433 union ia_css_dvs_grid_u dvs_grid; 433 union ia_css_dvs_grid_u dvs_grid;
434 /**< All types of DVS statistics grid info union */ 434 /** All types of DVS statistics grid info union */
435 435
436 enum ia_css_vamem_type vamem_type; 436 enum ia_css_vamem_type vamem_type;
437}; 437};
438 438
439/** defaults for ia_css_grid_info structs */ 439/* defaults for ia_css_grid_info structs */
440#define DEFAULT_GRID_INFO \ 440#define DEFAULT_GRID_INFO \
441{ \ 441{ \
442 0, /* isp_in_width */ \ 442 0, /* isp_in_width */ \
@@ -446,25 +446,25 @@ struct ia_css_grid_info {
446 IA_CSS_VAMEM_TYPE_1 /* vamem_type */ \ 446 IA_CSS_VAMEM_TYPE_1 /* vamem_type */ \
447} 447}
448 448
449/** Morphing table, used for geometric distortion and chromatic abberration 449/* Morphing table, used for geometric distortion and chromatic abberration
450 * correction (GDCAC, also called GDC). 450 * correction (GDCAC, also called GDC).
451 * This table describes the imperfections introduced by the lens, the 451 * This table describes the imperfections introduced by the lens, the
452 * advanced ISP can correct for these imperfections using this table. 452 * advanced ISP can correct for these imperfections using this table.
453 */ 453 */
454struct ia_css_morph_table { 454struct ia_css_morph_table {
455 uint32_t enable; /**< To disable GDC, set this field to false. The 455 uint32_t enable; /** To disable GDC, set this field to false. The
456 coordinates fields can be set to NULL in this case. */ 456 coordinates fields can be set to NULL in this case. */
457 uint32_t height; /**< Table height */ 457 uint32_t height; /** Table height */
458 uint32_t width; /**< Table width */ 458 uint32_t width; /** Table width */
459 uint16_t *coordinates_x[IA_CSS_MORPH_TABLE_NUM_PLANES]; 459 uint16_t *coordinates_x[IA_CSS_MORPH_TABLE_NUM_PLANES];
460 /**< X coordinates that describe the sensor imperfection */ 460 /** X coordinates that describe the sensor imperfection */
461 uint16_t *coordinates_y[IA_CSS_MORPH_TABLE_NUM_PLANES]; 461 uint16_t *coordinates_y[IA_CSS_MORPH_TABLE_NUM_PLANES];
462 /**< Y coordinates that describe the sensor imperfection */ 462 /** Y coordinates that describe the sensor imperfection */
463}; 463};
464 464
465struct ia_css_dvs_6axis_config { 465struct ia_css_dvs_6axis_config {
466 unsigned int exp_id; 466 unsigned int exp_id;
467 /**< Exposure ID, see ia_css_event_public.h for more detail */ 467 /** Exposure ID, see ia_css_event_public.h for more detail */
468 uint32_t width_y; 468 uint32_t width_y;
469 uint32_t height_y; 469 uint32_t height_y;
470 uint32_t width_uv; 470 uint32_t width_uv;
@@ -479,16 +479,16 @@ struct ia_css_dvs_6axis_config {
479 * This specifies the coordinates (x,y) 479 * This specifies the coordinates (x,y)
480 */ 480 */
481struct ia_css_point { 481struct ia_css_point {
482 int32_t x; /**< x coordinate */ 482 int32_t x; /** x coordinate */
483 int32_t y; /**< y coordinate */ 483 int32_t y; /** y coordinate */
484}; 484};
485 485
486/** 486/**
487 * This specifies the region 487 * This specifies the region
488 */ 488 */
489struct ia_css_region { 489struct ia_css_region {
490 struct ia_css_point origin; /**< Starting point coordinates for the region */ 490 struct ia_css_point origin; /** Starting point coordinates for the region */
491 struct ia_css_resolution resolution; /**< Region resolution */ 491 struct ia_css_resolution resolution; /** Region resolution */
492}; 492};
493 493
494/** 494/**
@@ -509,30 +509,30 @@ struct ia_css_region {
509 * y + height <= effective input height 509 * y + height <= effective input height
510 */ 510 */
511struct ia_css_dz_config { 511struct ia_css_dz_config {
512 uint32_t dx; /**< Horizontal zoom factor */ 512 uint32_t dx; /** Horizontal zoom factor */
513 uint32_t dy; /**< Vertical zoom factor */ 513 uint32_t dy; /** Vertical zoom factor */
514 struct ia_css_region zoom_region; /**< region for zoom */ 514 struct ia_css_region zoom_region; /** region for zoom */
515}; 515};
516 516
517/** The still capture mode, this can be RAW (simply copy sensor input to DDR), 517/* The still capture mode, this can be RAW (simply copy sensor input to DDR),
518 * Primary ISP, the Advanced ISP (GDC) or the low-light ISP (ANR). 518 * Primary ISP, the Advanced ISP (GDC) or the low-light ISP (ANR).
519 */ 519 */
520enum ia_css_capture_mode { 520enum ia_css_capture_mode {
521 IA_CSS_CAPTURE_MODE_RAW, /**< no processing, copy data only */ 521 IA_CSS_CAPTURE_MODE_RAW, /** no processing, copy data only */
522 IA_CSS_CAPTURE_MODE_BAYER, /**< bayer processing, up to demosaic */ 522 IA_CSS_CAPTURE_MODE_BAYER, /** bayer processing, up to demosaic */
523 IA_CSS_CAPTURE_MODE_PRIMARY, /**< primary ISP */ 523 IA_CSS_CAPTURE_MODE_PRIMARY, /** primary ISP */
524 IA_CSS_CAPTURE_MODE_ADVANCED, /**< advanced ISP (GDC) */ 524 IA_CSS_CAPTURE_MODE_ADVANCED, /** advanced ISP (GDC) */
525 IA_CSS_CAPTURE_MODE_LOW_LIGHT /**< low light ISP (ANR) */ 525 IA_CSS_CAPTURE_MODE_LOW_LIGHT /** low light ISP (ANR) */
526}; 526};
527 527
528struct ia_css_capture_config { 528struct ia_css_capture_config {
529 enum ia_css_capture_mode mode; /**< Still capture mode */ 529 enum ia_css_capture_mode mode; /** Still capture mode */
530 uint32_t enable_xnr; /**< Enable/disable XNR */ 530 uint32_t enable_xnr; /** Enable/disable XNR */
531 uint32_t enable_raw_output; 531 uint32_t enable_raw_output;
532 bool enable_capture_pp_bli; /**< Enable capture_pp_bli mode */ 532 bool enable_capture_pp_bli; /** Enable capture_pp_bli mode */
533}; 533};
534 534
535/** default settings for ia_css_capture_config structs */ 535/* default settings for ia_css_capture_config structs */
536#define DEFAULT_CAPTURE_CONFIG \ 536#define DEFAULT_CAPTURE_CONFIG \
537{ \ 537{ \
538 IA_CSS_CAPTURE_MODE_PRIMARY, /* mode (capture) */ \ 538 IA_CSS_CAPTURE_MODE_PRIMARY, /* mode (capture) */ \
@@ -542,7 +542,7 @@ struct ia_css_capture_config {
542} 542}
543 543
544 544
545/** ISP filter configuration. This is a collection of configurations 545/* ISP filter configuration. This is a collection of configurations
546 * for each of the ISP filters (modules). 546 * for each of the ISP filters (modules).
547 * 547 *
548 * NOTE! The contents of all pointers is copied when get or set with the 548 * NOTE! The contents of all pointers is copied when get or set with the
@@ -557,98 +557,98 @@ struct ia_css_capture_config {
557 * ["ISP block", 2only] : ISP block is used only for ISP2. 557 * ["ISP block", 2only] : ISP block is used only for ISP2.
558 */ 558 */
559struct ia_css_isp_config { 559struct ia_css_isp_config {
560 struct ia_css_wb_config *wb_config; /**< White Balance 560 struct ia_css_wb_config *wb_config; /** White Balance
561 [WB1, 1&2] */ 561 [WB1, 1&2] */
562 struct ia_css_cc_config *cc_config; /**< Color Correction 562 struct ia_css_cc_config *cc_config; /** Color Correction
563 [CSC1, 1only] */ 563 [CSC1, 1only] */
564 struct ia_css_tnr_config *tnr_config; /**< Temporal Noise Reduction 564 struct ia_css_tnr_config *tnr_config; /** Temporal Noise Reduction
565 [TNR1, 1&2] */ 565 [TNR1, 1&2] */
566 struct ia_css_ecd_config *ecd_config; /**< Eigen Color Demosaicing 566 struct ia_css_ecd_config *ecd_config; /** Eigen Color Demosaicing
567 [DE2, 2only] */ 567 [DE2, 2only] */
568 struct ia_css_ynr_config *ynr_config; /**< Y(Luma) Noise Reduction 568 struct ia_css_ynr_config *ynr_config; /** Y(Luma) Noise Reduction
569 [YNR2&YEE2, 2only] */ 569 [YNR2&YEE2, 2only] */
570 struct ia_css_fc_config *fc_config; /**< Fringe Control 570 struct ia_css_fc_config *fc_config; /** Fringe Control
571 [FC2, 2only] */ 571 [FC2, 2only] */
572 struct ia_css_formats_config *formats_config; /**< Formats Control for main output 572 struct ia_css_formats_config *formats_config; /** Formats Control for main output
573 [FORMATS, 1&2] */ 573 [FORMATS, 1&2] */
574 struct ia_css_cnr_config *cnr_config; /**< Chroma Noise Reduction 574 struct ia_css_cnr_config *cnr_config; /** Chroma Noise Reduction
575 [CNR2, 2only] */ 575 [CNR2, 2only] */
576 struct ia_css_macc_config *macc_config; /**< MACC 576 struct ia_css_macc_config *macc_config; /** MACC
577 [MACC2, 2only] */ 577 [MACC2, 2only] */
578 struct ia_css_ctc_config *ctc_config; /**< Chroma Tone Control 578 struct ia_css_ctc_config *ctc_config; /** Chroma Tone Control
579 [CTC2, 2only] */ 579 [CTC2, 2only] */
580 struct ia_css_aa_config *aa_config; /**< YUV Anti-Aliasing 580 struct ia_css_aa_config *aa_config; /** YUV Anti-Aliasing
581 [AA2, 2only] 581 [AA2, 2only]
582 (not used currently) */ 582 (not used currently) */
583 struct ia_css_aa_config *baa_config; /**< Bayer Anti-Aliasing 583 struct ia_css_aa_config *baa_config; /** Bayer Anti-Aliasing
584 [BAA2, 1&2] */ 584 [BAA2, 1&2] */
585 struct ia_css_ce_config *ce_config; /**< Chroma Enhancement 585 struct ia_css_ce_config *ce_config; /** Chroma Enhancement
586 [CE1, 1only] */ 586 [CE1, 1only] */
587 struct ia_css_dvs_6axis_config *dvs_6axis_config; 587 struct ia_css_dvs_6axis_config *dvs_6axis_config;
588 struct ia_css_ob_config *ob_config; /**< Objective Black 588 struct ia_css_ob_config *ob_config; /** Objective Black
589 [OB1, 1&2] */ 589 [OB1, 1&2] */
590 struct ia_css_dp_config *dp_config; /**< Defect Pixel Correction 590 struct ia_css_dp_config *dp_config; /** Defect Pixel Correction
591 [DPC1/DPC2, 1&2] */ 591 [DPC1/DPC2, 1&2] */
592 struct ia_css_nr_config *nr_config; /**< Noise Reduction 592 struct ia_css_nr_config *nr_config; /** Noise Reduction
593 [BNR1&YNR1&CNR1, 1&2]*/ 593 [BNR1&YNR1&CNR1, 1&2]*/
594 struct ia_css_ee_config *ee_config; /**< Edge Enhancement 594 struct ia_css_ee_config *ee_config; /** Edge Enhancement
595 [YEE1, 1&2] */ 595 [YEE1, 1&2] */
596 struct ia_css_de_config *de_config; /**< Demosaic 596 struct ia_css_de_config *de_config; /** Demosaic
597 [DE1, 1only] */ 597 [DE1, 1only] */
598 struct ia_css_gc_config *gc_config; /**< Gamma Correction (for YUV) 598 struct ia_css_gc_config *gc_config; /** Gamma Correction (for YUV)
599 [GC1, 1only] */ 599 [GC1, 1only] */
600 struct ia_css_anr_config *anr_config; /**< Advanced Noise Reduction */ 600 struct ia_css_anr_config *anr_config; /** Advanced Noise Reduction */
601 struct ia_css_3a_config *s3a_config; /**< 3A Statistics config */ 601 struct ia_css_3a_config *s3a_config; /** 3A Statistics config */
602 struct ia_css_xnr_config *xnr_config; /**< eXtra Noise Reduction */ 602 struct ia_css_xnr_config *xnr_config; /** eXtra Noise Reduction */
603 struct ia_css_dz_config *dz_config; /**< Digital Zoom */ 603 struct ia_css_dz_config *dz_config; /** Digital Zoom */
604 struct ia_css_cc_config *yuv2rgb_cc_config; /**< Color Correction 604 struct ia_css_cc_config *yuv2rgb_cc_config; /** Color Correction
605 [CCM2, 2only] */ 605 [CCM2, 2only] */
606 struct ia_css_cc_config *rgb2yuv_cc_config; /**< Color Correction 606 struct ia_css_cc_config *rgb2yuv_cc_config; /** Color Correction
607 [CSC2, 2only] */ 607 [CSC2, 2only] */
608 struct ia_css_macc_table *macc_table; /**< MACC 608 struct ia_css_macc_table *macc_table; /** MACC
609 [MACC1/MACC2, 1&2]*/ 609 [MACC1/MACC2, 1&2]*/
610 struct ia_css_gamma_table *gamma_table; /**< Gamma Correction (for YUV) 610 struct ia_css_gamma_table *gamma_table; /** Gamma Correction (for YUV)
611 [GC1, 1only] */ 611 [GC1, 1only] */
612 struct ia_css_ctc_table *ctc_table; /**< Chroma Tone Control 612 struct ia_css_ctc_table *ctc_table; /** Chroma Tone Control
613 [CTC1, 1only] */ 613 [CTC1, 1only] */
614 614
615 /** \deprecated */ 615 /* \deprecated */
616 struct ia_css_xnr_table *xnr_table; /**< eXtra Noise Reduction 616 struct ia_css_xnr_table *xnr_table; /** eXtra Noise Reduction
617 [XNR1, 1&2] */ 617 [XNR1, 1&2] */
618 struct ia_css_rgb_gamma_table *r_gamma_table;/**< sRGB Gamma Correction 618 struct ia_css_rgb_gamma_table *r_gamma_table;/** sRGB Gamma Correction
619 [GC2, 2only] */ 619 [GC2, 2only] */
620 struct ia_css_rgb_gamma_table *g_gamma_table;/**< sRGB Gamma Correction 620 struct ia_css_rgb_gamma_table *g_gamma_table;/** sRGB Gamma Correction
621 [GC2, 2only] */ 621 [GC2, 2only] */
622 struct ia_css_rgb_gamma_table *b_gamma_table;/**< sRGB Gamma Correction 622 struct ia_css_rgb_gamma_table *b_gamma_table;/** sRGB Gamma Correction
623 [GC2, 2only] */ 623 [GC2, 2only] */
624 struct ia_css_vector *motion_vector; /**< For 2-axis DVS */ 624 struct ia_css_vector *motion_vector; /** For 2-axis DVS */
625 struct ia_css_shading_table *shading_table; 625 struct ia_css_shading_table *shading_table;
626 struct ia_css_morph_table *morph_table; 626 struct ia_css_morph_table *morph_table;
627 struct ia_css_dvs_coefficients *dvs_coefs; /**< DVS 1.0 coefficients */ 627 struct ia_css_dvs_coefficients *dvs_coefs; /** DVS 1.0 coefficients */
628 struct ia_css_dvs2_coefficients *dvs2_coefs; /**< DVS 2.0 coefficients */ 628 struct ia_css_dvs2_coefficients *dvs2_coefs; /** DVS 2.0 coefficients */
629 struct ia_css_capture_config *capture_config; 629 struct ia_css_capture_config *capture_config;
630 struct ia_css_anr_thres *anr_thres; 630 struct ia_css_anr_thres *anr_thres;
631 /** @deprecated{Old shading settings, see bugzilla bz675 for details} */ 631 /* @deprecated{Old shading settings, see bugzilla bz675 for details} */
632 struct ia_css_shading_settings *shading_settings; 632 struct ia_css_shading_settings *shading_settings;
633 struct ia_css_xnr3_config *xnr3_config; /**< eXtreme Noise Reduction v3 */ 633 struct ia_css_xnr3_config *xnr3_config; /** eXtreme Noise Reduction v3 */
634 /** comment from Lasse: Be aware how this feature will affect coordinate 634 /* comment from Lasse: Be aware how this feature will affect coordinate
635 * normalization in different parts of the system. (e.g. face detection, 635 * normalization in different parts of the system. (e.g. face detection,
636 * touch focus, 3A statistics and windows of interest, shading correction, 636 * touch focus, 3A statistics and windows of interest, shading correction,
637 * DVS, GDC) from IQ tool level and application level down-to ISP FW level. 637 * DVS, GDC) from IQ tool level and application level down-to ISP FW level.
638 * the risk for regression is not in the individual blocks, but how they 638 * the risk for regression is not in the individual blocks, but how they
639 * integrate together. */ 639 * integrate together. */
640 struct ia_css_output_config *output_config; /**< Main Output Mirroring, flipping */ 640 struct ia_css_output_config *output_config; /** Main Output Mirroring, flipping */
641 641
642#ifdef ISP2401 642#ifdef ISP2401
643 struct ia_css_tnr3_kernel_config *tnr3_config; /**< TNR3 config */ 643 struct ia_css_tnr3_kernel_config *tnr3_config; /** TNR3 config */
644#endif 644#endif
645 struct ia_css_scaler_config *scaler_config; /**< Skylake: scaler config (optional) */ 645 struct ia_css_scaler_config *scaler_config; /** Skylake: scaler config (optional) */
646 struct ia_css_formats_config *formats_config_display;/**< Formats control for viewfinder/display output (optional) 646 struct ia_css_formats_config *formats_config_display;/** Formats control for viewfinder/display output (optional)
647 [OSYS, n/a] */ 647 [OSYS, n/a] */
648 struct ia_css_output_config *output_config_display; /**< Viewfinder/display output mirroring, flipping (optional) */ 648 struct ia_css_output_config *output_config_display; /** Viewfinder/display output mirroring, flipping (optional) */
649 649
650 struct ia_css_frame *output_frame; /**< Output frame the config is to be applied to (optional) */ 650 struct ia_css_frame *output_frame; /** Output frame the config is to be applied to (optional) */
651 uint32_t isp_config_id; /**< Unique ID to track which config was actually applied to a particular frame */ 651 uint32_t isp_config_id; /** Unique ID to track which config was actually applied to a particular frame */
652}; 652};
653 653
654#endif /* _IA_CSS_TYPES_H */ 654#endif /* _IA_CSS_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version.h
index 48c59896e847..1e88901e0b82 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version.h
@@ -15,16 +15,16 @@
15#ifndef __IA_CSS_VERSION_H 15#ifndef __IA_CSS_VERSION_H
16#define __IA_CSS_VERSION_H 16#define __IA_CSS_VERSION_H
17 17
18/** @file 18/* @file
19 * This file contains functions to retrieve CSS-API version information 19 * This file contains functions to retrieve CSS-API version information
20 */ 20 */
21 21
22#include <ia_css_err.h> 22#include <ia_css_err.h>
23 23
24/** a common size for the version arrays */ 24/* a common size for the version arrays */
25#define MAX_VERSION_SIZE 500 25#define MAX_VERSION_SIZE 500
26 26
27/** @brief Retrieves the current CSS version 27/* @brief Retrieves the current CSS version
28 * @param[out] version A pointer to a buffer where to put the generated 28 * @param[out] version A pointer to a buffer where to put the generated
29 * version string. NULL is ignored. 29 * version string. NULL is ignored.
30 * @param[in] max_size Size of the version buffer. If version string 30 * @param[in] max_size Size of the version buffer. If version string
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_types.h
index 834eedbbeeff..0b95bf9b9aaf 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_types.h
@@ -15,12 +15,12 @@
15#ifndef __IA_CSS_AA2_TYPES_H 15#ifndef __IA_CSS_AA2_TYPES_H
16#define __IA_CSS_AA2_TYPES_H 16#define __IA_CSS_AA2_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Anti-Aliasing parameters. 19* CSS-API header file for Anti-Aliasing parameters.
20*/ 20*/
21 21
22 22
23/** Anti-Aliasing configuration. 23/* Anti-Aliasing configuration.
24 * 24 *
25 * This structure is used both for YUV AA and Bayer AA. 25 * This structure is used both for YUV AA and Bayer AA.
26 * 26 *
@@ -39,7 +39,7 @@
39 * ISP2: BAA2 is used. 39 * ISP2: BAA2 is used.
40 */ 40 */
41struct ia_css_aa_config { 41struct ia_css_aa_config {
42 uint16_t strength; /**< Strength of the filter. 42 uint16_t strength; /** Strength of the filter.
43 u0.13, [0,8191], 43 u0.13, [0,8191],
44 default/ineffective 0 */ 44 default/ineffective 0 */
45}; 45};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_types.h
index e205574098f2..dc317a857369 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_types.h
@@ -15,7 +15,7 @@
15#ifndef __IA_CSS_ANR_TYPES_H 15#ifndef __IA_CSS_ANR_TYPES_H
16#define __IA_CSS_ANR_TYPES_H 16#define __IA_CSS_ANR_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Advanced Noise Reduction kernel v1 19* CSS-API header file for Advanced Noise Reduction kernel v1
20*/ 20*/
21 21
@@ -23,11 +23,11 @@
23#define ANR_BPP 10 23#define ANR_BPP 10
24#define ANR_ELEMENT_BITS ((CEIL_DIV(ANR_BPP, 8))*8) 24#define ANR_ELEMENT_BITS ((CEIL_DIV(ANR_BPP, 8))*8)
25 25
26/** Advanced Noise Reduction configuration. 26/* Advanced Noise Reduction configuration.
27 * This is also known as Low-Light. 27 * This is also known as Low-Light.
28 */ 28 */
29struct ia_css_anr_config { 29struct ia_css_anr_config {
30 int32_t threshold; /**< Threshold */ 30 int32_t threshold; /** Threshold */
31 int32_t thresholds[4*4*4]; 31 int32_t thresholds[4*4*4];
32 int32_t factors[3]; 32 int32_t factors[3];
33}; 33};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_types.h
index 3832ada433ec..9b611315392c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_types.h
@@ -15,7 +15,7 @@
15#ifndef __IA_CSS_ANR2_TYPES_H 15#ifndef __IA_CSS_ANR2_TYPES_H
16#define __IA_CSS_ANR2_TYPES_H 16#define __IA_CSS_ANR2_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Advanced Noise Reduction kernel v2 19* CSS-API header file for Advanced Noise Reduction kernel v2
20*/ 20*/
21 21
@@ -23,7 +23,7 @@
23 23
24#define ANR_PARAM_SIZE 13 24#define ANR_PARAM_SIZE 13
25 25
26/** Advanced Noise Reduction (ANR) thresholds */ 26/* Advanced Noise Reduction (ANR) thresholds */
27struct ia_css_anr_thres { 27struct ia_css_anr_thres {
28 int16_t data[13*64]; 28 int16_t data[13*64];
29}; 29};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr_param.h
index 4a289853367a..312141793fd2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr_param.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr_param.h
@@ -18,7 +18,7 @@
18#include "vmem.h" 18#include "vmem.h"
19#include "ia_css_anr2_types.h" 19#include "ia_css_anr2_types.h"
20 20
21/** Advanced Noise Reduction (ANR) thresholds */ 21/* Advanced Noise Reduction (ANR) thresholds */
22 22
23struct ia_css_isp_anr2_params { 23struct ia_css_isp_anr2_params {
24 VMEM_ARRAY(data, ANR_PARAM_SIZE*ISP_VEC_NELEMS); 24 VMEM_ARRAY(data, ANR_PARAM_SIZE*ISP_VEC_NELEMS);
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bayer_ls/bayer_ls_1.0/ia_css_bayer_ls_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bayer_ls/bayer_ls_1.0/ia_css_bayer_ls_param.h
index 75ca7606b95c..a0d355454aa3 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bayer_ls/bayer_ls_1.0/ia_css_bayer_ls_param.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bayer_ls/bayer_ls_1.0/ia_css_bayer_ls_param.h
@@ -27,7 +27,7 @@
27#define BAYER_QUAD_HEIGHT 2 27#define BAYER_QUAD_HEIGHT 2
28#define NOF_BAYER_VECTORS 4 28#define NOF_BAYER_VECTORS 4
29 29
30/** bayer load/store */ 30/* bayer load/store */
31struct sh_css_isp_bayer_ls_isp_config { 31struct sh_css_isp_bayer_ls_isp_config {
32 uint32_t base_address[NUM_BAYER_LS]; 32 uint32_t base_address[NUM_BAYER_LS];
33 uint32_t width[NUM_BAYER_LS]; 33 uint32_t width[NUM_BAYER_LS];
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_types.h
index 9ae27a9e0baa..ec1688e7352d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_types.h
@@ -15,7 +15,7 @@
15#ifndef __IA_CSS_BH_TYPES_H 15#ifndef __IA_CSS_BH_TYPES_H
16#define __IA_CSS_BH_TYPES_H 16#define __IA_CSS_BH_TYPES_H
17 17
18/** Number of elements in the BH table. 18/* Number of elements in the BH table.
19 * Should be consistent with hmem.h 19 * Should be consistent with hmem.h
20 */ 20 */
21#define IA_CSS_HMEM_BH_TABLE_SIZE ISP_HIST_DEPTH 21#define IA_CSS_HMEM_BH_TABLE_SIZE ISP_HIST_DEPTH
@@ -27,7 +27,7 @@
27#define BH_COLOR_Y (3) 27#define BH_COLOR_Y (3)
28#define BH_COLOR_NUM (4) 28#define BH_COLOR_NUM (4)
29 29
30/** BH table */ 30/* BH table */
31struct ia_css_bh_table { 31struct ia_css_bh_table {
32 uint32_t hmem[ISP_HIST_COMPONENTS][IA_CSS_HMEM_BH_UNIT_SIZE]; 32 uint32_t hmem[ISP_HIST_COMPONENTS][IA_CSS_HMEM_BH_UNIT_SIZE];
33}; 33};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_types.h
index 219fb835cb26..87e0f19c856b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_types.h
@@ -15,13 +15,13 @@
15#ifndef __IA_CSS_BNLM_TYPES_H 15#ifndef __IA_CSS_BNLM_TYPES_H
16#define __IA_CSS_BNLM_TYPES_H 16#define __IA_CSS_BNLM_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Bayer Non-Linear Mean parameters. 19* CSS-API header file for Bayer Non-Linear Mean parameters.
20*/ 20*/
21 21
22#include "type_support.h" /* int32_t */ 22#include "type_support.h" /* int32_t */
23 23
24/** Bayer Non-Linear Mean configuration 24/* Bayer Non-Linear Mean configuration
25 * 25 *
26 * \brief BNLM public parameters. 26 * \brief BNLM public parameters.
27 * \details Struct with all parameters for the BNLM kernel that can be set 27 * \details Struct with all parameters for the BNLM kernel that can be set
@@ -30,16 +30,16 @@
30 * ISP2.6.1: BNLM is used. 30 * ISP2.6.1: BNLM is used.
31 */ 31 */
32struct ia_css_bnlm_config { 32struct ia_css_bnlm_config {
33 bool rad_enable; /**< Enable a radial dependency in a weight calculation */ 33 bool rad_enable; /** Enable a radial dependency in a weight calculation */
34 int32_t rad_x_origin; /**< Initial x coordinate for a radius calculation */ 34 int32_t rad_x_origin; /** Initial x coordinate for a radius calculation */
35 int32_t rad_y_origin; /**< Initial x coordinate for a radius calculation */ 35 int32_t rad_y_origin; /** Initial x coordinate for a radius calculation */
36 /* a threshold for average of weights if this < Th, do not denoise pixel */ 36 /* a threshold for average of weights if this < Th, do not denoise pixel */
37 int32_t avg_min_th; 37 int32_t avg_min_th;
38 /* minimum weight for denoising if max < th, do not denoise pixel */ 38 /* minimum weight for denoising if max < th, do not denoise pixel */
39 int32_t max_min_th; 39 int32_t max_min_th;
40 40
41 /**@{*/ 41 /**@{*/
42 /** Coefficient for approximation, in the form of (1 + x / N)^N, 42 /* Coefficient for approximation, in the form of (1 + x / N)^N,
43 * that fits the first-order exp() to default exp_lut in BNLM sheet 43 * that fits the first-order exp() to default exp_lut in BNLM sheet
44 * */ 44 * */
45 int32_t exp_coeff_a; 45 int32_t exp_coeff_a;
@@ -48,55 +48,55 @@ struct ia_css_bnlm_config {
48 uint32_t exp_exponent; 48 uint32_t exp_exponent;
49 /**@}*/ 49 /**@}*/
50 50
51 int32_t nl_th[3]; /**< Detail thresholds */ 51 int32_t nl_th[3]; /** Detail thresholds */
52 52
53 /** Index for n-th maximum candidate weight for each detail group */ 53 /* Index for n-th maximum candidate weight for each detail group */
54 int32_t match_quality_max_idx[4]; 54 int32_t match_quality_max_idx[4];
55 55
56 /**@{*/ 56 /**@{*/
57 /** A lookup table for 1/sqrt(1+mu) approximation */ 57 /* A lookup table for 1/sqrt(1+mu) approximation */
58 int32_t mu_root_lut_thr[15]; 58 int32_t mu_root_lut_thr[15];
59 int32_t mu_root_lut_val[16]; 59 int32_t mu_root_lut_val[16];
60 /**@}*/ 60 /**@}*/
61 /**@{*/ 61 /**@{*/
62 /** A lookup table for SAD normalization */ 62 /* A lookup table for SAD normalization */
63 int32_t sad_norm_lut_thr[15]; 63 int32_t sad_norm_lut_thr[15];
64 int32_t sad_norm_lut_val[16]; 64 int32_t sad_norm_lut_val[16];
65 /**@}*/ 65 /**@}*/
66 /**@{*/ 66 /**@{*/
67 /** A lookup table that models a weight's dependency on textures */ 67 /* A lookup table that models a weight's dependency on textures */
68 int32_t sig_detail_lut_thr[15]; 68 int32_t sig_detail_lut_thr[15];
69 int32_t sig_detail_lut_val[16]; 69 int32_t sig_detail_lut_val[16];
70 /**@}*/ 70 /**@}*/
71 /**@{*/ 71 /**@{*/
72 /** A lookup table that models a weight's dependency on a pixel's radial distance */ 72 /* A lookup table that models a weight's dependency on a pixel's radial distance */
73 int32_t sig_rad_lut_thr[15]; 73 int32_t sig_rad_lut_thr[15];
74 int32_t sig_rad_lut_val[16]; 74 int32_t sig_rad_lut_val[16];
75 /**@}*/ 75 /**@}*/
76 /**@{*/ 76 /**@{*/
77 /** A lookup table to control denoise power depending on a pixel's radial distance */ 77 /* A lookup table to control denoise power depending on a pixel's radial distance */
78 int32_t rad_pow_lut_thr[15]; 78 int32_t rad_pow_lut_thr[15];
79 int32_t rad_pow_lut_val[16]; 79 int32_t rad_pow_lut_val[16];
80 /**@}*/ 80 /**@}*/
81 /**@{*/ 81 /**@{*/
82 /** Non linear transfer functions to calculate the blending coefficient depending on detail group */ 82 /* Non linear transfer functions to calculate the blending coefficient depending on detail group */
83 /** detail group 0 */ 83 /* detail group 0 */
84 /**@{*/ 84 /**@{*/
85 int32_t nl_0_lut_thr[15]; 85 int32_t nl_0_lut_thr[15];
86 int32_t nl_0_lut_val[16]; 86 int32_t nl_0_lut_val[16];
87 /**@}*/ 87 /**@}*/
88 /**@{*/ 88 /**@{*/
89 /** detail group 1 */ 89 /* detail group 1 */
90 int32_t nl_1_lut_thr[15]; 90 int32_t nl_1_lut_thr[15];
91 int32_t nl_1_lut_val[16]; 91 int32_t nl_1_lut_val[16];
92 /**@}*/ 92 /**@}*/
93 /**@{*/ 93 /**@{*/
94 /** detail group 2 */ 94 /* detail group 2 */
95 int32_t nl_2_lut_thr[15]; 95 int32_t nl_2_lut_thr[15];
96 int32_t nl_2_lut_val[16]; 96 int32_t nl_2_lut_val[16];
97 /**@}*/ 97 /**@}*/
98 /**@{*/ 98 /**@{*/
99 /** detail group 3 */ 99 /* detail group 3 */
100 int32_t nl_3_lut_thr[15]; 100 int32_t nl_3_lut_thr[15];
101 int32_t nl_3_lut_val[16]; 101 int32_t nl_3_lut_val[16];
102 /**@}*/ 102 /**@}*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h
index be80f705d8a1..551bd0ed3bac 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h
@@ -15,13 +15,13 @@
15#ifndef __IA_CSS_BNR2_2_TYPES_H 15#ifndef __IA_CSS_BNR2_2_TYPES_H
16#define __IA_CSS_BNR2_2_TYPES_H 16#define __IA_CSS_BNR2_2_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Bayer Noise Reduction parameters. 19* CSS-API header file for Bayer Noise Reduction parameters.
20*/ 20*/
21 21
22#include "type_support.h" /* int32_t */ 22#include "type_support.h" /* int32_t */
23 23
24/** Bayer Noise Reduction 2.2 configuration 24/* Bayer Noise Reduction 2.2 configuration
25 * 25 *
26 * \brief BNR2_2 public parameters. 26 * \brief BNR2_2 public parameters.
27 * \details Struct with all parameters for the BNR2.2 kernel that can be set 27 * \details Struct with all parameters for the BNR2.2 kernel that can be set
@@ -31,41 +31,41 @@
31 */ 31 */
32struct ia_css_bnr2_2_config { 32struct ia_css_bnr2_2_config {
33 /**@{*/ 33 /**@{*/
34 /** Directional variance gain for R/G/B components in dark region */ 34 /* Directional variance gain for R/G/B components in dark region */
35 int32_t d_var_gain_r; 35 int32_t d_var_gain_r;
36 int32_t d_var_gain_g; 36 int32_t d_var_gain_g;
37 int32_t d_var_gain_b; 37 int32_t d_var_gain_b;
38 /**@}*/ 38 /**@}*/
39 /**@{*/ 39 /**@{*/
40 /** Slope of Directional variance gain between dark and bright region */ 40 /* Slope of Directional variance gain between dark and bright region */
41 int32_t d_var_gain_slope_r; 41 int32_t d_var_gain_slope_r;
42 int32_t d_var_gain_slope_g; 42 int32_t d_var_gain_slope_g;
43 int32_t d_var_gain_slope_b; 43 int32_t d_var_gain_slope_b;
44 /**@}*/ 44 /**@}*/
45 /**@{*/ 45 /**@{*/
46 /** Non-Directional variance gain for R/G/B components in dark region */ 46 /* Non-Directional variance gain for R/G/B components in dark region */
47 int32_t n_var_gain_r; 47 int32_t n_var_gain_r;
48 int32_t n_var_gain_g; 48 int32_t n_var_gain_g;
49 int32_t n_var_gain_b; 49 int32_t n_var_gain_b;
50 /**@}*/ 50 /**@}*/
51 /**@{*/ 51 /**@{*/
52 /** Slope of Non-Directional variance gain between dark and bright region */ 52 /* Slope of Non-Directional variance gain between dark and bright region */
53 int32_t n_var_gain_slope_r; 53 int32_t n_var_gain_slope_r;
54 int32_t n_var_gain_slope_g; 54 int32_t n_var_gain_slope_g;
55 int32_t n_var_gain_slope_b; 55 int32_t n_var_gain_slope_b;
56 /**@}*/ 56 /**@}*/
57 57
58 int32_t dir_thres; /**< Threshold for directional filtering */ 58 int32_t dir_thres; /** Threshold for directional filtering */
59 int32_t dir_thres_w; /**< Threshold width for directional filtering */ 59 int32_t dir_thres_w; /** Threshold width for directional filtering */
60 int32_t var_offset_coef; /**< Variance offset coefficient */ 60 int32_t var_offset_coef; /** Variance offset coefficient */
61 int32_t dir_gain; /**< Gain for directional coefficient */ 61 int32_t dir_gain; /** Gain for directional coefficient */
62 int32_t detail_gain; /**< Gain for low contrast texture control */ 62 int32_t detail_gain; /** Gain for low contrast texture control */
63 int32_t detail_gain_divisor; /**< Gain divisor for low contrast texture control */ 63 int32_t detail_gain_divisor; /** Gain divisor for low contrast texture control */
64 int32_t detail_level_offset; /**< Bias value for low contrast texture control */ 64 int32_t detail_level_offset; /** Bias value for low contrast texture control */
65 int32_t d_var_th_min; /**< Minimum clipping value for directional variance*/ 65 int32_t d_var_th_min; /** Minimum clipping value for directional variance*/
66 int32_t d_var_th_max; /**< Maximum clipping value for diretional variance*/ 66 int32_t d_var_th_max; /** Maximum clipping value for diretional variance*/
67 int32_t n_var_th_min; /**< Minimum clipping value for non-directional variance*/ 67 int32_t n_var_th_min; /** Minimum clipping value for non-directional variance*/
68 int32_t n_var_th_max; /**< Maximum clipping value for non-directional variance*/ 68 int32_t n_var_th_max; /** Maximum clipping value for non-directional variance*/
69}; 69};
70 70
71#endif /* __IA_CSS_BNR2_2_TYPES_H */ 71#endif /* __IA_CSS_BNR2_2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h
index 6df6c2be9a70..3ebc069d8ada 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h
@@ -15,11 +15,11 @@
15#ifndef __IA_CSS_CNR2_TYPES_H 15#ifndef __IA_CSS_CNR2_TYPES_H
16#define __IA_CSS_CNR2_TYPES_H 16#define __IA_CSS_CNR2_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Chroma Noise Reduction (CNR) parameters 19* CSS-API header file for Chroma Noise Reduction (CNR) parameters
20*/ 20*/
21 21
22/** Chroma Noise Reduction configuration. 22/* Chroma Noise Reduction configuration.
23 * 23 *
24 * Small sensitivity of edge means strong smoothness and NR performance. 24 * Small sensitivity of edge means strong smoothness and NR performance.
25 * If you see blurred color on vertical edges, 25 * If you see blurred color on vertical edges,
@@ -33,21 +33,21 @@
33 * ISP2: CNR2 is used for Still. 33 * ISP2: CNR2 is used for Still.
34 */ 34 */
35struct ia_css_cnr_config { 35struct ia_css_cnr_config {
36 uint16_t coring_u; /**< Coring level of U. 36 uint16_t coring_u; /** Coring level of U.
37 u0.13, [0,8191], default/ineffective 0 */ 37 u0.13, [0,8191], default/ineffective 0 */
38 uint16_t coring_v; /**< Coring level of V. 38 uint16_t coring_v; /** Coring level of V.
39 u0.13, [0,8191], default/ineffective 0 */ 39 u0.13, [0,8191], default/ineffective 0 */
40 uint16_t sense_gain_vy; /**< Sensitivity of horizontal edge of Y. 40 uint16_t sense_gain_vy; /** Sensitivity of horizontal edge of Y.
41 u13.0, [0,8191], default 100, ineffective 8191 */ 41 u13.0, [0,8191], default 100, ineffective 8191 */
42 uint16_t sense_gain_vu; /**< Sensitivity of horizontal edge of U. 42 uint16_t sense_gain_vu; /** Sensitivity of horizontal edge of U.
43 u13.0, [0,8191], default 100, ineffective 8191 */ 43 u13.0, [0,8191], default 100, ineffective 8191 */
44 uint16_t sense_gain_vv; /**< Sensitivity of horizontal edge of V. 44 uint16_t sense_gain_vv; /** Sensitivity of horizontal edge of V.
45 u13.0, [0,8191], default 100, ineffective 8191 */ 45 u13.0, [0,8191], default 100, ineffective 8191 */
46 uint16_t sense_gain_hy; /**< Sensitivity of vertical edge of Y. 46 uint16_t sense_gain_hy; /** Sensitivity of vertical edge of Y.
47 u13.0, [0,8191], default 50, ineffective 8191 */ 47 u13.0, [0,8191], default 50, ineffective 8191 */
48 uint16_t sense_gain_hu; /**< Sensitivity of vertical edge of U. 48 uint16_t sense_gain_hu; /** Sensitivity of vertical edge of U.
49 u13.0, [0,8191], default 50, ineffective 8191 */ 49 u13.0, [0,8191], default 50, ineffective 8191 */
50 uint16_t sense_gain_hv; /**< Sensitivity of vertical edge of V. 50 uint16_t sense_gain_hv; /** Sensitivity of vertical edge of V.
51 u13.0, [0,8191], default 50, ineffective 8191 */ 51 u13.0, [0,8191], default 50, ineffective 8191 */
52}; 52};
53 53
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h
index 3f11442500f0..47a38fd65950 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h
@@ -23,10 +23,10 @@
23 * 23 *
24 */ 24 */
25struct ia_css_conversion_config { 25struct ia_css_conversion_config {
26 uint32_t en; /**< en parameter */ 26 uint32_t en; /** en parameter */
27 uint32_t dummy0; /**< dummy0 dummy parameter 0 */ 27 uint32_t dummy0; /** dummy0 dummy parameter 0 */
28 uint32_t dummy1; /**< dummy1 dummy parameter 1 */ 28 uint32_t dummy1; /** dummy1 dummy parameter 1 */
29 uint32_t dummy2; /**< dummy2 dummy parameter 2 */ 29 uint32_t dummy2; /** dummy2 dummy parameter 2 */
30}; 30};
31 31
32#endif /* __IA_CSS_CONVERSION_TYPES_H */ 32#endif /* __IA_CSS_CONVERSION_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_param.h
index 8bfc8dad37a8..0f1812cdd92a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_param.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_param.h
@@ -19,7 +19,7 @@
19#include "dma.h" 19#include "dma.h"
20#include "sh_css_internal.h" /* sh_css_crop_pos */ 20#include "sh_css_internal.h" /* sh_css_crop_pos */
21 21
22/** Crop frame */ 22/* Crop frame */
23struct sh_css_isp_crop_isp_config { 23struct sh_css_isp_crop_isp_config {
24 uint32_t width_a_over_b; 24 uint32_t width_a_over_b;
25 struct dma_port_config port_b; 25 struct dma_port_config port_b;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_types.h
index 8091ad4d4602..b5d454225f89 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_types.h
@@ -15,7 +15,7 @@
15#ifndef __IA_CSS_CROP_TYPES_H 15#ifndef __IA_CSS_CROP_TYPES_H
16#define __IA_CSS_CROP_TYPES_H 16#define __IA_CSS_CROP_TYPES_H
17 17
18/** Crop frame 18/* Crop frame
19 * 19 *
20 * ISP block: crop frame 20 * ISP block: crop frame
21 */ 21 */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_types.h
index 54ced072467f..10404380c637 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_types.h
@@ -15,11 +15,11 @@
15#ifndef __IA_CSS_CSC_TYPES_H 15#ifndef __IA_CSS_CSC_TYPES_H
16#define __IA_CSS_CSC_TYPES_H 16#define __IA_CSS_CSC_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Color Space Conversion parameters. 19* CSS-API header file for Color Space Conversion parameters.
20*/ 20*/
21 21
22/** Color Correction configuration. 22/* Color Correction configuration.
23 * 23 *
24 * This structure is used for 3 cases. 24 * This structure is used for 3 cases.
25 * ("YCgCo" is the output format of Demosaic.) 25 * ("YCgCo" is the output format of Demosaic.)
@@ -68,9 +68,9 @@
68 * 4096 -3430 -666 68 * 4096 -3430 -666
69 */ 69 */
70struct ia_css_cc_config { 70struct ia_css_cc_config {
71 uint32_t fraction_bits;/**< Fractional bits of matrix. 71 uint32_t fraction_bits;/** Fractional bits of matrix.
72 u8.0, [0,13] */ 72 u8.0, [0,13] */
73 int32_t matrix[3 * 3]; /**< Conversion matrix. 73 int32_t matrix[3 * 3]; /** Conversion matrix.
74 s[13-fraction_bits].[fraction_bits], 74 s[13-fraction_bits].[fraction_bits],
75 [-8192,8191] */ 75 [-8192,8191] */
76}; 76};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h
index c66e823618f6..ad7040c9d7cb 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h
@@ -22,11 +22,11 @@
22 22
23/*VMEM Luma params*/ 23/*VMEM Luma params*/
24struct ia_css_isp_ctc2_vmem_params { 24struct ia_css_isp_ctc2_vmem_params {
25 /**< Gains by Y(Luma) at Y = 0.0,Y_X1, Y_X2, Y_X3, Y_X4*/ 25 /** Gains by Y(Luma) at Y = 0.0,Y_X1, Y_X2, Y_X3, Y_X4*/
26 VMEM_ARRAY(y_x, ISP_VEC_NELEMS); 26 VMEM_ARRAY(y_x, ISP_VEC_NELEMS);
27 /** kneepoints by Y(Luma) 0.0, y_x1, y_x2, y _x3, y_x4*/ 27 /* kneepoints by Y(Luma) 0.0, y_x1, y_x2, y _x3, y_x4*/
28 VMEM_ARRAY(y_y, ISP_VEC_NELEMS); 28 VMEM_ARRAY(y_y, ISP_VEC_NELEMS);
29 /** Slopes of lines interconnecting 29 /* Slopes of lines interconnecting
30 * 0.0 -> y_x1 -> y_x2 -> y _x3 -> y_x4 -> 1.0*/ 30 * 0.0 -> y_x1 -> y_x2 -> y _x3 -> y_x4 -> 1.0*/
31 VMEM_ARRAY(e_y_slope, ISP_VEC_NELEMS); 31 VMEM_ARRAY(e_y_slope, ISP_VEC_NELEMS);
32}; 32};
@@ -34,15 +34,15 @@ struct ia_css_isp_ctc2_vmem_params {
34/*DMEM Chroma params*/ 34/*DMEM Chroma params*/
35struct ia_css_isp_ctc2_dmem_params { 35struct ia_css_isp_ctc2_dmem_params {
36 36
37 /** Gains by UV(Chroma) under kneepoints uv_x0 and uv_x1*/ 37 /* Gains by UV(Chroma) under kneepoints uv_x0 and uv_x1*/
38 int32_t uv_y0; 38 int32_t uv_y0;
39 int32_t uv_y1; 39 int32_t uv_y1;
40 40
41 /** Kneepoints by UV(Chroma)- uv_x0 and uv_x1*/ 41 /* Kneepoints by UV(Chroma)- uv_x0 and uv_x1*/
42 int32_t uv_x0; 42 int32_t uv_x0;
43 int32_t uv_x1; 43 int32_t uv_x1;
44 44
45 /** Slope of line interconnecting uv_x0 -> uv_x1*/ 45 /* Slope of line interconnecting uv_x0 -> uv_x1*/
46 int32_t uv_dydx; 46 int32_t uv_dydx;
47 47
48}; 48};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h
index 7b75f01e2ad2..1222cf33e851 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h
@@ -15,7 +15,7 @@
15#ifndef __IA_CSS_CTC2_TYPES_H 15#ifndef __IA_CSS_CTC2_TYPES_H
16#define __IA_CSS_CTC2_TYPES_H 16#define __IA_CSS_CTC2_TYPES_H
17 17
18/** Chroma Tone Control configuration. 18/* Chroma Tone Control configuration.
19* 19*
20* ISP block: CTC2 (CTC by polygonal approximation) 20* ISP block: CTC2 (CTC by polygonal approximation)
21* (ISP1: CTC1 (CTC by look-up table) is used.) 21* (ISP1: CTC1 (CTC by look-up table) is used.)
@@ -24,7 +24,7 @@
24*/ 24*/
25struct ia_css_ctc2_config { 25struct ia_css_ctc2_config {
26 26
27 /**< Gains by Y(Luma) at Y =0.0,Y_X1, Y_X2, Y_X3, Y_X4 and Y_X5 27 /** Gains by Y(Luma) at Y =0.0,Y_X1, Y_X2, Y_X3, Y_X4 and Y_X5
28 * --default/ineffective value: 4096(0.5f) 28 * --default/ineffective value: 4096(0.5f)
29 */ 29 */
30 int32_t y_y0; 30 int32_t y_y0;
@@ -33,19 +33,19 @@ struct ia_css_ctc2_config {
33 int32_t y_y3; 33 int32_t y_y3;
34 int32_t y_y4; 34 int32_t y_y4;
35 int32_t y_y5; 35 int32_t y_y5;
36 /** 1st-4th kneepoints by Y(Luma) --default/ineffective value:n/a 36 /* 1st-4th kneepoints by Y(Luma) --default/ineffective value:n/a
37 * requirement: 0.0 < y_x1 < y_x2 <y _x3 < y_x4 < 1.0 37 * requirement: 0.0 < y_x1 < y_x2 <y _x3 < y_x4 < 1.0
38 */ 38 */
39 int32_t y_x1; 39 int32_t y_x1;
40 int32_t y_x2; 40 int32_t y_x2;
41 int32_t y_x3; 41 int32_t y_x3;
42 int32_t y_x4; 42 int32_t y_x4;
43 /** Gains by UV(Chroma) under threholds uv_x0 and uv_x1 43 /* Gains by UV(Chroma) under threholds uv_x0 and uv_x1
44 * --default/ineffective value: 4096(0.5f) 44 * --default/ineffective value: 4096(0.5f)
45 */ 45 */
46 int32_t uv_y0; 46 int32_t uv_y0;
47 int32_t uv_y1; 47 int32_t uv_y1;
48 /** Minimum and Maximum Thresholds by UV(Chroma)- uv_x0 and uv_x1 48 /* Minimum and Maximum Thresholds by UV(Chroma)- uv_x0 and uv_x1
49 * --default/ineffective value: n/a 49 * --default/ineffective value: n/a
50 */ 50 */
51 int32_t uv_x0; 51 int32_t uv_x0;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h
index 1da215bb966d..4ac47ce10566 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h
@@ -15,11 +15,11 @@
15#ifndef __IA_CSS_CTC_TYPES_H 15#ifndef __IA_CSS_CTC_TYPES_H
16#define __IA_CSS_CTC_TYPES_H 16#define __IA_CSS_CTC_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Chroma Tone Control parameters. 19* CSS-API header file for Chroma Tone Control parameters.
20*/ 20*/
21 21
22/** Fractional bits for CTC gain (used only for ISP1). 22/* Fractional bits for CTC gain (used only for ISP1).
23 * 23 *
24 * IA_CSS_CTC_COEF_SHIFT(=13) includes not only the fractional bits 24 * IA_CSS_CTC_COEF_SHIFT(=13) includes not only the fractional bits
25 * of gain(=8), but also the bits(=5) to convert chroma 25 * of gain(=8), but also the bits(=5) to convert chroma
@@ -32,14 +32,14 @@
32 */ 32 */
33#define IA_CSS_CTC_COEF_SHIFT 13 33#define IA_CSS_CTC_COEF_SHIFT 13
34 34
35/** Number of elements in the CTC table. */ 35/* Number of elements in the CTC table. */
36#define IA_CSS_VAMEM_1_CTC_TABLE_SIZE_LOG2 10 36#define IA_CSS_VAMEM_1_CTC_TABLE_SIZE_LOG2 10
37/** Number of elements in the CTC table. */ 37/* Number of elements in the CTC table. */
38#define IA_CSS_VAMEM_1_CTC_TABLE_SIZE (1U<<IA_CSS_VAMEM_1_CTC_TABLE_SIZE_LOG2) 38#define IA_CSS_VAMEM_1_CTC_TABLE_SIZE (1U<<IA_CSS_VAMEM_1_CTC_TABLE_SIZE_LOG2)
39 39
40/** Number of elements in the CTC table. */ 40/* Number of elements in the CTC table. */
41#define IA_CSS_VAMEM_2_CTC_TABLE_SIZE_LOG2 8 41#define IA_CSS_VAMEM_2_CTC_TABLE_SIZE_LOG2 8
42/** Number of elements in the CTC table. */ 42/* Number of elements in the CTC table. */
43#define IA_CSS_VAMEM_2_CTC_TABLE_SIZE ((1U<<IA_CSS_VAMEM_2_CTC_TABLE_SIZE_LOG2) + 1) 43#define IA_CSS_VAMEM_2_CTC_TABLE_SIZE ((1U<<IA_CSS_VAMEM_2_CTC_TABLE_SIZE_LOG2) + 1)
44 44
45enum ia_css_vamem_type { 45enum ia_css_vamem_type {
@@ -47,44 +47,44 @@ enum ia_css_vamem_type {
47 IA_CSS_VAMEM_TYPE_2 47 IA_CSS_VAMEM_TYPE_2
48}; 48};
49 49
50/** Chroma Tone Control configuration. 50/* Chroma Tone Control configuration.
51 * 51 *
52 * ISP block: CTC2 (CTC by polygonal line approximation) 52 * ISP block: CTC2 (CTC by polygonal line approximation)
53 * (ISP1: CTC1 (CTC by look-up table) is used.) 53 * (ISP1: CTC1 (CTC by look-up table) is used.)
54 * ISP2: CTC2 is used. 54 * ISP2: CTC2 is used.
55 */ 55 */
56struct ia_css_ctc_config { 56struct ia_css_ctc_config {
57 uint16_t y0; /**< 1st kneepoint gain. 57 uint16_t y0; /** 1st kneepoint gain.
58 u[ce_gain_exp].[13-ce_gain_exp], [0,8191], 58 u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
59 default/ineffective 4096(0.5) */ 59 default/ineffective 4096(0.5) */
60 uint16_t y1; /**< 2nd kneepoint gain. 60 uint16_t y1; /** 2nd kneepoint gain.
61 u[ce_gain_exp].[13-ce_gain_exp], [0,8191], 61 u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
62 default/ineffective 4096(0.5) */ 62 default/ineffective 4096(0.5) */
63 uint16_t y2; /**< 3rd kneepoint gain. 63 uint16_t y2; /** 3rd kneepoint gain.
64 u[ce_gain_exp].[13-ce_gain_exp], [0,8191], 64 u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
65 default/ineffective 4096(0.5) */ 65 default/ineffective 4096(0.5) */
66 uint16_t y3; /**< 4th kneepoint gain. 66 uint16_t y3; /** 4th kneepoint gain.
67 u[ce_gain_exp].[13-ce_gain_exp], [0,8191], 67 u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
68 default/ineffective 4096(0.5) */ 68 default/ineffective 4096(0.5) */
69 uint16_t y4; /**< 5th kneepoint gain. 69 uint16_t y4; /** 5th kneepoint gain.
70 u[ce_gain_exp].[13-ce_gain_exp], [0,8191], 70 u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
71 default/ineffective 4096(0.5) */ 71 default/ineffective 4096(0.5) */
72 uint16_t y5; /**< 6th kneepoint gain. 72 uint16_t y5; /** 6th kneepoint gain.
73 u[ce_gain_exp].[13-ce_gain_exp], [0,8191], 73 u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
74 default/ineffective 4096(0.5) */ 74 default/ineffective 4096(0.5) */
75 uint16_t ce_gain_exp; /**< Common exponent of y-axis gain. 75 uint16_t ce_gain_exp; /** Common exponent of y-axis gain.
76 u8.0, [0,13], 76 u8.0, [0,13],
77 default/ineffective 1 */ 77 default/ineffective 1 */
78 uint16_t x1; /**< 2nd kneepoint luma. 78 uint16_t x1; /** 2nd kneepoint luma.
79 u0.13, [0,8191], constraints: 0<x1<x2, 79 u0.13, [0,8191], constraints: 0<x1<x2,
80 default/ineffective 1024 */ 80 default/ineffective 1024 */
81 uint16_t x2; /**< 3rd kneepoint luma. 81 uint16_t x2; /** 3rd kneepoint luma.
82 u0.13, [0,8191], constraints: x1<x2<x3, 82 u0.13, [0,8191], constraints: x1<x2<x3,
83 default/ineffective 2048 */ 83 default/ineffective 2048 */
84 uint16_t x3; /**< 4th kneepoint luma. 84 uint16_t x3; /** 4th kneepoint luma.
85 u0.13, [0,8191], constraints: x2<x3<x4, 85 u0.13, [0,8191], constraints: x2<x3<x4,
86 default/ineffective 6144 */ 86 default/ineffective 6144 */
87 uint16_t x4; /**< 5tn kneepoint luma. 87 uint16_t x4; /** 5tn kneepoint luma.
88 u0.13, [0,8191], constraints: x3<x4<8191, 88 u0.13, [0,8191], constraints: x3<x4<8191,
89 default/ineffective 7168 */ 89 default/ineffective 7168 */
90}; 90};
@@ -94,7 +94,7 @@ union ia_css_ctc_data {
94 uint16_t vamem_2[IA_CSS_VAMEM_2_CTC_TABLE_SIZE]; 94 uint16_t vamem_2[IA_CSS_VAMEM_2_CTC_TABLE_SIZE];
95}; 95};
96 96
97/** CTC table, used for Chroma Tone Control. 97/* CTC table, used for Chroma Tone Control.
98 * 98 *
99 * ISP block: CTC1 (CTC by look-up table) 99 * ISP block: CTC1 (CTC by look-up table)
100 * ISP1: CTC1 is used. 100 * ISP1: CTC1 is used.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_types.h
index 525c838d5a99..803be68abc54 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_types.h
@@ -15,25 +15,25 @@
15#ifndef __IA_CSS_DE_TYPES_H 15#ifndef __IA_CSS_DE_TYPES_H
16#define __IA_CSS_DE_TYPES_H 16#define __IA_CSS_DE_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Demosaic (bayer-to-YCgCo) parameters. 19* CSS-API header file for Demosaic (bayer-to-YCgCo) parameters.
20*/ 20*/
21 21
22/** Demosaic (bayer-to-YCgCo) configuration. 22/* Demosaic (bayer-to-YCgCo) configuration.
23 * 23 *
24 * ISP block: DE1 24 * ISP block: DE1
25 * ISP1: DE1 is used. 25 * ISP1: DE1 is used.
26 * (ISP2: DE2 is used.) 26 * (ISP2: DE2 is used.)
27 */ 27 */
28struct ia_css_de_config { 28struct ia_css_de_config {
29 ia_css_u0_16 pixelnoise; /**< Pixel noise used in moire elimination. 29 ia_css_u0_16 pixelnoise; /** Pixel noise used in moire elimination.
30 u0.16, [0,65535], 30 u0.16, [0,65535],
31 default 0, ineffective 0 */ 31 default 0, ineffective 0 */
32 ia_css_u0_16 c1_coring_threshold; /**< Coring threshold for C1. 32 ia_css_u0_16 c1_coring_threshold; /** Coring threshold for C1.
33 This is the same as nr_config.threshold_cb. 33 This is the same as nr_config.threshold_cb.
34 u0.16, [0,65535], 34 u0.16, [0,65535],
35 default 128(0.001953125), ineffective 0 */ 35 default 128(0.001953125), ineffective 0 */
36 ia_css_u0_16 c2_coring_threshold; /**< Coring threshold for C2. 36 ia_css_u0_16 c2_coring_threshold; /** Coring threshold for C2.
37 This is the same as nr_config.threshold_cr. 37 This is the same as nr_config.threshold_cr.
38 u0.16, [0,65535], 38 u0.16, [0,65535],
39 default 128(0.001953125), ineffective 0 */ 39 default 128(0.001953125), ineffective 0 */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_types.h
index eac1b2779857..50bdde419bb1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_types.h
@@ -15,24 +15,24 @@
15#ifndef __IA_CSS_DE2_TYPES_H 15#ifndef __IA_CSS_DE2_TYPES_H
16#define __IA_CSS_DE2_TYPES_H 16#define __IA_CSS_DE2_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Demosaicing parameters. 19* CSS-API header file for Demosaicing parameters.
20*/ 20*/
21 21
22/** Eigen Color Demosaicing configuration. 22/* Eigen Color Demosaicing configuration.
23 * 23 *
24 * ISP block: DE2 24 * ISP block: DE2
25 * (ISP1: DE1 is used.) 25 * (ISP1: DE1 is used.)
26 * ISP2: DE2 is used. 26 * ISP2: DE2 is used.
27 */ 27 */
28struct ia_css_ecd_config { 28struct ia_css_ecd_config {
29 uint16_t zip_strength; /**< Strength of zipper reduction. 29 uint16_t zip_strength; /** Strength of zipper reduction.
30 u0.13, [0,8191], 30 u0.13, [0,8191],
31 default 5489(0.67), ineffective 0 */ 31 default 5489(0.67), ineffective 0 */
32 uint16_t fc_strength; /**< Strength of false color reduction. 32 uint16_t fc_strength; /** Strength of false color reduction.
33 u0.13, [0,8191], 33 u0.13, [0,8191],
34 default 8191(almost 1.0), ineffective 0 */ 34 default 8191(almost 1.0), ineffective 0 */
35 uint16_t fc_debias; /**< Prevent color change 35 uint16_t fc_debias; /** Prevent color change
36 on noise or Gr/Gb imbalance. 36 on noise or Gr/Gb imbalance.
37 u0.13, [0,8191], 37 u0.13, [0,8191],
38 default 0, ineffective 0 */ 38 default 0, ineffective 0 */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_types.h
index b5d7b6b175b6..1bf6dcef7dc7 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_types.h
@@ -15,12 +15,12 @@
15#ifndef __IA_CSS_DP_TYPES_H 15#ifndef __IA_CSS_DP_TYPES_H
16#define __IA_CSS_DP_TYPES_H 16#define __IA_CSS_DP_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Defect Pixel Correction (DPC) parameters. 19* CSS-API header file for Defect Pixel Correction (DPC) parameters.
20*/ 20*/
21 21
22 22
23/** Defect Pixel Correction configuration. 23/* Defect Pixel Correction configuration.
24 * 24 *
25 * ISP block: DPC1 (DPC after WB) 25 * ISP block: DPC1 (DPC after WB)
26 * DPC2 (DPC before WB) 26 * DPC2 (DPC before WB)
@@ -28,14 +28,14 @@
28 * ISP2: DPC2 is used. 28 * ISP2: DPC2 is used.
29 */ 29 */
30struct ia_css_dp_config { 30struct ia_css_dp_config {
31 ia_css_u0_16 threshold; /**< The threshold of defect pixel correction, 31 ia_css_u0_16 threshold; /** The threshold of defect pixel correction,
32 representing the permissible difference of 32 representing the permissible difference of
33 intensity between one pixel and its 33 intensity between one pixel and its
34 surrounding pixels. Smaller values result 34 surrounding pixels. Smaller values result
35 in more frequent pixel corrections. 35 in more frequent pixel corrections.
36 u0.16, [0,65535], 36 u0.16, [0,65535],
37 default 8192, ineffective 65535 */ 37 default 8192, ineffective 65535 */
38 ia_css_u8_8 gain; /**< The sensitivity of mis-correction. ISP will 38 ia_css_u8_8 gain; /** The sensitivity of mis-correction. ISP will
39 miss a lot of defects if the value is set 39 miss a lot of defects if the value is set
40 too large. 40 too large.
41 u8.8, [0,65535], 41 u8.8, [0,65535],
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_types.h
index b2c974196ce8..6727682d287f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_types.h
@@ -15,14 +15,14 @@
15#ifndef __IA_CSS_DPC2_TYPES_H 15#ifndef __IA_CSS_DPC2_TYPES_H
16#define __IA_CSS_DPC2_TYPES_H 16#define __IA_CSS_DPC2_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Defect Pixel Correction 2 (DPC2) parameters. 19* CSS-API header file for Defect Pixel Correction 2 (DPC2) parameters.
20*/ 20*/
21 21
22#include "type_support.h" 22#include "type_support.h"
23 23
24/**@{*/ 24/**@{*/
25/** Floating point constants for different metrics. */ 25/* Floating point constants for different metrics. */
26#define METRIC1_ONE_FP (1<<12) 26#define METRIC1_ONE_FP (1<<12)
27#define METRIC2_ONE_FP (1<<5) 27#define METRIC2_ONE_FP (1<<5)
28#define METRIC3_ONE_FP (1<<12) 28#define METRIC3_ONE_FP (1<<12)
@@ -30,7 +30,7 @@
30/**@}*/ 30/**@}*/
31 31
32/**@{*/ 32/**@{*/
33/** Defect Pixel Correction 2 configuration. 33/* Defect Pixel Correction 2 configuration.
34 * 34 *
35 * \brief DPC2 public parameters. 35 * \brief DPC2 public parameters.
36 * \details Struct with all parameters for the Defect Pixel Correction 2 36 * \details Struct with all parameters for the Defect Pixel Correction 2
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h
index 4d0abfe4d0fd..66a7e58659c0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h
@@ -30,7 +30,7 @@
30#ifdef ISP2401 30#ifdef ISP2401
31 31
32#endif 32#endif
33/** dvserence frame */ 33/* dvserence frame */
34struct sh_css_isp_dvs_isp_config { 34struct sh_css_isp_dvs_isp_config {
35 uint32_t num_horizontal_blocks; 35 uint32_t num_horizontal_blocks;
36 uint32_t num_vertical_blocks; 36 uint32_t num_vertical_blocks;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h
index 216c54a21ea5..30772d217fb2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h
@@ -15,7 +15,7 @@
15#ifndef __IA_CSS_DVS_TYPES_H 15#ifndef __IA_CSS_DVS_TYPES_H
16#define __IA_CSS_DVS_TYPES_H 16#define __IA_CSS_DVS_TYPES_H
17 17
18/** DVS frame 18/* DVS frame
19 * 19 *
20 * ISP block: dvs frame 20 * ISP block: dvs frame
21 */ 21 */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_types.h
index 07651f0ac558..32e91824a5e5 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_types.h
@@ -15,7 +15,7 @@
15#ifndef __IA_CSS_EED1_8_TYPES_H 15#ifndef __IA_CSS_EED1_8_TYPES_H
16#define __IA_CSS_EED1_8_TYPES_H 16#define __IA_CSS_EED1_8_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Edge Enhanced Demosaic parameters. 19* CSS-API header file for Edge Enhanced Demosaic parameters.
20*/ 20*/
21 21
@@ -36,51 +36,51 @@
36 */ 36 */
37#define IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS 9 37#define IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS 9
38 38
39/** Edge Enhanced Demosaic configuration 39/* Edge Enhanced Demosaic configuration
40 * 40 *
41 * ISP2.6.1: EED1_8 is used. 41 * ISP2.6.1: EED1_8 is used.
42 */ 42 */
43 43
44struct ia_css_eed1_8_config { 44struct ia_css_eed1_8_config {
45 int32_t rbzp_strength; /**< Strength of zipper reduction. */ 45 int32_t rbzp_strength; /** Strength of zipper reduction. */
46 46
47 int32_t fcstrength; /**< Strength of false color reduction. */ 47 int32_t fcstrength; /** Strength of false color reduction. */
48 int32_t fcthres_0; /**< Threshold to prevent chroma coring due to noise or green disparity in dark region. */ 48 int32_t fcthres_0; /** Threshold to prevent chroma coring due to noise or green disparity in dark region. */
49 int32_t fcthres_1; /**< Threshold to prevent chroma coring due to noise or green disparity in bright region. */ 49 int32_t fcthres_1; /** Threshold to prevent chroma coring due to noise or green disparity in bright region. */
50 int32_t fc_sat_coef; /**< How much color saturation to maintain in high color saturation region. */ 50 int32_t fc_sat_coef; /** How much color saturation to maintain in high color saturation region. */
51 int32_t fc_coring_prm; /**< Chroma coring coefficient for tint color suppression. */ 51 int32_t fc_coring_prm; /** Chroma coring coefficient for tint color suppression. */
52 52
53 int32_t aerel_thres0; /**< Threshold for Non-Directional Reliability at dark region. */ 53 int32_t aerel_thres0; /** Threshold for Non-Directional Reliability at dark region. */
54 int32_t aerel_gain0; /**< Gain for Non-Directional Reliability at dark region. */ 54 int32_t aerel_gain0; /** Gain for Non-Directional Reliability at dark region. */
55 int32_t aerel_thres1; /**< Threshold for Non-Directional Reliability at bright region. */ 55 int32_t aerel_thres1; /** Threshold for Non-Directional Reliability at bright region. */
56 int32_t aerel_gain1; /**< Gain for Non-Directional Reliability at bright region. */ 56 int32_t aerel_gain1; /** Gain for Non-Directional Reliability at bright region. */
57 57
58 int32_t derel_thres0; /**< Threshold for Directional Reliability at dark region. */ 58 int32_t derel_thres0; /** Threshold for Directional Reliability at dark region. */
59 int32_t derel_gain0; /**< Gain for Directional Reliability at dark region. */ 59 int32_t derel_gain0; /** Gain for Directional Reliability at dark region. */
60 int32_t derel_thres1; /**< Threshold for Directional Reliability at bright region. */ 60 int32_t derel_thres1; /** Threshold for Directional Reliability at bright region. */
61 int32_t derel_gain1; /**< Gain for Directional Reliability at bright region. */ 61 int32_t derel_gain1; /** Gain for Directional Reliability at bright region. */
62 62
63 int32_t coring_pos0; /**< Positive Edge Coring Threshold in dark region. */ 63 int32_t coring_pos0; /** Positive Edge Coring Threshold in dark region. */
64 int32_t coring_pos1; /**< Positive Edge Coring Threshold in bright region. */ 64 int32_t coring_pos1; /** Positive Edge Coring Threshold in bright region. */
65 int32_t coring_neg0; /**< Negative Edge Coring Threshold in dark region. */ 65 int32_t coring_neg0; /** Negative Edge Coring Threshold in dark region. */
66 int32_t coring_neg1; /**< Negative Edge Coring Threshold in bright region. */ 66 int32_t coring_neg1; /** Negative Edge Coring Threshold in bright region. */
67 67
68 int32_t gain_exp; /**< Common Exponent of Gain. */ 68 int32_t gain_exp; /** Common Exponent of Gain. */
69 int32_t gain_pos0; /**< Gain for Positive Edge in dark region. */ 69 int32_t gain_pos0; /** Gain for Positive Edge in dark region. */
70 int32_t gain_pos1; /**< Gain for Positive Edge in bright region. */ 70 int32_t gain_pos1; /** Gain for Positive Edge in bright region. */
71 int32_t gain_neg0; /**< Gain for Negative Edge in dark region. */ 71 int32_t gain_neg0; /** Gain for Negative Edge in dark region. */
72 int32_t gain_neg1; /**< Gain for Negative Edge in bright region. */ 72 int32_t gain_neg1; /** Gain for Negative Edge in bright region. */
73 73
74 int32_t pos_margin0; /**< Margin for Positive Edge in dark region. */ 74 int32_t pos_margin0; /** Margin for Positive Edge in dark region. */
75 int32_t pos_margin1; /**< Margin for Positive Edge in bright region. */ 75 int32_t pos_margin1; /** Margin for Positive Edge in bright region. */
76 int32_t neg_margin0; /**< Margin for Negative Edge in dark region. */ 76 int32_t neg_margin0; /** Margin for Negative Edge in dark region. */
77 int32_t neg_margin1; /**< Margin for Negative Edge in bright region. */ 77 int32_t neg_margin1; /** Margin for Negative Edge in bright region. */
78 78
79 int32_t dew_enhance_seg_x[IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS]; /**< Segment data for directional edge weight: X. */ 79 int32_t dew_enhance_seg_x[IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS]; /** Segment data for directional edge weight: X. */
80 int32_t dew_enhance_seg_y[IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS]; /**< Segment data for directional edge weight: Y. */ 80 int32_t dew_enhance_seg_y[IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS]; /** Segment data for directional edge weight: Y. */
81 int32_t dew_enhance_seg_slope[(IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)]; /**< Segment data for directional edge weight: Slope. */ 81 int32_t dew_enhance_seg_slope[(IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)]; /** Segment data for directional edge weight: Slope. */
82 int32_t dew_enhance_seg_exp[(IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)]; /**< Segment data for directional edge weight: Exponent. */ 82 int32_t dew_enhance_seg_exp[(IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)]; /** Segment data for directional edge weight: Exponent. */
83 int32_t dedgew_max; /**< Max Weight for Directional Edge. */ 83 int32_t dedgew_max; /** Max Weight for Directional Edge. */
84}; 84};
85 85
86#endif /* __IA_CSS_EED1_8_TYPES_H */ 86#endif /* __IA_CSS_EED1_8_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_types.h
index df1565a5914c..49479572b40d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_types.h
@@ -15,20 +15,20 @@
15#ifndef __IA_CSS_FORMATS_TYPES_H 15#ifndef __IA_CSS_FORMATS_TYPES_H
16#define __IA_CSS_FORMATS_TYPES_H 16#define __IA_CSS_FORMATS_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for output format parameters. 19* CSS-API header file for output format parameters.
20*/ 20*/
21 21
22#include "type_support.h" 22#include "type_support.h"
23 23
24/** Formats configuration. 24/* Formats configuration.
25 * 25 *
26 * ISP block: FORMATS 26 * ISP block: FORMATS
27 * ISP1: FORMATS is used. 27 * ISP1: FORMATS is used.
28 * ISP2: FORMATS is used. 28 * ISP2: FORMATS is used.
29 */ 29 */
30struct ia_css_formats_config { 30struct ia_css_formats_config {
31 uint32_t video_full_range_flag; /**< selects the range of YUV output. 31 uint32_t video_full_range_flag; /** selects the range of YUV output.
32 u8.0, [0,1], 32 u8.0, [0,1],
33 default 1, ineffective n/a\n 33 default 1, ineffective n/a\n
34 1 - full range, luma 0-255, chroma 0-255\n 34 1 - full range, luma 0-255, chroma 0-255\n
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h
index 5a2f0c06a80d..ef287fa3c428 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h
@@ -15,11 +15,11 @@
15#ifndef __IA_CSS_FPN_TYPES_H 15#ifndef __IA_CSS_FPN_TYPES_H
16#define __IA_CSS_FPN_TYPES_H 16#define __IA_CSS_FPN_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Fixed Pattern Noise parameters. 19* CSS-API header file for Fixed Pattern Noise parameters.
20*/ 20*/
21 21
22/** Fixed Pattern Noise table. 22/* Fixed Pattern Noise table.
23 * 23 *
24 * This contains the fixed patterns noise values 24 * This contains the fixed patterns noise values
25 * obtained from a black frame capture. 25 * obtained from a black frame capture.
@@ -33,15 +33,15 @@
33 */ 33 */
34 34
35struct ia_css_fpn_table { 35struct ia_css_fpn_table {
36 int16_t *data; /**< Table content (fixed patterns noise). 36 int16_t *data; /** Table content (fixed patterns noise).
37 u0.[13-shift], [0,63] */ 37 u0.[13-shift], [0,63] */
38 uint32_t width; /**< Table width (in pixels). 38 uint32_t width; /** Table width (in pixels).
39 This is the input frame width. */ 39 This is the input frame width. */
40 uint32_t height; /**< Table height (in pixels). 40 uint32_t height; /** Table height (in pixels).
41 This is the input frame height. */ 41 This is the input frame height. */
42 uint32_t shift; /**< Common exponent of table content. 42 uint32_t shift; /** Common exponent of table content.
43 u8.0, [0,13] */ 43 u8.0, [0,13] */
44 uint32_t enabled; /**< Fpn is enabled. 44 uint32_t enabled; /** Fpn is enabled.
45 bool */ 45 bool */
46}; 46};
47 47
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_types.h
index dd9f0eda3353..594807fe2925 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_types.h
@@ -15,36 +15,36 @@
15#ifndef __IA_CSS_GC_TYPES_H 15#ifndef __IA_CSS_GC_TYPES_H
16#define __IA_CSS_GC_TYPES_H 16#define __IA_CSS_GC_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Gamma Correction parameters. 19* CSS-API header file for Gamma Correction parameters.
20*/ 20*/
21 21
22#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h" /* FIXME: Needed for ia_css_vamem_type */ 22#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h" /* FIXME: Needed for ia_css_vamem_type */
23 23
24/** Fractional bits for GAMMA gain */ 24/* Fractional bits for GAMMA gain */
25#define IA_CSS_GAMMA_GAIN_K_SHIFT 13 25#define IA_CSS_GAMMA_GAIN_K_SHIFT 13
26 26
27/** Number of elements in the gamma table. */ 27/* Number of elements in the gamma table. */
28#define IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE_LOG2 10 28#define IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE_LOG2 10
29#define IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE (1U<<IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE_LOG2) 29#define IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE (1U<<IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE_LOG2)
30 30
31/** Number of elements in the gamma table. */ 31/* Number of elements in the gamma table. */
32#define IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE_LOG2 8 32#define IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE_LOG2 8
33#define IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE ((1U<<IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE_LOG2) + 1) 33#define IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE ((1U<<IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE_LOG2) + 1)
34 34
35/** Gamma table, used for Y(Luma) Gamma Correction. 35/* Gamma table, used for Y(Luma) Gamma Correction.
36 * 36 *
37 * ISP block: GC1 (YUV Gamma Correction) 37 * ISP block: GC1 (YUV Gamma Correction)
38 * ISP1: GC1 is used. 38 * ISP1: GC1 is used.
39 * (ISP2: GC2(sRGB Gamma Correction) is used.) 39 * (ISP2: GC2(sRGB Gamma Correction) is used.)
40 */ 40 */
41/**< IA_CSS_VAMEM_TYPE_1(ISP2300) or 41/** IA_CSS_VAMEM_TYPE_1(ISP2300) or
42 IA_CSS_VAMEM_TYPE_2(ISP2400) */ 42 IA_CSS_VAMEM_TYPE_2(ISP2400) */
43union ia_css_gc_data { 43union ia_css_gc_data {
44 uint16_t vamem_1[IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE]; 44 uint16_t vamem_1[IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE];
45 /**< Y(Luma) Gamma table on vamem type 1. u0.8, [0,255] */ 45 /** Y(Luma) Gamma table on vamem type 1. u0.8, [0,255] */
46 uint16_t vamem_2[IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE]; 46 uint16_t vamem_2[IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE];
47 /**< Y(Luma) Gamma table on vamem type 2. u0.8, [0,255] */ 47 /** Y(Luma) Gamma table on vamem type 2. u0.8, [0,255] */
48}; 48};
49 49
50struct ia_css_gamma_table { 50struct ia_css_gamma_table {
@@ -52,22 +52,22 @@ struct ia_css_gamma_table {
52 union ia_css_gc_data data; 52 union ia_css_gc_data data;
53}; 53};
54 54
55/** Gamma Correction configuration (used only for YUV Gamma Correction). 55/* Gamma Correction configuration (used only for YUV Gamma Correction).
56 * 56 *
57 * ISP block: GC1 (YUV Gamma Correction) 57 * ISP block: GC1 (YUV Gamma Correction)
58 * ISP1: GC1 is used. 58 * ISP1: GC1 is used.
59 * (ISP2: GC2 (sRGB Gamma Correction) is used.) 59 * (ISP2: GC2 (sRGB Gamma Correction) is used.)
60 */ 60 */
61struct ia_css_gc_config { 61struct ia_css_gc_config {
62 uint16_t gain_k1; /**< Gain to adjust U after YUV Gamma Correction. 62 uint16_t gain_k1; /** Gain to adjust U after YUV Gamma Correction.
63 u0.16, [0,65535], 63 u0.16, [0,65535],
64 default/ineffective 19000(0.29) */ 64 default/ineffective 19000(0.29) */
65 uint16_t gain_k2; /**< Gain to adjust V after YUV Gamma Correction. 65 uint16_t gain_k2; /** Gain to adjust V after YUV Gamma Correction.
66 u0.16, [0,65535], 66 u0.16, [0,65535],
67 default/ineffective 19000(0.29) */ 67 default/ineffective 19000(0.29) */
68}; 68};
69 69
70/** Chroma Enhancement configuration. 70/* Chroma Enhancement configuration.
71 * 71 *
72 * This parameter specifies range of chroma output level. 72 * This parameter specifies range of chroma output level.
73 * The standard range is [0,255] or [16,240]. 73 * The standard range is [0,255] or [16,240].
@@ -77,20 +77,20 @@ struct ia_css_gc_config {
77 * (ISP2: CE1 is not used.) 77 * (ISP2: CE1 is not used.)
78 */ 78 */
79struct ia_css_ce_config { 79struct ia_css_ce_config {
80 uint8_t uv_level_min; /**< Minimum of chroma output level. 80 uint8_t uv_level_min; /** Minimum of chroma output level.
81 u0.8, [0,255], default/ineffective 0 */ 81 u0.8, [0,255], default/ineffective 0 */
82 uint8_t uv_level_max; /**< Maximum of chroma output level. 82 uint8_t uv_level_max; /** Maximum of chroma output level.
83 u0.8, [0,255], default/ineffective 255 */ 83 u0.8, [0,255], default/ineffective 255 */
84}; 84};
85 85
86/** Multi-Axes Color Correction (MACC) configuration. 86/* Multi-Axes Color Correction (MACC) configuration.
87 * 87 *
88 * ISP block: MACC2 (MACC by matrix and exponent(ia_css_macc_config)) 88 * ISP block: MACC2 (MACC by matrix and exponent(ia_css_macc_config))
89 * (ISP1: MACC1 (MACC by only matrix) is used.) 89 * (ISP1: MACC1 (MACC by only matrix) is used.)
90 * ISP2: MACC2 is used. 90 * ISP2: MACC2 is used.
91 */ 91 */
92struct ia_css_macc_config { 92struct ia_css_macc_config {
93 uint8_t exp; /**< Common exponent of ia_css_macc_table. 93 uint8_t exp; /** Common exponent of ia_css_macc_table.
94 u8.0, [0,13], default 1, ineffective 1 */ 94 u8.0, [0,13], default 1, ineffective 1 */
95}; 95};
96 96
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_types.h
index e439583bdfb6..fab7467d30a5 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_types.h
@@ -17,33 +17,33 @@
17 17
18#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h" /* FIXME: needed for ia_css_vamem_type */ 18#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h" /* FIXME: needed for ia_css_vamem_type */
19 19
20/** @file 20/* @file
21* CSS-API header file for Gamma Correction parameters. 21* CSS-API header file for Gamma Correction parameters.
22*/ 22*/
23 23
24/** sRGB Gamma table, used for sRGB Gamma Correction. 24/* sRGB Gamma table, used for sRGB Gamma Correction.
25 * 25 *
26 * ISP block: GC2 (sRGB Gamma Correction) 26 * ISP block: GC2 (sRGB Gamma Correction)
27 * (ISP1: GC1(YUV Gamma Correction) is used.) 27 * (ISP1: GC1(YUV Gamma Correction) is used.)
28 * ISP2: GC2 is used. 28 * ISP2: GC2 is used.
29 */ 29 */
30 30
31/** Number of elements in the sRGB gamma table. */ 31/* Number of elements in the sRGB gamma table. */
32#define IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE_LOG2 8 32#define IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE_LOG2 8
33#define IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE (1U<<IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE_LOG2) 33#define IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE (1U<<IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE_LOG2)
34 34
35/** Number of elements in the sRGB gamma table. */ 35/* Number of elements in the sRGB gamma table. */
36#define IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE_LOG2 8 36#define IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE_LOG2 8
37#define IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE ((1U<<IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE_LOG2) + 1) 37#define IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE ((1U<<IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE_LOG2) + 1)
38 38
39/**< IA_CSS_VAMEM_TYPE_1(ISP2300) or 39/** IA_CSS_VAMEM_TYPE_1(ISP2300) or
40 IA_CSS_VAMEM_TYPE_2(ISP2400) */ 40 IA_CSS_VAMEM_TYPE_2(ISP2400) */
41union ia_css_rgb_gamma_data { 41union ia_css_rgb_gamma_data {
42 uint16_t vamem_1[IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE]; 42 uint16_t vamem_1[IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE];
43 /**< RGB Gamma table on vamem type1. This table is not used, 43 /** RGB Gamma table on vamem type1. This table is not used,
44 because sRGB Gamma Correction is not implemented for ISP2300. */ 44 because sRGB Gamma Correction is not implemented for ISP2300. */
45 uint16_t vamem_2[IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE]; 45 uint16_t vamem_2[IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE];
46 /**< RGB Gamma table on vamem type2. u0.12, [0,4095] */ 46 /** RGB Gamma table on vamem type2. u0.12, [0,4095] */
47}; 47};
48 48
49struct ia_css_rgb_gamma_table { 49struct ia_css_rgb_gamma_table {
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_types.h
index c3345b32e3e6..26464421b077 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_types.h
@@ -24,14 +24,14 @@
24 * \detail Currently HDR paramters are used only for testing purposes 24 * \detail Currently HDR paramters are used only for testing purposes
25 */ 25 */
26struct ia_css_hdr_irradiance_params { 26struct ia_css_hdr_irradiance_params {
27 int test_irr; /**< Test parameter */ 27 int test_irr; /** Test parameter */
28 int match_shift[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /**< Histogram matching shift parameter */ 28 int match_shift[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /** Histogram matching shift parameter */
29 int match_mul[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /**< Histogram matching multiplication parameter */ 29 int match_mul[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /** Histogram matching multiplication parameter */
30 int thr_low[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /**< Weight map soft threshold low bound parameter */ 30 int thr_low[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /** Weight map soft threshold low bound parameter */
31 int thr_high[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /**< Weight map soft threshold high bound parameter */ 31 int thr_high[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /** Weight map soft threshold high bound parameter */
32 int thr_coeff[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /**< Soft threshold linear function coefficien */ 32 int thr_coeff[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /** Soft threshold linear function coefficien */
33 int thr_shift[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /**< Soft threshold precision shift parameter */ 33 int thr_shift[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /** Soft threshold precision shift parameter */
34 int weight_bpp; /**< Weight map bits per pixel */ 34 int weight_bpp; /** Weight map bits per pixel */
35}; 35};
36 36
37/** 37/**
@@ -39,7 +39,7 @@ struct ia_css_hdr_irradiance_params {
39 * \detail Currently HDR paramters are used only for testing purposes 39 * \detail Currently HDR paramters are used only for testing purposes
40 */ 40 */
41struct ia_css_hdr_deghost_params { 41struct ia_css_hdr_deghost_params {
42 int test_deg; /**< Test parameter */ 42 int test_deg; /** Test parameter */
43}; 43};
44 44
45/** 45/**
@@ -47,7 +47,7 @@ struct ia_css_hdr_deghost_params {
47 * \detail Currently HDR paramters are used only for testing purposes 47 * \detail Currently HDR paramters are used only for testing purposes
48 */ 48 */
49struct ia_css_hdr_exclusion_params { 49struct ia_css_hdr_exclusion_params {
50 int test_excl; /**< Test parameter */ 50 int test_excl; /** Test parameter */
51}; 51};
52 52
53/** 53/**
@@ -56,9 +56,9 @@ struct ia_css_hdr_exclusion_params {
56 * the CSS API. Currenly, only test paramters are defined. 56 * the CSS API. Currenly, only test paramters are defined.
57 */ 57 */
58struct ia_css_hdr_config { 58struct ia_css_hdr_config {
59 struct ia_css_hdr_irradiance_params irradiance; /**< HDR irradiance paramaters */ 59 struct ia_css_hdr_irradiance_params irradiance; /** HDR irradiance paramaters */
60 struct ia_css_hdr_deghost_params deghost; /**< HDR deghosting parameters */ 60 struct ia_css_hdr_deghost_params deghost; /** HDR deghosting parameters */
61 struct ia_css_hdr_exclusion_params exclusion; /**< HDR exclusion parameters */ 61 struct ia_css_hdr_exclusion_params exclusion; /** HDR exclusion parameters */
62}; 62};
63 63
64#endif /* __IA_CSS_HDR_TYPES_H */ 64#endif /* __IA_CSS_HDR_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c
index 78e159c04851..f80480cf9de2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c
@@ -1,5 +1,5 @@
1#ifdef ISP2401 1#ifdef ISP2401
2/** 2/*
3Support for Intel Camera Imaging ISP subsystem. 3Support for Intel Camera Imaging ISP subsystem.
4Copyright (c) 2010 - 2015, Intel Corporation. 4Copyright (c) 2010 - 2015, Intel Corporation.
5 5
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c
index f7e1a632c47e..eb9e9439cc21 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c
@@ -1,5 +1,5 @@
1#ifdef ISP2401 1#ifdef ISP2401
2/** 2/*
3Support for Intel Camera Imaging ISP subsystem. 3Support for Intel Camera Imaging ISP subsystem.
4Copyright (c) 2010 - 2015, Intel Corporation. 4Copyright (c) 2010 - 2015, Intel Corporation.
5 5
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h
index 3d510bf5886a..9cd31c2c0253 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h
@@ -15,22 +15,22 @@
15#ifndef __IA_CSS_MACC1_5_TYPES_H 15#ifndef __IA_CSS_MACC1_5_TYPES_H
16#define __IA_CSS_MACC1_5_TYPES_H 16#define __IA_CSS_MACC1_5_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Multi-Axis Color Conversion algorithm parameters. 19* CSS-API header file for Multi-Axis Color Conversion algorithm parameters.
20*/ 20*/
21 21
22/** Multi-Axis Color Conversion configuration 22/* Multi-Axis Color Conversion configuration
23 * 23 *
24 * ISP2.6.1: MACC1_5 is used. 24 * ISP2.6.1: MACC1_5 is used.
25 */ 25 */
26 26
27 27
28/** Number of axes in the MACC table. */ 28/* Number of axes in the MACC table. */
29#define IA_CSS_MACC_NUM_AXES 16 29#define IA_CSS_MACC_NUM_AXES 16
30/** Number of coefficients per MACC axes. */ 30/* Number of coefficients per MACC axes. */
31#define IA_CSS_MACC_NUM_COEFS 4 31#define IA_CSS_MACC_NUM_COEFS 4
32 32
33/** Multi-Axes Color Correction (MACC) table. 33/* Multi-Axes Color Correction (MACC) table.
34 * 34 *
35 * ISP block: MACC (MACC by only matrix) 35 * ISP block: MACC (MACC by only matrix)
36 * MACC1_5 (MACC by matrix and exponent(ia_css_macc_config)) 36 * MACC1_5 (MACC by matrix and exponent(ia_css_macc_config))
@@ -55,19 +55,19 @@
55 */ 55 */
56struct ia_css_macc1_5_table { 56struct ia_css_macc1_5_table {
57 int16_t data[IA_CSS_MACC_NUM_COEFS * IA_CSS_MACC_NUM_AXES]; 57 int16_t data[IA_CSS_MACC_NUM_COEFS * IA_CSS_MACC_NUM_AXES];
58 /**< 16 of 2x2 matix 58 /** 16 of 2x2 matix
59 MACC1_5: s[macc_config.exp].[13-macc_config.exp], [-8192,8191] 59 MACC1_5: s[macc_config.exp].[13-macc_config.exp], [-8192,8191]
60 default/ineffective: (s1.12) 60 default/ineffective: (s1.12)
61 16 of "identity 2x2 matix" {4096,0,0,4096} */ 61 16 of "identity 2x2 matix" {4096,0,0,4096} */
62}; 62};
63 63
64/** Multi-Axes Color Correction (MACC) configuration. 64/* Multi-Axes Color Correction (MACC) configuration.
65 * 65 *
66 * ISP block: MACC1_5 (MACC by matrix and exponent(ia_css_macc_config)) 66 * ISP block: MACC1_5 (MACC by matrix and exponent(ia_css_macc_config))
67 * ISP2: MACC1_5 is used. 67 * ISP2: MACC1_5 is used.
68 */ 68 */
69struct ia_css_macc1_5_config { 69struct ia_css_macc1_5_config {
70 uint8_t exp; /**< Common exponent of ia_css_macc_table. 70 uint8_t exp; /** Common exponent of ia_css_macc_table.
71 u8.0, [0,13], default 1, ineffective 1 */ 71 u8.0, [0,13], default 1, ineffective 1 */
72}; 72};
73 73
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_types.h
index a25581c6f3ac..2c9e5a8ceb98 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_types.h
@@ -15,17 +15,17 @@
15#ifndef __IA_CSS_MACC_TYPES_H 15#ifndef __IA_CSS_MACC_TYPES_H
16#define __IA_CSS_MACC_TYPES_H 16#define __IA_CSS_MACC_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Multi-Axis Color Correction (MACC) parameters. 19* CSS-API header file for Multi-Axis Color Correction (MACC) parameters.
20*/ 20*/
21 21
22/** Number of axes in the MACC table. */ 22/* Number of axes in the MACC table. */
23#define IA_CSS_MACC_NUM_AXES 16 23#define IA_CSS_MACC_NUM_AXES 16
24/** Number of coefficients per MACC axes. */ 24/* Number of coefficients per MACC axes. */
25#define IA_CSS_MACC_NUM_COEFS 4 25#define IA_CSS_MACC_NUM_COEFS 4
26/** The number of planes in the morphing table. */ 26/* The number of planes in the morphing table. */
27 27
28/** Multi-Axis Color Correction (MACC) table. 28/* Multi-Axis Color Correction (MACC) table.
29 * 29 *
30 * ISP block: MACC1 (MACC by only matrix) 30 * ISP block: MACC1 (MACC by only matrix)
31 * MACC2 (MACC by matrix and exponent(ia_css_macc_config)) 31 * MACC2 (MACC by matrix and exponent(ia_css_macc_config))
@@ -51,7 +51,7 @@
51 51
52struct ia_css_macc_table { 52struct ia_css_macc_table {
53 int16_t data[IA_CSS_MACC_NUM_COEFS * IA_CSS_MACC_NUM_AXES]; 53 int16_t data[IA_CSS_MACC_NUM_COEFS * IA_CSS_MACC_NUM_AXES];
54 /**< 16 of 2x2 matix 54 /** 16 of 2x2 matix
55 MACC1: s2.13, [-65536,65535] 55 MACC1: s2.13, [-65536,65535]
56 default/ineffective: 56 default/ineffective:
57 16 of "identity 2x2 matix" {8192,0,0,8192} 57 16 of "identity 2x2 matix" {8192,0,0,8192}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_types.h
index eeaadfeb5a1e..d981394c1c11 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_types.h
@@ -15,11 +15,11 @@
15#ifndef __IA_CSS_OB2_TYPES_H 15#ifndef __IA_CSS_OB2_TYPES_H
16#define __IA_CSS_OB2_TYPES_H 16#define __IA_CSS_OB2_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Optical Black algorithm parameters. 19* CSS-API header file for Optical Black algorithm parameters.
20*/ 20*/
21 21
22/** Optical Black configuration 22/* Optical Black configuration
23 * 23 *
24 * ISP2.6.1: OB2 is used. 24 * ISP2.6.1: OB2 is used.
25 */ 25 */
@@ -27,16 +27,16 @@
27#include "ia_css_frac.h" 27#include "ia_css_frac.h"
28 28
29struct ia_css_ob2_config { 29struct ia_css_ob2_config {
30 ia_css_u0_16 level_gr; /**< Black level for GR pixels. 30 ia_css_u0_16 level_gr; /** Black level for GR pixels.
31 u0.16, [0,65535], 31 u0.16, [0,65535],
32 default/ineffective 0 */ 32 default/ineffective 0 */
33 ia_css_u0_16 level_r; /**< Black level for R pixels. 33 ia_css_u0_16 level_r; /** Black level for R pixels.
34 u0.16, [0,65535], 34 u0.16, [0,65535],
35 default/ineffective 0 */ 35 default/ineffective 0 */
36 ia_css_u0_16 level_b; /**< Black level for B pixels. 36 ia_css_u0_16 level_b; /** Black level for B pixels.
37 u0.16, [0,65535], 37 u0.16, [0,65535],
38 default/ineffective 0 */ 38 default/ineffective 0 */
39 ia_css_u0_16 level_gb; /**< Black level for GB pixels. 39 ia_css_u0_16 level_gb; /** Black level for GB pixels.
40 u0.16, [0,65535], 40 u0.16, [0,65535],
41 default/ineffective 0 */ 41 default/ineffective 0 */
42}; 42};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_types.h
index 88459b6c003d..a9717b8f44ac 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_types.h
@@ -15,51 +15,51 @@
15#ifndef __IA_CSS_OB_TYPES_H 15#ifndef __IA_CSS_OB_TYPES_H
16#define __IA_CSS_OB_TYPES_H 16#define __IA_CSS_OB_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Optical Black level parameters. 19* CSS-API header file for Optical Black level parameters.
20*/ 20*/
21 21
22#include "ia_css_frac.h" 22#include "ia_css_frac.h"
23 23
24/** Optical black mode. 24/* Optical black mode.
25 */ 25 */
26enum ia_css_ob_mode { 26enum ia_css_ob_mode {
27 IA_CSS_OB_MODE_NONE, /**< OB has no effect. */ 27 IA_CSS_OB_MODE_NONE, /** OB has no effect. */
28 IA_CSS_OB_MODE_FIXED, /**< Fixed OB */ 28 IA_CSS_OB_MODE_FIXED, /** Fixed OB */
29 IA_CSS_OB_MODE_RASTER /**< Raster OB */ 29 IA_CSS_OB_MODE_RASTER /** Raster OB */
30}; 30};
31 31
32/** Optical Black level configuration. 32/* Optical Black level configuration.
33 * 33 *
34 * ISP block: OB1 34 * ISP block: OB1
35 * ISP1: OB1 is used. 35 * ISP1: OB1 is used.
36 * ISP2: OB1 is used. 36 * ISP2: OB1 is used.
37 */ 37 */
38struct ia_css_ob_config { 38struct ia_css_ob_config {
39 enum ia_css_ob_mode mode; /**< Mode (None / Fixed / Raster). 39 enum ia_css_ob_mode mode; /** Mode (None / Fixed / Raster).
40 enum, [0,2], 40 enum, [0,2],
41 default 1, ineffective 0 */ 41 default 1, ineffective 0 */
42 ia_css_u0_16 level_gr; /**< Black level for GR pixels 42 ia_css_u0_16 level_gr; /** Black level for GR pixels
43 (used for Fixed Mode only). 43 (used for Fixed Mode only).
44 u0.16, [0,65535], 44 u0.16, [0,65535],
45 default/ineffective 0 */ 45 default/ineffective 0 */
46 ia_css_u0_16 level_r; /**< Black level for R pixels 46 ia_css_u0_16 level_r; /** Black level for R pixels
47 (used for Fixed Mode only). 47 (used for Fixed Mode only).
48 u0.16, [0,65535], 48 u0.16, [0,65535],
49 default/ineffective 0 */ 49 default/ineffective 0 */
50 ia_css_u0_16 level_b; /**< Black level for B pixels 50 ia_css_u0_16 level_b; /** Black level for B pixels
51 (used for Fixed Mode only). 51 (used for Fixed Mode only).
52 u0.16, [0,65535], 52 u0.16, [0,65535],
53 default/ineffective 0 */ 53 default/ineffective 0 */
54 ia_css_u0_16 level_gb; /**< Black level for GB pixels 54 ia_css_u0_16 level_gb; /** Black level for GB pixels
55 (used for Fixed Mode only). 55 (used for Fixed Mode only).
56 u0.16, [0,65535], 56 u0.16, [0,65535],
57 default/ineffective 0 */ 57 default/ineffective 0 */
58 uint16_t start_position; /**< Start position of OB area 58 uint16_t start_position; /** Start position of OB area
59 (used for Raster Mode only). 59 (used for Raster Mode only).
60 u16.0, [0,63], 60 u16.0, [0,63],
61 default/ineffective 0 */ 61 default/ineffective 0 */
62 uint16_t end_position; /**< End position of OB area 62 uint16_t end_position; /** End position of OB area
63 (used for Raster Mode only). 63 (used for Raster Mode only).
64 u16.0, [0,63], 64 u16.0, [0,63],
65 default/ineffective 0 */ 65 default/ineffective 0 */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_param.h
index 26ec27e085c1..eb7defa41145 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_param.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_param.h
@@ -19,7 +19,7 @@
19#include "dma.h" 19#include "dma.h"
20#include "ia_css_frame_comm.h" /* ia_css_frame_sp_info */ 20#include "ia_css_frame_comm.h" /* ia_css_frame_sp_info */
21 21
22/** output frame */ 22/* output frame */
23struct sh_css_isp_output_isp_config { 23struct sh_css_isp_output_isp_config {
24 uint32_t width_a_over_b; 24 uint32_t width_a_over_b;
25 uint32_t height; 25 uint32_t height;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_types.h
index 4335ac28b31d..9c7342fb8145 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_types.h
@@ -15,11 +15,11 @@
15#ifndef __IA_CSS_OUTPUT_TYPES_H 15#ifndef __IA_CSS_OUTPUT_TYPES_H
16#define __IA_CSS_OUTPUT_TYPES_H 16#define __IA_CSS_OUTPUT_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for parameters of output frames. 19* CSS-API header file for parameters of output frames.
20*/ 20*/
21 21
22/** Output frame 22/* Output frame
23 * 23 *
24 * ISP block: output frame 24 * ISP block: output frame
25 */ 25 */
@@ -40,8 +40,8 @@ struct ia_css_output1_configuration {
40}; 40};
41 41
42struct ia_css_output_config { 42struct ia_css_output_config {
43 uint8_t enable_hflip; /**< enable horizontal output mirroring */ 43 uint8_t enable_hflip; /** enable horizontal output mirroring */
44 uint8_t enable_vflip; /**< enable vertical output mirroring */ 44 uint8_t enable_vflip; /** enable vertical output mirroring */
45}; 45};
46 46
47#endif /* __IA_CSS_OUTPUT_TYPES_H */ 47#endif /* __IA_CSS_OUTPUT_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h
index 955fd472a241..62d371841619 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h
@@ -18,7 +18,7 @@
18#include <ia_css_frame_public.h> 18#include <ia_css_frame_public.h>
19#include "sh_css_internal.h" 19#include "sh_css_internal.h"
20 20
21/** qplane frame 21/* qplane frame
22 * 22 *
23 * ISP block: qplane frame 23 * ISP block: qplane frame
24 */ 24 */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_types.h
index 54f8c299d227..5c0b8febd79a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_types.h
@@ -18,7 +18,7 @@
18#include <ia_css_frame_public.h> 18#include <ia_css_frame_public.h>
19#include "sh_css_internal.h" 19#include "sh_css_internal.h"
20 20
21/** Raw frame 21/* Raw frame
22 * 22 *
23 * ISP block: Raw frame 23 * ISP block: Raw frame
24 */ 24 */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_param.h
index 1f1b72a417d1..026443b999a6 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_param.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_param.h
@@ -19,7 +19,7 @@
19#include "sh_css_defs.h" 19#include "sh_css_defs.h"
20#include "dma.h" 20#include "dma.h"
21 21
22/** Reference frame */ 22/* Reference frame */
23struct ia_css_ref_configuration { 23struct ia_css_ref_configuration {
24 const struct ia_css_frame *ref_frames[MAX_NUM_VIDEO_DELAY_FRAMES]; 24 const struct ia_css_frame *ref_frames[MAX_NUM_VIDEO_DELAY_FRAMES];
25 uint32_t dvs_frame_delay; 25 uint32_t dvs_frame_delay;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_types.h
index ce0eaeeee9c6..4750fba268b9 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_types.h
@@ -15,7 +15,7 @@
15#ifndef __IA_CSS_REF_TYPES_H 15#ifndef __IA_CSS_REF_TYPES_H
16#define __IA_CSS_REF_TYPES_H 16#define __IA_CSS_REF_TYPES_H
17 17
18/** Reference frame 18/* Reference frame
19 * 19 *
20 * ISP block: reference frame 20 * ISP block: reference frame
21 */ 21 */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h
index f57ed1ec5981..8d674d2c6427 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h
@@ -15,7 +15,7 @@
15#ifndef __IA_CSS_S3A_TYPES_H 15#ifndef __IA_CSS_S3A_TYPES_H
16#define __IA_CSS_S3A_TYPES_H 16#define __IA_CSS_S3A_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for 3A statistics parameters. 19* CSS-API header file for 3A statistics parameters.
20*/ 20*/
21 21
@@ -25,11 +25,11 @@
25#include "../../../../components/stats_3a/src/stats_3a_public.h" 25#include "../../../../components/stats_3a/src/stats_3a_public.h"
26#endif 26#endif
27 27
28/** 3A configuration. This configures the 3A statistics collection 28/* 3A configuration. This configures the 3A statistics collection
29 * module. 29 * module.
30 */ 30 */
31 31
32/** 3A statistics grid 32/* 3A statistics grid
33 * 33 *
34 * ISP block: S3A1 (3A Support for 3A ver.1 (Histogram is not used for AE)) 34 * ISP block: S3A1 (3A Support for 3A ver.1 (Histogram is not used for AE))
35 * S3A2 (3A Support for 3A ver.2 (Histogram is used for AE)) 35 * S3A2 (3A Support for 3A ver.2 (Histogram is used for AE))
@@ -39,23 +39,23 @@
39struct ia_css_3a_grid_info { 39struct ia_css_3a_grid_info {
40 40
41#if defined(SYSTEM_css_skycam_c0_system) 41#if defined(SYSTEM_css_skycam_c0_system)
42 uint32_t ae_enable; /**< ae enabled in binary, 42 uint32_t ae_enable; /** ae enabled in binary,
43 0:disabled, 1:enabled */ 43 0:disabled, 1:enabled */
44 struct ae_public_config_grid_config ae_grd_info; /**< see description in ae_public.h*/ 44 struct ae_public_config_grid_config ae_grd_info; /** see description in ae_public.h*/
45 45
46 uint32_t awb_enable; /**< awb enabled in binary, 46 uint32_t awb_enable; /** awb enabled in binary,
47 0:disabled, 1:enabled */ 47 0:disabled, 1:enabled */
48 struct awb_public_config_grid_config awb_grd_info; /**< see description in awb_public.h*/ 48 struct awb_public_config_grid_config awb_grd_info; /** see description in awb_public.h*/
49 49
50 uint32_t af_enable; /**< af enabled in binary, 50 uint32_t af_enable; /** af enabled in binary,
51 0:disabled, 1:enabled */ 51 0:disabled, 1:enabled */
52 struct af_public_grid_config af_grd_info; /**< see description in af_public.h*/ 52 struct af_public_grid_config af_grd_info; /** see description in af_public.h*/
53 53
54 uint32_t awb_fr_enable; /**< awb_fr enabled in binary, 54 uint32_t awb_fr_enable; /** awb_fr enabled in binary,
55 0:disabled, 1:enabled */ 55 0:disabled, 1:enabled */
56 struct awb_fr_public_grid_config awb_fr_grd_info;/**< see description in awb_fr_public.h*/ 56 struct awb_fr_public_grid_config awb_fr_grd_info;/** see description in awb_fr_public.h*/
57 57
58 uint32_t elem_bit_depth; /**< TODO:Taken from BYT - need input from AIQ 58 uint32_t elem_bit_depth; /** TODO:Taken from BYT - need input from AIQ
59 if needed for SKC 59 if needed for SKC
60 Bit depth of element used 60 Bit depth of element used
61 to calculate 3A statistics. 61 to calculate 3A statistics.
@@ -63,34 +63,34 @@ struct ia_css_3a_grid_info {
63 bayer bit depth in DSP. */ 63 bayer bit depth in DSP. */
64 64
65#else 65#else
66 uint32_t enable; /**< 3A statistics enabled. 66 uint32_t enable; /** 3A statistics enabled.
67 0:disabled, 1:enabled */ 67 0:disabled, 1:enabled */
68 uint32_t use_dmem; /**< DMEM or VMEM determines layout. 68 uint32_t use_dmem; /** DMEM or VMEM determines layout.
69 0:3A statistics are stored to VMEM, 69 0:3A statistics are stored to VMEM,
70 1:3A statistics are stored to DMEM */ 70 1:3A statistics are stored to DMEM */
71 uint32_t has_histogram; /**< Statistics include histogram. 71 uint32_t has_histogram; /** Statistics include histogram.
72 0:no histogram, 1:has histogram */ 72 0:no histogram, 1:has histogram */
73 uint32_t width; /**< Width of 3A grid table. 73 uint32_t width; /** Width of 3A grid table.
74 (= Horizontal number of grid cells 74 (= Horizontal number of grid cells
75 in table, which cells have effective 75 in table, which cells have effective
76 statistics.) */ 76 statistics.) */
77 uint32_t height; /**< Height of 3A grid table. 77 uint32_t height; /** Height of 3A grid table.
78 (= Vertical number of grid cells 78 (= Vertical number of grid cells
79 in table, which cells have effective 79 in table, which cells have effective
80 statistics.) */ 80 statistics.) */
81 uint32_t aligned_width; /**< Horizontal stride (for alloc). 81 uint32_t aligned_width; /** Horizontal stride (for alloc).
82 (= Horizontal number of grid cells 82 (= Horizontal number of grid cells
83 in table, which means 83 in table, which means
84 the allocated width.) */ 84 the allocated width.) */
85 uint32_t aligned_height; /**< Vertical stride (for alloc). 85 uint32_t aligned_height; /** Vertical stride (for alloc).
86 (= Vertical number of grid cells 86 (= Vertical number of grid cells
87 in table, which means 87 in table, which means
88 the allocated height.) */ 88 the allocated height.) */
89 uint32_t bqs_per_grid_cell; /**< Grid cell size in BQ(Bayer Quad) unit. 89 uint32_t bqs_per_grid_cell; /** Grid cell size in BQ(Bayer Quad) unit.
90 (1BQ means {Gr,R,B,Gb}(2x2 pixels).) 90 (1BQ means {Gr,R,B,Gb}(2x2 pixels).)
91 Valid values are 8,16,32,64. */ 91 Valid values are 8,16,32,64. */
92 uint32_t deci_factor_log2; /**< log2 of bqs_per_grid_cell. */ 92 uint32_t deci_factor_log2; /** log2 of bqs_per_grid_cell. */
93 uint32_t elem_bit_depth; /**< Bit depth of element used 93 uint32_t elem_bit_depth; /** Bit depth of element used
94 to calculate 3A statistics. 94 to calculate 3A statistics.
95 This is 13, which is the normalized 95 This is 13, which is the normalized
96 bayer bit depth in DSP. */ 96 bayer bit depth in DSP. */
@@ -148,7 +148,7 @@ struct ia_css_3a_grid_info {
148 * However, that will require driver/ 3A lib modifications. 148 * However, that will require driver/ 3A lib modifications.
149 */ 149 */
150 150
151/** 3A configuration. This configures the 3A statistics collection 151/* 3A configuration. This configures the 3A statistics collection
152 * module. 152 * module.
153 * 153 *
154 * ae_y_*: Coefficients to calculate luminance from bayer. 154 * ae_y_*: Coefficients to calculate luminance from bayer.
@@ -167,38 +167,38 @@ struct ia_css_3a_grid_info {
167 * ISP2: S3A2 and SDVS2 are used. 167 * ISP2: S3A2 and SDVS2 are used.
168 */ 168 */
169struct ia_css_3a_config { 169struct ia_css_3a_config {
170 ia_css_u0_16 ae_y_coef_r; /**< Weight of R for Y. 170 ia_css_u0_16 ae_y_coef_r; /** Weight of R for Y.
171 u0.16, [0,65535], 171 u0.16, [0,65535],
172 default/ineffective 25559 */ 172 default/ineffective 25559 */
173 ia_css_u0_16 ae_y_coef_g; /**< Weight of G for Y. 173 ia_css_u0_16 ae_y_coef_g; /** Weight of G for Y.
174 u0.16, [0,65535], 174 u0.16, [0,65535],
175 default/ineffective 32768 */ 175 default/ineffective 32768 */
176 ia_css_u0_16 ae_y_coef_b; /**< Weight of B for Y. 176 ia_css_u0_16 ae_y_coef_b; /** Weight of B for Y.
177 u0.16, [0,65535], 177 u0.16, [0,65535],
178 default/ineffective 7209 */ 178 default/ineffective 7209 */
179 ia_css_u0_16 awb_lg_high_raw; /**< AWB level gate high for raw. 179 ia_css_u0_16 awb_lg_high_raw; /** AWB level gate high for raw.
180 u0.16, [0,65535], 180 u0.16, [0,65535],
181 default 65472(=1023*64), 181 default 65472(=1023*64),
182 ineffective 65535 */ 182 ineffective 65535 */
183 ia_css_u0_16 awb_lg_low; /**< AWB level gate low. 183 ia_css_u0_16 awb_lg_low; /** AWB level gate low.
184 u0.16, [0,65535], 184 u0.16, [0,65535],
185 default 64(=1*64), 185 default 64(=1*64),
186 ineffective 0 */ 186 ineffective 0 */
187 ia_css_u0_16 awb_lg_high; /**< AWB level gate high. 187 ia_css_u0_16 awb_lg_high; /** AWB level gate high.
188 u0.16, [0,65535], 188 u0.16, [0,65535],
189 default 65535, 189 default 65535,
190 ineffective 65535 */ 190 ineffective 65535 */
191 ia_css_s0_15 af_fir1_coef[7]; /**< AF FIR coefficients of fir1. 191 ia_css_s0_15 af_fir1_coef[7]; /** AF FIR coefficients of fir1.
192 s0.15, [-32768,32767], 192 s0.15, [-32768,32767],
193 default/ineffective 193 default/ineffective
194 -6689,-12207,-32768,32767,12207,6689,0 */ 194 -6689,-12207,-32768,32767,12207,6689,0 */
195 ia_css_s0_15 af_fir2_coef[7]; /**< AF FIR coefficients of fir2. 195 ia_css_s0_15 af_fir2_coef[7]; /** AF FIR coefficients of fir2.
196 s0.15, [-32768,32767], 196 s0.15, [-32768,32767],
197 default/ineffective 197 default/ineffective
198 2053,0,-18437,32767,-18437,2053,0 */ 198 2053,0,-18437,32767,-18437,2053,0 */
199}; 199};
200 200
201/** 3A statistics. This structure describes the data stored 201/* 3A statistics. This structure describes the data stored
202 * in each 3A grid point. 202 * in each 3A grid point.
203 * 203 *
204 * ISP block: S3A1 (3A Support for 3A ver.1) (Histogram is not used for AE) 204 * ISP block: S3A1 (3A Support for 3A ver.1) (Histogram is not used for AE)
@@ -209,43 +209,43 @@ struct ia_css_3a_config {
209 * ISP2: S3A2 is used. 209 * ISP2: S3A2 is used.
210 */ 210 */
211struct ia_css_3a_output { 211struct ia_css_3a_output {
212 int32_t ae_y; /**< Sum of Y in a statistics window, for AE. 212 int32_t ae_y; /** Sum of Y in a statistics window, for AE.
213 (u19.13) */ 213 (u19.13) */
214 int32_t awb_cnt; /**< Number of effective pixels 214 int32_t awb_cnt; /** Number of effective pixels
215 in a statistics window. 215 in a statistics window.
216 Pixels passed by the AWB level gate check are 216 Pixels passed by the AWB level gate check are
217 judged as "effective". (u32) */ 217 judged as "effective". (u32) */
218 int32_t awb_gr; /**< Sum of Gr in a statistics window, for AWB. 218 int32_t awb_gr; /** Sum of Gr in a statistics window, for AWB.
219 All Gr pixels (not only for effective pixels) 219 All Gr pixels (not only for effective pixels)
220 are summed. (u19.13) */ 220 are summed. (u19.13) */
221 int32_t awb_r; /**< Sum of R in a statistics window, for AWB. 221 int32_t awb_r; /** Sum of R in a statistics window, for AWB.
222 All R pixels (not only for effective pixels) 222 All R pixels (not only for effective pixels)
223 are summed. (u19.13) */ 223 are summed. (u19.13) */
224 int32_t awb_b; /**< Sum of B in a statistics window, for AWB. 224 int32_t awb_b; /** Sum of B in a statistics window, for AWB.
225 All B pixels (not only for effective pixels) 225 All B pixels (not only for effective pixels)
226 are summed. (u19.13) */ 226 are summed. (u19.13) */
227 int32_t awb_gb; /**< Sum of Gb in a statistics window, for AWB. 227 int32_t awb_gb; /** Sum of Gb in a statistics window, for AWB.
228 All Gb pixels (not only for effective pixels) 228 All Gb pixels (not only for effective pixels)
229 are summed. (u19.13) */ 229 are summed. (u19.13) */
230 int32_t af_hpf1; /**< Sum of |Y| following high pass filter af_fir1 230 int32_t af_hpf1; /** Sum of |Y| following high pass filter af_fir1
231 within a statistics window, for AF. (u19.13) */ 231 within a statistics window, for AF. (u19.13) */
232 int32_t af_hpf2; /**< Sum of |Y| following high pass filter af_fir2 232 int32_t af_hpf2; /** Sum of |Y| following high pass filter af_fir2
233 within a statistics window, for AF. (u19.13) */ 233 within a statistics window, for AF. (u19.13) */
234}; 234};
235 235
236 236
237/** 3A Statistics. This structure describes the statistics that are generated 237/* 3A Statistics. This structure describes the statistics that are generated
238 * using the provided configuration (ia_css_3a_config). 238 * using the provided configuration (ia_css_3a_config).
239 */ 239 */
240struct ia_css_3a_statistics { 240struct ia_css_3a_statistics {
241 struct ia_css_3a_grid_info grid; /**< grid info contains the dimensions of the 3A grid */ 241 struct ia_css_3a_grid_info grid; /** grid info contains the dimensions of the 3A grid */
242 struct ia_css_3a_output *data; /**< the pointer to 3a_output[grid.width * grid.height] 242 struct ia_css_3a_output *data; /** the pointer to 3a_output[grid.width * grid.height]
243 containing the 3A statistics */ 243 containing the 3A statistics */
244 struct ia_css_3a_rgby_output *rgby_data;/**< the pointer to 3a_rgby_output[256] 244 struct ia_css_3a_rgby_output *rgby_data;/** the pointer to 3a_rgby_output[256]
245 containing the histogram */ 245 containing the histogram */
246}; 246};
247 247
248/** Histogram (Statistics for AE). 248/* Histogram (Statistics for AE).
249 * 249 *
250 * 4 histograms(r,g,b,y), 250 * 4 histograms(r,g,b,y),
251 * 256 bins for each histogram, unsigned 24bit value for each bin. 251 * 256 bins for each histogram, unsigned 24bit value for each bin.
@@ -256,10 +256,10 @@ struct ia_css_3a_statistics {
256 * ISP2: HIST2 is used. 256 * ISP2: HIST2 is used.
257 */ 257 */
258struct ia_css_3a_rgby_output { 258struct ia_css_3a_rgby_output {
259 uint32_t r; /**< Number of R of one bin of the histogram R. (u24) */ 259 uint32_t r; /** Number of R of one bin of the histogram R. (u24) */
260 uint32_t g; /**< Number of G of one bin of the histogram G. (u24) */ 260 uint32_t g; /** Number of G of one bin of the histogram G. (u24) */
261 uint32_t b; /**< Number of B of one bin of the histogram B. (u24) */ 261 uint32_t b; /** Number of B of one bin of the histogram B. (u24) */
262 uint32_t y; /**< Number of Y of one bin of the histogram Y. (u24) */ 262 uint32_t y; /** Number of Y of one bin of the histogram Y. (u24) */
263}; 263};
264 264
265#endif /* __IA_CSS_S3A_TYPES_H */ 265#endif /* __IA_CSS_S3A_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a_stat_ls/ia_css_s3a_stat_ls_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a_stat_ls/ia_css_s3a_stat_ls_param.h
index 8b2b56b0310b..9aa019539f47 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a_stat_ls/ia_css_s3a_stat_ls_param.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a_stat_ls/ia_css_s3a_stat_ls_param.h
@@ -22,7 +22,7 @@
22 22
23#define NUM_S3A_LS 1 23#define NUM_S3A_LS 1
24 24
25/** s3a statistics store */ 25/* s3a statistics store */
26#ifdef ISP2401 26#ifdef ISP2401
27struct ia_css_s3a_stat_ls_configuration { 27struct ia_css_s3a_stat_ls_configuration {
28 uint32_t s3a_grid_size_log2; 28 uint32_t s3a_grid_size_log2;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.h
index 44e3c43a5d4a..b35ac3e4009b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.h
@@ -32,7 +32,7 @@ ia_css_sc_dump(
32 unsigned level); 32 unsigned level);
33 33
34#ifdef ISP2401 34#ifdef ISP2401
35/** @brief Configure the shading correction. 35/* @brief Configure the shading correction.
36 * @param[out] to Parameters used in the shading correction kernel in the isp. 36 * @param[out] to Parameters used in the shading correction kernel in the isp.
37 * @param[in] from Parameters passed from the host. 37 * @param[in] from Parameters passed from the host.
38 * @param[in] size Size of the sh_css_isp_sc_isp_config structure. 38 * @param[in] size Size of the sh_css_isp_sc_isp_config structure.
@@ -45,7 +45,7 @@ ia_css_sc_config(
45 const struct ia_css_sc_configuration *from, 45 const struct ia_css_sc_configuration *from,
46 unsigned size); 46 unsigned size);
47 47
48/** @brief Configure the shading correction. 48/* @brief Configure the shading correction.
49 * @param[in] binary The binary, which has the shading correction. 49 * @param[in] binary The binary, which has the shading correction.
50 * @param[in] internal_frame_origin_x_bqs_on_sctbl 50 * @param[in] internal_frame_origin_x_bqs_on_sctbl
51 * X coordinate (in bqs) of the origin of the internal frame on the shading table. 51 * X coordinate (in bqs) of the origin of the internal frame on the shading table.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_types.h
index 5a833bc48af1..30ce499ac8cf 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_types.h
@@ -15,25 +15,25 @@
15#ifndef __IA_CSS_SC_TYPES_H 15#ifndef __IA_CSS_SC_TYPES_H
16#define __IA_CSS_SC_TYPES_H 16#define __IA_CSS_SC_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Lens Shading Correction (SC) parameters. 19* CSS-API header file for Lens Shading Correction (SC) parameters.
20*/ 20*/
21 21
22 22
23/** Number of color planes in the shading table. */ 23/* Number of color planes in the shading table. */
24#define IA_CSS_SC_NUM_COLORS 4 24#define IA_CSS_SC_NUM_COLORS 4
25 25
26/** The 4 colors that a shading table consists of. 26/* The 4 colors that a shading table consists of.
27 * For each color we store a grid of values. 27 * For each color we store a grid of values.
28 */ 28 */
29enum ia_css_sc_color { 29enum ia_css_sc_color {
30 IA_CSS_SC_COLOR_GR, /**< Green on a green-red line */ 30 IA_CSS_SC_COLOR_GR, /** Green on a green-red line */
31 IA_CSS_SC_COLOR_R, /**< Red */ 31 IA_CSS_SC_COLOR_R, /** Red */
32 IA_CSS_SC_COLOR_B, /**< Blue */ 32 IA_CSS_SC_COLOR_B, /** Blue */
33 IA_CSS_SC_COLOR_GB /**< Green on a green-blue line */ 33 IA_CSS_SC_COLOR_GB /** Green on a green-blue line */
34}; 34};
35 35
36/** Lens Shading Correction table. 36/* Lens Shading Correction table.
37 * 37 *
38 * This describes the color shading artefacts 38 * This describes the color shading artefacts
39 * introduced by lens imperfections. To correct artefacts, 39 * introduced by lens imperfections. To correct artefacts,
@@ -64,39 +64,39 @@ enum ia_css_sc_color {
64 * ISP2: SC1 is used. 64 * ISP2: SC1 is used.
65 */ 65 */
66struct ia_css_shading_table { 66struct ia_css_shading_table {
67 uint32_t enable; /**< Set to false for no shading correction. 67 uint32_t enable; /** Set to false for no shading correction.
68 The data field can be NULL when enable == true */ 68 The data field can be NULL when enable == true */
69/* ------ deprecated(bz675) : from ------ */ 69/* ------ deprecated(bz675) : from ------ */
70 uint32_t sensor_width; /**< Native sensor width in pixels. */ 70 uint32_t sensor_width; /** Native sensor width in pixels. */
71 uint32_t sensor_height; /**< Native sensor height in lines. 71 uint32_t sensor_height; /** Native sensor height in lines.
72 When shading_settings.enable_shading_table_conversion is set 72 When shading_settings.enable_shading_table_conversion is set
73 as 0, sensor_width and sensor_height are NOT used. 73 as 0, sensor_width and sensor_height are NOT used.
74 These are used only in the legacy shading table conversion 74 These are used only in the legacy shading table conversion
75 in the css, when shading_settings. 75 in the css, when shading_settings.
76 enable_shading_table_conversion is set as 1. */ 76 enable_shading_table_conversion is set as 1. */
77/* ------ deprecated(bz675) : to ------ */ 77/* ------ deprecated(bz675) : to ------ */
78 uint32_t width; /**< Number of data points per line per color. 78 uint32_t width; /** Number of data points per line per color.
79 u8.0, [0,81] */ 79 u8.0, [0,81] */
80 uint32_t height; /**< Number of lines of data points per color. 80 uint32_t height; /** Number of lines of data points per color.
81 u8.0, [0,61] */ 81 u8.0, [0,61] */
82 uint32_t fraction_bits; /**< Bits of fractional part in the data 82 uint32_t fraction_bits; /** Bits of fractional part in the data
83 points. 83 points.
84 u8.0, [0,13] */ 84 u8.0, [0,13] */
85 uint16_t *data[IA_CSS_SC_NUM_COLORS]; 85 uint16_t *data[IA_CSS_SC_NUM_COLORS];
86 /**< Table data, one array for each color. 86 /** Table data, one array for each color.
87 Use ia_css_sc_color to index this array. 87 Use ia_css_sc_color to index this array.
88 u[13-fraction_bits].[fraction_bits], [0,8191] */ 88 u[13-fraction_bits].[fraction_bits], [0,8191] */
89}; 89};
90 90
91/* ------ deprecated(bz675) : from ------ */ 91/* ------ deprecated(bz675) : from ------ */
92/** Shading Correction settings. 92/* Shading Correction settings.
93 * 93 *
94 * NOTE: 94 * NOTE:
95 * This structure should be removed when the shading table conversion is 95 * This structure should be removed when the shading table conversion is
96 * removed from the css. 96 * removed from the css.
97 */ 97 */
98struct ia_css_shading_settings { 98struct ia_css_shading_settings {
99 uint32_t enable_shading_table_conversion; /**< Set to 0, 99 uint32_t enable_shading_table_conversion; /** Set to 0,
100 if the conversion of the shading table should be disabled 100 if the conversion of the shading table should be disabled
101 in the css. (default 1) 101 in the css. (default 1)
102 0: The shading table is directly sent to the isp. 102 0: The shading table is directly sent to the isp.
@@ -119,14 +119,14 @@ struct ia_css_shading_settings {
119 119
120#ifdef ISP2401 120#ifdef ISP2401
121 121
122/** Shading Correction configuration. 122/* Shading Correction configuration.
123 * 123 *
124 * NOTE: The shading table size is larger than or equal to the internal frame size. 124 * NOTE: The shading table size is larger than or equal to the internal frame size.
125 */ 125 */
126struct ia_css_sc_configuration { 126struct ia_css_sc_configuration {
127 uint32_t internal_frame_origin_x_bqs_on_sctbl; /**< Origin X (in bqs) of internal frame on shading table. */ 127 uint32_t internal_frame_origin_x_bqs_on_sctbl; /** Origin X (in bqs) of internal frame on shading table. */
128 uint32_t internal_frame_origin_y_bqs_on_sctbl; /**< Origin Y (in bqs) of internal frame on shading table. */ 128 uint32_t internal_frame_origin_y_bqs_on_sctbl; /** Origin Y (in bqs) of internal frame on shading table. */
129 /**< NOTE: bqs = size in BQ(Bayer Quad) unit. 129 /** NOTE: bqs = size in BQ(Bayer Quad) unit.
130 1BQ means {Gr,R,B,Gb}(2x2 pixels). 130 1BQ means {Gr,R,B,Gb}(2x2 pixels).
131 Horizontal 1 bqs corresponds to horizontal 2 pixels. 131 Horizontal 1 bqs corresponds to horizontal 2 pixels.
132 Vertical 1 bqs corresponds to vertical 2 pixels. */ 132 Vertical 1 bqs corresponds to vertical 2 pixels. */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common_types.h
index 295dc60b778c..031983c357e4 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common_types.h
@@ -15,21 +15,21 @@
15#ifndef __IA_CSS_SDIS_COMMON_TYPES_H 15#ifndef __IA_CSS_SDIS_COMMON_TYPES_H
16#define __IA_CSS_SDIS_COMMON_TYPES_H 16#define __IA_CSS_SDIS_COMMON_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for DVS statistics parameters. 19* CSS-API header file for DVS statistics parameters.
20*/ 20*/
21 21
22#include <type_support.h> 22#include <type_support.h>
23 23
24/** DVS statistics grid dimensions in number of cells. 24/* DVS statistics grid dimensions in number of cells.
25 */ 25 */
26 26
27struct ia_css_dvs_grid_dim { 27struct ia_css_dvs_grid_dim {
28 uint32_t width; /**< Width of DVS grid table in cells */ 28 uint32_t width; /** Width of DVS grid table in cells */
29 uint32_t height; /**< Height of DVS grid table in cells */ 29 uint32_t height; /** Height of DVS grid table in cells */
30}; 30};
31 31
32/** DVS statistics dimensions in number of cells for 32/* DVS statistics dimensions in number of cells for
33 * grid, coeffieicient and projection. 33 * grid, coeffieicient and projection.
34 */ 34 */
35 35
@@ -55,7 +55,7 @@ struct ia_css_sdis_info {
55 0, /* dis_deci_factor_log2 */ \ 55 0, /* dis_deci_factor_log2 */ \
56 } 56 }
57 57
58/** DVS statistics grid 58/* DVS statistics grid
59 * 59 *
60 * ISP block: SDVS1 (DIS/DVS Support for DIS/DVS ver.1 (2-axes)) 60 * ISP block: SDVS1 (DIS/DVS Support for DIS/DVS ver.1 (2-axes))
61 * SDVS2 (DVS Support for DVS ver.2 (6-axes)) 61 * SDVS2 (DVS Support for DVS ver.2 (6-axes))
@@ -63,23 +63,23 @@ struct ia_css_sdis_info {
63 * ISP2: SDVS2 is used. 63 * ISP2: SDVS2 is used.
64 */ 64 */
65struct ia_css_dvs_grid_res { 65struct ia_css_dvs_grid_res {
66 uint32_t width; /**< Width of DVS grid table. 66 uint32_t width; /** Width of DVS grid table.
67 (= Horizontal number of grid cells 67 (= Horizontal number of grid cells
68 in table, which cells have effective 68 in table, which cells have effective
69 statistics.) 69 statistics.)
70 For DVS1, this is equal to 70 For DVS1, this is equal to
71 the number of vertical statistics. */ 71 the number of vertical statistics. */
72 uint32_t aligned_width; /**< Stride of each grid line. 72 uint32_t aligned_width; /** Stride of each grid line.
73 (= Horizontal number of grid cells 73 (= Horizontal number of grid cells
74 in table, which means 74 in table, which means
75 the allocated width.) */ 75 the allocated width.) */
76 uint32_t height; /**< Height of DVS grid table. 76 uint32_t height; /** Height of DVS grid table.
77 (= Vertical number of grid cells 77 (= Vertical number of grid cells
78 in table, which cells have effective 78 in table, which cells have effective
79 statistics.) 79 statistics.)
80 For DVS1, This is equal to 80 For DVS1, This is equal to
81 the number of horizontal statistics. */ 81 the number of horizontal statistics. */
82 uint32_t aligned_height;/**< Stride of each grid column. 82 uint32_t aligned_height;/** Stride of each grid column.
83 (= Vertical number of grid cells 83 (= Vertical number of grid cells
84 in table, which means 84 in table, which means
85 the allocated height.) */ 85 the allocated height.) */
@@ -89,125 +89,125 @@ struct ia_css_dvs_grid_res {
89 * However, that implies driver I/F changes 89 * However, that implies driver I/F changes
90 */ 90 */
91struct ia_css_dvs_grid_info { 91struct ia_css_dvs_grid_info {
92 uint32_t enable; /**< DVS statistics enabled. 92 uint32_t enable; /** DVS statistics enabled.
93 0:disabled, 1:enabled */ 93 0:disabled, 1:enabled */
94 uint32_t width; /**< Width of DVS grid table. 94 uint32_t width; /** Width of DVS grid table.
95 (= Horizontal number of grid cells 95 (= Horizontal number of grid cells
96 in table, which cells have effective 96 in table, which cells have effective
97 statistics.) 97 statistics.)
98 For DVS1, this is equal to 98 For DVS1, this is equal to
99 the number of vertical statistics. */ 99 the number of vertical statistics. */
100 uint32_t aligned_width; /**< Stride of each grid line. 100 uint32_t aligned_width; /** Stride of each grid line.
101 (= Horizontal number of grid cells 101 (= Horizontal number of grid cells
102 in table, which means 102 in table, which means
103 the allocated width.) */ 103 the allocated width.) */
104 uint32_t height; /**< Height of DVS grid table. 104 uint32_t height; /** Height of DVS grid table.
105 (= Vertical number of grid cells 105 (= Vertical number of grid cells
106 in table, which cells have effective 106 in table, which cells have effective
107 statistics.) 107 statistics.)
108 For DVS1, This is equal to 108 For DVS1, This is equal to
109 the number of horizontal statistics. */ 109 the number of horizontal statistics. */
110 uint32_t aligned_height;/**< Stride of each grid column. 110 uint32_t aligned_height;/** Stride of each grid column.
111 (= Vertical number of grid cells 111 (= Vertical number of grid cells
112 in table, which means 112 in table, which means
113 the allocated height.) */ 113 the allocated height.) */
114 uint32_t bqs_per_grid_cell; /**< Grid cell size in BQ(Bayer Quad) unit. 114 uint32_t bqs_per_grid_cell; /** Grid cell size in BQ(Bayer Quad) unit.
115 (1BQ means {Gr,R,B,Gb}(2x2 pixels).) 115 (1BQ means {Gr,R,B,Gb}(2x2 pixels).)
116 For DVS1, valid value is 64. 116 For DVS1, valid value is 64.
117 For DVS2, valid value is only 64, 117 For DVS2, valid value is only 64,
118 currently. */ 118 currently. */
119 uint32_t num_hor_coefs; /**< Number of horizontal coefficients. */ 119 uint32_t num_hor_coefs; /** Number of horizontal coefficients. */
120 uint32_t num_ver_coefs; /**< Number of vertical coefficients. */ 120 uint32_t num_ver_coefs; /** Number of vertical coefficients. */
121}; 121};
122 122
123/** Number of DVS statistics levels 123/* Number of DVS statistics levels
124 */ 124 */
125#define IA_CSS_DVS_STAT_NUM_OF_LEVELS 3 125#define IA_CSS_DVS_STAT_NUM_OF_LEVELS 3
126 126
127/** DVS statistics generated by accelerator global configuration 127/* DVS statistics generated by accelerator global configuration
128 */ 128 */
129struct dvs_stat_public_dvs_global_cfg { 129struct dvs_stat_public_dvs_global_cfg {
130 unsigned char kappa; 130 unsigned char kappa;
131 /**< DVS statistics global configuration - kappa */ 131 /** DVS statistics global configuration - kappa */
132 unsigned char match_shift; 132 unsigned char match_shift;
133 /**< DVS statistics global configuration - match_shift */ 133 /** DVS statistics global configuration - match_shift */
134 unsigned char ybin_mode; 134 unsigned char ybin_mode;
135 /**< DVS statistics global configuration - y binning mode */ 135 /** DVS statistics global configuration - y binning mode */
136}; 136};
137 137
138/** DVS statistics generated by accelerator level grid 138/* DVS statistics generated by accelerator level grid
139 * configuration 139 * configuration
140 */ 140 */
141struct dvs_stat_public_dvs_level_grid_cfg { 141struct dvs_stat_public_dvs_level_grid_cfg {
142 unsigned char grid_width; 142 unsigned char grid_width;
143 /**< DVS statistics grid width */ 143 /** DVS statistics grid width */
144 unsigned char grid_height; 144 unsigned char grid_height;
145 /**< DVS statistics grid height */ 145 /** DVS statistics grid height */
146 unsigned char block_width; 146 unsigned char block_width;
147 /**< DVS statistics block width */ 147 /** DVS statistics block width */
148 unsigned char block_height; 148 unsigned char block_height;
149 /**< DVS statistics block height */ 149 /** DVS statistics block height */
150}; 150};
151 151
152/** DVS statistics generated by accelerator level grid start 152/* DVS statistics generated by accelerator level grid start
153 * configuration 153 * configuration
154 */ 154 */
155struct dvs_stat_public_dvs_level_grid_start { 155struct dvs_stat_public_dvs_level_grid_start {
156 unsigned short x_start; 156 unsigned short x_start;
157 /**< DVS statistics level x start */ 157 /** DVS statistics level x start */
158 unsigned short y_start; 158 unsigned short y_start;
159 /**< DVS statistics level y start */ 159 /** DVS statistics level y start */
160 unsigned char enable; 160 unsigned char enable;
161 /**< DVS statistics level enable */ 161 /** DVS statistics level enable */
162}; 162};
163 163
164/** DVS statistics generated by accelerator level grid end 164/* DVS statistics generated by accelerator level grid end
165 * configuration 165 * configuration
166 */ 166 */
167struct dvs_stat_public_dvs_level_grid_end { 167struct dvs_stat_public_dvs_level_grid_end {
168 unsigned short x_end; 168 unsigned short x_end;
169 /**< DVS statistics level x end */ 169 /** DVS statistics level x end */
170 unsigned short y_end; 170 unsigned short y_end;
171 /**< DVS statistics level y end */ 171 /** DVS statistics level y end */
172}; 172};
173 173
174/** DVS statistics generated by accelerator Feature Extraction 174/* DVS statistics generated by accelerator Feature Extraction
175 * Region Of Interest (FE-ROI) configuration 175 * Region Of Interest (FE-ROI) configuration
176 */ 176 */
177struct dvs_stat_public_dvs_level_fe_roi_cfg { 177struct dvs_stat_public_dvs_level_fe_roi_cfg {
178 unsigned char x_start; 178 unsigned char x_start;
179 /**< DVS statistics fe-roi level x start */ 179 /** DVS statistics fe-roi level x start */
180 unsigned char y_start; 180 unsigned char y_start;
181 /**< DVS statistics fe-roi level y start */ 181 /** DVS statistics fe-roi level y start */
182 unsigned char x_end; 182 unsigned char x_end;
183 /**< DVS statistics fe-roi level x end */ 183 /** DVS statistics fe-roi level x end */
184 unsigned char y_end; 184 unsigned char y_end;
185 /**< DVS statistics fe-roi level y end */ 185 /** DVS statistics fe-roi level y end */
186}; 186};
187 187
188/** DVS statistics generated by accelerator public configuration 188/* DVS statistics generated by accelerator public configuration
189 */ 189 */
190struct dvs_stat_public_dvs_grd_cfg { 190struct dvs_stat_public_dvs_grd_cfg {
191 struct dvs_stat_public_dvs_level_grid_cfg grd_cfg; 191 struct dvs_stat_public_dvs_level_grid_cfg grd_cfg;
192 /**< DVS statistics level grid configuration */ 192 /** DVS statistics level grid configuration */
193 struct dvs_stat_public_dvs_level_grid_start grd_start; 193 struct dvs_stat_public_dvs_level_grid_start grd_start;
194 /**< DVS statistics level grid start configuration */ 194 /** DVS statistics level grid start configuration */
195 struct dvs_stat_public_dvs_level_grid_end grd_end; 195 struct dvs_stat_public_dvs_level_grid_end grd_end;
196 /**< DVS statistics level grid end configuration */ 196 /** DVS statistics level grid end configuration */
197}; 197};
198 198
199/** DVS statistics grid generated by accelerator 199/* DVS statistics grid generated by accelerator
200 */ 200 */
201struct ia_css_dvs_stat_grid_info { 201struct ia_css_dvs_stat_grid_info {
202 struct dvs_stat_public_dvs_global_cfg dvs_gbl_cfg; 202 struct dvs_stat_public_dvs_global_cfg dvs_gbl_cfg;
203 /**< DVS statistics global configuration (kappa, match, binning) */ 203 /** DVS statistics global configuration (kappa, match, binning) */
204 struct dvs_stat_public_dvs_grd_cfg grd_cfg[IA_CSS_DVS_STAT_NUM_OF_LEVELS]; 204 struct dvs_stat_public_dvs_grd_cfg grd_cfg[IA_CSS_DVS_STAT_NUM_OF_LEVELS];
205 /**< DVS statistics grid configuration (blocks and grids) */ 205 /** DVS statistics grid configuration (blocks and grids) */
206 struct dvs_stat_public_dvs_level_fe_roi_cfg fe_roi_cfg[IA_CSS_DVS_STAT_NUM_OF_LEVELS]; 206 struct dvs_stat_public_dvs_level_fe_roi_cfg fe_roi_cfg[IA_CSS_DVS_STAT_NUM_OF_LEVELS];
207 /**< DVS statistics FE ROI (region of interest) configuration */ 207 /** DVS statistics FE ROI (region of interest) configuration */
208}; 208};
209 209
210/** DVS statistics generated by accelerator default grid info 210/* DVS statistics generated by accelerator default grid info
211 */ 211 */
212#define DEFAULT_DVS_GRID_INFO { \ 212#define DEFAULT_DVS_GRID_INFO { \
213{ \ 213{ \
@@ -219,14 +219,14 @@ struct ia_css_dvs_stat_grid_info {
219} 219}
220 220
221 221
222/** Union that holds all types of DVS statistics grid info in 222/* Union that holds all types of DVS statistics grid info in
223 * CSS format 223 * CSS format
224 * */ 224 * */
225union ia_css_dvs_grid_u { 225union ia_css_dvs_grid_u {
226 struct ia_css_dvs_stat_grid_info dvs_stat_grid_info; 226 struct ia_css_dvs_stat_grid_info dvs_stat_grid_info;
227 /**< DVS statistics produced by accelerator grid info */ 227 /** DVS statistics produced by accelerator grid info */
228 struct ia_css_dvs_grid_info dvs_grid_info; 228 struct ia_css_dvs_grid_info dvs_grid_info;
229 /**< DVS (DVS1/DVS2) grid info */ 229 /** DVS (DVS1/DVS2) grid info */
230}; 230};
231 231
232#endif /* __IA_CSS_SDIS_COMMON_TYPES_H */ 232#endif /* __IA_CSS_SDIS_COMMON_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h
index d408b58a027d..d2ee57008fb6 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h
@@ -15,38 +15,38 @@
15#ifndef __IA_CSS_SDIS_TYPES_H 15#ifndef __IA_CSS_SDIS_TYPES_H
16#define __IA_CSS_SDIS_TYPES_H 16#define __IA_CSS_SDIS_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for DVS statistics parameters. 19* CSS-API header file for DVS statistics parameters.
20*/ 20*/
21 21
22/** Number of DVS coefficient types */ 22/* Number of DVS coefficient types */
23#define IA_CSS_DVS_NUM_COEF_TYPES 6 23#define IA_CSS_DVS_NUM_COEF_TYPES 6
24 24
25#ifndef PIPE_GENERATION 25#ifndef PIPE_GENERATION
26#include "isp/kernels/sdis/common/ia_css_sdis_common_types.h" 26#include "isp/kernels/sdis/common/ia_css_sdis_common_types.h"
27#endif 27#endif
28 28
29/** DVS 1.0 Coefficients. 29/* DVS 1.0 Coefficients.
30 * This structure describes the coefficients that are needed for the dvs statistics. 30 * This structure describes the coefficients that are needed for the dvs statistics.
31 */ 31 */
32 32
33struct ia_css_dvs_coefficients { 33struct ia_css_dvs_coefficients {
34 struct ia_css_dvs_grid_info grid;/**< grid info contains the dimensions of the dvs grid */ 34 struct ia_css_dvs_grid_info grid;/** grid info contains the dimensions of the dvs grid */
35 int16_t *hor_coefs; /**< the pointer to int16_t[grid.num_hor_coefs * IA_CSS_DVS_NUM_COEF_TYPES] 35 int16_t *hor_coefs; /** the pointer to int16_t[grid.num_hor_coefs * IA_CSS_DVS_NUM_COEF_TYPES]
36 containing the horizontal coefficients */ 36 containing the horizontal coefficients */
37 int16_t *ver_coefs; /**< the pointer to int16_t[grid.num_ver_coefs * IA_CSS_DVS_NUM_COEF_TYPES] 37 int16_t *ver_coefs; /** the pointer to int16_t[grid.num_ver_coefs * IA_CSS_DVS_NUM_COEF_TYPES]
38 containing the vertical coefficients */ 38 containing the vertical coefficients */
39}; 39};
40 40
41/** DVS 1.0 Statistics. 41/* DVS 1.0 Statistics.
42 * This structure describes the statistics that are generated using the provided coefficients. 42 * This structure describes the statistics that are generated using the provided coefficients.
43 */ 43 */
44 44
45struct ia_css_dvs_statistics { 45struct ia_css_dvs_statistics {
46 struct ia_css_dvs_grid_info grid;/**< grid info contains the dimensions of the dvs grid */ 46 struct ia_css_dvs_grid_info grid;/** grid info contains the dimensions of the dvs grid */
47 int32_t *hor_proj; /**< the pointer to int16_t[grid.height * IA_CSS_DVS_NUM_COEF_TYPES] 47 int32_t *hor_proj; /** the pointer to int16_t[grid.height * IA_CSS_DVS_NUM_COEF_TYPES]
48 containing the horizontal projections */ 48 containing the horizontal projections */
49 int32_t *ver_proj; /**< the pointer to int16_t[grid.width * IA_CSS_DVS_NUM_COEF_TYPES] 49 int32_t *ver_proj; /** the pointer to int16_t[grid.width * IA_CSS_DVS_NUM_COEF_TYPES]
50 containing the vertical projections */ 50 containing the vertical projections */
51}; 51};
52 52
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h
index 7db7dd10fe00..2a0bc4031746 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h
@@ -15,55 +15,55 @@
15#ifndef __IA_CSS_SDIS2_TYPES_H 15#ifndef __IA_CSS_SDIS2_TYPES_H
16#define __IA_CSS_SDIS2_TYPES_H 16#define __IA_CSS_SDIS2_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for DVS statistics parameters. 19* CSS-API header file for DVS statistics parameters.
20*/ 20*/
21 21
22/** Number of DVS coefficient types */ 22/* Number of DVS coefficient types */
23#define IA_CSS_DVS2_NUM_COEF_TYPES 4 23#define IA_CSS_DVS2_NUM_COEF_TYPES 4
24 24
25#ifndef PIPE_GENERATION 25#ifndef PIPE_GENERATION
26#include "isp/kernels/sdis/common/ia_css_sdis_common_types.h" 26#include "isp/kernels/sdis/common/ia_css_sdis_common_types.h"
27#endif 27#endif
28 28
29/** DVS 2.0 Coefficient types. This structure contains 4 pointers to 29/* DVS 2.0 Coefficient types. This structure contains 4 pointers to
30 * arrays that contain the coeffients for each type. 30 * arrays that contain the coeffients for each type.
31 */ 31 */
32struct ia_css_dvs2_coef_types { 32struct ia_css_dvs2_coef_types {
33 int16_t *odd_real; /**< real part of the odd coefficients*/ 33 int16_t *odd_real; /** real part of the odd coefficients*/
34 int16_t *odd_imag; /**< imaginary part of the odd coefficients*/ 34 int16_t *odd_imag; /** imaginary part of the odd coefficients*/
35 int16_t *even_real;/**< real part of the even coefficients*/ 35 int16_t *even_real;/** real part of the even coefficients*/
36 int16_t *even_imag;/**< imaginary part of the even coefficients*/ 36 int16_t *even_imag;/** imaginary part of the even coefficients*/
37}; 37};
38 38
39/** DVS 2.0 Coefficients. This structure describes the coefficients that are needed for the dvs statistics. 39/* DVS 2.0 Coefficients. This structure describes the coefficients that are needed for the dvs statistics.
40 * e.g. hor_coefs.odd_real is the pointer to int16_t[grid.num_hor_coefs] containing the horizontal odd real 40 * e.g. hor_coefs.odd_real is the pointer to int16_t[grid.num_hor_coefs] containing the horizontal odd real
41 * coefficients. 41 * coefficients.
42 */ 42 */
43struct ia_css_dvs2_coefficients { 43struct ia_css_dvs2_coefficients {
44 struct ia_css_dvs_grid_info grid; /**< grid info contains the dimensions of the dvs grid */ 44 struct ia_css_dvs_grid_info grid; /** grid info contains the dimensions of the dvs grid */
45 struct ia_css_dvs2_coef_types hor_coefs; /**< struct with pointers that contain the horizontal coefficients */ 45 struct ia_css_dvs2_coef_types hor_coefs; /** struct with pointers that contain the horizontal coefficients */
46 struct ia_css_dvs2_coef_types ver_coefs; /**< struct with pointers that contain the vertical coefficients */ 46 struct ia_css_dvs2_coef_types ver_coefs; /** struct with pointers that contain the vertical coefficients */
47}; 47};
48 48
49/** DVS 2.0 Statistic types. This structure contains 4 pointers to 49/* DVS 2.0 Statistic types. This structure contains 4 pointers to
50 * arrays that contain the statistics for each type. 50 * arrays that contain the statistics for each type.
51 */ 51 */
52struct ia_css_dvs2_stat_types { 52struct ia_css_dvs2_stat_types {
53 int32_t *odd_real; /**< real part of the odd statistics*/ 53 int32_t *odd_real; /** real part of the odd statistics*/
54 int32_t *odd_imag; /**< imaginary part of the odd statistics*/ 54 int32_t *odd_imag; /** imaginary part of the odd statistics*/
55 int32_t *even_real;/**< real part of the even statistics*/ 55 int32_t *even_real;/** real part of the even statistics*/
56 int32_t *even_imag;/**< imaginary part of the even statistics*/ 56 int32_t *even_imag;/** imaginary part of the even statistics*/
57}; 57};
58 58
59/** DVS 2.0 Statistics. This structure describes the statistics that are generated using the provided coefficients. 59/* DVS 2.0 Statistics. This structure describes the statistics that are generated using the provided coefficients.
60 * e.g. hor_prod.odd_real is the pointer to int16_t[grid.aligned_height][grid.aligned_width] containing 60 * e.g. hor_prod.odd_real is the pointer to int16_t[grid.aligned_height][grid.aligned_width] containing
61 * the horizontal odd real statistics. Valid statistics data area is int16_t[0..grid.height-1][0..grid.width-1] 61 * the horizontal odd real statistics. Valid statistics data area is int16_t[0..grid.height-1][0..grid.width-1]
62 */ 62 */
63struct ia_css_dvs2_statistics { 63struct ia_css_dvs2_statistics {
64 struct ia_css_dvs_grid_info grid; /**< grid info contains the dimensions of the dvs grid */ 64 struct ia_css_dvs_grid_info grid; /** grid info contains the dimensions of the dvs grid */
65 struct ia_css_dvs2_stat_types hor_prod; /**< struct with pointers that contain the horizontal statistics */ 65 struct ia_css_dvs2_stat_types hor_prod; /** struct with pointers that contain the horizontal statistics */
66 struct ia_css_dvs2_stat_types ver_prod; /**< struct with pointers that contain the vertical statistics */ 66 struct ia_css_dvs2_stat_types ver_prod; /** struct with pointers that contain the vertical statistics */
67}; 67};
68 68
69#endif /* __IA_CSS_SDIS2_TYPES_H */ 69#endif /* __IA_CSS_SDIS2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h
index cc47a50e5ad5..91ea8dd4651d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h
@@ -15,13 +15,13 @@
15#ifndef __IA_CSS_TDF_TYPES_H 15#ifndef __IA_CSS_TDF_TYPES_H
16#define __IA_CSS_TDF_TYPES_H 16#define __IA_CSS_TDF_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Transform Domain Filter parameters. 19* CSS-API header file for Transform Domain Filter parameters.
20*/ 20*/
21 21
22#include "type_support.h" 22#include "type_support.h"
23 23
24/** Transform Domain Filter configuration 24/* Transform Domain Filter configuration
25 * 25 *
26 * \brief TDF public parameters. 26 * \brief TDF public parameters.
27 * \details Struct with all parameters for the TDF kernel that can be set 27 * \details Struct with all parameters for the TDF kernel that can be set
@@ -30,23 +30,23 @@
30 * ISP2.6.1: TDF is used. 30 * ISP2.6.1: TDF is used.
31 */ 31 */
32struct ia_css_tdf_config { 32struct ia_css_tdf_config {
33 int32_t thres_flat_table[64]; /**< Final optimized strength table of NR for flat region. */ 33 int32_t thres_flat_table[64]; /** Final optimized strength table of NR for flat region. */
34 int32_t thres_detail_table[64]; /**< Final optimized strength table of NR for detail region. */ 34 int32_t thres_detail_table[64]; /** Final optimized strength table of NR for detail region. */
35 int32_t epsilon_0; /**< Coefficient to control variance for dark area (for flat region). */ 35 int32_t epsilon_0; /** Coefficient to control variance for dark area (for flat region). */
36 int32_t epsilon_1; /**< Coefficient to control variance for bright area (for flat region). */ 36 int32_t epsilon_1; /** Coefficient to control variance for bright area (for flat region). */
37 int32_t eps_scale_text; /**< Epsilon scaling coefficient for texture region. */ 37 int32_t eps_scale_text; /** Epsilon scaling coefficient for texture region. */
38 int32_t eps_scale_edge; /**< Epsilon scaling coefficient for edge region. */ 38 int32_t eps_scale_edge; /** Epsilon scaling coefficient for edge region. */
39 int32_t sepa_flat; /**< Threshold to judge flat (edge < m_Flat_thre). */ 39 int32_t sepa_flat; /** Threshold to judge flat (edge < m_Flat_thre). */
40 int32_t sepa_edge; /**< Threshold to judge edge (edge > m_Edge_thre). */ 40 int32_t sepa_edge; /** Threshold to judge edge (edge > m_Edge_thre). */
41 int32_t blend_flat; /**< Blending ratio at flat region. */ 41 int32_t blend_flat; /** Blending ratio at flat region. */
42 int32_t blend_text; /**< Blending ratio at texture region. */ 42 int32_t blend_text; /** Blending ratio at texture region. */
43 int32_t blend_edge; /**< Blending ratio at edge region. */ 43 int32_t blend_edge; /** Blending ratio at edge region. */
44 int32_t shading_gain; /**< Gain of Shading control. */ 44 int32_t shading_gain; /** Gain of Shading control. */
45 int32_t shading_base_gain; /**< Base Gain of Shading control. */ 45 int32_t shading_base_gain; /** Base Gain of Shading control. */
46 int32_t local_y_gain; /**< Gain of local luminance control. */ 46 int32_t local_y_gain; /** Gain of local luminance control. */
47 int32_t local_y_base_gain; /**< Base gain of local luminance control. */ 47 int32_t local_y_base_gain; /** Base gain of local luminance control. */
48 int32_t rad_x_origin; /**< Initial x coord. for radius computation. */ 48 int32_t rad_x_origin; /** Initial x coord. for radius computation. */
49 int32_t rad_y_origin; /**< Initial y coord. for radius computation. */ 49 int32_t rad_y_origin; /** Initial y coord. for radius computation. */
50}; 50};
51 51
52#endif /* __IA_CSS_TDF_TYPES_H */ 52#endif /* __IA_CSS_TDF_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h
index 135563f52174..223423f8c40b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h
@@ -16,7 +16,7 @@ more details.
16#ifndef _IA_CSS_TNR3_TYPES_H 16#ifndef _IA_CSS_TNR3_TYPES_H
17#define _IA_CSS_TNR3_TYPES_H 17#define _IA_CSS_TNR3_TYPES_H
18 18
19/** @file 19/* @file
20* CSS-API header file for Temporal Noise Reduction v3 (TNR3) kernel 20* CSS-API header file for Temporal Noise Reduction v3 (TNR3) kernel
21*/ 21*/
22 22
@@ -27,7 +27,7 @@ more details.
27 */ 27 */
28#define TNR3_NUM_SEGMENTS 3 28#define TNR3_NUM_SEGMENTS 3
29 29
30/** Temporal Noise Reduction v3 (TNR3) configuration. 30/* Temporal Noise Reduction v3 (TNR3) configuration.
31 * The parameter to this kernel is fourfold 31 * The parameter to this kernel is fourfold
32 * 1. Three piecewise linear graphs (one for each plane) with three segments 32 * 1. Three piecewise linear graphs (one for each plane) with three segments
33 * each. Each line graph has Luma values on the x axis and sigma values for 33 * each. Each line graph has Luma values on the x axis and sigma values for
@@ -44,17 +44,17 @@ more details.
44 * 4. Selection of the reference frame buffer to be used for noise reduction. 44 * 4. Selection of the reference frame buffer to be used for noise reduction.
45 */ 45 */
46struct ia_css_tnr3_kernel_config { 46struct ia_css_tnr3_kernel_config {
47 unsigned int maxfb_y; /**< Maximum Feedback Gain for Y */ 47 unsigned int maxfb_y; /** Maximum Feedback Gain for Y */
48 unsigned int maxfb_u; /**< Maximum Feedback Gain for U */ 48 unsigned int maxfb_u; /** Maximum Feedback Gain for U */
49 unsigned int maxfb_v; /**< Maximum Feedback Gain for V */ 49 unsigned int maxfb_v; /** Maximum Feedback Gain for V */
50 unsigned int round_adj_y; /**< Rounding Adjust for Y */ 50 unsigned int round_adj_y; /** Rounding Adjust for Y */
51 unsigned int round_adj_u; /**< Rounding Adjust for U */ 51 unsigned int round_adj_u; /** Rounding Adjust for U */
52 unsigned int round_adj_v; /**< Rounding Adjust for V */ 52 unsigned int round_adj_v; /** Rounding Adjust for V */
53 unsigned int knee_y[TNR3_NUM_SEGMENTS - 1]; /**< Knee points */ 53 unsigned int knee_y[TNR3_NUM_SEGMENTS - 1]; /** Knee points */
54 unsigned int sigma_y[TNR3_NUM_SEGMENTS + 1]; /**< Standard deviation for Y at points Y0, Y1, Y2, Y3 */ 54 unsigned int sigma_y[TNR3_NUM_SEGMENTS + 1]; /** Standard deviation for Y at points Y0, Y1, Y2, Y3 */
55 unsigned int sigma_u[TNR3_NUM_SEGMENTS + 1]; /**< Standard deviation for U at points U0, U1, U2, U3 */ 55 unsigned int sigma_u[TNR3_NUM_SEGMENTS + 1]; /** Standard deviation for U at points U0, U1, U2, U3 */
56 unsigned int sigma_v[TNR3_NUM_SEGMENTS + 1]; /**< Standard deviation for V at points V0, V1, V2, V3 */ 56 unsigned int sigma_v[TNR3_NUM_SEGMENTS + 1]; /** Standard deviation for V at points V0, V1, V2, V3 */
57 unsigned int ref_buf_select; /**< Selection of the reference buffer */ 57 unsigned int ref_buf_select; /** Selection of the reference buffer */
58}; 58};
59 59
60#endif 60#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h
index 4fd35e6ccd70..9bbc9ab2e6c0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h
@@ -15,11 +15,11 @@
15#ifndef __IA_CSS_TNR_TYPES_H 15#ifndef __IA_CSS_TNR_TYPES_H
16#define __IA_CSS_TNR_TYPES_H 16#define __IA_CSS_TNR_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Temporal Noise Reduction (TNR) parameters. 19* CSS-API header file for Temporal Noise Reduction (TNR) parameters.
20*/ 20*/
21 21
22/** Temporal Noise Reduction (TNR) configuration. 22/* Temporal Noise Reduction (TNR) configuration.
23 * 23 *
24 * When difference between current frame and previous frame is less than or 24 * When difference between current frame and previous frame is less than or
25 * equal to threshold, TNR works and current frame is mixed 25 * equal to threshold, TNR works and current frame is mixed
@@ -36,18 +36,18 @@
36 36
37 37
38struct ia_css_tnr_config { 38struct ia_css_tnr_config {
39 ia_css_u0_16 gain; /**< Interpolation ratio of current frame 39 ia_css_u0_16 gain; /** Interpolation ratio of current frame
40 and previous frame. 40 and previous frame.
41 gain=0.0 -> previous frame is outputted. 41 gain=0.0 -> previous frame is outputted.
42 gain=1.0 -> current frame is outputted. 42 gain=1.0 -> current frame is outputted.
43 u0.16, [0,65535], 43 u0.16, [0,65535],
44 default 32768(0.5), ineffective 65535(almost 1.0) */ 44 default 32768(0.5), ineffective 65535(almost 1.0) */
45 ia_css_u0_16 threshold_y; /**< Threshold to enable interpolation of Y. 45 ia_css_u0_16 threshold_y; /** Threshold to enable interpolation of Y.
46 If difference between current frame and 46 If difference between current frame and
47 previous frame is greater than threshold_y, 47 previous frame is greater than threshold_y,
48 TNR for Y is disabled. 48 TNR for Y is disabled.
49 u0.16, [0,65535], default/ineffective 0 */ 49 u0.16, [0,65535], default/ineffective 0 */
50 ia_css_u0_16 threshold_uv; /**< Threshold to enable interpolation of 50 ia_css_u0_16 threshold_uv; /** Threshold to enable interpolation of
51 U/V. 51 U/V.
52 If difference between current frame and 52 If difference between current frame and
53 previous frame is greater than threshold_uv, 53 previous frame is greater than threshold_uv,
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_param.h
index df5d37c8c946..9df4e12f6c2c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_param.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_param.h
@@ -23,9 +23,9 @@
23 23
24#define VFDEC_BITS_PER_PIXEL GAMMA_OUTPUT_BITS 24#define VFDEC_BITS_PER_PIXEL GAMMA_OUTPUT_BITS
25 25
26/** Viewfinder decimation */ 26/* Viewfinder decimation */
27struct sh_css_isp_vf_isp_config { 27struct sh_css_isp_vf_isp_config {
28 uint32_t vf_downscale_bits; /**< Log VF downscale value */ 28 uint32_t vf_downscale_bits; /** Log VF downscale value */
29 uint32_t enable; 29 uint32_t enable;
30 struct ia_css_frame_sp_info info; 30 struct ia_css_frame_sp_info info;
31 struct { 31 struct {
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_types.h
index d8cfdfbc8c0b..e3efafa279ff 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_types.h
@@ -15,7 +15,7 @@
15#ifndef __IA_CSS_VF_TYPES_H 15#ifndef __IA_CSS_VF_TYPES_H
16#define __IA_CSS_VF_TYPES_H 16#define __IA_CSS_VF_TYPES_H
17 17
18/** Viewfinder decimation 18/* Viewfinder decimation
19 * 19 *
20 * ISP block: vfeven_horizontal_downscale 20 * ISP block: vfeven_horizontal_downscale
21 */ 21 */
@@ -24,7 +24,7 @@
24#include <type_support.h> 24#include <type_support.h>
25 25
26struct ia_css_vf_configuration { 26struct ia_css_vf_configuration {
27 uint32_t vf_downscale_bits; /**< Log VF downscale value */ 27 uint32_t vf_downscale_bits; /** Log VF downscale value */
28 const struct ia_css_frame_info *info; 28 const struct ia_css_frame_info *info;
29}; 29};
30 30
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_types.h
index 6bcfa274be88..bf98734d057e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_types.h
@@ -15,31 +15,31 @@
15#ifndef __IA_CSS_WB_TYPES_H 15#ifndef __IA_CSS_WB_TYPES_H
16#define __IA_CSS_WB_TYPES_H 16#define __IA_CSS_WB_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for White Balance parameters. 19* CSS-API header file for White Balance parameters.
20*/ 20*/
21 21
22 22
23/** White Balance configuration (Gain Adjust). 23/* White Balance configuration (Gain Adjust).
24 * 24 *
25 * ISP block: WB1 25 * ISP block: WB1
26 * ISP1: WB1 is used. 26 * ISP1: WB1 is used.
27 * ISP2: WB1 is used. 27 * ISP2: WB1 is used.
28 */ 28 */
29struct ia_css_wb_config { 29struct ia_css_wb_config {
30 uint32_t integer_bits; /**< Common exponent of gains. 30 uint32_t integer_bits; /** Common exponent of gains.
31 u8.0, [0,3], 31 u8.0, [0,3],
32 default 1, ineffective 1 */ 32 default 1, ineffective 1 */
33 uint32_t gr; /**< Significand of Gr gain. 33 uint32_t gr; /** Significand of Gr gain.
34 u[integer_bits].[16-integer_bits], [0,65535], 34 u[integer_bits].[16-integer_bits], [0,65535],
35 default/ineffective 32768(u1.15, 1.0) */ 35 default/ineffective 32768(u1.15, 1.0) */
36 uint32_t r; /**< Significand of R gain. 36 uint32_t r; /** Significand of R gain.
37 u[integer_bits].[16-integer_bits], [0,65535], 37 u[integer_bits].[16-integer_bits], [0,65535],
38 default/ineffective 32768(u1.15, 1.0) */ 38 default/ineffective 32768(u1.15, 1.0) */
39 uint32_t b; /**< Significand of B gain. 39 uint32_t b; /** Significand of B gain.
40 u[integer_bits].[16-integer_bits], [0,65535], 40 u[integer_bits].[16-integer_bits], [0,65535],
41 default/ineffective 32768(u1.15, 1.0) */ 41 default/ineffective 32768(u1.15, 1.0) */
42 uint32_t gb; /**< Significand of Gb gain. 42 uint32_t gb; /** Significand of Gb gain.
43 u[integer_bits].[16-integer_bits], [0,65535], 43 u[integer_bits].[16-integer_bits], [0,65535],
44 default/ineffective 32768(u1.15, 1.0) */ 44 default/ineffective 32768(u1.15, 1.0) */
45}; 45};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c
index 3018100f6f76..abcb531f51cc 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c
@@ -21,7 +21,7 @@
21#include "ia_css_xnr.host.h" 21#include "ia_css_xnr.host.h"
22 22
23const struct ia_css_xnr_config default_xnr_config = { 23const struct ia_css_xnr_config default_xnr_config = {
24 /** default threshold 6400 translates to 25 on ISP. */ 24 /* default threshold 6400 translates to 25 on ISP. */
25 6400 25 6400
26}; 26};
27 27
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h
index 806c9f8f0e2e..a5caebbe2f84 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h
@@ -41,7 +41,7 @@ struct sh_css_isp_xnr_vamem_params {
41}; 41};
42 42
43struct sh_css_isp_xnr_params { 43struct sh_css_isp_xnr_params {
44 /** XNR threshold. 44 /* XNR threshold.
45 * type:u0.16 but actual valid range is:[0,255] 45 * type:u0.16 but actual valid range is:[0,255]
46 * valid range is dependent on SH_CSS_ISP_YUV_BITS (currently 8bits) 46 * valid range is dependent on SH_CSS_ISP_YUV_BITS (currently 8bits)
47 * default: 25 */ 47 * default: 25 */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h
index 89e8b0f17e8c..d2b634211a3f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h
@@ -15,11 +15,11 @@
15#ifndef __IA_CSS_XNR_TYPES_H 15#ifndef __IA_CSS_XNR_TYPES_H
16#define __IA_CSS_XNR_TYPES_H 16#define __IA_CSS_XNR_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Extra Noise Reduction (XNR) parameters. 19* CSS-API header file for Extra Noise Reduction (XNR) parameters.
20*/ 20*/
21 21
22/** XNR table. 22/* XNR table.
23 * 23 *
24 * NOTE: The driver does not need to set this table, 24 * NOTE: The driver does not need to set this table,
25 * because the default values are set inside the css. 25 * because the default values are set inside the css.
@@ -36,23 +36,23 @@
36 * 36 *
37 */ 37 */
38 38
39/** Number of elements in the xnr table. */ 39/* Number of elements in the xnr table. */
40#define IA_CSS_VAMEM_1_XNR_TABLE_SIZE_LOG2 6 40#define IA_CSS_VAMEM_1_XNR_TABLE_SIZE_LOG2 6
41/** Number of elements in the xnr table. */ 41/* Number of elements in the xnr table. */
42#define IA_CSS_VAMEM_1_XNR_TABLE_SIZE (1U<<IA_CSS_VAMEM_1_XNR_TABLE_SIZE_LOG2) 42#define IA_CSS_VAMEM_1_XNR_TABLE_SIZE (1U<<IA_CSS_VAMEM_1_XNR_TABLE_SIZE_LOG2)
43 43
44/** Number of elements in the xnr table. */ 44/* Number of elements in the xnr table. */
45#define IA_CSS_VAMEM_2_XNR_TABLE_SIZE_LOG2 6 45#define IA_CSS_VAMEM_2_XNR_TABLE_SIZE_LOG2 6
46/** Number of elements in the xnr table. */ 46/* Number of elements in the xnr table. */
47#define IA_CSS_VAMEM_2_XNR_TABLE_SIZE (1U<<IA_CSS_VAMEM_2_XNR_TABLE_SIZE_LOG2) 47#define IA_CSS_VAMEM_2_XNR_TABLE_SIZE (1U<<IA_CSS_VAMEM_2_XNR_TABLE_SIZE_LOG2)
48 48
49/**< IA_CSS_VAMEM_TYPE_1(ISP2300) or 49/** IA_CSS_VAMEM_TYPE_1(ISP2300) or
50 IA_CSS_VAMEM_TYPE_2(ISP2400) */ 50 IA_CSS_VAMEM_TYPE_2(ISP2400) */
51union ia_css_xnr_data { 51union ia_css_xnr_data {
52 uint16_t vamem_1[IA_CSS_VAMEM_1_XNR_TABLE_SIZE]; 52 uint16_t vamem_1[IA_CSS_VAMEM_1_XNR_TABLE_SIZE];
53 /**< Coefficients table on vamem type1. u0.12, [0,4095] */ 53 /** Coefficients table on vamem type1. u0.12, [0,4095] */
54 uint16_t vamem_2[IA_CSS_VAMEM_2_XNR_TABLE_SIZE]; 54 uint16_t vamem_2[IA_CSS_VAMEM_2_XNR_TABLE_SIZE];
55 /**< Coefficients table on vamem type2. u0.12, [0,4095] */ 55 /** Coefficients table on vamem type2. u0.12, [0,4095] */
56}; 56};
57 57
58struct ia_css_xnr_table { 58struct ia_css_xnr_table {
@@ -61,7 +61,7 @@ struct ia_css_xnr_table {
61}; 61};
62 62
63struct ia_css_xnr_config { 63struct ia_css_xnr_config {
64 /** XNR threshold. 64 /* XNR threshold.
65 * type:u0.16 valid range:[0,65535] 65 * type:u0.16 valid range:[0,65535]
66 * default: 6400 */ 66 * default: 6400 */
67 uint16_t threshold; 67 uint16_t threshold;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h
index 8f14d1080651..669200caf72e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h
@@ -15,7 +15,7 @@
15#ifndef __IA_CSS_XNR3_TYPES_H 15#ifndef __IA_CSS_XNR3_TYPES_H
16#define __IA_CSS_XNR3_TYPES_H 16#define __IA_CSS_XNR3_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Extra Noise Reduction (XNR) parameters. 19* CSS-API header file for Extra Noise Reduction (XNR) parameters.
20*/ 20*/
21 21
@@ -47,12 +47,12 @@
47 * IA_CSS_XNR3_SIGMA_SCALE. 47 * IA_CSS_XNR3_SIGMA_SCALE.
48 */ 48 */
49struct ia_css_xnr3_sigma_params { 49struct ia_css_xnr3_sigma_params {
50 int y0; /**< Sigma for Y range similarity in dark area */ 50 int y0; /** Sigma for Y range similarity in dark area */
51 int y1; /**< Sigma for Y range similarity in bright area */ 51 int y1; /** Sigma for Y range similarity in bright area */
52 int u0; /**< Sigma for U range similarity in dark area */ 52 int u0; /** Sigma for U range similarity in dark area */
53 int u1; /**< Sigma for U range similarity in bright area */ 53 int u1; /** Sigma for U range similarity in bright area */
54 int v0; /**< Sigma for V range similarity in dark area */ 54 int v0; /** Sigma for V range similarity in dark area */
55 int v1; /**< Sigma for V range similarity in bright area */ 55 int v1; /** Sigma for V range similarity in bright area */
56}; 56};
57 57
58/** 58/**
@@ -64,10 +64,10 @@ struct ia_css_xnr3_sigma_params {
64 * with IA_CSS_XNR3_CORING_SCALE. The ineffective value is 0. 64 * with IA_CSS_XNR3_CORING_SCALE. The ineffective value is 0.
65 */ 65 */
66struct ia_css_xnr3_coring_params { 66struct ia_css_xnr3_coring_params {
67 int u0; /**< Coring threshold of U channel in dark area */ 67 int u0; /** Coring threshold of U channel in dark area */
68 int u1; /**< Coring threshold of U channel in bright area */ 68 int u1; /** Coring threshold of U channel in bright area */
69 int v0; /**< Coring threshold of V channel in dark area */ 69 int v0; /** Coring threshold of V channel in dark area */
70 int v1; /**< Coring threshold of V channel in bright area */ 70 int v1; /** Coring threshold of V channel in bright area */
71}; 71};
72 72
73/** 73/**
@@ -81,7 +81,7 @@ struct ia_css_xnr3_coring_params {
81 * value of 0.0 bypasses the entire xnr3 filter. 81 * value of 0.0 bypasses the entire xnr3 filter.
82 */ 82 */
83struct ia_css_xnr3_blending_params { 83struct ia_css_xnr3_blending_params {
84 int strength; /**< Blending strength */ 84 int strength; /** Blending strength */
85}; 85};
86 86
87/** 87/**
@@ -90,9 +90,9 @@ struct ia_css_xnr3_blending_params {
90 * from the CSS API. 90 * from the CSS API.
91 */ 91 */
92struct ia_css_xnr3_config { 92struct ia_css_xnr3_config {
93 struct ia_css_xnr3_sigma_params sigma; /**< XNR3 sigma parameters */ 93 struct ia_css_xnr3_sigma_params sigma; /** XNR3 sigma parameters */
94 struct ia_css_xnr3_coring_params coring; /**< XNR3 coring parameters */ 94 struct ia_css_xnr3_coring_params coring; /** XNR3 coring parameters */
95 struct ia_css_xnr3_blending_params blending; /**< XNR3 blending parameters */ 95 struct ia_css_xnr3_blending_params blending; /** XNR3 blending parameters */
96}; 96};
97 97
98#endif /* __IA_CSS_XNR3_TYPES_H */ 98#endif /* __IA_CSS_XNR3_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h
index 3f46655bee57..3f8589a5a43a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h
@@ -15,11 +15,11 @@
15#ifndef __IA_CSS_YNR_TYPES_H 15#ifndef __IA_CSS_YNR_TYPES_H
16#define __IA_CSS_YNR_TYPES_H 16#define __IA_CSS_YNR_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Noise Reduction (BNR) and YCC Noise Reduction (YNR,CNR). 19* CSS-API header file for Noise Reduction (BNR) and YCC Noise Reduction (YNR,CNR).
20*/ 20*/
21 21
22/** Configuration used by Bayer Noise Reduction (BNR) and 22/* Configuration used by Bayer Noise Reduction (BNR) and
23 * YCC Noise Reduction (YNR,CNR). 23 * YCC Noise Reduction (YNR,CNR).
24 * 24 *
25 * ISP block: BNR1, YNR1, CNR1 25 * ISP block: BNR1, YNR1, CNR1
@@ -28,28 +28,28 @@
28 * BNR1,YNR2,CNR2 are used for Still. 28 * BNR1,YNR2,CNR2 are used for Still.
29 */ 29 */
30struct ia_css_nr_config { 30struct ia_css_nr_config {
31 ia_css_u0_16 bnr_gain; /**< Strength of noise reduction (BNR). 31 ia_css_u0_16 bnr_gain; /** Strength of noise reduction (BNR).
32 u0.16, [0,65535], 32 u0.16, [0,65535],
33 default 14336(0.21875), ineffective 0 */ 33 default 14336(0.21875), ineffective 0 */
34 ia_css_u0_16 ynr_gain; /**< Strength of noise reduction (YNR). 34 ia_css_u0_16 ynr_gain; /** Strength of noise reduction (YNR).
35 u0.16, [0,65535], 35 u0.16, [0,65535],
36 default 14336(0.21875), ineffective 0 */ 36 default 14336(0.21875), ineffective 0 */
37 ia_css_u0_16 direction; /**< Sensitivity of edge (BNR). 37 ia_css_u0_16 direction; /** Sensitivity of edge (BNR).
38 u0.16, [0,65535], 38 u0.16, [0,65535],
39 default 512(0.0078125), ineffective 0 */ 39 default 512(0.0078125), ineffective 0 */
40 ia_css_u0_16 threshold_cb; /**< Coring threshold for Cb (CNR). 40 ia_css_u0_16 threshold_cb; /** Coring threshold for Cb (CNR).
41 This is the same as 41 This is the same as
42 de_config.c1_coring_threshold. 42 de_config.c1_coring_threshold.
43 u0.16, [0,65535], 43 u0.16, [0,65535],
44 default 0(0), ineffective 0 */ 44 default 0(0), ineffective 0 */
45 ia_css_u0_16 threshold_cr; /**< Coring threshold for Cr (CNR). 45 ia_css_u0_16 threshold_cr; /** Coring threshold for Cr (CNR).
46 This is the same as 46 This is the same as
47 de_config.c2_coring_threshold. 47 de_config.c2_coring_threshold.
48 u0.16, [0,65535], 48 u0.16, [0,65535],
49 default 0(0), ineffective 0 */ 49 default 0(0), ineffective 0 */
50}; 50};
51 51
52/** Edge Enhancement (sharpen) configuration. 52/* Edge Enhancement (sharpen) configuration.
53 * 53 *
54 * ISP block: YEE1 54 * ISP block: YEE1
55 * ISP1: YEE1 is used. 55 * ISP1: YEE1 is used.
@@ -57,24 +57,24 @@ struct ia_css_nr_config {
57 * (YEE2 is used for Still.) 57 * (YEE2 is used for Still.)
58 */ 58 */
59struct ia_css_ee_config { 59struct ia_css_ee_config {
60 ia_css_u5_11 gain; /**< The strength of sharpness. 60 ia_css_u5_11 gain; /** The strength of sharpness.
61 u5.11, [0,65535], 61 u5.11, [0,65535],
62 default 8192(4.0), ineffective 0 */ 62 default 8192(4.0), ineffective 0 */
63 ia_css_u8_8 threshold; /**< The threshold that divides noises from 63 ia_css_u8_8 threshold; /** The threshold that divides noises from
64 edge. 64 edge.
65 u8.8, [0,65535], 65 u8.8, [0,65535],
66 default 256(1.0), ineffective 65535 */ 66 default 256(1.0), ineffective 65535 */
67 ia_css_u5_11 detail_gain; /**< The strength of sharpness in pell-mell 67 ia_css_u5_11 detail_gain; /** The strength of sharpness in pell-mell
68 area. 68 area.
69 u5.11, [0,65535], 69 u5.11, [0,65535],
70 default 2048(1.0), ineffective 0 */ 70 default 2048(1.0), ineffective 0 */
71}; 71};
72 72
73/** YNR and YEE (sharpen) configuration. 73/* YNR and YEE (sharpen) configuration.
74 */ 74 */
75struct ia_css_yee_config { 75struct ia_css_yee_config {
76 struct ia_css_nr_config nr; /**< The NR configuration. */ 76 struct ia_css_nr_config nr; /** The NR configuration. */
77 struct ia_css_ee_config ee; /**< The EE configuration. */ 77 struct ia_css_ee_config ee; /** The EE configuration. */
78}; 78};
79 79
80#endif /* __IA_CSS_YNR_TYPES_H */ 80#endif /* __IA_CSS_YNR_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h
index e0a0b10ac5fa..83161a24207d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h
@@ -15,11 +15,11 @@
15#ifndef __IA_CSS_YNR2_TYPES_H 15#ifndef __IA_CSS_YNR2_TYPES_H
16#define __IA_CSS_YNR2_TYPES_H 16#define __IA_CSS_YNR2_TYPES_H
17 17
18/** @file 18/* @file
19* CSS-API header file for Y(Luma) Noise Reduction. 19* CSS-API header file for Y(Luma) Noise Reduction.
20*/ 20*/
21 21
22/** Y(Luma) Noise Reduction configuration. 22/* Y(Luma) Noise Reduction configuration.
23 * 23 *
24 * ISP block: YNR2 & YEE2 24 * ISP block: YNR2 & YEE2
25 * (ISP1: YNR1 and YEE1 are used.) 25 * (ISP1: YNR1 and YEE1 are used.)
@@ -27,21 +27,21 @@
27 * ISP2: YNR2 and YEE2 are used for Still. 27 * ISP2: YNR2 and YEE2 are used for Still.
28 */ 28 */
29struct ia_css_ynr_config { 29struct ia_css_ynr_config {
30 uint16_t edge_sense_gain_0; /**< Sensitivity of edge in dark area. 30 uint16_t edge_sense_gain_0; /** Sensitivity of edge in dark area.
31 u13.0, [0,8191], 31 u13.0, [0,8191],
32 default 1000, ineffective 0 */ 32 default 1000, ineffective 0 */
33 uint16_t edge_sense_gain_1; /**< Sensitivity of edge in bright area. 33 uint16_t edge_sense_gain_1; /** Sensitivity of edge in bright area.
34 u13.0, [0,8191], 34 u13.0, [0,8191],
35 default 1000, ineffective 0 */ 35 default 1000, ineffective 0 */
36 uint16_t corner_sense_gain_0; /**< Sensitivity of corner in dark area. 36 uint16_t corner_sense_gain_0; /** Sensitivity of corner in dark area.
37 u13.0, [0,8191], 37 u13.0, [0,8191],
38 default 1000, ineffective 0 */ 38 default 1000, ineffective 0 */
39 uint16_t corner_sense_gain_1; /**< Sensitivity of corner in bright area. 39 uint16_t corner_sense_gain_1; /** Sensitivity of corner in bright area.
40 u13.0, [0,8191], 40 u13.0, [0,8191],
41 default 1000, ineffective 0 */ 41 default 1000, ineffective 0 */
42}; 42};
43 43
44/** Fringe Control configuration. 44/* Fringe Control configuration.
45 * 45 *
46 * ISP block: FC2 (FC2 is used with YNR2/YEE2.) 46 * ISP block: FC2 (FC2 is used with YNR2/YEE2.)
47 * (ISP1: FC2 is not used.) 47 * (ISP1: FC2 is not used.)
@@ -49,43 +49,43 @@ struct ia_css_ynr_config {
49 * ISP2: FC2 is used for Still. 49 * ISP2: FC2 is used for Still.
50 */ 50 */
51struct ia_css_fc_config { 51struct ia_css_fc_config {
52 uint8_t gain_exp; /**< Common exponent of gains. 52 uint8_t gain_exp; /** Common exponent of gains.
53 u8.0, [0,13], 53 u8.0, [0,13],
54 default 1, ineffective 0 */ 54 default 1, ineffective 0 */
55 uint16_t coring_pos_0; /**< Coring threshold for positive edge in dark area. 55 uint16_t coring_pos_0; /** Coring threshold for positive edge in dark area.
56 u0.13, [0,8191], 56 u0.13, [0,8191],
57 default 0(0), ineffective 0 */ 57 default 0(0), ineffective 0 */
58 uint16_t coring_pos_1; /**< Coring threshold for positive edge in bright area. 58 uint16_t coring_pos_1; /** Coring threshold for positive edge in bright area.
59 u0.13, [0,8191], 59 u0.13, [0,8191],
60 default 0(0), ineffective 0 */ 60 default 0(0), ineffective 0 */
61 uint16_t coring_neg_0; /**< Coring threshold for negative edge in dark area. 61 uint16_t coring_neg_0; /** Coring threshold for negative edge in dark area.
62 u0.13, [0,8191], 62 u0.13, [0,8191],
63 default 0(0), ineffective 0 */ 63 default 0(0), ineffective 0 */
64 uint16_t coring_neg_1; /**< Coring threshold for negative edge in bright area. 64 uint16_t coring_neg_1; /** Coring threshold for negative edge in bright area.
65 u0.13, [0,8191], 65 u0.13, [0,8191],
66 default 0(0), ineffective 0 */ 66 default 0(0), ineffective 0 */
67 uint16_t gain_pos_0; /**< Gain for positive edge in dark area. 67 uint16_t gain_pos_0; /** Gain for positive edge in dark area.
68 u0.13, [0,8191], 68 u0.13, [0,8191],
69 default 4096(0.5), ineffective 0 */ 69 default 4096(0.5), ineffective 0 */
70 uint16_t gain_pos_1; /**< Gain for positive edge in bright area. 70 uint16_t gain_pos_1; /** Gain for positive edge in bright area.
71 u0.13, [0,8191], 71 u0.13, [0,8191],
72 default 4096(0.5), ineffective 0 */ 72 default 4096(0.5), ineffective 0 */
73 uint16_t gain_neg_0; /**< Gain for negative edge in dark area. 73 uint16_t gain_neg_0; /** Gain for negative edge in dark area.
74 u0.13, [0,8191], 74 u0.13, [0,8191],
75 default 4096(0.5), ineffective 0 */ 75 default 4096(0.5), ineffective 0 */
76 uint16_t gain_neg_1; /**< Gain for negative edge in bright area. 76 uint16_t gain_neg_1; /** Gain for negative edge in bright area.
77 u0.13, [0,8191], 77 u0.13, [0,8191],
78 default 4096(0.5), ineffective 0 */ 78 default 4096(0.5), ineffective 0 */
79 uint16_t crop_pos_0; /**< Limit for positive edge in dark area. 79 uint16_t crop_pos_0; /** Limit for positive edge in dark area.
80 u0.13, [0,8191], 80 u0.13, [0,8191],
81 default/ineffective 8191(almost 1.0) */ 81 default/ineffective 8191(almost 1.0) */
82 uint16_t crop_pos_1; /**< Limit for positive edge in bright area. 82 uint16_t crop_pos_1; /** Limit for positive edge in bright area.
83 u0.13, [0,8191], 83 u0.13, [0,8191],
84 default/ineffective 8191(almost 1.0) */ 84 default/ineffective 8191(almost 1.0) */
85 int16_t crop_neg_0; /**< Limit for negative edge in dark area. 85 int16_t crop_neg_0; /** Limit for negative edge in dark area.
86 s0.13, [-8192,0], 86 s0.13, [-8192,0],
87 default/ineffective -8192(-1.0) */ 87 default/ineffective -8192(-1.0) */
88 int16_t crop_neg_1; /**< Limit for negative edge in bright area. 88 int16_t crop_neg_1; /** Limit for negative edge in bright area.
89 s0.13, [-8192,0], 89 s0.13, [-8192,0],
90 default/ineffective -8192(-1.0) */ 90 default/ineffective -8192(-1.0) */
91}; 91};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/yuv_ls/yuv_ls_1.0/ia_css_yuv_ls_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/yuv_ls/yuv_ls_1.0/ia_css_yuv_ls_param.h
index 63a8703c9c44..c9ff0cb2493a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/yuv_ls/yuv_ls_1.0/ia_css_yuv_ls_param.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/yuv_ls/yuv_ls_1.0/ia_css_yuv_ls_param.h
@@ -24,7 +24,7 @@
24 */ 24 */
25#define NUM_YUV_LS 2 25#define NUM_YUV_LS 2
26 26
27/** YUV load/store */ 27/* YUV load/store */
28struct sh_css_isp_yuv_ls_isp_config { 28struct sh_css_isp_yuv_ls_isp_config {
29 unsigned base_address[NUM_YUV_LS]; 29 unsigned base_address[NUM_YUV_LS];
30 unsigned width[NUM_YUV_LS]; 30 unsigned width[NUM_YUV_LS];
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/memory_realloc.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/memory_realloc.c
index e814f1bf19f7..6512a1ceb9d3 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/memory_realloc.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/memory_realloc.c
@@ -1,4 +1,4 @@
1/** 1/*
2Support for Intel Camera Imaging ISP subsystem. 2Support for Intel Camera Imaging ISP subsystem.
3Copyright (c) 2010 - 2015, Intel Corporation. 3Copyright (c) 2010 - 2015, Intel Corporation.
4 4
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/interface/ia_css_binary.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/interface/ia_css_binary.h
index c65194619a34..5a58abe2b233 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/interface/ia_css_binary.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/interface/ia_css_binary.h
@@ -269,7 +269,7 @@ enum ia_css_err
269ia_css_binary_find(struct ia_css_binary_descr *descr, 269ia_css_binary_find(struct ia_css_binary_descr *descr,
270 struct ia_css_binary *binary); 270 struct ia_css_binary *binary);
271 271
272/** @brief Get the shading information of the specified shading correction type. 272/* @brief Get the shading information of the specified shading correction type.
273 * 273 *
274 * @param[in] binary: The isp binary which has the shading correction. 274 * @param[in] binary: The isp binary which has the shading correction.
275 * @param[in] type: The shading correction type. 275 * @param[in] type: The shading correction type.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c
index e028e460ae4c..295e07049393 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c
@@ -972,7 +972,7 @@ ia_css_binary_uninit(void)
972 return IA_CSS_SUCCESS; 972 return IA_CSS_SUCCESS;
973} 973}
974 974
975/** @brief Compute decimation factor for 3A statistics and shading correction. 975/* @brief Compute decimation factor for 3A statistics and shading correction.
976 * 976 *
977 * @param[in] width Frame width in pixels. 977 * @param[in] width Frame width in pixels.
978 * @param[in] height Frame height in pixels. 978 * @param[in] height Frame height in pixels.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c
index 42d9a8508858..e50d9f2e2609 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c
@@ -152,7 +152,7 @@ void ia_css_queue_map(
152 unmap_buffer_type_to_queue_id(thread_id, buf_type); 152 unmap_buffer_type_to_queue_id(thread_id, buf_type);
153} 153}
154 154
155/** 155/*
156 * @brief Query the internal queue ID. 156 * @brief Query the internal queue ID.
157 */ 157 */
158bool ia_css_query_internal_queue_id( 158bool ia_css_query_internal_queue_id(
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h
index 3c8dcfd4bbc6..4b28b2a0863a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h
@@ -54,21 +54,21 @@ extern unsigned int ia_css_debug_trace_level;
54 * Values can be combined to dump a combination of sets. 54 * Values can be combined to dump a combination of sets.
55 */ 55 */
56enum ia_css_debug_enable_param_dump { 56enum ia_css_debug_enable_param_dump {
57 IA_CSS_DEBUG_DUMP_FPN = 1 << 0, /**< FPN table */ 57 IA_CSS_DEBUG_DUMP_FPN = 1 << 0, /** FPN table */
58 IA_CSS_DEBUG_DUMP_OB = 1 << 1, /**< OB table */ 58 IA_CSS_DEBUG_DUMP_OB = 1 << 1, /** OB table */
59 IA_CSS_DEBUG_DUMP_SC = 1 << 2, /**< Shading table */ 59 IA_CSS_DEBUG_DUMP_SC = 1 << 2, /** Shading table */
60 IA_CSS_DEBUG_DUMP_WB = 1 << 3, /**< White balance */ 60 IA_CSS_DEBUG_DUMP_WB = 1 << 3, /** White balance */
61 IA_CSS_DEBUG_DUMP_DP = 1 << 4, /**< Defect Pixel */ 61 IA_CSS_DEBUG_DUMP_DP = 1 << 4, /** Defect Pixel */
62 IA_CSS_DEBUG_DUMP_BNR = 1 << 5, /**< Bayer Noise Reductions */ 62 IA_CSS_DEBUG_DUMP_BNR = 1 << 5, /** Bayer Noise Reductions */
63 IA_CSS_DEBUG_DUMP_S3A = 1 << 6, /**< 3A Statistics */ 63 IA_CSS_DEBUG_DUMP_S3A = 1 << 6, /** 3A Statistics */
64 IA_CSS_DEBUG_DUMP_DE = 1 << 7, /**< De Mosaicing */ 64 IA_CSS_DEBUG_DUMP_DE = 1 << 7, /** De Mosaicing */
65 IA_CSS_DEBUG_DUMP_YNR = 1 << 8, /**< Luma Noise Reduction */ 65 IA_CSS_DEBUG_DUMP_YNR = 1 << 8, /** Luma Noise Reduction */
66 IA_CSS_DEBUG_DUMP_CSC = 1 << 9, /**< Color Space Conversion */ 66 IA_CSS_DEBUG_DUMP_CSC = 1 << 9, /** Color Space Conversion */
67 IA_CSS_DEBUG_DUMP_GC = 1 << 10, /**< Gamma Correction */ 67 IA_CSS_DEBUG_DUMP_GC = 1 << 10, /** Gamma Correction */
68 IA_CSS_DEBUG_DUMP_TNR = 1 << 11, /**< Temporal Noise Reduction */ 68 IA_CSS_DEBUG_DUMP_TNR = 1 << 11, /** Temporal Noise Reduction */
69 IA_CSS_DEBUG_DUMP_ANR = 1 << 12, /**< Advanced Noise Reduction */ 69 IA_CSS_DEBUG_DUMP_ANR = 1 << 12, /** Advanced Noise Reduction */
70 IA_CSS_DEBUG_DUMP_CE = 1 << 13, /**< Chroma Enhancement */ 70 IA_CSS_DEBUG_DUMP_CE = 1 << 13, /** Chroma Enhancement */
71 IA_CSS_DEBUG_DUMP_ALL = 1 << 14 /**< Dump all device parameters */ 71 IA_CSS_DEBUG_DUMP_ALL = 1 << 14 /** Dump all device parameters */
72}; 72};
73 73
74#define IA_CSS_ERROR(fmt, ...) \ 74#define IA_CSS_ERROR(fmt, ...) \
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c
index 0fa7cb2423d8..dd1127a21494 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c
@@ -1617,7 +1617,7 @@ void ia_css_debug_print_sp_debug_state(const struct sh_css_sp_debug_state
1617 1617
1618#elif SP_DEBUG == SP_DEBUG_TRACE 1618#elif SP_DEBUG == SP_DEBUG_TRACE
1619 1619
1620/** 1620/*
1621 * This is just an example how TRACE_FILE_ID (see ia_css_debug.sp.h) will 1621 * This is just an example how TRACE_FILE_ID (see ia_css_debug.sp.h) will
1622 * me mapped on the file name string. 1622 * me mapped on the file name string.
1623 * 1623 *
@@ -2267,7 +2267,7 @@ void ia_css_debug_dump_debug_info(const char *context)
2267 return; 2267 return;
2268} 2268}
2269 2269
2270/** this function is for debug use, it can make SP go to sleep 2270/* this function is for debug use, it can make SP go to sleep
2271 state after each frame, then user can dump the stable SP dmem. 2271 state after each frame, then user can dump the stable SP dmem.
2272 this function can be called after ia_css_start_sp() 2272 this function can be called after ia_css_start_sp()
2273 and before sh_css_init_buffer_queues() 2273 and before sh_css_init_buffer_queues()
@@ -2526,7 +2526,7 @@ void ia_css_debug_dump_ddr_debug_queue(void)
2526} 2526}
2527*/ 2527*/
2528 2528
2529/** 2529/*
2530 * @brief Initialize the debug mode. 2530 * @brief Initialize the debug mode.
2531 * Refer to "ia_css_debug.h" for more details. 2531 * Refer to "ia_css_debug.h" for more details.
2532 */ 2532 */
@@ -2537,7 +2537,7 @@ bool ia_css_debug_mode_init(void)
2537 return rc; 2537 return rc;
2538} 2538}
2539 2539
2540/** 2540/*
2541 * @brief Disable the DMA channel. 2541 * @brief Disable the DMA channel.
2542 * Refer to "ia_css_debug.h" for more details. 2542 * Refer to "ia_css_debug.h" for more details.
2543 */ 2543 */
@@ -2552,7 +2552,7 @@ ia_css_debug_mode_disable_dma_channel(int dma_id,
2552 return rc; 2552 return rc;
2553} 2553}
2554 2554
2555/** 2555/*
2556 * @brief Enable the DMA channel. 2556 * @brief Enable the DMA channel.
2557 * Refer to "ia_css_debug.h" for more details. 2557 * Refer to "ia_css_debug.h" for more details.
2558 */ 2558 */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/src/event.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/src/event.c
index 2698c3e1adb0..239c06730bf4 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/src/event.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/src/event.c
@@ -13,7 +13,7 @@
13 * more details. 13 * more details.
14 */ 14 */
15#else 15#else
16/** 16/*
17Support for Intel Camera Imaging ISP subsystem. 17Support for Intel Camera Imaging ISP subsystem.
18Copyright (c) 2010 - 2015, Intel Corporation. 18Copyright (c) 2010 - 2015, Intel Corporation.
19 19
@@ -52,7 +52,7 @@ more details.
52 52
53#include "ia_css_queue.h" /* host_sp_enqueue_XXX */ 53#include "ia_css_queue.h" /* host_sp_enqueue_XXX */
54#include "ia_css_event.h" /* ia_css_event_encode */ 54#include "ia_css_event.h" /* ia_css_event_encode */
55/** 55/*
56 * @brief Encode the information into the software-event. 56 * @brief Encode the information into the software-event.
57 * Refer to "sw_event_public.h" for details. 57 * Refer to "sw_event_public.h" for details.
58 */ 58 */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/src/eventq.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/src/eventq.c
index 56d6858890ec..913a4bf7a34f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/src/eventq.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/src/eventq.c
@@ -37,7 +37,7 @@ int ia_css_eventq_recv(
37 return error; 37 return error;
38} 38}
39 39
40/** 40/*
41 * @brief The Host sends the event to the SP. 41 * @brief The Host sends the event to the SP.
42 * Refer to "sh_css_sp.h" for details. 42 * Refer to "sh_css_sp.h" for details.
43 */ 43 */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame.h
index c7e07b79f4e5..89ad8080ceb1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame.h
@@ -41,7 +41,7 @@ more details.
41/********************************************************************* 41/*********************************************************************
42**** Frame INFO APIs 42**** Frame INFO APIs
43**********************************************************************/ 43**********************************************************************/
44/** @brief Sets the given width and alignment to the frame info 44/* @brief Sets the given width and alignment to the frame info
45 * 45 *
46 * @param 46 * @param
47 * @param[in] info The info to which parameters would set 47 * @param[in] info The info to which parameters would set
@@ -53,7 +53,7 @@ void ia_css_frame_info_set_width(struct ia_css_frame_info *info,
53 unsigned int width, 53 unsigned int width,
54 unsigned int min_padded_width); 54 unsigned int min_padded_width);
55 55
56/** @brief Sets the given format to the frame info 56/* @brief Sets the given format to the frame info
57 * 57 *
58 * @param 58 * @param
59 * @param[in] info The info to which parameters would set 59 * @param[in] info The info to which parameters would set
@@ -63,7 +63,7 @@ void ia_css_frame_info_set_width(struct ia_css_frame_info *info,
63void ia_css_frame_info_set_format(struct ia_css_frame_info *info, 63void ia_css_frame_info_set_format(struct ia_css_frame_info *info,
64 enum ia_css_frame_format format); 64 enum ia_css_frame_format format);
65 65
66/** @brief Sets the frame info with the given parameters 66/* @brief Sets the frame info with the given parameters
67 * 67 *
68 * @param 68 * @param
69 * @param[in] info The info to which parameters would set 69 * @param[in] info The info to which parameters would set
@@ -79,7 +79,7 @@ void ia_css_frame_info_init(struct ia_css_frame_info *info,
79 enum ia_css_frame_format format, 79 enum ia_css_frame_format format,
80 unsigned int aligned); 80 unsigned int aligned);
81 81
82/** @brief Checks whether 2 frame infos has the same resolution 82/* @brief Checks whether 2 frame infos has the same resolution
83 * 83 *
84 * @param 84 * @param
85 * @param[in] frame_a The first frame to be compared 85 * @param[in] frame_a The first frame to be compared
@@ -90,7 +90,7 @@ bool ia_css_frame_info_is_same_resolution(
90 const struct ia_css_frame_info *info_a, 90 const struct ia_css_frame_info *info_a,
91 const struct ia_css_frame_info *info_b); 91 const struct ia_css_frame_info *info_b);
92 92
93/** @brief Check the frame info is valid 93/* @brief Check the frame info is valid
94 * 94 *
95 * @param 95 * @param
96 * @param[in] info The frame attributes to be initialized 96 * @param[in] info The frame attributes to be initialized
@@ -102,7 +102,7 @@ enum ia_css_err ia_css_frame_check_info(const struct ia_css_frame_info *info);
102**** Frame APIs 102**** Frame APIs
103**********************************************************************/ 103**********************************************************************/
104 104
105/** @brief Initialize the plane depending on the frame type 105/* @brief Initialize the plane depending on the frame type
106 * 106 *
107 * @param 107 * @param
108 * @param[in] frame The frame attributes to be initialized 108 * @param[in] frame The frame attributes to be initialized
@@ -110,7 +110,7 @@ enum ia_css_err ia_css_frame_check_info(const struct ia_css_frame_info *info);
110 */ 110 */
111enum ia_css_err ia_css_frame_init_planes(struct ia_css_frame *frame); 111enum ia_css_err ia_css_frame_init_planes(struct ia_css_frame *frame);
112 112
113/** @brief Free an array of frames 113/* @brief Free an array of frames
114 * 114 *
115 * @param 115 * @param
116 * @param[in] num_frames The number of frames to be freed in the array 116 * @param[in] num_frames The number of frames to be freed in the array
@@ -120,7 +120,7 @@ enum ia_css_err ia_css_frame_init_planes(struct ia_css_frame *frame);
120void ia_css_frame_free_multiple(unsigned int num_frames, 120void ia_css_frame_free_multiple(unsigned int num_frames,
121 struct ia_css_frame **frames_array); 121 struct ia_css_frame **frames_array);
122 122
123/** @brief Allocate a CSS frame structure of given size in bytes.. 123/* @brief Allocate a CSS frame structure of given size in bytes..
124 * 124 *
125 * @param frame The allocated frame. 125 * @param frame The allocated frame.
126 * @param[in] size_bytes The frame size in bytes. 126 * @param[in] size_bytes The frame size in bytes.
@@ -135,7 +135,7 @@ enum ia_css_err ia_css_frame_allocate_with_buffer_size(
135 const unsigned int size_bytes, 135 const unsigned int size_bytes,
136 const bool contiguous); 136 const bool contiguous);
137 137
138/** @brief Check whether 2 frames are same type 138/* @brief Check whether 2 frames are same type
139 * 139 *
140 * @param 140 * @param
141 * @param[in] frame_a The first frame to be compared 141 * @param[in] frame_a The first frame to be compared
@@ -146,7 +146,7 @@ bool ia_css_frame_is_same_type(
146 const struct ia_css_frame *frame_a, 146 const struct ia_css_frame *frame_a,
147 const struct ia_css_frame *frame_b); 147 const struct ia_css_frame *frame_b);
148 148
149/** @brief Configure a dma port from frame info 149/* @brief Configure a dma port from frame info
150 * 150 *
151 * @param 151 * @param
152 * @param[in] config The DAM port configuration 152 * @param[in] config The DAM port configuration
@@ -158,7 +158,7 @@ void ia_css_dma_configure_from_info(
158 const struct ia_css_frame_info *info); 158 const struct ia_css_frame_info *info);
159 159
160#ifdef ISP2401 160#ifdef ISP2401
161/** @brief Finds the cropping resolution 161/* @brief Finds the cropping resolution
162 * This function finds the maximum cropping resolution in an input image keeping 162 * This function finds the maximum cropping resolution in an input image keeping
163 * the aspect ratio for the given output resolution.Calculates the coordinates 163 * the aspect ratio for the given output resolution.Calculates the coordinates
164 * for cropping from the center and returns the starting pixel location of the 164 * for cropping from the center and returns the starting pixel location of the
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/src/frame.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/src/frame.c
index f1a943cf04c0..5faa89ad8a23 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/src/frame.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/src/frame.c
@@ -13,7 +13,7 @@
13 * more details. 13 * more details.
14 */ 14 */
15#else 15#else
16/** 16/*
17Support for Intel Camera Imaging ISP subsystem. 17Support for Intel Camera Imaging ISP subsystem.
18Copyright (c) 2010 - 2015, Intel Corporation. 18Copyright (c) 2010 - 2015, Intel Corporation.
19 19
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c
index 11d3995ba0db..adefa57820a4 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c
@@ -13,7 +13,7 @@
13 * more details. 13 * more details.
14 */ 14 */
15#else 15#else
16/** 16/*
17Support for Intel Camera Imaging ISP subsystem. 17Support for Intel Camera Imaging ISP subsystem.
18Copyright (c) 2010 - 2015, Intel Corporation. 18Copyright (c) 2010 - 2015, Intel Corporation.
19 19
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c
index d9a5f3e9283a..8dc74927e9a2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c
@@ -13,7 +13,7 @@
13 * more details. 13 * more details.
14 */ 14 */
15#else 15#else
16/** 16/*
17Support for Intel Camera Imaging ISP subsystem. 17Support for Intel Camera Imaging ISP subsystem.
18Copyright (c) 2010 - 2015, Intel Corporation. 18Copyright (c) 2010 - 2015, Intel Corporation.
19 19
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param_types.h
index 8e651b80345a..2283dd1c1c9b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param_types.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param_types.h
@@ -53,7 +53,7 @@ enum ia_css_param_class {
53}; 53};
54#define IA_CSS_NUM_PARAM_CLASSES (IA_CSS_PARAM_CLASS_STATE + 1) 54#define IA_CSS_NUM_PARAM_CLASSES (IA_CSS_PARAM_CLASS_STATE + 1)
55 55
56/** ISP parameter descriptor */ 56/* ISP parameter descriptor */
57struct ia_css_isp_parameter { 57struct ia_css_isp_parameter {
58 uint32_t offset; /* Offset in isp_<mem>)parameters, etc. */ 58 uint32_t offset; /* Offset in isp_<mem>)parameters, etc. */
59 uint32_t size; /* Disabled if 0 */ 59 uint32_t size; /* Disabled if 0 */
@@ -77,10 +77,10 @@ struct ia_css_isp_param_isp_segments {
77 77
78/* Memory offsets in binary info */ 78/* Memory offsets in binary info */
79struct ia_css_isp_param_memory_offsets { 79struct ia_css_isp_param_memory_offsets {
80 uint32_t offsets[IA_CSS_NUM_PARAM_CLASSES]; /**< offset wrt hdr in bytes */ 80 uint32_t offsets[IA_CSS_NUM_PARAM_CLASSES]; /** offset wrt hdr in bytes */
81}; 81};
82 82
83/** Offsets for ISP kernel parameters per isp memory. 83/* Offsets for ISP kernel parameters per isp memory.
84 * Only relevant for standard ISP binaries, not ACC or SP. 84 * Only relevant for standard ISP binaries, not ACC or SP.
85 */ 85 */
86union ia_css_all_memory_offsets { 86union ia_css_all_memory_offsets {
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/src/isp_param.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/src/isp_param.c
index 832d9e16edeb..f793ce125f02 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/src/isp_param.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/src/isp_param.c
@@ -13,7 +13,7 @@
13 * more details. 13 * more details.
14 */ 14 */
15#else 15#else
16/** 16/*
17Support for Intel Camera Imaging ISP subsystem. 17Support for Intel Camera Imaging ISP subsystem.
18Copyright (c) 2010 - 2015, Intel Corporation. 18Copyright (c) 2010 - 2015, Intel Corporation.
19 19
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys.h
index 02bf908d94e6..4cf2defe9ef0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys.h
@@ -44,7 +44,7 @@ more details.
44 * Virtual Input System. (Input System 2401) 44 * Virtual Input System. (Input System 2401)
45 */ 45 */
46typedef input_system_cfg_t ia_css_isys_descr_t; 46typedef input_system_cfg_t ia_css_isys_descr_t;
47/** end of Virtual Input System */ 47/* end of Virtual Input System */
48#endif 48#endif
49 49
50#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) 50#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
@@ -112,7 +112,7 @@ unsigned int ia_css_isys_rx_translate_irq_infos(unsigned int bits);
112 112
113#endif /* #if !defined(USE_INPUT_SYSTEM_VERSION_2401) */ 113#endif /* #if !defined(USE_INPUT_SYSTEM_VERSION_2401) */
114 114
115/** @brief Translate format and compression to format type. 115/* @brief Translate format and compression to format type.
116 * 116 *
117 * @param[in] input_format The input format. 117 * @param[in] input_format The input format.
118 * @param[in] compression The compression scheme. 118 * @param[in] compression The compression scheme.
@@ -195,7 +195,7 @@ extern void ia_css_isys_stream2mmio_sid_rmgr_release(
195 stream2mmio_ID_t stream2mmio, 195 stream2mmio_ID_t stream2mmio,
196 stream2mmio_sid_ID_t *sid); 196 stream2mmio_sid_ID_t *sid);
197 197
198/** end of Virtual Input System */ 198/* end of Virtual Input System */
199#endif 199#endif
200 200
201#endif /* __IA_CSS_ISYS_H__ */ 201#endif /* __IA_CSS_ISYS_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.c
index d1d4f79c00f1..3b04dc51335a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.c
@@ -13,7 +13,7 @@
13 * more details. 13 * more details.
14 */ 14 */
15#else 15#else
16/** 16/*
17Support for Intel Camera Imaging ISP subsystem. 17Support for Intel Camera Imaging ISP subsystem.
18Copyright (c) 2010 - 2015, Intel Corporation. 18Copyright (c) 2010 - 2015, Intel Corporation.
19 19
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c
index faef97672eac..d8c3b75d7fac 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c
@@ -13,7 +13,7 @@
13 * more details. 13 * more details.
14 */ 14 */
15#else 15#else
16/** 16/*
17 * Support for Intel Camera Imaging ISP subsystem. 17 * Support for Intel Camera Imaging ISP subsystem.
18 * Copyright (c) 2010 - 2015, Intel Corporation. 18 * Copyright (c) 2010 - 2015, Intel Corporation.
19 * 19 *
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.c
index 5032627342d9..4def4a542b7d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.c
@@ -13,7 +13,7 @@
13 * more details. 13 * more details.
14 */ 14 */
15#else 15#else
16/** 16/*
17Support for Intel Camera Imaging ISP subsystem. 17Support for Intel Camera Imaging ISP subsystem.
18Copyright (c) 2010 - 2015, Intel Corporation. 18Copyright (c) 2010 - 2015, Intel Corporation.
19 19
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_init.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_init.c
index 239ef310bdeb..4122084fd237 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_init.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_init.c
@@ -13,7 +13,7 @@
13 * more details. 13 * more details.
14 */ 14 */
15#else 15#else
16/** 16/*
17Support for Intel Camera Imaging ISP subsystem. 17Support for Intel Camera Imaging ISP subsystem.
18Copyright (c) 2010 - 2015, Intel Corporation. 18Copyright (c) 2010 - 2015, Intel Corporation.
19 19
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.c
index a93c7f44ff12..222b294c0ab0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.c
@@ -13,7 +13,7 @@
13 * more details. 13 * more details.
14 */ 14 */
15#else 15#else
16/** 16/*
17Support for Intel Camera Imaging ISP subsystem. 17Support for Intel Camera Imaging ISP subsystem.
18Copyright (c) 2010 - 2015, Intel Corporation. 18Copyright (c) 2010 - 2015, Intel Corporation.
19 19
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/rx.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/rx.c
index 46a157f64343..70f6cb5e5918 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/rx.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/rx.c
@@ -13,7 +13,7 @@
13 * more details. 13 * more details.
14 */ 14 */
15#else 15#else
16/** 16/*
17Support for Intel Camera Imaging ISP subsystem. 17Support for Intel Camera Imaging ISP subsystem.
18Copyright (c) 2010 - 2015, Intel Corporation. 18Copyright (c) 2010 - 2015, Intel Corporation.
19 19
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.c
index 0f1e8a2f6b10..90922a7acefd 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.c
@@ -13,7 +13,7 @@
13 * more details. 13 * more details.
14 */ 14 */
15#else 15#else
16/** 16/*
17Support for Intel Camera Imaging ISP subsystem. 17Support for Intel Camera Imaging ISP subsystem.
18Copyright (c) 2010 - 2015, Intel Corporation. 18Copyright (c) 2010 - 2015, Intel Corporation.
19 19
@@ -166,7 +166,7 @@ static int32_t calculate_stride(
166 bool raw_packed, 166 bool raw_packed,
167 int32_t align_in_bytes); 167 int32_t align_in_bytes);
168 168
169/** end of Forwarded Declaration */ 169/* end of Forwarded Declaration */
170 170
171/************************************************** 171/**************************************************
172 * 172 *
@@ -292,7 +292,7 @@ ia_css_isys_error_t ia_css_isys_stream_calculate_cfg(
292 return rc; 292 return rc;
293} 293}
294 294
295/** end of Public Methods */ 295/* end of Public Methods */
296 296
297/************************************************** 297/**************************************************
298 * 298 *
@@ -894,5 +894,5 @@ static csi_mipi_packet_type_t get_csi_mipi_packet_type(
894 894
895 return packet_type; 895 return packet_type;
896} 896}
897/** end of Private Methods */ 897/* end of Private Methods */
898#endif 898#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline.h
index 90646f5f8885..e64936e2d46e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline.h
@@ -103,7 +103,7 @@ struct ia_css_pipeline_stage_desc {
103 struct ia_css_frame *vf_frame; 103 struct ia_css_frame *vf_frame;
104}; 104};
105 105
106/** @brief initialize the pipeline module 106/* @brief initialize the pipeline module
107 * 107 *
108 * @return None 108 * @return None
109 * 109 *
@@ -112,7 +112,7 @@ struct ia_css_pipeline_stage_desc {
112 */ 112 */
113void ia_css_pipeline_init(void); 113void ia_css_pipeline_init(void);
114 114
115/** @brief initialize the pipeline structure with default values 115/* @brief initialize the pipeline structure with default values
116 * 116 *
117 * @param[out] pipeline structure to be initialized with defaults 117 * @param[out] pipeline structure to be initialized with defaults
118 * @param[in] pipe_id 118 * @param[in] pipe_id
@@ -129,7 +129,7 @@ enum ia_css_err ia_css_pipeline_create(
129 unsigned int pipe_num, 129 unsigned int pipe_num,
130 unsigned int dvs_frame_delay); 130 unsigned int dvs_frame_delay);
131 131
132/** @brief destroy a pipeline 132/* @brief destroy a pipeline
133 * 133 *
134 * @param[in] pipeline 134 * @param[in] pipeline
135 * @return None 135 * @return None
@@ -138,7 +138,7 @@ enum ia_css_err ia_css_pipeline_create(
138void ia_css_pipeline_destroy(struct ia_css_pipeline *pipeline); 138void ia_css_pipeline_destroy(struct ia_css_pipeline *pipeline);
139 139
140 140
141/** @brief Starts a pipeline 141/* @brief Starts a pipeline
142 * 142 *
143 * @param[in] pipe_id 143 * @param[in] pipe_id
144 * @param[in] pipeline 144 * @param[in] pipeline
@@ -148,7 +148,7 @@ void ia_css_pipeline_destroy(struct ia_css_pipeline *pipeline);
148void ia_css_pipeline_start(enum ia_css_pipe_id pipe_id, 148void ia_css_pipeline_start(enum ia_css_pipe_id pipe_id,
149 struct ia_css_pipeline *pipeline); 149 struct ia_css_pipeline *pipeline);
150 150
151/** @brief Request to stop a pipeline 151/* @brief Request to stop a pipeline
152 * 152 *
153 * @param[in] pipeline 153 * @param[in] pipeline
154 * @return IA_CSS_SUCCESS or error code upon error. 154 * @return IA_CSS_SUCCESS or error code upon error.
@@ -156,7 +156,7 @@ void ia_css_pipeline_start(enum ia_css_pipe_id pipe_id,
156 */ 156 */
157enum ia_css_err ia_css_pipeline_request_stop(struct ia_css_pipeline *pipeline); 157enum ia_css_err ia_css_pipeline_request_stop(struct ia_css_pipeline *pipeline);
158 158
159/** @brief Check whether pipeline has stopped 159/* @brief Check whether pipeline has stopped
160 * 160 *
161 * @param[in] pipeline 161 * @param[in] pipeline
162 * @return true if the pipeline has stopped 162 * @return true if the pipeline has stopped
@@ -164,7 +164,7 @@ enum ia_css_err ia_css_pipeline_request_stop(struct ia_css_pipeline *pipeline);
164 */ 164 */
165bool ia_css_pipeline_has_stopped(struct ia_css_pipeline *pipe); 165bool ia_css_pipeline_has_stopped(struct ia_css_pipeline *pipe);
166 166
167/** @brief clean all the stages pipeline and make it as new 167/* @brief clean all the stages pipeline and make it as new
168 * 168 *
169 * @param[in] pipeline 169 * @param[in] pipeline
170 * @return None 170 * @return None
@@ -172,7 +172,7 @@ bool ia_css_pipeline_has_stopped(struct ia_css_pipeline *pipe);
172 */ 172 */
173void ia_css_pipeline_clean(struct ia_css_pipeline *pipeline); 173void ia_css_pipeline_clean(struct ia_css_pipeline *pipeline);
174 174
175/** @brief Add a stage to pipeline. 175/* @brief Add a stage to pipeline.
176 * 176 *
177 * @param pipeline Pointer to the pipeline to be added to. 177 * @param pipeline Pointer to the pipeline to be added to.
178 * @param[in] stage_desc The description of the stage 178 * @param[in] stage_desc The description of the stage
@@ -188,7 +188,7 @@ enum ia_css_err ia_css_pipeline_create_and_add_stage(
188 struct ia_css_pipeline_stage_desc *stage_desc, 188 struct ia_css_pipeline_stage_desc *stage_desc,
189 struct ia_css_pipeline_stage **stage); 189 struct ia_css_pipeline_stage **stage);
190 190
191/** @brief Finalize the stages in a pipeline 191/* @brief Finalize the stages in a pipeline
192 * 192 *
193 * @param pipeline Pointer to the pipeline to be added to. 193 * @param pipeline Pointer to the pipeline to be added to.
194 * @return None 194 * @return None
@@ -198,7 +198,7 @@ enum ia_css_err ia_css_pipeline_create_and_add_stage(
198void ia_css_pipeline_finalize_stages(struct ia_css_pipeline *pipeline, 198void ia_css_pipeline_finalize_stages(struct ia_css_pipeline *pipeline,
199 bool continuous); 199 bool continuous);
200 200
201/** @brief gets a stage from the pipeline 201/* @brief gets a stage from the pipeline
202 * 202 *
203 * @param[in] pipeline 203 * @param[in] pipeline
204 * @return IA_CSS_SUCCESS or error code upon error. 204 * @return IA_CSS_SUCCESS or error code upon error.
@@ -208,7 +208,7 @@ enum ia_css_err ia_css_pipeline_get_stage(struct ia_css_pipeline *pipeline,
208 int mode, 208 int mode,
209 struct ia_css_pipeline_stage **stage); 209 struct ia_css_pipeline_stage **stage);
210 210
211/** @brief Gets a pipeline stage corresponding Firmware handle from the pipeline 211/* @brief Gets a pipeline stage corresponding Firmware handle from the pipeline
212 * 212 *
213 * @param[in] pipeline 213 * @param[in] pipeline
214 * @param[in] fw_handle 214 * @param[in] fw_handle
@@ -221,7 +221,7 @@ enum ia_css_err ia_css_pipeline_get_stage_from_fw(struct ia_css_pipeline *pipeli
221 uint32_t fw_handle, 221 uint32_t fw_handle,
222 struct ia_css_pipeline_stage **stage); 222 struct ia_css_pipeline_stage **stage);
223 223
224/** @brief Gets the Firmware handle correponding the stage num from the pipeline 224/* @brief Gets the Firmware handle correponding the stage num from the pipeline
225 * 225 *
226 * @param[in] pipeline 226 * @param[in] pipeline
227 * @param[in] stage_num 227 * @param[in] stage_num
@@ -234,7 +234,7 @@ enum ia_css_err ia_css_pipeline_get_fw_from_stage(struct ia_css_pipeline *pipeli
234 uint32_t stage_num, 234 uint32_t stage_num,
235 uint32_t *fw_handle); 235 uint32_t *fw_handle);
236 236
237/** @brief gets the output stage from the pipeline 237/* @brief gets the output stage from the pipeline
238 * 238 *
239 * @param[in] pipeline 239 * @param[in] pipeline
240 * @return IA_CSS_SUCCESS or error code upon error. 240 * @return IA_CSS_SUCCESS or error code upon error.
@@ -245,7 +245,7 @@ enum ia_css_err ia_css_pipeline_get_output_stage(
245 int mode, 245 int mode,
246 struct ia_css_pipeline_stage **stage); 246 struct ia_css_pipeline_stage **stage);
247 247
248/** @brief Checks whether the pipeline uses params 248/* @brief Checks whether the pipeline uses params
249 * 249 *
250 * @param[in] pipeline 250 * @param[in] pipeline
251 * @return true if the pipeline uses params 251 * @return true if the pipeline uses params
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c
index 62d13978475d..8f93d29d1c51 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c
@@ -13,7 +13,7 @@
13 * more details. 13 * more details.
14 */ 14 */
15#else 15#else
16/** 16/*
17Support for Intel Camera Imaging ISP subsystem. 17Support for Intel Camera Imaging ISP subsystem.
18Copyright (c) 2010 - 2015, Intel Corporation. 18Copyright (c) 2010 - 2015, Intel Corporation.
19 19
@@ -114,7 +114,7 @@ void ia_css_pipeline_map(unsigned int pipe_num, bool map)
114 IA_CSS_LEAVE_PRIVATE("void"); 114 IA_CSS_LEAVE_PRIVATE("void");
115} 115}
116 116
117/** @brief destroy a pipeline 117/* @brief destroy a pipeline
118 * 118 *
119 * @param[in] pipeline 119 * @param[in] pipeline
120 * @return None 120 * @return None
@@ -187,7 +187,7 @@ void ia_css_pipeline_start(enum ia_css_pipe_id pipe_id,
187 "ia_css_pipeline_start() leave: return_void\n"); 187 "ia_css_pipeline_start() leave: return_void\n");
188} 188}
189 189
190/** 190/*
191 * @brief Query the SP thread ID. 191 * @brief Query the SP thread ID.
192 * Refer to "sh_css_internal.h" for details. 192 * Refer to "sh_css_internal.h" for details.
193 */ 193 */
@@ -285,7 +285,7 @@ void ia_css_pipeline_clean(struct ia_css_pipeline *pipeline)
285 IA_CSS_LEAVE_PRIVATE("void"); 285 IA_CSS_LEAVE_PRIVATE("void");
286} 286}
287 287
288/** @brief Add a stage to pipeline. 288/* @brief Add a stage to pipeline.
289 * 289 *
290 * @param pipeline Pointer to the pipeline to be added to. 290 * @param pipeline Pointer to the pipeline to be added to.
291 * @param[in] stage_desc The description of the stage 291 * @param[in] stage_desc The description of the stage
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue.h
index e50a0f813753..aaf2e247cafb 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue.h
@@ -51,7 +51,7 @@ typedef struct ia_css_queue ia_css_queue_t;
51/***************************************************************************** 51/*****************************************************************************
52 * Queue Public APIs 52 * Queue Public APIs
53 *****************************************************************************/ 53 *****************************************************************************/
54/** @brief Initialize a local queue instance. 54/* @brief Initialize a local queue instance.
55 * 55 *
56 * @param[out] qhandle. Handle to queue instance for use with API 56 * @param[out] qhandle. Handle to queue instance for use with API
57 * @param[in] desc. Descriptor with queue properties filled-in 57 * @param[in] desc. Descriptor with queue properties filled-in
@@ -63,7 +63,7 @@ extern int ia_css_queue_local_init(
63 ia_css_queue_t *qhandle, 63 ia_css_queue_t *qhandle,
64 ia_css_queue_local_t *desc); 64 ia_css_queue_local_t *desc);
65 65
66/** @brief Initialize a remote queue instance 66/* @brief Initialize a remote queue instance
67 * 67 *
68 * @param[out] qhandle. Handle to queue instance for use with API 68 * @param[out] qhandle. Handle to queue instance for use with API
69 * @param[in] desc. Descriptor with queue properties filled-in 69 * @param[in] desc. Descriptor with queue properties filled-in
@@ -74,7 +74,7 @@ extern int ia_css_queue_remote_init(
74 ia_css_queue_t *qhandle, 74 ia_css_queue_t *qhandle,
75 ia_css_queue_remote_t *desc); 75 ia_css_queue_remote_t *desc);
76 76
77/** @brief Uninitialize a queue instance 77/* @brief Uninitialize a queue instance
78 * 78 *
79 * @param[in] qhandle. Handle to queue instance 79 * @param[in] qhandle. Handle to queue instance
80 * @return 0 - Successful uninit. 80 * @return 0 - Successful uninit.
@@ -83,7 +83,7 @@ extern int ia_css_queue_remote_init(
83extern int ia_css_queue_uninit( 83extern int ia_css_queue_uninit(
84 ia_css_queue_t *qhandle); 84 ia_css_queue_t *qhandle);
85 85
86/** @brief Enqueue an item in the queue instance 86/* @brief Enqueue an item in the queue instance
87 * 87 *
88 * @param[in] qhandle. Handle to queue instance 88 * @param[in] qhandle. Handle to queue instance
89 * @param[in] item. Object to be enqueued. 89 * @param[in] item. Object to be enqueued.
@@ -96,7 +96,7 @@ extern int ia_css_queue_enqueue(
96 ia_css_queue_t *qhandle, 96 ia_css_queue_t *qhandle,
97 uint32_t item); 97 uint32_t item);
98 98
99/** @brief Dequeue an item from the queue instance 99/* @brief Dequeue an item from the queue instance
100 * 100 *
101 * @param[in] qhandle. Handle to queue instance 101 * @param[in] qhandle. Handle to queue instance
102 * @param[out] item. Object to be dequeued into this item. 102 * @param[out] item. Object to be dequeued into this item.
@@ -110,7 +110,7 @@ extern int ia_css_queue_dequeue(
110 ia_css_queue_t *qhandle, 110 ia_css_queue_t *qhandle,
111 uint32_t *item); 111 uint32_t *item);
112 112
113/** @brief Check if the queue is empty 113/* @brief Check if the queue is empty
114 * 114 *
115 * @param[in] qhandle. Handle to queue instance 115 * @param[in] qhandle. Handle to queue instance
116 * @param[in] is_empty True if empty, False if not. 116 * @param[in] is_empty True if empty, False if not.
@@ -123,7 +123,7 @@ extern int ia_css_queue_is_empty(
123 ia_css_queue_t *qhandle, 123 ia_css_queue_t *qhandle,
124 bool *is_empty); 124 bool *is_empty);
125 125
126/** @brief Check if the queue is full 126/* @brief Check if the queue is full
127 * 127 *
128 * @param[in] qhandle. Handle to queue instance 128 * @param[in] qhandle. Handle to queue instance
129 * @param[in] is_full True if Full, False if not. 129 * @param[in] is_full True if Full, False if not.
@@ -136,7 +136,7 @@ extern int ia_css_queue_is_full(
136 ia_css_queue_t *qhandle, 136 ia_css_queue_t *qhandle,
137 bool *is_full); 137 bool *is_full);
138 138
139/** @brief Get used space in the queue 139/* @brief Get used space in the queue
140 * 140 *
141 * @param[in] qhandle. Handle to queue instance 141 * @param[in] qhandle. Handle to queue instance
142 * @param[in] size Number of available elements in the queue 142 * @param[in] size Number of available elements in the queue
@@ -148,7 +148,7 @@ extern int ia_css_queue_get_used_space(
148 ia_css_queue_t *qhandle, 148 ia_css_queue_t *qhandle,
149 uint32_t *size); 149 uint32_t *size);
150 150
151/** @brief Get free space in the queue 151/* @brief Get free space in the queue
152 * 152 *
153 * @param[in] qhandle. Handle to queue instance 153 * @param[in] qhandle. Handle to queue instance
154 * @param[in] size Number of free elements in the queue 154 * @param[in] size Number of free elements in the queue
@@ -160,7 +160,7 @@ extern int ia_css_queue_get_free_space(
160 ia_css_queue_t *qhandle, 160 ia_css_queue_t *qhandle,
161 uint32_t *size); 161 uint32_t *size);
162 162
163/** @brief Peek at an element in the queue 163/* @brief Peek at an element in the queue
164 * 164 *
165 * @param[in] qhandle. Handle to queue instance 165 * @param[in] qhandle. Handle to queue instance
166 * @param[in] offset Offset of element to peek, 166 * @param[in] offset Offset of element to peek,
@@ -175,7 +175,7 @@ extern int ia_css_queue_peek(
175 uint32_t offset, 175 uint32_t offset,
176 uint32_t *element); 176 uint32_t *element);
177 177
178/** @brief Get the usable size for the queue 178/* @brief Get the usable size for the queue
179 * 179 *
180 * @param[in] qhandle. Handle to queue instance 180 * @param[in] qhandle. Handle to queue instance
181 * @param[out] size Size value to be returned here. 181 * @param[out] size Size value to be returned here.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.c
index 946d4f2d2108..7bb2b494836e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.c
@@ -13,7 +13,7 @@
13 * more details. 13 * more details.
14 */ 14 */
15#else 15#else
16/** 16/*
17Support for Intel Camera Imaging ISP subsystem. 17Support for Intel Camera Imaging ISP subsystem.
18Copyright (c) 2010 - 2015, Intel Corporation. 18Copyright (c) 2010 - 2015, Intel Corporation.
19 19
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr.c
index efa9c140484f..370ff3816dbe 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr.c
@@ -13,7 +13,7 @@
13 * more details. 13 * more details.
14 */ 14 */
15#else 15#else
16/** 16/*
17Support for Intel Camera Imaging ISP subsystem. 17Support for Intel Camera Imaging ISP subsystem.
18Copyright (c) 2010 - 2015, Intel Corporation. 18Copyright (c) 2010 - 2015, Intel Corporation.
19 19
@@ -44,7 +44,7 @@ enum ia_css_err ia_css_rmgr_init(void)
44 return err; 44 return err;
45} 45}
46 46
47/** 47/*
48 * @brief Uninitialize resource pool (host) 48 * @brief Uninitialize resource pool (host)
49 */ 49 */
50void ia_css_rmgr_uninit(void) 50void ia_css_rmgr_uninit(void)
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c
index e56006c07ee8..54239ac9d7c9 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c
@@ -20,13 +20,13 @@
20#include <memory_access.h> /* mmmgr_malloc, mhmm_free */ 20#include <memory_access.h> /* mmmgr_malloc, mhmm_free */
21#include <ia_css_debug.h> 21#include <ia_css_debug.h>
22 22
23/** 23/*
24 * @brief VBUF resource handles 24 * @brief VBUF resource handles
25 */ 25 */
26#define NUM_HANDLES 1000 26#define NUM_HANDLES 1000
27struct ia_css_rmgr_vbuf_handle handle_table[NUM_HANDLES]; 27struct ia_css_rmgr_vbuf_handle handle_table[NUM_HANDLES];
28 28
29/** 29/*
30 * @brief VBUF resource pool - refpool 30 * @brief VBUF resource pool - refpool
31 */ 31 */
32struct ia_css_rmgr_vbuf_pool refpool = { 32struct ia_css_rmgr_vbuf_pool refpool = {
@@ -37,7 +37,7 @@ struct ia_css_rmgr_vbuf_pool refpool = {
37 NULL, /* handles */ 37 NULL, /* handles */
38}; 38};
39 39
40/** 40/*
41 * @brief VBUF resource pool - writepool 41 * @brief VBUF resource pool - writepool
42 */ 42 */
43struct ia_css_rmgr_vbuf_pool writepool = { 43struct ia_css_rmgr_vbuf_pool writepool = {
@@ -48,7 +48,7 @@ struct ia_css_rmgr_vbuf_pool writepool = {
48 NULL, /* handles */ 48 NULL, /* handles */
49}; 49};
50 50
51/** 51/*
52 * @brief VBUF resource pool - hmmbufferpool 52 * @brief VBUF resource pool - hmmbufferpool
53 */ 53 */
54struct ia_css_rmgr_vbuf_pool hmmbufferpool = { 54struct ia_css_rmgr_vbuf_pool hmmbufferpool = {
@@ -63,7 +63,7 @@ struct ia_css_rmgr_vbuf_pool *vbuf_ref = &refpool;
63struct ia_css_rmgr_vbuf_pool *vbuf_write = &writepool; 63struct ia_css_rmgr_vbuf_pool *vbuf_write = &writepool;
64struct ia_css_rmgr_vbuf_pool *hmm_buffer_pool = &hmmbufferpool; 64struct ia_css_rmgr_vbuf_pool *hmm_buffer_pool = &hmmbufferpool;
65 65
66/** 66/*
67 * @brief Initialize the reference count (host, vbuf) 67 * @brief Initialize the reference count (host, vbuf)
68 */ 68 */
69static void rmgr_refcount_init_vbuf(void) 69static void rmgr_refcount_init_vbuf(void)
@@ -72,7 +72,7 @@ static void rmgr_refcount_init_vbuf(void)
72 memset(&handle_table, 0, sizeof(handle_table)); 72 memset(&handle_table, 0, sizeof(handle_table));
73} 73}
74 74
75/** 75/*
76 * @brief Retain the reference count for a handle (host, vbuf) 76 * @brief Retain the reference count for a handle (host, vbuf)
77 * 77 *
78 * @param handle The pointer to the handle 78 * @param handle The pointer to the handle
@@ -109,7 +109,7 @@ void ia_css_rmgr_refcount_retain_vbuf(struct ia_css_rmgr_vbuf_handle **handle)
109 (*handle)->count++; 109 (*handle)->count++;
110} 110}
111 111
112/** 112/*
113 * @brief Release the reference count for a handle (host, vbuf) 113 * @brief Release the reference count for a handle (host, vbuf)
114 * 114 *
115 * @param handle The pointer to the handle 115 * @param handle The pointer to the handle
@@ -131,7 +131,7 @@ void ia_css_rmgr_refcount_release_vbuf(struct ia_css_rmgr_vbuf_handle **handle)
131 } 131 }
132} 132}
133 133
134/** 134/*
135 * @brief Initialize the resource pool (host, vbuf) 135 * @brief Initialize the resource pool (host, vbuf)
136 * 136 *
137 * @param pool The pointer to the pool 137 * @param pool The pointer to the pool
@@ -163,7 +163,7 @@ enum ia_css_err ia_css_rmgr_init_vbuf(struct ia_css_rmgr_vbuf_pool *pool)
163 return err; 163 return err;
164} 164}
165 165
166/** 166/*
167 * @brief Uninitialize the resource pool (host, vbuf) 167 * @brief Uninitialize the resource pool (host, vbuf)
168 * 168 *
169 * @param pool The pointer to the pool 169 * @param pool The pointer to the pool
@@ -197,7 +197,7 @@ void ia_css_rmgr_uninit_vbuf(struct ia_css_rmgr_vbuf_pool *pool)
197 } 197 }
198} 198}
199 199
200/** 200/*
201 * @brief Push a handle to the pool 201 * @brief Push a handle to the pool
202 * 202 *
203 * @param pool The pointer to the pool 203 * @param pool The pointer to the pool
@@ -224,7 +224,7 @@ void rmgr_push_handle(struct ia_css_rmgr_vbuf_pool *pool,
224 assert(succes); 224 assert(succes);
225} 225}
226 226
227/** 227/*
228 * @brief Pop a handle from the pool 228 * @brief Pop a handle from the pool
229 * 229 *
230 * @param pool The pointer to the pool 230 * @param pool The pointer to the pool
@@ -254,7 +254,7 @@ void rmgr_pop_handle(struct ia_css_rmgr_vbuf_pool *pool,
254 } 254 }
255} 255}
256 256
257/** 257/*
258 * @brief Acquire a handle from the pool (host, vbuf) 258 * @brief Acquire a handle from the pool (host, vbuf)
259 * 259 *
260 * @param pool The pointer to the pool 260 * @param pool The pointer to the pool
@@ -302,7 +302,7 @@ void ia_css_rmgr_acq_vbuf(struct ia_css_rmgr_vbuf_pool *pool,
302 ia_css_rmgr_refcount_retain_vbuf(handle); 302 ia_css_rmgr_refcount_retain_vbuf(handle);
303} 303}
304 304
305/** 305/*
306 * @brief Release a handle to the pool (host, vbuf) 306 * @brief Release a handle to the pool (host, vbuf)
307 * 307 *
308 * @param pool The pointer to the pool 308 * @param pool The pointer to the pool
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl.h
index 27e9eb1e2102..bc4b1723369e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl.h
@@ -37,17 +37,17 @@ more details.
37 37
38 38
39typedef struct { 39typedef struct {
40 uint32_t ddr_data_offset; /**< posistion of data in DDR */ 40 uint32_t ddr_data_offset; /** posistion of data in DDR */
41 uint32_t dmem_data_addr; /**< data segment address in dmem */ 41 uint32_t dmem_data_addr; /** data segment address in dmem */
42 uint32_t dmem_bss_addr; /**< bss segment address in dmem */ 42 uint32_t dmem_bss_addr; /** bss segment address in dmem */
43 uint32_t data_size; /**< data segment size */ 43 uint32_t data_size; /** data segment size */
44 uint32_t bss_size; /**< bss segment size */ 44 uint32_t bss_size; /** bss segment size */
45 uint32_t spctrl_config_dmem_addr; /** <location of dmem_cfg in SP dmem */ 45 uint32_t spctrl_config_dmem_addr; /* <location of dmem_cfg in SP dmem */
46 uint32_t spctrl_state_dmem_addr; /** < location of state in SP dmem */ 46 uint32_t spctrl_state_dmem_addr; /* < location of state in SP dmem */
47 unsigned int sp_entry; /** < entry function ptr on SP */ 47 unsigned int sp_entry; /* < entry function ptr on SP */
48 const void *code; /**< location of firmware */ 48 const void *code; /** location of firmware */
49 uint32_t code_size; 49 uint32_t code_size;
50 char *program_name; /**< not used on hardware, only for simulation */ 50 char *program_name; /** not used on hardware, only for simulation */
51} ia_css_spctrl_cfg; 51} ia_css_spctrl_cfg;
52 52
53/* Get the code addr in DDR of SP */ 53/* Get the code addr in DDR of SP */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl_comm.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl_comm.h
index 3af2891efca7..2620d7514f79 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl_comm.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl_comm.h
@@ -41,16 +41,16 @@ typedef enum {
41 IA_CSS_SP_SW_RUNNING 41 IA_CSS_SP_SW_RUNNING
42} ia_css_spctrl_sp_sw_state; 42} ia_css_spctrl_sp_sw_state;
43 43
44/** Structure to encapsulate required arguments for 44/* Structure to encapsulate required arguments for
45 * initialization of SP DMEM using the SP itself 45 * initialization of SP DMEM using the SP itself
46 */ 46 */
47struct ia_css_sp_init_dmem_cfg { 47struct ia_css_sp_init_dmem_cfg {
48 ia_css_ptr ddr_data_addr; /**< data segment address in ddr */ 48 ia_css_ptr ddr_data_addr; /** data segment address in ddr */
49 uint32_t dmem_data_addr; /**< data segment address in dmem */ 49 uint32_t dmem_data_addr; /** data segment address in dmem */
50 uint32_t dmem_bss_addr; /**< bss segment address in dmem */ 50 uint32_t dmem_bss_addr; /** bss segment address in dmem */
51 uint32_t data_size; /**< data segment size */ 51 uint32_t data_size; /** data segment size */
52 uint32_t bss_size; /**< bss segment size */ 52 uint32_t bss_size; /** bss segment size */
53 sp_ID_t sp_id; /** <sp Id */ 53 sp_ID_t sp_id; /* <sp Id */
54}; 54};
55 55
56#define SIZE_OF_IA_CSS_SP_INIT_DMEM_CFG_STRUCT \ 56#define SIZE_OF_IA_CSS_SP_INIT_DMEM_CFG_STRUCT \
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c
index 6d9bceb60196..844e4d536cec 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c
@@ -13,7 +13,7 @@
13 * more details. 13 * more details.
14 */ 14 */
15#else 15#else
16/** 16/*
17Support for Intel Camera Imaging ISP subsystem. 17Support for Intel Camera Imaging ISP subsystem.
18Copyright (c) 2010 - 2015, Intel Corporation. 18Copyright (c) 2010 - 2015, Intel Corporation.
19 19
@@ -39,7 +39,7 @@ more details.
39 39
40struct spctrl_context_info { 40struct spctrl_context_info {
41 struct ia_css_sp_init_dmem_cfg dmem_config; 41 struct ia_css_sp_init_dmem_cfg dmem_config;
42 uint32_t spctrl_config_dmem_addr; /** location of dmem_cfg in SP dmem */ 42 uint32_t spctrl_config_dmem_addr; /* location of dmem_cfg in SP dmem */
43 uint32_t spctrl_state_dmem_addr; 43 uint32_t spctrl_state_dmem_addr;
44 unsigned int sp_entry; /* entry function ptr on SP */ 44 unsigned int sp_entry; /* entry function ptr on SP */
45 hrt_vaddress code_addr; /* sp firmware location in host mem-DDR*/ 45 hrt_vaddress code_addr; /* sp firmware location in host mem-DDR*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/timer/src/timer.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/timer/src/timer.c
index 49c69e60ca5c..b7dd18492a91 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/timer/src/timer.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/timer/src/timer.c
@@ -13,7 +13,7 @@
13 * more details. 13 * more details.
14 */ 14 */
15#else 15#else
16/** 16/*
17Support for Intel Camera Imaging ISP subsystem. 17Support for Intel Camera Imaging ISP subsystem.
18Copyright (c) 2010 - 2015, Intel Corporation. 18Copyright (c) 2010 - 2015, Intel Corporation.
19 19
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c
index f92b6a9f77eb..322bb3de6098 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c
@@ -176,7 +176,7 @@ static struct sh_css_hmm_buffer_record hmm_buffer_record[MAX_HMM_BUFFER_NUM];
176 176
177static bool fw_explicitly_loaded = false; 177static bool fw_explicitly_loaded = false;
178 178
179/** 179/*
180 * Local prototypes 180 * Local prototypes
181 */ 181 */
182 182
@@ -187,7 +187,7 @@ static enum ia_css_err
187sh_css_pipe_start(struct ia_css_stream *stream); 187sh_css_pipe_start(struct ia_css_stream *stream);
188 188
189#ifdef ISP2401 189#ifdef ISP2401
190/** 190/*
191 * @brief Stop all "ia_css_pipe" instances in the target 191 * @brief Stop all "ia_css_pipe" instances in the target
192 * "ia_css_stream" instance. 192 * "ia_css_stream" instance.
193 * 193 *
@@ -207,7 +207,7 @@ sh_css_pipe_start(struct ia_css_stream *stream);
207static enum ia_css_err 207static enum ia_css_err
208sh_css_pipes_stop(struct ia_css_stream *stream); 208sh_css_pipes_stop(struct ia_css_stream *stream);
209 209
210/** 210/*
211 * @brief Check if all "ia_css_pipe" instances in the target 211 * @brief Check if all "ia_css_pipe" instances in the target
212 * "ia_css_stream" instance have stopped. 212 * "ia_css_stream" instance have stopped.
213 * 213 *
@@ -1649,7 +1649,7 @@ ia_css_init(const struct ia_css_env *env,
1649 void (*flush_func)(struct ia_css_acc_fw *fw); 1649 void (*flush_func)(struct ia_css_acc_fw *fw);
1650 hrt_data select, enable; 1650 hrt_data select, enable;
1651 1651
1652 /** 1652 /*
1653 * The C99 standard does not specify the exact object representation of structs; 1653 * The C99 standard does not specify the exact object representation of structs;
1654 * the representation is compiler dependent. 1654 * the representation is compiler dependent.
1655 * 1655 *
@@ -4617,23 +4617,23 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
4617 * 4) "enum ia_css_event_type convert_event_sp_to_host_domain" (sh_css.c) 4617 * 4) "enum ia_css_event_type convert_event_sp_to_host_domain" (sh_css.c)
4618 */ 4618 */
4619static enum ia_css_event_type convert_event_sp_to_host_domain[] = { 4619static enum ia_css_event_type convert_event_sp_to_host_domain[] = {
4620 IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE, /**< Output frame ready. */ 4620 IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE, /** Output frame ready. */
4621 IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE, /**< Second output frame ready. */ 4621 IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE, /** Second output frame ready. */
4622 IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE, /**< Viewfinder Output frame ready. */ 4622 IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE, /** Viewfinder Output frame ready. */
4623 IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE, /**< Second viewfinder Output frame ready. */ 4623 IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE, /** Second viewfinder Output frame ready. */
4624 IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE, /**< Indication that 3A statistics are available. */ 4624 IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE, /** Indication that 3A statistics are available. */
4625 IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE, /**< Indication that DIS statistics are available. */ 4625 IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE, /** Indication that DIS statistics are available. */
4626 IA_CSS_EVENT_TYPE_PIPELINE_DONE, /**< Pipeline Done event, sent after last pipeline stage. */ 4626 IA_CSS_EVENT_TYPE_PIPELINE_DONE, /** Pipeline Done event, sent after last pipeline stage. */
4627 IA_CSS_EVENT_TYPE_FRAME_TAGGED, /**< Frame tagged. */ 4627 IA_CSS_EVENT_TYPE_FRAME_TAGGED, /** Frame tagged. */
4628 IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE, /**< Input frame ready. */ 4628 IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE, /** Input frame ready. */
4629 IA_CSS_EVENT_TYPE_METADATA_DONE, /**< Metadata ready. */ 4629 IA_CSS_EVENT_TYPE_METADATA_DONE, /** Metadata ready. */
4630 IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE, /**< Indication that LACE statistics are available. */ 4630 IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE, /** Indication that LACE statistics are available. */
4631 IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE, /**< Extension stage executed. */ 4631 IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE, /** Extension stage executed. */
4632 IA_CSS_EVENT_TYPE_TIMER, /**< Timing measurement data. */ 4632 IA_CSS_EVENT_TYPE_TIMER, /** Timing measurement data. */
4633 IA_CSS_EVENT_TYPE_PORT_EOF, /**< End Of Frame event, sent when in buffered sensor mode. */ 4633 IA_CSS_EVENT_TYPE_PORT_EOF, /** End Of Frame event, sent when in buffered sensor mode. */
4634 IA_CSS_EVENT_TYPE_FW_WARNING, /**< Performance warning encountered by FW */ 4634 IA_CSS_EVENT_TYPE_FW_WARNING, /** Performance warning encountered by FW */
4635 IA_CSS_EVENT_TYPE_FW_ASSERT, /**< Assertion hit by FW */ 4635 IA_CSS_EVENT_TYPE_FW_ASSERT, /** Assertion hit by FW */
4636 0, /** error if sp passes SH_CSS_SP_EVENT_NR_OF_TYPES as a valid event. */ 4636 0, /* error if sp passes SH_CSS_SP_EVENT_NR_OF_TYPES as a valid event. */
4637}; 4637};
4638 4638
4639enum ia_css_err 4639enum ia_css_err
@@ -5028,7 +5028,7 @@ sh_css_enable_cont_capt(bool enable, bool stop_copy_preview)
5028bool 5028bool
5029sh_css_continuous_is_enabled(uint8_t pipe_num) 5029sh_css_continuous_is_enabled(uint8_t pipe_num)
5030#else 5030#else
5031/** 5031/*
5032 * @brief Stop all "ia_css_pipe" instances in the target 5032 * @brief Stop all "ia_css_pipe" instances in the target
5033 * "ia_css_stream" instance. 5033 * "ia_css_stream" instance.
5034 * 5034 *
@@ -5107,7 +5107,7 @@ ia_css_stream_set_buffer_depth(struct ia_css_stream *stream, int buffer_depth)
5107 return IA_CSS_SUCCESS; 5107 return IA_CSS_SUCCESS;
5108} 5108}
5109#else 5109#else
5110 /** 5110 /*
5111 * Stop all "ia_css_pipe" instances in this target 5111 * Stop all "ia_css_pipe" instances in this target
5112 * "ia_css_stream" instance. 5112 * "ia_css_stream" instance.
5113 */ 5113 */
@@ -5146,7 +5146,7 @@ ia_css_stream_get_buffer_depth(struct ia_css_stream *stream, int *buffer_depth)
5146 } 5146 }
5147 } 5147 }
5148 5148
5149 /** 5149 /*
5150 * In the CSS firmware use scenario "Continuous Preview" 5150 * In the CSS firmware use scenario "Continuous Preview"
5151 * as well as "Continuous Video", the "ia_css_pipe" instance 5151 * as well as "Continuous Video", the "ia_css_pipe" instance
5152 * "Copy Pipe" is activated. This "Copy Pipe" is private to 5152 * "Copy Pipe" is activated. This "Copy Pipe" is private to
@@ -5183,7 +5183,7 @@ ERR:
5183 return err; 5183 return err;
5184} 5184}
5185 5185
5186/** 5186/*
5187 * @brief Check if all "ia_css_pipe" instances in the target 5187 * @brief Check if all "ia_css_pipe" instances in the target
5188 * "ia_css_stream" instance have stopped. 5188 * "ia_css_stream" instance have stopped.
5189 * 5189 *
@@ -5218,7 +5218,7 @@ sh_css_pipes_have_stopped(struct ia_css_stream *stream)
5218 main_pipe_id = main_pipe->mode; 5218 main_pipe_id = main_pipe->mode;
5219 IA_CSS_ENTER_PRIVATE("main_pipe_id=%d", main_pipe_id); 5219 IA_CSS_ENTER_PRIVATE("main_pipe_id=%d", main_pipe_id);
5220 5220
5221 /** 5221 /*
5222 * Check if every "ia_css_pipe" instance in this target 5222 * Check if every "ia_css_pipe" instance in this target
5223 * "ia_css_stream" instance has stopped. 5223 * "ia_css_stream" instance has stopped.
5224 */ 5224 */
@@ -5229,7 +5229,7 @@ sh_css_pipes_have_stopped(struct ia_css_stream *stream)
5229 rval); 5229 rval);
5230 } 5230 }
5231 5231
5232 /** 5232 /*
5233 * In the CSS firmware use scenario "Continuous Preview" 5233 * In the CSS firmware use scenario "Continuous Preview"
5234 * as well as "Continuous Video", the "ia_css_pipe" instance 5234 * as well as "Continuous Video", the "ia_css_pipe" instance
5235 * "Copy Pipe" is activated. This "Copy Pipe" is private to 5235 * "Copy Pipe" is activated. This "Copy Pipe" is private to
@@ -5474,7 +5474,7 @@ ERR:
5474} 5474}
5475 5475
5476#ifdef ISP2401 5476#ifdef ISP2401
5477/** 5477/*
5478 * @brief Check if a format is supported by the pipe. 5478 * @brief Check if a format is supported by the pipe.
5479 * 5479 *
5480 */ 5480 */
@@ -8626,7 +8626,7 @@ sh_css_pipeline_add_acc_stage(struct ia_css_pipeline *pipeline,
8626 return err; 8626 return err;
8627} 8627}
8628 8628
8629/** 8629/*
8630 * @brief Tag a specific frame in continuous capture. 8630 * @brief Tag a specific frame in continuous capture.
8631 * Refer to "sh_css_internal.h" for details. 8631 * Refer to "sh_css_internal.h" for details.
8632 */ 8632 */
@@ -8666,7 +8666,7 @@ enum ia_css_err ia_css_stream_capture_frame(struct ia_css_stream *stream,
8666 return err; 8666 return err;
8667} 8667}
8668 8668
8669/** 8669/*
8670 * @brief Configure the continuous capture. 8670 * @brief Configure the continuous capture.
8671 * Refer to "sh_css_internal.h" for details. 8671 * Refer to "sh_css_internal.h" for details.
8672 */ 8672 */
@@ -8822,7 +8822,7 @@ sh_css_init_host_sp_control_vars(void)
8822 "sh_css_init_host_sp_control_vars() leave: return_void\n"); 8822 "sh_css_init_host_sp_control_vars() leave: return_void\n");
8823} 8823}
8824 8824
8825/** 8825/*
8826 * create the internal structures and fill in the configuration data 8826 * create the internal structures and fill in the configuration data
8827 */ 8827 */
8828void ia_css_pipe_config_defaults(struct ia_css_pipe_config *pipe_config) 8828void ia_css_pipe_config_defaults(struct ia_css_pipe_config *pipe_config)
@@ -10435,7 +10435,7 @@ ia_css_start_sp(void)
10435 return err; 10435 return err;
10436} 10436}
10437 10437
10438/** 10438/*
10439 * Time to wait SP for termincate. Only condition when this can happen 10439 * Time to wait SP for termincate. Only condition when this can happen
10440 * is a fatal hw failure, but we must be able to detect this and emit 10440 * is a fatal hw failure, but we must be able to detect this and emit
10441 * a proper error trace. 10441 * a proper error trace.
@@ -10713,7 +10713,7 @@ ia_css_unlock_raw_frame(struct ia_css_stream *stream, uint32_t exp_id)
10713 return ret; 10713 return ret;
10714} 10714}
10715 10715
10716/** @brief Set the state (Enable or Disable) of the Extension stage in the 10716/* @brief Set the state (Enable or Disable) of the Extension stage in the
10717 * given pipe. 10717 * given pipe.
10718 */ 10718 */
10719enum ia_css_err 10719enum ia_css_err
@@ -10758,7 +10758,7 @@ ia_css_pipe_set_qos_ext_state(struct ia_css_pipe *pipe, uint32_t fw_handle, bool
10758 return err; 10758 return err;
10759} 10759}
10760 10760
10761/** @brief Get the state (Enable or Disable) of the Extension stage in the 10761/* @brief Get the state (Enable or Disable) of the Extension stage in the
10762 * given pipe. 10762 * given pipe.
10763 */ 10763 */
10764enum ia_css_err 10764enum ia_css_err
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h
index 0910021286a4..161122e1bcbc 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h
@@ -188,7 +188,7 @@ enum host2sp_commands {
188 N_host2sp_cmd 188 N_host2sp_cmd
189}; 189};
190 190
191/** Enumeration used to indicate the events that are produced by 191/* Enumeration used to indicate the events that are produced by
192 * the SP and consumed by the Host. 192 * the SP and consumed by the Host.
193 * 193 *
194 * !!!IMPORTANT!!! KEEP THE FOLLOWING IN SYNC: 194 * !!!IMPORTANT!!! KEEP THE FOLLOWING IN SYNC:
@@ -274,10 +274,10 @@ struct sh_css_ddr_address_map_compound {
274}; 274};
275 275
276struct ia_css_isp_parameter_set_info { 276struct ia_css_isp_parameter_set_info {
277 struct sh_css_ddr_address_map mem_map;/**< pointers to Parameters in ISP format IMPT: 277 struct sh_css_ddr_address_map mem_map;/** pointers to Parameters in ISP format IMPT:
278 This should be first member of this struct */ 278 This should be first member of this struct */
279 uint32_t isp_parameters_id;/**< Unique ID to track which config was actually applied to a particular frame */ 279 uint32_t isp_parameters_id;/** Unique ID to track which config was actually applied to a particular frame */
280 ia_css_ptr output_frame_ptr;/**< Output frame to which this config has to be applied (optional) */ 280 ia_css_ptr output_frame_ptr;/** Output frame to which this config has to be applied (optional) */
281}; 281};
282 282
283/* this struct contains all arguments that can be passed to 283/* this struct contains all arguments that can be passed to
@@ -398,9 +398,9 @@ struct sh_css_sp_input_formatter_set {
398/* SP configuration information */ 398/* SP configuration information */
399struct sh_css_sp_config { 399struct sh_css_sp_config {
400 uint8_t no_isp_sync; /* Signal host immediately after start */ 400 uint8_t no_isp_sync; /* Signal host immediately after start */
401 uint8_t enable_raw_pool_locking; /**< Enable Raw Buffer Locking for HALv3 Support */ 401 uint8_t enable_raw_pool_locking; /** Enable Raw Buffer Locking for HALv3 Support */
402 uint8_t lock_all; 402 uint8_t lock_all;
403 /**< If raw buffer locking is enabled, this flag indicates whether raw 403 /** If raw buffer locking is enabled, this flag indicates whether raw
404 frames are locked when their EOF event is successfully sent to the 404 frames are locked when their EOF event is successfully sent to the
405 host (true) or when they are passed to the preview/video pipe 405 host (true) or when they are passed to the preview/video pipe
406 (false). */ 406 (false). */
@@ -458,13 +458,13 @@ struct sh_css_sp_pipeline_io {
458 /*struct sh_css_sp_pipeline_terminal output;*/ 458 /*struct sh_css_sp_pipeline_terminal output;*/
459}; 459};
460 460
461/** This struct tracks how many streams are registered per CSI port. 461/* This struct tracks how many streams are registered per CSI port.
462 * This is used to track which streams have already been configured. 462 * This is used to track which streams have already been configured.
463 * Only when all streams are configured, the CSI RX is started for that port. 463 * Only when all streams are configured, the CSI RX is started for that port.
464 */ 464 */
465struct sh_css_sp_pipeline_io_status { 465struct sh_css_sp_pipeline_io_status {
466 uint32_t active[N_INPUT_SYSTEM_CSI_PORT]; /**< registered streams */ 466 uint32_t active[N_INPUT_SYSTEM_CSI_PORT]; /** registered streams */
467 uint32_t running[N_INPUT_SYSTEM_CSI_PORT]; /**< configured streams */ 467 uint32_t running[N_INPUT_SYSTEM_CSI_PORT]; /** configured streams */
468}; 468};
469 469
470#endif 470#endif
@@ -500,7 +500,7 @@ enum sh_css_port_type {
500#define SH_CSS_METADATA_OFFLINE_MODE 0x04 500#define SH_CSS_METADATA_OFFLINE_MODE 0x04
501#define SH_CSS_METADATA_WAIT_INPUT 0x08 501#define SH_CSS_METADATA_WAIT_INPUT 0x08
502 502
503/** @brief Free an array of metadata buffers. 503/* @brief Free an array of metadata buffers.
504 * 504 *
505 * @param[in] num_bufs Number of metadata buffers to be freed. 505 * @param[in] num_bufs Number of metadata buffers to be freed.
506 * @param[in] bufs Pointer of array of metadata buffers. 506 * @param[in] bufs Pointer of array of metadata buffers.
@@ -764,7 +764,7 @@ struct sh_css_hmm_buffer {
764 hrt_vaddress frame_data; 764 hrt_vaddress frame_data;
765 uint32_t flashed; 765 uint32_t flashed;
766 uint32_t exp_id; 766 uint32_t exp_id;
767 uint32_t isp_parameters_id; /**< Unique ID to track which config was 767 uint32_t isp_parameters_id; /** Unique ID to track which config was
768 actually applied to a particular frame */ 768 actually applied to a particular frame */
769#if CONFIG_ON_FRAME_ENQUEUE() 769#if CONFIG_ON_FRAME_ENQUEUE()
770 struct sh_css_config_on_frame_enqueue config_on_frame_enqueue; 770 struct sh_css_config_on_frame_enqueue config_on_frame_enqueue;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_legacy.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_legacy.h
index e12789236bb9..4bcc35d219f8 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_legacy.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_legacy.h
@@ -22,7 +22,7 @@
22#include <ia_css_pipe_public.h> 22#include <ia_css_pipe_public.h>
23#include <ia_css_stream_public.h> 23#include <ia_css_stream_public.h>
24 24
25/** The pipe id type, distinguishes the kind of pipes that 25/* The pipe id type, distinguishes the kind of pipes that
26 * can be run in parallel. 26 * can be run in parallel.
27 */ 27 */
28enum ia_css_pipe_id { 28enum ia_css_pipe_id {
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.c
index 36aaa3019a15..883474e90c81 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.c
@@ -321,7 +321,7 @@ calculate_mipi_buff_size(
321 height = stream_cfg->input_config.input_res.height; 321 height = stream_cfg->input_config.input_res.height;
322 format = stream_cfg->input_config.format; 322 format = stream_cfg->input_config.format;
323 pack_raw_pixels = stream_cfg->pack_raw_pixels; 323 pack_raw_pixels = stream_cfg->pack_raw_pixels;
324 /** end of NOTE */ 324 /* end of NOTE */
325 325
326 /** 326 /**
327#ifndef ISP2401 327#ifndef ISP2401
@@ -341,7 +341,7 @@ calculate_mipi_buff_size(
341 * in the non-continuous use scenario. 341 * in the non-continuous use scenario.
342 */ 342 */
343 width_padded = width + (2 * ISP_VEC_NELEMS); 343 width_padded = width + (2 * ISP_VEC_NELEMS);
344 /** end of NOTE */ 344 /* end of NOTE */
345 345
346 IA_CSS_ENTER("padded_width=%d, height=%d, format=%d\n", 346 IA_CSS_ENTER("padded_width=%d, height=%d, format=%d\n",
347 width_padded, height, format); 347 width_padded, height, format);
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.h
index a7ffe6d8331b..270ec2b60a3e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.h
@@ -144,8 +144,8 @@ struct ia_css_isp_parameters {
144 struct sh_css_ddr_address_map_size pipe_ddr_ptrs_size[IA_CSS_PIPE_ID_NUM]; 144 struct sh_css_ddr_address_map_size pipe_ddr_ptrs_size[IA_CSS_PIPE_ID_NUM];
145 struct sh_css_ddr_address_map ddr_ptrs; 145 struct sh_css_ddr_address_map ddr_ptrs;
146 struct sh_css_ddr_address_map_size ddr_ptrs_size; 146 struct sh_css_ddr_address_map_size ddr_ptrs_size;
147 struct ia_css_frame *output_frame; /**< Output frame the config is to be applied to (optional) */ 147 struct ia_css_frame *output_frame; /** Output frame the config is to be applied to (optional) */
148 uint32_t isp_parameters_id; /**< Unique ID to track which config was actually applied to a particular frame */ 148 uint32_t isp_parameters_id; /** Unique ID to track which config was actually applied to a particular frame */
149}; 149};
150 150
151void 151void
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.c
index e6a345979ff1..6fc00fc402b1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.c
@@ -261,7 +261,7 @@ sh_css_sp_start_raw_copy(struct ia_css_frame *out_frame,
261 assert(out_frame != NULL); 261 assert(out_frame != NULL);
262 262
263 { 263 {
264 /** 264 /*
265 * Clear sh_css_sp_stage for easy debugging. 265 * Clear sh_css_sp_stage for easy debugging.
266 * program_input_circuit must be saved as it is set outside 266 * program_input_circuit must be saved as it is set outside
267 * this function. 267 * this function.
@@ -335,7 +335,7 @@ sh_css_sp_start_isys_copy(struct ia_css_frame *out_frame,
335 assert(out_frame != NULL); 335 assert(out_frame != NULL);
336 336
337 { 337 {
338 /** 338 /*
339 * Clear sh_css_sp_stage for easy debugging. 339 * Clear sh_css_sp_stage for easy debugging.
340 * program_input_circuit must be saved as it is set outside 340 * program_input_circuit must be saved as it is set outside
341 * this function. 341 * this function.
@@ -909,7 +909,7 @@ sh_css_sp_init_stage(struct ia_css_binary *binary,
909 xinfo = binary->info; 909 xinfo = binary->info;
910 info = &xinfo->sp; 910 info = &xinfo->sp;
911 { 911 {
912 /** 912 /*
913 * Clear sh_css_sp_stage for easy debugging. 913 * Clear sh_css_sp_stage for easy debugging.
914 * program_input_circuit must be saved as it is set outside 914 * program_input_circuit must be saved as it is set outside
915 * this function. 915 * this function.
@@ -980,7 +980,7 @@ sh_css_sp_init_stage(struct ia_css_binary *binary,
980 sh_css_isp_stage.binary_name[SH_CSS_MAX_BINARY_NAME - 1] = 0; 980 sh_css_isp_stage.binary_name[SH_CSS_MAX_BINARY_NAME - 1] = 0;
981 sh_css_isp_stage.mem_initializers = *isp_mem_if; 981 sh_css_isp_stage.mem_initializers = *isp_mem_if;
982 982
983 /** 983 /*
984 * Even when a stage does not need uds and does not params, 984 * Even when a stage does not need uds and does not params,
985 * ia_css_uds_sp_scale_params() seems to be called (needs 985 * ia_css_uds_sp_scale_params() seems to be called (needs
986 * further investigation). This function can not deal with 986 * further investigation). This function can not deal with
@@ -1429,7 +1429,7 @@ sh_css_init_host2sp_frame_data(void)
1429} 1429}
1430 1430
1431 1431
1432/** 1432/*
1433 * @brief Update the offline frame information in host_sp_communication. 1433 * @brief Update the offline frame information in host_sp_communication.
1434 * Refer to "sh_css_sp.h" for more details. 1434 * Refer to "sh_css_sp.h" for more details.
1435 */ 1435 */
@@ -1461,7 +1461,7 @@ sh_css_update_host2sp_offline_frame(
1461} 1461}
1462 1462
1463#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) 1463#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
1464/** 1464/*
1465 * @brief Update the mipi frame information in host_sp_communication. 1465 * @brief Update the mipi frame information in host_sp_communication.
1466 * Refer to "sh_css_sp.h" for more details. 1466 * Refer to "sh_css_sp.h" for more details.
1467 */ 1467 */
@@ -1488,7 +1488,7 @@ sh_css_update_host2sp_mipi_frame(
1488 frame ? frame->data : 0); 1488 frame ? frame->data : 0);
1489} 1489}
1490 1490
1491/** 1491/*
1492 * @brief Update the mipi metadata information in host_sp_communication. 1492 * @brief Update the mipi metadata information in host_sp_communication.
1493 * Refer to "sh_css_sp.h" for more details. 1493 * Refer to "sh_css_sp.h" for more details.
1494 */ 1494 */
@@ -1735,7 +1735,7 @@ ia_css_isp_has_started(void)
1735} 1735}
1736 1736
1737 1737
1738/** 1738/*
1739 * @brief Initialize the DMA software-mask in the debug mode. 1739 * @brief Initialize the DMA software-mask in the debug mode.
1740 * Refer to "sh_css_sp.h" for more details. 1740 * Refer to "sh_css_sp.h" for more details.
1741 */ 1741 */
@@ -1761,7 +1761,7 @@ sh_css_sp_init_dma_sw_reg(int dma_id)
1761 return true; 1761 return true;
1762} 1762}
1763 1763
1764/** 1764/*
1765 * @brief Set the DMA software-mask in the debug mode. 1765 * @brief Set the DMA software-mask in the debug mode.
1766 * Refer to "sh_css_sp.h" for more details. 1766 * Refer to "sh_css_sp.h" for more details.
1767 */ 1767 */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_struct.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_struct.h
index e49e478ab354..0b8e3d872069 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_struct.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_struct.h
@@ -61,7 +61,7 @@ struct sh_css {
61#endif 61#endif
62 hrt_vaddress sp_bin_addr; 62 hrt_vaddress sp_bin_addr;
63 hrt_data page_table_base_index; 63 hrt_data page_table_base_index;
64 unsigned int size_mem_words; /** \deprecated{Use ia_css_mipi_buffer_config instead.}*/ 64 unsigned int size_mem_words; /* \deprecated{Use ia_css_mipi_buffer_config instead.}*/
65 enum ia_css_irq_type irq_type; 65 enum ia_css_irq_type irq_type;
66 unsigned int pipe_counter; 66 unsigned int pipe_counter;
67 67
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index 068aece25d37..cded30f145aa 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -394,7 +394,7 @@ struct octeon_hcd {
394 result = -1; \ 394 result = -1; \
395 break; \ 395 break; \
396 } else \ 396 } else \
397 cvmx_wait(100); \ 397 __delay(100); \
398 } \ 398 } \
399 } while (0); \ 399 } while (0); \
400 result; }) 400 result; })
@@ -774,7 +774,7 @@ retry:
774 usbn_clk_ctl.s.hclk_rst = 1; 774 usbn_clk_ctl.s.hclk_rst = 1;
775 cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64); 775 cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
776 /* 2e. Wait 64 core-clock cycles for HCLK to stabilize */ 776 /* 2e. Wait 64 core-clock cycles for HCLK to stabilize */
777 cvmx_wait(64); 777 __delay(64);
778 /* 778 /*
779 * 3. Program the power-on reset field in the USBN clock-control 779 * 3. Program the power-on reset field in the USBN clock-control
780 * register: 780 * register:
@@ -795,7 +795,7 @@ retry:
795 cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index), 795 cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index),
796 usbn_usbp_ctl_status.u64); 796 usbn_usbp_ctl_status.u64);
797 /* 6. Wait 10 cycles */ 797 /* 6. Wait 10 cycles */
798 cvmx_wait(10); 798 __delay(10);
799 /* 799 /*
800 * 7. Clear ATE_RESET field in the USBN clock-control register: 800 * 7. Clear ATE_RESET field in the USBN clock-control register:
801 * USBN_USBP_CTL_STATUS[ATE_RESET] = 0 801 * USBN_USBP_CTL_STATUS[ATE_RESET] = 0
diff --git a/drivers/staging/pi433/rf69.c b/drivers/staging/pi433/rf69.c
index e69a2153c999..12c9df9cddde 100644
--- a/drivers/staging/pi433/rf69.c
+++ b/drivers/staging/pi433/rf69.c
@@ -102,7 +102,7 @@ enum modulation rf69_get_modulation(struct spi_device *spi)
102 102
103 currentValue = READ_REG(REG_DATAMODUL); 103 currentValue = READ_REG(REG_DATAMODUL);
104 104
105 switch (currentValue & MASK_DATAMODUL_MODULATION_TYPE >> 3) { // TODO improvement: change 3 to define 105 switch (currentValue & MASK_DATAMODUL_MODULATION_TYPE) {
106 case DATAMODUL_MODULATION_TYPE_OOK: return OOK; 106 case DATAMODUL_MODULATION_TYPE_OOK: return OOK;
107 case DATAMODUL_MODULATION_TYPE_FSK: return FSK; 107 case DATAMODUL_MODULATION_TYPE_FSK: return FSK;
108 default: return undefined; 108 default: return undefined;
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index c0664dc80bf2..446310775e90 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -1395,19 +1395,13 @@ static int rtw_wx_get_essid(struct net_device *dev,
1395 if ((check_fwstate(pmlmepriv, _FW_LINKED)) || 1395 if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
1396 (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) { 1396 (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) {
1397 len = pcur_bss->Ssid.SsidLength; 1397 len = pcur_bss->Ssid.SsidLength;
1398
1399 wrqu->essid.length = len;
1400
1401 memcpy(extra, pcur_bss->Ssid.Ssid, len); 1398 memcpy(extra, pcur_bss->Ssid.Ssid, len);
1402
1403 wrqu->essid.flags = 1;
1404 } else { 1399 } else {
1405 ret = -1; 1400 len = 0;
1406 goto exit; 1401 *extra = 0;
1407 } 1402 }
1408 1403 wrqu->essid.length = len;
1409exit: 1404 wrqu->essid.flags = 1;
1410
1411 1405
1412 return ret; 1406 return ret;
1413} 1407}
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 7c69b4a9694d..0d99b242e82e 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -920,7 +920,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
920 " %d i: %d bio: %p, allocating another" 920 " %d i: %d bio: %p, allocating another"
921 " bio\n", bio->bi_vcnt, i, bio); 921 " bio\n", bio->bi_vcnt, i, bio);
922 922
923 rc = blk_rq_append_bio(req, bio); 923 rc = blk_rq_append_bio(req, &bio);
924 if (rc) { 924 if (rc) {
925 pr_err("pSCSI: failed to append bio\n"); 925 pr_err("pSCSI: failed to append bio\n");
926 goto fail; 926 goto fail;
@@ -938,7 +938,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
938 } 938 }
939 939
940 if (bio) { 940 if (bio) {
941 rc = blk_rq_append_bio(req, bio); 941 rc = blk_rq_append_bio(req, &bio);
942 if (rc) { 942 if (rc) {
943 pr_err("pSCSI: failed to append bio\n"); 943 pr_err("pSCSI: failed to append bio\n");
944 goto fail; 944 goto fail;
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 7952357df9c8..edb6e4e9ef3a 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -590,7 +590,6 @@ static int __init optee_driver_init(void)
590 return -ENODEV; 590 return -ENODEV;
591 591
592 np = of_find_matching_node(fw_np, optee_match); 592 np = of_find_matching_node(fw_np, optee_match);
593 of_node_put(fw_np);
594 if (!np) 593 if (!np)
595 return -ENODEV; 594 return -ENODEV;
596 595
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 419a7a90bce0..f45bcbc63738 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -339,7 +339,7 @@ static void __ring_interrupt(struct tb_ring *ring)
339 return; 339 return;
340 340
341 if (ring->start_poll) { 341 if (ring->start_poll) {
342 __ring_interrupt_mask(ring, false); 342 __ring_interrupt_mask(ring, true);
343 ring->start_poll(ring->poll_data); 343 ring->start_poll(ring->poll_data);
344 } else { 344 } else {
345 schedule_work(&ring->work); 345 schedule_work(&ring->work);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 427e0d5d8f13..539b49adb6af 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1762,7 +1762,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
1762{ 1762{
1763 struct n_tty_data *ldata = tty->disc_data; 1763 struct n_tty_data *ldata = tty->disc_data;
1764 1764
1765 if (!old || (old->c_lflag ^ tty->termios.c_lflag) & ICANON) { 1765 if (!old || (old->c_lflag ^ tty->termios.c_lflag) & (ICANON | EXTPROC)) {
1766 bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE); 1766 bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
1767 ldata->line_start = ldata->read_tail; 1767 ldata->line_start = ldata->read_tail;
1768 if (!L_ICANON(tty) || !read_cnt(ldata)) { 1768 if (!L_ICANON(tty) || !read_cnt(ldata)) {
@@ -2425,7 +2425,7 @@ static int n_tty_ioctl(struct tty_struct *tty, struct file *file,
2425 return put_user(tty_chars_in_buffer(tty), (int __user *) arg); 2425 return put_user(tty_chars_in_buffer(tty), (int __user *) arg);
2426 case TIOCINQ: 2426 case TIOCINQ:
2427 down_write(&tty->termios_rwsem); 2427 down_write(&tty->termios_rwsem);
2428 if (L_ICANON(tty)) 2428 if (L_ICANON(tty) && !L_EXTPROC(tty))
2429 retval = inq_canon(ldata); 2429 retval = inq_canon(ldata);
2430 else 2430 else
2431 retval = read_cnt(ldata); 2431 retval = read_cnt(ldata);
diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
index ce7ad0acee7a..247788a16f0b 100644
--- a/drivers/tty/serdev/serdev-ttyport.c
+++ b/drivers/tty/serdev/serdev-ttyport.c
@@ -27,23 +27,41 @@ static int ttyport_receive_buf(struct tty_port *port, const unsigned char *cp,
27{ 27{
28 struct serdev_controller *ctrl = port->client_data; 28 struct serdev_controller *ctrl = port->client_data;
29 struct serport *serport = serdev_controller_get_drvdata(ctrl); 29 struct serport *serport = serdev_controller_get_drvdata(ctrl);
30 int ret;
30 31
31 if (!test_bit(SERPORT_ACTIVE, &serport->flags)) 32 if (!test_bit(SERPORT_ACTIVE, &serport->flags))
32 return 0; 33 return 0;
33 34
34 return serdev_controller_receive_buf(ctrl, cp, count); 35 ret = serdev_controller_receive_buf(ctrl, cp, count);
36
37 dev_WARN_ONCE(&ctrl->dev, ret < 0 || ret > count,
38 "receive_buf returns %d (count = %zu)\n",
39 ret, count);
40 if (ret < 0)
41 return 0;
42 else if (ret > count)
43 return count;
44
45 return ret;
35} 46}
36 47
37static void ttyport_write_wakeup(struct tty_port *port) 48static void ttyport_write_wakeup(struct tty_port *port)
38{ 49{
39 struct serdev_controller *ctrl = port->client_data; 50 struct serdev_controller *ctrl = port->client_data;
40 struct serport *serport = serdev_controller_get_drvdata(ctrl); 51 struct serport *serport = serdev_controller_get_drvdata(ctrl);
52 struct tty_struct *tty;
53
54 tty = tty_port_tty_get(port);
55 if (!tty)
56 return;
41 57
42 if (test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &port->tty->flags) && 58 if (test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags) &&
43 test_bit(SERPORT_ACTIVE, &serport->flags)) 59 test_bit(SERPORT_ACTIVE, &serport->flags))
44 serdev_controller_write_wakeup(ctrl); 60 serdev_controller_write_wakeup(ctrl);
45 61
46 wake_up_interruptible_poll(&port->tty->write_wait, POLLOUT); 62 wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
63
64 tty_kref_put(tty);
47} 65}
48 66
49static const struct tty_port_client_operations client_ops = { 67static const struct tty_port_client_operations client_ops = {
@@ -136,8 +154,10 @@ static void ttyport_close(struct serdev_controller *ctrl)
136 154
137 clear_bit(SERPORT_ACTIVE, &serport->flags); 155 clear_bit(SERPORT_ACTIVE, &serport->flags);
138 156
157 tty_lock(tty);
139 if (tty->ops->close) 158 if (tty->ops->close)
140 tty->ops->close(tty, NULL); 159 tty->ops->close(tty, NULL);
160 tty_unlock(tty);
141 161
142 tty_release_struct(tty, serport->tty_idx); 162 tty_release_struct(tty, serport->tty_idx);
143} 163}
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index 362c25ff188a..ae6a256524d8 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -122,12 +122,14 @@ static void __init init_port(struct earlycon_device *device)
122 serial8250_early_out(port, UART_FCR, 0); /* no fifo */ 122 serial8250_early_out(port, UART_FCR, 0); /* no fifo */
123 serial8250_early_out(port, UART_MCR, 0x3); /* DTR + RTS */ 123 serial8250_early_out(port, UART_MCR, 0x3); /* DTR + RTS */
124 124
125 divisor = DIV_ROUND_CLOSEST(port->uartclk, 16 * device->baud); 125 if (port->uartclk && device->baud) {
126 c = serial8250_early_in(port, UART_LCR); 126 divisor = DIV_ROUND_CLOSEST(port->uartclk, 16 * device->baud);
127 serial8250_early_out(port, UART_LCR, c | UART_LCR_DLAB); 127 c = serial8250_early_in(port, UART_LCR);
128 serial8250_early_out(port, UART_DLL, divisor & 0xff); 128 serial8250_early_out(port, UART_LCR, c | UART_LCR_DLAB);
129 serial8250_early_out(port, UART_DLM, (divisor >> 8) & 0xff); 129 serial8250_early_out(port, UART_DLL, divisor & 0xff);
130 serial8250_early_out(port, UART_LCR, c & ~UART_LCR_DLAB); 130 serial8250_early_out(port, UART_DLM, (divisor >> 8) & 0xff);
131 serial8250_early_out(port, UART_LCR, c & ~UART_LCR_DLAB);
132 }
131} 133}
132 134
133int __init early_serial8250_setup(struct earlycon_device *device, 135int __init early_serial8250_setup(struct earlycon_device *device,
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index b7e0e3416641..54adf8d56350 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -5135,6 +5135,9 @@ static const struct pci_device_id serial_pci_tbl[] = {
5135 { PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 }, 5135 { PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 },
5136 { PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 }, 5136 { PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 },
5137 5137
5138 /* Amazon PCI serial device */
5139 { PCI_DEVICE(0x1d0f, 0x8250), .driver_data = pbn_b0_1_115200 },
5140
5138 /* 5141 /*
5139 * These entries match devices with class COMMUNICATION_SERIAL, 5142 * These entries match devices with class COMMUNICATION_SERIAL,
5140 * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL 5143 * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c
index 3593ce0ec641..880009987460 100644
--- a/drivers/usb/chipidea/ci_hdrc_msm.c
+++ b/drivers/usb/chipidea/ci_hdrc_msm.c
@@ -247,7 +247,7 @@ static int ci_hdrc_msm_probe(struct platform_device *pdev)
247 if (ret) 247 if (ret)
248 goto err_mux; 248 goto err_mux;
249 249
250 ulpi_node = of_find_node_by_name(of_node_get(pdev->dev.of_node), "ulpi"); 250 ulpi_node = of_get_child_by_name(pdev->dev.of_node, "ulpi");
251 if (ulpi_node) { 251 if (ulpi_node) {
252 phy_node = of_get_next_available_child(ulpi_node, NULL); 252 phy_node = of_get_next_available_child(ulpi_node, NULL);
253 ci->hsic = of_device_is_compatible(phy_node, "qcom,usb-hsic-phy"); 253 ci->hsic = of_device_is_compatible(phy_node, "qcom,usb-hsic-phy");
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index 8b351444cc40..9a2ab6751a23 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -180,9 +180,9 @@ static int ulpi_of_register(struct ulpi *ulpi)
180 /* Find a ulpi bus underneath the parent or the grandparent */ 180 /* Find a ulpi bus underneath the parent or the grandparent */
181 parent = ulpi->dev.parent; 181 parent = ulpi->dev.parent;
182 if (parent->of_node) 182 if (parent->of_node)
183 np = of_find_node_by_name(parent->of_node, "ulpi"); 183 np = of_get_child_by_name(parent->of_node, "ulpi");
184 else if (parent->parent && parent->parent->of_node) 184 else if (parent->parent && parent->parent->of_node)
185 np = of_find_node_by_name(parent->parent->of_node, "ulpi"); 185 np = of_get_child_by_name(parent->parent->of_node, "ulpi");
186 if (!np) 186 if (!np)
187 return 0; 187 return 0;
188 188
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index da8acd980fc6..c821b4b9647e 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -555,6 +555,9 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
555 unsigned iad_num = 0; 555 unsigned iad_num = 0;
556 556
557 memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE); 557 memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
558 nintf = nintf_orig = config->desc.bNumInterfaces;
559 config->desc.bNumInterfaces = 0; // Adjusted later
560
558 if (config->desc.bDescriptorType != USB_DT_CONFIG || 561 if (config->desc.bDescriptorType != USB_DT_CONFIG ||
559 config->desc.bLength < USB_DT_CONFIG_SIZE || 562 config->desc.bLength < USB_DT_CONFIG_SIZE ||
560 config->desc.bLength > size) { 563 config->desc.bLength > size) {
@@ -568,7 +571,6 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
568 buffer += config->desc.bLength; 571 buffer += config->desc.bLength;
569 size -= config->desc.bLength; 572 size -= config->desc.bLength;
570 573
571 nintf = nintf_orig = config->desc.bNumInterfaces;
572 if (nintf > USB_MAXINTERFACES) { 574 if (nintf > USB_MAXINTERFACES) {
573 dev_warn(ddev, "config %d has too many interfaces: %d, " 575 dev_warn(ddev, "config %d has too many interfaces: %d, "
574 "using maximum allowed: %d\n", 576 "using maximum allowed: %d\n",
@@ -905,14 +907,25 @@ void usb_release_bos_descriptor(struct usb_device *dev)
905 } 907 }
906} 908}
907 909
910static const __u8 bos_desc_len[256] = {
911 [USB_CAP_TYPE_WIRELESS_USB] = USB_DT_USB_WIRELESS_CAP_SIZE,
912 [USB_CAP_TYPE_EXT] = USB_DT_USB_EXT_CAP_SIZE,
913 [USB_SS_CAP_TYPE] = USB_DT_USB_SS_CAP_SIZE,
914 [USB_SSP_CAP_TYPE] = USB_DT_USB_SSP_CAP_SIZE(1),
915 [CONTAINER_ID_TYPE] = USB_DT_USB_SS_CONTN_ID_SIZE,
916 [USB_PTM_CAP_TYPE] = USB_DT_USB_PTM_ID_SIZE,
917};
918
908/* Get BOS descriptor set */ 919/* Get BOS descriptor set */
909int usb_get_bos_descriptor(struct usb_device *dev) 920int usb_get_bos_descriptor(struct usb_device *dev)
910{ 921{
911 struct device *ddev = &dev->dev; 922 struct device *ddev = &dev->dev;
912 struct usb_bos_descriptor *bos; 923 struct usb_bos_descriptor *bos;
913 struct usb_dev_cap_header *cap; 924 struct usb_dev_cap_header *cap;
925 struct usb_ssp_cap_descriptor *ssp_cap;
914 unsigned char *buffer; 926 unsigned char *buffer;
915 int length, total_len, num, i; 927 int length, total_len, num, i, ssac;
928 __u8 cap_type;
916 int ret; 929 int ret;
917 930
918 bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL); 931 bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL);
@@ -965,7 +978,13 @@ int usb_get_bos_descriptor(struct usb_device *dev)
965 dev->bos->desc->bNumDeviceCaps = i; 978 dev->bos->desc->bNumDeviceCaps = i;
966 break; 979 break;
967 } 980 }
981 cap_type = cap->bDevCapabilityType;
968 length = cap->bLength; 982 length = cap->bLength;
983 if (bos_desc_len[cap_type] && length < bos_desc_len[cap_type]) {
984 dev->bos->desc->bNumDeviceCaps = i;
985 break;
986 }
987
969 total_len -= length; 988 total_len -= length;
970 989
971 if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) { 990 if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
@@ -973,7 +992,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
973 continue; 992 continue;
974 } 993 }
975 994
976 switch (cap->bDevCapabilityType) { 995 switch (cap_type) {
977 case USB_CAP_TYPE_WIRELESS_USB: 996 case USB_CAP_TYPE_WIRELESS_USB:
978 /* Wireless USB cap descriptor is handled by wusb */ 997 /* Wireless USB cap descriptor is handled by wusb */
979 break; 998 break;
@@ -986,8 +1005,11 @@ int usb_get_bos_descriptor(struct usb_device *dev)
986 (struct usb_ss_cap_descriptor *)buffer; 1005 (struct usb_ss_cap_descriptor *)buffer;
987 break; 1006 break;
988 case USB_SSP_CAP_TYPE: 1007 case USB_SSP_CAP_TYPE:
989 dev->bos->ssp_cap = 1008 ssp_cap = (struct usb_ssp_cap_descriptor *)buffer;
990 (struct usb_ssp_cap_descriptor *)buffer; 1009 ssac = (le32_to_cpu(ssp_cap->bmAttributes) &
1010 USB_SSP_SUBLINK_SPEED_ATTRIBS);
1011 if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac))
1012 dev->bos->ssp_cap = ssp_cap;
991 break; 1013 break;
992 case CONTAINER_ID_TYPE: 1014 case CONTAINER_ID_TYPE:
993 dev->bos->ss_id = 1015 dev->bos->ss_id =
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 705c573d0257..a3fad4ec9870 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1442,14 +1442,18 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
1442 int number_of_packets = 0; 1442 int number_of_packets = 0;
1443 unsigned int stream_id = 0; 1443 unsigned int stream_id = 0;
1444 void *buf; 1444 void *buf;
1445 1445 unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK |
1446 if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP |
1447 USBDEVFS_URB_SHORT_NOT_OK |
1448 USBDEVFS_URB_BULK_CONTINUATION | 1446 USBDEVFS_URB_BULK_CONTINUATION |
1449 USBDEVFS_URB_NO_FSBR | 1447 USBDEVFS_URB_NO_FSBR |
1450 USBDEVFS_URB_ZERO_PACKET | 1448 USBDEVFS_URB_ZERO_PACKET |
1451 USBDEVFS_URB_NO_INTERRUPT)) 1449 USBDEVFS_URB_NO_INTERRUPT;
1452 return -EINVAL; 1450 /* USBDEVFS_URB_ISO_ASAP is a special case */
1451 if (uurb->type == USBDEVFS_URB_TYPE_ISO)
1452 mask |= USBDEVFS_URB_ISO_ASAP;
1453
1454 if (uurb->flags & ~mask)
1455 return -EINVAL;
1456
1453 if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX) 1457 if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX)
1454 return -EINVAL; 1458 return -EINVAL;
1455 if (uurb->buffer_length > 0 && !uurb->buffer) 1459 if (uurb->buffer_length > 0 && !uurb->buffer)
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 7ccdd3d4db84..cf7bbcb9a63c 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4948,6 +4948,15 @@ loop:
4948 usb_put_dev(udev); 4948 usb_put_dev(udev);
4949 if ((status == -ENOTCONN) || (status == -ENOTSUPP)) 4949 if ((status == -ENOTCONN) || (status == -ENOTSUPP))
4950 break; 4950 break;
4951
4952 /* When halfway through our retry count, power-cycle the port */
4953 if (i == (SET_CONFIG_TRIES / 2) - 1) {
4954 dev_info(&port_dev->dev, "attempt power cycle\n");
4955 usb_hub_set_port_power(hdev, hub, port1, false);
4956 msleep(2 * hub_power_on_good_delay(hub));
4957 usb_hub_set_port_power(hdev, hub, port1, true);
4958 msleep(hub_power_on_good_delay(hub));
4959 }
4951 } 4960 }
4952 if (hub->hdev->parent || 4961 if (hub->hdev->parent ||
4953 !hcd->driver->port_handed_over || 4962 !hcd->driver->port_handed_over ||
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index f1dbab6f798f..4024926c1d68 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -52,10 +52,11 @@ static const struct usb_device_id usb_quirk_list[] = {
52 /* Microsoft LifeCam-VX700 v2.0 */ 52 /* Microsoft LifeCam-VX700 v2.0 */
53 { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME }, 53 { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
54 54
55 /* Logitech HD Pro Webcams C920, C920-C and C930e */ 55 /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
56 { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, 56 { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
57 { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT }, 57 { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
58 { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, 58 { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
59 { USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT },
59 60
60 /* Logitech ConferenceCam CC3000e */ 61 /* Logitech ConferenceCam CC3000e */
61 { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT }, 62 { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
@@ -146,6 +147,12 @@ static const struct usb_device_id usb_quirk_list[] = {
146 /* appletouch */ 147 /* appletouch */
147 { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, 148 { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
148 149
150 /* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */
151 { USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM },
152
153 /* ELSA MicroLink 56K */
154 { USB_DEVICE(0x05cc, 0x2267), .driver_info = USB_QUIRK_RESET_RESUME },
155
149 /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */ 156 /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
150 { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM }, 157 { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
151 158
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index f66c94130cac..31749c79045f 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -537,6 +537,7 @@ struct dwc2_core_params {
537 * 2 - Internal DMA 537 * 2 - Internal DMA
538 * @power_optimized Are power optimizations enabled? 538 * @power_optimized Are power optimizations enabled?
539 * @num_dev_ep Number of device endpoints available 539 * @num_dev_ep Number of device endpoints available
540 * @num_dev_in_eps Number of device IN endpoints available
540 * @num_dev_perio_in_ep Number of device periodic IN endpoints 541 * @num_dev_perio_in_ep Number of device periodic IN endpoints
541 * available 542 * available
542 * @dev_token_q_depth Device Mode IN Token Sequence Learning Queue 543 * @dev_token_q_depth Device Mode IN Token Sequence Learning Queue
@@ -565,6 +566,7 @@ struct dwc2_core_params {
565 * 2 - 8 or 16 bits 566 * 2 - 8 or 16 bits
566 * @snpsid: Value from SNPSID register 567 * @snpsid: Value from SNPSID register
567 * @dev_ep_dirs: Direction of device endpoints (GHWCFG1) 568 * @dev_ep_dirs: Direction of device endpoints (GHWCFG1)
569 * @g_tx_fifo_size[] Power-on values of TxFIFO sizes
568 */ 570 */
569struct dwc2_hw_params { 571struct dwc2_hw_params {
570 unsigned op_mode:3; 572 unsigned op_mode:3;
@@ -586,12 +588,14 @@ struct dwc2_hw_params {
586 unsigned fs_phy_type:2; 588 unsigned fs_phy_type:2;
587 unsigned i2c_enable:1; 589 unsigned i2c_enable:1;
588 unsigned num_dev_ep:4; 590 unsigned num_dev_ep:4;
591 unsigned num_dev_in_eps : 4;
589 unsigned num_dev_perio_in_ep:4; 592 unsigned num_dev_perio_in_ep:4;
590 unsigned total_fifo_size:16; 593 unsigned total_fifo_size:16;
591 unsigned power_optimized:1; 594 unsigned power_optimized:1;
592 unsigned utmi_phy_data_width:2; 595 unsigned utmi_phy_data_width:2;
593 u32 snpsid; 596 u32 snpsid;
594 u32 dev_ep_dirs; 597 u32 dev_ep_dirs;
598 u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
595}; 599};
596 600
597/* Size of control and EP0 buffers */ 601/* Size of control and EP0 buffers */
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 88529d092503..e4c3ce0de5de 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -195,55 +195,18 @@ int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg)
195{ 195{
196 if (hsotg->hw_params.en_multiple_tx_fifo) 196 if (hsotg->hw_params.en_multiple_tx_fifo)
197 /* In dedicated FIFO mode we need count of IN EPs */ 197 /* In dedicated FIFO mode we need count of IN EPs */
198 return (dwc2_readl(hsotg->regs + GHWCFG4) & 198 return hsotg->hw_params.num_dev_in_eps;
199 GHWCFG4_NUM_IN_EPS_MASK) >> GHWCFG4_NUM_IN_EPS_SHIFT;
200 else 199 else
201 /* In shared FIFO mode we need count of Periodic IN EPs */ 200 /* In shared FIFO mode we need count of Periodic IN EPs */
202 return hsotg->hw_params.num_dev_perio_in_ep; 201 return hsotg->hw_params.num_dev_perio_in_ep;
203} 202}
204 203
205/** 204/**
206 * dwc2_hsotg_ep_info_size - return Endpoint Info Control block size in DWORDs
207 */
208static int dwc2_hsotg_ep_info_size(struct dwc2_hsotg *hsotg)
209{
210 int val = 0;
211 int i;
212 u32 ep_dirs;
213
214 /*
215 * Don't need additional space for ep info control registers in
216 * slave mode.
217 */
218 if (!using_dma(hsotg)) {
219 dev_dbg(hsotg->dev, "Buffer DMA ep info size 0\n");
220 return 0;
221 }
222
223 /*
224 * Buffer DMA mode - 1 location per endpoit
225 * Descriptor DMA mode - 4 locations per endpoint
226 */
227 ep_dirs = hsotg->hw_params.dev_ep_dirs;
228
229 for (i = 0; i <= hsotg->hw_params.num_dev_ep; i++) {
230 val += ep_dirs & 3 ? 1 : 2;
231 ep_dirs >>= 2;
232 }
233
234 if (using_desc_dma(hsotg))
235 val = val * 4;
236
237 return val;
238}
239
240/**
241 * dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for 205 * dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for
242 * device mode TX FIFOs 206 * device mode TX FIFOs
243 */ 207 */
244int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg) 208int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
245{ 209{
246 int ep_info_size;
247 int addr; 210 int addr;
248 int tx_addr_max; 211 int tx_addr_max;
249 u32 np_tx_fifo_size; 212 u32 np_tx_fifo_size;
@@ -252,8 +215,7 @@ int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
252 hsotg->params.g_np_tx_fifo_size); 215 hsotg->params.g_np_tx_fifo_size);
253 216
254 /* Get Endpoint Info Control block size in DWORDs. */ 217 /* Get Endpoint Info Control block size in DWORDs. */
255 ep_info_size = dwc2_hsotg_ep_info_size(hsotg); 218 tx_addr_max = hsotg->hw_params.total_fifo_size;
256 tx_addr_max = hsotg->hw_params.total_fifo_size - ep_info_size;
257 219
258 addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size; 220 addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size;
259 if (tx_addr_max <= addr) 221 if (tx_addr_max <= addr)
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index ef73af6e03a9..03fd20f0b496 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -484,8 +484,7 @@ static void dwc2_check_param_tx_fifo_sizes(struct dwc2_hsotg *hsotg)
484 } 484 }
485 485
486 for (fifo = 1; fifo <= fifo_count; fifo++) { 486 for (fifo = 1; fifo <= fifo_count; fifo++) {
487 dptxfszn = (dwc2_readl(hsotg->regs + DPTXFSIZN(fifo)) & 487 dptxfszn = hsotg->hw_params.g_tx_fifo_size[fifo];
488 FIFOSIZE_DEPTH_MASK) >> FIFOSIZE_DEPTH_SHIFT;
489 488
490 if (hsotg->params.g_tx_fifo_size[fifo] < min || 489 if (hsotg->params.g_tx_fifo_size[fifo] < min ||
491 hsotg->params.g_tx_fifo_size[fifo] > dptxfszn) { 490 hsotg->params.g_tx_fifo_size[fifo] > dptxfszn) {
@@ -609,6 +608,7 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
609 struct dwc2_hw_params *hw = &hsotg->hw_params; 608 struct dwc2_hw_params *hw = &hsotg->hw_params;
610 bool forced; 609 bool forced;
611 u32 gnptxfsiz; 610 u32 gnptxfsiz;
611 int fifo, fifo_count;
612 612
613 if (hsotg->dr_mode == USB_DR_MODE_HOST) 613 if (hsotg->dr_mode == USB_DR_MODE_HOST)
614 return; 614 return;
@@ -617,6 +617,14 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
617 617
618 gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ); 618 gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
619 619
620 fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
621
622 for (fifo = 1; fifo <= fifo_count; fifo++) {
623 hw->g_tx_fifo_size[fifo] =
624 (dwc2_readl(hsotg->regs + DPTXFSIZN(fifo)) &
625 FIFOSIZE_DEPTH_MASK) >> FIFOSIZE_DEPTH_SHIFT;
626 }
627
620 if (forced) 628 if (forced)
621 dwc2_clear_force_mode(hsotg); 629 dwc2_clear_force_mode(hsotg);
622 630
@@ -661,14 +669,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
661 hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4); 669 hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4);
662 grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ); 670 grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
663 671
664 /*
665 * Host specific hardware parameters. Reading these parameters
666 * requires the controller to be in host mode. The mode will
667 * be forced, if necessary, to read these values.
668 */
669 dwc2_get_host_hwparams(hsotg);
670 dwc2_get_dev_hwparams(hsotg);
671
672 /* hwcfg1 */ 672 /* hwcfg1 */
673 hw->dev_ep_dirs = hwcfg1; 673 hw->dev_ep_dirs = hwcfg1;
674 674
@@ -711,6 +711,8 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
711 hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN); 711 hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
712 hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >> 712 hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
713 GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT; 713 GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
714 hw->num_dev_in_eps = (hwcfg4 & GHWCFG4_NUM_IN_EPS_MASK) >>
715 GHWCFG4_NUM_IN_EPS_SHIFT;
714 hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA); 716 hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
715 hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ); 717 hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
716 hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >> 718 hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
@@ -719,6 +721,13 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
719 /* fifo sizes */ 721 /* fifo sizes */
720 hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >> 722 hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
721 GRXFSIZ_DEPTH_SHIFT; 723 GRXFSIZ_DEPTH_SHIFT;
724 /*
725 * Host specific hardware parameters. Reading these parameters
726 * requires the controller to be in host mode. The mode will
727 * be forced, if necessary, to read these values.
728 */
729 dwc2_get_host_hwparams(hsotg);
730 dwc2_get_dev_hwparams(hsotg);
722 731
723 return 0; 732 return 0;
724} 733}
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index c4a4d7bd2766..7ae0eefc7cc7 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -51,8 +51,10 @@ static int dwc3_of_simple_clk_init(struct dwc3_of_simple *simple, int count)
51 51
52 clk = of_clk_get(np, i); 52 clk = of_clk_get(np, i);
53 if (IS_ERR(clk)) { 53 if (IS_ERR(clk)) {
54 while (--i >= 0) 54 while (--i >= 0) {
55 clk_disable_unprepare(simple->clks[i]);
55 clk_put(simple->clks[i]); 56 clk_put(simple->clks[i]);
57 }
56 return PTR_ERR(clk); 58 return PTR_ERR(clk);
57 } 59 }
58 60
@@ -203,6 +205,7 @@ static struct platform_driver dwc3_of_simple_driver = {
203 .driver = { 205 .driver = {
204 .name = "dwc3-of-simple", 206 .name = "dwc3-of-simple",
205 .of_match_table = of_dwc3_simple_match, 207 .of_match_table = of_dwc3_simple_match,
208 .pm = &dwc3_of_simple_dev_pm_ops,
206 }, 209 },
207}; 210};
208 211
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 981fd986cf82..639dd1b163a0 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -259,7 +259,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
259{ 259{
260 const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; 260 const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
261 struct dwc3 *dwc = dep->dwc; 261 struct dwc3 *dwc = dep->dwc;
262 u32 timeout = 500; 262 u32 timeout = 1000;
263 u32 reg; 263 u32 reg;
264 264
265 int cmd_status = 0; 265 int cmd_status = 0;
@@ -912,7 +912,7 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
912 */ 912 */
913 if (speed == USB_SPEED_HIGH) { 913 if (speed == USB_SPEED_HIGH) {
914 struct usb_ep *ep = &dep->endpoint; 914 struct usb_ep *ep = &dep->endpoint;
915 unsigned int mult = ep->mult - 1; 915 unsigned int mult = 2;
916 unsigned int maxp = usb_endpoint_maxp(ep->desc); 916 unsigned int maxp = usb_endpoint_maxp(ep->desc);
917 917
918 if (length <= (2 * maxp)) 918 if (length <= (2 * maxp))
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index eec14e6ed20b..77c7ecca816a 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -146,7 +146,6 @@ int config_ep_by_speed(struct usb_gadget *g,
146 struct usb_function *f, 146 struct usb_function *f,
147 struct usb_ep *_ep) 147 struct usb_ep *_ep)
148{ 148{
149 struct usb_composite_dev *cdev = get_gadget_data(g);
150 struct usb_endpoint_descriptor *chosen_desc = NULL; 149 struct usb_endpoint_descriptor *chosen_desc = NULL;
151 struct usb_descriptor_header **speed_desc = NULL; 150 struct usb_descriptor_header **speed_desc = NULL;
152 151
@@ -226,8 +225,12 @@ ep_found:
226 _ep->maxburst = comp_desc->bMaxBurst + 1; 225 _ep->maxburst = comp_desc->bMaxBurst + 1;
227 break; 226 break;
228 default: 227 default:
229 if (comp_desc->bMaxBurst != 0) 228 if (comp_desc->bMaxBurst != 0) {
229 struct usb_composite_dev *cdev;
230
231 cdev = get_gadget_data(g);
230 ERROR(cdev, "ep0 bMaxBurst must be 0\n"); 232 ERROR(cdev, "ep0 bMaxBurst must be 0\n");
233 }
231 _ep->maxburst = 1; 234 _ep->maxburst = 1;
232 break; 235 break;
233 } 236 }
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 97ea059a7aa4..b6cf5ab5a0a1 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1012,7 +1012,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
1012 else 1012 else
1013 ret = ep->status; 1013 ret = ep->status;
1014 goto error_mutex; 1014 goto error_mutex;
1015 } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_KERNEL))) { 1015 } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) {
1016 ret = -ENOMEM; 1016 ret = -ENOMEM;
1017 } else { 1017 } else {
1018 req->buf = data; 1018 req->buf = data;
@@ -2282,9 +2282,18 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
2282 int i; 2282 int i;
2283 2283
2284 if (len < sizeof(*d) || 2284 if (len < sizeof(*d) ||
2285 d->bFirstInterfaceNumber >= ffs->interfaces_count || 2285 d->bFirstInterfaceNumber >= ffs->interfaces_count)
2286 !d->Reserved1)
2287 return -EINVAL; 2286 return -EINVAL;
2287 if (d->Reserved1 != 1) {
2288 /*
2289 * According to the spec, Reserved1 must be set to 1
2290 * but older kernels incorrectly rejected non-zero
2291 * values. We fix it here to avoid returning EINVAL
2292 * in response to values we used to accept.
2293 */
2294 pr_debug("usb_ext_compat_desc::Reserved1 forced to 1\n");
2295 d->Reserved1 = 1;
2296 }
2288 for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i) 2297 for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
2289 if (d->Reserved2[i]) 2298 if (d->Reserved2[i])
2290 return -EINVAL; 2299 return -EINVAL;
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index a12fb459dbd9..784bf86dad4f 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -479,7 +479,7 @@ endif
479# or video class gadget drivers), or specific hardware, here. 479# or video class gadget drivers), or specific hardware, here.
480config USB_G_WEBCAM 480config USB_G_WEBCAM
481 tristate "USB Webcam Gadget" 481 tristate "USB Webcam Gadget"
482 depends on VIDEO_DEV 482 depends on VIDEO_V4L2
483 select USB_LIBCOMPOSITE 483 select USB_LIBCOMPOSITE
484 select VIDEOBUF2_VMALLOC 484 select VIDEOBUF2_VMALLOC
485 select USB_F_UVC 485 select USB_F_UVC
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index d39f070acbd7..01b44e159623 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -642,7 +642,6 @@ static const struct of_device_id bdc_of_match[] = {
642static struct platform_driver bdc_driver = { 642static struct platform_driver bdc_driver = {
643 .driver = { 643 .driver = {
644 .name = BRCM_BDC_NAME, 644 .name = BRCM_BDC_NAME,
645 .owner = THIS_MODULE,
646 .pm = &bdc_pm_ops, 645 .pm = &bdc_pm_ops,
647 .of_match_table = bdc_of_match, 646 .of_match_table = bdc_of_match,
648 }, 647 },
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 61422d624ad0..1b3efb14aec7 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1069,8 +1069,12 @@ static inline void usb_gadget_udc_stop(struct usb_udc *udc)
1069static inline void usb_gadget_udc_set_speed(struct usb_udc *udc, 1069static inline void usb_gadget_udc_set_speed(struct usb_udc *udc,
1070 enum usb_device_speed speed) 1070 enum usb_device_speed speed)
1071{ 1071{
1072 if (udc->gadget->ops->udc_set_speed) 1072 if (udc->gadget->ops->udc_set_speed) {
1073 udc->gadget->ops->udc_set_speed(udc->gadget, speed); 1073 enum usb_device_speed s;
1074
1075 s = min(speed, udc->gadget->max_speed);
1076 udc->gadget->ops->udc_set_speed(udc->gadget, s);
1077 }
1074} 1078}
1075 1079
1076/** 1080/**
@@ -1143,11 +1147,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
1143 1147
1144 udc = kzalloc(sizeof(*udc), GFP_KERNEL); 1148 udc = kzalloc(sizeof(*udc), GFP_KERNEL);
1145 if (!udc) 1149 if (!udc)
1146 goto err1; 1150 goto err_put_gadget;
1147
1148 ret = device_add(&gadget->dev);
1149 if (ret)
1150 goto err2;
1151 1151
1152 device_initialize(&udc->dev); 1152 device_initialize(&udc->dev);
1153 udc->dev.release = usb_udc_release; 1153 udc->dev.release = usb_udc_release;
@@ -1156,7 +1156,11 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
1156 udc->dev.parent = parent; 1156 udc->dev.parent = parent;
1157 ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj)); 1157 ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj));
1158 if (ret) 1158 if (ret)
1159 goto err3; 1159 goto err_put_udc;
1160
1161 ret = device_add(&gadget->dev);
1162 if (ret)
1163 goto err_put_udc;
1160 1164
1161 udc->gadget = gadget; 1165 udc->gadget = gadget;
1162 gadget->udc = udc; 1166 gadget->udc = udc;
@@ -1166,7 +1170,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
1166 1170
1167 ret = device_add(&udc->dev); 1171 ret = device_add(&udc->dev);
1168 if (ret) 1172 if (ret)
1169 goto err4; 1173 goto err_unlist_udc;
1170 1174
1171 usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED); 1175 usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED);
1172 udc->vbus = true; 1176 udc->vbus = true;
@@ -1174,27 +1178,25 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
1174 /* pick up one of pending gadget drivers */ 1178 /* pick up one of pending gadget drivers */
1175 ret = check_pending_gadget_drivers(udc); 1179 ret = check_pending_gadget_drivers(udc);
1176 if (ret) 1180 if (ret)
1177 goto err5; 1181 goto err_del_udc;
1178 1182
1179 mutex_unlock(&udc_lock); 1183 mutex_unlock(&udc_lock);
1180 1184
1181 return 0; 1185 return 0;
1182 1186
1183err5: 1187 err_del_udc:
1184 device_del(&udc->dev); 1188 device_del(&udc->dev);
1185 1189
1186err4: 1190 err_unlist_udc:
1187 list_del(&udc->list); 1191 list_del(&udc->list);
1188 mutex_unlock(&udc_lock); 1192 mutex_unlock(&udc_lock);
1189 1193
1190err3:
1191 put_device(&udc->dev);
1192 device_del(&gadget->dev); 1194 device_del(&gadget->dev);
1193 1195
1194err2: 1196 err_put_udc:
1195 kfree(udc); 1197 put_device(&udc->dev);
1196 1198
1197err1: 1199 err_put_gadget:
1198 put_device(&gadget->dev); 1200 put_device(&gadget->dev);
1199 return ret; 1201 return ret;
1200} 1202}
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index bc37f40baacf..6e87af248367 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -252,7 +252,7 @@
252#define USB3_EP0_SS_MAX_PACKET_SIZE 512 252#define USB3_EP0_SS_MAX_PACKET_SIZE 512
253#define USB3_EP0_HSFS_MAX_PACKET_SIZE 64 253#define USB3_EP0_HSFS_MAX_PACKET_SIZE 64
254#define USB3_EP0_BUF_SIZE 8 254#define USB3_EP0_BUF_SIZE 8
255#define USB3_MAX_NUM_PIPES 30 255#define USB3_MAX_NUM_PIPES 6 /* This includes PIPE 0 */
256#define USB3_WAIT_US 3 256#define USB3_WAIT_US 3
257#define USB3_DMA_NUM_SETTING_AREA 4 257#define USB3_DMA_NUM_SETTING_AREA 4
258/* 258/*
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 19f00424f53e..3ed75aaa09d9 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -827,7 +827,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
827 default: /* unknown */ 827 default: /* unknown */
828 break; 828 break;
829 } 829 }
830 temp = (cap >> 8) & 0xff; 830 offset = (cap >> 8) & 0xff;
831 } 831 }
832 } 832 }
833#endif 833#endif
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index 4f7895dbcf88..e26e685d8a57 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -162,7 +162,7 @@ static void xhci_debugfs_extcap_regset(struct xhci_hcd *xhci, int cap_id,
162static int xhci_ring_enqueue_show(struct seq_file *s, void *unused) 162static int xhci_ring_enqueue_show(struct seq_file *s, void *unused)
163{ 163{
164 dma_addr_t dma; 164 dma_addr_t dma;
165 struct xhci_ring *ring = s->private; 165 struct xhci_ring *ring = *(struct xhci_ring **)s->private;
166 166
167 dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue); 167 dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
168 seq_printf(s, "%pad\n", &dma); 168 seq_printf(s, "%pad\n", &dma);
@@ -173,7 +173,7 @@ static int xhci_ring_enqueue_show(struct seq_file *s, void *unused)
173static int xhci_ring_dequeue_show(struct seq_file *s, void *unused) 173static int xhci_ring_dequeue_show(struct seq_file *s, void *unused)
174{ 174{
175 dma_addr_t dma; 175 dma_addr_t dma;
176 struct xhci_ring *ring = s->private; 176 struct xhci_ring *ring = *(struct xhci_ring **)s->private;
177 177
178 dma = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue); 178 dma = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
179 seq_printf(s, "%pad\n", &dma); 179 seq_printf(s, "%pad\n", &dma);
@@ -183,7 +183,7 @@ static int xhci_ring_dequeue_show(struct seq_file *s, void *unused)
183 183
184static int xhci_ring_cycle_show(struct seq_file *s, void *unused) 184static int xhci_ring_cycle_show(struct seq_file *s, void *unused)
185{ 185{
186 struct xhci_ring *ring = s->private; 186 struct xhci_ring *ring = *(struct xhci_ring **)s->private;
187 187
188 seq_printf(s, "%d\n", ring->cycle_state); 188 seq_printf(s, "%d\n", ring->cycle_state);
189 189
@@ -346,7 +346,7 @@ static void xhci_debugfs_create_files(struct xhci_hcd *xhci,
346} 346}
347 347
348static struct dentry *xhci_debugfs_create_ring_dir(struct xhci_hcd *xhci, 348static struct dentry *xhci_debugfs_create_ring_dir(struct xhci_hcd *xhci,
349 struct xhci_ring *ring, 349 struct xhci_ring **ring,
350 const char *name, 350 const char *name,
351 struct dentry *parent) 351 struct dentry *parent)
352{ 352{
@@ -387,7 +387,7 @@ void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,
387 387
388 snprintf(epriv->name, sizeof(epriv->name), "ep%02d", ep_index); 388 snprintf(epriv->name, sizeof(epriv->name), "ep%02d", ep_index);
389 epriv->root = xhci_debugfs_create_ring_dir(xhci, 389 epriv->root = xhci_debugfs_create_ring_dir(xhci,
390 dev->eps[ep_index].new_ring, 390 &dev->eps[ep_index].new_ring,
391 epriv->name, 391 epriv->name,
392 spriv->root); 392 spriv->root);
393 spriv->eps[ep_index] = epriv; 393 spriv->eps[ep_index] = epriv;
@@ -423,7 +423,7 @@ void xhci_debugfs_create_slot(struct xhci_hcd *xhci, int slot_id)
423 priv->dev = dev; 423 priv->dev = dev;
424 dev->debugfs_private = priv; 424 dev->debugfs_private = priv;
425 425
426 xhci_debugfs_create_ring_dir(xhci, dev->eps[0].ring, 426 xhci_debugfs_create_ring_dir(xhci, &dev->eps[0].ring,
427 "ep00", priv->root); 427 "ep00", priv->root);
428 428
429 xhci_debugfs_create_context_files(xhci, priv->root, slot_id); 429 xhci_debugfs_create_context_files(xhci, priv->root, slot_id);
@@ -488,11 +488,11 @@ void xhci_debugfs_init(struct xhci_hcd *xhci)
488 ARRAY_SIZE(xhci_extcap_dbc), 488 ARRAY_SIZE(xhci_extcap_dbc),
489 "reg-ext-dbc"); 489 "reg-ext-dbc");
490 490
491 xhci_debugfs_create_ring_dir(xhci, xhci->cmd_ring, 491 xhci_debugfs_create_ring_dir(xhci, &xhci->cmd_ring,
492 "command-ring", 492 "command-ring",
493 xhci->debugfs_root); 493 xhci->debugfs_root);
494 494
495 xhci_debugfs_create_ring_dir(xhci, xhci->event_ring, 495 xhci_debugfs_create_ring_dir(xhci, &xhci->event_ring,
496 "event-ring", 496 "event-ring",
497 xhci->debugfs_root); 497 xhci->debugfs_root);
498 498
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index e1fba4688509..3a29b32a3bd0 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -934,6 +934,12 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
934 if (!vdev) 934 if (!vdev)
935 return; 935 return;
936 936
937 if (vdev->real_port == 0 ||
938 vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
939 xhci_dbg(xhci, "Bad vdev->real_port.\n");
940 goto out;
941 }
942
937 tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts); 943 tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
938 list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) { 944 list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
939 /* is this a hub device that added a tt_info to the tts list */ 945 /* is this a hub device that added a tt_info to the tts list */
@@ -947,6 +953,7 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
947 } 953 }
948 } 954 }
949 } 955 }
956out:
950 /* we are now at a leaf device */ 957 /* we are now at a leaf device */
951 xhci_debugfs_remove_slot(xhci, slot_id); 958 xhci_debugfs_remove_slot(xhci, slot_id);
952 xhci_free_virt_device(xhci, slot_id); 959 xhci_free_virt_device(xhci, slot_id);
@@ -964,10 +971,9 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
964 return 0; 971 return 0;
965 } 972 }
966 973
967 xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags); 974 dev = kzalloc(sizeof(*dev), flags);
968 if (!xhci->devs[slot_id]) 975 if (!dev)
969 return 0; 976 return 0;
970 dev = xhci->devs[slot_id];
971 977
972 /* Allocate the (output) device context that will be used in the HC. */ 978 /* Allocate the (output) device context that will be used in the HC. */
973 dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); 979 dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
@@ -1008,9 +1014,17 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
1008 1014
1009 trace_xhci_alloc_virt_device(dev); 1015 trace_xhci_alloc_virt_device(dev);
1010 1016
1017 xhci->devs[slot_id] = dev;
1018
1011 return 1; 1019 return 1;
1012fail: 1020fail:
1013 xhci_free_virt_device(xhci, slot_id); 1021
1022 if (dev->in_ctx)
1023 xhci_free_container_ctx(xhci, dev->in_ctx);
1024 if (dev->out_ctx)
1025 xhci_free_container_ctx(xhci, dev->out_ctx);
1026 kfree(dev);
1027
1014 return 0; 1028 return 0;
1015} 1029}
1016 1030
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 7ef1274ef7f7..1aad89b8aba0 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -178,6 +178,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
178 xhci->quirks |= XHCI_BROKEN_STREAMS; 178 xhci->quirks |= XHCI_BROKEN_STREAMS;
179 } 179 }
180 if (pdev->vendor == PCI_VENDOR_ID_RENESAS && 180 if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
181 pdev->device == 0x0014)
182 xhci->quirks |= XHCI_TRUST_TX_LENGTH;
183 if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
181 pdev->device == 0x0015) 184 pdev->device == 0x0015)
182 xhci->quirks |= XHCI_RESET_ON_RESUME; 185 xhci->quirks |= XHCI_RESET_ON_RESUME;
183 if (pdev->vendor == PCI_VENDOR_ID_VIA) 186 if (pdev->vendor == PCI_VENDOR_ID_VIA)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index c239c688076c..c5cbc685c691 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2477,12 +2477,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
2477 */ 2477 */
2478 if (list_empty(&ep_ring->td_list)) { 2478 if (list_empty(&ep_ring->td_list)) {
2479 /* 2479 /*
2480 * A stopped endpoint may generate an extra completion 2480 * Don't print wanings if it's due to a stopped endpoint
2481 * event if the device was suspended. Don't print 2481 * generating an extra completion event if the device
2482 * warnings. 2482 * was suspended. Or, a event for the last TRB of a
2483 * short TD we already got a short event for.
2484 * The short TD is already removed from the TD list.
2483 */ 2485 */
2486
2484 if (!(trb_comp_code == COMP_STOPPED || 2487 if (!(trb_comp_code == COMP_STOPPED ||
2485 trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) { 2488 trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
2489 ep_ring->last_td_was_short)) {
2486 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", 2490 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
2487 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), 2491 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2488 ep_index); 2492 ep_index);
@@ -3108,7 +3112,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
3108{ 3112{
3109 u32 maxp, total_packet_count; 3113 u32 maxp, total_packet_count;
3110 3114
3111 /* MTK xHCI is mostly 0.97 but contains some features from 1.0 */ 3115 /* MTK xHCI 0.96 contains some features from 1.0 */
3112 if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST)) 3116 if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
3113 return ((td_total_len - transferred) >> 10); 3117 return ((td_total_len - transferred) >> 10);
3114 3118
@@ -3117,8 +3121,8 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
3117 trb_buff_len == td_total_len) 3121 trb_buff_len == td_total_len)
3118 return 0; 3122 return 0;
3119 3123
3120 /* for MTK xHCI, TD size doesn't include this TRB */ 3124 /* for MTK xHCI 0.96, TD size include this TRB, but not in 1.x */
3121 if (xhci->quirks & XHCI_MTK_HOST) 3125 if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
3122 trb_buff_len = 0; 3126 trb_buff_len = 0;
3123 3127
3124 maxp = usb_endpoint_maxp(&urb->ep->desc); 3128 maxp = usb_endpoint_maxp(&urb->ep->desc);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 2424d3020ca3..da6dbe3ebd8b 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3525,8 +3525,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3525 struct xhci_slot_ctx *slot_ctx; 3525 struct xhci_slot_ctx *slot_ctx;
3526 int i, ret; 3526 int i, ret;
3527 3527
3528 xhci_debugfs_remove_slot(xhci, udev->slot_id);
3529
3530#ifndef CONFIG_USB_DEFAULT_PERSIST 3528#ifndef CONFIG_USB_DEFAULT_PERSIST
3531 /* 3529 /*
3532 * We called pm_runtime_get_noresume when the device was attached. 3530 * We called pm_runtime_get_noresume when the device was attached.
@@ -3555,8 +3553,10 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3555 } 3553 }
3556 3554
3557 ret = xhci_disable_slot(xhci, udev->slot_id); 3555 ret = xhci_disable_slot(xhci, udev->slot_id);
3558 if (ret) 3556 if (ret) {
3557 xhci_debugfs_remove_slot(xhci, udev->slot_id);
3559 xhci_free_virt_device(xhci, udev->slot_id); 3558 xhci_free_virt_device(xhci, udev->slot_id);
3559 }
3560} 3560}
3561 3561
3562int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) 3562int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index 465dbf68b463..f723f7b8c9ac 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -279,6 +279,8 @@ static int usb3503_probe(struct usb3503 *hub)
279 if (gpio_is_valid(hub->gpio_reset)) { 279 if (gpio_is_valid(hub->gpio_reset)) {
280 err = devm_gpio_request_one(dev, hub->gpio_reset, 280 err = devm_gpio_request_one(dev, hub->gpio_reset,
281 GPIOF_OUT_INIT_LOW, "usb3503 reset"); 281 GPIOF_OUT_INIT_LOW, "usb3503 reset");
282 /* Datasheet defines a hardware reset to be at least 100us */
283 usleep_range(100, 10000);
282 if (err) { 284 if (err) {
283 dev_err(dev, 285 dev_err(dev,
284 "unable to request GPIO %d as reset pin (%d)\n", 286 "unable to request GPIO %d as reset pin (%d)\n",
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index f6ae753ab99b..f932f40302df 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1004,7 +1004,9 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg
1004 break; 1004 break;
1005 1005
1006 case MON_IOCQ_RING_SIZE: 1006 case MON_IOCQ_RING_SIZE:
1007 mutex_lock(&rp->fetch_lock);
1007 ret = rp->b_size; 1008 ret = rp->b_size;
1009 mutex_unlock(&rp->fetch_lock);
1008 break; 1010 break;
1009 1011
1010 case MON_IOCT_RING_SIZE: 1012 case MON_IOCT_RING_SIZE:
@@ -1231,12 +1233,16 @@ static int mon_bin_vma_fault(struct vm_fault *vmf)
1231 unsigned long offset, chunk_idx; 1233 unsigned long offset, chunk_idx;
1232 struct page *pageptr; 1234 struct page *pageptr;
1233 1235
1236 mutex_lock(&rp->fetch_lock);
1234 offset = vmf->pgoff << PAGE_SHIFT; 1237 offset = vmf->pgoff << PAGE_SHIFT;
1235 if (offset >= rp->b_size) 1238 if (offset >= rp->b_size) {
1239 mutex_unlock(&rp->fetch_lock);
1236 return VM_FAULT_SIGBUS; 1240 return VM_FAULT_SIGBUS;
1241 }
1237 chunk_idx = offset / CHUNK_SIZE; 1242 chunk_idx = offset / CHUNK_SIZE;
1238 pageptr = rp->b_vec[chunk_idx].pg; 1243 pageptr = rp->b_vec[chunk_idx].pg;
1239 get_page(pageptr); 1244 get_page(pageptr);
1245 mutex_unlock(&rp->fetch_lock);
1240 vmf->page = pageptr; 1246 vmf->page = pageptr;
1241 return 0; 1247 return 0;
1242} 1248}
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 0397606a211b..6c036de63272 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -284,7 +284,15 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
284 musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE; 284 musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
285 portstate(musb->port1_status |= USB_PORT_STAT_POWER); 285 portstate(musb->port1_status |= USB_PORT_STAT_POWER);
286 del_timer(&musb->dev_timer); 286 del_timer(&musb->dev_timer);
287 } else { 287 } else if (!(musb->int_usb & MUSB_INTR_BABBLE)) {
288 /*
289 * When babble condition happens, drvvbus interrupt
290 * is also generated. Ignore this drvvbus interrupt
291 * and let babble interrupt handler recovers the
292 * controller; otherwise, the host-mode flag is lost
293 * due to the MUSB_DEV_MODE() call below and babble
294 * recovery logic will not be called.
295 */
288 musb->is_active = 0; 296 musb->is_active = 0;
289 MUSB_DEV_MODE(musb); 297 MUSB_DEV_MODE(musb);
290 otg->default_a = 0; 298 otg->default_a = 0;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 7c6273bf5beb..06d502b3e913 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = {
124 { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */ 124 { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
125 { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ 125 { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
126 { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */ 126 { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
127 { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */
127 { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ 128 { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
128 { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ 129 { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
129 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ 130 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -174,6 +175,7 @@ static const struct usb_device_id id_table[] = {
174 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ 175 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
175 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ 176 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
176 { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ 177 { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
178 { USB_DEVICE(0x18EF, 0xE030) }, /* ELV ALC 8xxx Battery Charger */
177 { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */ 179 { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */
178 { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ 180 { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
179 { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ 181 { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 1aba9105b369..fc68952c994a 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1013,6 +1013,7 @@ static const struct usb_device_id id_table_combined[] = {
1013 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 1013 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
1014 { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) }, 1014 { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
1015 { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) }, 1015 { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
1016 { USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
1016 { } /* Terminating entry */ 1017 { } /* Terminating entry */
1017}; 1018};
1018 1019
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 4faa09fe308c..8b4ecd2bd297 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -915,6 +915,12 @@
915#define ICPDAS_I7563U_PID 0x0105 915#define ICPDAS_I7563U_PID 0x0105
916 916
917/* 917/*
918 * Airbus Defence and Space
919 */
920#define AIRBUS_DS_VID 0x1e8e /* Vendor ID */
921#define AIRBUS_DS_P8GR 0x6001 /* Tetra P8GR */
922
923/*
918 * RT Systems programming cables for various ham radios 924 * RT Systems programming cables for various ham radios
919 */ 925 */
920#define RTSYSTEMS_VID 0x2100 /* Vendor ID */ 926#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index aaa7d901a06d..b6320e3be429 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -233,11 +233,14 @@ static void option_instat_callback(struct urb *urb);
233/* These Quectel products use Qualcomm's vendor ID */ 233/* These Quectel products use Qualcomm's vendor ID */
234#define QUECTEL_PRODUCT_UC20 0x9003 234#define QUECTEL_PRODUCT_UC20 0x9003
235#define QUECTEL_PRODUCT_UC15 0x9090 235#define QUECTEL_PRODUCT_UC15 0x9090
236/* These Yuga products use Qualcomm's vendor ID */
237#define YUGA_PRODUCT_CLM920_NC5 0x9625
236 238
237#define QUECTEL_VENDOR_ID 0x2c7c 239#define QUECTEL_VENDOR_ID 0x2c7c
238/* These Quectel products use Quectel's vendor ID */ 240/* These Quectel products use Quectel's vendor ID */
239#define QUECTEL_PRODUCT_EC21 0x0121 241#define QUECTEL_PRODUCT_EC21 0x0121
240#define QUECTEL_PRODUCT_EC25 0x0125 242#define QUECTEL_PRODUCT_EC25 0x0125
243#define QUECTEL_PRODUCT_BG96 0x0296
241 244
242#define CMOTECH_VENDOR_ID 0x16d8 245#define CMOTECH_VENDOR_ID 0x16d8
243#define CMOTECH_PRODUCT_6001 0x6001 246#define CMOTECH_PRODUCT_6001 0x6001
@@ -279,6 +282,7 @@ static void option_instat_callback(struct urb *urb);
279#define TELIT_PRODUCT_LE922_USBCFG3 0x1043 282#define TELIT_PRODUCT_LE922_USBCFG3 0x1043
280#define TELIT_PRODUCT_LE922_USBCFG5 0x1045 283#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
281#define TELIT_PRODUCT_ME910 0x1100 284#define TELIT_PRODUCT_ME910 0x1100
285#define TELIT_PRODUCT_ME910_DUAL_MODEM 0x1101
282#define TELIT_PRODUCT_LE920 0x1200 286#define TELIT_PRODUCT_LE920 0x1200
283#define TELIT_PRODUCT_LE910 0x1201 287#define TELIT_PRODUCT_LE910 0x1201
284#define TELIT_PRODUCT_LE910_USBCFG4 0x1206 288#define TELIT_PRODUCT_LE910_USBCFG4 0x1206
@@ -644,6 +648,11 @@ static const struct option_blacklist_info telit_me910_blacklist = {
644 .reserved = BIT(1) | BIT(3), 648 .reserved = BIT(1) | BIT(3),
645}; 649};
646 650
651static const struct option_blacklist_info telit_me910_dual_modem_blacklist = {
652 .sendsetup = BIT(0),
653 .reserved = BIT(3),
654};
655
647static const struct option_blacklist_info telit_le910_blacklist = { 656static const struct option_blacklist_info telit_le910_blacklist = {
648 .sendsetup = BIT(0), 657 .sendsetup = BIT(0),
649 .reserved = BIT(1) | BIT(2), 658 .reserved = BIT(1) | BIT(2),
@@ -673,6 +682,10 @@ static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
673 .reserved = BIT(4) | BIT(5), 682 .reserved = BIT(4) | BIT(5),
674}; 683};
675 684
685static const struct option_blacklist_info yuga_clm920_nc5_blacklist = {
686 .reserved = BIT(1) | BIT(4),
687};
688
676static const struct usb_device_id option_ids[] = { 689static const struct usb_device_id option_ids[] = {
677 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, 690 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
678 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, 691 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1177,11 +1190,16 @@ static const struct usb_device_id option_ids[] = {
1177 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)}, 1190 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
1178 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20), 1191 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
1179 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1192 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1193 /* Yuga products use Qualcomm vendor ID */
1194 { USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5),
1195 .driver_info = (kernel_ulong_t)&yuga_clm920_nc5_blacklist },
1180 /* Quectel products using Quectel vendor ID */ 1196 /* Quectel products using Quectel vendor ID */
1181 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21), 1197 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
1182 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1198 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1183 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25), 1199 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
1184 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1200 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1201 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
1202 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1185 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, 1203 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
1186 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, 1204 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
1187 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), 1205 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1241,6 +1259,8 @@ static const struct usb_device_id option_ids[] = {
1241 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, 1259 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
1242 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), 1260 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
1243 .driver_info = (kernel_ulong_t)&telit_me910_blacklist }, 1261 .driver_info = (kernel_ulong_t)&telit_me910_blacklist },
1262 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
1263 .driver_info = (kernel_ulong_t)&telit_me910_dual_modem_blacklist },
1244 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), 1264 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
1245 .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, 1265 .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
1246 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), 1266 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index e3892541a489..613f91add03d 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = {
162 {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */ 162 {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */
163 {DEVICE_SWI(0x1199, 0x907a)}, /* Sierra Wireless EM74xx QDL */ 163 {DEVICE_SWI(0x1199, 0x907a)}, /* Sierra Wireless EM74xx QDL */
164 {DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */ 164 {DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */
165 {DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */
166 {DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */
165 {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ 167 {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
166 {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ 168 {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
167 {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ 169 {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
@@ -342,6 +344,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
342 break; 344 break;
343 case 2: 345 case 2:
344 dev_dbg(dev, "NMEA GPS interface found\n"); 346 dev_dbg(dev, "NMEA GPS interface found\n");
347 sendsetup = true;
345 break; 348 break;
346 case 3: 349 case 3:
347 dev_dbg(dev, "Modem port found\n"); 350 dev_dbg(dev, "Modem port found\n");
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index ab5a2ac4993a..aaf4813e4971 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -31,12 +31,14 @@ static const struct usb_device_id id_table[] = {
31}; 31};
32 32
33static const struct usb_device_id dbc_id_table[] = { 33static const struct usb_device_id dbc_id_table[] = {
34 { USB_DEVICE(0x1d6b, 0x0010) },
34 { USB_DEVICE(0x1d6b, 0x0011) }, 35 { USB_DEVICE(0x1d6b, 0x0011) },
35 { }, 36 { },
36}; 37};
37 38
38static const struct usb_device_id id_table_combined[] = { 39static const struct usb_device_id id_table_combined[] = {
39 { USB_DEVICE(0x0525, 0x127a) }, 40 { USB_DEVICE(0x0525, 0x127a) },
41 { USB_DEVICE(0x1d6b, 0x0010) },
40 { USB_DEVICE(0x1d6b, 0x0011) }, 42 { USB_DEVICE(0x1d6b, 0x0011) },
41 { }, 43 { },
42}; 44};
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
index 1fcd758a961f..3734a25e09e5 100644
--- a/drivers/usb/storage/uas-detect.h
+++ b/drivers/usb/storage/uas-detect.h
@@ -112,6 +112,10 @@ static int uas_use_uas_driver(struct usb_interface *intf,
112 } 112 }
113 } 113 }
114 114
115 /* All Seagate disk enclosures have broken ATA pass-through support */
116 if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2)
117 flags |= US_FL_NO_ATA_1X;
118
115 usb_stor_adjust_quirks(udev, &flags); 119 usb_stor_adjust_quirks(udev, &flags);
116 120
117 if (flags & US_FL_IGNORE_UAS) { 121 if (flags & US_FL_IGNORE_UAS) {
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 2968046e7c05..f72d045ee9ef 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2100,6 +2100,13 @@ UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116,
2100 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2100 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2101 US_FL_BROKEN_FUA ), 2101 US_FL_BROKEN_FUA ),
2102 2102
2103/* Reported by David Kozub <zub@linux.fjfi.cvut.cz> */
2104UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
2105 "JMicron",
2106 "JMS567",
2107 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2108 US_FL_BROKEN_FUA),
2109
2103/* 2110/*
2104 * Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br> 2111 * Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
2105 * JMicron responds to USN and several other SCSI ioctls with a 2112 * JMicron responds to USN and several other SCSI ioctls with a
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index d520374a824e..a7d08ae0adad 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -129,6 +129,13 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
129 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 129 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
130 US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES), 130 US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES),
131 131
132/* Reported-by: David Kozub <zub@linux.fjfi.cvut.cz> */
133UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
134 "JMicron",
135 "JMS567",
136 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
137 US_FL_BROKEN_FUA),
138
132/* Reported-by: Hans de Goede <hdegoede@redhat.com> */ 139/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
133UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, 140UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
134 "VIA", 141 "VIA",
@@ -136,6 +143,13 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
136 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 143 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
137 US_FL_NO_ATA_1X), 144 US_FL_NO_ATA_1X),
138 145
146/* Reported-by: Icenowy Zheng <icenowy@aosc.io> */
147UNUSUAL_DEV(0x2537, 0x1068, 0x0000, 0x9999,
148 "Norelsys",
149 "NS1068X",
150 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
151 US_FL_IGNORE_UAS),
152
139/* Reported-by: Takeo Nakayama <javhera@gmx.com> */ 153/* Reported-by: Takeo Nakayama <javhera@gmx.com> */
140UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999, 154UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999,
141 "JMicron", 155 "JMicron",
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index 465d7da849c3..bcb2744c5977 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -1,13 +1,53 @@
1 1
2menu "USB Power Delivery and Type-C drivers" 2menuconfig TYPEC
3 tristate "USB Type-C Support"
4 help
5 USB Type-C Specification defines a cable and connector for USB where
6 only one type of plug is supported on both ends, i.e. there will not
7 be Type-A plug on one end of the cable and Type-B plug on the other.
8 Determination of the host-to-device relationship happens through a
9 specific Configuration Channel (CC) which goes through the USB Type-C
10 cable. The Configuration Channel may also be used to detect optional
11 Accessory Modes - Analog Audio and Debug - and if USB Power Delivery
12 is supported, the Alternate Modes, where the connector is used for
13 something else then USB communication.
14
15 USB Power Delivery Specification defines a protocol that can be used
16 to negotiate the voltage and current levels with the connected
17 partners. USB Power Delivery allows higher voltages then the normal
18 5V, up to 20V, and current up to 5A over the cable. The USB Power
19 Delivery protocol is also used to negotiate the optional Alternate
20 Modes when they are supported. USB Power Delivery does not depend on
21 USB Type-C connector, however it is mostly used together with USB
22 Type-C connectors.
23
24 USB Type-C and USB Power Delivery Specifications define a set of state
25 machines that need to be implemented in either software or firmware.
26 Simple USB Type-C PHYs, for example USB Type-C Port Controller
27 Interface Specification compliant "Port Controllers" need the state
28 machines to be handled in the OS, but stand-alone USB Type-C and Power
29 Delivery controllers handle the state machines inside their firmware.
30 The USB Type-C and Power Delivery controllers usually function
31 autonomously, and do not necessarily require drivers.
32
33 Enable this configurations option if you have USB Type-C connectors on
34 your system and 1) you know your USB Type-C hardware requires OS
35 control (a driver) to function, or 2) if you need to be able to read
36 the status of the USB Type-C ports in your system, or 3) if you need
37 to be able to swap the power role (decide are you supplying or
38 consuming power over the cable) or data role (host or device) when
39 both roles are supported.
40
41 For more information, see the kernel documentation for USB Type-C
42 Connector Class API (Documentation/driver-api/usb/typec.rst)
43 <https://www.kernel.org/doc/html/latest/driver-api/usb/typec.html>
44 and ABI (Documentation/ABI/testing/sysfs-class-typec).
3 45
4config TYPEC 46if TYPEC
5 tristate
6 47
7config TYPEC_TCPM 48config TYPEC_TCPM
8 tristate "USB Type-C Port Controller Manager" 49 tristate "USB Type-C Port Controller Manager"
9 depends on USB 50 depends on USB
10 select TYPEC
11 help 51 help
12 The Type-C Port Controller Manager provides a USB PD and USB Type-C 52 The Type-C Port Controller Manager provides a USB PD and USB Type-C
13 state machine for use with Type-C Port Controllers. 53 state machine for use with Type-C Port Controllers.
@@ -22,7 +62,6 @@ config TYPEC_WCOVE
22 depends on INTEL_SOC_PMIC 62 depends on INTEL_SOC_PMIC
23 depends on INTEL_PMC_IPC 63 depends on INTEL_PMC_IPC
24 depends on BXT_WC_PMIC_OPREGION 64 depends on BXT_WC_PMIC_OPREGION
25 select TYPEC
26 help 65 help
27 This driver adds support for USB Type-C detection on Intel Broxton 66 This driver adds support for USB Type-C detection on Intel Broxton
28 platforms that have Intel Whiskey Cove PMIC. The driver can detect the 67 platforms that have Intel Whiskey Cove PMIC. The driver can detect the
@@ -31,14 +70,13 @@ config TYPEC_WCOVE
31 To compile this driver as module, choose M here: the module will be 70 To compile this driver as module, choose M here: the module will be
32 called typec_wcove 71 called typec_wcove
33 72
34endif 73endif # TYPEC_TCPM
35 74
36source "drivers/usb/typec/ucsi/Kconfig" 75source "drivers/usb/typec/ucsi/Kconfig"
37 76
38config TYPEC_TPS6598X 77config TYPEC_TPS6598X
39 tristate "TI TPS6598x USB Power Delivery controller driver" 78 tristate "TI TPS6598x USB Power Delivery controller driver"
40 depends on I2C 79 depends on I2C
41 select TYPEC
42 help 80 help
43 Say Y or M here if your system has TI TPS65982 or TPS65983 USB Power 81 Say Y or M here if your system has TI TPS65982 or TPS65983 USB Power
44 Delivery controller. 82 Delivery controller.
@@ -46,4 +84,4 @@ config TYPEC_TPS6598X
46 If you choose to build this driver as a dynamically linked module, the 84 If you choose to build this driver as a dynamically linked module, the
47 module will be called tps6598x.ko. 85 module will be called tps6598x.ko.
48 86
49endmenu 87endif # TYPEC
diff --git a/drivers/usb/typec/ucsi/Kconfig b/drivers/usb/typec/ucsi/Kconfig
index d0c31cee4720..e36d6c73c4a4 100644
--- a/drivers/usb/typec/ucsi/Kconfig
+++ b/drivers/usb/typec/ucsi/Kconfig
@@ -1,7 +1,6 @@
1config TYPEC_UCSI 1config TYPEC_UCSI
2 tristate "USB Type-C Connector System Software Interface driver" 2 tristate "USB Type-C Connector System Software Interface driver"
3 depends on !CPU_BIG_ENDIAN 3 depends on !CPU_BIG_ENDIAN
4 select TYPEC
5 help 4 help
6 USB Type-C Connector System Software Interface (UCSI) is a 5 USB Type-C Connector System Software Interface (UCSI) is a
7 specification for an interface that allows the operating system to 6 specification for an interface that allows the operating system to
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index a3df8ee82faf..e31a6f204397 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -149,8 +149,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
149 * step 1? 149 * step 1?
150 */ 150 */
151 if (ud->tcp_socket) { 151 if (ud->tcp_socket) {
152 dev_dbg(&sdev->udev->dev, "shutdown tcp_socket %p\n", 152 dev_dbg(&sdev->udev->dev, "shutdown sockfd %d\n", ud->sockfd);
153 ud->tcp_socket);
154 kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR); 153 kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
155 } 154 }
156 155
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index 4f48b306713f..c31c8402a0c5 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -237,11 +237,12 @@ void stub_device_cleanup_urbs(struct stub_device *sdev)
237 struct stub_priv *priv; 237 struct stub_priv *priv;
238 struct urb *urb; 238 struct urb *urb;
239 239
240 dev_dbg(&sdev->udev->dev, "free sdev %p\n", sdev); 240 dev_dbg(&sdev->udev->dev, "Stub device cleaning up urbs\n");
241 241
242 while ((priv = stub_priv_pop(sdev))) { 242 while ((priv = stub_priv_pop(sdev))) {
243 urb = priv->urb; 243 urb = priv->urb;
244 dev_dbg(&sdev->udev->dev, "free urb %p\n", urb); 244 dev_dbg(&sdev->udev->dev, "free urb seqnum %lu\n",
245 priv->seqnum);
245 usb_kill_urb(urb); 246 usb_kill_urb(urb);
246 247
247 kmem_cache_free(stub_priv_cache, priv); 248 kmem_cache_free(stub_priv_cache, priv);
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 536e037f541f..6c5a59313999 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -211,9 +211,6 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
211 if (priv->seqnum != pdu->u.cmd_unlink.seqnum) 211 if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
212 continue; 212 continue;
213 213
214 dev_info(&priv->urb->dev->dev, "unlink urb %p\n",
215 priv->urb);
216
217 /* 214 /*
218 * This matched urb is not completed yet (i.e., be in 215 * This matched urb is not completed yet (i.e., be in
219 * flight in usb hcd hardware/driver). Now we are 216 * flight in usb hcd hardware/driver). Now we are
@@ -252,8 +249,8 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
252 ret = usb_unlink_urb(priv->urb); 249 ret = usb_unlink_urb(priv->urb);
253 if (ret != -EINPROGRESS) 250 if (ret != -EINPROGRESS)
254 dev_err(&priv->urb->dev->dev, 251 dev_err(&priv->urb->dev->dev,
255 "failed to unlink a urb %p, ret %d\n", 252 "failed to unlink a urb # %lu, ret %d\n",
256 priv->urb, ret); 253 priv->seqnum, ret);
257 254
258 return 0; 255 return 0;
259 } 256 }
@@ -322,23 +319,26 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
322 return priv; 319 return priv;
323} 320}
324 321
325static int get_pipe(struct stub_device *sdev, int epnum, int dir) 322static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
326{ 323{
327 struct usb_device *udev = sdev->udev; 324 struct usb_device *udev = sdev->udev;
328 struct usb_host_endpoint *ep; 325 struct usb_host_endpoint *ep;
329 struct usb_endpoint_descriptor *epd = NULL; 326 struct usb_endpoint_descriptor *epd = NULL;
327 int epnum = pdu->base.ep;
328 int dir = pdu->base.direction;
329
330 if (epnum < 0 || epnum > 15)
331 goto err_ret;
330 332
331 if (dir == USBIP_DIR_IN) 333 if (dir == USBIP_DIR_IN)
332 ep = udev->ep_in[epnum & 0x7f]; 334 ep = udev->ep_in[epnum & 0x7f];
333 else 335 else
334 ep = udev->ep_out[epnum & 0x7f]; 336 ep = udev->ep_out[epnum & 0x7f];
335 if (!ep) { 337 if (!ep)
336 dev_err(&sdev->udev->dev, "no such endpoint?, %d\n", 338 goto err_ret;
337 epnum);
338 BUG();
339 }
340 339
341 epd = &ep->desc; 340 epd = &ep->desc;
341
342 if (usb_endpoint_xfer_control(epd)) { 342 if (usb_endpoint_xfer_control(epd)) {
343 if (dir == USBIP_DIR_OUT) 343 if (dir == USBIP_DIR_OUT)
344 return usb_sndctrlpipe(udev, epnum); 344 return usb_sndctrlpipe(udev, epnum);
@@ -361,15 +361,31 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir)
361 } 361 }
362 362
363 if (usb_endpoint_xfer_isoc(epd)) { 363 if (usb_endpoint_xfer_isoc(epd)) {
364 /* validate packet size and number of packets */
365 unsigned int maxp, packets, bytes;
366
367 maxp = usb_endpoint_maxp(epd);
368 maxp *= usb_endpoint_maxp_mult(epd);
369 bytes = pdu->u.cmd_submit.transfer_buffer_length;
370 packets = DIV_ROUND_UP(bytes, maxp);
371
372 if (pdu->u.cmd_submit.number_of_packets < 0 ||
373 pdu->u.cmd_submit.number_of_packets > packets) {
374 dev_err(&sdev->udev->dev,
375 "CMD_SUBMIT: isoc invalid num packets %d\n",
376 pdu->u.cmd_submit.number_of_packets);
377 return -1;
378 }
364 if (dir == USBIP_DIR_OUT) 379 if (dir == USBIP_DIR_OUT)
365 return usb_sndisocpipe(udev, epnum); 380 return usb_sndisocpipe(udev, epnum);
366 else 381 else
367 return usb_rcvisocpipe(udev, epnum); 382 return usb_rcvisocpipe(udev, epnum);
368 } 383 }
369 384
385err_ret:
370 /* NOT REACHED */ 386 /* NOT REACHED */
371 dev_err(&sdev->udev->dev, "get pipe, epnum %d\n", epnum); 387 dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
372 return 0; 388 return -1;
373} 389}
374 390
375static void masking_bogus_flags(struct urb *urb) 391static void masking_bogus_flags(struct urb *urb)
@@ -433,7 +449,10 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
433 struct stub_priv *priv; 449 struct stub_priv *priv;
434 struct usbip_device *ud = &sdev->ud; 450 struct usbip_device *ud = &sdev->ud;
435 struct usb_device *udev = sdev->udev; 451 struct usb_device *udev = sdev->udev;
436 int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction); 452 int pipe = get_pipe(sdev, pdu);
453
454 if (pipe == -1)
455 return;
437 456
438 priv = stub_priv_alloc(sdev, pdu); 457 priv = stub_priv_alloc(sdev, pdu);
439 if (!priv) 458 if (!priv)
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index b18bce96c212..f0ec41a50cbc 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -88,7 +88,7 @@ void stub_complete(struct urb *urb)
88 /* link a urb to the queue of tx. */ 88 /* link a urb to the queue of tx. */
89 spin_lock_irqsave(&sdev->priv_lock, flags); 89 spin_lock_irqsave(&sdev->priv_lock, flags);
90 if (sdev->ud.tcp_socket == NULL) { 90 if (sdev->ud.tcp_socket == NULL) {
91 usbip_dbg_stub_tx("ignore urb for closed connection %p", urb); 91 usbip_dbg_stub_tx("ignore urb for closed connection\n");
92 /* It will be freed in stub_device_cleanup_urbs(). */ 92 /* It will be freed in stub_device_cleanup_urbs(). */
93 } else if (priv->unlinking) { 93 } else if (priv->unlinking) {
94 stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status); 94 stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status);
@@ -167,6 +167,13 @@ static int stub_send_ret_submit(struct stub_device *sdev)
167 memset(&pdu_header, 0, sizeof(pdu_header)); 167 memset(&pdu_header, 0, sizeof(pdu_header));
168 memset(&msg, 0, sizeof(msg)); 168 memset(&msg, 0, sizeof(msg));
169 169
170 if (urb->actual_length > 0 && !urb->transfer_buffer) {
171 dev_err(&sdev->udev->dev,
172 "urb: actual_length %d transfer_buffer null\n",
173 urb->actual_length);
174 return -1;
175 }
176
170 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) 177 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
171 iovnum = 2 + urb->number_of_packets; 178 iovnum = 2 + urb->number_of_packets;
172 else 179 else
@@ -183,8 +190,8 @@ static int stub_send_ret_submit(struct stub_device *sdev)
183 190
184 /* 1. setup usbip_header */ 191 /* 1. setup usbip_header */
185 setup_ret_submit_pdu(&pdu_header, urb); 192 setup_ret_submit_pdu(&pdu_header, urb);
186 usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n", 193 usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
187 pdu_header.base.seqnum, urb); 194 pdu_header.base.seqnum);
188 usbip_header_correct_endian(&pdu_header, 1); 195 usbip_header_correct_endian(&pdu_header, 1);
189 196
190 iov[iovnum].iov_base = &pdu_header; 197 iov[iovnum].iov_base = &pdu_header;
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index f7978933b402..ee2bbce24584 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -91,7 +91,7 @@ static void usbip_dump_usb_device(struct usb_device *udev)
91 dev_dbg(dev, " devnum(%d) devpath(%s) usb speed(%s)", 91 dev_dbg(dev, " devnum(%d) devpath(%s) usb speed(%s)",
92 udev->devnum, udev->devpath, usb_speed_string(udev->speed)); 92 udev->devnum, udev->devpath, usb_speed_string(udev->speed));
93 93
94 pr_debug("tt %p, ttport %d\n", udev->tt, udev->ttport); 94 pr_debug("tt hub ttport %d\n", udev->ttport);
95 95
96 dev_dbg(dev, " "); 96 dev_dbg(dev, " ");
97 for (i = 0; i < 16; i++) 97 for (i = 0; i < 16; i++)
@@ -124,12 +124,8 @@ static void usbip_dump_usb_device(struct usb_device *udev)
124 } 124 }
125 pr_debug("\n"); 125 pr_debug("\n");
126 126
127 dev_dbg(dev, "parent %p, bus %p\n", udev->parent, udev->bus); 127 dev_dbg(dev, "parent %s, bus %s\n", dev_name(&udev->parent->dev),
128 128 udev->bus->bus_name);
129 dev_dbg(dev,
130 "descriptor %p, config %p, actconfig %p, rawdescriptors %p\n",
131 &udev->descriptor, udev->config,
132 udev->actconfig, udev->rawdescriptors);
133 129
134 dev_dbg(dev, "have_langid %d, string_langid %d\n", 130 dev_dbg(dev, "have_langid %d, string_langid %d\n",
135 udev->have_langid, udev->string_langid); 131 udev->have_langid, udev->string_langid);
@@ -237,9 +233,6 @@ void usbip_dump_urb(struct urb *urb)
237 233
238 dev = &urb->dev->dev; 234 dev = &urb->dev->dev;
239 235
240 dev_dbg(dev, " urb :%p\n", urb);
241 dev_dbg(dev, " dev :%p\n", urb->dev);
242
243 usbip_dump_usb_device(urb->dev); 236 usbip_dump_usb_device(urb->dev);
244 237
245 dev_dbg(dev, " pipe :%08x ", urb->pipe); 238 dev_dbg(dev, " pipe :%08x ", urb->pipe);
@@ -248,11 +241,9 @@ void usbip_dump_urb(struct urb *urb)
248 241
249 dev_dbg(dev, " status :%d\n", urb->status); 242 dev_dbg(dev, " status :%d\n", urb->status);
250 dev_dbg(dev, " transfer_flags :%08X\n", urb->transfer_flags); 243 dev_dbg(dev, " transfer_flags :%08X\n", urb->transfer_flags);
251 dev_dbg(dev, " transfer_buffer :%p\n", urb->transfer_buffer);
252 dev_dbg(dev, " transfer_buffer_length:%d\n", 244 dev_dbg(dev, " transfer_buffer_length:%d\n",
253 urb->transfer_buffer_length); 245 urb->transfer_buffer_length);
254 dev_dbg(dev, " actual_length :%d\n", urb->actual_length); 246 dev_dbg(dev, " actual_length :%d\n", urb->actual_length);
255 dev_dbg(dev, " setup_packet :%p\n", urb->setup_packet);
256 247
257 if (urb->setup_packet && usb_pipetype(urb->pipe) == PIPE_CONTROL) 248 if (urb->setup_packet && usb_pipetype(urb->pipe) == PIPE_CONTROL)
258 usbip_dump_usb_ctrlrequest( 249 usbip_dump_usb_ctrlrequest(
@@ -262,8 +253,6 @@ void usbip_dump_urb(struct urb *urb)
262 dev_dbg(dev, " number_of_packets :%d\n", urb->number_of_packets); 253 dev_dbg(dev, " number_of_packets :%d\n", urb->number_of_packets);
263 dev_dbg(dev, " interval :%d\n", urb->interval); 254 dev_dbg(dev, " interval :%d\n", urb->interval);
264 dev_dbg(dev, " error_count :%d\n", urb->error_count); 255 dev_dbg(dev, " error_count :%d\n", urb->error_count);
265 dev_dbg(dev, " context :%p\n", urb->context);
266 dev_dbg(dev, " complete :%p\n", urb->complete);
267} 256}
268EXPORT_SYMBOL_GPL(usbip_dump_urb); 257EXPORT_SYMBOL_GPL(usbip_dump_urb);
269 258
@@ -317,26 +306,20 @@ int usbip_recv(struct socket *sock, void *buf, int size)
317 struct msghdr msg = {.msg_flags = MSG_NOSIGNAL}; 306 struct msghdr msg = {.msg_flags = MSG_NOSIGNAL};
318 int total = 0; 307 int total = 0;
319 308
309 if (!sock || !buf || !size)
310 return -EINVAL;
311
320 iov_iter_kvec(&msg.msg_iter, READ|ITER_KVEC, &iov, 1, size); 312 iov_iter_kvec(&msg.msg_iter, READ|ITER_KVEC, &iov, 1, size);
321 313
322 usbip_dbg_xmit("enter\n"); 314 usbip_dbg_xmit("enter\n");
323 315
324 if (!sock || !buf || !size) {
325 pr_err("invalid arg, sock %p buff %p size %d\n", sock, buf,
326 size);
327 return -EINVAL;
328 }
329
330 do { 316 do {
331 int sz = msg_data_left(&msg); 317 msg_data_left(&msg);
332 sock->sk->sk_allocation = GFP_NOIO; 318 sock->sk->sk_allocation = GFP_NOIO;
333 319
334 result = sock_recvmsg(sock, &msg, MSG_WAITALL); 320 result = sock_recvmsg(sock, &msg, MSG_WAITALL);
335 if (result <= 0) { 321 if (result <= 0)
336 pr_debug("receive sock %p buf %p size %u ret %d total %d\n",
337 sock, buf + total, sz, result, total);
338 goto err; 322 goto err;
339 }
340 323
341 total += result; 324 total += result;
342 } while (msg_data_left(&msg)); 325 } while (msg_data_left(&msg));
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index e5de35c8c505..473fb8a87289 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -256,6 +256,7 @@ struct usbip_device {
256 /* lock for status */ 256 /* lock for status */
257 spinlock_t lock; 257 spinlock_t lock;
258 258
259 int sockfd;
259 struct socket *tcp_socket; 260 struct socket *tcp_socket;
260 261
261 struct task_struct *tcp_rx; 262 struct task_struct *tcp_rx;
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 713e94170963..c3e1008aa491 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -656,9 +656,6 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
656 struct vhci_device *vdev; 656 struct vhci_device *vdev;
657 unsigned long flags; 657 unsigned long flags;
658 658
659 usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n",
660 hcd, urb, mem_flags);
661
662 if (portnum > VHCI_HC_PORTS) { 659 if (portnum > VHCI_HC_PORTS) {
663 pr_err("invalid port number %d\n", portnum); 660 pr_err("invalid port number %d\n", portnum);
664 return -ENODEV; 661 return -ENODEV;
@@ -822,8 +819,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
822 struct vhci_device *vdev; 819 struct vhci_device *vdev;
823 unsigned long flags; 820 unsigned long flags;
824 821
825 pr_info("dequeue a urb %p\n", urb);
826
827 spin_lock_irqsave(&vhci->lock, flags); 822 spin_lock_irqsave(&vhci->lock, flags);
828 823
829 priv = urb->hcpriv; 824 priv = urb->hcpriv;
@@ -851,7 +846,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
851 /* tcp connection is closed */ 846 /* tcp connection is closed */
852 spin_lock(&vdev->priv_lock); 847 spin_lock(&vdev->priv_lock);
853 848
854 pr_info("device %p seems to be disconnected\n", vdev);
855 list_del(&priv->list); 849 list_del(&priv->list);
856 kfree(priv); 850 kfree(priv);
857 urb->hcpriv = NULL; 851 urb->hcpriv = NULL;
@@ -863,8 +857,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
863 * vhci_rx will receive RET_UNLINK and give back the URB. 857 * vhci_rx will receive RET_UNLINK and give back the URB.
864 * Otherwise, we give back it here. 858 * Otherwise, we give back it here.
865 */ 859 */
866 pr_info("gives back urb %p\n", urb);
867
868 usb_hcd_unlink_urb_from_ep(hcd, urb); 860 usb_hcd_unlink_urb_from_ep(hcd, urb);
869 861
870 spin_unlock_irqrestore(&vhci->lock, flags); 862 spin_unlock_irqrestore(&vhci->lock, flags);
@@ -892,8 +884,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
892 884
893 unlink->unlink_seqnum = priv->seqnum; 885 unlink->unlink_seqnum = priv->seqnum;
894 886
895 pr_info("device %p seems to be still connected\n", vdev);
896
897 /* send cmd_unlink and try to cancel the pending URB in the 887 /* send cmd_unlink and try to cancel the pending URB in the
898 * peer */ 888 * peer */
899 list_add_tail(&unlink->list, &vdev->unlink_tx); 889 list_add_tail(&unlink->list, &vdev->unlink_tx);
@@ -975,7 +965,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
975 965
976 /* need this? see stub_dev.c */ 966 /* need this? see stub_dev.c */
977 if (ud->tcp_socket) { 967 if (ud->tcp_socket) {
978 pr_debug("shutdown tcp_socket %p\n", ud->tcp_socket); 968 pr_debug("shutdown tcp_socket %d\n", ud->sockfd);
979 kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR); 969 kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
980 } 970 }
981 971
@@ -1098,7 +1088,6 @@ static int hcd_name_to_id(const char *name)
1098static int vhci_setup(struct usb_hcd *hcd) 1088static int vhci_setup(struct usb_hcd *hcd)
1099{ 1089{
1100 struct vhci *vhci = *((void **)dev_get_platdata(hcd->self.controller)); 1090 struct vhci *vhci = *((void **)dev_get_platdata(hcd->self.controller));
1101 hcd->self.sg_tablesize = ~0;
1102 if (usb_hcd_is_primary_hcd(hcd)) { 1091 if (usb_hcd_is_primary_hcd(hcd)) {
1103 vhci->vhci_hcd_hs = hcd_to_vhci_hcd(hcd); 1092 vhci->vhci_hcd_hs = hcd_to_vhci_hcd(hcd);
1104 vhci->vhci_hcd_hs->vhci = vhci; 1093 vhci->vhci_hcd_hs->vhci = vhci;
diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
index 90577e8b2282..112ebb90d8c9 100644
--- a/drivers/usb/usbip/vhci_rx.c
+++ b/drivers/usb/usbip/vhci_rx.c
@@ -23,24 +23,23 @@ struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, __u32 seqnum)
23 urb = priv->urb; 23 urb = priv->urb;
24 status = urb->status; 24 status = urb->status;
25 25
26 usbip_dbg_vhci_rx("find urb %p vurb %p seqnum %u\n", 26 usbip_dbg_vhci_rx("find urb seqnum %u\n", seqnum);
27 urb, priv, seqnum);
28 27
29 switch (status) { 28 switch (status) {
30 case -ENOENT: 29 case -ENOENT:
31 /* fall through */ 30 /* fall through */
32 case -ECONNRESET: 31 case -ECONNRESET:
33 dev_info(&urb->dev->dev, 32 dev_dbg(&urb->dev->dev,
34 "urb %p was unlinked %ssynchronuously.\n", urb, 33 "urb seq# %u was unlinked %ssynchronuously\n",
35 status == -ENOENT ? "" : "a"); 34 seqnum, status == -ENOENT ? "" : "a");
36 break; 35 break;
37 case -EINPROGRESS: 36 case -EINPROGRESS:
38 /* no info output */ 37 /* no info output */
39 break; 38 break;
40 default: 39 default:
41 dev_info(&urb->dev->dev, 40 dev_dbg(&urb->dev->dev,
42 "urb %p may be in a error, status %d\n", urb, 41 "urb seq# %u may be in a error, status %d\n",
43 status); 42 seqnum, status);
44 } 43 }
45 44
46 list_del(&priv->list); 45 list_del(&priv->list);
@@ -67,8 +66,8 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
67 spin_unlock_irqrestore(&vdev->priv_lock, flags); 66 spin_unlock_irqrestore(&vdev->priv_lock, flags);
68 67
69 if (!urb) { 68 if (!urb) {
70 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum); 69 pr_err("cannot find a urb of seqnum %u max seqnum %d\n",
71 pr_info("max seqnum %d\n", 70 pdu->base.seqnum,
72 atomic_read(&vhci_hcd->seqnum)); 71 atomic_read(&vhci_hcd->seqnum));
73 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); 72 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
74 return; 73 return;
@@ -91,7 +90,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
91 if (usbip_dbg_flag_vhci_rx) 90 if (usbip_dbg_flag_vhci_rx)
92 usbip_dump_urb(urb); 91 usbip_dump_urb(urb);
93 92
94 usbip_dbg_vhci_rx("now giveback urb %p\n", urb); 93 usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum);
95 94
96 spin_lock_irqsave(&vhci->lock, flags); 95 spin_lock_irqsave(&vhci->lock, flags);
97 usb_hcd_unlink_urb_from_ep(vhci_hcd_to_hcd(vhci_hcd), urb); 96 usb_hcd_unlink_urb_from_ep(vhci_hcd_to_hcd(vhci_hcd), urb);
@@ -158,7 +157,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
158 pr_info("the urb (seqnum %d) was already given back\n", 157 pr_info("the urb (seqnum %d) was already given back\n",
159 pdu->base.seqnum); 158 pdu->base.seqnum);
160 } else { 159 } else {
161 usbip_dbg_vhci_rx("now giveback urb %p\n", urb); 160 usbip_dbg_vhci_rx("now giveback urb %d\n", pdu->base.seqnum);
162 161
163 /* If unlink is successful, status is -ECONNRESET */ 162 /* If unlink is successful, status is -ECONNRESET */
164 urb->status = pdu->u.ret_unlink.status; 163 urb->status = pdu->u.ret_unlink.status;
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
index e78f7472cac4..091f76b7196d 100644
--- a/drivers/usb/usbip/vhci_sysfs.c
+++ b/drivers/usb/usbip/vhci_sysfs.c
@@ -17,15 +17,20 @@
17 17
18/* 18/*
19 * output example: 19 * output example:
20 * hub port sta spd dev socket local_busid 20 * hub port sta spd dev sockfd local_busid
21 * hs 0000 004 000 00000000 c5a7bb80 1-2.3 21 * hs 0000 004 000 00000000 3 1-2.3
22 * ................................................ 22 * ................................................
23 * ss 0008 004 000 00000000 d8cee980 2-3.4 23 * ss 0008 004 000 00000000 4 2-3.4
24 * ................................................ 24 * ................................................
25 * 25 *
26 * IP address can be retrieved from a socket pointer address by looking 26 * Output includes socket fd instead of socket pointer address to avoid
27 * up /proc/net/{tcp,tcp6}. Also, a userland program may remember a 27 * leaking kernel memory address in:
28 * port number and its peer IP address. 28 * /sys/devices/platform/vhci_hcd.0/status and in debug output.
29 * The socket pointer address is not used at the moment and it was made
30 * visible as a convenient way to find IP address from socket pointer
31 * address by looking up /proc/net/{tcp,tcp6}. As this opens a security
32 * hole, the change is made to use sockfd instead.
33 *
29 */ 34 */
30static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vdev) 35static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vdev)
31{ 36{
@@ -39,8 +44,8 @@ static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vd
39 if (vdev->ud.status == VDEV_ST_USED) { 44 if (vdev->ud.status == VDEV_ST_USED) {
40 *out += sprintf(*out, "%03u %08x ", 45 *out += sprintf(*out, "%03u %08x ",
41 vdev->speed, vdev->devid); 46 vdev->speed, vdev->devid);
42 *out += sprintf(*out, "%16p %s", 47 *out += sprintf(*out, "%u %s",
43 vdev->ud.tcp_socket, 48 vdev->ud.sockfd,
44 dev_name(&vdev->udev->dev)); 49 dev_name(&vdev->udev->dev));
45 50
46 } else { 51 } else {
@@ -160,7 +165,8 @@ static ssize_t nports_show(struct device *dev, struct device_attribute *attr,
160 char *s = out; 165 char *s = out;
161 166
162 /* 167 /*
163 * Half the ports are for SPEED_HIGH and half for SPEED_SUPER, thus the * 2. 168 * Half the ports are for SPEED_HIGH and half for SPEED_SUPER,
169 * thus the * 2.
164 */ 170 */
165 out += sprintf(out, "%d\n", VHCI_PORTS * vhci_num_controllers); 171 out += sprintf(out, "%d\n", VHCI_PORTS * vhci_num_controllers);
166 return out - s; 172 return out - s;
@@ -366,6 +372,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
366 372
367 vdev->devid = devid; 373 vdev->devid = devid;
368 vdev->speed = speed; 374 vdev->speed = speed;
375 vdev->ud.sockfd = sockfd;
369 vdev->ud.tcp_socket = socket; 376 vdev->ud.tcp_socket = socket;
370 vdev->ud.status = VDEV_ST_NOTASSIGNED; 377 vdev->ud.status = VDEV_ST_NOTASSIGNED;
371 378
diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c
index d625a2ff4b71..9aed15a358b7 100644
--- a/drivers/usb/usbip/vhci_tx.c
+++ b/drivers/usb/usbip/vhci_tx.c
@@ -69,7 +69,8 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
69 memset(&msg, 0, sizeof(msg)); 69 memset(&msg, 0, sizeof(msg));
70 memset(&iov, 0, sizeof(iov)); 70 memset(&iov, 0, sizeof(iov));
71 71
72 usbip_dbg_vhci_tx("setup txdata urb %p\n", urb); 72 usbip_dbg_vhci_tx("setup txdata urb seqnum %lu\n",
73 priv->seqnum);
73 74
74 /* 1. setup usbip_header */ 75 /* 1. setup usbip_header */
75 setup_cmd_submit_pdu(&pdu_header, urb); 76 setup_cmd_submit_pdu(&pdu_header, urb);
diff --git a/drivers/usb/usbip/vudc_rx.c b/drivers/usb/usbip/vudc_rx.c
index df1e30989148..1e8a23d92cb4 100644
--- a/drivers/usb/usbip/vudc_rx.c
+++ b/drivers/usb/usbip/vudc_rx.c
@@ -120,6 +120,25 @@ static int v_recv_cmd_submit(struct vudc *udc,
120 urb_p->new = 1; 120 urb_p->new = 1;
121 urb_p->seqnum = pdu->base.seqnum; 121 urb_p->seqnum = pdu->base.seqnum;
122 122
123 if (urb_p->ep->type == USB_ENDPOINT_XFER_ISOC) {
124 /* validate packet size and number of packets */
125 unsigned int maxp, packets, bytes;
126
127 maxp = usb_endpoint_maxp(urb_p->ep->desc);
128 maxp *= usb_endpoint_maxp_mult(urb_p->ep->desc);
129 bytes = pdu->u.cmd_submit.transfer_buffer_length;
130 packets = DIV_ROUND_UP(bytes, maxp);
131
132 if (pdu->u.cmd_submit.number_of_packets < 0 ||
133 pdu->u.cmd_submit.number_of_packets > packets) {
134 dev_err(&udc->gadget.dev,
135 "CMD_SUBMIT: isoc invalid num packets %d\n",
136 pdu->u.cmd_submit.number_of_packets);
137 ret = -EMSGSIZE;
138 goto free_urbp;
139 }
140 }
141
123 ret = alloc_urb_from_cmd(&urb_p->urb, pdu, urb_p->ep->type); 142 ret = alloc_urb_from_cmd(&urb_p->urb, pdu, urb_p->ep->type);
124 if (ret) { 143 if (ret) {
125 usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC); 144 usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
diff --git a/drivers/usb/usbip/vudc_tx.c b/drivers/usb/usbip/vudc_tx.c
index 1440ae0919ec..3ccb17c3e840 100644
--- a/drivers/usb/usbip/vudc_tx.c
+++ b/drivers/usb/usbip/vudc_tx.c
@@ -85,6 +85,13 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
85 memset(&pdu_header, 0, sizeof(pdu_header)); 85 memset(&pdu_header, 0, sizeof(pdu_header));
86 memset(&msg, 0, sizeof(msg)); 86 memset(&msg, 0, sizeof(msg));
87 87
88 if (urb->actual_length > 0 && !urb->transfer_buffer) {
89 dev_err(&udc->gadget.dev,
90 "urb: actual_length %d transfer_buffer null\n",
91 urb->actual_length);
92 return -1;
93 }
94
88 if (urb_p->type == USB_ENDPOINT_XFER_ISOC) 95 if (urb_p->type == USB_ENDPOINT_XFER_ISOC)
89 iovnum = 2 + urb->number_of_packets; 96 iovnum = 2 + urb->number_of_packets;
90 else 97 else
@@ -100,8 +107,8 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
100 107
101 /* 1. setup usbip_header */ 108 /* 1. setup usbip_header */
102 setup_ret_submit_pdu(&pdu_header, urb_p); 109 setup_ret_submit_pdu(&pdu_header, urb_p);
103 usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n", 110 usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
104 pdu_header.base.seqnum, urb); 111 pdu_header.base.seqnum);
105 usbip_header_correct_endian(&pdu_header, 1); 112 usbip_header_correct_endian(&pdu_header, 1);
106 113
107 iov[iovnum].iov_base = &pdu_header; 114 iov[iovnum].iov_base = &pdu_header;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 8d626d7c2e7e..c7bdeb655646 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -778,16 +778,6 @@ static void handle_rx(struct vhost_net *net)
778 /* On error, stop handling until the next kick. */ 778 /* On error, stop handling until the next kick. */
779 if (unlikely(headcount < 0)) 779 if (unlikely(headcount < 0))
780 goto out; 780 goto out;
781 if (nvq->rx_array)
782 msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
783 /* On overrun, truncate and discard */
784 if (unlikely(headcount > UIO_MAXIOV)) {
785 iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
786 err = sock->ops->recvmsg(sock, &msg,
787 1, MSG_DONTWAIT | MSG_TRUNC);
788 pr_debug("Discarded rx packet: len %zd\n", sock_len);
789 continue;
790 }
791 /* OK, now we need to know about added descriptors. */ 781 /* OK, now we need to know about added descriptors. */
792 if (!headcount) { 782 if (!headcount) {
793 if (unlikely(vhost_enable_notify(&net->dev, vq))) { 783 if (unlikely(vhost_enable_notify(&net->dev, vq))) {
@@ -800,6 +790,16 @@ static void handle_rx(struct vhost_net *net)
800 * they refilled. */ 790 * they refilled. */
801 goto out; 791 goto out;
802 } 792 }
793 if (nvq->rx_array)
794 msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
795 /* On overrun, truncate and discard */
796 if (unlikely(headcount > UIO_MAXIOV)) {
797 iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
798 err = sock->ops->recvmsg(sock, &msg,
799 1, MSG_DONTWAIT | MSG_TRUNC);
800 pr_debug("Discarded rx packet: len %zd\n", sock_len);
801 continue;
802 }
803 /* We don't need to be notified again. */ 803 /* We don't need to be notified again. */
804 iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len); 804 iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
805 fixup = msg.msg_iter; 805 fixup = msg.msg_iter;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 33ac2b186b85..5727b186b3ca 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -904,7 +904,7 @@ static void vhost_dev_lock_vqs(struct vhost_dev *d)
904{ 904{
905 int i = 0; 905 int i = 0;
906 for (i = 0; i < d->nvqs; ++i) 906 for (i = 0; i < d->nvqs; ++i)
907 mutex_lock(&d->vqs[i]->mutex); 907 mutex_lock_nested(&d->vqs[i]->mutex, i);
908} 908}
909 909
910static void vhost_dev_unlock_vqs(struct vhost_dev *d) 910static void vhost_dev_unlock_vqs(struct vhost_dev *d)
@@ -1015,6 +1015,10 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
1015 vhost_iotlb_notify_vq(dev, msg); 1015 vhost_iotlb_notify_vq(dev, msg);
1016 break; 1016 break;
1017 case VHOST_IOTLB_INVALIDATE: 1017 case VHOST_IOTLB_INVALIDATE:
1018 if (!dev->iotlb) {
1019 ret = -EFAULT;
1020 break;
1021 }
1018 vhost_vq_meta_reset(dev); 1022 vhost_vq_meta_reset(dev);
1019 vhost_del_umem_range(dev->iotlb, msg->iova, 1023 vhost_del_umem_range(dev->iotlb, msg->iova,
1020 msg->iova + msg->size - 1); 1024 msg->iova + msg->size - 1);
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 48230a5e12f2..bf7ff3934d7f 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -333,6 +333,8 @@ int register_virtio_device(struct virtio_device *dev)
333 /* device_register() causes the bus infrastructure to look for a 333 /* device_register() causes the bus infrastructure to look for a
334 * matching driver. */ 334 * matching driver. */
335 err = device_register(&dev->dev); 335 err = device_register(&dev->dev);
336 if (err)
337 ida_simple_remove(&virtio_index_ida, dev->index);
336out: 338out:
337 if (err) 339 if (err)
338 virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED); 340 virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 7960746f7597..a1fb52cb3f0a 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -174,13 +174,12 @@ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
174 while ((page = balloon_page_pop(&pages))) { 174 while ((page = balloon_page_pop(&pages))) {
175 balloon_page_enqueue(&vb->vb_dev_info, page); 175 balloon_page_enqueue(&vb->vb_dev_info, page);
176 176
177 vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE;
178
179 set_page_pfns(vb, vb->pfns + vb->num_pfns, page); 177 set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
180 vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE; 178 vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
181 if (!virtio_has_feature(vb->vdev, 179 if (!virtio_has_feature(vb->vdev,
182 VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) 180 VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
183 adjust_managed_page_count(page, -1); 181 adjust_managed_page_count(page, -1);
182 vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE;
184 } 183 }
185 184
186 num_allocated_pages = vb->num_pfns; 185 num_allocated_pages = vb->num_pfns;
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 74dc7170fd35..c92131edfaba 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -493,7 +493,16 @@ static const struct virtio_config_ops virtio_mmio_config_ops = {
493}; 493};
494 494
495 495
496static void virtio_mmio_release_dev_empty(struct device *_d) {} 496static void virtio_mmio_release_dev(struct device *_d)
497{
498 struct virtio_device *vdev =
499 container_of(_d, struct virtio_device, dev);
500 struct virtio_mmio_device *vm_dev =
501 container_of(vdev, struct virtio_mmio_device, vdev);
502 struct platform_device *pdev = vm_dev->pdev;
503
504 devm_kfree(&pdev->dev, vm_dev);
505}
497 506
498/* Platform device */ 507/* Platform device */
499 508
@@ -514,10 +523,10 @@ static int virtio_mmio_probe(struct platform_device *pdev)
514 523
515 vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL); 524 vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
516 if (!vm_dev) 525 if (!vm_dev)
517 return -ENOMEM; 526 return -ENOMEM;
518 527
519 vm_dev->vdev.dev.parent = &pdev->dev; 528 vm_dev->vdev.dev.parent = &pdev->dev;
520 vm_dev->vdev.dev.release = virtio_mmio_release_dev_empty; 529 vm_dev->vdev.dev.release = virtio_mmio_release_dev;
521 vm_dev->vdev.config = &virtio_mmio_config_ops; 530 vm_dev->vdev.config = &virtio_mmio_config_ops;
522 vm_dev->pdev = pdev; 531 vm_dev->pdev = pdev;
523 INIT_LIST_HEAD(&vm_dev->virtqueues); 532 INIT_LIST_HEAD(&vm_dev->virtqueues);
@@ -573,13 +582,16 @@ static int virtio_mmio_probe(struct platform_device *pdev)
573 582
574 platform_set_drvdata(pdev, vm_dev); 583 platform_set_drvdata(pdev, vm_dev);
575 584
576 return register_virtio_device(&vm_dev->vdev); 585 rc = register_virtio_device(&vm_dev->vdev);
586 if (rc)
587 put_device(&vm_dev->vdev.dev);
588
589 return rc;
577} 590}
578 591
579static int virtio_mmio_remove(struct platform_device *pdev) 592static int virtio_mmio_remove(struct platform_device *pdev)
580{ 593{
581 struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev); 594 struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);
582
583 unregister_virtio_device(&vm_dev->vdev); 595 unregister_virtio_device(&vm_dev->vdev);
584 596
585 return 0; 597 return 0;
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index d8dd54678ab7..e5d0c28372ea 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -269,7 +269,7 @@ config XEN_ACPI_HOTPLUG_CPU
269 269
270config XEN_ACPI_PROCESSOR 270config XEN_ACPI_PROCESSOR
271 tristate "Xen ACPI processor" 271 tristate "Xen ACPI processor"
272 depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ 272 depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
273 default m 273 default m
274 help 274 help
275 This ACPI processor uploads Power Management information to the Xen 275 This ACPI processor uploads Power Management information to the Xen
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index f77e499afddd..065f0b607373 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -257,10 +257,25 @@ static void release_memory_resource(struct resource *resource)
257 kfree(resource); 257 kfree(resource);
258} 258}
259 259
260/*
261 * Host memory not allocated to dom0. We can use this range for hotplug-based
262 * ballooning.
263 *
264 * It's a type-less resource. Setting IORESOURCE_MEM will make resource
265 * management algorithms (arch_remove_reservations()) look into guest e820,
266 * which we don't want.
267 */
268static struct resource hostmem_resource = {
269 .name = "Host RAM",
270};
271
272void __attribute__((weak)) __init arch_xen_balloon_init(struct resource *res)
273{}
274
260static struct resource *additional_memory_resource(phys_addr_t size) 275static struct resource *additional_memory_resource(phys_addr_t size)
261{ 276{
262 struct resource *res; 277 struct resource *res, *res_hostmem;
263 int ret; 278 int ret = -ENOMEM;
264 279
265 res = kzalloc(sizeof(*res), GFP_KERNEL); 280 res = kzalloc(sizeof(*res), GFP_KERNEL);
266 if (!res) 281 if (!res)
@@ -269,13 +284,42 @@ static struct resource *additional_memory_resource(phys_addr_t size)
269 res->name = "System RAM"; 284 res->name = "System RAM";
270 res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; 285 res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
271 286
272 ret = allocate_resource(&iomem_resource, res, 287 res_hostmem = kzalloc(sizeof(*res), GFP_KERNEL);
273 size, 0, -1, 288 if (res_hostmem) {
274 PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL); 289 /* Try to grab a range from hostmem */
275 if (ret < 0) { 290 res_hostmem->name = "Host memory";
276 pr_err("Cannot allocate new System RAM resource\n"); 291 ret = allocate_resource(&hostmem_resource, res_hostmem,
277 kfree(res); 292 size, 0, -1,
278 return NULL; 293 PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
294 }
295
296 if (!ret) {
297 /*
298 * Insert this resource into iomem. Because hostmem_resource
299 * tracks portion of guest e820 marked as UNUSABLE noone else
300 * should try to use it.
301 */
302 res->start = res_hostmem->start;
303 res->end = res_hostmem->end;
304 ret = insert_resource(&iomem_resource, res);
305 if (ret < 0) {
306 pr_err("Can't insert iomem_resource [%llx - %llx]\n",
307 res->start, res->end);
308 release_memory_resource(res_hostmem);
309 res_hostmem = NULL;
310 res->start = res->end = 0;
311 }
312 }
313
314 if (ret) {
315 ret = allocate_resource(&iomem_resource, res,
316 size, 0, -1,
317 PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
318 if (ret < 0) {
319 pr_err("Cannot allocate new System RAM resource\n");
320 kfree(res);
321 return NULL;
322 }
279 } 323 }
280 324
281#ifdef CONFIG_SPARSEMEM 325#ifdef CONFIG_SPARSEMEM
@@ -287,6 +331,7 @@ static struct resource *additional_memory_resource(phys_addr_t size)
287 pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n", 331 pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
288 pfn, limit); 332 pfn, limit);
289 release_memory_resource(res); 333 release_memory_resource(res);
334 release_memory_resource(res_hostmem);
290 return NULL; 335 return NULL;
291 } 336 }
292 } 337 }
@@ -765,6 +810,8 @@ static int __init balloon_init(void)
765 set_online_page_callback(&xen_online_page); 810 set_online_page_callback(&xen_online_page);
766 register_memory_notifier(&xen_memory_nb); 811 register_memory_notifier(&xen_memory_nb);
767 register_sysctl_table(xen_root); 812 register_sysctl_table(xen_root);
813
814 arch_xen_balloon_init(&hostmem_resource);
768#endif 815#endif
769 816
770#ifdef CONFIG_XEN_PV 817#ifdef CONFIG_XEN_PV
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 57efbd3b053b..bd56653b9bbc 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -380,10 +380,8 @@ static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
380 } 380 }
381 range = 0; 381 range = 0;
382 while (range < pages) { 382 while (range < pages) {
383 if (map->unmap_ops[offset+range].handle == -1) { 383 if (map->unmap_ops[offset+range].handle == -1)
384 range--;
385 break; 384 break;
386 }
387 range++; 385 range++;
388 } 386 }
389 err = __unmap_grant_pages(map, offset, range); 387 err = __unmap_grant_pages(map, offset, range);
@@ -1073,8 +1071,10 @@ unlock_out:
1073out_unlock_put: 1071out_unlock_put:
1074 mutex_unlock(&priv->lock); 1072 mutex_unlock(&priv->lock);
1075out_put_map: 1073out_put_map:
1076 if (use_ptemod) 1074 if (use_ptemod) {
1077 map->vma = NULL; 1075 map->vma = NULL;
1076 unmap_grant_pages(map, 0, map->count);
1077 }
1078 gntdev_put_map(priv, map); 1078 gntdev_put_map(priv, map);
1079 return err; 1079 return err;
1080} 1080}
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 40caa92bff33..4c789e61554b 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -805,7 +805,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
805 pvcalls_exit(); 805 pvcalls_exit();
806 return ret; 806 return ret;
807 } 807 }
808 map2 = kzalloc(sizeof(*map2), GFP_KERNEL); 808 map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
809 if (map2 == NULL) { 809 if (map2 == NULL) {
810 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, 810 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
811 (void *)&map->passive.flags); 811 (void *)&map->passive.flags);
@@ -1103,7 +1103,7 @@ static int pvcalls_front_remove(struct xenbus_device *dev)
1103 kfree(map); 1103 kfree(map);
1104 } 1104 }
1105 } 1105 }
1106 if (bedata->ref >= 0) 1106 if (bedata->ref != -1)
1107 gnttab_end_foreign_access(bedata->ref, 0, 0); 1107 gnttab_end_foreign_access(bedata->ref, 0, 0);
1108 kfree(bedata->ring.sring); 1108 kfree(bedata->ring.sring);
1109 kfree(bedata); 1109 kfree(bedata);
@@ -1128,6 +1128,8 @@ static int pvcalls_front_probe(struct xenbus_device *dev,
1128 } 1128 }
1129 1129
1130 versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len); 1130 versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
1131 if (IS_ERR(versions))
1132 return PTR_ERR(versions);
1131 if (!len) 1133 if (!len)
1132 return -EINVAL; 1134 return -EINVAL;
1133 if (strcmp(versions, "1")) { 1135 if (strcmp(versions, "1")) {
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index ff8d5bf4354f..23c7f395d718 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -895,20 +895,38 @@ error:
895 * However, if we didn't have a callback promise outstanding, or it was 895 * However, if we didn't have a callback promise outstanding, or it was
896 * outstanding on a different server, then it won't break it either... 896 * outstanding on a different server, then it won't break it either...
897 */ 897 */
898static int afs_dir_remove_link(struct dentry *dentry, struct key *key) 898static int afs_dir_remove_link(struct dentry *dentry, struct key *key,
899 unsigned long d_version_before,
900 unsigned long d_version_after)
899{ 901{
902 bool dir_valid;
900 int ret = 0; 903 int ret = 0;
901 904
905 /* There were no intervening changes on the server if the version
906 * number we got back was incremented by exactly 1.
907 */
908 dir_valid = (d_version_after == d_version_before + 1);
909
902 if (d_really_is_positive(dentry)) { 910 if (d_really_is_positive(dentry)) {
903 struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry)); 911 struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
904 912
905 if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) 913 if (dir_valid) {
906 kdebug("AFS_VNODE_DELETED"); 914 drop_nlink(&vnode->vfs_inode);
907 clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); 915 if (vnode->vfs_inode.i_nlink == 0) {
908 916 set_bit(AFS_VNODE_DELETED, &vnode->flags);
909 ret = afs_validate(vnode, key); 917 clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
910 if (ret == -ESTALE) 918 }
911 ret = 0; 919 ret = 0;
920 } else {
921 clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
922
923 if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
924 kdebug("AFS_VNODE_DELETED");
925
926 ret = afs_validate(vnode, key);
927 if (ret == -ESTALE)
928 ret = 0;
929 }
912 _debug("nlink %d [val %d]", vnode->vfs_inode.i_nlink, ret); 930 _debug("nlink %d [val %d]", vnode->vfs_inode.i_nlink, ret);
913 } 931 }
914 932
@@ -923,6 +941,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
923 struct afs_fs_cursor fc; 941 struct afs_fs_cursor fc;
924 struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode; 942 struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode;
925 struct key *key; 943 struct key *key;
944 unsigned long d_version = (unsigned long)dentry->d_fsdata;
926 int ret; 945 int ret;
927 946
928 _enter("{%x:%u},{%pd}", 947 _enter("{%x:%u},{%pd}",
@@ -955,7 +974,9 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
955 afs_vnode_commit_status(&fc, dvnode, fc.cb_break); 974 afs_vnode_commit_status(&fc, dvnode, fc.cb_break);
956 ret = afs_end_vnode_operation(&fc); 975 ret = afs_end_vnode_operation(&fc);
957 if (ret == 0) 976 if (ret == 0)
958 ret = afs_dir_remove_link(dentry, key); 977 ret = afs_dir_remove_link(
978 dentry, key, d_version,
979 (unsigned long)dvnode->status.data_version);
959 } 980 }
960 981
961error_key: 982error_key:
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 3415eb7484f6..1e81864ef0b2 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -377,6 +377,10 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
377 } 377 }
378 378
379 read_sequnlock_excl(&vnode->cb_lock); 379 read_sequnlock_excl(&vnode->cb_lock);
380
381 if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
382 clear_nlink(&vnode->vfs_inode);
383
380 if (valid) 384 if (valid)
381 goto valid; 385 goto valid;
382 386
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index ea1460b9b71a..e1126659f043 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -885,7 +885,7 @@ int afs_extract_data(struct afs_call *call, void *buf, size_t count,
885{ 885{
886 struct afs_net *net = call->net; 886 struct afs_net *net = call->net;
887 enum afs_call_state state; 887 enum afs_call_state state;
888 u32 remote_abort; 888 u32 remote_abort = 0;
889 int ret; 889 int ret;
890 890
891 _enter("{%s,%zu},,%zu,%d", 891 _enter("{%s,%zu},,%zu,%d",
diff --git a/fs/afs/write.c b/fs/afs/write.c
index cb5f8a3df577..9370e2feb999 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -198,7 +198,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
198 ret = afs_fill_page(vnode, key, pos + copied, 198 ret = afs_fill_page(vnode, key, pos + copied,
199 len - copied, page); 199 len - copied, page);
200 if (ret < 0) 200 if (ret < 0)
201 return ret; 201 goto out;
202 } 202 }
203 SetPageUptodate(page); 203 SetPageUptodate(page);
204 } 204 }
@@ -206,10 +206,12 @@ int afs_write_end(struct file *file, struct address_space *mapping,
206 set_page_dirty(page); 206 set_page_dirty(page);
207 if (PageDirty(page)) 207 if (PageDirty(page))
208 _debug("dirtied"); 208 _debug("dirtied");
209 ret = copied;
210
211out:
209 unlock_page(page); 212 unlock_page(page);
210 put_page(page); 213 put_page(page);
211 214 return ret;
212 return copied;
213} 215}
214 216
215/* 217/*
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 8fc41705c7cd..961a12dc6dc8 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -170,7 +170,6 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
170 170
171 mutex_unlock(&sbi->wq_mutex); 171 mutex_unlock(&sbi->wq_mutex);
172 172
173 if (autofs4_write(sbi, pipe, &pkt, pktsz))
174 switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) { 173 switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) {
175 case 0: 174 case 0:
176 break; 175 break;
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 531e0a8645b0..1e74cf826532 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1032,14 +1032,17 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
1032 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && 1032 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
1033 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) { 1033 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
1034 ret = btrfs_inc_ref(trans, root, buf, 1); 1034 ret = btrfs_inc_ref(trans, root, buf, 1);
1035 BUG_ON(ret); /* -ENOMEM */ 1035 if (ret)
1036 return ret;
1036 1037
1037 if (root->root_key.objectid == 1038 if (root->root_key.objectid ==
1038 BTRFS_TREE_RELOC_OBJECTID) { 1039 BTRFS_TREE_RELOC_OBJECTID) {
1039 ret = btrfs_dec_ref(trans, root, buf, 0); 1040 ret = btrfs_dec_ref(trans, root, buf, 0);
1040 BUG_ON(ret); /* -ENOMEM */ 1041 if (ret)
1042 return ret;
1041 ret = btrfs_inc_ref(trans, root, cow, 1); 1043 ret = btrfs_inc_ref(trans, root, cow, 1);
1042 BUG_ON(ret); /* -ENOMEM */ 1044 if (ret)
1045 return ret;
1043 } 1046 }
1044 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF; 1047 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
1045 } else { 1048 } else {
@@ -1049,7 +1052,8 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
1049 ret = btrfs_inc_ref(trans, root, cow, 1); 1052 ret = btrfs_inc_ref(trans, root, cow, 1);
1050 else 1053 else
1051 ret = btrfs_inc_ref(trans, root, cow, 0); 1054 ret = btrfs_inc_ref(trans, root, cow, 0);
1052 BUG_ON(ret); /* -ENOMEM */ 1055 if (ret)
1056 return ret;
1053 } 1057 }
1054 if (new_flags != 0) { 1058 if (new_flags != 0) {
1055 int level = btrfs_header_level(buf); 1059 int level = btrfs_header_level(buf);
@@ -1068,9 +1072,11 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
1068 ret = btrfs_inc_ref(trans, root, cow, 1); 1072 ret = btrfs_inc_ref(trans, root, cow, 1);
1069 else 1073 else
1070 ret = btrfs_inc_ref(trans, root, cow, 0); 1074 ret = btrfs_inc_ref(trans, root, cow, 0);
1071 BUG_ON(ret); /* -ENOMEM */ 1075 if (ret)
1076 return ret;
1072 ret = btrfs_dec_ref(trans, root, buf, 1); 1077 ret = btrfs_dec_ref(trans, root, buf, 1);
1073 BUG_ON(ret); /* -ENOMEM */ 1078 if (ret)
1079 return ret;
1074 } 1080 }
1075 clean_tree_block(fs_info, buf); 1081 clean_tree_block(fs_info, buf);
1076 *last_ref = 1; 1082 *last_ref = 1;
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 5d73f79ded8b..a6226cd6063c 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -87,6 +87,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
87 87
88 spin_lock(&root->inode_lock); 88 spin_lock(&root->inode_lock);
89 node = radix_tree_lookup(&root->delayed_nodes_tree, ino); 89 node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
90
90 if (node) { 91 if (node) {
91 if (btrfs_inode->delayed_node) { 92 if (btrfs_inode->delayed_node) {
92 refcount_inc(&node->refs); /* can be accessed */ 93 refcount_inc(&node->refs); /* can be accessed */
@@ -94,9 +95,30 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
94 spin_unlock(&root->inode_lock); 95 spin_unlock(&root->inode_lock);
95 return node; 96 return node;
96 } 97 }
97 btrfs_inode->delayed_node = node; 98
98 /* can be accessed and cached in the inode */ 99 /*
99 refcount_add(2, &node->refs); 100 * It's possible that we're racing into the middle of removing
101 * this node from the radix tree. In this case, the refcount
102 * was zero and it should never go back to one. Just return
103 * NULL like it was never in the radix at all; our release
104 * function is in the process of removing it.
105 *
106 * Some implementations of refcount_inc refuse to bump the
107 * refcount once it has hit zero. If we don't do this dance
108 * here, refcount_inc() may decide to just WARN_ONCE() instead
109 * of actually bumping the refcount.
110 *
111 * If this node is properly in the radix, we want to bump the
112 * refcount twice, once for the inode and once for this get
113 * operation.
114 */
115 if (refcount_inc_not_zero(&node->refs)) {
116 refcount_inc(&node->refs);
117 btrfs_inode->delayed_node = node;
118 } else {
119 node = NULL;
120 }
121
100 spin_unlock(&root->inode_lock); 122 spin_unlock(&root->inode_lock);
101 return node; 123 return node;
102 } 124 }
@@ -254,17 +276,18 @@ static void __btrfs_release_delayed_node(
254 mutex_unlock(&delayed_node->mutex); 276 mutex_unlock(&delayed_node->mutex);
255 277
256 if (refcount_dec_and_test(&delayed_node->refs)) { 278 if (refcount_dec_and_test(&delayed_node->refs)) {
257 bool free = false;
258 struct btrfs_root *root = delayed_node->root; 279 struct btrfs_root *root = delayed_node->root;
280
259 spin_lock(&root->inode_lock); 281 spin_lock(&root->inode_lock);
260 if (refcount_read(&delayed_node->refs) == 0) { 282 /*
261 radix_tree_delete(&root->delayed_nodes_tree, 283 * Once our refcount goes to zero, nobody is allowed to bump it
262 delayed_node->inode_id); 284 * back up. We can delete it now.
263 free = true; 285 */
264 } 286 ASSERT(refcount_read(&delayed_node->refs) == 0);
287 radix_tree_delete(&root->delayed_nodes_tree,
288 delayed_node->inode_id);
265 spin_unlock(&root->inode_lock); 289 spin_unlock(&root->inode_lock);
266 if (free) 290 kmem_cache_free(delayed_node_cache, delayed_node);
267 kmem_cache_free(delayed_node_cache, delayed_node);
268 } 291 }
269} 292}
270 293
@@ -1610,28 +1633,18 @@ void btrfs_readdir_put_delayed_items(struct inode *inode,
1610int btrfs_should_delete_dir_index(struct list_head *del_list, 1633int btrfs_should_delete_dir_index(struct list_head *del_list,
1611 u64 index) 1634 u64 index)
1612{ 1635{
1613 struct btrfs_delayed_item *curr, *next; 1636 struct btrfs_delayed_item *curr;
1614 int ret; 1637 int ret = 0;
1615
1616 if (list_empty(del_list))
1617 return 0;
1618 1638
1619 list_for_each_entry_safe(curr, next, del_list, readdir_list) { 1639 list_for_each_entry(curr, del_list, readdir_list) {
1620 if (curr->key.offset > index) 1640 if (curr->key.offset > index)
1621 break; 1641 break;
1622 1642 if (curr->key.offset == index) {
1623 list_del(&curr->readdir_list); 1643 ret = 1;
1624 ret = (curr->key.offset == index); 1644 break;
1625 1645 }
1626 if (refcount_dec_and_test(&curr->refs))
1627 kfree(curr);
1628
1629 if (ret)
1630 return 1;
1631 else
1632 continue;
1633 } 1646 }
1634 return 0; 1647 return ret;
1635} 1648}
1636 1649
1637/* 1650/*
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 10a2a579cc7f..a8ecccfc36de 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3231,6 +3231,7 @@ static int write_dev_supers(struct btrfs_device *device,
3231 int errors = 0; 3231 int errors = 0;
3232 u32 crc; 3232 u32 crc;
3233 u64 bytenr; 3233 u64 bytenr;
3234 int op_flags;
3234 3235
3235 if (max_mirrors == 0) 3236 if (max_mirrors == 0)
3236 max_mirrors = BTRFS_SUPER_MIRROR_MAX; 3237 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
@@ -3273,13 +3274,10 @@ static int write_dev_supers(struct btrfs_device *device,
3273 * we fua the first super. The others we allow 3274 * we fua the first super. The others we allow
3274 * to go down lazy. 3275 * to go down lazy.
3275 */ 3276 */
3276 if (i == 0) { 3277 op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
3277 ret = btrfsic_submit_bh(REQ_OP_WRITE, 3278 if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
3278 REQ_SYNC | REQ_FUA | REQ_META | REQ_PRIO, bh); 3279 op_flags |= REQ_FUA;
3279 } else { 3280 ret = btrfsic_submit_bh(REQ_OP_WRITE, op_flags, bh);
3280 ret = btrfsic_submit_bh(REQ_OP_WRITE,
3281 REQ_SYNC | REQ_META | REQ_PRIO, bh);
3282 }
3283 if (ret) 3281 if (ret)
3284 errors++; 3282 errors++;
3285 } 3283 }
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 4497f937e8fb..2f4328511ac8 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -9206,6 +9206,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
9206 ret = btrfs_del_root(trans, fs_info, &root->root_key); 9206 ret = btrfs_del_root(trans, fs_info, &root->root_key);
9207 if (ret) { 9207 if (ret) {
9208 btrfs_abort_transaction(trans, ret); 9208 btrfs_abort_transaction(trans, ret);
9209 err = ret;
9209 goto out_end_trans; 9210 goto out_end_trans;
9210 } 9211 }
9211 9212
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 993061f83067..e1a7f3cb5be9 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3005,6 +3005,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
3005 compress_type = ordered_extent->compress_type; 3005 compress_type = ordered_extent->compress_type;
3006 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 3006 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3007 BUG_ON(compress_type); 3007 BUG_ON(compress_type);
3008 btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset,
3009 ordered_extent->len);
3008 ret = btrfs_mark_extent_written(trans, BTRFS_I(inode), 3010 ret = btrfs_mark_extent_written(trans, BTRFS_I(inode),
3009 ordered_extent->file_offset, 3011 ordered_extent->file_offset,
3010 ordered_extent->file_offset + 3012 ordered_extent->file_offset +
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index d748ad1c3620..2ef8acaac688 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2206,7 +2206,7 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
2206 if (!path) 2206 if (!path)
2207 return -ENOMEM; 2207 return -ENOMEM;
2208 2208
2209 ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX]; 2209 ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX - 1];
2210 2210
2211 key.objectid = tree_id; 2211 key.objectid = tree_id;
2212 key.type = BTRFS_ROOT_ITEM_KEY; 2212 key.type = BTRFS_ROOT_ITEM_KEY;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 49810b70afd3..a25684287501 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -237,7 +237,6 @@ static struct btrfs_device *__alloc_device(void)
237 kfree(dev); 237 kfree(dev);
238 return ERR_PTR(-ENOMEM); 238 return ERR_PTR(-ENOMEM);
239 } 239 }
240 bio_get(dev->flush_bio);
241 240
242 INIT_LIST_HEAD(&dev->dev_list); 241 INIT_LIST_HEAD(&dev->dev_list);
243 INIT_LIST_HEAD(&dev->dev_alloc_list); 242 INIT_LIST_HEAD(&dev->dev_alloc_list);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index ab69dcb70e8a..1b468250e947 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1440,6 +1440,29 @@ static int __close_session(struct ceph_mds_client *mdsc,
1440 return request_close_session(mdsc, session); 1440 return request_close_session(mdsc, session);
1441} 1441}
1442 1442
1443static bool drop_negative_children(struct dentry *dentry)
1444{
1445 struct dentry *child;
1446 bool all_negative = true;
1447
1448 if (!d_is_dir(dentry))
1449 goto out;
1450
1451 spin_lock(&dentry->d_lock);
1452 list_for_each_entry(child, &dentry->d_subdirs, d_child) {
1453 if (d_really_is_positive(child)) {
1454 all_negative = false;
1455 break;
1456 }
1457 }
1458 spin_unlock(&dentry->d_lock);
1459
1460 if (all_negative)
1461 shrink_dcache_parent(dentry);
1462out:
1463 return all_negative;
1464}
1465
1443/* 1466/*
1444 * Trim old(er) caps. 1467 * Trim old(er) caps.
1445 * 1468 *
@@ -1490,16 +1513,27 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
1490 if ((used | wanted) & ~oissued & mine) 1513 if ((used | wanted) & ~oissued & mine)
1491 goto out; /* we need these caps */ 1514 goto out; /* we need these caps */
1492 1515
1493 session->s_trim_caps--;
1494 if (oissued) { 1516 if (oissued) {
1495 /* we aren't the only cap.. just remove us */ 1517 /* we aren't the only cap.. just remove us */
1496 __ceph_remove_cap(cap, true); 1518 __ceph_remove_cap(cap, true);
1519 session->s_trim_caps--;
1497 } else { 1520 } else {
1521 struct dentry *dentry;
1498 /* try dropping referring dentries */ 1522 /* try dropping referring dentries */
1499 spin_unlock(&ci->i_ceph_lock); 1523 spin_unlock(&ci->i_ceph_lock);
1500 d_prune_aliases(inode); 1524 dentry = d_find_any_alias(inode);
1501 dout("trim_caps_cb %p cap %p pruned, count now %d\n", 1525 if (dentry && drop_negative_children(dentry)) {
1502 inode, cap, atomic_read(&inode->i_count)); 1526 int count;
1527 dput(dentry);
1528 d_prune_aliases(inode);
1529 count = atomic_read(&inode->i_count);
1530 if (count == 1)
1531 session->s_trim_caps--;
1532 dout("trim_caps_cb %p cap %p pruned, count now %d\n",
1533 inode, cap, count);
1534 } else {
1535 dput(dentry);
1536 }
1503 return 0; 1537 return 0;
1504 } 1538 }
1505 1539
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index e06740436b92..ed88ab8a4774 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -1406,7 +1406,8 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
1406 } while (rc == -EAGAIN); 1406 } while (rc == -EAGAIN);
1407 1407
1408 if (rc) { 1408 if (rc) {
1409 cifs_dbg(VFS, "ioctl error in smb2_get_dfs_refer rc=%d\n", rc); 1409 if (rc != -ENOENT)
1410 cifs_dbg(VFS, "ioctl error in smb2_get_dfs_refer rc=%d\n", rc);
1410 goto out; 1411 goto out;
1411 } 1412 }
1412 1413
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 5331631386a2..01346b8b6edb 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -2678,27 +2678,27 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
2678 cifs_small_buf_release(req); 2678 cifs_small_buf_release(req);
2679 2679
2680 rsp = (struct smb2_read_rsp *)rsp_iov.iov_base; 2680 rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
2681 shdr = get_sync_hdr(rsp);
2682 2681
2683 if (shdr->Status == STATUS_END_OF_FILE) { 2682 if (rc) {
2683 if (rc != -ENODATA) {
2684 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
2685 cifs_dbg(VFS, "Send error in read = %d\n", rc);
2686 }
2684 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 2687 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
2685 return 0; 2688 return rc == -ENODATA ? 0 : rc;
2686 } 2689 }
2687 2690
2688 if (rc) { 2691 *nbytes = le32_to_cpu(rsp->DataLength);
2689 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE); 2692 if ((*nbytes > CIFS_MAX_MSGSIZE) ||
2690 cifs_dbg(VFS, "Send error in read = %d\n", rc); 2693 (*nbytes > io_parms->length)) {
2691 } else { 2694 cifs_dbg(FYI, "bad length %d for count %d\n",
2692 *nbytes = le32_to_cpu(rsp->DataLength); 2695 *nbytes, io_parms->length);
2693 if ((*nbytes > CIFS_MAX_MSGSIZE) || 2696 rc = -EIO;
2694 (*nbytes > io_parms->length)) { 2697 *nbytes = 0;
2695 cifs_dbg(FYI, "bad length %d for count %d\n",
2696 *nbytes, io_parms->length);
2697 rc = -EIO;
2698 *nbytes = 0;
2699 }
2700 } 2698 }
2701 2699
2700 shdr = get_sync_hdr(rsp);
2701
2702 if (*buf) { 2702 if (*buf) {
2703 memcpy(*buf, (char *)shdr + rsp->DataOffset, *nbytes); 2703 memcpy(*buf, (char *)shdr + rsp->DataOffset, *nbytes);
2704 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 2704 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
diff --git a/fs/cramfs/Kconfig b/fs/cramfs/Kconfig
index f937082f3244..58e2fe40b2a0 100644
--- a/fs/cramfs/Kconfig
+++ b/fs/cramfs/Kconfig
@@ -34,6 +34,7 @@ config CRAMFS_BLOCKDEV
34config CRAMFS_MTD 34config CRAMFS_MTD
35 bool "Support CramFs image directly mapped in physical memory" 35 bool "Support CramFs image directly mapped in physical memory"
36 depends on CRAMFS && MTD 36 depends on CRAMFS && MTD
37 depends on CRAMFS=m || MTD=y
37 default y if !CRAMFS_BLOCKDEV 38 default y if !CRAMFS_BLOCKDEV
38 help 39 help
39 This option allows the CramFs driver to load data directly from 40 This option allows the CramFs driver to load data directly from
diff --git a/fs/dax.c b/fs/dax.c
index 78b72c48374e..95981591977a 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -627,8 +627,7 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
627 627
628 if (pfn != pmd_pfn(*pmdp)) 628 if (pfn != pmd_pfn(*pmdp))
629 goto unlock_pmd; 629 goto unlock_pmd;
630 if (!pmd_dirty(*pmdp) 630 if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
631 && !pmd_access_permitted(*pmdp, WRITE))
632 goto unlock_pmd; 631 goto unlock_pmd;
633 632
634 flush_cache_page(vma, address, pfn); 633 flush_cache_page(vma, address, pfn);
diff --git a/fs/exec.c b/fs/exec.c
index 6be2aa0ab26f..7eb8d21bcab9 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1216,15 +1216,14 @@ killed:
1216 return -EAGAIN; 1216 return -EAGAIN;
1217} 1217}
1218 1218
1219char *get_task_comm(char *buf, struct task_struct *tsk) 1219char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
1220{ 1220{
1221 /* buf must be at least sizeof(tsk->comm) in size */
1222 task_lock(tsk); 1221 task_lock(tsk);
1223 strncpy(buf, tsk->comm, sizeof(tsk->comm)); 1222 strncpy(buf, tsk->comm, buf_size);
1224 task_unlock(tsk); 1223 task_unlock(tsk);
1225 return buf; 1224 return buf;
1226} 1225}
1227EXPORT_SYMBOL_GPL(get_task_comm); 1226EXPORT_SYMBOL_GPL(__get_task_comm);
1228 1227
1229/* 1228/*
1230 * These functions flushes out all traces of the currently running executable 1229 * These functions flushes out all traces of the currently running executable
@@ -1340,24 +1339,24 @@ void setup_new_exec(struct linux_binprm * bprm)
1340 * avoid bad behavior from the prior rlimits. This has to 1339 * avoid bad behavior from the prior rlimits. This has to
1341 * happen before arch_pick_mmap_layout(), which examines 1340 * happen before arch_pick_mmap_layout(), which examines
1342 * RLIMIT_STACK, but after the point of no return to avoid 1341 * RLIMIT_STACK, but after the point of no return to avoid
1343 * races from other threads changing the limits. This also 1342 * needing to clean up the change on failure.
1344 * must be protected from races with prlimit() calls.
1345 */ 1343 */
1346 task_lock(current->group_leader);
1347 if (current->signal->rlim[RLIMIT_STACK].rlim_cur > _STK_LIM) 1344 if (current->signal->rlim[RLIMIT_STACK].rlim_cur > _STK_LIM)
1348 current->signal->rlim[RLIMIT_STACK].rlim_cur = _STK_LIM; 1345 current->signal->rlim[RLIMIT_STACK].rlim_cur = _STK_LIM;
1349 if (current->signal->rlim[RLIMIT_STACK].rlim_max > _STK_LIM)
1350 current->signal->rlim[RLIMIT_STACK].rlim_max = _STK_LIM;
1351 task_unlock(current->group_leader);
1352 } 1346 }
1353 1347
1354 arch_pick_mmap_layout(current->mm); 1348 arch_pick_mmap_layout(current->mm);
1355 1349
1356 current->sas_ss_sp = current->sas_ss_size = 0; 1350 current->sas_ss_sp = current->sas_ss_size = 0;
1357 1351
1358 /* Figure out dumpability. */ 1352 /*
1353 * Figure out dumpability. Note that this checking only of current
1354 * is wrong, but userspace depends on it. This should be testing
1355 * bprm->secureexec instead.
1356 */
1359 if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP || 1357 if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
1360 bprm->secureexec) 1358 !(uid_eq(current_euid(), current_uid()) &&
1359 gid_eq(current_egid(), current_gid())))
1361 set_dumpable(current->mm, suid_dumpable); 1360 set_dumpable(current->mm, suid_dumpable);
1362 else 1361 else
1363 set_dumpable(current->mm, SUID_DUMP_USER); 1362 set_dumpable(current->mm, SUID_DUMP_USER);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 07bca11749d4..c941251ac0c0 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4722,6 +4722,7 @@ retry:
4722 EXT4_INODE_EOFBLOCKS); 4722 EXT4_INODE_EOFBLOCKS);
4723 } 4723 }
4724 ext4_mark_inode_dirty(handle, inode); 4724 ext4_mark_inode_dirty(handle, inode);
4725 ext4_update_inode_fsync_trans(handle, inode, 1);
4725 ret2 = ext4_journal_stop(handle); 4726 ret2 = ext4_journal_stop(handle);
4726 if (ret2) 4727 if (ret2)
4727 break; 4728 break;
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index b4267d72f249..b32cf263750d 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -816,6 +816,8 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
816#ifdef CONFIG_EXT4_FS_POSIX_ACL 816#ifdef CONFIG_EXT4_FS_POSIX_ACL
817 struct posix_acl *p = get_acl(dir, ACL_TYPE_DEFAULT); 817 struct posix_acl *p = get_acl(dir, ACL_TYPE_DEFAULT);
818 818
819 if (IS_ERR(p))
820 return ERR_CAST(p);
819 if (p) { 821 if (p) {
820 int acl_size = p->a_count * sizeof(ext4_acl_entry); 822 int acl_size = p->a_count * sizeof(ext4_acl_entry);
821 823
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 7df2c5644e59..534a9130f625 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -149,6 +149,15 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
149 */ 149 */
150int ext4_inode_is_fast_symlink(struct inode *inode) 150int ext4_inode_is_fast_symlink(struct inode *inode)
151{ 151{
152 if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
153 int ea_blocks = EXT4_I(inode)->i_file_acl ?
154 EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
155
156 if (ext4_has_inline_data(inode))
157 return 0;
158
159 return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
160 }
152 return S_ISLNK(inode->i_mode) && inode->i_size && 161 return S_ISLNK(inode->i_mode) && inode->i_size &&
153 (inode->i_size < EXT4_N_BLOCKS * 4); 162 (inode->i_size < EXT4_N_BLOCKS * 4);
154} 163}
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 798b3ac680db..e750d68fbcb5 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1399,6 +1399,10 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
1399 "falling back\n")); 1399 "falling back\n"));
1400 } 1400 }
1401 nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb); 1401 nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
1402 if (!nblocks) {
1403 ret = NULL;
1404 goto cleanup_and_exit;
1405 }
1402 start = EXT4_I(dir)->i_dir_start_lookup; 1406 start = EXT4_I(dir)->i_dir_start_lookup;
1403 if (start >= nblocks) 1407 if (start >= nblocks)
1404 start = 0; 1408 start = 0;
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index 8d6b7e35faf9..c83ece7facc5 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -150,7 +150,6 @@ static int hpfs_readdir(struct file *file, struct dir_context *ctx)
150 if (unlikely(ret < 0)) 150 if (unlikely(ret < 0))
151 goto out; 151 goto out;
152 ctx->pos = ((loff_t) hpfs_de_as_down_as_possible(inode->i_sb, hpfs_inode->i_dno) << 4) + 1; 152 ctx->pos = ((loff_t) hpfs_de_as_down_as_possible(inode->i_sb, hpfs_inode->i_dno) << 4) + 1;
153 file->f_version = inode->i_version;
154 } 153 }
155 next_pos = ctx->pos; 154 next_pos = ctx->pos;
156 if (!(de = map_pos_dirent(inode, &next_pos, &qbh))) { 155 if (!(de = map_pos_dirent(inode, &next_pos, &qbh))) {
diff --git a/fs/hpfs/dnode.c b/fs/hpfs/dnode.c
index 3b834563b1f1..a4ad18afbdec 100644
--- a/fs/hpfs/dnode.c
+++ b/fs/hpfs/dnode.c
@@ -419,7 +419,6 @@ int hpfs_add_dirent(struct inode *i,
419 c = 1; 419 c = 1;
420 goto ret; 420 goto ret;
421 } 421 }
422 i->i_version++;
423 c = hpfs_add_to_dnode(i, dno, name, namelen, new_de, 0); 422 c = hpfs_add_to_dnode(i, dno, name, namelen, new_de, 0);
424 ret: 423 ret:
425 return c; 424 return c;
@@ -726,7 +725,6 @@ int hpfs_remove_dirent(struct inode *i, dnode_secno dno, struct hpfs_dirent *de,
726 return 2; 725 return 2;
727 } 726 }
728 } 727 }
729 i->i_version++;
730 for_all_poss(i, hpfs_pos_del, (t = get_pos(dnode, de)) + 1, 1); 728 for_all_poss(i, hpfs_pos_del, (t = get_pos(dnode, de)) + 1, 1);
731 hpfs_delete_de(i->i_sb, dnode, de); 729 hpfs_delete_de(i->i_sb, dnode, de);
732 hpfs_mark_4buffers_dirty(qbh); 730 hpfs_mark_4buffers_dirty(qbh);
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index c45a3b9b9ac7..f2c3ebcd309c 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -235,7 +235,6 @@ static struct inode *hpfs_alloc_inode(struct super_block *sb)
235 ei = kmem_cache_alloc(hpfs_inode_cachep, GFP_NOFS); 235 ei = kmem_cache_alloc(hpfs_inode_cachep, GFP_NOFS);
236 if (!ei) 236 if (!ei)
237 return NULL; 237 return NULL;
238 ei->vfs_inode.i_version = 1;
239 return &ei->vfs_inode; 238 return &ei->vfs_inode;
240} 239}
241 240
diff --git a/fs/namespace.c b/fs/namespace.c
index e158ec6b527b..9d1374ab6e06 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2826,6 +2826,7 @@ long do_mount(const char *dev_name, const char __user *dir_name,
2826 SB_DIRSYNC | 2826 SB_DIRSYNC |
2827 SB_SILENT | 2827 SB_SILENT |
2828 SB_POSIXACL | 2828 SB_POSIXACL |
2829 SB_LAZYTIME |
2829 SB_I_VERSION); 2830 SB_I_VERSION);
2830 2831
2831 if (flags & MS_REMOUNT) 2832 if (flags & MS_REMOUNT)
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 0ac2fb1c6b63..b9129e2befea 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -291,12 +291,23 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
291 const struct sockaddr *sap = data->addr; 291 const struct sockaddr *sap = data->addr;
292 struct nfs_net *nn = net_generic(data->net, nfs_net_id); 292 struct nfs_net *nn = net_generic(data->net, nfs_net_id);
293 293
294again:
294 list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) { 295 list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) {
295 const struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr; 296 const struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
296 /* Don't match clients that failed to initialise properly */ 297 /* Don't match clients that failed to initialise properly */
297 if (clp->cl_cons_state < 0) 298 if (clp->cl_cons_state < 0)
298 continue; 299 continue;
299 300
301 /* If a client is still initializing then we need to wait */
302 if (clp->cl_cons_state > NFS_CS_READY) {
303 refcount_inc(&clp->cl_count);
304 spin_unlock(&nn->nfs_client_lock);
305 nfs_wait_client_init_complete(clp);
306 nfs_put_client(clp);
307 spin_lock(&nn->nfs_client_lock);
308 goto again;
309 }
310
300 /* Different NFS versions cannot share the same nfs_client */ 311 /* Different NFS versions cannot share the same nfs_client */
301 if (clp->rpc_ops != data->nfs_mod->rpc_ops) 312 if (clp->rpc_ops != data->nfs_mod->rpc_ops)
302 continue; 313 continue;
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 12bbab0becb4..65a7e5da508c 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -404,15 +404,19 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
404 if (error < 0) 404 if (error < 0)
405 goto error; 405 goto error;
406 406
407 if (!nfs4_has_session(clp))
408 nfs_mark_client_ready(clp, NFS_CS_READY);
409
410 error = nfs4_discover_server_trunking(clp, &old); 407 error = nfs4_discover_server_trunking(clp, &old);
411 if (error < 0) 408 if (error < 0)
412 goto error; 409 goto error;
413 410
414 if (clp != old) 411 if (clp != old) {
415 clp->cl_preserve_clid = true; 412 clp->cl_preserve_clid = true;
413 /*
414 * Mark the client as having failed initialization so other
415 * processes walking the nfs_client_list in nfs_match_client()
416 * won't try to use it.
417 */
418 nfs_mark_client_ready(clp, -EPERM);
419 }
416 nfs_put_client(clp); 420 nfs_put_client(clp);
417 clear_bit(NFS_CS_TSM_POSSIBLE, &clp->cl_flags); 421 clear_bit(NFS_CS_TSM_POSSIBLE, &clp->cl_flags);
418 return old; 422 return old;
@@ -539,6 +543,9 @@ int nfs40_walk_client_list(struct nfs_client *new,
539 spin_lock(&nn->nfs_client_lock); 543 spin_lock(&nn->nfs_client_lock);
540 list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) { 544 list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
541 545
546 if (pos == new)
547 goto found;
548
542 status = nfs4_match_client(pos, new, &prev, nn); 549 status = nfs4_match_client(pos, new, &prev, nn);
543 if (status < 0) 550 if (status < 0)
544 goto out_unlock; 551 goto out_unlock;
@@ -559,6 +566,7 @@ int nfs40_walk_client_list(struct nfs_client *new,
559 * way that a SETCLIENTID_CONFIRM to pos can succeed is 566 * way that a SETCLIENTID_CONFIRM to pos can succeed is
560 * if new and pos point to the same server: 567 * if new and pos point to the same server:
561 */ 568 */
569found:
562 refcount_inc(&pos->cl_count); 570 refcount_inc(&pos->cl_count);
563 spin_unlock(&nn->nfs_client_lock); 571 spin_unlock(&nn->nfs_client_lock);
564 572
@@ -572,6 +580,7 @@ int nfs40_walk_client_list(struct nfs_client *new,
572 case 0: 580 case 0:
573 nfs4_swap_callback_idents(pos, new); 581 nfs4_swap_callback_idents(pos, new);
574 pos->cl_confirm = new->cl_confirm; 582 pos->cl_confirm = new->cl_confirm;
583 nfs_mark_client_ready(pos, NFS_CS_READY);
575 584
576 prev = NULL; 585 prev = NULL;
577 *result = pos; 586 *result = pos;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5b5f464f6f2a..4a379d7918f2 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1890,6 +1890,8 @@ int nfs_commit_inode(struct inode *inode, int how)
1890 if (res) 1890 if (res)
1891 error = nfs_generic_commit_list(inode, &head, how, &cinfo); 1891 error = nfs_generic_commit_list(inode, &head, how, &cinfo);
1892 nfs_commit_end(cinfo.mds); 1892 nfs_commit_end(cinfo.mds);
1893 if (res == 0)
1894 return res;
1893 if (error < 0) 1895 if (error < 0)
1894 goto out_error; 1896 goto out_error;
1895 if (!may_wait) 1897 if (!may_wait)
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index 697f8ae7792d..fdf2aad73470 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -61,6 +61,9 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
61 else 61 else
62 gi->gid[i] = rqgi->gid[i]; 62 gi->gid[i] = rqgi->gid[i];
63 } 63 }
64
65 /* Each thread allocates its own gi, no race */
66 groups_sort(gi);
64 } else { 67 } else {
65 gi = get_group_info(rqgi); 68 gi = get_group_info(rqgi);
66 } 69 }
diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
index ded456f17de6..c584ad8d023c 100644
--- a/fs/orangefs/devorangefs-req.c
+++ b/fs/orangefs/devorangefs-req.c
@@ -162,7 +162,7 @@ static ssize_t orangefs_devreq_read(struct file *file,
162 struct orangefs_kernel_op_s *op, *temp; 162 struct orangefs_kernel_op_s *op, *temp;
163 __s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION; 163 __s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION;
164 static __s32 magic = ORANGEFS_DEVREQ_MAGIC; 164 static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
165 struct orangefs_kernel_op_s *cur_op = NULL; 165 struct orangefs_kernel_op_s *cur_op;
166 unsigned long ret; 166 unsigned long ret;
167 167
168 /* We do not support blocking IO. */ 168 /* We do not support blocking IO. */
@@ -186,6 +186,7 @@ static ssize_t orangefs_devreq_read(struct file *file,
186 return -EAGAIN; 186 return -EAGAIN;
187 187
188restart: 188restart:
189 cur_op = NULL;
189 /* Get next op (if any) from top of list. */ 190 /* Get next op (if any) from top of list. */
190 spin_lock(&orangefs_request_list_lock); 191 spin_lock(&orangefs_request_list_lock);
191 list_for_each_entry_safe(op, temp, &orangefs_request_list, list) { 192 list_for_each_entry_safe(op, temp, &orangefs_request_list, list) {
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
index 1668fd645c45..0d228cd087e6 100644
--- a/fs/orangefs/file.c
+++ b/fs/orangefs/file.c
@@ -452,7 +452,7 @@ ssize_t orangefs_inode_read(struct inode *inode,
452static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) 452static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
453{ 453{
454 struct file *file = iocb->ki_filp; 454 struct file *file = iocb->ki_filp;
455 loff_t pos = *(&iocb->ki_pos); 455 loff_t pos = iocb->ki_pos;
456 ssize_t rc = 0; 456 ssize_t rc = 0;
457 457
458 BUG_ON(iocb->private); 458 BUG_ON(iocb->private);
@@ -492,9 +492,6 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite
492 } 492 }
493 } 493 }
494 494
495 if (file->f_pos > i_size_read(file->f_mapping->host))
496 orangefs_i_size_write(file->f_mapping->host, file->f_pos);
497
498 rc = generic_write_checks(iocb, iter); 495 rc = generic_write_checks(iocb, iter);
499 496
500 if (rc <= 0) { 497 if (rc <= 0) {
@@ -508,7 +505,7 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite
508 * pos to the end of the file, so we will wait till now to set 505 * pos to the end of the file, so we will wait till now to set
509 * pos... 506 * pos...
510 */ 507 */
511 pos = *(&iocb->ki_pos); 508 pos = iocb->ki_pos;
512 509
513 rc = do_readv_writev(ORANGEFS_IO_WRITE, 510 rc = do_readv_writev(ORANGEFS_IO_WRITE,
514 file, 511 file,
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
index 97adf7d100b5..2595453fe737 100644
--- a/fs/orangefs/orangefs-kernel.h
+++ b/fs/orangefs/orangefs-kernel.h
@@ -533,17 +533,6 @@ do { \
533 sys_attr.mask = ORANGEFS_ATTR_SYS_ALL_SETABLE; \ 533 sys_attr.mask = ORANGEFS_ATTR_SYS_ALL_SETABLE; \
534} while (0) 534} while (0)
535 535
536static inline void orangefs_i_size_write(struct inode *inode, loff_t i_size)
537{
538#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
539 inode_lock(inode);
540#endif
541 i_size_write(inode, i_size);
542#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
543 inode_unlock(inode);
544#endif
545}
546
547static inline void orangefs_set_timeout(struct dentry *dentry) 536static inline void orangefs_set_timeout(struct dentry *dentry)
548{ 537{
549 unsigned long time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000; 538 unsigned long time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
diff --git a/fs/orangefs/waitqueue.c b/fs/orangefs/waitqueue.c
index 835c6e148afc..0577d6dba8c8 100644
--- a/fs/orangefs/waitqueue.c
+++ b/fs/orangefs/waitqueue.c
@@ -29,10 +29,10 @@ static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s
29 */ 29 */
30void purge_waiting_ops(void) 30void purge_waiting_ops(void)
31{ 31{
32 struct orangefs_kernel_op_s *op; 32 struct orangefs_kernel_op_s *op, *tmp;
33 33
34 spin_lock(&orangefs_request_list_lock); 34 spin_lock(&orangefs_request_list_lock);
35 list_for_each_entry(op, &orangefs_request_list, list) { 35 list_for_each_entry_safe(op, tmp, &orangefs_request_list, list) {
36 gossip_debug(GOSSIP_WAIT_DEBUG, 36 gossip_debug(GOSSIP_WAIT_DEBUG,
37 "pvfs2-client-core: purging op tag %llu %s\n", 37 "pvfs2-client-core: purging op tag %llu %s\n",
38 llu(op->tag), 38 llu(op->tag),
diff --git a/fs/overlayfs/Kconfig b/fs/overlayfs/Kconfig
index cbfc196e5dc5..5ac415466861 100644
--- a/fs/overlayfs/Kconfig
+++ b/fs/overlayfs/Kconfig
@@ -24,6 +24,16 @@ config OVERLAY_FS_REDIRECT_DIR
24 an overlay which has redirects on a kernel that doesn't support this 24 an overlay which has redirects on a kernel that doesn't support this
25 feature will have unexpected results. 25 feature will have unexpected results.
26 26
27config OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW
28 bool "Overlayfs: follow redirects even if redirects are turned off"
29 default y
30 depends on OVERLAY_FS
31 help
32 Disable this to get a possibly more secure configuration, but that
33 might not be backward compatible with previous kernels.
34
35 For more information, see Documentation/filesystems/overlayfs.txt
36
27config OVERLAY_FS_INDEX 37config OVERLAY_FS_INDEX
28 bool "Overlayfs: turn on inodes index feature by default" 38 bool "Overlayfs: turn on inodes index feature by default"
29 depends on OVERLAY_FS 39 depends on OVERLAY_FS
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index e13921824c70..f9788bc116a8 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -887,7 +887,8 @@ static int ovl_set_redirect(struct dentry *dentry, bool samedir)
887 spin_unlock(&dentry->d_lock); 887 spin_unlock(&dentry->d_lock);
888 } else { 888 } else {
889 kfree(redirect); 889 kfree(redirect);
890 pr_warn_ratelimited("overlay: failed to set redirect (%i)\n", err); 890 pr_warn_ratelimited("overlayfs: failed to set redirect (%i)\n",
891 err);
891 /* Fall back to userspace copy-up */ 892 /* Fall back to userspace copy-up */
892 err = -EXDEV; 893 err = -EXDEV;
893 } 894 }
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index 625ed8066570..beb945e1963c 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -435,7 +435,7 @@ int ovl_verify_index(struct dentry *index, struct ovl_path *lower,
435 435
436 /* Check if index is orphan and don't warn before cleaning it */ 436 /* Check if index is orphan and don't warn before cleaning it */
437 if (d_inode(index)->i_nlink == 1 && 437 if (d_inode(index)->i_nlink == 1 &&
438 ovl_get_nlink(index, origin.dentry, 0) == 0) 438 ovl_get_nlink(origin.dentry, index, 0) == 0)
439 err = -ENOENT; 439 err = -ENOENT;
440 440
441 dput(origin.dentry); 441 dput(origin.dentry);
@@ -681,6 +681,22 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
681 if (d.stop) 681 if (d.stop)
682 break; 682 break;
683 683
684 /*
685 * Following redirects can have security consequences: it's like
686 * a symlink into the lower layer without the permission checks.
687 * This is only a problem if the upper layer is untrusted (e.g
688 * comes from an USB drive). This can allow a non-readable file
689 * or directory to become readable.
690 *
691 * Only following redirects when redirects are enabled disables
692 * this attack vector when not necessary.
693 */
694 err = -EPERM;
695 if (d.redirect && !ofs->config.redirect_follow) {
696 pr_warn_ratelimited("overlay: refusing to follow redirect for (%pd2)\n", dentry);
697 goto out_put;
698 }
699
684 if (d.redirect && d.redirect[0] == '/' && poe != roe) { 700 if (d.redirect && d.redirect[0] == '/' && poe != roe) {
685 poe = roe; 701 poe = roe;
686 702
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 13eab09a6b6f..b489099ccd49 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -180,7 +180,7 @@ static inline int ovl_do_whiteout(struct inode *dir, struct dentry *dentry)
180static inline struct dentry *ovl_do_tmpfile(struct dentry *dentry, umode_t mode) 180static inline struct dentry *ovl_do_tmpfile(struct dentry *dentry, umode_t mode)
181{ 181{
182 struct dentry *ret = vfs_tmpfile(dentry, mode, 0); 182 struct dentry *ret = vfs_tmpfile(dentry, mode, 0);
183 int err = IS_ERR(ret) ? PTR_ERR(ret) : 0; 183 int err = PTR_ERR_OR_ZERO(ret);
184 184
185 pr_debug("tmpfile(%pd2, 0%o) = %i\n", dentry, mode, err); 185 pr_debug("tmpfile(%pd2, 0%o) = %i\n", dentry, mode, err);
186 return ret; 186 return ret;
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index 752bab645879..9d0bc03bf6e4 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -14,6 +14,8 @@ struct ovl_config {
14 char *workdir; 14 char *workdir;
15 bool default_permissions; 15 bool default_permissions;
16 bool redirect_dir; 16 bool redirect_dir;
17 bool redirect_follow;
18 const char *redirect_mode;
17 bool index; 19 bool index;
18}; 20};
19 21
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index 0daa4354fec4..8c98578d27a1 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -499,7 +499,7 @@ out:
499 return err; 499 return err;
500 500
501fail: 501fail:
502 pr_warn_ratelimited("overlay: failed to look up (%s) for ino (%i)\n", 502 pr_warn_ratelimited("overlayfs: failed to look up (%s) for ino (%i)\n",
503 p->name, err); 503 p->name, err);
504 goto out; 504 goto out;
505} 505}
@@ -663,7 +663,10 @@ static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
663 return PTR_ERR(rdt.cache); 663 return PTR_ERR(rdt.cache);
664 } 664 }
665 665
666 return iterate_dir(od->realfile, &rdt.ctx); 666 err = iterate_dir(od->realfile, &rdt.ctx);
667 ctx->pos = rdt.ctx.pos;
668
669 return err;
667} 670}
668 671
669 672
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 288d20f9a55a..76440feb79f6 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -33,6 +33,13 @@ module_param_named(redirect_dir, ovl_redirect_dir_def, bool, 0644);
33MODULE_PARM_DESC(ovl_redirect_dir_def, 33MODULE_PARM_DESC(ovl_redirect_dir_def,
34 "Default to on or off for the redirect_dir feature"); 34 "Default to on or off for the redirect_dir feature");
35 35
36static bool ovl_redirect_always_follow =
37 IS_ENABLED(CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW);
38module_param_named(redirect_always_follow, ovl_redirect_always_follow,
39 bool, 0644);
40MODULE_PARM_DESC(ovl_redirect_always_follow,
41 "Follow redirects even if redirect_dir feature is turned off");
42
36static bool ovl_index_def = IS_ENABLED(CONFIG_OVERLAY_FS_INDEX); 43static bool ovl_index_def = IS_ENABLED(CONFIG_OVERLAY_FS_INDEX);
37module_param_named(index, ovl_index_def, bool, 0644); 44module_param_named(index, ovl_index_def, bool, 0644);
38MODULE_PARM_DESC(ovl_index_def, 45MODULE_PARM_DESC(ovl_index_def,
@@ -232,6 +239,7 @@ static void ovl_free_fs(struct ovl_fs *ofs)
232 kfree(ofs->config.lowerdir); 239 kfree(ofs->config.lowerdir);
233 kfree(ofs->config.upperdir); 240 kfree(ofs->config.upperdir);
234 kfree(ofs->config.workdir); 241 kfree(ofs->config.workdir);
242 kfree(ofs->config.redirect_mode);
235 if (ofs->creator_cred) 243 if (ofs->creator_cred)
236 put_cred(ofs->creator_cred); 244 put_cred(ofs->creator_cred);
237 kfree(ofs); 245 kfree(ofs);
@@ -244,6 +252,7 @@ static void ovl_put_super(struct super_block *sb)
244 ovl_free_fs(ofs); 252 ovl_free_fs(ofs);
245} 253}
246 254
255/* Sync real dirty inodes in upper filesystem (if it exists) */
247static int ovl_sync_fs(struct super_block *sb, int wait) 256static int ovl_sync_fs(struct super_block *sb, int wait)
248{ 257{
249 struct ovl_fs *ofs = sb->s_fs_info; 258 struct ovl_fs *ofs = sb->s_fs_info;
@@ -252,14 +261,24 @@ static int ovl_sync_fs(struct super_block *sb, int wait)
252 261
253 if (!ofs->upper_mnt) 262 if (!ofs->upper_mnt)
254 return 0; 263 return 0;
255 upper_sb = ofs->upper_mnt->mnt_sb; 264
256 if (!upper_sb->s_op->sync_fs) 265 /*
266 * If this is a sync(2) call or an emergency sync, all the super blocks
267 * will be iterated, including upper_sb, so no need to do anything.
268 *
269 * If this is a syncfs(2) call, then we do need to call
270 * sync_filesystem() on upper_sb, but enough if we do it when being
271 * called with wait == 1.
272 */
273 if (!wait)
257 return 0; 274 return 0;
258 275
259 /* real inodes have already been synced by sync_filesystem(ovl_sb) */ 276 upper_sb = ofs->upper_mnt->mnt_sb;
277
260 down_read(&upper_sb->s_umount); 278 down_read(&upper_sb->s_umount);
261 ret = upper_sb->s_op->sync_fs(upper_sb, wait); 279 ret = sync_filesystem(upper_sb);
262 up_read(&upper_sb->s_umount); 280 up_read(&upper_sb->s_umount);
281
263 return ret; 282 return ret;
264} 283}
265 284
@@ -295,6 +314,11 @@ static bool ovl_force_readonly(struct ovl_fs *ofs)
295 return (!ofs->upper_mnt || !ofs->workdir); 314 return (!ofs->upper_mnt || !ofs->workdir);
296} 315}
297 316
317static const char *ovl_redirect_mode_def(void)
318{
319 return ovl_redirect_dir_def ? "on" : "off";
320}
321
298/** 322/**
299 * ovl_show_options 323 * ovl_show_options
300 * 324 *
@@ -313,12 +337,10 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
313 } 337 }
314 if (ofs->config.default_permissions) 338 if (ofs->config.default_permissions)
315 seq_puts(m, ",default_permissions"); 339 seq_puts(m, ",default_permissions");
316 if (ofs->config.redirect_dir != ovl_redirect_dir_def) 340 if (strcmp(ofs->config.redirect_mode, ovl_redirect_mode_def()) != 0)
317 seq_printf(m, ",redirect_dir=%s", 341 seq_printf(m, ",redirect_dir=%s", ofs->config.redirect_mode);
318 ofs->config.redirect_dir ? "on" : "off");
319 if (ofs->config.index != ovl_index_def) 342 if (ofs->config.index != ovl_index_def)
320 seq_printf(m, ",index=%s", 343 seq_printf(m, ",index=%s", ofs->config.index ? "on" : "off");
321 ofs->config.index ? "on" : "off");
322 return 0; 344 return 0;
323} 345}
324 346
@@ -348,8 +370,7 @@ enum {
348 OPT_UPPERDIR, 370 OPT_UPPERDIR,
349 OPT_WORKDIR, 371 OPT_WORKDIR,
350 OPT_DEFAULT_PERMISSIONS, 372 OPT_DEFAULT_PERMISSIONS,
351 OPT_REDIRECT_DIR_ON, 373 OPT_REDIRECT_DIR,
352 OPT_REDIRECT_DIR_OFF,
353 OPT_INDEX_ON, 374 OPT_INDEX_ON,
354 OPT_INDEX_OFF, 375 OPT_INDEX_OFF,
355 OPT_ERR, 376 OPT_ERR,
@@ -360,8 +381,7 @@ static const match_table_t ovl_tokens = {
360 {OPT_UPPERDIR, "upperdir=%s"}, 381 {OPT_UPPERDIR, "upperdir=%s"},
361 {OPT_WORKDIR, "workdir=%s"}, 382 {OPT_WORKDIR, "workdir=%s"},
362 {OPT_DEFAULT_PERMISSIONS, "default_permissions"}, 383 {OPT_DEFAULT_PERMISSIONS, "default_permissions"},
363 {OPT_REDIRECT_DIR_ON, "redirect_dir=on"}, 384 {OPT_REDIRECT_DIR, "redirect_dir=%s"},
364 {OPT_REDIRECT_DIR_OFF, "redirect_dir=off"},
365 {OPT_INDEX_ON, "index=on"}, 385 {OPT_INDEX_ON, "index=on"},
366 {OPT_INDEX_OFF, "index=off"}, 386 {OPT_INDEX_OFF, "index=off"},
367 {OPT_ERR, NULL} 387 {OPT_ERR, NULL}
@@ -390,10 +410,37 @@ static char *ovl_next_opt(char **s)
390 return sbegin; 410 return sbegin;
391} 411}
392 412
413static int ovl_parse_redirect_mode(struct ovl_config *config, const char *mode)
414{
415 if (strcmp(mode, "on") == 0) {
416 config->redirect_dir = true;
417 /*
418 * Does not make sense to have redirect creation without
419 * redirect following.
420 */
421 config->redirect_follow = true;
422 } else if (strcmp(mode, "follow") == 0) {
423 config->redirect_follow = true;
424 } else if (strcmp(mode, "off") == 0) {
425 if (ovl_redirect_always_follow)
426 config->redirect_follow = true;
427 } else if (strcmp(mode, "nofollow") != 0) {
428 pr_err("overlayfs: bad mount option \"redirect_dir=%s\"\n",
429 mode);
430 return -EINVAL;
431 }
432
433 return 0;
434}
435
393static int ovl_parse_opt(char *opt, struct ovl_config *config) 436static int ovl_parse_opt(char *opt, struct ovl_config *config)
394{ 437{
395 char *p; 438 char *p;
396 439
440 config->redirect_mode = kstrdup(ovl_redirect_mode_def(), GFP_KERNEL);
441 if (!config->redirect_mode)
442 return -ENOMEM;
443
397 while ((p = ovl_next_opt(&opt)) != NULL) { 444 while ((p = ovl_next_opt(&opt)) != NULL) {
398 int token; 445 int token;
399 substring_t args[MAX_OPT_ARGS]; 446 substring_t args[MAX_OPT_ARGS];
@@ -428,12 +475,11 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
428 config->default_permissions = true; 475 config->default_permissions = true;
429 break; 476 break;
430 477
431 case OPT_REDIRECT_DIR_ON: 478 case OPT_REDIRECT_DIR:
432 config->redirect_dir = true; 479 kfree(config->redirect_mode);
433 break; 480 config->redirect_mode = match_strdup(&args[0]);
434 481 if (!config->redirect_mode)
435 case OPT_REDIRECT_DIR_OFF: 482 return -ENOMEM;
436 config->redirect_dir = false;
437 break; 483 break;
438 484
439 case OPT_INDEX_ON: 485 case OPT_INDEX_ON:
@@ -458,7 +504,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
458 config->workdir = NULL; 504 config->workdir = NULL;
459 } 505 }
460 506
461 return 0; 507 return ovl_parse_redirect_mode(config, config->redirect_mode);
462} 508}
463 509
464#define OVL_WORKDIR_NAME "work" 510#define OVL_WORKDIR_NAME "work"
@@ -1160,7 +1206,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
1160 if (!cred) 1206 if (!cred)
1161 goto out_err; 1207 goto out_err;
1162 1208
1163 ofs->config.redirect_dir = ovl_redirect_dir_def;
1164 ofs->config.index = ovl_index_def; 1209 ofs->config.index = ovl_index_def;
1165 err = ovl_parse_opt((char *) data, &ofs->config); 1210 err = ovl_parse_opt((char *) data, &ofs->config);
1166 if (err) 1211 if (err)
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 79375fc115d2..d67a72dcb92c 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -430,8 +430,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
430 * safe because the task has stopped executing permanently. 430 * safe because the task has stopped executing permanently.
431 */ 431 */
432 if (permitted && (task->flags & PF_DUMPCORE)) { 432 if (permitted && (task->flags & PF_DUMPCORE)) {
433 eip = KSTK_EIP(task); 433 if (try_get_task_stack(task)) {
434 esp = KSTK_ESP(task); 434 eip = KSTK_EIP(task);
435 esp = KSTK_ESP(task);
436 put_task_stack(task);
437 }
435 } 438 }
436 } 439 }
437 440
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 28fa85276eec..60316b52d659 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2268,7 +2268,7 @@ static int show_timer(struct seq_file *m, void *v)
2268 notify = timer->it_sigev_notify; 2268 notify = timer->it_sigev_notify;
2269 2269
2270 seq_printf(m, "ID: %d\n", timer->it_id); 2270 seq_printf(m, "ID: %d\n", timer->it_id);
2271 seq_printf(m, "signal: %d/%p\n", 2271 seq_printf(m, "signal: %d/%px\n",
2272 timer->sigq->info.si_signo, 2272 timer->sigq->info.si_signo,
2273 timer->sigq->info.si_value.sival_ptr); 2273 timer->sigq->info.si_value.sival_ptr);
2274 seq_printf(m, "notify: %s/%s.%d\n", 2274 seq_printf(m, "notify: %s/%s.%d\n",
diff --git a/fs/super.c b/fs/super.c
index d4e33e8f1e6f..06bd25d90ba5 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -191,6 +191,24 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
191 191
192 INIT_LIST_HEAD(&s->s_mounts); 192 INIT_LIST_HEAD(&s->s_mounts);
193 s->s_user_ns = get_user_ns(user_ns); 193 s->s_user_ns = get_user_ns(user_ns);
194 init_rwsem(&s->s_umount);
195 lockdep_set_class(&s->s_umount, &type->s_umount_key);
196 /*
197 * sget() can have s_umount recursion.
198 *
199 * When it cannot find a suitable sb, it allocates a new
200 * one (this one), and tries again to find a suitable old
201 * one.
202 *
203 * In case that succeeds, it will acquire the s_umount
204 * lock of the old one. Since these are clearly distrinct
205 * locks, and this object isn't exposed yet, there's no
206 * risk of deadlocks.
207 *
208 * Annotate this by putting this lock in a different
209 * subclass.
210 */
211 down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
194 212
195 if (security_sb_alloc(s)) 213 if (security_sb_alloc(s))
196 goto fail; 214 goto fail;
@@ -218,25 +236,6 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
218 goto fail; 236 goto fail;
219 if (list_lru_init_memcg(&s->s_inode_lru)) 237 if (list_lru_init_memcg(&s->s_inode_lru))
220 goto fail; 238 goto fail;
221
222 init_rwsem(&s->s_umount);
223 lockdep_set_class(&s->s_umount, &type->s_umount_key);
224 /*
225 * sget() can have s_umount recursion.
226 *
227 * When it cannot find a suitable sb, it allocates a new
228 * one (this one), and tries again to find a suitable old
229 * one.
230 *
231 * In case that succeeds, it will acquire the s_umount
232 * lock of the old one. Since these are clearly distrinct
233 * locks, and this object isn't exposed yet, there's no
234 * risk of deadlocks.
235 *
236 * Annotate this by putting this lock in a different
237 * subclass.
238 */
239 down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
240 s->s_count = 1; 239 s->s_count = 1;
241 atomic_set(&s->s_active, 1); 240 atomic_set(&s->s_active, 1);
242 mutex_init(&s->s_vfs_rename_mutex); 241 mutex_init(&s->s_vfs_rename_mutex);
@@ -518,7 +517,11 @@ retry:
518 hlist_add_head(&s->s_instances, &type->fs_supers); 517 hlist_add_head(&s->s_instances, &type->fs_supers);
519 spin_unlock(&sb_lock); 518 spin_unlock(&sb_lock);
520 get_filesystem(type); 519 get_filesystem(type);
521 register_shrinker(&s->s_shrink); 520 err = register_shrinker(&s->s_shrink);
521 if (err) {
522 deactivate_locked_super(s);
523 s = ERR_PTR(err);
524 }
522 return s; 525 return s;
523} 526}
524 527
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index ac9a4e65ca49..41a75f9f23fd 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -570,11 +570,14 @@ out:
570static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, 570static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
571 struct userfaultfd_wait_queue *ewq) 571 struct userfaultfd_wait_queue *ewq)
572{ 572{
573 struct userfaultfd_ctx *release_new_ctx;
574
573 if (WARN_ON_ONCE(current->flags & PF_EXITING)) 575 if (WARN_ON_ONCE(current->flags & PF_EXITING))
574 goto out; 576 goto out;
575 577
576 ewq->ctx = ctx; 578 ewq->ctx = ctx;
577 init_waitqueue_entry(&ewq->wq, current); 579 init_waitqueue_entry(&ewq->wq, current);
580 release_new_ctx = NULL;
578 581
579 spin_lock(&ctx->event_wqh.lock); 582 spin_lock(&ctx->event_wqh.lock);
580 /* 583 /*
@@ -601,8 +604,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
601 new = (struct userfaultfd_ctx *) 604 new = (struct userfaultfd_ctx *)
602 (unsigned long) 605 (unsigned long)
603 ewq->msg.arg.reserved.reserved1; 606 ewq->msg.arg.reserved.reserved1;
604 607 release_new_ctx = new;
605 userfaultfd_ctx_put(new);
606 } 608 }
607 break; 609 break;
608 } 610 }
@@ -617,6 +619,20 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
617 __set_current_state(TASK_RUNNING); 619 __set_current_state(TASK_RUNNING);
618 spin_unlock(&ctx->event_wqh.lock); 620 spin_unlock(&ctx->event_wqh.lock);
619 621
622 if (release_new_ctx) {
623 struct vm_area_struct *vma;
624 struct mm_struct *mm = release_new_ctx->mm;
625
626 /* the various vma->vm_userfaultfd_ctx still points to it */
627 down_write(&mm->mmap_sem);
628 for (vma = mm->mmap; vma; vma = vma->vm_next)
629 if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx)
630 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
631 up_write(&mm->mmap_sem);
632
633 userfaultfd_ctx_put(release_new_ctx);
634 }
635
620 /* 636 /*
621 * ctx may go away after this if the userfault pseudo fd is 637 * ctx may go away after this if the userfault pseudo fd is
622 * already released. 638 * already released.
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 0da80019a917..83ed7715f856 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -702,7 +702,7 @@ xfs_alloc_ag_vextent(
702 ASSERT(args->agbno % args->alignment == 0); 702 ASSERT(args->agbno % args->alignment == 0);
703 703
704 /* if not file data, insert new block into the reverse map btree */ 704 /* if not file data, insert new block into the reverse map btree */
705 if (args->oinfo.oi_owner != XFS_RMAP_OWN_UNKNOWN) { 705 if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
706 error = xfs_rmap_alloc(args->tp, args->agbp, args->agno, 706 error = xfs_rmap_alloc(args->tp, args->agbp, args->agno,
707 args->agbno, args->len, &args->oinfo); 707 args->agbno, args->len, &args->oinfo);
708 if (error) 708 if (error)
@@ -1682,7 +1682,7 @@ xfs_free_ag_extent(
1682 bno_cur = cnt_cur = NULL; 1682 bno_cur = cnt_cur = NULL;
1683 mp = tp->t_mountp; 1683 mp = tp->t_mountp;
1684 1684
1685 if (oinfo->oi_owner != XFS_RMAP_OWN_UNKNOWN) { 1685 if (!xfs_rmap_should_skip_owner_update(oinfo)) {
1686 error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo); 1686 error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo);
1687 if (error) 1687 if (error)
1688 goto error0; 1688 goto error0;
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 6249c92671de..a76914db72ef 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -212,6 +212,7 @@ xfs_attr_set(
212 int flags) 212 int flags)
213{ 213{
214 struct xfs_mount *mp = dp->i_mount; 214 struct xfs_mount *mp = dp->i_mount;
215 struct xfs_buf *leaf_bp = NULL;
215 struct xfs_da_args args; 216 struct xfs_da_args args;
216 struct xfs_defer_ops dfops; 217 struct xfs_defer_ops dfops;
217 struct xfs_trans_res tres; 218 struct xfs_trans_res tres;
@@ -327,9 +328,16 @@ xfs_attr_set(
327 * GROT: another possible req'mt for a double-split btree op. 328 * GROT: another possible req'mt for a double-split btree op.
328 */ 329 */
329 xfs_defer_init(args.dfops, args.firstblock); 330 xfs_defer_init(args.dfops, args.firstblock);
330 error = xfs_attr_shortform_to_leaf(&args); 331 error = xfs_attr_shortform_to_leaf(&args, &leaf_bp);
331 if (error) 332 if (error)
332 goto out_defer_cancel; 333 goto out_defer_cancel;
334 /*
335 * Prevent the leaf buffer from being unlocked so that a
336 * concurrent AIL push cannot grab the half-baked leaf
337 * buffer and run into problems with the write verifier.
338 */
339 xfs_trans_bhold(args.trans, leaf_bp);
340 xfs_defer_bjoin(args.dfops, leaf_bp);
333 xfs_defer_ijoin(args.dfops, dp); 341 xfs_defer_ijoin(args.dfops, dp);
334 error = xfs_defer_finish(&args.trans, args.dfops); 342 error = xfs_defer_finish(&args.trans, args.dfops);
335 if (error) 343 if (error)
@@ -337,13 +345,14 @@ xfs_attr_set(
337 345
338 /* 346 /*
339 * Commit the leaf transformation. We'll need another (linked) 347 * Commit the leaf transformation. We'll need another (linked)
340 * transaction to add the new attribute to the leaf. 348 * transaction to add the new attribute to the leaf, which
349 * means that we have to hold & join the leaf buffer here too.
341 */ 350 */
342
343 error = xfs_trans_roll_inode(&args.trans, dp); 351 error = xfs_trans_roll_inode(&args.trans, dp);
344 if (error) 352 if (error)
345 goto out; 353 goto out;
346 354 xfs_trans_bjoin(args.trans, leaf_bp);
355 leaf_bp = NULL;
347 } 356 }
348 357
349 if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) 358 if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
@@ -374,8 +383,9 @@ xfs_attr_set(
374 383
375out_defer_cancel: 384out_defer_cancel:
376 xfs_defer_cancel(&dfops); 385 xfs_defer_cancel(&dfops);
377 args.trans = NULL;
378out: 386out:
387 if (leaf_bp)
388 xfs_trans_brelse(args.trans, leaf_bp);
379 if (args.trans) 389 if (args.trans)
380 xfs_trans_cancel(args.trans); 390 xfs_trans_cancel(args.trans);
381 xfs_iunlock(dp, XFS_ILOCK_EXCL); 391 xfs_iunlock(dp, XFS_ILOCK_EXCL);
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 53cc8b986eac..601eaa36f1ad 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -735,10 +735,13 @@ xfs_attr_shortform_getvalue(xfs_da_args_t *args)
735} 735}
736 736
737/* 737/*
738 * Convert from using the shortform to the leaf. 738 * Convert from using the shortform to the leaf. On success, return the
739 * buffer so that we can keep it locked until we're totally done with it.
739 */ 740 */
740int 741int
741xfs_attr_shortform_to_leaf(xfs_da_args_t *args) 742xfs_attr_shortform_to_leaf(
743 struct xfs_da_args *args,
744 struct xfs_buf **leaf_bp)
742{ 745{
743 xfs_inode_t *dp; 746 xfs_inode_t *dp;
744 xfs_attr_shortform_t *sf; 747 xfs_attr_shortform_t *sf;
@@ -818,7 +821,7 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
818 sfe = XFS_ATTR_SF_NEXTENTRY(sfe); 821 sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
819 } 822 }
820 error = 0; 823 error = 0;
821 824 *leaf_bp = bp;
822out: 825out:
823 kmem_free(tmpbuffer); 826 kmem_free(tmpbuffer);
824 return error; 827 return error;
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h
index f7dda0c237b0..894124efb421 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.h
+++ b/fs/xfs/libxfs/xfs_attr_leaf.h
@@ -48,7 +48,8 @@ void xfs_attr_shortform_create(struct xfs_da_args *args);
48void xfs_attr_shortform_add(struct xfs_da_args *args, int forkoff); 48void xfs_attr_shortform_add(struct xfs_da_args *args, int forkoff);
49int xfs_attr_shortform_lookup(struct xfs_da_args *args); 49int xfs_attr_shortform_lookup(struct xfs_da_args *args);
50int xfs_attr_shortform_getvalue(struct xfs_da_args *args); 50int xfs_attr_shortform_getvalue(struct xfs_da_args *args);
51int xfs_attr_shortform_to_leaf(struct xfs_da_args *args); 51int xfs_attr_shortform_to_leaf(struct xfs_da_args *args,
52 struct xfs_buf **leaf_bp);
52int xfs_attr_shortform_remove(struct xfs_da_args *args); 53int xfs_attr_shortform_remove(struct xfs_da_args *args);
53int xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp); 54int xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp);
54int xfs_attr_shortform_bytesfit(struct xfs_inode *dp, int bytes); 55int xfs_attr_shortform_bytesfit(struct xfs_inode *dp, int bytes);
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 1210f684d3c2..1bddbba6b80c 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -5136,7 +5136,7 @@ __xfs_bunmapi(
5136 * blowing out the transaction with a mix of EFIs and reflink 5136 * blowing out the transaction with a mix of EFIs and reflink
5137 * adjustments. 5137 * adjustments.
5138 */ 5138 */
5139 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) 5139 if (tp && xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
5140 max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res)); 5140 max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res));
5141 else 5141 else
5142 max_len = len; 5142 max_len = len;
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index 072ebfe1d6ae..087fea02c389 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -249,6 +249,10 @@ xfs_defer_trans_roll(
249 for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++) 249 for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
250 xfs_trans_log_inode(*tp, dop->dop_inodes[i], XFS_ILOG_CORE); 250 xfs_trans_log_inode(*tp, dop->dop_inodes[i], XFS_ILOG_CORE);
251 251
252 /* Hold the (previously bjoin'd) buffer locked across the roll. */
253 for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++)
254 xfs_trans_dirty_buf(*tp, dop->dop_bufs[i]);
255
252 trace_xfs_defer_trans_roll((*tp)->t_mountp, dop); 256 trace_xfs_defer_trans_roll((*tp)->t_mountp, dop);
253 257
254 /* Roll the transaction. */ 258 /* Roll the transaction. */
@@ -264,6 +268,12 @@ xfs_defer_trans_roll(
264 for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++) 268 for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
265 xfs_trans_ijoin(*tp, dop->dop_inodes[i], 0); 269 xfs_trans_ijoin(*tp, dop->dop_inodes[i], 0);
266 270
271 /* Rejoin the buffers and dirty them so the log moves forward. */
272 for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++) {
273 xfs_trans_bjoin(*tp, dop->dop_bufs[i]);
274 xfs_trans_bhold(*tp, dop->dop_bufs[i]);
275 }
276
267 return error; 277 return error;
268} 278}
269 279
@@ -295,6 +305,31 @@ xfs_defer_ijoin(
295 } 305 }
296 } 306 }
297 307
308 ASSERT(0);
309 return -EFSCORRUPTED;
310}
311
312/*
313 * Add this buffer to the deferred op. Each joined buffer is relogged
314 * each time we roll the transaction.
315 */
316int
317xfs_defer_bjoin(
318 struct xfs_defer_ops *dop,
319 struct xfs_buf *bp)
320{
321 int i;
322
323 for (i = 0; i < XFS_DEFER_OPS_NR_BUFS; i++) {
324 if (dop->dop_bufs[i] == bp)
325 return 0;
326 else if (dop->dop_bufs[i] == NULL) {
327 dop->dop_bufs[i] = bp;
328 return 0;
329 }
330 }
331
332 ASSERT(0);
298 return -EFSCORRUPTED; 333 return -EFSCORRUPTED;
299} 334}
300 335
@@ -493,9 +528,7 @@ xfs_defer_init(
493 struct xfs_defer_ops *dop, 528 struct xfs_defer_ops *dop,
494 xfs_fsblock_t *fbp) 529 xfs_fsblock_t *fbp)
495{ 530{
496 dop->dop_committed = false; 531 memset(dop, 0, sizeof(struct xfs_defer_ops));
497 dop->dop_low = false;
498 memset(&dop->dop_inodes, 0, sizeof(dop->dop_inodes));
499 *fbp = NULLFSBLOCK; 532 *fbp = NULLFSBLOCK;
500 INIT_LIST_HEAD(&dop->dop_intake); 533 INIT_LIST_HEAD(&dop->dop_intake);
501 INIT_LIST_HEAD(&dop->dop_pending); 534 INIT_LIST_HEAD(&dop->dop_pending);
diff --git a/fs/xfs/libxfs/xfs_defer.h b/fs/xfs/libxfs/xfs_defer.h
index d4f046dd44bd..045beacdd37d 100644
--- a/fs/xfs/libxfs/xfs_defer.h
+++ b/fs/xfs/libxfs/xfs_defer.h
@@ -59,6 +59,7 @@ enum xfs_defer_ops_type {
59}; 59};
60 60
61#define XFS_DEFER_OPS_NR_INODES 2 /* join up to two inodes */ 61#define XFS_DEFER_OPS_NR_INODES 2 /* join up to two inodes */
62#define XFS_DEFER_OPS_NR_BUFS 2 /* join up to two buffers */
62 63
63struct xfs_defer_ops { 64struct xfs_defer_ops {
64 bool dop_committed; /* did any trans commit? */ 65 bool dop_committed; /* did any trans commit? */
@@ -66,8 +67,9 @@ struct xfs_defer_ops {
66 struct list_head dop_intake; /* unlogged pending work */ 67 struct list_head dop_intake; /* unlogged pending work */
67 struct list_head dop_pending; /* logged pending work */ 68 struct list_head dop_pending; /* logged pending work */
68 69
69 /* relog these inodes with each roll */ 70 /* relog these with each roll */
70 struct xfs_inode *dop_inodes[XFS_DEFER_OPS_NR_INODES]; 71 struct xfs_inode *dop_inodes[XFS_DEFER_OPS_NR_INODES];
72 struct xfs_buf *dop_bufs[XFS_DEFER_OPS_NR_BUFS];
71}; 73};
72 74
73void xfs_defer_add(struct xfs_defer_ops *dop, enum xfs_defer_ops_type type, 75void xfs_defer_add(struct xfs_defer_ops *dop, enum xfs_defer_ops_type type,
@@ -77,6 +79,7 @@ void xfs_defer_cancel(struct xfs_defer_ops *dop);
77void xfs_defer_init(struct xfs_defer_ops *dop, xfs_fsblock_t *fbp); 79void xfs_defer_init(struct xfs_defer_ops *dop, xfs_fsblock_t *fbp);
78bool xfs_defer_has_unfinished_work(struct xfs_defer_ops *dop); 80bool xfs_defer_has_unfinished_work(struct xfs_defer_ops *dop);
79int xfs_defer_ijoin(struct xfs_defer_ops *dop, struct xfs_inode *ip); 81int xfs_defer_ijoin(struct xfs_defer_ops *dop, struct xfs_inode *ip);
82int xfs_defer_bjoin(struct xfs_defer_ops *dop, struct xfs_buf *bp);
80 83
81/* Description of a deferred type. */ 84/* Description of a deferred type. */
82struct xfs_defer_op_type { 85struct xfs_defer_op_type {
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index de3f04a98656..3b57ef0f2f76 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -920,8 +920,7 @@ STATIC xfs_agnumber_t
920xfs_ialloc_ag_select( 920xfs_ialloc_ag_select(
921 xfs_trans_t *tp, /* transaction pointer */ 921 xfs_trans_t *tp, /* transaction pointer */
922 xfs_ino_t parent, /* parent directory inode number */ 922 xfs_ino_t parent, /* parent directory inode number */
923 umode_t mode, /* bits set to indicate file type */ 923 umode_t mode) /* bits set to indicate file type */
924 int okalloc) /* ok to allocate more space */
925{ 924{
926 xfs_agnumber_t agcount; /* number of ag's in the filesystem */ 925 xfs_agnumber_t agcount; /* number of ag's in the filesystem */
927 xfs_agnumber_t agno; /* current ag number */ 926 xfs_agnumber_t agno; /* current ag number */
@@ -978,9 +977,6 @@ xfs_ialloc_ag_select(
978 return agno; 977 return agno;
979 } 978 }
980 979
981 if (!okalloc)
982 goto nextag;
983
984 if (!pag->pagf_init) { 980 if (!pag->pagf_init) {
985 error = xfs_alloc_pagf_init(mp, tp, agno, flags); 981 error = xfs_alloc_pagf_init(mp, tp, agno, flags);
986 if (error) 982 if (error)
@@ -1680,7 +1676,6 @@ xfs_dialloc(
1680 struct xfs_trans *tp, 1676 struct xfs_trans *tp,
1681 xfs_ino_t parent, 1677 xfs_ino_t parent,
1682 umode_t mode, 1678 umode_t mode,
1683 int okalloc,
1684 struct xfs_buf **IO_agbp, 1679 struct xfs_buf **IO_agbp,
1685 xfs_ino_t *inop) 1680 xfs_ino_t *inop)
1686{ 1681{
@@ -1692,6 +1687,7 @@ xfs_dialloc(
1692 int noroom = 0; 1687 int noroom = 0;
1693 xfs_agnumber_t start_agno; 1688 xfs_agnumber_t start_agno;
1694 struct xfs_perag *pag; 1689 struct xfs_perag *pag;
1690 int okalloc = 1;
1695 1691
1696 if (*IO_agbp) { 1692 if (*IO_agbp) {
1697 /* 1693 /*
@@ -1707,7 +1703,7 @@ xfs_dialloc(
1707 * We do not have an agbp, so select an initial allocation 1703 * We do not have an agbp, so select an initial allocation
1708 * group for inode allocation. 1704 * group for inode allocation.
1709 */ 1705 */
1710 start_agno = xfs_ialloc_ag_select(tp, parent, mode, okalloc); 1706 start_agno = xfs_ialloc_ag_select(tp, parent, mode);
1711 if (start_agno == NULLAGNUMBER) { 1707 if (start_agno == NULLAGNUMBER) {
1712 *inop = NULLFSINO; 1708 *inop = NULLFSINO;
1713 return 0; 1709 return 0;
diff --git a/fs/xfs/libxfs/xfs_ialloc.h b/fs/xfs/libxfs/xfs_ialloc.h
index d2bdcd5e7312..66a8de0b1caa 100644
--- a/fs/xfs/libxfs/xfs_ialloc.h
+++ b/fs/xfs/libxfs/xfs_ialloc.h
@@ -81,7 +81,6 @@ xfs_dialloc(
81 struct xfs_trans *tp, /* transaction pointer */ 81 struct xfs_trans *tp, /* transaction pointer */
82 xfs_ino_t parent, /* parent inode (directory) */ 82 xfs_ino_t parent, /* parent inode (directory) */
83 umode_t mode, /* mode bits for new inode */ 83 umode_t mode, /* mode bits for new inode */
84 int okalloc, /* ok to allocate more space */
85 struct xfs_buf **agbp, /* buf for a.g. inode header */ 84 struct xfs_buf **agbp, /* buf for a.g. inode header */
86 xfs_ino_t *inop); /* inode number allocated */ 85 xfs_ino_t *inop); /* inode number allocated */
87 86
diff --git a/fs/xfs/libxfs/xfs_iext_tree.c b/fs/xfs/libxfs/xfs_iext_tree.c
index 89bf16b4d937..b0f31791c7e6 100644
--- a/fs/xfs/libxfs/xfs_iext_tree.c
+++ b/fs/xfs/libxfs/xfs_iext_tree.c
@@ -632,8 +632,6 @@ xfs_iext_insert(
632 struct xfs_iext_leaf *new = NULL; 632 struct xfs_iext_leaf *new = NULL;
633 int nr_entries, i; 633 int nr_entries, i;
634 634
635 trace_xfs_iext_insert(ip, cur, state, _RET_IP_);
636
637 if (ifp->if_height == 0) 635 if (ifp->if_height == 0)
638 xfs_iext_alloc_root(ifp, cur); 636 xfs_iext_alloc_root(ifp, cur);
639 else if (ifp->if_height == 1) 637 else if (ifp->if_height == 1)
@@ -661,6 +659,8 @@ xfs_iext_insert(
661 xfs_iext_set(cur_rec(cur), irec); 659 xfs_iext_set(cur_rec(cur), irec);
662 ifp->if_bytes += sizeof(struct xfs_iext_rec); 660 ifp->if_bytes += sizeof(struct xfs_iext_rec);
663 661
662 trace_xfs_iext_insert(ip, cur, state, _RET_IP_);
663
664 if (new) 664 if (new)
665 xfs_iext_insert_node(ifp, xfs_iext_leaf_key(new, 0), new, 2); 665 xfs_iext_insert_node(ifp, xfs_iext_leaf_key(new, 0), new, 2);
666} 666}
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index 585b35d34142..c40d26763075 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -1488,27 +1488,12 @@ __xfs_refcount_cow_alloc(
1488 xfs_extlen_t aglen, 1488 xfs_extlen_t aglen,
1489 struct xfs_defer_ops *dfops) 1489 struct xfs_defer_ops *dfops)
1490{ 1490{
1491 int error;
1492
1493 trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_private.a.agno, 1491 trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_private.a.agno,
1494 agbno, aglen); 1492 agbno, aglen);
1495 1493
1496 /* Add refcount btree reservation */ 1494 /* Add refcount btree reservation */
1497 error = xfs_refcount_adjust_cow(rcur, agbno, aglen, 1495 return xfs_refcount_adjust_cow(rcur, agbno, aglen,
1498 XFS_REFCOUNT_ADJUST_COW_ALLOC, dfops); 1496 XFS_REFCOUNT_ADJUST_COW_ALLOC, dfops);
1499 if (error)
1500 return error;
1501
1502 /* Add rmap entry */
1503 if (xfs_sb_version_hasrmapbt(&rcur->bc_mp->m_sb)) {
1504 error = xfs_rmap_alloc_extent(rcur->bc_mp, dfops,
1505 rcur->bc_private.a.agno,
1506 agbno, aglen, XFS_RMAP_OWN_COW);
1507 if (error)
1508 return error;
1509 }
1510
1511 return error;
1512} 1497}
1513 1498
1514/* 1499/*
@@ -1521,27 +1506,12 @@ __xfs_refcount_cow_free(
1521 xfs_extlen_t aglen, 1506 xfs_extlen_t aglen,
1522 struct xfs_defer_ops *dfops) 1507 struct xfs_defer_ops *dfops)
1523{ 1508{
1524 int error;
1525
1526 trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_private.a.agno, 1509 trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_private.a.agno,
1527 agbno, aglen); 1510 agbno, aglen);
1528 1511
1529 /* Remove refcount btree reservation */ 1512 /* Remove refcount btree reservation */
1530 error = xfs_refcount_adjust_cow(rcur, agbno, aglen, 1513 return xfs_refcount_adjust_cow(rcur, agbno, aglen,
1531 XFS_REFCOUNT_ADJUST_COW_FREE, dfops); 1514 XFS_REFCOUNT_ADJUST_COW_FREE, dfops);
1532 if (error)
1533 return error;
1534
1535 /* Remove rmap entry */
1536 if (xfs_sb_version_hasrmapbt(&rcur->bc_mp->m_sb)) {
1537 error = xfs_rmap_free_extent(rcur->bc_mp, dfops,
1538 rcur->bc_private.a.agno,
1539 agbno, aglen, XFS_RMAP_OWN_COW);
1540 if (error)
1541 return error;
1542 }
1543
1544 return error;
1545} 1515}
1546 1516
1547/* Record a CoW staging extent in the refcount btree. */ 1517/* Record a CoW staging extent in the refcount btree. */
@@ -1552,11 +1522,19 @@ xfs_refcount_alloc_cow_extent(
1552 xfs_fsblock_t fsb, 1522 xfs_fsblock_t fsb,
1553 xfs_extlen_t len) 1523 xfs_extlen_t len)
1554{ 1524{
1525 int error;
1526
1555 if (!xfs_sb_version_hasreflink(&mp->m_sb)) 1527 if (!xfs_sb_version_hasreflink(&mp->m_sb))
1556 return 0; 1528 return 0;
1557 1529
1558 return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_ALLOC_COW, 1530 error = __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_ALLOC_COW,
1559 fsb, len); 1531 fsb, len);
1532 if (error)
1533 return error;
1534
1535 /* Add rmap entry */
1536 return xfs_rmap_alloc_extent(mp, dfops, XFS_FSB_TO_AGNO(mp, fsb),
1537 XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
1560} 1538}
1561 1539
1562/* Forget a CoW staging event in the refcount btree. */ 1540/* Forget a CoW staging event in the refcount btree. */
@@ -1567,9 +1545,17 @@ xfs_refcount_free_cow_extent(
1567 xfs_fsblock_t fsb, 1545 xfs_fsblock_t fsb,
1568 xfs_extlen_t len) 1546 xfs_extlen_t len)
1569{ 1547{
1548 int error;
1549
1570 if (!xfs_sb_version_hasreflink(&mp->m_sb)) 1550 if (!xfs_sb_version_hasreflink(&mp->m_sb))
1571 return 0; 1551 return 0;
1572 1552
1553 /* Remove rmap entry */
1554 error = xfs_rmap_free_extent(mp, dfops, XFS_FSB_TO_AGNO(mp, fsb),
1555 XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
1556 if (error)
1557 return error;
1558
1573 return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_FREE_COW, 1559 return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_FREE_COW,
1574 fsb, len); 1560 fsb, len);
1575} 1561}
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index dd019cee1b3b..50db920ceeeb 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -368,6 +368,51 @@ xfs_rmap_lookup_le_range(
368} 368}
369 369
370/* 370/*
371 * Perform all the relevant owner checks for a removal op. If we're doing an
372 * unknown-owner removal then we have no owner information to check.
373 */
374static int
375xfs_rmap_free_check_owner(
376 struct xfs_mount *mp,
377 uint64_t ltoff,
378 struct xfs_rmap_irec *rec,
379 xfs_fsblock_t bno,
380 xfs_filblks_t len,
381 uint64_t owner,
382 uint64_t offset,
383 unsigned int flags)
384{
385 int error = 0;
386
387 if (owner == XFS_RMAP_OWN_UNKNOWN)
388 return 0;
389
390 /* Make sure the unwritten flag matches. */
391 XFS_WANT_CORRUPTED_GOTO(mp, (flags & XFS_RMAP_UNWRITTEN) ==
392 (rec->rm_flags & XFS_RMAP_UNWRITTEN), out);
393
394 /* Make sure the owner matches what we expect to find in the tree. */
395 XFS_WANT_CORRUPTED_GOTO(mp, owner == rec->rm_owner, out);
396
397 /* Check the offset, if necessary. */
398 if (XFS_RMAP_NON_INODE_OWNER(owner))
399 goto out;
400
401 if (flags & XFS_RMAP_BMBT_BLOCK) {
402 XFS_WANT_CORRUPTED_GOTO(mp, rec->rm_flags & XFS_RMAP_BMBT_BLOCK,
403 out);
404 } else {
405 XFS_WANT_CORRUPTED_GOTO(mp, rec->rm_offset <= offset, out);
406 XFS_WANT_CORRUPTED_GOTO(mp,
407 ltoff + rec->rm_blockcount >= offset + len,
408 out);
409 }
410
411out:
412 return error;
413}
414
415/*
371 * Find the extent in the rmap btree and remove it. 416 * Find the extent in the rmap btree and remove it.
372 * 417 *
373 * The record we find should always be an exact match for the extent that we're 418 * The record we find should always be an exact match for the extent that we're
@@ -444,33 +489,40 @@ xfs_rmap_unmap(
444 goto out_done; 489 goto out_done;
445 } 490 }
446 491
447 /* Make sure the unwritten flag matches. */ 492 /*
448 XFS_WANT_CORRUPTED_GOTO(mp, (flags & XFS_RMAP_UNWRITTEN) == 493 * If we're doing an unknown-owner removal for EFI recovery, we expect
449 (ltrec.rm_flags & XFS_RMAP_UNWRITTEN), out_error); 494 * to find the full range in the rmapbt or nothing at all. If we
495 * don't find any rmaps overlapping either end of the range, we're
496 * done. Hopefully this means that the EFI creator already queued
497 * (and finished) a RUI to remove the rmap.
498 */
499 if (owner == XFS_RMAP_OWN_UNKNOWN &&
500 ltrec.rm_startblock + ltrec.rm_blockcount <= bno) {
501 struct xfs_rmap_irec rtrec;
502
503 error = xfs_btree_increment(cur, 0, &i);
504 if (error)
505 goto out_error;
506 if (i == 0)
507 goto out_done;
508 error = xfs_rmap_get_rec(cur, &rtrec, &i);
509 if (error)
510 goto out_error;
511 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
512 if (rtrec.rm_startblock >= bno + len)
513 goto out_done;
514 }
450 515
451 /* Make sure the extent we found covers the entire freeing range. */ 516 /* Make sure the extent we found covers the entire freeing range. */
452 XFS_WANT_CORRUPTED_GOTO(mp, ltrec.rm_startblock <= bno && 517 XFS_WANT_CORRUPTED_GOTO(mp, ltrec.rm_startblock <= bno &&
453 ltrec.rm_startblock + ltrec.rm_blockcount >= 518 ltrec.rm_startblock + ltrec.rm_blockcount >=
454 bno + len, out_error); 519 bno + len, out_error);
455 520
456 /* Make sure the owner matches what we expect to find in the tree. */ 521 /* Check owner information. */
457 XFS_WANT_CORRUPTED_GOTO(mp, owner == ltrec.rm_owner || 522 error = xfs_rmap_free_check_owner(mp, ltoff, &ltrec, bno, len, owner,
458 XFS_RMAP_NON_INODE_OWNER(owner), out_error); 523 offset, flags);
459 524 if (error)
460 /* Check the offset, if necessary. */ 525 goto out_error;
461 if (!XFS_RMAP_NON_INODE_OWNER(owner)) {
462 if (flags & XFS_RMAP_BMBT_BLOCK) {
463 XFS_WANT_CORRUPTED_GOTO(mp,
464 ltrec.rm_flags & XFS_RMAP_BMBT_BLOCK,
465 out_error);
466 } else {
467 XFS_WANT_CORRUPTED_GOTO(mp,
468 ltrec.rm_offset <= offset, out_error);
469 XFS_WANT_CORRUPTED_GOTO(mp,
470 ltoff + ltrec.rm_blockcount >= offset + len,
471 out_error);
472 }
473 }
474 526
475 if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) { 527 if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) {
476 /* exact match, simply remove the record from rmap tree */ 528 /* exact match, simply remove the record from rmap tree */
@@ -664,6 +716,7 @@ xfs_rmap_map(
664 flags |= XFS_RMAP_UNWRITTEN; 716 flags |= XFS_RMAP_UNWRITTEN;
665 trace_xfs_rmap_map(mp, cur->bc_private.a.agno, bno, len, 717 trace_xfs_rmap_map(mp, cur->bc_private.a.agno, bno, len,
666 unwritten, oinfo); 718 unwritten, oinfo);
719 ASSERT(!xfs_rmap_should_skip_owner_update(oinfo));
667 720
668 /* 721 /*
669 * For the initial lookup, look for an exact match or the left-adjacent 722 * For the initial lookup, look for an exact match or the left-adjacent
diff --git a/fs/xfs/libxfs/xfs_rmap.h b/fs/xfs/libxfs/xfs_rmap.h
index 466ede637080..0fcd5b1ba729 100644
--- a/fs/xfs/libxfs/xfs_rmap.h
+++ b/fs/xfs/libxfs/xfs_rmap.h
@@ -61,7 +61,21 @@ static inline void
61xfs_rmap_skip_owner_update( 61xfs_rmap_skip_owner_update(
62 struct xfs_owner_info *oi) 62 struct xfs_owner_info *oi)
63{ 63{
64 oi->oi_owner = XFS_RMAP_OWN_UNKNOWN; 64 xfs_rmap_ag_owner(oi, XFS_RMAP_OWN_NULL);
65}
66
67static inline bool
68xfs_rmap_should_skip_owner_update(
69 struct xfs_owner_info *oi)
70{
71 return oi->oi_owner == XFS_RMAP_OWN_NULL;
72}
73
74static inline void
75xfs_rmap_any_owner_update(
76 struct xfs_owner_info *oi)
77{
78 xfs_rmap_ag_owner(oi, XFS_RMAP_OWN_UNKNOWN);
65} 79}
66 80
67/* Reverse mapping functions. */ 81/* Reverse mapping functions. */
diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c
index 9c42c4efd01e..ab3aef2ae823 100644
--- a/fs/xfs/scrub/scrub.c
+++ b/fs/xfs/scrub/scrub.c
@@ -46,7 +46,6 @@
46#include "scrub/scrub.h" 46#include "scrub/scrub.h"
47#include "scrub/common.h" 47#include "scrub/common.h"
48#include "scrub/trace.h" 48#include "scrub/trace.h"
49#include "scrub/scrub.h"
50#include "scrub/btree.h" 49#include "scrub/btree.h"
51 50
52/* 51/*
diff --git a/fs/xfs/scrub/trace.c b/fs/xfs/scrub/trace.c
index 472080e75788..86daed0e3a45 100644
--- a/fs/xfs/scrub/trace.c
+++ b/fs/xfs/scrub/trace.c
@@ -26,7 +26,6 @@
26#include "xfs_mount.h" 26#include "xfs_mount.h"
27#include "xfs_defer.h" 27#include "xfs_defer.h"
28#include "xfs_da_format.h" 28#include "xfs_da_format.h"
29#include "xfs_defer.h"
30#include "xfs_inode.h" 29#include "xfs_inode.h"
31#include "xfs_btree.h" 30#include "xfs_btree.h"
32#include "xfs_trans.h" 31#include "xfs_trans.h"
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 21e2d70884e1..4fc526a27a94 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -399,7 +399,7 @@ xfs_map_blocks(
399 (ip->i_df.if_flags & XFS_IFEXTENTS)); 399 (ip->i_df.if_flags & XFS_IFEXTENTS));
400 ASSERT(offset <= mp->m_super->s_maxbytes); 400 ASSERT(offset <= mp->m_super->s_maxbytes);
401 401
402 if ((xfs_ufsize_t)offset + count > mp->m_super->s_maxbytes) 402 if (offset > mp->m_super->s_maxbytes - count)
403 count = mp->m_super->s_maxbytes - offset; 403 count = mp->m_super->s_maxbytes - offset;
404 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); 404 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
405 offset_fsb = XFS_B_TO_FSBT(mp, offset); 405 offset_fsb = XFS_B_TO_FSBT(mp, offset);
@@ -1312,7 +1312,7 @@ xfs_get_blocks(
1312 lockmode = xfs_ilock_data_map_shared(ip); 1312 lockmode = xfs_ilock_data_map_shared(ip);
1313 1313
1314 ASSERT(offset <= mp->m_super->s_maxbytes); 1314 ASSERT(offset <= mp->m_super->s_maxbytes);
1315 if ((xfs_ufsize_t)offset + size > mp->m_super->s_maxbytes) 1315 if (offset > mp->m_super->s_maxbytes - size)
1316 size = mp->m_super->s_maxbytes - offset; 1316 size = mp->m_super->s_maxbytes - offset;
1317 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size); 1317 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
1318 offset_fsb = XFS_B_TO_FSBT(mp, offset); 1318 offset_fsb = XFS_B_TO_FSBT(mp, offset);
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index 44f8c5451210..64da90655e95 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -538,7 +538,7 @@ xfs_efi_recover(
538 return error; 538 return error;
539 efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents); 539 efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
540 540
541 xfs_rmap_skip_owner_update(&oinfo); 541 xfs_rmap_any_owner_update(&oinfo);
542 for (i = 0; i < efip->efi_format.efi_nextents; i++) { 542 for (i = 0; i < efip->efi_format.efi_nextents; i++) {
543 extp = &efip->efi_format.efi_extents[i]; 543 extp = &efip->efi_format.efi_extents[i];
544 error = xfs_trans_free_extent(tp, efdp, extp->ext_start, 544 error = xfs_trans_free_extent(tp, efdp, extp->ext_start,
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 8f22fc579dbb..60a2e128cb6a 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -571,6 +571,11 @@ xfs_growfs_data_private(
571 * this doesn't actually exist in the rmap btree. 571 * this doesn't actually exist in the rmap btree.
572 */ 572 */
573 xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_NULL); 573 xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_NULL);
574 error = xfs_rmap_free(tp, bp, agno,
575 be32_to_cpu(agf->agf_length) - new,
576 new, &oinfo);
577 if (error)
578 goto error0;
574 error = xfs_free_extent(tp, 579 error = xfs_free_extent(tp,
575 XFS_AGB_TO_FSB(mp, agno, 580 XFS_AGB_TO_FSB(mp, agno,
576 be32_to_cpu(agf->agf_length) - new), 581 be32_to_cpu(agf->agf_length) - new),
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 43005fbe8b1e..3861d61fb265 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -870,7 +870,7 @@ xfs_eofblocks_worker(
870 * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default). 870 * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default).
871 * (We'll just piggyback on the post-EOF prealloc space workqueue.) 871 * (We'll just piggyback on the post-EOF prealloc space workqueue.)
872 */ 872 */
873STATIC void 873void
874xfs_queue_cowblocks( 874xfs_queue_cowblocks(
875 struct xfs_mount *mp) 875 struct xfs_mount *mp)
876{ 876{
@@ -1536,8 +1536,23 @@ xfs_inode_free_quota_eofblocks(
1536 return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks); 1536 return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
1537} 1537}
1538 1538
1539static inline unsigned long
1540xfs_iflag_for_tag(
1541 int tag)
1542{
1543 switch (tag) {
1544 case XFS_ICI_EOFBLOCKS_TAG:
1545 return XFS_IEOFBLOCKS;
1546 case XFS_ICI_COWBLOCKS_TAG:
1547 return XFS_ICOWBLOCKS;
1548 default:
1549 ASSERT(0);
1550 return 0;
1551 }
1552}
1553
1539static void 1554static void
1540__xfs_inode_set_eofblocks_tag( 1555__xfs_inode_set_blocks_tag(
1541 xfs_inode_t *ip, 1556 xfs_inode_t *ip,
1542 void (*execute)(struct xfs_mount *mp), 1557 void (*execute)(struct xfs_mount *mp),
1543 void (*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno, 1558 void (*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
@@ -1552,10 +1567,10 @@ __xfs_inode_set_eofblocks_tag(
1552 * Don't bother locking the AG and looking up in the radix trees 1567 * Don't bother locking the AG and looking up in the radix trees
1553 * if we already know that we have the tag set. 1568 * if we already know that we have the tag set.
1554 */ 1569 */
1555 if (ip->i_flags & XFS_IEOFBLOCKS) 1570 if (ip->i_flags & xfs_iflag_for_tag(tag))
1556 return; 1571 return;
1557 spin_lock(&ip->i_flags_lock); 1572 spin_lock(&ip->i_flags_lock);
1558 ip->i_flags |= XFS_IEOFBLOCKS; 1573 ip->i_flags |= xfs_iflag_for_tag(tag);
1559 spin_unlock(&ip->i_flags_lock); 1574 spin_unlock(&ip->i_flags_lock);
1560 1575
1561 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); 1576 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
@@ -1587,13 +1602,13 @@ xfs_inode_set_eofblocks_tag(
1587 xfs_inode_t *ip) 1602 xfs_inode_t *ip)
1588{ 1603{
1589 trace_xfs_inode_set_eofblocks_tag(ip); 1604 trace_xfs_inode_set_eofblocks_tag(ip);
1590 return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_eofblocks, 1605 return __xfs_inode_set_blocks_tag(ip, xfs_queue_eofblocks,
1591 trace_xfs_perag_set_eofblocks, 1606 trace_xfs_perag_set_eofblocks,
1592 XFS_ICI_EOFBLOCKS_TAG); 1607 XFS_ICI_EOFBLOCKS_TAG);
1593} 1608}
1594 1609
1595static void 1610static void
1596__xfs_inode_clear_eofblocks_tag( 1611__xfs_inode_clear_blocks_tag(
1597 xfs_inode_t *ip, 1612 xfs_inode_t *ip,
1598 void (*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno, 1613 void (*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
1599 int error, unsigned long caller_ip), 1614 int error, unsigned long caller_ip),
@@ -1603,7 +1618,7 @@ __xfs_inode_clear_eofblocks_tag(
1603 struct xfs_perag *pag; 1618 struct xfs_perag *pag;
1604 1619
1605 spin_lock(&ip->i_flags_lock); 1620 spin_lock(&ip->i_flags_lock);
1606 ip->i_flags &= ~XFS_IEOFBLOCKS; 1621 ip->i_flags &= ~xfs_iflag_for_tag(tag);
1607 spin_unlock(&ip->i_flags_lock); 1622 spin_unlock(&ip->i_flags_lock);
1608 1623
1609 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); 1624 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
@@ -1630,7 +1645,7 @@ xfs_inode_clear_eofblocks_tag(
1630 xfs_inode_t *ip) 1645 xfs_inode_t *ip)
1631{ 1646{
1632 trace_xfs_inode_clear_eofblocks_tag(ip); 1647 trace_xfs_inode_clear_eofblocks_tag(ip);
1633 return __xfs_inode_clear_eofblocks_tag(ip, 1648 return __xfs_inode_clear_blocks_tag(ip,
1634 trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG); 1649 trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
1635} 1650}
1636 1651
@@ -1724,7 +1739,7 @@ xfs_inode_set_cowblocks_tag(
1724 xfs_inode_t *ip) 1739 xfs_inode_t *ip)
1725{ 1740{
1726 trace_xfs_inode_set_cowblocks_tag(ip); 1741 trace_xfs_inode_set_cowblocks_tag(ip);
1727 return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_cowblocks, 1742 return __xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks,
1728 trace_xfs_perag_set_cowblocks, 1743 trace_xfs_perag_set_cowblocks,
1729 XFS_ICI_COWBLOCKS_TAG); 1744 XFS_ICI_COWBLOCKS_TAG);
1730} 1745}
@@ -1734,6 +1749,6 @@ xfs_inode_clear_cowblocks_tag(
1734 xfs_inode_t *ip) 1749 xfs_inode_t *ip)
1735{ 1750{
1736 trace_xfs_inode_clear_cowblocks_tag(ip); 1751 trace_xfs_inode_clear_cowblocks_tag(ip);
1737 return __xfs_inode_clear_eofblocks_tag(ip, 1752 return __xfs_inode_clear_blocks_tag(ip,
1738 trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG); 1753 trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
1739} 1754}
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
index bff4d85e5498..d4a77588eca1 100644
--- a/fs/xfs/xfs_icache.h
+++ b/fs/xfs/xfs_icache.h
@@ -81,6 +81,7 @@ void xfs_inode_clear_cowblocks_tag(struct xfs_inode *ip);
81int xfs_icache_free_cowblocks(struct xfs_mount *, struct xfs_eofblocks *); 81int xfs_icache_free_cowblocks(struct xfs_mount *, struct xfs_eofblocks *);
82int xfs_inode_free_quota_cowblocks(struct xfs_inode *ip); 82int xfs_inode_free_quota_cowblocks(struct xfs_inode *ip);
83void xfs_cowblocks_worker(struct work_struct *); 83void xfs_cowblocks_worker(struct work_struct *);
84void xfs_queue_cowblocks(struct xfs_mount *);
84 85
85int xfs_inode_ag_iterator(struct xfs_mount *mp, 86int xfs_inode_ag_iterator(struct xfs_mount *mp,
86 int (*execute)(struct xfs_inode *ip, int flags, void *args), 87 int (*execute)(struct xfs_inode *ip, int flags, void *args),
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 801274126648..6f95bdb408ce 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -749,7 +749,6 @@ xfs_ialloc(
749 xfs_nlink_t nlink, 749 xfs_nlink_t nlink,
750 dev_t rdev, 750 dev_t rdev,
751 prid_t prid, 751 prid_t prid,
752 int okalloc,
753 xfs_buf_t **ialloc_context, 752 xfs_buf_t **ialloc_context,
754 xfs_inode_t **ipp) 753 xfs_inode_t **ipp)
755{ 754{
@@ -765,7 +764,7 @@ xfs_ialloc(
765 * Call the space management code to pick 764 * Call the space management code to pick
766 * the on-disk inode to be allocated. 765 * the on-disk inode to be allocated.
767 */ 766 */
768 error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc, 767 error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode,
769 ialloc_context, &ino); 768 ialloc_context, &ino);
770 if (error) 769 if (error)
771 return error; 770 return error;
@@ -957,7 +956,6 @@ xfs_dir_ialloc(
957 xfs_nlink_t nlink, 956 xfs_nlink_t nlink,
958 dev_t rdev, 957 dev_t rdev,
959 prid_t prid, /* project id */ 958 prid_t prid, /* project id */
960 int okalloc, /* ok to allocate new space */
961 xfs_inode_t **ipp, /* pointer to inode; it will be 959 xfs_inode_t **ipp, /* pointer to inode; it will be
962 locked. */ 960 locked. */
963 int *committed) 961 int *committed)
@@ -988,8 +986,8 @@ xfs_dir_ialloc(
988 * transaction commit so that no other process can steal 986 * transaction commit so that no other process can steal
989 * the inode(s) that we've just allocated. 987 * the inode(s) that we've just allocated.
990 */ 988 */
991 code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc, 989 code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, &ialloc_context,
992 &ialloc_context, &ip); 990 &ip);
993 991
994 /* 992 /*
995 * Return an error if we were unable to allocate a new inode. 993 * Return an error if we were unable to allocate a new inode.
@@ -1061,7 +1059,7 @@ xfs_dir_ialloc(
1061 * this call should always succeed. 1059 * this call should always succeed.
1062 */ 1060 */
1063 code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, 1061 code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
1064 okalloc, &ialloc_context, &ip); 1062 &ialloc_context, &ip);
1065 1063
1066 /* 1064 /*
1067 * If we get an error at this point, return to the caller 1065 * If we get an error at this point, return to the caller
@@ -1182,11 +1180,6 @@ xfs_create(
1182 xfs_flush_inodes(mp); 1180 xfs_flush_inodes(mp);
1183 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp); 1181 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1184 } 1182 }
1185 if (error == -ENOSPC) {
1186 /* No space at all so try a "no-allocation" reservation */
1187 resblks = 0;
1188 error = xfs_trans_alloc(mp, tres, 0, 0, 0, &tp);
1189 }
1190 if (error) 1183 if (error)
1191 goto out_release_inode; 1184 goto out_release_inode;
1192 1185
@@ -1203,19 +1196,13 @@ xfs_create(
1203 if (error) 1196 if (error)
1204 goto out_trans_cancel; 1197 goto out_trans_cancel;
1205 1198
1206 if (!resblks) {
1207 error = xfs_dir_canenter(tp, dp, name);
1208 if (error)
1209 goto out_trans_cancel;
1210 }
1211
1212 /* 1199 /*
1213 * A newly created regular or special file just has one directory 1200 * A newly created regular or special file just has one directory
1214 * entry pointing to them, but a directory also the "." entry 1201 * entry pointing to them, but a directory also the "." entry
1215 * pointing to itself. 1202 * pointing to itself.
1216 */ 1203 */
1217 error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, 1204 error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip,
1218 prid, resblks > 0, &ip, NULL); 1205 NULL);
1219 if (error) 1206 if (error)
1220 goto out_trans_cancel; 1207 goto out_trans_cancel;
1221 1208
@@ -1340,11 +1327,6 @@ xfs_create_tmpfile(
1340 tres = &M_RES(mp)->tr_create_tmpfile; 1327 tres = &M_RES(mp)->tr_create_tmpfile;
1341 1328
1342 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp); 1329 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1343 if (error == -ENOSPC) {
1344 /* No space at all so try a "no-allocation" reservation */
1345 resblks = 0;
1346 error = xfs_trans_alloc(mp, tres, 0, 0, 0, &tp);
1347 }
1348 if (error) 1330 if (error)
1349 goto out_release_inode; 1331 goto out_release_inode;
1350 1332
@@ -1353,8 +1335,7 @@ xfs_create_tmpfile(
1353 if (error) 1335 if (error)
1354 goto out_trans_cancel; 1336 goto out_trans_cancel;
1355 1337
1356 error = xfs_dir_ialloc(&tp, dp, mode, 1, 0, 1338 error = xfs_dir_ialloc(&tp, dp, mode, 1, 0, prid, &ip, NULL);
1357 prid, resblks > 0, &ip, NULL);
1358 if (error) 1339 if (error)
1359 goto out_trans_cancel; 1340 goto out_trans_cancel;
1360 1341
@@ -1506,6 +1487,24 @@ xfs_link(
1506 return error; 1487 return error;
1507} 1488}
1508 1489
1490/* Clear the reflink flag and the cowblocks tag if possible. */
1491static void
1492xfs_itruncate_clear_reflink_flags(
1493 struct xfs_inode *ip)
1494{
1495 struct xfs_ifork *dfork;
1496 struct xfs_ifork *cfork;
1497
1498 if (!xfs_is_reflink_inode(ip))
1499 return;
1500 dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1501 cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
1502 if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
1503 ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1504 if (cfork->if_bytes == 0)
1505 xfs_inode_clear_cowblocks_tag(ip);
1506}
1507
1509/* 1508/*
1510 * Free up the underlying blocks past new_size. The new size must be smaller 1509 * Free up the underlying blocks past new_size. The new size must be smaller
1511 * than the current size. This routine can be used both for the attribute and 1510 * than the current size. This routine can be used both for the attribute and
@@ -1602,15 +1601,7 @@ xfs_itruncate_extents(
1602 if (error) 1601 if (error)
1603 goto out; 1602 goto out;
1604 1603
1605 /* 1604 xfs_itruncate_clear_reflink_flags(ip);
1606 * Clear the reflink flag if there are no data fork blocks and
1607 * there are no extents staged in the cow fork.
1608 */
1609 if (xfs_is_reflink_inode(ip) && ip->i_cnextents == 0) {
1610 if (ip->i_d.di_nblocks == 0)
1611 ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1612 xfs_inode_clear_cowblocks_tag(ip);
1613 }
1614 1605
1615 /* 1606 /*
1616 * Always re-log the inode so that our permanent transaction can keep 1607 * Always re-log the inode so that our permanent transaction can keep
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index cc13c3763721..d383e392ec9d 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -232,6 +232,7 @@ static inline bool xfs_is_reflink_inode(struct xfs_inode *ip)
232 * log recovery to replay a bmap operation on the inode. 232 * log recovery to replay a bmap operation on the inode.
233 */ 233 */
234#define XFS_IRECOVERY (1 << 11) 234#define XFS_IRECOVERY (1 << 11)
235#define XFS_ICOWBLOCKS (1 << 12)/* has the cowblocks tag set */
235 236
236/* 237/*
237 * Per-lifetime flags need to be reset when re-using a reclaimable inode during 238 * Per-lifetime flags need to be reset when re-using a reclaimable inode during
@@ -428,7 +429,7 @@ xfs_extlen_t xfs_get_extsz_hint(struct xfs_inode *ip);
428xfs_extlen_t xfs_get_cowextsz_hint(struct xfs_inode *ip); 429xfs_extlen_t xfs_get_cowextsz_hint(struct xfs_inode *ip);
429 430
430int xfs_dir_ialloc(struct xfs_trans **, struct xfs_inode *, umode_t, 431int xfs_dir_ialloc(struct xfs_trans **, struct xfs_inode *, umode_t,
431 xfs_nlink_t, dev_t, prid_t, int, 432 xfs_nlink_t, dev_t, prid_t,
432 struct xfs_inode **, int *); 433 struct xfs_inode **, int *);
433 434
434/* from xfs_file.c */ 435/* from xfs_file.c */
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 33eb4fb2e3fd..66e1edbfb2b2 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1006,7 +1006,7 @@ xfs_file_iomap_begin(
1006 } 1006 }
1007 1007
1008 ASSERT(offset <= mp->m_super->s_maxbytes); 1008 ASSERT(offset <= mp->m_super->s_maxbytes);
1009 if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes) 1009 if (offset > mp->m_super->s_maxbytes - length)
1010 length = mp->m_super->s_maxbytes - offset; 1010 length = mp->m_super->s_maxbytes - offset;
1011 offset_fsb = XFS_B_TO_FSBT(mp, offset); 1011 offset_fsb = XFS_B_TO_FSBT(mp, offset);
1012 end_fsb = XFS_B_TO_FSB(mp, offset + length); 1012 end_fsb = XFS_B_TO_FSB(mp, offset + length);
@@ -1213,7 +1213,7 @@ xfs_xattr_iomap_begin(
1213 1213
1214 ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL); 1214 ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL);
1215 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap, 1215 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
1216 &nimaps, XFS_BMAPI_ENTIRE | XFS_BMAPI_ATTRFORK); 1216 &nimaps, XFS_BMAPI_ATTRFORK);
1217out_unlock: 1217out_unlock:
1218 xfs_iunlock(ip, lockmode); 1218 xfs_iunlock(ip, lockmode);
1219 1219
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 010a13a201aa..b897b11afb2c 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -48,7 +48,7 @@
48STATIC int xfs_qm_init_quotainos(xfs_mount_t *); 48STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
49STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); 49STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
50 50
51 51STATIC void xfs_qm_destroy_quotainos(xfs_quotainfo_t *qi);
52STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp); 52STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
53/* 53/*
54 * We use the batch lookup interface to iterate over the dquots as it 54 * We use the batch lookup interface to iterate over the dquots as it
@@ -695,9 +695,17 @@ xfs_qm_init_quotainfo(
695 qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan; 695 qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
696 qinf->qi_shrinker.seeks = DEFAULT_SEEKS; 696 qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
697 qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE; 697 qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
698 register_shrinker(&qinf->qi_shrinker); 698
699 error = register_shrinker(&qinf->qi_shrinker);
700 if (error)
701 goto out_free_inos;
702
699 return 0; 703 return 0;
700 704
705out_free_inos:
706 mutex_destroy(&qinf->qi_quotaofflock);
707 mutex_destroy(&qinf->qi_tree_lock);
708 xfs_qm_destroy_quotainos(qinf);
701out_free_lru: 709out_free_lru:
702 list_lru_destroy(&qinf->qi_lru); 710 list_lru_destroy(&qinf->qi_lru);
703out_free_qinf: 711out_free_qinf:
@@ -706,7 +714,6 @@ out_free_qinf:
706 return error; 714 return error;
707} 715}
708 716
709
710/* 717/*
711 * Gets called when unmounting a filesystem or when all quotas get 718 * Gets called when unmounting a filesystem or when all quotas get
712 * turned off. 719 * turned off.
@@ -723,19 +730,8 @@ xfs_qm_destroy_quotainfo(
723 730
724 unregister_shrinker(&qi->qi_shrinker); 731 unregister_shrinker(&qi->qi_shrinker);
725 list_lru_destroy(&qi->qi_lru); 732 list_lru_destroy(&qi->qi_lru);
726 733 xfs_qm_destroy_quotainos(qi);
727 if (qi->qi_uquotaip) { 734 mutex_destroy(&qi->qi_tree_lock);
728 IRELE(qi->qi_uquotaip);
729 qi->qi_uquotaip = NULL; /* paranoia */
730 }
731 if (qi->qi_gquotaip) {
732 IRELE(qi->qi_gquotaip);
733 qi->qi_gquotaip = NULL;
734 }
735 if (qi->qi_pquotaip) {
736 IRELE(qi->qi_pquotaip);
737 qi->qi_pquotaip = NULL;
738 }
739 mutex_destroy(&qi->qi_quotaofflock); 735 mutex_destroy(&qi->qi_quotaofflock);
740 kmem_free(qi); 736 kmem_free(qi);
741 mp->m_quotainfo = NULL; 737 mp->m_quotainfo = NULL;
@@ -793,8 +789,8 @@ xfs_qm_qino_alloc(
793 return error; 789 return error;
794 790
795 if (need_alloc) { 791 if (need_alloc) {
796 error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, 792 error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, ip,
797 &committed); 793 &committed);
798 if (error) { 794 if (error) {
799 xfs_trans_cancel(tp); 795 xfs_trans_cancel(tp);
800 return error; 796 return error;
@@ -1600,6 +1596,24 @@ error_rele:
1600} 1596}
1601 1597
1602STATIC void 1598STATIC void
1599xfs_qm_destroy_quotainos(
1600 xfs_quotainfo_t *qi)
1601{
1602 if (qi->qi_uquotaip) {
1603 IRELE(qi->qi_uquotaip);
1604 qi->qi_uquotaip = NULL; /* paranoia */
1605 }
1606 if (qi->qi_gquotaip) {
1607 IRELE(qi->qi_gquotaip);
1608 qi->qi_gquotaip = NULL;
1609 }
1610 if (qi->qi_pquotaip) {
1611 IRELE(qi->qi_pquotaip);
1612 qi->qi_pquotaip = NULL;
1613 }
1614}
1615
1616STATIC void
1603xfs_qm_dqfree_one( 1617xfs_qm_dqfree_one(
1604 struct xfs_dquot *dqp) 1618 struct xfs_dquot *dqp)
1605{ 1619{
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index cc041a29eb70..47aea2e82c26 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -49,8 +49,6 @@
49#include "xfs_alloc.h" 49#include "xfs_alloc.h"
50#include "xfs_quota_defs.h" 50#include "xfs_quota_defs.h"
51#include "xfs_quota.h" 51#include "xfs_quota.h"
52#include "xfs_btree.h"
53#include "xfs_bmap_btree.h"
54#include "xfs_reflink.h" 52#include "xfs_reflink.h"
55#include "xfs_iomap.h" 53#include "xfs_iomap.h"
56#include "xfs_rmap_btree.h" 54#include "xfs_rmap_btree.h"
@@ -456,6 +454,8 @@ retry:
456 if (error) 454 if (error)
457 goto out_bmap_cancel; 455 goto out_bmap_cancel;
458 456
457 xfs_inode_set_cowblocks_tag(ip);
458
459 /* Finish up. */ 459 /* Finish up. */
460 error = xfs_defer_finish(&tp, &dfops); 460 error = xfs_defer_finish(&tp, &dfops);
461 if (error) 461 if (error)
@@ -492,8 +492,9 @@ xfs_reflink_find_cow_mapping(
492 struct xfs_iext_cursor icur; 492 struct xfs_iext_cursor icur;
493 493
494 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED)); 494 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED));
495 ASSERT(xfs_is_reflink_inode(ip));
496 495
496 if (!xfs_is_reflink_inode(ip))
497 return false;
497 offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset); 498 offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
498 if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got)) 499 if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got))
499 return false; 500 return false;
@@ -612,6 +613,9 @@ xfs_reflink_cancel_cow_blocks(
612 613
613 /* Remove the mapping from the CoW fork. */ 614 /* Remove the mapping from the CoW fork. */
614 xfs_bmap_del_extent_cow(ip, &icur, &got, &del); 615 xfs_bmap_del_extent_cow(ip, &icur, &got, &del);
616 } else {
617 /* Didn't do anything, push cursor back. */
618 xfs_iext_prev(ifp, &icur);
615 } 619 }
616next_extent: 620next_extent:
617 if (!xfs_iext_get_extent(ifp, &icur, &got)) 621 if (!xfs_iext_get_extent(ifp, &icur, &got))
@@ -727,7 +731,7 @@ xfs_reflink_end_cow(
727 (unsigned int)(end_fsb - offset_fsb), 731 (unsigned int)(end_fsb - offset_fsb),
728 XFS_DATA_FORK); 732 XFS_DATA_FORK);
729 error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write, 733 error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
730 resblks, 0, 0, &tp); 734 resblks, 0, XFS_TRANS_RESERVE, &tp);
731 if (error) 735 if (error)
732 goto out; 736 goto out;
733 737
@@ -1293,6 +1297,17 @@ xfs_reflink_remap_range(
1293 1297
1294 trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out); 1298 trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
1295 1299
1300 /*
1301 * Clear out post-eof preallocations because we don't have page cache
1302 * backing the delayed allocations and they'll never get freed on
1303 * their own.
1304 */
1305 if (xfs_can_free_eofblocks(dest, true)) {
1306 ret = xfs_free_eofblocks(dest);
1307 if (ret)
1308 goto out_unlock;
1309 }
1310
1296 /* Set flags and remap blocks. */ 1311 /* Set flags and remap blocks. */
1297 ret = xfs_reflink_set_inode_flag(src, dest); 1312 ret = xfs_reflink_set_inode_flag(src, dest);
1298 if (ret) 1313 if (ret)
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 5122d3021117..1dacccc367f8 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1360,6 +1360,7 @@ xfs_fs_remount(
1360 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 1360 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1361 return error; 1361 return error;
1362 } 1362 }
1363 xfs_queue_cowblocks(mp);
1363 1364
1364 /* Create the per-AG metadata reservation pool .*/ 1365 /* Create the per-AG metadata reservation pool .*/
1365 error = xfs_fs_reserve_ag_blocks(mp); 1366 error = xfs_fs_reserve_ag_blocks(mp);
@@ -1369,6 +1370,14 @@ xfs_fs_remount(
1369 1370
1370 /* rw -> ro */ 1371 /* rw -> ro */
1371 if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & SB_RDONLY)) { 1372 if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & SB_RDONLY)) {
1373 /* Get rid of any leftover CoW reservations... */
1374 cancel_delayed_work_sync(&mp->m_cowblocks_work);
1375 error = xfs_icache_free_cowblocks(mp, NULL);
1376 if (error) {
1377 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1378 return error;
1379 }
1380
1372 /* Free the per-AG metadata reservation pool. */ 1381 /* Free the per-AG metadata reservation pool. */
1373 error = xfs_fs_unreserve_ag_blocks(mp); 1382 error = xfs_fs_unreserve_ag_blocks(mp);
1374 if (error) { 1383 if (error) {
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index 68d3ca2c4968..2e9e793a8f9d 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -232,11 +232,6 @@ xfs_symlink(
232 resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks); 232 resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks);
233 233
234 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_symlink, resblks, 0, 0, &tp); 234 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_symlink, resblks, 0, 0, &tp);
235 if (error == -ENOSPC && fs_blocks == 0) {
236 resblks = 0;
237 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_symlink, 0, 0, 0,
238 &tp);
239 }
240 if (error) 235 if (error)
241 goto out_release_inode; 236 goto out_release_inode;
242 237
@@ -260,14 +255,6 @@ xfs_symlink(
260 goto out_trans_cancel; 255 goto out_trans_cancel;
261 256
262 /* 257 /*
263 * Check for ability to enter directory entry, if no space reserved.
264 */
265 if (!resblks) {
266 error = xfs_dir_canenter(tp, dp, link_name);
267 if (error)
268 goto out_trans_cancel;
269 }
270 /*
271 * Initialize the bmap freelist prior to calling either 258 * Initialize the bmap freelist prior to calling either
272 * bmapi or the directory create code. 259 * bmapi or the directory create code.
273 */ 260 */
@@ -277,7 +264,7 @@ xfs_symlink(
277 * Allocate an inode for the symlink. 264 * Allocate an inode for the symlink.
278 */ 265 */
279 error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0, 266 error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0,
280 prid, resblks > 0, &ip, NULL); 267 prid, &ip, NULL);
281 if (error) 268 if (error)
282 goto out_trans_cancel; 269 goto out_trans_cancel;
283 270
diff --git a/fs/xfs/xfs_trace.c b/fs/xfs/xfs_trace.c
index 5d95fe348294..35f3546b6af5 100644
--- a/fs/xfs/xfs_trace.c
+++ b/fs/xfs/xfs_trace.c
@@ -24,7 +24,6 @@
24#include "xfs_mount.h" 24#include "xfs_mount.h"
25#include "xfs_defer.h" 25#include "xfs_defer.h"
26#include "xfs_da_format.h" 26#include "xfs_da_format.h"
27#include "xfs_defer.h"
28#include "xfs_inode.h" 27#include "xfs_inode.h"
29#include "xfs_btree.h" 28#include "xfs_btree.h"
30#include "xfs_da_btree.h" 29#include "xfs_da_btree.h"
diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
index ea189d88a3cc..8ac4e68a12f0 100644
--- a/include/asm-generic/mm_hooks.h
+++ b/include/asm-generic/mm_hooks.h
@@ -7,9 +7,10 @@
7#ifndef _ASM_GENERIC_MM_HOOKS_H 7#ifndef _ASM_GENERIC_MM_HOOKS_H
8#define _ASM_GENERIC_MM_HOOKS_H 8#define _ASM_GENERIC_MM_HOOKS_H
9 9
10static inline void arch_dup_mmap(struct mm_struct *oldmm, 10static inline int arch_dup_mmap(struct mm_struct *oldmm,
11 struct mm_struct *mm) 11 struct mm_struct *mm)
12{ 12{
13 return 0;
13} 14}
14 15
15static inline void arch_exit_mmap(struct mm_struct *mm) 16static inline void arch_exit_mmap(struct mm_struct *mm)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index b234d54f2cb6..868e68561f91 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1025,6 +1025,11 @@ static inline int pmd_clear_huge(pmd_t *pmd)
1025struct file; 1025struct file;
1026int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, 1026int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
1027 unsigned long size, pgprot_t *vma_prot); 1027 unsigned long size, pgprot_t *vma_prot);
1028
1029#ifndef CONFIG_X86_ESPFIX64
1030static inline void init_espfix_bsp(void) { }
1031#endif
1032
1028#endif /* !__ASSEMBLY__ */ 1033#endif /* !__ASSEMBLY__ */
1029 1034
1030#ifndef io_remap_pfn_range 1035#ifndef io_remap_pfn_range
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 38d9c5861ed8..f38227a78eae 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -18,6 +18,7 @@
18#include <linux/if_alg.h> 18#include <linux/if_alg.h>
19#include <linux/scatterlist.h> 19#include <linux/scatterlist.h>
20#include <linux/types.h> 20#include <linux/types.h>
21#include <linux/atomic.h>
21#include <net/sock.h> 22#include <net/sock.h>
22 23
23#include <crypto/aead.h> 24#include <crypto/aead.h>
@@ -150,7 +151,7 @@ struct af_alg_ctx {
150 struct crypto_wait wait; 151 struct crypto_wait wait;
151 152
152 size_t used; 153 size_t used;
153 size_t rcvused; 154 atomic_t rcvused;
154 155
155 bool more; 156 bool more;
156 bool merge; 157 bool merge;
@@ -215,7 +216,7 @@ static inline int af_alg_rcvbuf(struct sock *sk)
215 struct af_alg_ctx *ctx = ask->private; 216 struct af_alg_ctx *ctx = ask->private;
216 217
217 return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) - 218 return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) -
218 ctx->rcvused, 0); 219 atomic_read(&ctx->rcvused), 0);
219} 220}
220 221
221/** 222/**
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index f0b44c16e88f..c2bae8da642c 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -82,6 +82,14 @@ int ahash_register_instance(struct crypto_template *tmpl,
82 struct ahash_instance *inst); 82 struct ahash_instance *inst);
83void ahash_free_instance(struct crypto_instance *inst); 83void ahash_free_instance(struct crypto_instance *inst);
84 84
85int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
86 unsigned int keylen);
87
88static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
89{
90 return alg->setkey != shash_no_setkey;
91}
92
85int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, 93int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
86 struct hash_alg_common *alg, 94 struct hash_alg_common *alg,
87 struct crypto_instance *inst); 95 struct crypto_instance *inst);
diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h
index cceafa01f907..b67404fc4b34 100644
--- a/include/crypto/mcryptd.h
+++ b/include/crypto/mcryptd.h
@@ -27,6 +27,7 @@ static inline struct mcryptd_ahash *__mcryptd_ahash_cast(
27 27
28struct mcryptd_cpu_queue { 28struct mcryptd_cpu_queue {
29 struct crypto_queue queue; 29 struct crypto_queue queue;
30 spinlock_t q_lock;
30 struct work_struct work; 31 struct work_struct work;
31}; 32};
32 33
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index df9807a3caae..5971577016a2 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -24,6 +24,7 @@
24#define __DRM_CONNECTOR_H__ 24#define __DRM_CONNECTOR_H__
25 25
26#include <linux/list.h> 26#include <linux/list.h>
27#include <linux/llist.h>
27#include <linux/ctype.h> 28#include <linux/ctype.h>
28#include <linux/hdmi.h> 29#include <linux/hdmi.h>
29#include <drm/drm_mode_object.h> 30#include <drm/drm_mode_object.h>
@@ -916,6 +917,15 @@ struct drm_connector {
916 uint8_t num_h_tile, num_v_tile; 917 uint8_t num_h_tile, num_v_tile;
917 uint8_t tile_h_loc, tile_v_loc; 918 uint8_t tile_h_loc, tile_v_loc;
918 uint16_t tile_h_size, tile_v_size; 919 uint16_t tile_h_size, tile_v_size;
920
921 /**
922 * @free_node:
923 *
924 * List used only by &drm_connector_iter to be able to clean up a
925 * connector from any context, in conjunction with
926 * &drm_mode_config.connector_free_work.
927 */
928 struct llist_node free_node;
919}; 929};
920 930
921#define obj_to_connector(x) container_of(x, struct drm_connector, base) 931#define obj_to_connector(x) container_of(x, struct drm_connector, base)
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 2ec41d032e56..efe6d5a8e834 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -465,6 +465,8 @@ struct edid *drm_get_edid(struct drm_connector *connector,
465struct edid *drm_get_edid_switcheroo(struct drm_connector *connector, 465struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
466 struct i2c_adapter *adapter); 466 struct i2c_adapter *adapter);
467struct edid *drm_edid_duplicate(const struct edid *edid); 467struct edid *drm_edid_duplicate(const struct edid *edid);
468void drm_reset_display_info(struct drm_connector *connector);
469u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid);
468int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); 470int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
469 471
470u8 drm_match_cea_mode(const struct drm_display_mode *to_match); 472u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index b21e827c5c78..b0ce26d71296 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -27,6 +27,7 @@
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/idr.h> 28#include <linux/idr.h>
29#include <linux/workqueue.h> 29#include <linux/workqueue.h>
30#include <linux/llist.h>
30 31
31#include <drm/drm_modeset_lock.h> 32#include <drm/drm_modeset_lock.h>
32 33
@@ -393,7 +394,7 @@ struct drm_mode_config {
393 394
394 /** 395 /**
395 * @connector_list_lock: Protects @num_connector and 396 * @connector_list_lock: Protects @num_connector and
396 * @connector_list. 397 * @connector_list and @connector_free_list.
397 */ 398 */
398 spinlock_t connector_list_lock; 399 spinlock_t connector_list_lock;
399 /** 400 /**
@@ -414,6 +415,21 @@ struct drm_mode_config {
414 */ 415 */
415 struct list_head connector_list; 416 struct list_head connector_list;
416 /** 417 /**
418 * @connector_free_list:
419 *
420 * List of connector objects linked with &drm_connector.free_head.
421 * Protected by @connector_list_lock. Used by
422 * drm_for_each_connector_iter() and
423 * &struct drm_connector_list_iter to savely free connectors using
424 * @connector_free_work.
425 */
426 struct llist_head connector_free_list;
427 /**
428 * @connector_free_work: Work to clean up @connector_free_list.
429 */
430 struct work_struct connector_free_work;
431
432 /**
417 * @num_encoder: 433 * @num_encoder:
418 * 434 *
419 * Number of encoders on this device. This is invariant over the 435 * Number of encoders on this device. This is invariant over the
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 01ee473517e2..9da6ce22803f 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -62,7 +62,7 @@ struct arch_timer_cpu {
62 bool enabled; 62 bool enabled;
63}; 63};
64 64
65int kvm_timer_hyp_init(void); 65int kvm_timer_hyp_init(bool);
66int kvm_timer_enable(struct kvm_vcpu *vcpu); 66int kvm_timer_enable(struct kvm_vcpu *vcpu);
67int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu); 67int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
68void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu); 68void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
@@ -93,7 +93,4 @@ void kvm_timer_init_vhe(void);
93#define vcpu_vtimer(v) (&(v)->arch.timer_cpu.vtimer) 93#define vcpu_vtimer(v) (&(v)->arch.timer_cpu.vtimer)
94#define vcpu_ptimer(v) (&(v)->arch.timer_cpu.ptimer) 94#define vcpu_ptimer(v) (&(v)->arch.timer_cpu.ptimer)
95 95
96void enable_el1_phys_timer_access(void);
97void disable_el1_phys_timer_access(void);
98
99#endif 96#endif
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 82f0c8fd7be8..23d29b39f71e 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -492,6 +492,8 @@ extern unsigned int bvec_nr_vecs(unsigned short idx);
492 492
493#define bio_set_dev(bio, bdev) \ 493#define bio_set_dev(bio, bdev) \
494do { \ 494do { \
495 if ((bio)->bi_disk != (bdev)->bd_disk) \
496 bio_clear_flag(bio, BIO_THROTTLED);\
495 (bio)->bi_disk = (bdev)->bd_disk; \ 497 (bio)->bi_disk = (bdev)->bd_disk; \
496 (bio)->bi_partno = (bdev)->bd_partno; \ 498 (bio)->bi_partno = (bdev)->bd_partno; \
497} while (0) 499} while (0)
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index a1e628e032da..9e7d8bd776d2 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -50,8 +50,6 @@ struct blk_issue_stat {
50struct bio { 50struct bio {
51 struct bio *bi_next; /* request queue link */ 51 struct bio *bi_next; /* request queue link */
52 struct gendisk *bi_disk; 52 struct gendisk *bi_disk;
53 u8 bi_partno;
54 blk_status_t bi_status;
55 unsigned int bi_opf; /* bottom bits req flags, 53 unsigned int bi_opf; /* bottom bits req flags,
56 * top bits REQ_OP. Use 54 * top bits REQ_OP. Use
57 * accessors. 55 * accessors.
@@ -59,8 +57,8 @@ struct bio {
59 unsigned short bi_flags; /* status, etc and bvec pool number */ 57 unsigned short bi_flags; /* status, etc and bvec pool number */
60 unsigned short bi_ioprio; 58 unsigned short bi_ioprio;
61 unsigned short bi_write_hint; 59 unsigned short bi_write_hint;
62 60 blk_status_t bi_status;
63 struct bvec_iter bi_iter; 61 u8 bi_partno;
64 62
65 /* Number of segments in this BIO after 63 /* Number of segments in this BIO after
66 * physical address coalescing is performed. 64 * physical address coalescing is performed.
@@ -74,8 +72,9 @@ struct bio {
74 unsigned int bi_seg_front_size; 72 unsigned int bi_seg_front_size;
75 unsigned int bi_seg_back_size; 73 unsigned int bi_seg_back_size;
76 74
77 atomic_t __bi_remaining; 75 struct bvec_iter bi_iter;
78 76
77 atomic_t __bi_remaining;
79 bio_end_io_t *bi_end_io; 78 bio_end_io_t *bi_end_io;
80 79
81 void *bi_private; 80 void *bi_private;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8089ca17db9a..0ce8a372d506 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -135,7 +135,7 @@ typedef __u32 __bitwise req_flags_t;
135struct request { 135struct request {
136 struct list_head queuelist; 136 struct list_head queuelist;
137 union { 137 union {
138 call_single_data_t csd; 138 struct __call_single_data csd;
139 u64 fifo_time; 139 u64 fifo_time;
140 }; 140 };
141 141
@@ -241,14 +241,24 @@ struct request {
241 struct request *next_rq; 241 struct request *next_rq;
242}; 242};
243 243
244static inline bool blk_op_is_scsi(unsigned int op)
245{
246 return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT;
247}
248
249static inline bool blk_op_is_private(unsigned int op)
250{
251 return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
252}
253
244static inline bool blk_rq_is_scsi(struct request *rq) 254static inline bool blk_rq_is_scsi(struct request *rq)
245{ 255{
246 return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT; 256 return blk_op_is_scsi(req_op(rq));
247} 257}
248 258
249static inline bool blk_rq_is_private(struct request *rq) 259static inline bool blk_rq_is_private(struct request *rq)
250{ 260{
251 return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT; 261 return blk_op_is_private(req_op(rq));
252} 262}
253 263
254static inline bool blk_rq_is_passthrough(struct request *rq) 264static inline bool blk_rq_is_passthrough(struct request *rq)
@@ -256,6 +266,13 @@ static inline bool blk_rq_is_passthrough(struct request *rq)
256 return blk_rq_is_scsi(rq) || blk_rq_is_private(rq); 266 return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
257} 267}
258 268
269static inline bool bio_is_passthrough(struct bio *bio)
270{
271 unsigned op = bio_op(bio);
272
273 return blk_op_is_scsi(op) || blk_op_is_private(op);
274}
275
259static inline unsigned short req_get_ioprio(struct request *req) 276static inline unsigned short req_get_ioprio(struct request *req)
260{ 277{
261 return req->ioprio; 278 return req->ioprio;
@@ -948,7 +965,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
948extern void blk_rq_unprep_clone(struct request *rq); 965extern void blk_rq_unprep_clone(struct request *rq);
949extern blk_status_t blk_insert_cloned_request(struct request_queue *q, 966extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
950 struct request *rq); 967 struct request *rq);
951extern int blk_rq_append_bio(struct request *rq, struct bio *bio); 968extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
952extern void blk_delay_queue(struct request_queue *, unsigned long); 969extern void blk_delay_queue(struct request_queue *, unsigned long);
953extern void blk_queue_split(struct request_queue *, struct bio **); 970extern void blk_queue_split(struct request_queue *, struct bio **);
954extern void blk_recount_segments(struct request_queue *, struct bio *); 971extern void blk_recount_segments(struct request_queue *, struct bio *);
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e55e4255a210..0b25cf87b6d6 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -43,7 +43,14 @@ struct bpf_map_ops {
43}; 43};
44 44
45struct bpf_map { 45struct bpf_map {
46 atomic_t refcnt; 46 /* 1st cacheline with read-mostly members of which some
47 * are also accessed in fast-path (e.g. ops, max_entries).
48 */
49 const struct bpf_map_ops *ops ____cacheline_aligned;
50 struct bpf_map *inner_map_meta;
51#ifdef CONFIG_SECURITY
52 void *security;
53#endif
47 enum bpf_map_type map_type; 54 enum bpf_map_type map_type;
48 u32 key_size; 55 u32 key_size;
49 u32 value_size; 56 u32 value_size;
@@ -52,15 +59,17 @@ struct bpf_map {
52 u32 pages; 59 u32 pages;
53 u32 id; 60 u32 id;
54 int numa_node; 61 int numa_node;
55 struct user_struct *user; 62 bool unpriv_array;
56 const struct bpf_map_ops *ops; 63 /* 7 bytes hole */
57 struct work_struct work; 64
65 /* 2nd cacheline with misc members to avoid false sharing
66 * particularly with refcounting.
67 */
68 struct user_struct *user ____cacheline_aligned;
69 atomic_t refcnt;
58 atomic_t usercnt; 70 atomic_t usercnt;
59 struct bpf_map *inner_map_meta; 71 struct work_struct work;
60 char name[BPF_OBJ_NAME_LEN]; 72 char name[BPF_OBJ_NAME_LEN];
61#ifdef CONFIG_SECURITY
62 void *security;
63#endif
64}; 73};
65 74
66/* function argument constraints */ 75/* function argument constraints */
@@ -221,6 +230,7 @@ struct bpf_prog_aux {
221struct bpf_array { 230struct bpf_array {
222 struct bpf_map map; 231 struct bpf_map map;
223 u32 elem_size; 232 u32 elem_size;
233 u32 index_mask;
224 /* 'ownership' of prog_array is claimed by the first program that 234 /* 'ownership' of prog_array is claimed by the first program that
225 * is going to use this map or by the first program which FD is stored 235 * is going to use this map or by the first program which FD is stored
226 * in the map to make sure that all callers and callees have the same 236 * in the map to make sure that all callers and callees have the same
@@ -419,6 +429,8 @@ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
419 attr->numa_node : NUMA_NO_NODE; 429 attr->numa_node : NUMA_NO_NODE;
420} 430}
421 431
432struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
433
422#else /* !CONFIG_BPF_SYSCALL */ 434#else /* !CONFIG_BPF_SYSCALL */
423static inline struct bpf_prog *bpf_prog_get(u32 ufd) 435static inline struct bpf_prog *bpf_prog_get(u32 ufd)
424{ 436{
@@ -506,6 +518,12 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
506{ 518{
507 return 0; 519 return 0;
508} 520}
521
522static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
523 enum bpf_prog_type type)
524{
525 return ERR_PTR(-EOPNOTSUPP);
526}
509#endif /* CONFIG_BPF_SYSCALL */ 527#endif /* CONFIG_BPF_SYSCALL */
510 528
511static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, 529static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
@@ -514,6 +532,8 @@ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
514 return bpf_prog_get_type_dev(ufd, type, false); 532 return bpf_prog_get_type_dev(ufd, type, false);
515} 533}
516 534
535bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
536
517int bpf_prog_offload_compile(struct bpf_prog *prog); 537int bpf_prog_offload_compile(struct bpf_prog *prog);
518void bpf_prog_offload_destroy(struct bpf_prog *prog); 538void bpf_prog_offload_destroy(struct bpf_prog *prog);
519 539
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index c561b986bab0..1632bb13ad8a 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -15,11 +15,11 @@
15 * In practice this is far bigger than any realistic pointer offset; this limit 15 * In practice this is far bigger than any realistic pointer offset; this limit
16 * ensures that umax_value + (int)off + (int)size cannot overflow a u64. 16 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
17 */ 17 */
18#define BPF_MAX_VAR_OFF (1ULL << 31) 18#define BPF_MAX_VAR_OFF (1 << 29)
19/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures 19/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
20 * that converting umax_value to int cannot overflow. 20 * that converting umax_value to int cannot overflow.
21 */ 21 */
22#define BPF_MAX_VAR_SIZ INT_MAX 22#define BPF_MAX_VAR_SIZ (1 << 29)
23 23
24/* Liveness marks, used for registers and spilled-regs (in stack slots). 24/* Liveness marks, used for registers and spilled-regs (in stack slots).
25 * Read marks propagate upwards until they find a write mark; they record that 25 * Read marks propagate upwards until they find a write mark; they record that
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 2272ded07496..631354acfa72 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -219,7 +219,7 @@
219/* Mark a function definition as prohibited from being cloned. */ 219/* Mark a function definition as prohibited from being cloned. */
220#define __noclone __attribute__((__noclone__, __optimize__("no-tracer"))) 220#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
221 221
222#ifdef RANDSTRUCT_PLUGIN 222#if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__)
223#define __randomize_layout __attribute__((randomize_layout)) 223#define __randomize_layout __attribute__((randomize_layout))
224#define __no_randomize_layout __attribute__((no_randomize_layout)) 224#define __no_randomize_layout __attribute__((no_randomize_layout))
225#endif 225#endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 188ed9f65517..52e611ab9a6c 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -220,21 +220,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
220/* 220/*
221 * Prevent the compiler from merging or refetching reads or writes. The 221 * Prevent the compiler from merging or refetching reads or writes. The
222 * compiler is also forbidden from reordering successive instances of 222 * compiler is also forbidden from reordering successive instances of
223 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the 223 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
224 * compiler is aware of some particular ordering. One way to make the 224 * particular ordering. One way to make the compiler aware of ordering is to
225 * compiler aware of ordering is to put the two invocations of READ_ONCE, 225 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
226 * WRITE_ONCE or ACCESS_ONCE() in different C statements. 226 * statements.
227 * 227 *
228 * In contrast to ACCESS_ONCE these two macros will also work on aggregate 228 * These two macros will also work on aggregate data types like structs or
229 * data types like structs or unions. If the size of the accessed data 229 * unions. If the size of the accessed data type exceeds the word size of
230 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) 230 * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
231 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at 231 * fall back to memcpy(). There's at least two memcpy()s: one for the
232 * least two memcpy()s: one for the __builtin_memcpy() and then one for 232 * __builtin_memcpy() and then one for the macro doing the copy of variable
233 * the macro doing the copy of variable - '__u' allocated on the stack. 233 * - '__u' allocated on the stack.
234 * 234 *
235 * Their two major use cases are: (1) Mediating communication between 235 * Their two major use cases are: (1) Mediating communication between
236 * process-level code and irq/NMI handlers, all running on the same CPU, 236 * process-level code and irq/NMI handlers, all running on the same CPU,
237 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise 237 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
238 * mutilate accesses that either do not require ordering or that interact 238 * mutilate accesses that either do not require ordering or that interact
239 * with an explicit memory barrier or atomic instruction that provides the 239 * with an explicit memory barrier or atomic instruction that provides the
240 * required ordering. 240 * required ordering.
@@ -327,29 +327,4 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
327 compiletime_assert(__native_word(t), \ 327 compiletime_assert(__native_word(t), \
328 "Need native word sized stores/loads for atomicity.") 328 "Need native word sized stores/loads for atomicity.")
329 329
330/*
331 * Prevent the compiler from merging or refetching accesses. The compiler
332 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
333 * but only when the compiler is aware of some particular ordering. One way
334 * to make the compiler aware of ordering is to put the two invocations of
335 * ACCESS_ONCE() in different C statements.
336 *
337 * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
338 * on a union member will work as long as the size of the member matches the
339 * size of the union and the size is smaller than word size.
340 *
341 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
342 * between process-level code and irq/NMI handlers, all running on the same CPU,
343 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
344 * mutilate accesses that either do not require ordering or that interact
345 * with an explicit memory barrier or atomic instruction that provides the
346 * required ordering.
347 *
348 * If possible use READ_ONCE()/WRITE_ONCE() instead.
349 */
350#define __ACCESS_ONCE(x) ({ \
351 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
352 (volatile typeof(x) *)&(x); })
353#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
354
355#endif /* __LINUX_COMPILER_H */ 330#endif /* __LINUX_COMPILER_H */
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 0662a417febe..519e94915d18 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -10,9 +10,6 @@
10 */ 10 */
11 11
12#include <linux/wait.h> 12#include <linux/wait.h>
13#ifdef CONFIG_LOCKDEP_COMPLETIONS
14#include <linux/lockdep.h>
15#endif
16 13
17/* 14/*
18 * struct completion - structure used to maintain state for a "completion" 15 * struct completion - structure used to maintain state for a "completion"
@@ -29,58 +26,15 @@
29struct completion { 26struct completion {
30 unsigned int done; 27 unsigned int done;
31 wait_queue_head_t wait; 28 wait_queue_head_t wait;
32#ifdef CONFIG_LOCKDEP_COMPLETIONS
33 struct lockdep_map_cross map;
34#endif
35}; 29};
36 30
37#ifdef CONFIG_LOCKDEP_COMPLETIONS
38static inline void complete_acquire(struct completion *x)
39{
40 lock_acquire_exclusive((struct lockdep_map *)&x->map, 0, 0, NULL, _RET_IP_);
41}
42
43static inline void complete_release(struct completion *x)
44{
45 lock_release((struct lockdep_map *)&x->map, 0, _RET_IP_);
46}
47
48static inline void complete_release_commit(struct completion *x)
49{
50 lock_commit_crosslock((struct lockdep_map *)&x->map);
51}
52
53#define init_completion_map(x, m) \
54do { \
55 lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \
56 (m)->name, (m)->key, 0); \
57 __init_completion(x); \
58} while (0)
59
60#define init_completion(x) \
61do { \
62 static struct lock_class_key __key; \
63 lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \
64 "(completion)" #x, \
65 &__key, 0); \
66 __init_completion(x); \
67} while (0)
68#else
69#define init_completion_map(x, m) __init_completion(x) 31#define init_completion_map(x, m) __init_completion(x)
70#define init_completion(x) __init_completion(x) 32#define init_completion(x) __init_completion(x)
71static inline void complete_acquire(struct completion *x) {} 33static inline void complete_acquire(struct completion *x) {}
72static inline void complete_release(struct completion *x) {} 34static inline void complete_release(struct completion *x) {}
73static inline void complete_release_commit(struct completion *x) {}
74#endif
75 35
76#ifdef CONFIG_LOCKDEP_COMPLETIONS
77#define COMPLETION_INITIALIZER(work) \
78 { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait), \
79 STATIC_CROSS_LOCKDEP_MAP_INIT("(completion)" #work, &(work)) }
80#else
81#define COMPLETION_INITIALIZER(work) \ 36#define COMPLETION_INITIALIZER(work) \
82 { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } 37 { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
83#endif
84 38
85#define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ 39#define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
86 (*({ init_completion_map(&(work), &(map)); &(work); })) 40 (*({ init_completion_map(&(work), &(map)); &(work); }))
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index a04ef7c15c6a..7b01bc11c692 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -47,6 +47,13 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr);
47extern int cpu_add_dev_attr_group(struct attribute_group *attrs); 47extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
48extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); 48extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
49 49
50extern ssize_t cpu_show_meltdown(struct device *dev,
51 struct device_attribute *attr, char *buf);
52extern ssize_t cpu_show_spectre_v1(struct device *dev,
53 struct device_attribute *attr, char *buf);
54extern ssize_t cpu_show_spectre_v2(struct device *dev,
55 struct device_attribute *attr, char *buf);
56
50extern __printf(4, 5) 57extern __printf(4, 5)
51struct device *cpu_device_create(struct device *parent, void *drvdata, 58struct device *cpu_device_create(struct device *parent, void *drvdata,
52 const struct attribute_group **groups, 59 const struct attribute_group **groups,
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 201ab7267986..1a32e558eb11 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -86,7 +86,7 @@ enum cpuhp_state {
86 CPUHP_MM_ZSWP_POOL_PREPARE, 86 CPUHP_MM_ZSWP_POOL_PREPARE,
87 CPUHP_KVM_PPC_BOOK3S_PREPARE, 87 CPUHP_KVM_PPC_BOOK3S_PREPARE,
88 CPUHP_ZCOMP_PREPARE, 88 CPUHP_ZCOMP_PREPARE,
89 CPUHP_TIMERS_DEAD, 89 CPUHP_TIMERS_PREPARE,
90 CPUHP_MIPS_SOC_PREPARE, 90 CPUHP_MIPS_SOC_PREPARE,
91 CPUHP_BP_PREPARE_DYN, 91 CPUHP_BP_PREPARE_DYN,
92 CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20, 92 CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index 06097ef30449..b511f6d24b42 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -42,6 +42,8 @@ phys_addr_t paddr_vmcoreinfo_note(void);
42 vmcoreinfo_append_str("PAGESIZE=%ld\n", value) 42 vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
43#define VMCOREINFO_SYMBOL(name) \ 43#define VMCOREINFO_SYMBOL(name) \
44 vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name) 44 vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
45#define VMCOREINFO_SYMBOL_ARRAY(name) \
46 vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)name)
45#define VMCOREINFO_SIZE(name) \ 47#define VMCOREINFO_SIZE(name) \
46 vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \ 48 vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
47 (unsigned long)sizeof(name)) 49 (unsigned long)sizeof(name))
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 099058e1178b..631286535d0f 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -83,6 +83,7 @@ extern int set_current_groups(struct group_info *);
83extern void set_groups(struct cred *, struct group_info *); 83extern void set_groups(struct cred *, struct group_info *);
84extern int groups_search(const struct group_info *, kgid_t); 84extern int groups_search(const struct group_info *, kgid_t);
85extern bool may_setgroups(void); 85extern bool may_setgroups(void);
86extern void groups_sort(struct group_info *);
86 87
87/* 88/*
88 * The security context of a task 89 * The security context of a task
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index f36ecc2a5712..3b0ba54cc4d5 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -216,6 +216,8 @@ static inline void debugfs_remove(struct dentry *dentry)
216static inline void debugfs_remove_recursive(struct dentry *dentry) 216static inline void debugfs_remove_recursive(struct dentry *dentry)
217{ } 217{ }
218 218
219const struct file_operations *debugfs_real_fops(const struct file *filp);
220
219static inline int debugfs_file_get(struct dentry *dentry) 221static inline int debugfs_file_get(struct dentry *dentry)
220{ 222{
221 return 0; 223 return 0;
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 4178d2493547..5e335b6203f4 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -71,7 +71,7 @@ extern void delayacct_init(void);
71extern void __delayacct_tsk_init(struct task_struct *); 71extern void __delayacct_tsk_init(struct task_struct *);
72extern void __delayacct_tsk_exit(struct task_struct *); 72extern void __delayacct_tsk_exit(struct task_struct *);
73extern void __delayacct_blkio_start(void); 73extern void __delayacct_blkio_start(void);
74extern void __delayacct_blkio_end(void); 74extern void __delayacct_blkio_end(struct task_struct *);
75extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *); 75extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *);
76extern __u64 __delayacct_blkio_ticks(struct task_struct *); 76extern __u64 __delayacct_blkio_ticks(struct task_struct *);
77extern void __delayacct_freepages_start(void); 77extern void __delayacct_freepages_start(void);
@@ -122,10 +122,10 @@ static inline void delayacct_blkio_start(void)
122 __delayacct_blkio_start(); 122 __delayacct_blkio_start();
123} 123}
124 124
125static inline void delayacct_blkio_end(void) 125static inline void delayacct_blkio_end(struct task_struct *p)
126{ 126{
127 if (current->delays) 127 if (current->delays)
128 __delayacct_blkio_end(); 128 __delayacct_blkio_end(p);
129 delayacct_clear_flag(DELAYACCT_PF_BLKIO); 129 delayacct_clear_flag(DELAYACCT_PF_BLKIO);
130} 130}
131 131
@@ -169,7 +169,7 @@ static inline void delayacct_tsk_free(struct task_struct *tsk)
169{} 169{}
170static inline void delayacct_blkio_start(void) 170static inline void delayacct_blkio_start(void)
171{} 171{}
172static inline void delayacct_blkio_end(void) 172static inline void delayacct_blkio_end(struct task_struct *p)
173{} 173{}
174static inline int delayacct_add_tsk(struct taskstats *d, 174static inline int delayacct_add_tsk(struct taskstats *d,
175 struct task_struct *tsk) 175 struct task_struct *tsk)
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index e8f8e8fb244d..81ed9b2d84dc 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -704,7 +704,6 @@ static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
704 return ret; 704 return ret;
705} 705}
706 706
707#ifdef CONFIG_HAS_DMA
708static inline int dma_get_cache_alignment(void) 707static inline int dma_get_cache_alignment(void)
709{ 708{
710#ifdef ARCH_DMA_MINALIGN 709#ifdef ARCH_DMA_MINALIGN
@@ -712,7 +711,6 @@ static inline int dma_get_cache_alignment(void)
712#endif 711#endif
713 return 1; 712 return 1;
714} 713}
715#endif
716 714
717/* flags for the coherent memory api */ 715/* flags for the coherent memory api */
718#define DMA_MEMORY_EXCLUSIVE 0x01 716#define DMA_MEMORY_EXCLUSIVE 0x01
diff --git a/include/linux/efi.h b/include/linux/efi.h
index d813f7b04da7..29fdf8029cf6 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -140,11 +140,13 @@ struct efi_boot_memmap {
140 140
141struct capsule_info { 141struct capsule_info {
142 efi_capsule_header_t header; 142 efi_capsule_header_t header;
143 efi_capsule_header_t *capsule;
143 int reset_type; 144 int reset_type;
144 long index; 145 long index;
145 size_t count; 146 size_t count;
146 size_t total_size; 147 size_t total_size;
147 phys_addr_t *pages; 148 struct page **pages;
149 phys_addr_t *phys;
148 size_t page_bytes_remain; 150 size_t page_bytes_remain;
149}; 151};
150 152
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index f4ff47d4a893..fe0c349684fa 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -755,7 +755,7 @@ bool fscache_maybe_release_page(struct fscache_cookie *cookie,
755{ 755{
756 if (fscache_cookie_valid(cookie) && PageFsCache(page)) 756 if (fscache_cookie_valid(cookie) && PageFsCache(page))
757 return __fscache_maybe_release_page(cookie, page, gfp); 757 return __fscache_maybe_release_page(cookie, page, gfp);
758 return false; 758 return true;
759} 759}
760 760
761/** 761/**
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 2bab81951ced..3319df9727aa 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -332,6 +332,8 @@ extern int ftrace_text_reserved(const void *start, const void *end);
332 332
333extern int ftrace_nr_registered_ops(void); 333extern int ftrace_nr_registered_ops(void);
334 334
335struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);
336
335bool is_ftrace_trampoline(unsigned long addr); 337bool is_ftrace_trampoline(unsigned long addr);
336 338
337/* 339/*
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 55e672592fa9..7258cd676df4 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -66,9 +66,10 @@ struct gpio_irq_chip {
66 /** 66 /**
67 * @lock_key: 67 * @lock_key:
68 * 68 *
69 * Per GPIO IRQ chip lockdep class. 69 * Per GPIO IRQ chip lockdep classes.
70 */ 70 */
71 struct lock_class_key *lock_key; 71 struct lock_class_key *lock_key;
72 struct lock_class_key *request_key;
72 73
73 /** 74 /**
74 * @parent_handler: 75 * @parent_handler:
@@ -323,7 +324,8 @@ extern const char *gpiochip_is_requested(struct gpio_chip *chip,
323 324
324/* add/remove chips */ 325/* add/remove chips */
325extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, 326extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
326 struct lock_class_key *lock_key); 327 struct lock_class_key *lock_key,
328 struct lock_class_key *request_key);
327 329
328/** 330/**
329 * gpiochip_add_data() - register a gpio_chip 331 * gpiochip_add_data() - register a gpio_chip
@@ -350,11 +352,13 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
350 */ 352 */
351#ifdef CONFIG_LOCKDEP 353#ifdef CONFIG_LOCKDEP
352#define gpiochip_add_data(chip, data) ({ \ 354#define gpiochip_add_data(chip, data) ({ \
353 static struct lock_class_key key; \ 355 static struct lock_class_key lock_key; \
354 gpiochip_add_data_with_key(chip, data, &key); \ 356 static struct lock_class_key request_key; \
357 gpiochip_add_data_with_key(chip, data, &lock_key, \
358 &request_key); \
355 }) 359 })
356#else 360#else
357#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL) 361#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL, NULL)
358#endif 362#endif
359 363
360static inline int gpiochip_add(struct gpio_chip *chip) 364static inline int gpiochip_add(struct gpio_chip *chip)
@@ -429,7 +433,8 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
429 irq_flow_handler_t handler, 433 irq_flow_handler_t handler,
430 unsigned int type, 434 unsigned int type,
431 bool threaded, 435 bool threaded,
432 struct lock_class_key *lock_key); 436 struct lock_class_key *lock_key,
437 struct lock_class_key *request_key);
433 438
434#ifdef CONFIG_LOCKDEP 439#ifdef CONFIG_LOCKDEP
435 440
@@ -445,10 +450,12 @@ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
445 irq_flow_handler_t handler, 450 irq_flow_handler_t handler,
446 unsigned int type) 451 unsigned int type)
447{ 452{
448 static struct lock_class_key key; 453 static struct lock_class_key lock_key;
454 static struct lock_class_key request_key;
449 455
450 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, 456 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
451 handler, type, false, &key); 457 handler, type, false,
458 &lock_key, &request_key);
452} 459}
453 460
454static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, 461static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
@@ -458,10 +465,12 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
458 unsigned int type) 465 unsigned int type)
459{ 466{
460 467
461 static struct lock_class_key key; 468 static struct lock_class_key lock_key;
469 static struct lock_class_key request_key;
462 470
463 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, 471 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
464 handler, type, true, &key); 472 handler, type, true,
473 &lock_key, &request_key);
465} 474}
466#else 475#else
467static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, 476static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
@@ -471,7 +480,7 @@ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
471 unsigned int type) 480 unsigned int type)
472{ 481{
473 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, 482 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
474 handler, type, false, NULL); 483 handler, type, false, NULL, NULL);
475} 484}
476 485
477static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, 486static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
@@ -481,7 +490,7 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
481 unsigned int type) 490 unsigned int type)
482{ 491{
483 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, 492 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
484 handler, type, true, NULL); 493 handler, type, true, NULL, NULL);
485} 494}
486#endif /* CONFIG_LOCKDEP */ 495#endif /* CONFIG_LOCKDEP */
487 496
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index f3e97c5f94c9..6c9336626592 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -708,6 +708,7 @@ struct vmbus_channel {
708 u8 monitor_bit; 708 u8 monitor_bit;
709 709
710 bool rescind; /* got rescind msg */ 710 bool rescind; /* got rescind msg */
711 struct completion rescind_event;
711 712
712 u32 ringbuffer_gpadlhandle; 713 u32 ringbuffer_gpadlhandle;
713 714
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 7c3a365f7e12..fa14f834e4ed 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -15,6 +15,7 @@
15#include <linux/radix-tree.h> 15#include <linux/radix-tree.h>
16#include <linux/gfp.h> 16#include <linux/gfp.h>
17#include <linux/percpu.h> 17#include <linux/percpu.h>
18#include <linux/bug.h>
18 19
19struct idr { 20struct idr {
20 struct radix_tree_root idr_rt; 21 struct radix_tree_root idr_rt;
diff --git a/include/linux/iio/timer/stm32-lptim-trigger.h b/include/linux/iio/timer/stm32-lptim-trigger.h
index 34d59bfdce2d..464458d20b16 100644
--- a/include/linux/iio/timer/stm32-lptim-trigger.h
+++ b/include/linux/iio/timer/stm32-lptim-trigger.h
@@ -16,11 +16,14 @@
16#define LPTIM2_OUT "lptim2_out" 16#define LPTIM2_OUT "lptim2_out"
17#define LPTIM3_OUT "lptim3_out" 17#define LPTIM3_OUT "lptim3_out"
18 18
19#if IS_ENABLED(CONFIG_IIO_STM32_LPTIMER_TRIGGER) 19#if IS_REACHABLE(CONFIG_IIO_STM32_LPTIMER_TRIGGER)
20bool is_stm32_lptim_trigger(struct iio_trigger *trig); 20bool is_stm32_lptim_trigger(struct iio_trigger *trig);
21#else 21#else
22static inline bool is_stm32_lptim_trigger(struct iio_trigger *trig) 22static inline bool is_stm32_lptim_trigger(struct iio_trigger *trig)
23{ 23{
24#if IS_ENABLED(CONFIG_IIO_STM32_LPTIMER_TRIGGER)
25 pr_warn_once("stm32 lptim_trigger not linked in\n");
26#endif
24 return false; 27 return false;
25} 28}
26#endif 29#endif
diff --git a/include/linux/intel-pti.h b/include/linux/intel-pti.h
new file mode 100644
index 000000000000..2710d72de3c9
--- /dev/null
+++ b/include/linux/intel-pti.h
@@ -0,0 +1,43 @@
1/*
2 * Copyright (C) Intel 2011
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
14 *
15 * The PTI (Parallel Trace Interface) driver directs trace data routed from
16 * various parts in the system out through the Intel Penwell PTI port and
17 * out of the mobile device for analysis with a debugging tool
18 * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
19 * compact JTAG, standard.
20 *
21 * This header file will allow other parts of the OS to use the
22 * interface to write out it's contents for debugging a mobile system.
23 */
24
25#ifndef LINUX_INTEL_PTI_H_
26#define LINUX_INTEL_PTI_H_
27
28/* offset for last dword of any PTI message. Part of MIPI P1149.7 */
29#define PTI_LASTDWORD_DTS 0x30
30
31/* basic structure used as a write address to the PTI HW */
32struct pti_masterchannel {
33 u8 master;
34 u8 channel;
35};
36
37/* the following functions are defined in misc/pti.c */
38void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count);
39struct pti_masterchannel *pti_request_masterchannel(u8 type,
40 const char *thread_name);
41void pti_release_masterchannel(struct pti_masterchannel *mc);
42
43#endif /* LINUX_INTEL_PTI_H_ */
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index cb18c6290ca8..8415bf1a9776 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -273,7 +273,8 @@ struct ipv6_pinfo {
273 * 100: prefer care-of address 273 * 100: prefer care-of address
274 */ 274 */
275 dontfrag:1, 275 dontfrag:1,
276 autoflowlabel:1; 276 autoflowlabel:1,
277 autoflowlabel_set:1;
277 __u8 min_hopcount; 278 __u8 min_hopcount;
278 __u8 tclass; 279 __u8 tclass;
279 __be32 rcv_flowinfo; 280 __be32 rcv_flowinfo;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index e140f69163b6..a0231e96a578 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -212,6 +212,7 @@ struct irq_data {
212 * mask. Applies only to affinity managed irqs. 212 * mask. Applies only to affinity managed irqs.
213 * IRQD_SINGLE_TARGET - IRQ allows only a single affinity target 213 * IRQD_SINGLE_TARGET - IRQ allows only a single affinity target
214 * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set 214 * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set
215 * IRQD_CAN_RESERVE - Can use reservation mode
215 */ 216 */
216enum { 217enum {
217 IRQD_TRIGGER_MASK = 0xf, 218 IRQD_TRIGGER_MASK = 0xf,
@@ -233,6 +234,7 @@ enum {
233 IRQD_MANAGED_SHUTDOWN = (1 << 23), 234 IRQD_MANAGED_SHUTDOWN = (1 << 23),
234 IRQD_SINGLE_TARGET = (1 << 24), 235 IRQD_SINGLE_TARGET = (1 << 24),
235 IRQD_DEFAULT_TRIGGER_SET = (1 << 25), 236 IRQD_DEFAULT_TRIGGER_SET = (1 << 25),
237 IRQD_CAN_RESERVE = (1 << 26),
236}; 238};
237 239
238#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) 240#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -377,6 +379,21 @@ static inline bool irqd_is_managed_and_shutdown(struct irq_data *d)
377 return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN; 379 return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN;
378} 380}
379 381
382static inline void irqd_set_can_reserve(struct irq_data *d)
383{
384 __irqd_to_state(d) |= IRQD_CAN_RESERVE;
385}
386
387static inline void irqd_clr_can_reserve(struct irq_data *d)
388{
389 __irqd_to_state(d) &= ~IRQD_CAN_RESERVE;
390}
391
392static inline bool irqd_can_reserve(struct irq_data *d)
393{
394 return __irqd_to_state(d) & IRQD_CAN_RESERVE;
395}
396
380#undef __irqd_to_state 397#undef __irqd_to_state
381 398
382static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) 399static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index dd418955962b..25b33b664537 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -230,7 +230,7 @@ irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip,
230 data->chip = chip; 230 data->chip = chip;
231} 231}
232 232
233static inline int irq_balancing_disabled(unsigned int irq) 233static inline bool irq_balancing_disabled(unsigned int irq)
234{ 234{
235 struct irq_desc *desc; 235 struct irq_desc *desc;
236 236
@@ -238,7 +238,7 @@ static inline int irq_balancing_disabled(unsigned int irq)
238 return desc->status_use_accessors & IRQ_NO_BALANCING_MASK; 238 return desc->status_use_accessors & IRQ_NO_BALANCING_MASK;
239} 239}
240 240
241static inline int irq_is_percpu(unsigned int irq) 241static inline bool irq_is_percpu(unsigned int irq)
242{ 242{
243 struct irq_desc *desc; 243 struct irq_desc *desc;
244 244
@@ -246,7 +246,7 @@ static inline int irq_is_percpu(unsigned int irq)
246 return desc->status_use_accessors & IRQ_PER_CPU; 246 return desc->status_use_accessors & IRQ_PER_CPU;
247} 247}
248 248
249static inline int irq_is_percpu_devid(unsigned int irq) 249static inline bool irq_is_percpu_devid(unsigned int irq)
250{ 250{
251 struct irq_desc *desc; 251 struct irq_desc *desc;
252 252
@@ -255,12 +255,15 @@ static inline int irq_is_percpu_devid(unsigned int irq)
255} 255}
256 256
257static inline void 257static inline void
258irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class) 258irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
259 struct lock_class_key *request_class)
259{ 260{
260 struct irq_desc *desc = irq_to_desc(irq); 261 struct irq_desc *desc = irq_to_desc(irq);
261 262
262 if (desc) 263 if (desc) {
263 lockdep_set_class(&desc->lock, class); 264 lockdep_set_class(&desc->lock, lock_class);
265 lockdep_set_class(&desc->request_mutex, request_class);
266 }
264} 267}
265 268
266#ifdef CONFIG_IRQ_PREFLOW_FASTEOI 269#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index a34355d19546..48c7e86bb556 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -113,7 +113,7 @@ struct irq_domain_ops {
113 unsigned int nr_irqs, void *arg); 113 unsigned int nr_irqs, void *arg);
114 void (*free)(struct irq_domain *d, unsigned int virq, 114 void (*free)(struct irq_domain *d, unsigned int virq,
115 unsigned int nr_irqs); 115 unsigned int nr_irqs);
116 int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool early); 116 int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve);
117 void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); 117 void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
118 int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec, 118 int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
119 unsigned long *out_hwirq, unsigned int *out_type); 119 unsigned long *out_hwirq, unsigned int *out_type);
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 46cb57d5eb13..1b3996ff3f16 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -27,22 +27,18 @@
27# define trace_hardirq_enter() \ 27# define trace_hardirq_enter() \
28do { \ 28do { \
29 current->hardirq_context++; \ 29 current->hardirq_context++; \
30 crossrelease_hist_start(XHLOCK_HARD); \
31} while (0) 30} while (0)
32# define trace_hardirq_exit() \ 31# define trace_hardirq_exit() \
33do { \ 32do { \
34 current->hardirq_context--; \ 33 current->hardirq_context--; \
35 crossrelease_hist_end(XHLOCK_HARD); \
36} while (0) 34} while (0)
37# define lockdep_softirq_enter() \ 35# define lockdep_softirq_enter() \
38do { \ 36do { \
39 current->softirq_context++; \ 37 current->softirq_context++; \
40 crossrelease_hist_start(XHLOCK_SOFT); \
41} while (0) 38} while (0)
42# define lockdep_softirq_exit() \ 39# define lockdep_softirq_exit() \
43do { \ 40do { \
44 current->softirq_context--; \ 41 current->softirq_context--; \
45 crossrelease_hist_end(XHLOCK_SOFT); \
46} while (0) 42} while (0)
47# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, 43# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
48#else 44#else
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index c7b368c734af..e0340ca08d98 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -160,6 +160,8 @@ extern void arch_jump_label_transform_static(struct jump_entry *entry,
160extern int jump_label_text_reserved(void *start, void *end); 160extern int jump_label_text_reserved(void *start, void *end);
161extern void static_key_slow_inc(struct static_key *key); 161extern void static_key_slow_inc(struct static_key *key);
162extern void static_key_slow_dec(struct static_key *key); 162extern void static_key_slow_dec(struct static_key *key);
163extern void static_key_slow_inc_cpuslocked(struct static_key *key);
164extern void static_key_slow_dec_cpuslocked(struct static_key *key);
163extern void jump_label_apply_nops(struct module *mod); 165extern void jump_label_apply_nops(struct module *mod);
164extern int static_key_count(struct static_key *key); 166extern int static_key_count(struct static_key *key);
165extern void static_key_enable(struct static_key *key); 167extern void static_key_enable(struct static_key *key);
@@ -222,6 +224,9 @@ static inline void static_key_slow_dec(struct static_key *key)
222 atomic_dec(&key->enabled); 224 atomic_dec(&key->enabled);
223} 225}
224 226
227#define static_key_slow_inc_cpuslocked(key) static_key_slow_inc(key)
228#define static_key_slow_dec_cpuslocked(key) static_key_slow_dec(key)
229
225static inline int jump_label_text_reserved(void *start, void *end) 230static inline int jump_label_text_reserved(void *start, void *end)
226{ 231{
227 return 0; 232 return 0;
@@ -416,6 +421,8 @@ extern bool ____wrong_branch_error(void);
416 421
417#define static_branch_inc(x) static_key_slow_inc(&(x)->key) 422#define static_branch_inc(x) static_key_slow_inc(&(x)->key)
418#define static_branch_dec(x) static_key_slow_dec(&(x)->key) 423#define static_branch_dec(x) static_key_slow_dec(&(x)->key)
424#define static_branch_inc_cpuslocked(x) static_key_slow_inc_cpuslocked(&(x)->key)
425#define static_branch_dec_cpuslocked(x) static_key_slow_dec_cpuslocked(&(x)->key)
419 426
420/* 427/*
421 * Normal usage; boolean enable/disable. 428 * Normal usage; boolean enable/disable.
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
deleted file mode 100644
index ea32a7d3cf1b..000000000000
--- a/include/linux/kmemcheck.h
+++ /dev/null
@@ -1 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 893d6d606cd0..6bdd4b9f6611 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -232,7 +232,7 @@ struct kvm_vcpu {
232 struct mutex mutex; 232 struct mutex mutex;
233 struct kvm_run *run; 233 struct kvm_run *run;
234 234
235 int guest_fpu_loaded, guest_xcr0_loaded; 235 int guest_xcr0_loaded;
236 struct swait_queue_head wq; 236 struct swait_queue_head wq;
237 struct pid __rcu *pid; 237 struct pid __rcu *pid;
238 int sigset_active; 238 int sigset_active;
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index a842551fe044..3251d9c0d313 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -158,12 +158,6 @@ struct lockdep_map {
158 int cpu; 158 int cpu;
159 unsigned long ip; 159 unsigned long ip;
160#endif 160#endif
161#ifdef CONFIG_LOCKDEP_CROSSRELEASE
162 /*
163 * Whether it's a crosslock.
164 */
165 int cross;
166#endif
167}; 161};
168 162
169static inline void lockdep_copy_map(struct lockdep_map *to, 163static inline void lockdep_copy_map(struct lockdep_map *to,
@@ -267,96 +261,9 @@ struct held_lock {
267 unsigned int hardirqs_off:1; 261 unsigned int hardirqs_off:1;
268 unsigned int references:12; /* 32 bits */ 262 unsigned int references:12; /* 32 bits */
269 unsigned int pin_count; 263 unsigned int pin_count;
270#ifdef CONFIG_LOCKDEP_CROSSRELEASE
271 /*
272 * Generation id.
273 *
274 * A value of cross_gen_id will be stored when holding this,
275 * which is globally increased whenever each crosslock is held.
276 */
277 unsigned int gen_id;
278#endif
279};
280
281#ifdef CONFIG_LOCKDEP_CROSSRELEASE
282#define MAX_XHLOCK_TRACE_ENTRIES 5
283
284/*
285 * This is for keeping locks waiting for commit so that true dependencies
286 * can be added at commit step.
287 */
288struct hist_lock {
289 /*
290 * Id for each entry in the ring buffer. This is used to
291 * decide whether the ring buffer was overwritten or not.
292 *
293 * For example,
294 *
295 * |<----------- hist_lock ring buffer size ------->|
296 * pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii
297 * wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii.......................
298 *
299 * where 'p' represents an acquisition in process
300 * context, 'i' represents an acquisition in irq
301 * context.
302 *
303 * In this example, the ring buffer was overwritten by
304 * acquisitions in irq context, that should be detected on
305 * rollback or commit.
306 */
307 unsigned int hist_id;
308
309 /*
310 * Seperate stack_trace data. This will be used at commit step.
311 */
312 struct stack_trace trace;
313 unsigned long trace_entries[MAX_XHLOCK_TRACE_ENTRIES];
314
315 /*
316 * Seperate hlock instance. This will be used at commit step.
317 *
318 * TODO: Use a smaller data structure containing only necessary
319 * data. However, we should make lockdep code able to handle the
320 * smaller one first.
321 */
322 struct held_lock hlock;
323}; 264};
324 265
325/* 266/*
326 * To initialize a lock as crosslock, lockdep_init_map_crosslock() should
327 * be called instead of lockdep_init_map().
328 */
329struct cross_lock {
330 /*
331 * When more than one acquisition of crosslocks are overlapped,
332 * we have to perform commit for them based on cross_gen_id of
333 * the first acquisition, which allows us to add more true
334 * dependencies.
335 *
336 * Moreover, when no acquisition of a crosslock is in progress,
337 * we should not perform commit because the lock might not exist
338 * any more, which might cause incorrect memory access. So we
339 * have to track the number of acquisitions of a crosslock.
340 */
341 int nr_acquire;
342
343 /*
344 * Seperate hlock instance. This will be used at commit step.
345 *
346 * TODO: Use a smaller data structure containing only necessary
347 * data. However, we should make lockdep code able to handle the
348 * smaller one first.
349 */
350 struct held_lock hlock;
351};
352
353struct lockdep_map_cross {
354 struct lockdep_map map;
355 struct cross_lock xlock;
356};
357#endif
358
359/*
360 * Initialization, self-test and debugging-output methods: 267 * Initialization, self-test and debugging-output methods:
361 */ 268 */
362extern void lockdep_info(void); 269extern void lockdep_info(void);
@@ -560,37 +467,6 @@ enum xhlock_context_t {
560 XHLOCK_CTX_NR, 467 XHLOCK_CTX_NR,
561}; 468};
562 469
563#ifdef CONFIG_LOCKDEP_CROSSRELEASE
564extern void lockdep_init_map_crosslock(struct lockdep_map *lock,
565 const char *name,
566 struct lock_class_key *key,
567 int subclass);
568extern void lock_commit_crosslock(struct lockdep_map *lock);
569
570/*
571 * What we essencially have to initialize is 'nr_acquire'. Other members
572 * will be initialized in add_xlock().
573 */
574#define STATIC_CROSS_LOCK_INIT() \
575 { .nr_acquire = 0,}
576
577#define STATIC_CROSS_LOCKDEP_MAP_INIT(_name, _key) \
578 { .map.name = (_name), .map.key = (void *)(_key), \
579 .map.cross = 1, .xlock = STATIC_CROSS_LOCK_INIT(), }
580
581/*
582 * To initialize a lockdep_map statically use this macro.
583 * Note that _name must not be NULL.
584 */
585#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
586 { .name = (_name), .key = (void *)(_key), .cross = 0, }
587
588extern void crossrelease_hist_start(enum xhlock_context_t c);
589extern void crossrelease_hist_end(enum xhlock_context_t c);
590extern void lockdep_invariant_state(bool force);
591extern void lockdep_init_task(struct task_struct *task);
592extern void lockdep_free_task(struct task_struct *task);
593#else /* !CROSSRELEASE */
594#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0) 470#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
595/* 471/*
596 * To initialize a lockdep_map statically use this macro. 472 * To initialize a lockdep_map statically use this macro.
@@ -599,12 +475,9 @@ extern void lockdep_free_task(struct task_struct *task);
599#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \ 475#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
600 { .name = (_name), .key = (void *)(_key), } 476 { .name = (_name), .key = (void *)(_key), }
601 477
602static inline void crossrelease_hist_start(enum xhlock_context_t c) {}
603static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
604static inline void lockdep_invariant_state(bool force) {} 478static inline void lockdep_invariant_state(bool force) {}
605static inline void lockdep_init_task(struct task_struct *task) {} 479static inline void lockdep_init_task(struct task_struct *task) {}
606static inline void lockdep_free_task(struct task_struct *task) {} 480static inline void lockdep_free_task(struct task_struct *task) {}
607#endif /* CROSSRELEASE */
608 481
609#ifdef CONFIG_LOCK_STAT 482#ifdef CONFIG_LOCK_STAT
610 483
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h
index a2a1318a3d0c..c3d3f04d8cc6 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/mfd/rtsx_pci.h
@@ -915,10 +915,10 @@ enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
915#define LTR_L1SS_PWR_GATE_CHECK_CARD_EN BIT(6) 915#define LTR_L1SS_PWR_GATE_CHECK_CARD_EN BIT(6)
916 916
917enum dev_aspm_mode { 917enum dev_aspm_mode {
918 DEV_ASPM_DISABLE = 0,
919 DEV_ASPM_DYNAMIC, 918 DEV_ASPM_DYNAMIC,
920 DEV_ASPM_BACKDOOR, 919 DEV_ASPM_BACKDOOR,
921 DEV_ASPM_STATIC, 920 DEV_ASPM_STATIC,
921 DEV_ASPM_DISABLE,
922}; 922};
923 923
924/* 924/*
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index c403151133e9..fb7e8b205eb9 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -36,6 +36,7 @@
36#include <linux/kernel.h> 36#include <linux/kernel.h>
37#include <linux/completion.h> 37#include <linux/completion.h>
38#include <linux/pci.h> 38#include <linux/pci.h>
39#include <linux/irq.h>
39#include <linux/spinlock_types.h> 40#include <linux/spinlock_types.h>
40#include <linux/semaphore.h> 41#include <linux/semaphore.h>
41#include <linux/slab.h> 42#include <linux/slab.h>
@@ -567,6 +568,7 @@ struct mlx5_core_sriov {
567}; 568};
568 569
569struct mlx5_irq_info { 570struct mlx5_irq_info {
571 cpumask_var_t mask;
570 char name[MLX5_MAX_IRQ_NAME]; 572 char name[MLX5_MAX_IRQ_NAME];
571}; 573};
572 574
@@ -1062,7 +1064,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
1062 enum mlx5_eq_type type); 1064 enum mlx5_eq_type type);
1063int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq); 1065int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
1064int mlx5_start_eqs(struct mlx5_core_dev *dev); 1066int mlx5_start_eqs(struct mlx5_core_dev *dev);
1065int mlx5_stop_eqs(struct mlx5_core_dev *dev); 1067void mlx5_stop_eqs(struct mlx5_core_dev *dev);
1066int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, 1068int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
1067 unsigned int *irqn); 1069 unsigned int *irqn);
1068int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); 1070int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
@@ -1269,7 +1271,23 @@ enum {
1269static inline const struct cpumask * 1271static inline const struct cpumask *
1270mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector) 1272mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
1271{ 1273{
1272 return pci_irq_get_affinity(dev->pdev, MLX5_EQ_VEC_COMP_BASE + vector); 1274 const struct cpumask *mask;
1275 struct irq_desc *desc;
1276 unsigned int irq;
1277 int eqn;
1278 int err;
1279
1280 err = mlx5_vector2eqn(dev, vector, &eqn, &irq);
1281 if (err)
1282 return NULL;
1283
1284 desc = irq_to_desc(irq);
1285#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
1286 mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
1287#else
1288 mask = desc->irq_common_data.affinity;
1289#endif
1290 return mask;
1273} 1291}
1274 1292
1275#endif /* MLX5_DRIVER_H */ 1293#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 7e88c8e7f374..7ac7bd76c7af 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -147,7 +147,7 @@ enum {
147 MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, 147 MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
148 MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, 148 MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
149 MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773, 149 MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773,
150 MLX5_CMD_OP_SET_RATE_LIMIT = 0x780, 150 MLX5_CMD_OP_SET_PP_RATE_LIMIT = 0x780,
151 MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781, 151 MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781,
152 MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782, 152 MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782,
153 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT = 0x783, 153 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT = 0x783,
@@ -1030,8 +1030,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
1030 u8 log_max_wq_sz[0x5]; 1030 u8 log_max_wq_sz[0x5];
1031 1031
1032 u8 nic_vport_change_event[0x1]; 1032 u8 nic_vport_change_event[0x1];
1033 u8 disable_local_lb[0x1]; 1033 u8 disable_local_lb_uc[0x1];
1034 u8 reserved_at_3e2[0x9]; 1034 u8 disable_local_lb_mc[0x1];
1035 u8 reserved_at_3e3[0x8];
1035 u8 log_max_vlan_list[0x5]; 1036 u8 log_max_vlan_list[0x5];
1036 u8 reserved_at_3f0[0x3]; 1037 u8 reserved_at_3f0[0x3];
1037 u8 log_max_current_mc_list[0x5]; 1038 u8 log_max_current_mc_list[0x5];
@@ -7257,7 +7258,7 @@ struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
7257 u8 vxlan_udp_port[0x10]; 7258 u8 vxlan_udp_port[0x10];
7258}; 7259};
7259 7260
7260struct mlx5_ifc_set_rate_limit_out_bits { 7261struct mlx5_ifc_set_pp_rate_limit_out_bits {
7261 u8 status[0x8]; 7262 u8 status[0x8];
7262 u8 reserved_at_8[0x18]; 7263 u8 reserved_at_8[0x18];
7263 7264
@@ -7266,7 +7267,7 @@ struct mlx5_ifc_set_rate_limit_out_bits {
7266 u8 reserved_at_40[0x40]; 7267 u8 reserved_at_40[0x40];
7267}; 7268};
7268 7269
7269struct mlx5_ifc_set_rate_limit_in_bits { 7270struct mlx5_ifc_set_pp_rate_limit_in_bits {
7270 u8 opcode[0x10]; 7271 u8 opcode[0x10];
7271 u8 reserved_at_10[0x10]; 7272 u8 reserved_at_10[0x10];
7272 7273
@@ -7279,6 +7280,8 @@ struct mlx5_ifc_set_rate_limit_in_bits {
7279 u8 reserved_at_60[0x20]; 7280 u8 reserved_at_60[0x20];
7280 7281
7281 u8 rate_limit[0x20]; 7282 u8 rate_limit[0x20];
7283
7284 u8 reserved_at_a0[0x160];
7282}; 7285};
7283 7286
7284struct mlx5_ifc_access_register_out_bits { 7287struct mlx5_ifc_access_register_out_bits {
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 49b4257ce1ea..f3075d6c7e82 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -85,7 +85,7 @@ struct netlink_ext_ack {
85 * to the lack of an output buffer.) 85 * to the lack of an output buffer.)
86 */ 86 */
87#define NL_SET_ERR_MSG(extack, msg) do { \ 87#define NL_SET_ERR_MSG(extack, msg) do { \
88 static const char __msg[] = (msg); \ 88 static const char __msg[] = msg; \
89 struct netlink_ext_ack *__extack = (extack); \ 89 struct netlink_ext_ack *__extack = (extack); \
90 \ 90 \
91 if (__extack) \ 91 if (__extack) \
@@ -101,7 +101,7 @@ struct netlink_ext_ack {
101} while (0) 101} while (0)
102 102
103#define NL_SET_ERR_MSG_ATTR(extack, attr, msg) do { \ 103#define NL_SET_ERR_MSG_ATTR(extack, attr, msg) do { \
104 static const char __msg[] = (msg); \ 104 static const char __msg[] = msg; \
105 struct netlink_ext_ack *__extack = (extack); \ 105 struct netlink_ext_ack *__extack = (extack); \
106 \ 106 \
107 if (__extack) { \ 107 if (__extack) { \
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 01c91d874a57..5bad038ac012 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -67,6 +67,15 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)
67} 67}
68 68
69/* 69/*
70 * Use this helper if tsk->mm != mm and the victim mm needs a special
71 * handling. This is guaranteed to stay true after once set.
72 */
73static inline bool mm_is_oom_victim(struct mm_struct *mm)
74{
75 return test_bit(MMF_OOM_VICTIM, &mm->flags);
76}
77
78/*
70 * Checks whether a page fault on the given mm is still reliable. 79 * Checks whether a page fault on the given mm is still reliable.
71 * This is no longer true if the oom reaper started to reap the 80 * This is no longer true if the oom reaper started to reap the
72 * address space which is reflected by MMF_UNSTABLE flag set in 81 * address space which is reflected by MMF_UNSTABLE flag set in
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 0403894147a3..c170c9250c8b 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1674,6 +1674,9 @@ static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
1674static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, 1674static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
1675 unsigned int devfn) 1675 unsigned int devfn)
1676{ return NULL; } 1676{ return NULL; }
1677static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
1678 unsigned int bus, unsigned int devfn)
1679{ return NULL; }
1677 1680
1678static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } 1681static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1679static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; } 1682static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2c9c87d8a0c1..7546822a1d74 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -15,6 +15,7 @@
15#define _LINUX_PERF_EVENT_H 15#define _LINUX_PERF_EVENT_H
16 16
17#include <uapi/linux/perf_event.h> 17#include <uapi/linux/perf_event.h>
18#include <uapi/linux/bpf_perf_event.h>
18 19
19/* 20/*
20 * Kernel-internal data types and definitions: 21 * Kernel-internal data types and definitions:
@@ -787,7 +788,7 @@ struct perf_output_handle {
787}; 788};
788 789
789struct bpf_perf_event_data_kern { 790struct bpf_perf_event_data_kern {
790 struct pt_regs *regs; 791 bpf_user_pt_regs_t *regs;
791 struct perf_sample_data *data; 792 struct perf_sample_data *data;
792 struct perf_event *event; 793 struct perf_event *event;
793}; 794};
@@ -1177,6 +1178,9 @@ extern void perf_bp_event(struct perf_event *event, void *data);
1177 (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL) 1178 (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
1178# define perf_instruction_pointer(regs) instruction_pointer(regs) 1179# define perf_instruction_pointer(regs) instruction_pointer(regs)
1179#endif 1180#endif
1181#ifndef perf_arch_bpf_user_pt_regs
1182# define perf_arch_bpf_user_pt_regs(regs) regs
1183#endif
1180 1184
1181static inline bool has_branch_stack(struct perf_event *event) 1185static inline bool has_branch_stack(struct perf_event *event)
1182{ 1186{
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 65d39115f06d..492ed473ba7e 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -765,6 +765,7 @@ extern int pm_generic_poweroff_late(struct device *dev);
765extern int pm_generic_poweroff(struct device *dev); 765extern int pm_generic_poweroff(struct device *dev);
766extern void pm_generic_complete(struct device *dev); 766extern void pm_generic_complete(struct device *dev);
767 767
768extern void dev_pm_skip_next_resume_phases(struct device *dev);
768extern bool dev_pm_smart_suspend_and_suspended(struct device *dev); 769extern bool dev_pm_smart_suspend_and_suspended(struct device *dev);
769 770
770#else /* !CONFIG_PM_SLEEP */ 771#else /* !CONFIG_PM_SLEEP */
diff --git a/include/linux/pti.h b/include/linux/pti.h
index b3ea01a3197e..0174883a935a 100644
--- a/include/linux/pti.h
+++ b/include/linux/pti.h
@@ -1,43 +1,11 @@
1/* 1// SPDX-License-Identifier: GPL-2.0
2 * Copyright (C) Intel 2011 2#ifndef _INCLUDE_PTI_H
3 * 3#define _INCLUDE_PTI_H
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
14 *
15 * The PTI (Parallel Trace Interface) driver directs trace data routed from
16 * various parts in the system out through the Intel Penwell PTI port and
17 * out of the mobile device for analysis with a debugging tool
18 * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
19 * compact JTAG, standard.
20 *
21 * This header file will allow other parts of the OS to use the
22 * interface to write out it's contents for debugging a mobile system.
23 */
24 4
25#ifndef PTI_H_ 5#ifdef CONFIG_PAGE_TABLE_ISOLATION
26#define PTI_H_ 6#include <asm/pti.h>
7#else
8static inline void pti_init(void) { }
9#endif
27 10
28/* offset for last dword of any PTI message. Part of MIPI P1149.7 */ 11#endif
29#define PTI_LASTDWORD_DTS 0x30
30
31/* basic structure used as a write address to the PTI HW */
32struct pti_masterchannel {
33 u8 master;
34 u8 channel;
35};
36
37/* the following functions are defined in misc/pti.c */
38void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count);
39struct pti_masterchannel *pti_request_masterchannel(u8 type,
40 const char *thread_name);
41void pti_release_masterchannel(struct pti_masterchannel *mc);
42
43#endif /*PTI_H_*/
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 37b4bb2545b3..d72b2e7dd500 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -101,12 +101,18 @@ static inline bool ptr_ring_full_bh(struct ptr_ring *r)
101 101
102/* Note: callers invoking this in a loop must use a compiler barrier, 102/* Note: callers invoking this in a loop must use a compiler barrier,
103 * for example cpu_relax(). Callers must hold producer_lock. 103 * for example cpu_relax(). Callers must hold producer_lock.
104 * Callers are responsible for making sure pointer that is being queued
105 * points to a valid data.
104 */ 106 */
105static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) 107static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
106{ 108{
107 if (unlikely(!r->size) || r->queue[r->producer]) 109 if (unlikely(!r->size) || r->queue[r->producer])
108 return -ENOSPC; 110 return -ENOSPC;
109 111
112 /* Make sure the pointer we are storing points to a valid data. */
113 /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */
114 smp_wmb();
115
110 r->queue[r->producer++] = ptr; 116 r->queue[r->producer++] = ptr;
111 if (unlikely(r->producer >= r->size)) 117 if (unlikely(r->producer >= r->size))
112 r->producer = 0; 118 r->producer = 0;
@@ -168,6 +174,15 @@ static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
168 * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL. 174 * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL.
169 * If ring is never resized, and if the pointer is merely 175 * If ring is never resized, and if the pointer is merely
170 * tested, there's no need to take the lock - see e.g. __ptr_ring_empty. 176 * tested, there's no need to take the lock - see e.g. __ptr_ring_empty.
177 * However, if called outside the lock, and if some other CPU
178 * consumes ring entries at the same time, the value returned
179 * is not guaranteed to be correct.
180 * In this case - to avoid incorrectly detecting the ring
181 * as empty - the CPU consuming the ring entries is responsible
182 * for either consuming all ring entries until the ring is empty,
183 * or synchronizing with some other CPU and causing it to
184 * execute __ptr_ring_peek and/or consume the ring enteries
185 * after the synchronization point.
171 */ 186 */
172static inline void *__ptr_ring_peek(struct ptr_ring *r) 187static inline void *__ptr_ring_peek(struct ptr_ring *r)
173{ 188{
@@ -176,10 +191,7 @@ static inline void *__ptr_ring_peek(struct ptr_ring *r)
176 return NULL; 191 return NULL;
177} 192}
178 193
179/* Note: callers invoking this in a loop must use a compiler barrier, 194/* See __ptr_ring_peek above for locking rules. */
180 * for example cpu_relax(). Callers must take consumer_lock
181 * if the ring is ever resized - see e.g. ptr_ring_empty.
182 */
183static inline bool __ptr_ring_empty(struct ptr_ring *r) 195static inline bool __ptr_ring_empty(struct ptr_ring *r)
184{ 196{
185 return !__ptr_ring_peek(r); 197 return !__ptr_ring_peek(r);
@@ -275,6 +287,9 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r)
275 if (ptr) 287 if (ptr)
276 __ptr_ring_discard_one(r); 288 __ptr_ring_discard_one(r);
277 289
290 /* Make sure anyone accessing data through the pointer is up to date. */
291 /* Pairs with smp_wmb in __ptr_ring_produce. */
292 smp_read_barrier_depends();
278 return ptr; 293 return ptr;
279} 294}
280 295
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index d574361943ea..fcbeed4053ef 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -99,6 +99,8 @@ extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
99 struct rb_root *root); 99 struct rb_root *root);
100extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, 100extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
101 struct rb_root *root); 101 struct rb_root *root);
102extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
103 struct rb_root_cached *root);
102 104
103static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, 105static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
104 struct rb_node **rb_link) 106 struct rb_node **rb_link)
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index a328e8181e49..e4b257ff881b 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -101,44 +101,6 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
101} 101}
102 102
103/** 103/**
104 * hlist_nulls_add_tail_rcu
105 * @n: the element to add to the hash list.
106 * @h: the list to add to.
107 *
108 * Description:
109 * Adds the specified element to the end of the specified hlist_nulls,
110 * while permitting racing traversals. NOTE: tail insertion requires
111 * list traversal.
112 *
113 * The caller must take whatever precautions are necessary
114 * (such as holding appropriate locks) to avoid racing
115 * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
116 * or hlist_nulls_del_rcu(), running on this same list.
117 * However, it is perfectly legal to run concurrently with
118 * the _rcu list-traversal primitives, such as
119 * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
120 * problems on Alpha CPUs. Regardless of the type of CPU, the
121 * list-traversal primitive must be guarded by rcu_read_lock().
122 */
123static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
124 struct hlist_nulls_head *h)
125{
126 struct hlist_nulls_node *i, *last = NULL;
127
128 for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
129 i = hlist_nulls_next_rcu(i))
130 last = i;
131
132 if (last) {
133 n->next = last->next;
134 n->pprev = &last->next;
135 rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
136 } else {
137 hlist_nulls_add_head_rcu(n, h);
138 }
139}
140
141/**
142 * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type 104 * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
143 * @tpos: the type * to use as a loop cursor. 105 * @tpos: the type * to use as a loop cursor.
144 * @pos: the &struct hlist_nulls_node to use as a loop cursor. 106 * @pos: the &struct hlist_nulls_node to use as a loop cursor.
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
index cc0072e93e36..857a72ceb794 100644
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
@@ -10,9 +10,6 @@
10 */ 10 */
11typedef struct { 11typedef struct {
12 arch_rwlock_t raw_lock; 12 arch_rwlock_t raw_lock;
13#ifdef CONFIG_GENERIC_LOCKBREAK
14 unsigned int break_lock;
15#endif
16#ifdef CONFIG_DEBUG_SPINLOCK 13#ifdef CONFIG_DEBUG_SPINLOCK
17 unsigned int magic, owner_cpu; 14 unsigned int magic, owner_cpu;
18 void *owner; 15 void *owner;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 21991d668d35..d2588263a989 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -849,17 +849,6 @@ struct task_struct {
849 struct held_lock held_locks[MAX_LOCK_DEPTH]; 849 struct held_lock held_locks[MAX_LOCK_DEPTH];
850#endif 850#endif
851 851
852#ifdef CONFIG_LOCKDEP_CROSSRELEASE
853#define MAX_XHLOCKS_NR 64UL
854 struct hist_lock *xhlocks; /* Crossrelease history locks */
855 unsigned int xhlock_idx;
856 /* For restoring at history boundaries */
857 unsigned int xhlock_idx_hist[XHLOCK_CTX_NR];
858 unsigned int hist_id;
859 /* For overwrite check at each context exit */
860 unsigned int hist_id_save[XHLOCK_CTX_NR];
861#endif
862
863#ifdef CONFIG_UBSAN 852#ifdef CONFIG_UBSAN
864 unsigned int in_ubsan; 853 unsigned int in_ubsan;
865#endif 854#endif
@@ -1503,7 +1492,11 @@ static inline void set_task_comm(struct task_struct *tsk, const char *from)
1503 __set_task_comm(tsk, from, false); 1492 __set_task_comm(tsk, from, false);
1504} 1493}
1505 1494
1506extern char *get_task_comm(char *to, struct task_struct *tsk); 1495extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
1496#define get_task_comm(buf, tsk) ({ \
1497 BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \
1498 __get_task_comm(buf, sizeof(buf), tsk); \
1499})
1507 1500
1508#ifdef CONFIG_SMP 1501#ifdef CONFIG_SMP
1509void scheduler_ipi(void); 1502void scheduler_ipi(void);
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index 9c8847395b5e..ec912d01126f 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -70,6 +70,7 @@ static inline int get_dumpable(struct mm_struct *mm)
70#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ 70#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */
71#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ 71#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */
72#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ 72#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
73#define MMF_OOM_VICTIM 25 /* mm is the oom victim */
73#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) 74#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
74 75
75#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ 76#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
index e69402d4a8ae..d609e6dc5bad 100644
--- a/include/linux/serdev.h
+++ b/include/linux/serdev.h
@@ -184,7 +184,7 @@ static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
184 struct serdev_device *serdev = ctrl->serdev; 184 struct serdev_device *serdev = ctrl->serdev;
185 185
186 if (!serdev || !serdev->ops->receive_buf) 186 if (!serdev || !serdev->ops->receive_buf)
187 return -EINVAL; 187 return 0;
188 188
189 return serdev->ops->receive_buf(serdev, data, count); 189 return serdev->ops->receive_buf(serdev, data, count);
190} 190}
diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h
index ff3642d267f7..94081e9a5010 100644
--- a/include/linux/sh_eth.h
+++ b/include/linux/sh_eth.h
@@ -17,7 +17,6 @@ struct sh_eth_plat_data {
17 unsigned char mac_addr[ETH_ALEN]; 17 unsigned char mac_addr[ETH_ALEN];
18 unsigned no_ether_link:1; 18 unsigned no_ether_link:1;
19 unsigned ether_link_active_low:1; 19 unsigned ether_link_active_low:1;
20 unsigned needs_init:1;
21}; 20};
22 21
23#endif 22#endif
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index bc486ef23f20..a38c80e9f91e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1406,8 +1406,7 @@ static inline struct sk_buff *skb_get(struct sk_buff *skb)
1406} 1406}
1407 1407
1408/* 1408/*
1409 * If users == 1, we are the only owner and are can avoid redundant 1409 * If users == 1, we are the only owner and can avoid redundant atomic changes.
1410 * atomic change.
1411 */ 1410 */
1412 1411
1413/** 1412/**
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 7b2170bfd6e7..bc6bb325d1bf 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -126,7 +126,7 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
126 * for that name. This appears in the sysfs "modalias" attribute 126 * for that name. This appears in the sysfs "modalias" attribute
127 * for driver coldplugging, and in uevents used for hotplugging 127 * for driver coldplugging, and in uevents used for hotplugging
128 * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when 128 * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when
129 * when not using a GPIO line) 129 * not using a GPIO line)
130 * 130 *
131 * @statistics: statistics for the spi_device 131 * @statistics: statistics for the spi_device
132 * 132 *
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index a39186194cd6..3bf273538840 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -107,16 +107,11 @@ do { \
107 107
108#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) 108#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
109 109
110#ifdef CONFIG_GENERIC_LOCKBREAK
111#define raw_spin_is_contended(lock) ((lock)->break_lock)
112#else
113
114#ifdef arch_spin_is_contended 110#ifdef arch_spin_is_contended
115#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) 111#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
116#else 112#else
117#define raw_spin_is_contended(lock) (((void)(lock), 0)) 113#define raw_spin_is_contended(lock) (((void)(lock), 0))
118#endif /*arch_spin_is_contended*/ 114#endif /*arch_spin_is_contended*/
119#endif
120 115
121/* 116/*
122 * This barrier must provide two things: 117 * This barrier must provide two things:
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 73548eb13a5d..24b4e6f2c1a2 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -19,9 +19,6 @@
19 19
20typedef struct raw_spinlock { 20typedef struct raw_spinlock {
21 arch_spinlock_t raw_lock; 21 arch_spinlock_t raw_lock;
22#ifdef CONFIG_GENERIC_LOCKBREAK
23 unsigned int break_lock;
24#endif
25#ifdef CONFIG_DEBUG_SPINLOCK 22#ifdef CONFIG_DEBUG_SPINLOCK
26 unsigned int magic, owner_cpu; 23 unsigned int magic, owner_cpu;
27 void *owner; 24 void *owner;
diff --git a/include/linux/string.h b/include/linux/string.h
index 410ecf17de3c..cfd83eb2f926 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -259,7 +259,10 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p)
259{ 259{
260 __kernel_size_t ret; 260 __kernel_size_t ret;
261 size_t p_size = __builtin_object_size(p, 0); 261 size_t p_size = __builtin_object_size(p, 0);
262 if (p_size == (size_t)-1) 262
263 /* Work around gcc excess stack consumption issue */
264 if (p_size == (size_t)-1 ||
265 (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0'))
263 return __builtin_strlen(p); 266 return __builtin_strlen(p);
264 ret = strnlen(p, p_size); 267 ret = strnlen(p, p_size);
265 if (p_size <= ret) 268 if (p_size <= ret)
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 9c5a2628d6ce..1d3877c39a00 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -124,6 +124,11 @@ static inline bool is_write_device_private_entry(swp_entry_t entry)
124 return unlikely(swp_type(entry) == SWP_DEVICE_WRITE); 124 return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
125} 125}
126 126
127static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
128{
129 return swp_offset(entry);
130}
131
127static inline struct page *device_private_entry_to_page(swp_entry_t entry) 132static inline struct page *device_private_entry_to_page(swp_entry_t entry)
128{ 133{
129 return pfn_to_page(swp_offset(entry)); 134 return pfn_to_page(swp_offset(entry));
@@ -154,6 +159,11 @@ static inline bool is_write_device_private_entry(swp_entry_t entry)
154 return false; 159 return false;
155} 160}
156 161
162static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
163{
164 return 0;
165}
166
157static inline struct page *device_private_entry_to_page(swp_entry_t entry) 167static inline struct page *device_private_entry_to_page(swp_entry_t entry)
158{ 168{
159 return NULL; 169 return NULL;
@@ -189,6 +199,11 @@ static inline int is_write_migration_entry(swp_entry_t entry)
189 return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE); 199 return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
190} 200}
191 201
202static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
203{
204 return swp_offset(entry);
205}
206
192static inline struct page *migration_entry_to_page(swp_entry_t entry) 207static inline struct page *migration_entry_to_page(swp_entry_t entry)
193{ 208{
194 struct page *p = pfn_to_page(swp_offset(entry)); 209 struct page *p = pfn_to_page(swp_offset(entry));
@@ -218,6 +233,12 @@ static inline int is_migration_entry(swp_entry_t swp)
218{ 233{
219 return 0; 234 return 0;
220} 235}
236
237static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
238{
239 return 0;
240}
241
221static inline struct page *migration_entry_to_page(swp_entry_t entry) 242static inline struct page *migration_entry_to_page(swp_entry_t entry)
222{ 243{
223 return NULL; 244 return NULL;
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index e32dfe098e82..40839c02d28c 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -117,6 +117,12 @@ struct attribute_group {
117 .show = _name##_show, \ 117 .show = _name##_show, \
118} 118}
119 119
120#define __ATTR_RO_MODE(_name, _mode) { \
121 .attr = { .name = __stringify(_name), \
122 .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \
123 .show = _name##_show, \
124}
125
120#define __ATTR_WO(_name) { \ 126#define __ATTR_WO(_name) { \
121 .attr = { .name = __stringify(_name), .mode = S_IWUSR }, \ 127 .attr = { .name = __stringify(_name), .mode = S_IWUSR }, \
122 .store = _name##_store, \ 128 .store = _name##_store, \
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index df5d97a85e1a..ca4a6361389b 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -224,7 +224,8 @@ struct tcp_sock {
224 rate_app_limited:1, /* rate_{delivered,interval_us} limited? */ 224 rate_app_limited:1, /* rate_{delivered,interval_us} limited? */
225 fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */ 225 fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
226 fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */ 226 fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */
227 unused:3; 227 is_sack_reneg:1, /* in recovery from loss with SACK reneg? */
228 unused:2;
228 u8 nonagle : 4,/* Disable Nagle algorithm? */ 229 u8 nonagle : 4,/* Disable Nagle algorithm? */
229 thin_lto : 1,/* Use linear timeouts for thin streams */ 230 thin_lto : 1,/* Use linear timeouts for thin streams */
230 unused1 : 1, 231 unused1 : 1,
diff --git a/include/linux/tick.h b/include/linux/tick.h
index f442d1a42025..7cc35921218e 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -119,6 +119,7 @@ extern void tick_nohz_idle_exit(void);
119extern void tick_nohz_irq_exit(void); 119extern void tick_nohz_irq_exit(void);
120extern ktime_t tick_nohz_get_sleep_length(void); 120extern ktime_t tick_nohz_get_sleep_length(void);
121extern unsigned long tick_nohz_get_idle_calls(void); 121extern unsigned long tick_nohz_get_idle_calls(void);
122extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
122extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); 123extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
123extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); 124extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
124#else /* !CONFIG_NO_HZ_COMMON */ 125#else /* !CONFIG_NO_HZ_COMMON */
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 04af640ea95b..2448f9cc48a3 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -207,9 +207,11 @@ unsigned long round_jiffies_up(unsigned long j);
207unsigned long round_jiffies_up_relative(unsigned long j); 207unsigned long round_jiffies_up_relative(unsigned long j);
208 208
209#ifdef CONFIG_HOTPLUG_CPU 209#ifdef CONFIG_HOTPLUG_CPU
210int timers_prepare_cpu(unsigned int cpu);
210int timers_dead_cpu(unsigned int cpu); 211int timers_dead_cpu(unsigned int cpu);
211#else 212#else
212#define timers_dead_cpu NULL 213#define timers_prepare_cpu NULL
214#define timers_dead_cpu NULL
213#endif 215#endif
214 216
215#endif 217#endif
diff --git a/include/linux/trace.h b/include/linux/trace.h
index d24991c1fef3..b95ffb2188ab 100644
--- a/include/linux/trace.h
+++ b/include/linux/trace.h
@@ -18,7 +18,7 @@
18 */ 18 */
19struct trace_export { 19struct trace_export {
20 struct trace_export __rcu *next; 20 struct trace_export __rcu *next;
21 void (*write)(const void *, unsigned int); 21 void (*write)(struct trace_export *, const void *, unsigned int);
22}; 22};
23 23
24int register_ftrace_export(struct trace_export *export); 24int register_ftrace_export(struct trace_export *export);
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index a69877734c4e..e2ec3582e549 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -82,6 +82,7 @@ struct usbnet {
82# define EVENT_RX_KILL 10 82# define EVENT_RX_KILL 10
83# define EVENT_LINK_CHANGE 11 83# define EVENT_LINK_CHANGE 11
84# define EVENT_SET_RX_MODE 12 84# define EVENT_SET_RX_MODE 12
85# define EVENT_NO_IP_ALIGN 13
85}; 86};
86 87
87static inline struct usb_driver *driver_of(struct usb_interface *intf) 88static inline struct usb_driver *driver_of(struct usb_interface *intf)
diff --git a/include/net/arp.h b/include/net/arp.h
index dc8cd47f883b..977aabfcdc03 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -20,6 +20,9 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32
20 20
21static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key) 21static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
22{ 22{
23 if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
24 key = INADDR_ANY;
25
23 return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev); 26 return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
24} 27}
25 28
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 8b8118a7fadb..fb94a8bd8ab5 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -815,6 +815,8 @@ struct cfg80211_csa_settings {
815 u8 count; 815 u8 count;
816}; 816};
817 817
818#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
819
818/** 820/**
819 * struct iface_combination_params - input parameters for interface combinations 821 * struct iface_combination_params - input parameters for interface combinations
820 * 822 *
@@ -3226,7 +3228,6 @@ struct cfg80211_ops {
3226 * @WIPHY_FLAG_IBSS_RSN: The device supports IBSS RSN. 3228 * @WIPHY_FLAG_IBSS_RSN: The device supports IBSS RSN.
3227 * @WIPHY_FLAG_MESH_AUTH: The device supports mesh authentication by routing 3229 * @WIPHY_FLAG_MESH_AUTH: The device supports mesh authentication by routing
3228 * auth frames to userspace. See @NL80211_MESH_SETUP_USERSPACE_AUTH. 3230 * auth frames to userspace. See @NL80211_MESH_SETUP_USERSPACE_AUTH.
3229 * @WIPHY_FLAG_SUPPORTS_SCHED_SCAN: The device supports scheduled scans.
3230 * @WIPHY_FLAG_SUPPORTS_FW_ROAM: The device supports roaming feature in the 3231 * @WIPHY_FLAG_SUPPORTS_FW_ROAM: The device supports roaming feature in the
3231 * firmware. 3232 * firmware.
3232 * @WIPHY_FLAG_AP_UAPSD: The device supports uapsd on AP. 3233 * @WIPHY_FLAG_AP_UAPSD: The device supports uapsd on AP.
diff --git a/include/net/dst.h b/include/net/dst.h
index b091fd536098..d49d607dd2b3 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -521,4 +521,12 @@ static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
521} 521}
522#endif 522#endif
523 523
524static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
525{
526 struct dst_entry *dst = skb_dst(skb);
527
528 if (dst && dst->ops->update_pmtu)
529 dst->ops->update_pmtu(dst, NULL, skb, mtu);
530}
531
524#endif /* _NET_DST_H */ 532#endif /* _NET_DST_H */
diff --git a/include/net/gue.h b/include/net/gue.h
index 2fdb29ca74c2..fdad41469b65 100644
--- a/include/net/gue.h
+++ b/include/net/gue.h
@@ -44,10 +44,10 @@ struct guehdr {
44#else 44#else
45#error "Please fix <asm/byteorder.h>" 45#error "Please fix <asm/byteorder.h>"
46#endif 46#endif
47 __u8 proto_ctype; 47 __u8 proto_ctype;
48 __u16 flags; 48 __be16 flags;
49 }; 49 };
50 __u32 word; 50 __be32 word;
51 }; 51 };
52}; 52};
53 53
@@ -84,11 +84,10 @@ static inline size_t guehdr_priv_flags_len(__be32 flags)
84 * if there is an unknown standard or private flags, or the options length for 84 * if there is an unknown standard or private flags, or the options length for
85 * the flags exceeds the options length specific in hlen of the GUE header. 85 * the flags exceeds the options length specific in hlen of the GUE header.
86 */ 86 */
87static inline int validate_gue_flags(struct guehdr *guehdr, 87static inline int validate_gue_flags(struct guehdr *guehdr, size_t optlen)
88 size_t optlen)
89{ 88{
89 __be16 flags = guehdr->flags;
90 size_t len; 90 size_t len;
91 __be32 flags = guehdr->flags;
92 91
93 if (flags & ~GUE_FLAGS_ALL) 92 if (flags & ~GUE_FLAGS_ALL)
94 return 1; 93 return 1;
@@ -101,12 +100,13 @@ static inline int validate_gue_flags(struct guehdr *guehdr,
101 /* Private flags are last four bytes accounted in 100 /* Private flags are last four bytes accounted in
102 * guehdr_flags_len 101 * guehdr_flags_len
103 */ 102 */
104 flags = *(__be32 *)((void *)&guehdr[1] + len - GUE_LEN_PRIV); 103 __be32 pflags = *(__be32 *)((void *)&guehdr[1] +
104 len - GUE_LEN_PRIV);
105 105
106 if (flags & ~GUE_PFLAGS_ALL) 106 if (pflags & ~GUE_PFLAGS_ALL)
107 return 1; 107 return 1;
108 108
109 len += guehdr_priv_flags_len(flags); 109 len += guehdr_priv_flags_len(pflags);
110 if (len > optlen) 110 if (len > optlen)
111 return 1; 111 return 1;
112 } 112 }
diff --git a/include/net/ip.h b/include/net/ip.h
index 9896f46cbbf1..af8addbaa3c1 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -34,6 +34,7 @@
34#include <net/flow_dissector.h> 34#include <net/flow_dissector.h>
35 35
36#define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */ 36#define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */
37#define IPV4_MIN_MTU 68 /* RFC 791 */
37 38
38struct sock; 39struct sock;
39 40
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index f73797e2fa60..221238254eb7 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -331,6 +331,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
331 int flags); 331 int flags);
332int ip6_flowlabel_init(void); 332int ip6_flowlabel_init(void);
333void ip6_flowlabel_cleanup(void); 333void ip6_flowlabel_cleanup(void);
334bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np);
334 335
335static inline void fl6_sock_release(struct ip6_flowlabel *fl) 336static inline void fl6_sock_release(struct ip6_flowlabel *fl)
336{ 337{
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 10f99dafd5ac..049008493faf 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -223,6 +223,11 @@ int net_eq(const struct net *net1, const struct net *net2)
223 return net1 == net2; 223 return net1 == net2;
224} 224}
225 225
226static inline int check_net(const struct net *net)
227{
228 return atomic_read(&net->count) != 0;
229}
230
226void net_drop_ns(void *); 231void net_drop_ns(void *);
227 232
228#else 233#else
@@ -247,6 +252,11 @@ int net_eq(const struct net *net1, const struct net *net2)
247 return 1; 252 return 1;
248} 253}
249 254
255static inline int check_net(const struct net *net)
256{
257 return 1;
258}
259
250#define net_drop_ns NULL 260#define net_drop_ns NULL
251#endif 261#endif
252 262
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 0105445cab83..753ac9361154 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -522,7 +522,7 @@ static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
522{ 522{
523 switch (layer) { 523 switch (layer) {
524 case TCF_LAYER_LINK: 524 case TCF_LAYER_LINK:
525 return skb->data; 525 return skb_mac_header(skb);
526 case TCF_LAYER_NETWORK: 526 case TCF_LAYER_NETWORK:
527 return skb_network_header(skb); 527 return skb_network_header(skb);
528 case TCF_LAYER_TRANSPORT: 528 case TCF_LAYER_TRANSPORT:
@@ -694,9 +694,7 @@ struct tc_cls_matchall_offload {
694}; 694};
695 695
696enum tc_clsbpf_command { 696enum tc_clsbpf_command {
697 TC_CLSBPF_ADD, 697 TC_CLSBPF_OFFLOAD,
698 TC_CLSBPF_REPLACE,
699 TC_CLSBPF_DESTROY,
700 TC_CLSBPF_STATS, 698 TC_CLSBPF_STATS,
701}; 699};
702 700
@@ -705,6 +703,7 @@ struct tc_cls_bpf_offload {
705 enum tc_clsbpf_command command; 703 enum tc_clsbpf_command command;
706 struct tcf_exts *exts; 704 struct tcf_exts *exts;
707 struct bpf_prog *prog; 705 struct bpf_prog *prog;
706 struct bpf_prog *oldprog;
708 const char *name; 707 const char *name;
709 bool exts_integrated; 708 bool exts_integrated;
710 u32 gen_flags; 709 u32 gen_flags;
diff --git a/include/net/red.h b/include/net/red.h
index 9a9347710701..9665582c4687 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -168,6 +168,17 @@ static inline void red_set_vars(struct red_vars *v)
168 v->qcount = -1; 168 v->qcount = -1;
169} 169}
170 170
171static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog)
172{
173 if (fls(qth_min) + Wlog > 32)
174 return false;
175 if (fls(qth_max) + Wlog > 32)
176 return false;
177 if (qth_max < qth_min)
178 return false;
179 return true;
180}
181
171static inline void red_set_parms(struct red_parms *p, 182static inline void red_set_parms(struct red_parms *p,
172 u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog, 183 u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
173 u8 Scell_log, u8 *stab, u32 max_P) 184 u8 Scell_log, u8 *stab, u32 max_P)
@@ -179,7 +190,7 @@ static inline void red_set_parms(struct red_parms *p,
179 p->qth_max = qth_max << Wlog; 190 p->qth_max = qth_max << Wlog;
180 p->Wlog = Wlog; 191 p->Wlog = Wlog;
181 p->Plog = Plog; 192 p->Plog = Plog;
182 if (delta < 0) 193 if (delta <= 0)
183 delta = 1; 194 delta = 1;
184 p->qth_delta = delta; 195 p->qth_delta = delta;
185 if (!max_P) { 196 if (!max_P) {
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 65d0d25f2648..becf86aa4ac6 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -71,6 +71,7 @@ struct Qdisc {
71 * qdisc_tree_decrease_qlen() should stop. 71 * qdisc_tree_decrease_qlen() should stop.
72 */ 72 */
73#define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */ 73#define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */
74#define TCQ_F_OFFLOADED 0x200 /* qdisc is offloaded to HW */
74 u32 limit; 75 u32 limit;
75 const struct Qdisc_ops *ops; 76 const struct Qdisc_ops *ops;
76 struct qdisc_size_table __rcu *stab; 77 struct qdisc_size_table __rcu *stab;
@@ -178,6 +179,7 @@ struct Qdisc_ops {
178 const struct Qdisc_class_ops *cl_ops; 179 const struct Qdisc_class_ops *cl_ops;
179 char id[IFNAMSIZ]; 180 char id[IFNAMSIZ];
180 int priv_size; 181 int priv_size;
182 unsigned int static_flags;
181 183
182 int (*enqueue)(struct sk_buff *skb, 184 int (*enqueue)(struct sk_buff *skb,
183 struct Qdisc *sch, 185 struct Qdisc *sch,
@@ -443,6 +445,7 @@ void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
443 unsigned int len); 445 unsigned int len);
444struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, 446struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
445 const struct Qdisc_ops *ops); 447 const struct Qdisc_ops *ops);
448void qdisc_free(struct Qdisc *qdisc);
446struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, 449struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
447 const struct Qdisc_ops *ops, u32 parentid); 450 const struct Qdisc_ops *ops, u32 parentid);
448void __qdisc_calculate_pkt_len(struct sk_buff *skb, 451void __qdisc_calculate_pkt_len(struct sk_buff *skb,
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 16f949eef52f..9a5ccf03a59b 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -503,7 +503,8 @@ struct sctp_datamsg {
503 /* Did the messenge fail to send? */ 503 /* Did the messenge fail to send? */
504 int send_error; 504 int send_error;
505 u8 send_failed:1, 505 u8 send_failed:1,
506 can_delay; /* should this message be Nagle delayed */ 506 can_delay:1, /* should this message be Nagle delayed */
507 abandoned:1; /* should this message be abandoned */
507}; 508};
508 509
509struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *, 510struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *,
@@ -965,7 +966,7 @@ void sctp_transport_burst_limited(struct sctp_transport *);
965void sctp_transport_burst_reset(struct sctp_transport *); 966void sctp_transport_burst_reset(struct sctp_transport *);
966unsigned long sctp_transport_timeout(struct sctp_transport *); 967unsigned long sctp_transport_timeout(struct sctp_transport *);
967void sctp_transport_reset(struct sctp_transport *t); 968void sctp_transport_reset(struct sctp_transport *t);
968void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu); 969bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
969void sctp_transport_immediate_rtx(struct sctp_transport *); 970void sctp_transport_immediate_rtx(struct sctp_transport *);
970void sctp_transport_dst_release(struct sctp_transport *t); 971void sctp_transport_dst_release(struct sctp_transport *t);
971void sctp_transport_dst_confirm(struct sctp_transport *t); 972void sctp_transport_dst_confirm(struct sctp_transport *t);
diff --git a/include/net/sock.h b/include/net/sock.h
index 79e1a2c7912c..7a7b14e9628a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -685,11 +685,7 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
685 685
686static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) 686static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
687{ 687{
688 if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport && 688 hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
689 sk->sk_family == AF_INET6)
690 hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
691 else
692 hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
693} 689}
694 690
695static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) 691static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
@@ -1518,6 +1514,11 @@ static inline bool sock_owned_by_user(const struct sock *sk)
1518 return sk->sk_lock.owned; 1514 return sk->sk_lock.owned;
1519} 1515}
1520 1516
1517static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
1518{
1519 return sk->sk_lock.owned;
1520}
1521
1521/* no reclassification while locks are held */ 1522/* no reclassification while locks are held */
1522static inline bool sock_allow_reclassification(const struct sock *csk) 1523static inline bool sock_allow_reclassification(const struct sock *csk)
1523{ 1524{
diff --git a/include/net/tc_act/tc_sample.h b/include/net/tc_act/tc_sample.h
index 524cee4f4c81..01dbfea32672 100644
--- a/include/net/tc_act/tc_sample.h
+++ b/include/net/tc_act/tc_sample.h
@@ -14,7 +14,6 @@ struct tcf_sample {
14 struct psample_group __rcu *psample_group; 14 struct psample_group __rcu *psample_group;
15 u32 psample_group_num; 15 u32 psample_group_num;
16 struct list_head tcfm_list; 16 struct list_head tcfm_list;
17 struct rcu_head rcu;
18}; 17};
19#define to_sample(a) ((struct tcf_sample *)a) 18#define to_sample(a) ((struct tcf_sample *)a)
20 19
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 4e09398009c1..6da880d2f022 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -844,12 +844,11 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb)
844} 844}
845#endif 845#endif
846 846
847/* TCP_SKB_CB reference means this can not be used from early demux */
848static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb) 847static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
849{ 848{
850#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) 849#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
851 if (!net->ipv4.sysctl_tcp_l3mdev_accept && 850 if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
852 skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags)) 851 skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
853 return true; 852 return true;
854#endif 853#endif
855 return false; 854 return false;
@@ -1056,7 +1055,7 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
1056void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb, 1055void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
1057 struct rate_sample *rs); 1056 struct rate_sample *rs);
1058void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost, 1057void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
1059 struct rate_sample *rs); 1058 bool is_sack_reneg, struct rate_sample *rs);
1060void tcp_rate_check_app_limited(struct sock *sk); 1059void tcp_rate_check_app_limited(struct sock *sk);
1061 1060
1062/* These functions determine how the current flow behaves in respect of SACK 1061/* These functions determine how the current flow behaves in respect of SACK
diff --git a/include/net/tls.h b/include/net/tls.h
index 936cfc5cab7d..9185e53a743c 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -170,7 +170,7 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
170 170
171static inline void tls_err_abort(struct sock *sk) 171static inline void tls_err_abort(struct sock *sk)
172{ 172{
173 sk->sk_err = -EBADMSG; 173 sk->sk_err = EBADMSG;
174 sk->sk_error_report(sk); 174 sk->sk_error_report(sk);
175} 175}
176 176
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 13223396dc64..f96391e84a8a 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -146,7 +146,7 @@ struct vxlanhdr_gpe {
146 np_applied:1, 146 np_applied:1,
147 instance_applied:1, 147 instance_applied:1,
148 version:2, 148 version:2,
149reserved_flags2:2; 149 reserved_flags2:2;
150#elif defined(__BIG_ENDIAN_BITFIELD) 150#elif defined(__BIG_ENDIAN_BITFIELD)
151 u8 reserved_flags2:2, 151 u8 reserved_flags2:2,
152 version:2, 152 version:2,
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index dc28a98ce97c..ae35991b5877 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1570,6 +1570,9 @@ int xfrm_init_state(struct xfrm_state *x);
1570int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb); 1570int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
1571int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); 1571int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
1572int xfrm_input_resume(struct sk_buff *skb, int nexthdr); 1572int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
1573int xfrm_trans_queue(struct sk_buff *skb,
1574 int (*finish)(struct net *, struct sock *,
1575 struct sk_buff *));
1573int xfrm_output_resume(struct sk_buff *skb, int err); 1576int xfrm_output_resume(struct sk_buff *skb, int err);
1574int xfrm_output(struct sock *sk, struct sk_buff *skb); 1577int xfrm_output(struct sock *sk, struct sk_buff *skb);
1575int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb); 1578int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 0f9cbf96c093..6df6fe0c2198 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -159,11 +159,11 @@ struct expander_device {
159 159
160struct sata_device { 160struct sata_device {
161 unsigned int class; 161 unsigned int class;
162 struct smp_resp rps_resp; /* report_phy_sata_resp */
163 u8 port_no; /* port number, if this is a PM (Port) */ 162 u8 port_no; /* port number, if this is a PM (Port) */
164 163
165 struct ata_port *ap; 164 struct ata_port *ap;
166 struct ata_host ata_host; 165 struct ata_host ata_host;
166 struct smp_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */
167 u8 fis[ATA_RESP_FIS_SIZE]; 167 u8 fis[ATA_RESP_FIS_SIZE];
168}; 168};
169 169
diff --git a/include/trace/events/clk.h b/include/trace/events/clk.h
index 758607226bfd..2cd449328aee 100644
--- a/include/trace/events/clk.h
+++ b/include/trace/events/clk.h
@@ -134,12 +134,12 @@ DECLARE_EVENT_CLASS(clk_parent,
134 134
135 TP_STRUCT__entry( 135 TP_STRUCT__entry(
136 __string( name, core->name ) 136 __string( name, core->name )
137 __string( pname, parent->name ) 137 __string( pname, parent ? parent->name : "none" )
138 ), 138 ),
139 139
140 TP_fast_assign( 140 TP_fast_assign(
141 __assign_str(name, core->name); 141 __assign_str(name, core->name);
142 __assign_str(pname, parent->name); 142 __assign_str(pname, parent ? parent->name : "none");
143 ), 143 ),
144 144
145 TP_printk("%s %s", __get_str(name), __get_str(pname)) 145 TP_printk("%s %s", __get_str(name), __get_str(pname))
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index e4b0b8e09932..2c735a3e6613 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -211,7 +211,7 @@ TRACE_EVENT(kvm_ack_irq,
211 { KVM_TRACE_MMIO_WRITE, "write" } 211 { KVM_TRACE_MMIO_WRITE, "write" }
212 212
213TRACE_EVENT(kvm_mmio, 213TRACE_EVENT(kvm_mmio,
214 TP_PROTO(int type, int len, u64 gpa, u64 val), 214 TP_PROTO(int type, int len, u64 gpa, void *val),
215 TP_ARGS(type, len, gpa, val), 215 TP_ARGS(type, len, gpa, val),
216 216
217 TP_STRUCT__entry( 217 TP_STRUCT__entry(
@@ -225,7 +225,10 @@ TRACE_EVENT(kvm_mmio,
225 __entry->type = type; 225 __entry->type = type;
226 __entry->len = len; 226 __entry->len = len;
227 __entry->gpa = gpa; 227 __entry->gpa = gpa;
228 __entry->val = val; 228 __entry->val = 0;
229 if (val)
230 memcpy(&__entry->val, val,
231 min_t(u32, sizeof(__entry->val), len));
229 ), 232 ),
230 233
231 TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx", 234 TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h
index f5024c560d8f..9c4eb33c5a1d 100644
--- a/include/trace/events/preemptirq.h
+++ b/include/trace/events/preemptirq.h
@@ -56,15 +56,18 @@ DEFINE_EVENT(preemptirq_template, preempt_enable,
56 56
57#include <trace/define_trace.h> 57#include <trace/define_trace.h>
58 58
59#else /* !CONFIG_PREEMPTIRQ_EVENTS */ 59#endif /* !CONFIG_PREEMPTIRQ_EVENTS */
60 60
61#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || defined(CONFIG_PROVE_LOCKING)
61#define trace_irq_enable(...) 62#define trace_irq_enable(...)
62#define trace_irq_disable(...) 63#define trace_irq_disable(...)
63#define trace_preempt_enable(...)
64#define trace_preempt_disable(...)
65#define trace_irq_enable_rcuidle(...) 64#define trace_irq_enable_rcuidle(...)
66#define trace_irq_disable_rcuidle(...) 65#define trace_irq_disable_rcuidle(...)
66#endif
67
68#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || !defined(CONFIG_DEBUG_PREEMPT)
69#define trace_preempt_enable(...)
70#define trace_preempt_disable(...)
67#define trace_preempt_enable_rcuidle(...) 71#define trace_preempt_enable_rcuidle(...)
68#define trace_preempt_disable_rcuidle(...) 72#define trace_preempt_disable_rcuidle(...)
69
70#endif 73#endif
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index 07cccca6cbf1..ab34c561f26b 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -25,6 +25,35 @@
25 tcp_state_name(TCP_CLOSING), \ 25 tcp_state_name(TCP_CLOSING), \
26 tcp_state_name(TCP_NEW_SYN_RECV)) 26 tcp_state_name(TCP_NEW_SYN_RECV))
27 27
28#define TP_STORE_V4MAPPED(__entry, saddr, daddr) \
29 do { \
30 struct in6_addr *pin6; \
31 \
32 pin6 = (struct in6_addr *)__entry->saddr_v6; \
33 ipv6_addr_set_v4mapped(saddr, pin6); \
34 pin6 = (struct in6_addr *)__entry->daddr_v6; \
35 ipv6_addr_set_v4mapped(daddr, pin6); \
36 } while (0)
37
38#if IS_ENABLED(CONFIG_IPV6)
39#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6) \
40 do { \
41 if (sk->sk_family == AF_INET6) { \
42 struct in6_addr *pin6; \
43 \
44 pin6 = (struct in6_addr *)__entry->saddr_v6; \
45 *pin6 = saddr6; \
46 pin6 = (struct in6_addr *)__entry->daddr_v6; \
47 *pin6 = daddr6; \
48 } else { \
49 TP_STORE_V4MAPPED(__entry, saddr, daddr); \
50 } \
51 } while (0)
52#else
53#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6) \
54 TP_STORE_V4MAPPED(__entry, saddr, daddr)
55#endif
56
28/* 57/*
29 * tcp event with arguments sk and skb 58 * tcp event with arguments sk and skb
30 * 59 *
@@ -50,7 +79,6 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
50 79
51 TP_fast_assign( 80 TP_fast_assign(
52 struct inet_sock *inet = inet_sk(sk); 81 struct inet_sock *inet = inet_sk(sk);
53 struct in6_addr *pin6;
54 __be32 *p32; 82 __be32 *p32;
55 83
56 __entry->skbaddr = skb; 84 __entry->skbaddr = skb;
@@ -65,20 +93,8 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
65 p32 = (__be32 *) __entry->daddr; 93 p32 = (__be32 *) __entry->daddr;
66 *p32 = inet->inet_daddr; 94 *p32 = inet->inet_daddr;
67 95
68#if IS_ENABLED(CONFIG_IPV6) 96 TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
69 if (sk->sk_family == AF_INET6) { 97 sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
70 pin6 = (struct in6_addr *)__entry->saddr_v6;
71 *pin6 = sk->sk_v6_rcv_saddr;
72 pin6 = (struct in6_addr *)__entry->daddr_v6;
73 *pin6 = sk->sk_v6_daddr;
74 } else
75#endif
76 {
77 pin6 = (struct in6_addr *)__entry->saddr_v6;
78 ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
79 pin6 = (struct in6_addr *)__entry->daddr_v6;
80 ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
81 }
82 ), 98 ),
83 99
84 TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c", 100 TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
@@ -127,7 +143,6 @@ DECLARE_EVENT_CLASS(tcp_event_sk,
127 143
128 TP_fast_assign( 144 TP_fast_assign(
129 struct inet_sock *inet = inet_sk(sk); 145 struct inet_sock *inet = inet_sk(sk);
130 struct in6_addr *pin6;
131 __be32 *p32; 146 __be32 *p32;
132 147
133 __entry->skaddr = sk; 148 __entry->skaddr = sk;
@@ -141,20 +156,8 @@ DECLARE_EVENT_CLASS(tcp_event_sk,
141 p32 = (__be32 *) __entry->daddr; 156 p32 = (__be32 *) __entry->daddr;
142 *p32 = inet->inet_daddr; 157 *p32 = inet->inet_daddr;
143 158
144#if IS_ENABLED(CONFIG_IPV6) 159 TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
145 if (sk->sk_family == AF_INET6) { 160 sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
146 pin6 = (struct in6_addr *)__entry->saddr_v6;
147 *pin6 = sk->sk_v6_rcv_saddr;
148 pin6 = (struct in6_addr *)__entry->daddr_v6;
149 *pin6 = sk->sk_v6_daddr;
150 } else
151#endif
152 {
153 pin6 = (struct in6_addr *)__entry->saddr_v6;
154 ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
155 pin6 = (struct in6_addr *)__entry->daddr_v6;
156 ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
157 }
158 ), 161 ),
159 162
160 TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c", 163 TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
@@ -197,7 +200,6 @@ TRACE_EVENT(tcp_set_state,
197 200
198 TP_fast_assign( 201 TP_fast_assign(
199 struct inet_sock *inet = inet_sk(sk); 202 struct inet_sock *inet = inet_sk(sk);
200 struct in6_addr *pin6;
201 __be32 *p32; 203 __be32 *p32;
202 204
203 __entry->skaddr = sk; 205 __entry->skaddr = sk;
@@ -213,20 +215,8 @@ TRACE_EVENT(tcp_set_state,
213 p32 = (__be32 *) __entry->daddr; 215 p32 = (__be32 *) __entry->daddr;
214 *p32 = inet->inet_daddr; 216 *p32 = inet->inet_daddr;
215 217
216#if IS_ENABLED(CONFIG_IPV6) 218 TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
217 if (sk->sk_family == AF_INET6) { 219 sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
218 pin6 = (struct in6_addr *)__entry->saddr_v6;
219 *pin6 = sk->sk_v6_rcv_saddr;
220 pin6 = (struct in6_addr *)__entry->daddr_v6;
221 *pin6 = sk->sk_v6_daddr;
222 } else
223#endif
224 {
225 pin6 = (struct in6_addr *)__entry->saddr_v6;
226 ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
227 pin6 = (struct in6_addr *)__entry->daddr_v6;
228 ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
229 }
230 ), 220 ),
231 221
232 TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s", 222 TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s",
@@ -256,7 +246,6 @@ TRACE_EVENT(tcp_retransmit_synack,
256 246
257 TP_fast_assign( 247 TP_fast_assign(
258 struct inet_request_sock *ireq = inet_rsk(req); 248 struct inet_request_sock *ireq = inet_rsk(req);
259 struct in6_addr *pin6;
260 __be32 *p32; 249 __be32 *p32;
261 250
262 __entry->skaddr = sk; 251 __entry->skaddr = sk;
@@ -271,20 +260,8 @@ TRACE_EVENT(tcp_retransmit_synack,
271 p32 = (__be32 *) __entry->daddr; 260 p32 = (__be32 *) __entry->daddr;
272 *p32 = ireq->ir_rmt_addr; 261 *p32 = ireq->ir_rmt_addr;
273 262
274#if IS_ENABLED(CONFIG_IPV6) 263 TP_STORE_ADDRS(__entry, ireq->ir_loc_addr, ireq->ir_rmt_addr,
275 if (sk->sk_family == AF_INET6) { 264 ireq->ir_v6_loc_addr, ireq->ir_v6_rmt_addr);
276 pin6 = (struct in6_addr *)__entry->saddr_v6;
277 *pin6 = ireq->ir_v6_loc_addr;
278 pin6 = (struct in6_addr *)__entry->daddr_v6;
279 *pin6 = ireq->ir_v6_rmt_addr;
280 } else
281#endif
282 {
283 pin6 = (struct in6_addr *)__entry->saddr_v6;
284 ipv6_addr_set_v4mapped(ireq->ir_loc_addr, pin6);
285 pin6 = (struct in6_addr *)__entry->daddr_v6;
286 ipv6_addr_set_v4mapped(ireq->ir_rmt_addr, pin6);
287 }
288 ), 265 ),
289 266
290 TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c", 267 TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
index 4cd0f05d0113..8989a92c571a 100644
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -8,6 +8,7 @@
8#include <linux/netdevice.h> 8#include <linux/netdevice.h>
9#include <linux/filter.h> 9#include <linux/filter.h>
10#include <linux/tracepoint.h> 10#include <linux/tracepoint.h>
11#include <linux/bpf.h>
11 12
12#define __XDP_ACT_MAP(FN) \ 13#define __XDP_ACT_MAP(FN) \
13 FN(ABORTED) \ 14 FN(ABORTED) \
diff --git a/include/uapi/asm-generic/bpf_perf_event.h b/include/uapi/asm-generic/bpf_perf_event.h
new file mode 100644
index 000000000000..53815d2cd047
--- /dev/null
+++ b/include/uapi/asm-generic/bpf_perf_event.h
@@ -0,0 +1,9 @@
1#ifndef _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__
2#define _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__
3
4#include <linux/ptrace.h>
5
6/* Export kernel pt_regs structure */
7typedef struct pt_regs bpf_user_pt_regs_t;
8
9#endif /* _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__ */
diff --git a/include/uapi/linux/bpf_perf_event.h b/include/uapi/linux/bpf_perf_event.h
index af549d4ecf1b..8f95303f9d80 100644
--- a/include/uapi/linux/bpf_perf_event.h
+++ b/include/uapi/linux/bpf_perf_event.h
@@ -8,11 +8,10 @@
8#ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__ 8#ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__
9#define _UAPI__LINUX_BPF_PERF_EVENT_H__ 9#define _UAPI__LINUX_BPF_PERF_EVENT_H__
10 10
11#include <linux/types.h> 11#include <asm/bpf_perf_event.h>
12#include <linux/ptrace.h>
13 12
14struct bpf_perf_event_data { 13struct bpf_perf_event_data {
15 struct pt_regs regs; 14 bpf_user_pt_regs_t regs;
16 __u64 sample_period; 15 __u64 sample_period;
17}; 16};
18 17
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 3ee3bf7c8526..144de4d2f385 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -23,6 +23,7 @@
23#define _UAPI_LINUX_IF_ETHER_H 23#define _UAPI_LINUX_IF_ETHER_H
24 24
25#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/libc-compat.h>
26 27
27/* 28/*
28 * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble 29 * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble
@@ -149,11 +150,13 @@
149 * This is an Ethernet frame header. 150 * This is an Ethernet frame header.
150 */ 151 */
151 152
153#if __UAPI_DEF_ETHHDR
152struct ethhdr { 154struct ethhdr {
153 unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ 155 unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
154 unsigned char h_source[ETH_ALEN]; /* source ether addr */ 156 unsigned char h_source[ETH_ALEN]; /* source ether addr */
155 __be16 h_proto; /* packet type ID field */ 157 __be16 h_proto; /* packet type ID field */
156} __attribute__((packed)); 158} __attribute__((packed));
159#endif
157 160
158 161
159#endif /* _UAPI_LINUX_IF_ETHER_H */ 162#endif /* _UAPI_LINUX_IF_ETHER_H */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 282d7613fce8..8fb90a0819c3 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -630,9 +630,9 @@ struct kvm_s390_irq {
630 630
631struct kvm_s390_irq_state { 631struct kvm_s390_irq_state {
632 __u64 buf; 632 __u64 buf;
633 __u32 flags; 633 __u32 flags; /* will stay unused for compatibility reasons */
634 __u32 len; 634 __u32 len;
635 __u32 reserved[4]; 635 __u32 reserved[4]; /* will stay unused for compatibility reasons */
636}; 636};
637 637
638/* for KVM_SET_GUEST_DEBUG */ 638/* for KVM_SET_GUEST_DEBUG */
@@ -932,6 +932,8 @@ struct kvm_ppc_resize_hpt {
932#define KVM_CAP_HYPERV_SYNIC2 148 932#define KVM_CAP_HYPERV_SYNIC2 148
933#define KVM_CAP_HYPERV_VP_INDEX 149 933#define KVM_CAP_HYPERV_VP_INDEX 149
934#define KVM_CAP_S390_AIS_MIGRATION 150 934#define KVM_CAP_S390_AIS_MIGRATION 150
935#define KVM_CAP_PPC_GET_CPU_CHAR 151
936#define KVM_CAP_S390_BPB 152
935 937
936#ifdef KVM_CAP_IRQ_ROUTING 938#ifdef KVM_CAP_IRQ_ROUTING
937 939
@@ -1261,6 +1263,8 @@ struct kvm_s390_ucas_mapping {
1261#define KVM_PPC_CONFIGURE_V3_MMU _IOW(KVMIO, 0xaf, struct kvm_ppc_mmuv3_cfg) 1263#define KVM_PPC_CONFIGURE_V3_MMU _IOW(KVMIO, 0xaf, struct kvm_ppc_mmuv3_cfg)
1262/* Available with KVM_CAP_PPC_RADIX_MMU */ 1264/* Available with KVM_CAP_PPC_RADIX_MMU */
1263#define KVM_PPC_GET_RMMU_INFO _IOW(KVMIO, 0xb0, struct kvm_ppc_rmmu_info) 1265#define KVM_PPC_GET_RMMU_INFO _IOW(KVMIO, 0xb0, struct kvm_ppc_rmmu_info)
1266/* Available with KVM_CAP_PPC_GET_CPU_CHAR */
1267#define KVM_PPC_GET_CPU_CHAR _IOR(KVMIO, 0xb1, struct kvm_ppc_cpu_char)
1264 1268
1265/* ioctl for vm fd */ 1269/* ioctl for vm fd */
1266#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device) 1270#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device)
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
index 282875cf8056..fc29efaa918c 100644
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -168,47 +168,106 @@
168 168
169/* If we did not see any headers from any supported C libraries, 169/* If we did not see any headers from any supported C libraries,
170 * or we are being included in the kernel, then define everything 170 * or we are being included in the kernel, then define everything
171 * that we need. */ 171 * that we need. Check for previous __UAPI_* definitions to give
172 * unsupported C libraries a way to opt out of any kernel definition. */
172#else /* !defined(__GLIBC__) */ 173#else /* !defined(__GLIBC__) */
173 174
174/* Definitions for if.h */ 175/* Definitions for if.h */
176#ifndef __UAPI_DEF_IF_IFCONF
175#define __UAPI_DEF_IF_IFCONF 1 177#define __UAPI_DEF_IF_IFCONF 1
178#endif
179#ifndef __UAPI_DEF_IF_IFMAP
176#define __UAPI_DEF_IF_IFMAP 1 180#define __UAPI_DEF_IF_IFMAP 1
181#endif
182#ifndef __UAPI_DEF_IF_IFNAMSIZ
177#define __UAPI_DEF_IF_IFNAMSIZ 1 183#define __UAPI_DEF_IF_IFNAMSIZ 1
184#endif
185#ifndef __UAPI_DEF_IF_IFREQ
178#define __UAPI_DEF_IF_IFREQ 1 186#define __UAPI_DEF_IF_IFREQ 1
187#endif
179/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */ 188/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
189#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS
180#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1 190#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
191#endif
181/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */ 192/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
193#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
182#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1 194#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
195#endif
183 196
184/* Definitions for in.h */ 197/* Definitions for in.h */
198#ifndef __UAPI_DEF_IN_ADDR
185#define __UAPI_DEF_IN_ADDR 1 199#define __UAPI_DEF_IN_ADDR 1
200#endif
201#ifndef __UAPI_DEF_IN_IPPROTO
186#define __UAPI_DEF_IN_IPPROTO 1 202#define __UAPI_DEF_IN_IPPROTO 1
203#endif
204#ifndef __UAPI_DEF_IN_PKTINFO
187#define __UAPI_DEF_IN_PKTINFO 1 205#define __UAPI_DEF_IN_PKTINFO 1
206#endif
207#ifndef __UAPI_DEF_IP_MREQ
188#define __UAPI_DEF_IP_MREQ 1 208#define __UAPI_DEF_IP_MREQ 1
209#endif
210#ifndef __UAPI_DEF_SOCKADDR_IN
189#define __UAPI_DEF_SOCKADDR_IN 1 211#define __UAPI_DEF_SOCKADDR_IN 1
212#endif
213#ifndef __UAPI_DEF_IN_CLASS
190#define __UAPI_DEF_IN_CLASS 1 214#define __UAPI_DEF_IN_CLASS 1
215#endif
191 216
192/* Definitions for in6.h */ 217/* Definitions for in6.h */
218#ifndef __UAPI_DEF_IN6_ADDR
193#define __UAPI_DEF_IN6_ADDR 1 219#define __UAPI_DEF_IN6_ADDR 1
220#endif
221#ifndef __UAPI_DEF_IN6_ADDR_ALT
194#define __UAPI_DEF_IN6_ADDR_ALT 1 222#define __UAPI_DEF_IN6_ADDR_ALT 1
223#endif
224#ifndef __UAPI_DEF_SOCKADDR_IN6
195#define __UAPI_DEF_SOCKADDR_IN6 1 225#define __UAPI_DEF_SOCKADDR_IN6 1
226#endif
227#ifndef __UAPI_DEF_IPV6_MREQ
196#define __UAPI_DEF_IPV6_MREQ 1 228#define __UAPI_DEF_IPV6_MREQ 1
229#endif
230#ifndef __UAPI_DEF_IPPROTO_V6
197#define __UAPI_DEF_IPPROTO_V6 1 231#define __UAPI_DEF_IPPROTO_V6 1
232#endif
233#ifndef __UAPI_DEF_IPV6_OPTIONS
198#define __UAPI_DEF_IPV6_OPTIONS 1 234#define __UAPI_DEF_IPV6_OPTIONS 1
235#endif
236#ifndef __UAPI_DEF_IN6_PKTINFO
199#define __UAPI_DEF_IN6_PKTINFO 1 237#define __UAPI_DEF_IN6_PKTINFO 1
238#endif
239#ifndef __UAPI_DEF_IP6_MTUINFO
200#define __UAPI_DEF_IP6_MTUINFO 1 240#define __UAPI_DEF_IP6_MTUINFO 1
241#endif
201 242
202/* Definitions for ipx.h */ 243/* Definitions for ipx.h */
244#ifndef __UAPI_DEF_SOCKADDR_IPX
203#define __UAPI_DEF_SOCKADDR_IPX 1 245#define __UAPI_DEF_SOCKADDR_IPX 1
246#endif
247#ifndef __UAPI_DEF_IPX_ROUTE_DEFINITION
204#define __UAPI_DEF_IPX_ROUTE_DEFINITION 1 248#define __UAPI_DEF_IPX_ROUTE_DEFINITION 1
249#endif
250#ifndef __UAPI_DEF_IPX_INTERFACE_DEFINITION
205#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 1 251#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 1
252#endif
253#ifndef __UAPI_DEF_IPX_CONFIG_DATA
206#define __UAPI_DEF_IPX_CONFIG_DATA 1 254#define __UAPI_DEF_IPX_CONFIG_DATA 1
255#endif
256#ifndef __UAPI_DEF_IPX_ROUTE_DEF
207#define __UAPI_DEF_IPX_ROUTE_DEF 1 257#define __UAPI_DEF_IPX_ROUTE_DEF 1
258#endif
208 259
209/* Definitions for xattr.h */ 260/* Definitions for xattr.h */
261#ifndef __UAPI_DEF_XATTR
210#define __UAPI_DEF_XATTR 1 262#define __UAPI_DEF_XATTR 1
263#endif
211 264
212#endif /* __GLIBC__ */ 265#endif /* __GLIBC__ */
213 266
267/* Definitions for if_ether.h */
268/* allow libcs like musl to deactivate this, glibc does not implement this. */
269#ifndef __UAPI_DEF_ETHHDR
270#define __UAPI_DEF_ETHHDR 1
271#endif
272
214#endif /* _UAPI_LIBC_COMPAT_H */ 273#endif /* _UAPI_LIBC_COMPAT_H */
diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h
index 3fea7709a441..57ccfb32e87f 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_common.h
@@ -36,7 +36,7 @@ enum ip_conntrack_info {
36 36
37#define NF_CT_STATE_INVALID_BIT (1 << 0) 37#define NF_CT_STATE_INVALID_BIT (1 << 0)
38#define NF_CT_STATE_BIT(ctinfo) (1 << ((ctinfo) % IP_CT_IS_REPLY + 1)) 38#define NF_CT_STATE_BIT(ctinfo) (1 << ((ctinfo) % IP_CT_IS_REPLY + 1))
39#define NF_CT_STATE_UNTRACKED_BIT (1 << (IP_CT_UNTRACKED + 1)) 39#define NF_CT_STATE_UNTRACKED_BIT (1 << 6)
40 40
41/* Bitset representing status of connection. */ 41/* Bitset representing status of connection. */
42enum ip_conntrack_status { 42enum ip_conntrack_status {
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 4265d7f9e1f2..dcfab5e3b55c 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -363,7 +363,6 @@ enum ovs_tunnel_key_attr {
363 OVS_TUNNEL_KEY_ATTR_IPV6_SRC, /* struct in6_addr src IPv6 address. */ 363 OVS_TUNNEL_KEY_ATTR_IPV6_SRC, /* struct in6_addr src IPv6 address. */
364 OVS_TUNNEL_KEY_ATTR_IPV6_DST, /* struct in6_addr dst IPv6 address. */ 364 OVS_TUNNEL_KEY_ATTR_IPV6_DST, /* struct in6_addr dst IPv6 address. */
365 OVS_TUNNEL_KEY_ATTR_PAD, 365 OVS_TUNNEL_KEY_ATTR_PAD,
366 OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS, /* be32 ERSPAN index. */
367 __OVS_TUNNEL_KEY_ATTR_MAX 366 __OVS_TUNNEL_KEY_ATTR_MAX
368}; 367};
369 368
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index af3cc2f4e1ad..37b5096ae97b 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -256,7 +256,6 @@ struct tc_red_qopt {
256#define TC_RED_ECN 1 256#define TC_RED_ECN 1
257#define TC_RED_HARDDROP 2 257#define TC_RED_HARDDROP 2
258#define TC_RED_ADAPTATIVE 4 258#define TC_RED_ADAPTATIVE 4
259#define TC_RED_OFFLOADED 8
260}; 259};
261 260
262struct tc_red_xstats { 261struct tc_red_xstats {
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index d8b5f80c2ea6..843e29aa3cac 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -557,6 +557,7 @@ enum {
557 TCA_PAD, 557 TCA_PAD,
558 TCA_DUMP_INVISIBLE, 558 TCA_DUMP_INVISIBLE,
559 TCA_CHAIN, 559 TCA_CHAIN,
560 TCA_HW_OFFLOAD,
560 __TCA_MAX 561 __TCA_MAX
561}; 562};
562 563
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 41a0a81b01e6..c4c79aa331bd 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -880,6 +880,8 @@ struct usb_wireless_cap_descriptor { /* Ultra Wide Band */
880 __u8 bReserved; 880 __u8 bReserved;
881} __attribute__((packed)); 881} __attribute__((packed));
882 882
883#define USB_DT_USB_WIRELESS_CAP_SIZE 11
884
883/* USB 2.0 Extension descriptor */ 885/* USB 2.0 Extension descriptor */
884#define USB_CAP_TYPE_EXT 2 886#define USB_CAP_TYPE_EXT 2
885 887
@@ -1072,6 +1074,7 @@ struct usb_ptm_cap_descriptor {
1072 __u8 bDevCapabilityType; 1074 __u8 bDevCapabilityType;
1073} __attribute__((packed)); 1075} __attribute__((packed));
1074 1076
1077#define USB_DT_USB_PTM_ID_SIZE 3
1075/* 1078/*
1076 * The size of the descriptor for the Sublink Speed Attribute Count 1079 * The size of the descriptor for the Sublink Speed Attribute Count
1077 * (SSAC) specified in bmAttributes[4:0]. 1080 * (SSAC) specified in bmAttributes[4:0].
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index 4914b93a23f2..61f410fd74e4 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -44,3 +44,8 @@ static inline void xen_balloon_init(void)
44{ 44{
45} 45}
46#endif 46#endif
47
48#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
49struct resource;
50void arch_xen_balloon_init(struct resource *hostmem_resource);
51#endif
diff --git a/init/Kconfig b/init/Kconfig
index 2934249fba46..a9a2e2c86671 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -461,10 +461,15 @@ endmenu # "CPU/Task time and stats accounting"
461 461
462config CPU_ISOLATION 462config CPU_ISOLATION
463 bool "CPU isolation" 463 bool "CPU isolation"
464 depends on SMP || COMPILE_TEST
465 default y
464 help 466 help
465 Make sure that CPUs running critical tasks are not disturbed by 467 Make sure that CPUs running critical tasks are not disturbed by
466 any source of "noise" such as unbound workqueues, timers, kthreads... 468 any source of "noise" such as unbound workqueues, timers, kthreads...
467 Unbound jobs get offloaded to housekeeping CPUs. 469 Unbound jobs get offloaded to housekeeping CPUs. This is driven by
470 the "isolcpus=" boot parameter.
471
472 Say Y if unsure.
468 473
469source "kernel/rcu/Kconfig" 474source "kernel/rcu/Kconfig"
470 475
@@ -1392,6 +1397,13 @@ config BPF_SYSCALL
1392 Enable the bpf() system call that allows to manipulate eBPF 1397 Enable the bpf() system call that allows to manipulate eBPF
1393 programs and maps via file descriptors. 1398 programs and maps via file descriptors.
1394 1399
1400config BPF_JIT_ALWAYS_ON
1401 bool "Permanently enable BPF JIT and remove BPF interpreter"
1402 depends on BPF_SYSCALL && HAVE_EBPF_JIT && BPF_JIT
1403 help
1404 Enables BPF JIT and removes BPF interpreter to avoid
1405 speculative execution of BPF instructions by the interpreter
1406
1395config USERFAULTFD 1407config USERFAULTFD
1396 bool "Enable userfaultfd() system call" 1408 bool "Enable userfaultfd() system call"
1397 select ANON_INODES 1409 select ANON_INODES
diff --git a/init/main.c b/init/main.c
index dfec3809e740..a8100b954839 100644
--- a/init/main.c
+++ b/init/main.c
@@ -75,6 +75,7 @@
75#include <linux/slab.h> 75#include <linux/slab.h>
76#include <linux/perf_event.h> 76#include <linux/perf_event.h>
77#include <linux/ptrace.h> 77#include <linux/ptrace.h>
78#include <linux/pti.h>
78#include <linux/blkdev.h> 79#include <linux/blkdev.h>
79#include <linux/elevator.h> 80#include <linux/elevator.h>
80#include <linux/sched_clock.h> 81#include <linux/sched_clock.h>
@@ -504,6 +505,10 @@ static void __init mm_init(void)
504 pgtable_init(); 505 pgtable_init();
505 vmalloc_init(); 506 vmalloc_init();
506 ioremap_huge_init(); 507 ioremap_huge_init();
508 /* Should be run before the first non-init thread is created */
509 init_espfix_bsp();
510 /* Should be run after espfix64 is set up. */
511 pti_init();
507} 512}
508 513
509asmlinkage __visible void __init start_kernel(void) 514asmlinkage __visible void __init start_kernel(void)
@@ -589,6 +594,12 @@ asmlinkage __visible void __init start_kernel(void)
589 radix_tree_init(); 594 radix_tree_init();
590 595
591 /* 596 /*
597 * Set up housekeeping before setting up workqueues to allow the unbound
598 * workqueue to take non-housekeeping into account.
599 */
600 housekeeping_init();
601
602 /*
592 * Allow workqueue creation and work item queueing/cancelling 603 * Allow workqueue creation and work item queueing/cancelling
593 * early. Work item execution depends on kthreads and starts after 604 * early. Work item execution depends on kthreads and starts after
594 * workqueue_init(). 605 * workqueue_init().
@@ -605,7 +616,6 @@ asmlinkage __visible void __init start_kernel(void)
605 early_irq_init(); 616 early_irq_init();
606 init_IRQ(); 617 init_IRQ();
607 tick_init(); 618 tick_init();
608 housekeeping_init();
609 rcu_init_nohz(); 619 rcu_init_nohz();
610 init_timers(); 620 init_timers();
611 hrtimers_init(); 621 hrtimers_init();
@@ -674,10 +684,6 @@ asmlinkage __visible void __init start_kernel(void)
674 if (efi_enabled(EFI_RUNTIME_SERVICES)) 684 if (efi_enabled(EFI_RUNTIME_SERVICES))
675 efi_enter_virtual_mode(); 685 efi_enter_virtual_mode();
676#endif 686#endif
677#ifdef CONFIG_X86_ESPFIX64
678 /* Should be run before the first non-init thread is created */
679 init_espfix_bsp();
680#endif
681 thread_stack_cache_init(); 687 thread_stack_cache_init();
682 cred_init(); 688 cred_init();
683 fork_init(); 689 fork_init();
diff --git a/kernel/acct.c b/kernel/acct.c
index d15c0ee4d955..addf7732fb56 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -102,7 +102,7 @@ static int check_free_space(struct bsd_acct_struct *acct)
102{ 102{
103 struct kstatfs sbuf; 103 struct kstatfs sbuf;
104 104
105 if (time_is_before_jiffies(acct->needcheck)) 105 if (time_is_after_jiffies(acct->needcheck))
106 goto out; 106 goto out;
107 107
108 /* May block */ 108 /* May block */
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 7c25426d3cf5..ab94d304a634 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -53,9 +53,10 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
53{ 53{
54 bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; 54 bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
55 int numa_node = bpf_map_attr_numa_node(attr); 55 int numa_node = bpf_map_attr_numa_node(attr);
56 u32 elem_size, index_mask, max_entries;
57 bool unpriv = !capable(CAP_SYS_ADMIN);
56 struct bpf_array *array; 58 struct bpf_array *array;
57 u64 array_size; 59 u64 array_size, mask64;
58 u32 elem_size;
59 60
60 /* check sanity of attributes */ 61 /* check sanity of attributes */
61 if (attr->max_entries == 0 || attr->key_size != 4 || 62 if (attr->max_entries == 0 || attr->key_size != 4 ||
@@ -72,11 +73,32 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
72 73
73 elem_size = round_up(attr->value_size, 8); 74 elem_size = round_up(attr->value_size, 8);
74 75
76 max_entries = attr->max_entries;
77
78 /* On 32 bit archs roundup_pow_of_two() with max_entries that has
79 * upper most bit set in u32 space is undefined behavior due to
80 * resulting 1U << 32, so do it manually here in u64 space.
81 */
82 mask64 = fls_long(max_entries - 1);
83 mask64 = 1ULL << mask64;
84 mask64 -= 1;
85
86 index_mask = mask64;
87 if (unpriv) {
88 /* round up array size to nearest power of 2,
89 * since cpu will speculate within index_mask limits
90 */
91 max_entries = index_mask + 1;
92 /* Check for overflows. */
93 if (max_entries < attr->max_entries)
94 return ERR_PTR(-E2BIG);
95 }
96
75 array_size = sizeof(*array); 97 array_size = sizeof(*array);
76 if (percpu) 98 if (percpu)
77 array_size += (u64) attr->max_entries * sizeof(void *); 99 array_size += (u64) max_entries * sizeof(void *);
78 else 100 else
79 array_size += (u64) attr->max_entries * elem_size; 101 array_size += (u64) max_entries * elem_size;
80 102
81 /* make sure there is no u32 overflow later in round_up() */ 103 /* make sure there is no u32 overflow later in round_up() */
82 if (array_size >= U32_MAX - PAGE_SIZE) 104 if (array_size >= U32_MAX - PAGE_SIZE)
@@ -86,6 +108,8 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
86 array = bpf_map_area_alloc(array_size, numa_node); 108 array = bpf_map_area_alloc(array_size, numa_node);
87 if (!array) 109 if (!array)
88 return ERR_PTR(-ENOMEM); 110 return ERR_PTR(-ENOMEM);
111 array->index_mask = index_mask;
112 array->map.unpriv_array = unpriv;
89 113
90 /* copy mandatory map attributes */ 114 /* copy mandatory map attributes */
91 array->map.map_type = attr->map_type; 115 array->map.map_type = attr->map_type;
@@ -121,12 +145,13 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key)
121 if (unlikely(index >= array->map.max_entries)) 145 if (unlikely(index >= array->map.max_entries))
122 return NULL; 146 return NULL;
123 147
124 return array->value + array->elem_size * index; 148 return array->value + array->elem_size * (index & array->index_mask);
125} 149}
126 150
127/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */ 151/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
128static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) 152static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
129{ 153{
154 struct bpf_array *array = container_of(map, struct bpf_array, map);
130 struct bpf_insn *insn = insn_buf; 155 struct bpf_insn *insn = insn_buf;
131 u32 elem_size = round_up(map->value_size, 8); 156 u32 elem_size = round_up(map->value_size, 8);
132 const int ret = BPF_REG_0; 157 const int ret = BPF_REG_0;
@@ -135,7 +160,12 @@ static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
135 160
136 *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value)); 161 *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
137 *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); 162 *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
138 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3); 163 if (map->unpriv_array) {
164 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
165 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
166 } else {
167 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
168 }
139 169
140 if (is_power_of_2(elem_size)) { 170 if (is_power_of_2(elem_size)) {
141 *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size)); 171 *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
@@ -157,7 +187,7 @@ static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
157 if (unlikely(index >= array->map.max_entries)) 187 if (unlikely(index >= array->map.max_entries))
158 return NULL; 188 return NULL;
159 189
160 return this_cpu_ptr(array->pptrs[index]); 190 return this_cpu_ptr(array->pptrs[index & array->index_mask]);
161} 191}
162 192
163int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value) 193int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
@@ -177,7 +207,7 @@ int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
177 */ 207 */
178 size = round_up(map->value_size, 8); 208 size = round_up(map->value_size, 8);
179 rcu_read_lock(); 209 rcu_read_lock();
180 pptr = array->pptrs[index]; 210 pptr = array->pptrs[index & array->index_mask];
181 for_each_possible_cpu(cpu) { 211 for_each_possible_cpu(cpu) {
182 bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size); 212 bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
183 off += size; 213 off += size;
@@ -225,10 +255,11 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
225 return -EEXIST; 255 return -EEXIST;
226 256
227 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) 257 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
228 memcpy(this_cpu_ptr(array->pptrs[index]), 258 memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
229 value, map->value_size); 259 value, map->value_size);
230 else 260 else
231 memcpy(array->value + array->elem_size * index, 261 memcpy(array->value +
262 array->elem_size * (index & array->index_mask),
232 value, map->value_size); 263 value, map->value_size);
233 return 0; 264 return 0;
234} 265}
@@ -262,7 +293,7 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
262 */ 293 */
263 size = round_up(map->value_size, 8); 294 size = round_up(map->value_size, 8);
264 rcu_read_lock(); 295 rcu_read_lock();
265 pptr = array->pptrs[index]; 296 pptr = array->pptrs[index & array->index_mask];
266 for_each_possible_cpu(cpu) { 297 for_each_possible_cpu(cpu) {
267 bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size); 298 bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
268 off += size; 299 off += size;
@@ -613,6 +644,7 @@ static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
613static u32 array_of_map_gen_lookup(struct bpf_map *map, 644static u32 array_of_map_gen_lookup(struct bpf_map *map,
614 struct bpf_insn *insn_buf) 645 struct bpf_insn *insn_buf)
615{ 646{
647 struct bpf_array *array = container_of(map, struct bpf_array, map);
616 u32 elem_size = round_up(map->value_size, 8); 648 u32 elem_size = round_up(map->value_size, 8);
617 struct bpf_insn *insn = insn_buf; 649 struct bpf_insn *insn = insn_buf;
618 const int ret = BPF_REG_0; 650 const int ret = BPF_REG_0;
@@ -621,7 +653,12 @@ static u32 array_of_map_gen_lookup(struct bpf_map *map,
621 653
622 *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value)); 654 *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
623 *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); 655 *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
624 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5); 656 if (map->unpriv_array) {
657 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
658 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
659 } else {
660 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
661 }
625 if (is_power_of_2(elem_size)) 662 if (is_power_of_2(elem_size))
626 *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size)); 663 *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
627 else 664 else
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index b9f8686a84cf..7949e8b8f94e 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -767,6 +767,7 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
767} 767}
768EXPORT_SYMBOL_GPL(__bpf_call_base); 768EXPORT_SYMBOL_GPL(__bpf_call_base);
769 769
770#ifndef CONFIG_BPF_JIT_ALWAYS_ON
770/** 771/**
771 * __bpf_prog_run - run eBPF program on a given context 772 * __bpf_prog_run - run eBPF program on a given context
772 * @ctx: is the data we are operating on 773 * @ctx: is the data we are operating on
@@ -955,7 +956,7 @@ select_insn:
955 DST = tmp; 956 DST = tmp;
956 CONT; 957 CONT;
957 ALU_MOD_X: 958 ALU_MOD_X:
958 if (unlikely(SRC == 0)) 959 if (unlikely((u32)SRC == 0))
959 return 0; 960 return 0;
960 tmp = (u32) DST; 961 tmp = (u32) DST;
961 DST = do_div(tmp, (u32) SRC); 962 DST = do_div(tmp, (u32) SRC);
@@ -974,7 +975,7 @@ select_insn:
974 DST = div64_u64(DST, SRC); 975 DST = div64_u64(DST, SRC);
975 CONT; 976 CONT;
976 ALU_DIV_X: 977 ALU_DIV_X:
977 if (unlikely(SRC == 0)) 978 if (unlikely((u32)SRC == 0))
978 return 0; 979 return 0;
979 tmp = (u32) DST; 980 tmp = (u32) DST;
980 do_div(tmp, (u32) SRC); 981 do_div(tmp, (u32) SRC);
@@ -1317,6 +1318,14 @@ EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1317EVAL4(PROG_NAME_LIST, 416, 448, 480, 512) 1318EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1318}; 1319};
1319 1320
1321#else
1322static unsigned int __bpf_prog_ret0(const void *ctx,
1323 const struct bpf_insn *insn)
1324{
1325 return 0;
1326}
1327#endif
1328
1320bool bpf_prog_array_compatible(struct bpf_array *array, 1329bool bpf_prog_array_compatible(struct bpf_array *array,
1321 const struct bpf_prog *fp) 1330 const struct bpf_prog *fp)
1322{ 1331{
@@ -1364,9 +1373,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
1364 */ 1373 */
1365struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) 1374struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1366{ 1375{
1376#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1367 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1); 1377 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1368 1378
1369 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1]; 1379 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1380#else
1381 fp->bpf_func = __bpf_prog_ret0;
1382#endif
1370 1383
1371 /* eBPF JITs can rewrite the program in case constant 1384 /* eBPF JITs can rewrite the program in case constant
1372 * blinding is active. However, in case of error during 1385 * blinding is active. However, in case of error during
@@ -1376,6 +1389,12 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1376 */ 1389 */
1377 if (!bpf_prog_is_dev_bound(fp->aux)) { 1390 if (!bpf_prog_is_dev_bound(fp->aux)) {
1378 fp = bpf_int_jit_compile(fp); 1391 fp = bpf_int_jit_compile(fp);
1392#ifdef CONFIG_BPF_JIT_ALWAYS_ON
1393 if (!fp->jited) {
1394 *err = -ENOTSUPP;
1395 return fp;
1396 }
1397#endif
1379 } else { 1398 } else {
1380 *err = bpf_prog_offload_compile(fp); 1399 *err = bpf_prog_offload_compile(fp);
1381 if (*err) 1400 if (*err)
@@ -1447,7 +1466,8 @@ int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
1447 rcu_read_lock(); 1466 rcu_read_lock();
1448 prog = rcu_dereference(progs)->progs; 1467 prog = rcu_dereference(progs)->progs;
1449 for (; *prog; prog++) 1468 for (; *prog; prog++)
1450 cnt++; 1469 if (*prog != &dummy_bpf_prog.prog)
1470 cnt++;
1451 rcu_read_unlock(); 1471 rcu_read_unlock();
1452 return cnt; 1472 return cnt;
1453} 1473}
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index e469e05c8e83..3905d4bc5b80 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -114,6 +114,7 @@ static void htab_free_elems(struct bpf_htab *htab)
114 pptr = htab_elem_get_ptr(get_htab_elem(htab, i), 114 pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
115 htab->map.key_size); 115 htab->map.key_size);
116 free_percpu(pptr); 116 free_percpu(pptr);
117 cond_resched();
117 } 118 }
118free_elems: 119free_elems:
119 bpf_map_area_free(htab->elems); 120 bpf_map_area_free(htab->elems);
@@ -159,6 +160,7 @@ static int prealloc_init(struct bpf_htab *htab)
159 goto free_elems; 160 goto free_elems;
160 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size, 161 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
161 pptr); 162 pptr);
163 cond_resched();
162 } 164 }
163 165
164skip_percpu_elems: 166skip_percpu_elems:
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 01aaef1a77c5..5bb5e49ef4c3 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -368,7 +368,45 @@ out:
368 putname(pname); 368 putname(pname);
369 return ret; 369 return ret;
370} 370}
371EXPORT_SYMBOL_GPL(bpf_obj_get_user); 371
372static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
373{
374 struct bpf_prog *prog;
375 int ret = inode_permission(inode, MAY_READ | MAY_WRITE);
376 if (ret)
377 return ERR_PTR(ret);
378
379 if (inode->i_op == &bpf_map_iops)
380 return ERR_PTR(-EINVAL);
381 if (inode->i_op != &bpf_prog_iops)
382 return ERR_PTR(-EACCES);
383
384 prog = inode->i_private;
385
386 ret = security_bpf_prog(prog);
387 if (ret < 0)
388 return ERR_PTR(ret);
389
390 if (!bpf_prog_get_ok(prog, &type, false))
391 return ERR_PTR(-EINVAL);
392
393 return bpf_prog_inc(prog);
394}
395
396struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
397{
398 struct bpf_prog *prog;
399 struct path path;
400 int ret = kern_path(name, LOOKUP_FOLLOW, &path);
401 if (ret)
402 return ERR_PTR(ret);
403 prog = __get_prog_inode(d_backing_inode(path.dentry), type);
404 if (!IS_ERR(prog))
405 touch_atime(&path);
406 path_put(&path);
407 return prog;
408}
409EXPORT_SYMBOL(bpf_prog_get_type_path);
372 410
373static void bpf_evict_inode(struct inode *inode) 411static void bpf_evict_inode(struct inode *inode)
374{ 412{
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 68ec884440b7..8455b89d1bbf 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -1,3 +1,18 @@
1/*
2 * Copyright (C) 2017 Netronome Systems, Inc.
3 *
4 * This software is licensed under the GNU General License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this
6 * source tree.
7 *
8 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
9 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
10 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
11 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
12 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
13 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
14 */
15
1#include <linux/bpf.h> 16#include <linux/bpf.h>
2#include <linux/bpf_verifier.h> 17#include <linux/bpf_verifier.h>
3#include <linux/bug.h> 18#include <linux/bug.h>
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 5ee2e41893d9..1712d319c2d8 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -591,8 +591,15 @@ static void sock_map_free(struct bpf_map *map)
591 591
592 write_lock_bh(&sock->sk_callback_lock); 592 write_lock_bh(&sock->sk_callback_lock);
593 psock = smap_psock_sk(sock); 593 psock = smap_psock_sk(sock);
594 smap_list_remove(psock, &stab->sock_map[i]); 594 /* This check handles a racing sock event that can get the
595 smap_release_sock(psock, sock); 595 * sk_callback_lock before this case but after xchg happens
596 * causing the refcnt to hit zero and sock user data (psock)
597 * to be null and queued for garbage collection.
598 */
599 if (likely(psock)) {
600 smap_list_remove(psock, &stab->sock_map[i]);
601 smap_release_sock(psock, sock);
602 }
596 write_unlock_bh(&sock->sk_callback_lock); 603 write_unlock_bh(&sock->sk_callback_lock);
597 } 604 }
598 rcu_read_unlock(); 605 rcu_read_unlock();
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 2c4cfeaa8d5e..5cb783fc8224 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1057,7 +1057,7 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
1057} 1057}
1058EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); 1058EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
1059 1059
1060static bool bpf_prog_get_ok(struct bpf_prog *prog, 1060bool bpf_prog_get_ok(struct bpf_prog *prog,
1061 enum bpf_prog_type *attach_type, bool attach_drv) 1061 enum bpf_prog_type *attach_type, bool attach_drv)
1062{ 1062{
1063 /* not an attachment, just a refcount inc, always allow */ 1063 /* not an attachment, just a refcount inc, always allow */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d4593571c404..13551e623501 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -978,6 +978,13 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
978 return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno); 978 return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno);
979} 979}
980 980
981static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
982{
983 const struct bpf_reg_state *reg = cur_regs(env) + regno;
984
985 return reg->type == PTR_TO_CTX;
986}
987
981static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, 988static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
982 const struct bpf_reg_state *reg, 989 const struct bpf_reg_state *reg,
983 int off, int size, bool strict) 990 int off, int size, bool strict)
@@ -1059,6 +1066,11 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
1059 break; 1066 break;
1060 case PTR_TO_STACK: 1067 case PTR_TO_STACK:
1061 pointer_desc = "stack "; 1068 pointer_desc = "stack ";
1069 /* The stack spill tracking logic in check_stack_write()
1070 * and check_stack_read() relies on stack accesses being
1071 * aligned.
1072 */
1073 strict = true;
1062 break; 1074 break;
1063 default: 1075 default:
1064 break; 1076 break;
@@ -1067,6 +1079,29 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
1067 strict); 1079 strict);
1068} 1080}
1069 1081
1082/* truncate register to smaller size (in bytes)
1083 * must be called with size < BPF_REG_SIZE
1084 */
1085static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
1086{
1087 u64 mask;
1088
1089 /* clear high bits in bit representation */
1090 reg->var_off = tnum_cast(reg->var_off, size);
1091
1092 /* fix arithmetic bounds */
1093 mask = ((u64)1 << (size * 8)) - 1;
1094 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
1095 reg->umin_value &= mask;
1096 reg->umax_value &= mask;
1097 } else {
1098 reg->umin_value = 0;
1099 reg->umax_value = mask;
1100 }
1101 reg->smin_value = reg->umin_value;
1102 reg->smax_value = reg->umax_value;
1103}
1104
1070/* check whether memory at (regno + off) is accessible for t = (read | write) 1105/* check whether memory at (regno + off) is accessible for t = (read | write)
1071 * if t==write, value_regno is a register which value is stored into memory 1106 * if t==write, value_regno is a register which value is stored into memory
1072 * if t==read, value_regno is a register which will receive the value from memory 1107 * if t==read, value_regno is a register which will receive the value from memory
@@ -1200,9 +1235,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
1200 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && 1235 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
1201 regs[value_regno].type == SCALAR_VALUE) { 1236 regs[value_regno].type == SCALAR_VALUE) {
1202 /* b/h/w load zero-extends, mark upper bits as known 0 */ 1237 /* b/h/w load zero-extends, mark upper bits as known 0 */
1203 regs[value_regno].var_off = 1238 coerce_reg_to_size(&regs[value_regno], size);
1204 tnum_cast(regs[value_regno].var_off, size);
1205 __update_reg_bounds(&regs[value_regno]);
1206 } 1239 }
1207 return err; 1240 return err;
1208} 1241}
@@ -1232,6 +1265,12 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
1232 return -EACCES; 1265 return -EACCES;
1233 } 1266 }
1234 1267
1268 if (is_ctx_reg(env, insn->dst_reg)) {
1269 verbose(env, "BPF_XADD stores into R%d context is not allowed\n",
1270 insn->dst_reg);
1271 return -EACCES;
1272 }
1273
1235 /* check whether atomic_add can read the memory */ 1274 /* check whether atomic_add can read the memory */
1236 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 1275 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
1237 BPF_SIZE(insn->code), BPF_READ, -1); 1276 BPF_SIZE(insn->code), BPF_READ, -1);
@@ -1282,6 +1321,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
1282 tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off); 1321 tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
1283 verbose(env, "invalid variable stack read R%d var_off=%s\n", 1322 verbose(env, "invalid variable stack read R%d var_off=%s\n",
1284 regno, tn_buf); 1323 regno, tn_buf);
1324 return -EACCES;
1285 } 1325 }
1286 off = regs[regno].off + regs[regno].var_off.value; 1326 off = regs[regno].off + regs[regno].var_off.value;
1287 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || 1327 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
@@ -1674,7 +1714,13 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
1674 return -EINVAL; 1714 return -EINVAL;
1675 } 1715 }
1676 1716
1717 /* With LD_ABS/IND some JITs save/restore skb from r1. */
1677 changes_data = bpf_helper_changes_pkt_data(fn->func); 1718 changes_data = bpf_helper_changes_pkt_data(fn->func);
1719 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
1720 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
1721 func_id_name(func_id), func_id);
1722 return -EINVAL;
1723 }
1678 1724
1679 memset(&meta, 0, sizeof(meta)); 1725 memset(&meta, 0, sizeof(meta));
1680 meta.pkt_access = fn->pkt_access; 1726 meta.pkt_access = fn->pkt_access;
@@ -1696,6 +1742,13 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
1696 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); 1742 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
1697 if (err) 1743 if (err)
1698 return err; 1744 return err;
1745 if (func_id == BPF_FUNC_tail_call) {
1746 if (meta.map_ptr == NULL) {
1747 verbose(env, "verifier bug\n");
1748 return -EINVAL;
1749 }
1750 env->insn_aux_data[insn_idx].map_ptr = meta.map_ptr;
1751 }
1699 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); 1752 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
1700 if (err) 1753 if (err)
1701 return err; 1754 return err;
@@ -1766,14 +1819,6 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
1766 return 0; 1819 return 0;
1767} 1820}
1768 1821
1769static void coerce_reg_to_32(struct bpf_reg_state *reg)
1770{
1771 /* clear high 32 bits */
1772 reg->var_off = tnum_cast(reg->var_off, 4);
1773 /* Update bounds */
1774 __update_reg_bounds(reg);
1775}
1776
1777static bool signed_add_overflows(s64 a, s64 b) 1822static bool signed_add_overflows(s64 a, s64 b)
1778{ 1823{
1779 /* Do the add in u64, where overflow is well-defined */ 1824 /* Do the add in u64, where overflow is well-defined */
@@ -1794,6 +1839,41 @@ static bool signed_sub_overflows(s64 a, s64 b)
1794 return res > a; 1839 return res > a;
1795} 1840}
1796 1841
1842static bool check_reg_sane_offset(struct bpf_verifier_env *env,
1843 const struct bpf_reg_state *reg,
1844 enum bpf_reg_type type)
1845{
1846 bool known = tnum_is_const(reg->var_off);
1847 s64 val = reg->var_off.value;
1848 s64 smin = reg->smin_value;
1849
1850 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
1851 verbose(env, "math between %s pointer and %lld is not allowed\n",
1852 reg_type_str[type], val);
1853 return false;
1854 }
1855
1856 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
1857 verbose(env, "%s pointer offset %d is not allowed\n",
1858 reg_type_str[type], reg->off);
1859 return false;
1860 }
1861
1862 if (smin == S64_MIN) {
1863 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
1864 reg_type_str[type]);
1865 return false;
1866 }
1867
1868 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
1869 verbose(env, "value %lld makes %s pointer be out of bounds\n",
1870 smin, reg_type_str[type]);
1871 return false;
1872 }
1873
1874 return true;
1875}
1876
1797/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. 1877/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
1798 * Caller should also handle BPF_MOV case separately. 1878 * Caller should also handle BPF_MOV case separately.
1799 * If we return -EACCES, caller may want to try again treating pointer as a 1879 * If we return -EACCES, caller may want to try again treating pointer as a
@@ -1815,44 +1895,36 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
1815 1895
1816 dst_reg = &regs[dst]; 1896 dst_reg = &regs[dst];
1817 1897
1818 if (WARN_ON_ONCE(known && (smin_val != smax_val))) { 1898 if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
1819 print_verifier_state(env, env->cur_state); 1899 smin_val > smax_val || umin_val > umax_val) {
1820 verbose(env, 1900 /* Taint dst register if offset had invalid bounds derived from
1821 "verifier internal error: known but bad sbounds\n"); 1901 * e.g. dead branches.
1822 return -EINVAL; 1902 */
1823 } 1903 __mark_reg_unknown(dst_reg);
1824 if (WARN_ON_ONCE(known && (umin_val != umax_val))) { 1904 return 0;
1825 print_verifier_state(env, env->cur_state);
1826 verbose(env,
1827 "verifier internal error: known but bad ubounds\n");
1828 return -EINVAL;
1829 } 1905 }
1830 1906
1831 if (BPF_CLASS(insn->code) != BPF_ALU64) { 1907 if (BPF_CLASS(insn->code) != BPF_ALU64) {
1832 /* 32-bit ALU ops on pointers produce (meaningless) scalars */ 1908 /* 32-bit ALU ops on pointers produce (meaningless) scalars */
1833 if (!env->allow_ptr_leaks) 1909 verbose(env,
1834 verbose(env, 1910 "R%d 32-bit pointer arithmetic prohibited\n",
1835 "R%d 32-bit pointer arithmetic prohibited\n", 1911 dst);
1836 dst);
1837 return -EACCES; 1912 return -EACCES;
1838 } 1913 }
1839 1914
1840 if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { 1915 if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
1841 if (!env->allow_ptr_leaks) 1916 verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
1842 verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n", 1917 dst);
1843 dst);
1844 return -EACCES; 1918 return -EACCES;
1845 } 1919 }
1846 if (ptr_reg->type == CONST_PTR_TO_MAP) { 1920 if (ptr_reg->type == CONST_PTR_TO_MAP) {
1847 if (!env->allow_ptr_leaks) 1921 verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
1848 verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n", 1922 dst);
1849 dst);
1850 return -EACCES; 1923 return -EACCES;
1851 } 1924 }
1852 if (ptr_reg->type == PTR_TO_PACKET_END) { 1925 if (ptr_reg->type == PTR_TO_PACKET_END) {
1853 if (!env->allow_ptr_leaks) 1926 verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
1854 verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n", 1927 dst);
1855 dst);
1856 return -EACCES; 1928 return -EACCES;
1857 } 1929 }
1858 1930
@@ -1862,6 +1934,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
1862 dst_reg->type = ptr_reg->type; 1934 dst_reg->type = ptr_reg->type;
1863 dst_reg->id = ptr_reg->id; 1935 dst_reg->id = ptr_reg->id;
1864 1936
1937 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
1938 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
1939 return -EINVAL;
1940
1865 switch (opcode) { 1941 switch (opcode) {
1866 case BPF_ADD: 1942 case BPF_ADD:
1867 /* We can take a fixed offset as long as it doesn't overflow 1943 /* We can take a fixed offset as long as it doesn't overflow
@@ -1915,9 +1991,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
1915 case BPF_SUB: 1991 case BPF_SUB:
1916 if (dst_reg == off_reg) { 1992 if (dst_reg == off_reg) {
1917 /* scalar -= pointer. Creates an unknown scalar */ 1993 /* scalar -= pointer. Creates an unknown scalar */
1918 if (!env->allow_ptr_leaks) 1994 verbose(env, "R%d tried to subtract pointer from scalar\n",
1919 verbose(env, "R%d tried to subtract pointer from scalar\n", 1995 dst);
1920 dst);
1921 return -EACCES; 1996 return -EACCES;
1922 } 1997 }
1923 /* We don't allow subtraction from FP, because (according to 1998 /* We don't allow subtraction from FP, because (according to
@@ -1925,9 +2000,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
1925 * be able to deal with it. 2000 * be able to deal with it.
1926 */ 2001 */
1927 if (ptr_reg->type == PTR_TO_STACK) { 2002 if (ptr_reg->type == PTR_TO_STACK) {
1928 if (!env->allow_ptr_leaks) 2003 verbose(env, "R%d subtraction from stack pointer prohibited\n",
1929 verbose(env, "R%d subtraction from stack pointer prohibited\n", 2004 dst);
1930 dst);
1931 return -EACCES; 2005 return -EACCES;
1932 } 2006 }
1933 if (known && (ptr_reg->off - smin_val == 2007 if (known && (ptr_reg->off - smin_val ==
@@ -1976,28 +2050,30 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
1976 case BPF_AND: 2050 case BPF_AND:
1977 case BPF_OR: 2051 case BPF_OR:
1978 case BPF_XOR: 2052 case BPF_XOR:
1979 /* bitwise ops on pointers are troublesome, prohibit for now. 2053 /* bitwise ops on pointers are troublesome, prohibit. */
1980 * (However, in principle we could allow some cases, e.g. 2054 verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
1981 * ptr &= ~3 which would reduce min_value by 3.) 2055 dst, bpf_alu_string[opcode >> 4]);
1982 */
1983 if (!env->allow_ptr_leaks)
1984 verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
1985 dst, bpf_alu_string[opcode >> 4]);
1986 return -EACCES; 2056 return -EACCES;
1987 default: 2057 default:
1988 /* other operators (e.g. MUL,LSH) produce non-pointer results */ 2058 /* other operators (e.g. MUL,LSH) produce non-pointer results */
1989 if (!env->allow_ptr_leaks) 2059 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
1990 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", 2060 dst, bpf_alu_string[opcode >> 4]);
1991 dst, bpf_alu_string[opcode >> 4]);
1992 return -EACCES; 2061 return -EACCES;
1993 } 2062 }
1994 2063
2064 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
2065 return -EINVAL;
2066
1995 __update_reg_bounds(dst_reg); 2067 __update_reg_bounds(dst_reg);
1996 __reg_deduce_bounds(dst_reg); 2068 __reg_deduce_bounds(dst_reg);
1997 __reg_bound_offset(dst_reg); 2069 __reg_bound_offset(dst_reg);
1998 return 0; 2070 return 0;
1999} 2071}
2000 2072
2073/* WARNING: This function does calculations on 64-bit values, but the actual
2074 * execution may occur on 32-bit values. Therefore, things like bitshifts
2075 * need extra checks in the 32-bit case.
2076 */
2001static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, 2077static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
2002 struct bpf_insn *insn, 2078 struct bpf_insn *insn,
2003 struct bpf_reg_state *dst_reg, 2079 struct bpf_reg_state *dst_reg,
@@ -2008,12 +2084,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
2008 bool src_known, dst_known; 2084 bool src_known, dst_known;
2009 s64 smin_val, smax_val; 2085 s64 smin_val, smax_val;
2010 u64 umin_val, umax_val; 2086 u64 umin_val, umax_val;
2087 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
2011 2088
2012 if (BPF_CLASS(insn->code) != BPF_ALU64) {
2013 /* 32-bit ALU ops are (32,32)->64 */
2014 coerce_reg_to_32(dst_reg);
2015 coerce_reg_to_32(&src_reg);
2016 }
2017 smin_val = src_reg.smin_value; 2089 smin_val = src_reg.smin_value;
2018 smax_val = src_reg.smax_value; 2090 smax_val = src_reg.smax_value;
2019 umin_val = src_reg.umin_value; 2091 umin_val = src_reg.umin_value;
@@ -2021,6 +2093,21 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
2021 src_known = tnum_is_const(src_reg.var_off); 2093 src_known = tnum_is_const(src_reg.var_off);
2022 dst_known = tnum_is_const(dst_reg->var_off); 2094 dst_known = tnum_is_const(dst_reg->var_off);
2023 2095
2096 if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
2097 smin_val > smax_val || umin_val > umax_val) {
2098 /* Taint dst register if offset had invalid bounds derived from
2099 * e.g. dead branches.
2100 */
2101 __mark_reg_unknown(dst_reg);
2102 return 0;
2103 }
2104
2105 if (!src_known &&
2106 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
2107 __mark_reg_unknown(dst_reg);
2108 return 0;
2109 }
2110
2024 switch (opcode) { 2111 switch (opcode) {
2025 case BPF_ADD: 2112 case BPF_ADD:
2026 if (signed_add_overflows(dst_reg->smin_value, smin_val) || 2113 if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
@@ -2149,9 +2236,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
2149 __update_reg_bounds(dst_reg); 2236 __update_reg_bounds(dst_reg);
2150 break; 2237 break;
2151 case BPF_LSH: 2238 case BPF_LSH:
2152 if (umax_val > 63) { 2239 if (umax_val >= insn_bitness) {
2153 /* Shifts greater than 63 are undefined. This includes 2240 /* Shifts greater than 31 or 63 are undefined.
2154 * shifts by a negative number. 2241 * This includes shifts by a negative number.
2155 */ 2242 */
2156 mark_reg_unknown(env, regs, insn->dst_reg); 2243 mark_reg_unknown(env, regs, insn->dst_reg);
2157 break; 2244 break;
@@ -2177,27 +2264,29 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
2177 __update_reg_bounds(dst_reg); 2264 __update_reg_bounds(dst_reg);
2178 break; 2265 break;
2179 case BPF_RSH: 2266 case BPF_RSH:
2180 if (umax_val > 63) { 2267 if (umax_val >= insn_bitness) {
2181 /* Shifts greater than 63 are undefined. This includes 2268 /* Shifts greater than 31 or 63 are undefined.
2182 * shifts by a negative number. 2269 * This includes shifts by a negative number.
2183 */ 2270 */
2184 mark_reg_unknown(env, regs, insn->dst_reg); 2271 mark_reg_unknown(env, regs, insn->dst_reg);
2185 break; 2272 break;
2186 } 2273 }
2187 /* BPF_RSH is an unsigned shift, so make the appropriate casts */ 2274 /* BPF_RSH is an unsigned shift. If the value in dst_reg might
2188 if (dst_reg->smin_value < 0) { 2275 * be negative, then either:
2189 if (umin_val) { 2276 * 1) src_reg might be zero, so the sign bit of the result is
2190 /* Sign bit will be cleared */ 2277 * unknown, so we lose our signed bounds
2191 dst_reg->smin_value = 0; 2278 * 2) it's known negative, thus the unsigned bounds capture the
2192 } else { 2279 * signed bounds
2193 /* Lost sign bit information */ 2280 * 3) the signed bounds cross zero, so they tell us nothing
2194 dst_reg->smin_value = S64_MIN; 2281 * about the result
2195 dst_reg->smax_value = S64_MAX; 2282 * If the value in dst_reg is known nonnegative, then again the
2196 } 2283 * unsigned bounts capture the signed bounds.
2197 } else { 2284 * Thus, in all cases it suffices to blow away our signed bounds
2198 dst_reg->smin_value = 2285 * and rely on inferring new ones from the unsigned bounds and
2199 (u64)(dst_reg->smin_value) >> umax_val; 2286 * var_off of the result.
2200 } 2287 */
2288 dst_reg->smin_value = S64_MIN;
2289 dst_reg->smax_value = S64_MAX;
2201 if (src_known) 2290 if (src_known)
2202 dst_reg->var_off = tnum_rshift(dst_reg->var_off, 2291 dst_reg->var_off = tnum_rshift(dst_reg->var_off,
2203 umin_val); 2292 umin_val);
@@ -2213,6 +2302,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
2213 break; 2302 break;
2214 } 2303 }
2215 2304
2305 if (BPF_CLASS(insn->code) != BPF_ALU64) {
2306 /* 32-bit ALU ops are (32,32)->32 */
2307 coerce_reg_to_size(dst_reg, 4);
2308 coerce_reg_to_size(&src_reg, 4);
2309 }
2310
2216 __reg_deduce_bounds(dst_reg); 2311 __reg_deduce_bounds(dst_reg);
2217 __reg_bound_offset(dst_reg); 2312 __reg_bound_offset(dst_reg);
2218 return 0; 2313 return 0;
@@ -2227,7 +2322,6 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
2227 struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg; 2322 struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg;
2228 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0}; 2323 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
2229 u8 opcode = BPF_OP(insn->code); 2324 u8 opcode = BPF_OP(insn->code);
2230 int rc;
2231 2325
2232 dst_reg = &regs[insn->dst_reg]; 2326 dst_reg = &regs[insn->dst_reg];
2233 src_reg = NULL; 2327 src_reg = NULL;
@@ -2238,43 +2332,29 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
2238 if (src_reg->type != SCALAR_VALUE) { 2332 if (src_reg->type != SCALAR_VALUE) {
2239 if (dst_reg->type != SCALAR_VALUE) { 2333 if (dst_reg->type != SCALAR_VALUE) {
2240 /* Combining two pointers by any ALU op yields 2334 /* Combining two pointers by any ALU op yields
2241 * an arbitrary scalar. 2335 * an arbitrary scalar. Disallow all math except
2336 * pointer subtraction
2242 */ 2337 */
2243 if (!env->allow_ptr_leaks) { 2338 if (opcode == BPF_SUB){
2244 verbose(env, "R%d pointer %s pointer prohibited\n", 2339 mark_reg_unknown(env, regs, insn->dst_reg);
2245 insn->dst_reg, 2340 return 0;
2246 bpf_alu_string[opcode >> 4]);
2247 return -EACCES;
2248 } 2341 }
2249 mark_reg_unknown(env, regs, insn->dst_reg); 2342 verbose(env, "R%d pointer %s pointer prohibited\n",
2250 return 0; 2343 insn->dst_reg,
2344 bpf_alu_string[opcode >> 4]);
2345 return -EACCES;
2251 } else { 2346 } else {
2252 /* scalar += pointer 2347 /* scalar += pointer
2253 * This is legal, but we have to reverse our 2348 * This is legal, but we have to reverse our
2254 * src/dest handling in computing the range 2349 * src/dest handling in computing the range
2255 */ 2350 */
2256 rc = adjust_ptr_min_max_vals(env, insn, 2351 return adjust_ptr_min_max_vals(env, insn,
2257 src_reg, dst_reg); 2352 src_reg, dst_reg);
2258 if (rc == -EACCES && env->allow_ptr_leaks) {
2259 /* scalar += unknown scalar */
2260 __mark_reg_unknown(&off_reg);
2261 return adjust_scalar_min_max_vals(
2262 env, insn,
2263 dst_reg, off_reg);
2264 }
2265 return rc;
2266 } 2353 }
2267 } else if (ptr_reg) { 2354 } else if (ptr_reg) {
2268 /* pointer += scalar */ 2355 /* pointer += scalar */
2269 rc = adjust_ptr_min_max_vals(env, insn, 2356 return adjust_ptr_min_max_vals(env, insn,
2270 dst_reg, src_reg); 2357 dst_reg, src_reg);
2271 if (rc == -EACCES && env->allow_ptr_leaks) {
2272 /* unknown scalar += scalar */
2273 __mark_reg_unknown(dst_reg);
2274 return adjust_scalar_min_max_vals(
2275 env, insn, dst_reg, *src_reg);
2276 }
2277 return rc;
2278 } 2358 }
2279 } else { 2359 } else {
2280 /* Pretend the src is a reg with a known value, since we only 2360 /* Pretend the src is a reg with a known value, since we only
@@ -2283,17 +2363,9 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
2283 off_reg.type = SCALAR_VALUE; 2363 off_reg.type = SCALAR_VALUE;
2284 __mark_reg_known(&off_reg, insn->imm); 2364 __mark_reg_known(&off_reg, insn->imm);
2285 src_reg = &off_reg; 2365 src_reg = &off_reg;
2286 if (ptr_reg) { /* pointer += K */ 2366 if (ptr_reg) /* pointer += K */
2287 rc = adjust_ptr_min_max_vals(env, insn, 2367 return adjust_ptr_min_max_vals(env, insn,
2288 ptr_reg, src_reg); 2368 ptr_reg, src_reg);
2289 if (rc == -EACCES && env->allow_ptr_leaks) {
2290 /* unknown scalar += K */
2291 __mark_reg_unknown(dst_reg);
2292 return adjust_scalar_min_max_vals(
2293 env, insn, dst_reg, off_reg);
2294 }
2295 return rc;
2296 }
2297 } 2369 }
2298 2370
2299 /* Got here implies adding two SCALAR_VALUEs */ 2371 /* Got here implies adding two SCALAR_VALUEs */
@@ -2390,17 +2462,20 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
2390 return -EACCES; 2462 return -EACCES;
2391 } 2463 }
2392 mark_reg_unknown(env, regs, insn->dst_reg); 2464 mark_reg_unknown(env, regs, insn->dst_reg);
2393 /* high 32 bits are known zero. */ 2465 coerce_reg_to_size(&regs[insn->dst_reg], 4);
2394 regs[insn->dst_reg].var_off = tnum_cast(
2395 regs[insn->dst_reg].var_off, 4);
2396 __update_reg_bounds(&regs[insn->dst_reg]);
2397 } 2466 }
2398 } else { 2467 } else {
2399 /* case: R = imm 2468 /* case: R = imm
2400 * remember the value we stored into this reg 2469 * remember the value we stored into this reg
2401 */ 2470 */
2402 regs[insn->dst_reg].type = SCALAR_VALUE; 2471 regs[insn->dst_reg].type = SCALAR_VALUE;
2403 __mark_reg_known(regs + insn->dst_reg, insn->imm); 2472 if (BPF_CLASS(insn->code) == BPF_ALU64) {
2473 __mark_reg_known(regs + insn->dst_reg,
2474 insn->imm);
2475 } else {
2476 __mark_reg_known(regs + insn->dst_reg,
2477 (u32)insn->imm);
2478 }
2404 } 2479 }
2405 2480
2406 } else if (opcode > BPF_END) { 2481 } else if (opcode > BPF_END) {
@@ -2436,6 +2511,11 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
2436 return -EINVAL; 2511 return -EINVAL;
2437 } 2512 }
2438 2513
2514 if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) {
2515 verbose(env, "BPF_ARSH not supported for 32 bit ALU\n");
2516 return -EINVAL;
2517 }
2518
2439 if ((opcode == BPF_LSH || opcode == BPF_RSH || 2519 if ((opcode == BPF_LSH || opcode == BPF_RSH ||
2440 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { 2520 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
2441 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; 2521 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
@@ -3431,15 +3511,14 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
3431 return range_within(rold, rcur) && 3511 return range_within(rold, rcur) &&
3432 tnum_in(rold->var_off, rcur->var_off); 3512 tnum_in(rold->var_off, rcur->var_off);
3433 } else { 3513 } else {
3434 /* if we knew anything about the old value, we're not 3514 /* We're trying to use a pointer in place of a scalar.
3435 * equal, because we can't know anything about the 3515 * Even if the scalar was unbounded, this could lead to
3436 * scalar value of the pointer in the new value. 3516 * pointer leaks because scalars are allowed to leak
3517 * while pointers are not. We could make this safe in
3518 * special cases if root is calling us, but it's
3519 * probably not worth the hassle.
3437 */ 3520 */
3438 return rold->umin_value == 0 && 3521 return false;
3439 rold->umax_value == U64_MAX &&
3440 rold->smin_value == S64_MIN &&
3441 rold->smax_value == S64_MAX &&
3442 tnum_is_unknown(rold->var_off);
3443 } 3522 }
3444 case PTR_TO_MAP_VALUE: 3523 case PTR_TO_MAP_VALUE:
3445 /* If the new min/max/var_off satisfy the old ones and 3524 /* If the new min/max/var_off satisfy the old ones and
@@ -3932,6 +4011,12 @@ static int do_check(struct bpf_verifier_env *env)
3932 if (err) 4011 if (err)
3933 return err; 4012 return err;
3934 4013
4014 if (is_ctx_reg(env, insn->dst_reg)) {
4015 verbose(env, "BPF_ST stores into R%d context is not allowed\n",
4016 insn->dst_reg);
4017 return -EACCES;
4018 }
4019
3935 /* check that memory (dst_reg + off) is writeable */ 4020 /* check that memory (dst_reg + off) is writeable */
3936 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 4021 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
3937 BPF_SIZE(insn->code), BPF_WRITE, 4022 BPF_SIZE(insn->code), BPF_WRITE,
@@ -4384,6 +4469,24 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
4384 int i, cnt, delta = 0; 4469 int i, cnt, delta = 0;
4385 4470
4386 for (i = 0; i < insn_cnt; i++, insn++) { 4471 for (i = 0; i < insn_cnt; i++, insn++) {
4472 if (insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
4473 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
4474 /* due to JIT bugs clear upper 32-bits of src register
4475 * before div/mod operation
4476 */
4477 insn_buf[0] = BPF_MOV32_REG(insn->src_reg, insn->src_reg);
4478 insn_buf[1] = *insn;
4479 cnt = 2;
4480 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
4481 if (!new_prog)
4482 return -ENOMEM;
4483
4484 delta += cnt - 1;
4485 env->prog = prog = new_prog;
4486 insn = new_prog->insnsi + i + delta;
4487 continue;
4488 }
4489
4387 if (insn->code != (BPF_JMP | BPF_CALL)) 4490 if (insn->code != (BPF_JMP | BPF_CALL))
4388 continue; 4491 continue;
4389 4492
@@ -4407,6 +4510,35 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
4407 */ 4510 */
4408 insn->imm = 0; 4511 insn->imm = 0;
4409 insn->code = BPF_JMP | BPF_TAIL_CALL; 4512 insn->code = BPF_JMP | BPF_TAIL_CALL;
4513
4514 /* instead of changing every JIT dealing with tail_call
4515 * emit two extra insns:
4516 * if (index >= max_entries) goto out;
4517 * index &= array->index_mask;
4518 * to avoid out-of-bounds cpu speculation
4519 */
4520 map_ptr = env->insn_aux_data[i + delta].map_ptr;
4521 if (map_ptr == BPF_MAP_PTR_POISON) {
4522 verbose(env, "tail_call abusing map_ptr\n");
4523 return -EINVAL;
4524 }
4525 if (!map_ptr->unpriv_array)
4526 continue;
4527 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
4528 map_ptr->max_entries, 2);
4529 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
4530 container_of(map_ptr,
4531 struct bpf_array,
4532 map)->index_mask);
4533 insn_buf[2] = *insn;
4534 cnt = 3;
4535 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
4536 if (!new_prog)
4537 return -ENOMEM;
4538
4539 delta += cnt - 1;
4540 env->prog = prog = new_prog;
4541 insn = new_prog->insnsi + i + delta;
4410 continue; 4542 continue;
4411 } 4543 }
4412 4544
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 024085daab1a..a2c05d2476ac 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -123,7 +123,11 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
123 */ 123 */
124 do { 124 do {
125 css_task_iter_start(&from->self, 0, &it); 125 css_task_iter_start(&from->self, 0, &it);
126 task = css_task_iter_next(&it); 126
127 do {
128 task = css_task_iter_next(&it);
129 } while (task && (task->flags & PF_EXITING));
130
127 if (task) 131 if (task)
128 get_task_struct(task); 132 get_task_struct(task);
129 css_task_iter_end(&it); 133 css_task_iter_end(&it);
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 0b1ffe147f24..7e4c44538119 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1397,7 +1397,7 @@ static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
1397 cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name, 1397 cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
1398 cft->name); 1398 cft->name);
1399 else 1399 else
1400 strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX); 1400 strlcpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
1401 return buf; 1401 return buf;
1402} 1402}
1403 1403
@@ -1864,9 +1864,9 @@ void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts)
1864 1864
1865 root->flags = opts->flags; 1865 root->flags = opts->flags;
1866 if (opts->release_agent) 1866 if (opts->release_agent)
1867 strcpy(root->release_agent_path, opts->release_agent); 1867 strlcpy(root->release_agent_path, opts->release_agent, PATH_MAX);
1868 if (opts->name) 1868 if (opts->name)
1869 strcpy(root->name, opts->name); 1869 strlcpy(root->name, opts->name, MAX_CGROUP_ROOT_NAMELEN);
1870 if (opts->cpuset_clone_children) 1870 if (opts->cpuset_clone_children)
1871 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags); 1871 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
1872} 1872}
@@ -4125,26 +4125,24 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
4125 4125
4126static void css_task_iter_advance(struct css_task_iter *it) 4126static void css_task_iter_advance(struct css_task_iter *it)
4127{ 4127{
4128 struct list_head *l = it->task_pos; 4128 struct list_head *next;
4129 4129
4130 lockdep_assert_held(&css_set_lock); 4130 lockdep_assert_held(&css_set_lock);
4131 WARN_ON_ONCE(!l);
4132
4133repeat: 4131repeat:
4134 /* 4132 /*
4135 * Advance iterator to find next entry. cset->tasks is consumed 4133 * Advance iterator to find next entry. cset->tasks is consumed
4136 * first and then ->mg_tasks. After ->mg_tasks, we move onto the 4134 * first and then ->mg_tasks. After ->mg_tasks, we move onto the
4137 * next cset. 4135 * next cset.
4138 */ 4136 */
4139 l = l->next; 4137 next = it->task_pos->next;
4140 4138
4141 if (l == it->tasks_head) 4139 if (next == it->tasks_head)
4142 l = it->mg_tasks_head->next; 4140 next = it->mg_tasks_head->next;
4143 4141
4144 if (l == it->mg_tasks_head) 4142 if (next == it->mg_tasks_head)
4145 css_task_iter_advance_css_set(it); 4143 css_task_iter_advance_css_set(it);
4146 else 4144 else
4147 it->task_pos = l; 4145 it->task_pos = next;
4148 4146
4149 /* if PROCS, skip over tasks which aren't group leaders */ 4147 /* if PROCS, skip over tasks which aren't group leaders */
4150 if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos && 4148 if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos &&
@@ -4449,6 +4447,7 @@ static struct cftype cgroup_base_files[] = {
4449 }, 4447 },
4450 { 4448 {
4451 .name = "cgroup.threads", 4449 .name = "cgroup.threads",
4450 .flags = CFTYPE_NS_DELEGATABLE,
4452 .release = cgroup_procs_release, 4451 .release = cgroup_procs_release,
4453 .seq_start = cgroup_threads_start, 4452 .seq_start = cgroup_threads_start,
4454 .seq_next = cgroup_procs_next, 4453 .seq_next = cgroup_procs_next,
diff --git a/kernel/cgroup/debug.c b/kernel/cgroup/debug.c
index 5f780d8f6a9d..9caeda610249 100644
--- a/kernel/cgroup/debug.c
+++ b/kernel/cgroup/debug.c
@@ -50,7 +50,7 @@ static int current_css_set_read(struct seq_file *seq, void *v)
50 50
51 spin_lock_irq(&css_set_lock); 51 spin_lock_irq(&css_set_lock);
52 rcu_read_lock(); 52 rcu_read_lock();
53 cset = rcu_dereference(current->cgroups); 53 cset = task_css_set(current);
54 refcnt = refcount_read(&cset->refcount); 54 refcnt = refcount_read(&cset->refcount);
55 seq_printf(seq, "css_set %pK %d", cset, refcnt); 55 seq_printf(seq, "css_set %pK %d", cset, refcnt);
56 if (refcnt > cset->nr_tasks) 56 if (refcnt > cset->nr_tasks)
@@ -96,7 +96,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
96 96
97 spin_lock_irq(&css_set_lock); 97 spin_lock_irq(&css_set_lock);
98 rcu_read_lock(); 98 rcu_read_lock();
99 cset = rcu_dereference(current->cgroups); 99 cset = task_css_set(current);
100 list_for_each_entry(link, &cset->cgrp_links, cgrp_link) { 100 list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
101 struct cgroup *c = link->cgrp; 101 struct cgroup *c = link->cgrp;
102 102
diff --git a/kernel/cgroup/stat.c b/kernel/cgroup/stat.c
index 133b465691d6..1e111dd455c4 100644
--- a/kernel/cgroup/stat.c
+++ b/kernel/cgroup/stat.c
@@ -296,8 +296,12 @@ int cgroup_stat_init(struct cgroup *cgrp)
296 } 296 }
297 297
298 /* ->updated_children list is self terminated */ 298 /* ->updated_children list is self terminated */
299 for_each_possible_cpu(cpu) 299 for_each_possible_cpu(cpu) {
300 cgroup_cpu_stat(cgrp, cpu)->updated_children = cgrp; 300 struct cgroup_cpu_stat *cstat = cgroup_cpu_stat(cgrp, cpu);
301
302 cstat->updated_children = cgrp;
303 u64_stats_init(&cstat->sync);
304 }
301 305
302 prev_cputime_init(&cgrp->stat.prev_cputime); 306 prev_cputime_init(&cgrp->stat.prev_cputime);
303 307
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 04892a82f6ac..53f7dc65f9a3 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -80,19 +80,19 @@ static struct lockdep_map cpuhp_state_down_map =
80 STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map); 80 STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
81 81
82 82
83static void inline cpuhp_lock_acquire(bool bringup) 83static inline void cpuhp_lock_acquire(bool bringup)
84{ 84{
85 lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map); 85 lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
86} 86}
87 87
88static void inline cpuhp_lock_release(bool bringup) 88static inline void cpuhp_lock_release(bool bringup)
89{ 89{
90 lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map); 90 lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
91} 91}
92#else 92#else
93 93
94static void inline cpuhp_lock_acquire(bool bringup) { } 94static inline void cpuhp_lock_acquire(bool bringup) { }
95static void inline cpuhp_lock_release(bool bringup) { } 95static inline void cpuhp_lock_release(bool bringup) { }
96 96
97#endif 97#endif
98 98
@@ -780,8 +780,8 @@ static int takedown_cpu(unsigned int cpu)
780 BUG_ON(cpu_online(cpu)); 780 BUG_ON(cpu_online(cpu));
781 781
782 /* 782 /*
783 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all 783 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
784 * runnable tasks from the cpu, there's only the idle task left now 784 * all runnable tasks from the CPU, there's only the idle task left now
785 * that the migration thread is done doing the stop_machine thing. 785 * that the migration thread is done doing the stop_machine thing.
786 * 786 *
787 * Wait for the stop thread to go away. 787 * Wait for the stop thread to go away.
@@ -1277,9 +1277,9 @@ static struct cpuhp_step cpuhp_bp_states[] = {
1277 * before blk_mq_queue_reinit_notify() from notify_dead(), 1277 * before blk_mq_queue_reinit_notify() from notify_dead(),
1278 * otherwise a RCU stall occurs. 1278 * otherwise a RCU stall occurs.
1279 */ 1279 */
1280 [CPUHP_TIMERS_DEAD] = { 1280 [CPUHP_TIMERS_PREPARE] = {
1281 .name = "timers:dead", 1281 .name = "timers:dead",
1282 .startup.single = NULL, 1282 .startup.single = timers_prepare_cpu,
1283 .teardown.single = timers_dead_cpu, 1283 .teardown.single = timers_dead_cpu,
1284 }, 1284 },
1285 /* Kicks the plugged cpu into life */ 1285 /* Kicks the plugged cpu into life */
@@ -1289,11 +1289,6 @@ static struct cpuhp_step cpuhp_bp_states[] = {
1289 .teardown.single = NULL, 1289 .teardown.single = NULL,
1290 .cant_stop = true, 1290 .cant_stop = true,
1291 }, 1291 },
1292 [CPUHP_AP_SMPCFD_DYING] = {
1293 .name = "smpcfd:dying",
1294 .startup.single = NULL,
1295 .teardown.single = smpcfd_dying_cpu,
1296 },
1297 /* 1292 /*
1298 * Handled on controll processor until the plugged processor manages 1293 * Handled on controll processor until the plugged processor manages
1299 * this itself. 1294 * this itself.
@@ -1335,6 +1330,11 @@ static struct cpuhp_step cpuhp_ap_states[] = {
1335 .startup.single = NULL, 1330 .startup.single = NULL,
1336 .teardown.single = rcutree_dying_cpu, 1331 .teardown.single = rcutree_dying_cpu,
1337 }, 1332 },
1333 [CPUHP_AP_SMPCFD_DYING] = {
1334 .name = "smpcfd:dying",
1335 .startup.single = NULL,
1336 .teardown.single = smpcfd_dying_cpu,
1337 },
1338 /* Entry state on starting. Interrupts enabled from here on. Transient 1338 /* Entry state on starting. Interrupts enabled from here on. Transient
1339 * state for synchronsization */ 1339 * state for synchronsization */
1340 [CPUHP_AP_ONLINE] = { 1340 [CPUHP_AP_ONLINE] = {
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index b3663896278e..4f63597c824d 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -410,7 +410,7 @@ static int __init crash_save_vmcoreinfo_init(void)
410 VMCOREINFO_SYMBOL(contig_page_data); 410 VMCOREINFO_SYMBOL(contig_page_data);
411#endif 411#endif
412#ifdef CONFIG_SPARSEMEM 412#ifdef CONFIG_SPARSEMEM
413 VMCOREINFO_SYMBOL(mem_section); 413 VMCOREINFO_SYMBOL_ARRAY(mem_section);
414 VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS); 414 VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
415 VMCOREINFO_STRUCT_SIZE(mem_section); 415 VMCOREINFO_STRUCT_SIZE(mem_section);
416 VMCOREINFO_OFFSET(mem_section, section_mem_map); 416 VMCOREINFO_OFFSET(mem_section, section_mem_map);
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index e74be38245ad..ed5d34925ad0 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -350,7 +350,7 @@ poll_again:
350 } 350 }
351 kdb_printf("\n"); 351 kdb_printf("\n");
352 for (i = 0; i < count; i++) { 352 for (i = 0; i < count; i++) {
353 if (kallsyms_symbol_next(p_tmp, i) < 0) 353 if (WARN_ON(!kallsyms_symbol_next(p_tmp, i)))
354 break; 354 break;
355 kdb_printf("%s ", p_tmp); 355 kdb_printf("%s ", p_tmp);
356 *(p_tmp + len) = '\0'; 356 *(p_tmp + len) = '\0';
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 4a1c33416b6a..e2764d767f18 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -51,16 +51,16 @@ void __delayacct_tsk_init(struct task_struct *tsk)
51 * Finish delay accounting for a statistic using its timestamps (@start), 51 * Finish delay accounting for a statistic using its timestamps (@start),
52 * accumalator (@total) and @count 52 * accumalator (@total) and @count
53 */ 53 */
54static void delayacct_end(u64 *start, u64 *total, u32 *count) 54static void delayacct_end(spinlock_t *lock, u64 *start, u64 *total, u32 *count)
55{ 55{
56 s64 ns = ktime_get_ns() - *start; 56 s64 ns = ktime_get_ns() - *start;
57 unsigned long flags; 57 unsigned long flags;
58 58
59 if (ns > 0) { 59 if (ns > 0) {
60 spin_lock_irqsave(&current->delays->lock, flags); 60 spin_lock_irqsave(lock, flags);
61 *total += ns; 61 *total += ns;
62 (*count)++; 62 (*count)++;
63 spin_unlock_irqrestore(&current->delays->lock, flags); 63 spin_unlock_irqrestore(lock, flags);
64 } 64 }
65} 65}
66 66
@@ -69,17 +69,25 @@ void __delayacct_blkio_start(void)
69 current->delays->blkio_start = ktime_get_ns(); 69 current->delays->blkio_start = ktime_get_ns();
70} 70}
71 71
72void __delayacct_blkio_end(void) 72/*
73 * We cannot rely on the `current` macro, as we haven't yet switched back to
74 * the process being woken.
75 */
76void __delayacct_blkio_end(struct task_struct *p)
73{ 77{
74 if (current->delays->flags & DELAYACCT_PF_SWAPIN) 78 struct task_delay_info *delays = p->delays;
75 /* Swapin block I/O */ 79 u64 *total;
76 delayacct_end(&current->delays->blkio_start, 80 u32 *count;
77 &current->delays->swapin_delay, 81
78 &current->delays->swapin_count); 82 if (p->delays->flags & DELAYACCT_PF_SWAPIN) {
79 else /* Other block I/O */ 83 total = &delays->swapin_delay;
80 delayacct_end(&current->delays->blkio_start, 84 count = &delays->swapin_count;
81 &current->delays->blkio_delay, 85 } else {
82 &current->delays->blkio_count); 86 total = &delays->blkio_delay;
87 count = &delays->blkio_count;
88 }
89
90 delayacct_end(&delays->lock, &delays->blkio_start, total, count);
83} 91}
84 92
85int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) 93int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
@@ -153,8 +161,10 @@ void __delayacct_freepages_start(void)
153 161
154void __delayacct_freepages_end(void) 162void __delayacct_freepages_end(void)
155{ 163{
156 delayacct_end(&current->delays->freepages_start, 164 delayacct_end(
157 &current->delays->freepages_delay, 165 &current->delays->lock,
158 &current->delays->freepages_count); 166 &current->delays->freepages_start,
167 &current->delays->freepages_delay,
168 &current->delays->freepages_count);
159} 169}
160 170
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 16beab4767e1..5d8f4031f8d5 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1231,6 +1231,10 @@ static void put_ctx(struct perf_event_context *ctx)
1231 * perf_event_context::lock 1231 * perf_event_context::lock
1232 * perf_event::mmap_mutex 1232 * perf_event::mmap_mutex
1233 * mmap_sem 1233 * mmap_sem
1234 *
1235 * cpu_hotplug_lock
1236 * pmus_lock
1237 * cpuctx->mutex / perf_event_context::mutex
1234 */ 1238 */
1235static struct perf_event_context * 1239static struct perf_event_context *
1236perf_event_ctx_lock_nested(struct perf_event *event, int nesting) 1240perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
@@ -4196,6 +4200,7 @@ int perf_event_release_kernel(struct perf_event *event)
4196{ 4200{
4197 struct perf_event_context *ctx = event->ctx; 4201 struct perf_event_context *ctx = event->ctx;
4198 struct perf_event *child, *tmp; 4202 struct perf_event *child, *tmp;
4203 LIST_HEAD(free_list);
4199 4204
4200 /* 4205 /*
4201 * If we got here through err_file: fput(event_file); we will not have 4206 * If we got here through err_file: fput(event_file); we will not have
@@ -4268,8 +4273,7 @@ again:
4268 struct perf_event, child_list); 4273 struct perf_event, child_list);
4269 if (tmp == child) { 4274 if (tmp == child) {
4270 perf_remove_from_context(child, DETACH_GROUP); 4275 perf_remove_from_context(child, DETACH_GROUP);
4271 list_del(&child->child_list); 4276 list_move(&child->child_list, &free_list);
4272 free_event(child);
4273 /* 4277 /*
4274 * This matches the refcount bump in inherit_event(); 4278 * This matches the refcount bump in inherit_event();
4275 * this can't be the last reference. 4279 * this can't be the last reference.
@@ -4284,6 +4288,11 @@ again:
4284 } 4288 }
4285 mutex_unlock(&event->child_mutex); 4289 mutex_unlock(&event->child_mutex);
4286 4290
4291 list_for_each_entry_safe(child, tmp, &free_list, child_list) {
4292 list_del(&child->child_list);
4293 free_event(child);
4294 }
4295
4287no_ctx: 4296no_ctx:
4288 put_event(event); /* Must be the 'last' reference */ 4297 put_event(event); /* Must be the 'last' reference */
4289 return 0; 4298 return 0;
@@ -6639,6 +6648,7 @@ static void perf_event_namespaces_output(struct perf_event *event,
6639 struct perf_namespaces_event *namespaces_event = data; 6648 struct perf_namespaces_event *namespaces_event = data;
6640 struct perf_output_handle handle; 6649 struct perf_output_handle handle;
6641 struct perf_sample_data sample; 6650 struct perf_sample_data sample;
6651 u16 header_size = namespaces_event->event_id.header.size;
6642 int ret; 6652 int ret;
6643 6653
6644 if (!perf_event_namespaces_match(event)) 6654 if (!perf_event_namespaces_match(event))
@@ -6649,7 +6659,7 @@ static void perf_event_namespaces_output(struct perf_event *event,
6649 ret = perf_output_begin(&handle, event, 6659 ret = perf_output_begin(&handle, event,
6650 namespaces_event->event_id.header.size); 6660 namespaces_event->event_id.header.size);
6651 if (ret) 6661 if (ret)
6652 return; 6662 goto out;
6653 6663
6654 namespaces_event->event_id.pid = perf_event_pid(event, 6664 namespaces_event->event_id.pid = perf_event_pid(event,
6655 namespaces_event->task); 6665 namespaces_event->task);
@@ -6661,6 +6671,8 @@ static void perf_event_namespaces_output(struct perf_event *event,
6661 perf_event__output_id_sample(event, &handle, &sample); 6671 perf_event__output_id_sample(event, &handle, &sample);
6662 6672
6663 perf_output_end(&handle); 6673 perf_output_end(&handle);
6674out:
6675 namespaces_event->event_id.header.size = header_size;
6664} 6676}
6665 6677
6666static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info, 6678static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
@@ -7987,11 +7999,11 @@ static void bpf_overflow_handler(struct perf_event *event,
7987{ 7999{
7988 struct bpf_perf_event_data_kern ctx = { 8000 struct bpf_perf_event_data_kern ctx = {
7989 .data = data, 8001 .data = data,
7990 .regs = regs,
7991 .event = event, 8002 .event = event,
7992 }; 8003 };
7993 int ret = 0; 8004 int ret = 0;
7994 8005
8006 ctx.regs = perf_arch_bpf_user_pt_regs(regs);
7995 preempt_disable(); 8007 preempt_disable();
7996 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) 8008 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
7997 goto out; 8009 goto out;
@@ -8513,6 +8525,29 @@ fail_clear_files:
8513 return ret; 8525 return ret;
8514} 8526}
8515 8527
8528static int
8529perf_tracepoint_set_filter(struct perf_event *event, char *filter_str)
8530{
8531 struct perf_event_context *ctx = event->ctx;
8532 int ret;
8533
8534 /*
8535 * Beware, here be dragons!!
8536 *
8537 * the tracepoint muck will deadlock against ctx->mutex, but the tracepoint
8538 * stuff does not actually need it. So temporarily drop ctx->mutex. As per
8539 * perf_event_ctx_lock() we already have a reference on ctx.
8540 *
8541 * This can result in event getting moved to a different ctx, but that
8542 * does not affect the tracepoint state.
8543 */
8544 mutex_unlock(&ctx->mutex);
8545 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
8546 mutex_lock(&ctx->mutex);
8547
8548 return ret;
8549}
8550
8516static int perf_event_set_filter(struct perf_event *event, void __user *arg) 8551static int perf_event_set_filter(struct perf_event *event, void __user *arg)
8517{ 8552{
8518 char *filter_str; 8553 char *filter_str;
@@ -8529,8 +8564,7 @@ static int perf_event_set_filter(struct perf_event *event, void __user *arg)
8529 8564
8530 if (IS_ENABLED(CONFIG_EVENT_TRACING) && 8565 if (IS_ENABLED(CONFIG_EVENT_TRACING) &&
8531 event->attr.type == PERF_TYPE_TRACEPOINT) 8566 event->attr.type == PERF_TYPE_TRACEPOINT)
8532 ret = ftrace_profile_set_filter(event, event->attr.config, 8567 ret = perf_tracepoint_set_filter(event, filter_str);
8533 filter_str);
8534 else if (has_addr_filter(event)) 8568 else if (has_addr_filter(event))
8535 ret = perf_event_set_addr_filter(event, filter_str); 8569 ret = perf_event_set_addr_filter(event, filter_str);
8536 8570
@@ -9165,7 +9199,13 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
9165 if (!try_module_get(pmu->module)) 9199 if (!try_module_get(pmu->module))
9166 return -ENODEV; 9200 return -ENODEV;
9167 9201
9168 if (event->group_leader != event) { 9202 /*
9203 * A number of pmu->event_init() methods iterate the sibling_list to,
9204 * for example, validate if the group fits on the PMU. Therefore,
9205 * if this is a sibling event, acquire the ctx->mutex to protect
9206 * the sibling_list.
9207 */
9208 if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) {
9169 /* 9209 /*
9170 * This ctx->mutex can nest when we're called through 9210 * This ctx->mutex can nest when we're called through
9171 * inheritance. See the perf_event_ctx_lock_nested() comment. 9211 * inheritance. See the perf_event_ctx_lock_nested() comment.
diff --git a/kernel/exit.c b/kernel/exit.c
index 6b4298a41167..995453d9fb55 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1755,3 +1755,12 @@ Efault:
1755 return -EFAULT; 1755 return -EFAULT;
1756} 1756}
1757#endif 1757#endif
1758
1759__weak void abort(void)
1760{
1761 BUG();
1762
1763 /* if that doesn't kill us, halt */
1764 panic("Oops failed to kill thread");
1765}
1766EXPORT_SYMBOL(abort);
diff --git a/kernel/fork.c b/kernel/fork.c
index 432eadf6b58c..2295fc69717f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -721,8 +721,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
721 goto out; 721 goto out;
722 } 722 }
723 /* a new mm has just been created */ 723 /* a new mm has just been created */
724 arch_dup_mmap(oldmm, mm); 724 retval = arch_dup_mmap(oldmm, mm);
725 retval = 0;
726out: 725out:
727 up_write(&mm->mmap_sem); 726 up_write(&mm->mmap_sem);
728 flush_tlb_mm(oldmm); 727 flush_tlb_mm(oldmm);
diff --git a/kernel/futex.c b/kernel/futex.c
index 76ed5921117a..7f719d110908 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1582,8 +1582,8 @@ static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
1582{ 1582{
1583 unsigned int op = (encoded_op & 0x70000000) >> 28; 1583 unsigned int op = (encoded_op & 0x70000000) >> 28;
1584 unsigned int cmp = (encoded_op & 0x0f000000) >> 24; 1584 unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
1585 int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 12); 1585 int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
1586 int cmparg = sign_extend32(encoded_op & 0x00000fff, 12); 1586 int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
1587 int oldval, ret; 1587 int oldval, ret;
1588 1588
1589 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) { 1589 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
@@ -1878,6 +1878,9 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
1878 struct futex_q *this, *next; 1878 struct futex_q *this, *next;
1879 DEFINE_WAKE_Q(wake_q); 1879 DEFINE_WAKE_Q(wake_q);
1880 1880
1881 if (nr_wake < 0 || nr_requeue < 0)
1882 return -EINVAL;
1883
1881 /* 1884 /*
1882 * When PI not supported: return -ENOSYS if requeue_pi is true, 1885 * When PI not supported: return -ENOSYS if requeue_pi is true,
1883 * consequently the compiler knows requeue_pi is always false past 1886 * consequently the compiler knows requeue_pi is always false past
@@ -2294,34 +2297,33 @@ static void unqueue_me_pi(struct futex_q *q)
2294 spin_unlock(q->lock_ptr); 2297 spin_unlock(q->lock_ptr);
2295} 2298}
2296 2299
2297/*
2298 * Fixup the pi_state owner with the new owner.
2299 *
2300 * Must be called with hash bucket lock held and mm->sem held for non
2301 * private futexes.
2302 */
2303static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, 2300static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
2304 struct task_struct *newowner) 2301 struct task_struct *argowner)
2305{ 2302{
2306 u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
2307 struct futex_pi_state *pi_state = q->pi_state; 2303 struct futex_pi_state *pi_state = q->pi_state;
2308 u32 uval, uninitialized_var(curval), newval; 2304 u32 uval, uninitialized_var(curval), newval;
2309 struct task_struct *oldowner; 2305 struct task_struct *oldowner, *newowner;
2306 u32 newtid;
2310 int ret; 2307 int ret;
2311 2308
2309 lockdep_assert_held(q->lock_ptr);
2310
2312 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); 2311 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
2313 2312
2314 oldowner = pi_state->owner; 2313 oldowner = pi_state->owner;
2315 /* Owner died? */
2316 if (!pi_state->owner)
2317 newtid |= FUTEX_OWNER_DIED;
2318 2314
2319 /* 2315 /*
2320 * We are here either because we stole the rtmutex from the 2316 * We are here because either:
2321 * previous highest priority waiter or we are the highest priority 2317 *
2322 * waiter but have failed to get the rtmutex the first time. 2318 * - we stole the lock and pi_state->owner needs updating to reflect
2319 * that (@argowner == current),
2320 *
2321 * or:
2323 * 2322 *
2324 * We have to replace the newowner TID in the user space variable. 2323 * - someone stole our lock and we need to fix things to point to the
2324 * new owner (@argowner == NULL).
2325 *
2326 * Either way, we have to replace the TID in the user space variable.
2325 * This must be atomic as we have to preserve the owner died bit here. 2327 * This must be atomic as we have to preserve the owner died bit here.
2326 * 2328 *
2327 * Note: We write the user space value _before_ changing the pi_state 2329 * Note: We write the user space value _before_ changing the pi_state
@@ -2334,6 +2336,45 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
2334 * in the PID check in lookup_pi_state. 2336 * in the PID check in lookup_pi_state.
2335 */ 2337 */
2336retry: 2338retry:
2339 if (!argowner) {
2340 if (oldowner != current) {
2341 /*
2342 * We raced against a concurrent self; things are
2343 * already fixed up. Nothing to do.
2344 */
2345 ret = 0;
2346 goto out_unlock;
2347 }
2348
2349 if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
2350 /* We got the lock after all, nothing to fix. */
2351 ret = 0;
2352 goto out_unlock;
2353 }
2354
2355 /*
2356 * Since we just failed the trylock; there must be an owner.
2357 */
2358 newowner = rt_mutex_owner(&pi_state->pi_mutex);
2359 BUG_ON(!newowner);
2360 } else {
2361 WARN_ON_ONCE(argowner != current);
2362 if (oldowner == current) {
2363 /*
2364 * We raced against a concurrent self; things are
2365 * already fixed up. Nothing to do.
2366 */
2367 ret = 0;
2368 goto out_unlock;
2369 }
2370 newowner = argowner;
2371 }
2372
2373 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
2374 /* Owner died? */
2375 if (!pi_state->owner)
2376 newtid |= FUTEX_OWNER_DIED;
2377
2337 if (get_futex_value_locked(&uval, uaddr)) 2378 if (get_futex_value_locked(&uval, uaddr))
2338 goto handle_fault; 2379 goto handle_fault;
2339 2380
@@ -2434,9 +2475,9 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
2434 * Got the lock. We might not be the anticipated owner if we 2475 * Got the lock. We might not be the anticipated owner if we
2435 * did a lock-steal - fix up the PI-state in that case: 2476 * did a lock-steal - fix up the PI-state in that case:
2436 * 2477 *
2437 * We can safely read pi_state->owner without holding wait_lock 2478 * Speculative pi_state->owner read (we don't hold wait_lock);
2438 * because we now own the rt_mutex, only the owner will attempt 2479 * since we own the lock pi_state->owner == current is the
2439 * to change it. 2480 * stable state, anything else needs more attention.
2440 */ 2481 */
2441 if (q->pi_state->owner != current) 2482 if (q->pi_state->owner != current)
2442 ret = fixup_pi_state_owner(uaddr, q, current); 2483 ret = fixup_pi_state_owner(uaddr, q, current);
@@ -2444,6 +2485,19 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
2444 } 2485 }
2445 2486
2446 /* 2487 /*
2488 * If we didn't get the lock; check if anybody stole it from us. In
2489 * that case, we need to fix up the uval to point to them instead of
2490 * us, otherwise bad things happen. [10]
2491 *
2492 * Another speculative read; pi_state->owner == current is unstable
2493 * but needs our attention.
2494 */
2495 if (q->pi_state->owner == current) {
2496 ret = fixup_pi_state_owner(uaddr, q, NULL);
2497 goto out;
2498 }
2499
2500 /*
2447 * Paranoia check. If we did not take the lock, then we should not be 2501 * Paranoia check. If we did not take the lock, then we should not be
2448 * the owner of the rt_mutex. 2502 * the owner of the rt_mutex.
2449 */ 2503 */
diff --git a/kernel/groups.c b/kernel/groups.c
index e357bc800111..daae2f2dc6d4 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -86,11 +86,12 @@ static int gid_cmp(const void *_a, const void *_b)
86 return gid_gt(a, b) - gid_lt(a, b); 86 return gid_gt(a, b) - gid_lt(a, b);
87} 87}
88 88
89static void groups_sort(struct group_info *group_info) 89void groups_sort(struct group_info *group_info)
90{ 90{
91 sort(group_info->gid, group_info->ngroups, sizeof(*group_info->gid), 91 sort(group_info->gid, group_info->ngroups, sizeof(*group_info->gid),
92 gid_cmp, NULL); 92 gid_cmp, NULL);
93} 93}
94EXPORT_SYMBOL(groups_sort);
94 95
95/* a simple bsearch */ 96/* a simple bsearch */
96int groups_search(const struct group_info *group_info, kgid_t grp) 97int groups_search(const struct group_info *group_info, kgid_t grp)
@@ -122,7 +123,6 @@ int groups_search(const struct group_info *group_info, kgid_t grp)
122void set_groups(struct cred *new, struct group_info *group_info) 123void set_groups(struct cred *new, struct group_info *group_info)
123{ 124{
124 put_group_info(new->group_info); 125 put_group_info(new->group_info);
125 groups_sort(group_info);
126 get_group_info(group_info); 126 get_group_info(group_info);
127 new->group_info = group_info; 127 new->group_info = group_info;
128} 128}
@@ -206,6 +206,7 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
206 return retval; 206 return retval;
207 } 207 }
208 208
209 groups_sort(group_info);
209 retval = set_current_groups(group_info); 210 retval = set_current_groups(group_info);
210 put_group_info(group_info); 211 put_group_info(group_info);
211 212
diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h
index 17f05ef8f575..e4d3819a91cc 100644
--- a/kernel/irq/debug.h
+++ b/kernel/irq/debug.h
@@ -12,6 +12,11 @@
12 12
13static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) 13static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
14{ 14{
15 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
16
17 if (!__ratelimit(&ratelimit))
18 return;
19
15 printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n", 20 printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
16 irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled); 21 irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
17 printk("->handle_irq(): %p, ", desc->handle_irq); 22 printk("->handle_irq(): %p, ", desc->handle_irq);
diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
index 7f608ac39653..acfaaef8672a 100644
--- a/kernel/irq/debugfs.c
+++ b/kernel/irq/debugfs.c
@@ -113,6 +113,7 @@ static const struct irq_bit_descr irqdata_states[] = {
113 BIT_MASK_DESCR(IRQD_SETAFFINITY_PENDING), 113 BIT_MASK_DESCR(IRQD_SETAFFINITY_PENDING),
114 BIT_MASK_DESCR(IRQD_AFFINITY_MANAGED), 114 BIT_MASK_DESCR(IRQD_AFFINITY_MANAGED),
115 BIT_MASK_DESCR(IRQD_MANAGED_SHUTDOWN), 115 BIT_MASK_DESCR(IRQD_MANAGED_SHUTDOWN),
116 BIT_MASK_DESCR(IRQD_CAN_RESERVE),
116 117
117 BIT_MASK_DESCR(IRQD_FORWARDED_TO_VCPU), 118 BIT_MASK_DESCR(IRQD_FORWARDED_TO_VCPU),
118 119
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index c26c5bb6b491..508c03dfef25 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -364,10 +364,11 @@ irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
364EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip); 364EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip);
365 365
366/* 366/*
367 * Separate lockdep class for interrupt chip which can nest irq_desc 367 * Separate lockdep classes for interrupt chip which can nest irq_desc
368 * lock. 368 * lock and request mutex.
369 */ 369 */
370static struct lock_class_key irq_nested_lock_class; 370static struct lock_class_key irq_nested_lock_class;
371static struct lock_class_key irq_nested_request_class;
371 372
372/* 373/*
373 * irq_map_generic_chip - Map a generic chip for an irq domain 374 * irq_map_generic_chip - Map a generic chip for an irq domain
@@ -409,7 +410,8 @@ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
409 set_bit(idx, &gc->installed); 410 set_bit(idx, &gc->installed);
410 411
411 if (dgc->gc_flags & IRQ_GC_INIT_NESTED_LOCK) 412 if (dgc->gc_flags & IRQ_GC_INIT_NESTED_LOCK)
412 irq_set_lockdep_class(virq, &irq_nested_lock_class); 413 irq_set_lockdep_class(virq, &irq_nested_lock_class,
414 &irq_nested_request_class);
413 415
414 if (chip->irq_calc_mask) 416 if (chip->irq_calc_mask)
415 chip->irq_calc_mask(data); 417 chip->irq_calc_mask(data);
@@ -479,7 +481,8 @@ void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
479 continue; 481 continue;
480 482
481 if (flags & IRQ_GC_INIT_NESTED_LOCK) 483 if (flags & IRQ_GC_INIT_NESTED_LOCK)
482 irq_set_lockdep_class(i, &irq_nested_lock_class); 484 irq_set_lockdep_class(i, &irq_nested_lock_class,
485 &irq_nested_request_class);
483 486
484 if (!(flags & IRQ_GC_NO_MASK)) { 487 if (!(flags & IRQ_GC_NO_MASK)) {
485 struct irq_data *d = irq_get_irq_data(i); 488 struct irq_data *d = irq_get_irq_data(i);
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 07d08ca701ec..ab19371eab9b 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -440,7 +440,7 @@ static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear)
440#endif /* !CONFIG_GENERIC_PENDING_IRQ */ 440#endif /* !CONFIG_GENERIC_PENDING_IRQ */
441 441
442#if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY) 442#if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY)
443static inline int irq_domain_activate_irq(struct irq_data *data, bool early) 443static inline int irq_domain_activate_irq(struct irq_data *data, bool reserve)
444{ 444{
445 irqd_set_activated(data); 445 irqd_set_activated(data);
446 return 0; 446 return 0;
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 4f4f60015e8a..62068ad46930 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -1693,7 +1693,7 @@ static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
1693 } 1693 }
1694} 1694}
1695 1695
1696static int __irq_domain_activate_irq(struct irq_data *irqd, bool early) 1696static int __irq_domain_activate_irq(struct irq_data *irqd, bool reserve)
1697{ 1697{
1698 int ret = 0; 1698 int ret = 0;
1699 1699
@@ -1702,9 +1702,9 @@ static int __irq_domain_activate_irq(struct irq_data *irqd, bool early)
1702 1702
1703 if (irqd->parent_data) 1703 if (irqd->parent_data)
1704 ret = __irq_domain_activate_irq(irqd->parent_data, 1704 ret = __irq_domain_activate_irq(irqd->parent_data,
1705 early); 1705 reserve);
1706 if (!ret && domain->ops->activate) { 1706 if (!ret && domain->ops->activate) {
1707 ret = domain->ops->activate(domain, irqd, early); 1707 ret = domain->ops->activate(domain, irqd, reserve);
1708 /* Rollback in case of error */ 1708 /* Rollback in case of error */
1709 if (ret && irqd->parent_data) 1709 if (ret && irqd->parent_data)
1710 __irq_domain_deactivate_irq(irqd->parent_data); 1710 __irq_domain_deactivate_irq(irqd->parent_data);
@@ -1716,17 +1716,18 @@ static int __irq_domain_activate_irq(struct irq_data *irqd, bool early)
1716/** 1716/**
1717 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate 1717 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
1718 * interrupt 1718 * interrupt
1719 * @irq_data: outermost irq_data associated with interrupt 1719 * @irq_data: Outermost irq_data associated with interrupt
1720 * @reserve: If set only reserve an interrupt vector instead of assigning one
1720 * 1721 *
1721 * This is the second step to call domain_ops->activate to program interrupt 1722 * This is the second step to call domain_ops->activate to program interrupt
1722 * controllers, so the interrupt could actually get delivered. 1723 * controllers, so the interrupt could actually get delivered.
1723 */ 1724 */
1724int irq_domain_activate_irq(struct irq_data *irq_data, bool early) 1725int irq_domain_activate_irq(struct irq_data *irq_data, bool reserve)
1725{ 1726{
1726 int ret = 0; 1727 int ret = 0;
1727 1728
1728 if (!irqd_is_activated(irq_data)) 1729 if (!irqd_is_activated(irq_data))
1729 ret = __irq_domain_activate_irq(irq_data, early); 1730 ret = __irq_domain_activate_irq(irq_data, reserve);
1730 if (!ret) 1731 if (!ret)
1731 irqd_set_activated(irq_data); 1732 irqd_set_activated(irq_data);
1732 return ret; 1733 return ret;
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 7df2480005f8..5187dfe809ac 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -321,15 +321,23 @@ void irq_matrix_remove_reserved(struct irq_matrix *m)
321int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk, 321int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
322 bool reserved, unsigned int *mapped_cpu) 322 bool reserved, unsigned int *mapped_cpu)
323{ 323{
324 unsigned int cpu; 324 unsigned int cpu, best_cpu, maxavl = 0;
325 struct cpumap *cm;
326 unsigned int bit;
325 327
328 best_cpu = UINT_MAX;
326 for_each_cpu(cpu, msk) { 329 for_each_cpu(cpu, msk) {
327 struct cpumap *cm = per_cpu_ptr(m->maps, cpu); 330 cm = per_cpu_ptr(m->maps, cpu);
328 unsigned int bit;
329 331
330 if (!cm->online) 332 if (!cm->online || cm->available <= maxavl)
331 continue; 333 continue;
332 334
335 best_cpu = cpu;
336 maxavl = cm->available;
337 }
338
339 if (maxavl) {
340 cm = per_cpu_ptr(m->maps, best_cpu);
333 bit = matrix_alloc_area(m, cm, 1, false); 341 bit = matrix_alloc_area(m, cm, 1, false);
334 if (bit < m->alloc_end) { 342 if (bit < m->alloc_end) {
335 cm->allocated++; 343 cm->allocated++;
@@ -338,8 +346,8 @@ int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
338 m->global_available--; 346 m->global_available--;
339 if (reserved) 347 if (reserved)
340 m->global_reserved--; 348 m->global_reserved--;
341 *mapped_cpu = cpu; 349 *mapped_cpu = best_cpu;
342 trace_irq_matrix_alloc(bit, cpu, m, cm); 350 trace_irq_matrix_alloc(bit, best_cpu, m, cm);
343 return bit; 351 return bit;
344 } 352 }
345 } 353 }
@@ -384,7 +392,9 @@ unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown)
384{ 392{
385 struct cpumap *cm = this_cpu_ptr(m->maps); 393 struct cpumap *cm = this_cpu_ptr(m->maps);
386 394
387 return (m->global_available - cpudown) ? cm->available : 0; 395 if (!cpudown)
396 return m->global_available;
397 return m->global_available - cm->available;
388} 398}
389 399
390/** 400/**
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index edb987b2c58d..2f3c4f5382cc 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -339,6 +339,40 @@ int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
339 return ret; 339 return ret;
340} 340}
341 341
342/*
343 * Carefully check whether the device can use reservation mode. If
344 * reservation mode is enabled then the early activation will assign a
345 * dummy vector to the device. If the PCI/MSI device does not support
346 * masking of the entry then this can result in spurious interrupts when
347 * the device driver is not absolutely careful. But even then a malfunction
348 * of the hardware could result in a spurious interrupt on the dummy vector
349 * and render the device unusable. If the entry can be masked then the core
350 * logic will prevent the spurious interrupt and reservation mode can be
351 * used. For now reservation mode is restricted to PCI/MSI.
352 */
353static bool msi_check_reservation_mode(struct irq_domain *domain,
354 struct msi_domain_info *info,
355 struct device *dev)
356{
357 struct msi_desc *desc;
358
359 if (domain->bus_token != DOMAIN_BUS_PCI_MSI)
360 return false;
361
362 if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
363 return false;
364
365 if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
366 return false;
367
368 /*
369 * Checking the first MSI descriptor is sufficient. MSIX supports
370 * masking and MSI does so when the maskbit is set.
371 */
372 desc = first_msi_entry(dev);
373 return desc->msi_attrib.is_msix || desc->msi_attrib.maskbit;
374}
375
342/** 376/**
343 * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain 377 * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain
344 * @domain: The domain to allocate from 378 * @domain: The domain to allocate from
@@ -353,9 +387,11 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
353{ 387{
354 struct msi_domain_info *info = domain->host_data; 388 struct msi_domain_info *info = domain->host_data;
355 struct msi_domain_ops *ops = info->ops; 389 struct msi_domain_ops *ops = info->ops;
356 msi_alloc_info_t arg; 390 struct irq_data *irq_data;
357 struct msi_desc *desc; 391 struct msi_desc *desc;
392 msi_alloc_info_t arg;
358 int i, ret, virq; 393 int i, ret, virq;
394 bool can_reserve;
359 395
360 ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg); 396 ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
361 if (ret) 397 if (ret)
@@ -385,6 +421,8 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
385 if (ops->msi_finish) 421 if (ops->msi_finish)
386 ops->msi_finish(&arg, 0); 422 ops->msi_finish(&arg, 0);
387 423
424 can_reserve = msi_check_reservation_mode(domain, info, dev);
425
388 for_each_msi_entry(desc, dev) { 426 for_each_msi_entry(desc, dev) {
389 virq = desc->irq; 427 virq = desc->irq;
390 if (desc->nvec_used == 1) 428 if (desc->nvec_used == 1)
@@ -397,15 +435,25 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
397 * the MSI entries before the PCI layer enables MSI in the 435 * the MSI entries before the PCI layer enables MSI in the
398 * card. Otherwise the card latches a random msi message. 436 * card. Otherwise the card latches a random msi message.
399 */ 437 */
400 if (info->flags & MSI_FLAG_ACTIVATE_EARLY) { 438 if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
401 struct irq_data *irq_data; 439 continue;
402 440
441 irq_data = irq_domain_get_irq_data(domain, desc->irq);
442 if (!can_reserve)
443 irqd_clr_can_reserve(irq_data);
444 ret = irq_domain_activate_irq(irq_data, can_reserve);
445 if (ret)
446 goto cleanup;
447 }
448
449 /*
450 * If these interrupts use reservation mode, clear the activated bit
451 * so request_irq() will assign the final vector.
452 */
453 if (can_reserve) {
454 for_each_msi_entry(desc, dev) {
403 irq_data = irq_domain_get_irq_data(domain, desc->irq); 455 irq_data = irq_domain_get_irq_data(domain, desc->irq);
404 ret = irq_domain_activate_irq(irq_data, true); 456 irqd_clr_activated(irq_data);
405 if (ret)
406 goto cleanup;
407 if (info->flags & MSI_FLAG_MUST_REACTIVATE)
408 irqd_clr_activated(irq_data);
409 } 457 }
410 } 458 }
411 return 0; 459 return 0;
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 8594d24e4adc..b4517095db6a 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -79,7 +79,7 @@ int static_key_count(struct static_key *key)
79} 79}
80EXPORT_SYMBOL_GPL(static_key_count); 80EXPORT_SYMBOL_GPL(static_key_count);
81 81
82static void static_key_slow_inc_cpuslocked(struct static_key *key) 82void static_key_slow_inc_cpuslocked(struct static_key *key)
83{ 83{
84 int v, v1; 84 int v, v1;
85 85
@@ -180,7 +180,7 @@ void static_key_disable(struct static_key *key)
180} 180}
181EXPORT_SYMBOL_GPL(static_key_disable); 181EXPORT_SYMBOL_GPL(static_key_disable);
182 182
183static void static_key_slow_dec_cpuslocked(struct static_key *key, 183static void __static_key_slow_dec_cpuslocked(struct static_key *key,
184 unsigned long rate_limit, 184 unsigned long rate_limit,
185 struct delayed_work *work) 185 struct delayed_work *work)
186{ 186{
@@ -211,7 +211,7 @@ static void __static_key_slow_dec(struct static_key *key,
211 struct delayed_work *work) 211 struct delayed_work *work)
212{ 212{
213 cpus_read_lock(); 213 cpus_read_lock();
214 static_key_slow_dec_cpuslocked(key, rate_limit, work); 214 __static_key_slow_dec_cpuslocked(key, rate_limit, work);
215 cpus_read_unlock(); 215 cpus_read_unlock();
216} 216}
217 217
@@ -229,6 +229,12 @@ void static_key_slow_dec(struct static_key *key)
229} 229}
230EXPORT_SYMBOL_GPL(static_key_slow_dec); 230EXPORT_SYMBOL_GPL(static_key_slow_dec);
231 231
232void static_key_slow_dec_cpuslocked(struct static_key *key)
233{
234 STATIC_KEY_CHECK_USE(key);
235 __static_key_slow_dec_cpuslocked(key, 0, NULL);
236}
237
232void static_key_slow_dec_deferred(struct static_key_deferred *key) 238void static_key_slow_dec_deferred(struct static_key_deferred *key)
233{ 239{
234 STATIC_KEY_CHECK_USE(key); 240 STATIC_KEY_CHECK_USE(key);
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 15f33faf4013..7594c033d98a 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -157,7 +157,7 @@ void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
157} 157}
158EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2); 158EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);
159 159
160void notrace __sanitizer_cov_trace_cmp4(u16 arg1, u16 arg2) 160void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
161{ 161{
162 write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_); 162 write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
163} 163}
@@ -183,7 +183,7 @@ void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
183} 183}
184EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2); 184EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);
185 185
186void notrace __sanitizer_cov_trace_const_cmp4(u16 arg1, u16 arg2) 186void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
187{ 187{
188 write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2, 188 write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
189 _RET_IP_); 189 _RET_IP_);
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 9776da8db180..521659044719 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -49,6 +49,7 @@
49#include <linux/gfp.h> 49#include <linux/gfp.h>
50#include <linux/random.h> 50#include <linux/random.h>
51#include <linux/jhash.h> 51#include <linux/jhash.h>
52#include <linux/nmi.h>
52 53
53#include <asm/sections.h> 54#include <asm/sections.h>
54 55
@@ -57,10 +58,6 @@
57#define CREATE_TRACE_POINTS 58#define CREATE_TRACE_POINTS
58#include <trace/events/lock.h> 59#include <trace/events/lock.h>
59 60
60#ifdef CONFIG_LOCKDEP_CROSSRELEASE
61#include <linux/slab.h>
62#endif
63
64#ifdef CONFIG_PROVE_LOCKING 61#ifdef CONFIG_PROVE_LOCKING
65int prove_locking = 1; 62int prove_locking = 1;
66module_param(prove_locking, int, 0644); 63module_param(prove_locking, int, 0644);
@@ -75,19 +72,6 @@ module_param(lock_stat, int, 0644);
75#define lock_stat 0 72#define lock_stat 0
76#endif 73#endif
77 74
78#ifdef CONFIG_BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK
79static int crossrelease_fullstack = 1;
80#else
81static int crossrelease_fullstack;
82#endif
83static int __init allow_crossrelease_fullstack(char *str)
84{
85 crossrelease_fullstack = 1;
86 return 0;
87}
88
89early_param("crossrelease_fullstack", allow_crossrelease_fullstack);
90
91/* 75/*
92 * lockdep_lock: protects the lockdep graph, the hashes and the 76 * lockdep_lock: protects the lockdep graph, the hashes and the
93 * class/list/hash allocators. 77 * class/list/hash allocators.
@@ -740,18 +724,6 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
740 return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL); 724 return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
741} 725}
742 726
743#ifdef CONFIG_LOCKDEP_CROSSRELEASE
744static void cross_init(struct lockdep_map *lock, int cross);
745static int cross_lock(struct lockdep_map *lock);
746static int lock_acquire_crosslock(struct held_lock *hlock);
747static int lock_release_crosslock(struct lockdep_map *lock);
748#else
749static inline void cross_init(struct lockdep_map *lock, int cross) {}
750static inline int cross_lock(struct lockdep_map *lock) { return 0; }
751static inline int lock_acquire_crosslock(struct held_lock *hlock) { return 2; }
752static inline int lock_release_crosslock(struct lockdep_map *lock) { return 2; }
753#endif
754
755/* 727/*
756 * Register a lock's class in the hash-table, if the class is not present 728 * Register a lock's class in the hash-table, if the class is not present
757 * yet. Otherwise we look it up. We cache the result in the lock object 729 * yet. Otherwise we look it up. We cache the result in the lock object
@@ -1151,41 +1123,22 @@ print_circular_lock_scenario(struct held_lock *src,
1151 printk(KERN_CONT "\n\n"); 1123 printk(KERN_CONT "\n\n");
1152 } 1124 }
1153 1125
1154 if (cross_lock(tgt->instance)) { 1126 printk(" Possible unsafe locking scenario:\n\n");
1155 printk(" Possible unsafe locking scenario by crosslock:\n\n"); 1127 printk(" CPU0 CPU1\n");
1156 printk(" CPU0 CPU1\n"); 1128 printk(" ---- ----\n");
1157 printk(" ---- ----\n"); 1129 printk(" lock(");
1158 printk(" lock("); 1130 __print_lock_name(target);
1159 __print_lock_name(parent); 1131 printk(KERN_CONT ");\n");
1160 printk(KERN_CONT ");\n"); 1132 printk(" lock(");
1161 printk(" lock("); 1133 __print_lock_name(parent);
1162 __print_lock_name(target); 1134 printk(KERN_CONT ");\n");
1163 printk(KERN_CONT ");\n"); 1135 printk(" lock(");
1164 printk(" lock("); 1136 __print_lock_name(target);
1165 __print_lock_name(source); 1137 printk(KERN_CONT ");\n");
1166 printk(KERN_CONT ");\n"); 1138 printk(" lock(");
1167 printk(" unlock("); 1139 __print_lock_name(source);
1168 __print_lock_name(target); 1140 printk(KERN_CONT ");\n");
1169 printk(KERN_CONT ");\n"); 1141 printk("\n *** DEADLOCK ***\n\n");
1170 printk("\n *** DEADLOCK ***\n\n");
1171 } else {
1172 printk(" Possible unsafe locking scenario:\n\n");
1173 printk(" CPU0 CPU1\n");
1174 printk(" ---- ----\n");
1175 printk(" lock(");
1176 __print_lock_name(target);
1177 printk(KERN_CONT ");\n");
1178 printk(" lock(");
1179 __print_lock_name(parent);
1180 printk(KERN_CONT ");\n");
1181 printk(" lock(");
1182 __print_lock_name(target);
1183 printk(KERN_CONT ");\n");
1184 printk(" lock(");
1185 __print_lock_name(source);
1186 printk(KERN_CONT ");\n");
1187 printk("\n *** DEADLOCK ***\n\n");
1188 }
1189} 1142}
1190 1143
1191/* 1144/*
@@ -1211,10 +1164,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1211 curr->comm, task_pid_nr(curr)); 1164 curr->comm, task_pid_nr(curr));
1212 print_lock(check_src); 1165 print_lock(check_src);
1213 1166
1214 if (cross_lock(check_tgt->instance)) 1167 pr_warn("\nbut task is already holding lock:\n");
1215 pr_warn("\nbut now in release context of a crosslock acquired at the following:\n");
1216 else
1217 pr_warn("\nbut task is already holding lock:\n");
1218 1168
1219 print_lock(check_tgt); 1169 print_lock(check_tgt);
1220 pr_warn("\nwhich lock already depends on the new lock.\n\n"); 1170 pr_warn("\nwhich lock already depends on the new lock.\n\n");
@@ -1244,9 +1194,7 @@ static noinline int print_circular_bug(struct lock_list *this,
1244 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 1194 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1245 return 0; 1195 return 0;
1246 1196
1247 if (cross_lock(check_tgt->instance)) 1197 if (!save_trace(&this->trace))
1248 this->trace = *trace;
1249 else if (!save_trace(&this->trace))
1250 return 0; 1198 return 0;
1251 1199
1252 depth = get_lock_depth(target); 1200 depth = get_lock_depth(target);
@@ -1850,9 +1798,6 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
1850 if (nest) 1798 if (nest)
1851 return 2; 1799 return 2;
1852 1800
1853 if (cross_lock(prev->instance))
1854 continue;
1855
1856 return print_deadlock_bug(curr, prev, next); 1801 return print_deadlock_bug(curr, prev, next);
1857 } 1802 }
1858 return 1; 1803 return 1;
@@ -2018,31 +1963,26 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
2018 for (;;) { 1963 for (;;) {
2019 int distance = curr->lockdep_depth - depth + 1; 1964 int distance = curr->lockdep_depth - depth + 1;
2020 hlock = curr->held_locks + depth - 1; 1965 hlock = curr->held_locks + depth - 1;
1966
2021 /* 1967 /*
2022 * Only non-crosslock entries get new dependencies added. 1968 * Only non-recursive-read entries get new dependencies
2023 * Crosslock entries will be added by commit later: 1969 * added:
2024 */ 1970 */
2025 if (!cross_lock(hlock->instance)) { 1971 if (hlock->read != 2 && hlock->check) {
1972 int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace);
1973 if (!ret)
1974 return 0;
1975
2026 /* 1976 /*
2027 * Only non-recursive-read entries get new dependencies 1977 * Stop after the first non-trylock entry,
2028 * added: 1978 * as non-trylock entries have added their
1979 * own direct dependencies already, so this
1980 * lock is connected to them indirectly:
2029 */ 1981 */
2030 if (hlock->read != 2 && hlock->check) { 1982 if (!hlock->trylock)
2031 int ret = check_prev_add(curr, hlock, next, 1983 break;
2032 distance, &trace, save_trace);
2033 if (!ret)
2034 return 0;
2035
2036 /*
2037 * Stop after the first non-trylock entry,
2038 * as non-trylock entries have added their
2039 * own direct dependencies already, so this
2040 * lock is connected to them indirectly:
2041 */
2042 if (!hlock->trylock)
2043 break;
2044 }
2045 } 1984 }
1985
2046 depth--; 1986 depth--;
2047 /* 1987 /*
2048 * End of lock-stack? 1988 * End of lock-stack?
@@ -3292,21 +3232,10 @@ static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
3292void lockdep_init_map(struct lockdep_map *lock, const char *name, 3232void lockdep_init_map(struct lockdep_map *lock, const char *name,
3293 struct lock_class_key *key, int subclass) 3233 struct lock_class_key *key, int subclass)
3294{ 3234{
3295 cross_init(lock, 0);
3296 __lockdep_init_map(lock, name, key, subclass); 3235 __lockdep_init_map(lock, name, key, subclass);
3297} 3236}
3298EXPORT_SYMBOL_GPL(lockdep_init_map); 3237EXPORT_SYMBOL_GPL(lockdep_init_map);
3299 3238
3300#ifdef CONFIG_LOCKDEP_CROSSRELEASE
3301void lockdep_init_map_crosslock(struct lockdep_map *lock, const char *name,
3302 struct lock_class_key *key, int subclass)
3303{
3304 cross_init(lock, 1);
3305 __lockdep_init_map(lock, name, key, subclass);
3306}
3307EXPORT_SYMBOL_GPL(lockdep_init_map_crosslock);
3308#endif
3309
3310struct lock_class_key __lockdep_no_validate__; 3239struct lock_class_key __lockdep_no_validate__;
3311EXPORT_SYMBOL_GPL(__lockdep_no_validate__); 3240EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
3312 3241
@@ -3362,7 +3291,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3362 int chain_head = 0; 3291 int chain_head = 0;
3363 int class_idx; 3292 int class_idx;
3364 u64 chain_key; 3293 u64 chain_key;
3365 int ret;
3366 3294
3367 if (unlikely(!debug_locks)) 3295 if (unlikely(!debug_locks))
3368 return 0; 3296 return 0;
@@ -3411,8 +3339,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3411 3339
3412 class_idx = class - lock_classes + 1; 3340 class_idx = class - lock_classes + 1;
3413 3341
3414 /* TODO: nest_lock is not implemented for crosslock yet. */ 3342 if (depth) {
3415 if (depth && !cross_lock(lock)) {
3416 hlock = curr->held_locks + depth - 1; 3343 hlock = curr->held_locks + depth - 1;
3417 if (hlock->class_idx == class_idx && nest_lock) { 3344 if (hlock->class_idx == class_idx && nest_lock) {
3418 if (hlock->references) { 3345 if (hlock->references) {
@@ -3500,14 +3427,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3500 if (!validate_chain(curr, lock, hlock, chain_head, chain_key)) 3427 if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
3501 return 0; 3428 return 0;
3502 3429
3503 ret = lock_acquire_crosslock(hlock);
3504 /*
3505 * 2 means normal acquire operations are needed. Otherwise, it's
3506 * ok just to return with '0:fail, 1:success'.
3507 */
3508 if (ret != 2)
3509 return ret;
3510
3511 curr->curr_chain_key = chain_key; 3430 curr->curr_chain_key = chain_key;
3512 curr->lockdep_depth++; 3431 curr->lockdep_depth++;
3513 check_chain_key(curr); 3432 check_chain_key(curr);
@@ -3745,19 +3664,11 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
3745 struct task_struct *curr = current; 3664 struct task_struct *curr = current;
3746 struct held_lock *hlock; 3665 struct held_lock *hlock;
3747 unsigned int depth; 3666 unsigned int depth;
3748 int ret, i; 3667 int i;
3749 3668
3750 if (unlikely(!debug_locks)) 3669 if (unlikely(!debug_locks))
3751 return 0; 3670 return 0;
3752 3671
3753 ret = lock_release_crosslock(lock);
3754 /*
3755 * 2 means normal release operations are needed. Otherwise, it's
3756 * ok just to return with '0:fail, 1:success'.
3757 */
3758 if (ret != 2)
3759 return ret;
3760
3761 depth = curr->lockdep_depth; 3672 depth = curr->lockdep_depth;
3762 /* 3673 /*
3763 * So we're all set to release this lock.. wait what lock? We don't 3674 * So we're all set to release this lock.. wait what lock? We don't
@@ -4580,6 +4491,7 @@ retry:
4580 if (!unlock) 4491 if (!unlock)
4581 if (read_trylock(&tasklist_lock)) 4492 if (read_trylock(&tasklist_lock))
4582 unlock = 1; 4493 unlock = 1;
4494 touch_nmi_watchdog();
4583 } while_each_thread(g, p); 4495 } while_each_thread(g, p);
4584 4496
4585 pr_warn("\n"); 4497 pr_warn("\n");
@@ -4675,494 +4587,3 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
4675 dump_stack(); 4587 dump_stack();
4676} 4588}
4677EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious); 4589EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
4678
4679#ifdef CONFIG_LOCKDEP_CROSSRELEASE
4680
4681/*
4682 * Crossrelease works by recording a lock history for each thread and
4683 * connecting those historic locks that were taken after the
4684 * wait_for_completion() in the complete() context.
4685 *
4686 * Task-A Task-B
4687 *
4688 * mutex_lock(&A);
4689 * mutex_unlock(&A);
4690 *
4691 * wait_for_completion(&C);
4692 * lock_acquire_crosslock();
4693 * atomic_inc_return(&cross_gen_id);
4694 * |
4695 * | mutex_lock(&B);
4696 * | mutex_unlock(&B);
4697 * |
4698 * | complete(&C);
4699 * `-- lock_commit_crosslock();
4700 *
4701 * Which will then add a dependency between B and C.
4702 */
4703
4704#define xhlock(i) (current->xhlocks[(i) % MAX_XHLOCKS_NR])
4705
4706/*
4707 * Whenever a crosslock is held, cross_gen_id will be increased.
4708 */
4709static atomic_t cross_gen_id; /* Can be wrapped */
4710
4711/*
4712 * Make an entry of the ring buffer invalid.
4713 */
4714static inline void invalidate_xhlock(struct hist_lock *xhlock)
4715{
4716 /*
4717 * Normally, xhlock->hlock.instance must be !NULL.
4718 */
4719 xhlock->hlock.instance = NULL;
4720}
4721
4722/*
4723 * Lock history stacks; we have 2 nested lock history stacks:
4724 *
4725 * HARD(IRQ)
4726 * SOFT(IRQ)
4727 *
4728 * The thing is that once we complete a HARD/SOFT IRQ the future task locks
4729 * should not depend on any of the locks observed while running the IRQ. So
4730 * what we do is rewind the history buffer and erase all our knowledge of that
4731 * temporal event.
4732 */
4733
4734void crossrelease_hist_start(enum xhlock_context_t c)
4735{
4736 struct task_struct *cur = current;
4737
4738 if (!cur->xhlocks)
4739 return;
4740
4741 cur->xhlock_idx_hist[c] = cur->xhlock_idx;
4742 cur->hist_id_save[c] = cur->hist_id;
4743}
4744
4745void crossrelease_hist_end(enum xhlock_context_t c)
4746{
4747 struct task_struct *cur = current;
4748
4749 if (cur->xhlocks) {
4750 unsigned int idx = cur->xhlock_idx_hist[c];
4751 struct hist_lock *h = &xhlock(idx);
4752
4753 cur->xhlock_idx = idx;
4754
4755 /* Check if the ring was overwritten. */
4756 if (h->hist_id != cur->hist_id_save[c])
4757 invalidate_xhlock(h);
4758 }
4759}
4760
4761/*
4762 * lockdep_invariant_state() is used to annotate independence inside a task, to
4763 * make one task look like multiple independent 'tasks'.
4764 *
4765 * Take for instance workqueues; each work is independent of the last. The
4766 * completion of a future work does not depend on the completion of a past work
4767 * (in general). Therefore we must not carry that (lock) dependency across
4768 * works.
4769 *
4770 * This is true for many things; pretty much all kthreads fall into this
4771 * pattern, where they have an invariant state and future completions do not
4772 * depend on past completions. Its just that since they all have the 'same'
4773 * form -- the kthread does the same over and over -- it doesn't typically
4774 * matter.
4775 *
4776 * The same is true for system-calls, once a system call is completed (we've
4777 * returned to userspace) the next system call does not depend on the lock
4778 * history of the previous system call.
4779 *
4780 * They key property for independence, this invariant state, is that it must be
4781 * a point where we hold no locks and have no history. Because if we were to
4782 * hold locks, the restore at _end() would not necessarily recover it's history
4783 * entry. Similarly, independence per-definition means it does not depend on
4784 * prior state.
4785 */
4786void lockdep_invariant_state(bool force)
4787{
4788 /*
4789 * We call this at an invariant point, no current state, no history.
4790 * Verify the former, enforce the latter.
4791 */
4792 WARN_ON_ONCE(!force && current->lockdep_depth);
4793 invalidate_xhlock(&xhlock(current->xhlock_idx));
4794}
4795
4796static int cross_lock(struct lockdep_map *lock)
4797{
4798 return lock ? lock->cross : 0;
4799}
4800
4801/*
4802 * This is needed to decide the relationship between wrapable variables.
4803 */
4804static inline int before(unsigned int a, unsigned int b)
4805{
4806 return (int)(a - b) < 0;
4807}
4808
4809static inline struct lock_class *xhlock_class(struct hist_lock *xhlock)
4810{
4811 return hlock_class(&xhlock->hlock);
4812}
4813
4814static inline struct lock_class *xlock_class(struct cross_lock *xlock)
4815{
4816 return hlock_class(&xlock->hlock);
4817}
4818
4819/*
4820 * Should we check a dependency with previous one?
4821 */
4822static inline int depend_before(struct held_lock *hlock)
4823{
4824 return hlock->read != 2 && hlock->check && !hlock->trylock;
4825}
4826
4827/*
4828 * Should we check a dependency with next one?
4829 */
4830static inline int depend_after(struct held_lock *hlock)
4831{
4832 return hlock->read != 2 && hlock->check;
4833}
4834
4835/*
4836 * Check if the xhlock is valid, which would be false if,
4837 *
4838 * 1. Has not used after initializaion yet.
4839 * 2. Got invalidated.
4840 *
4841 * Remind hist_lock is implemented as a ring buffer.
4842 */
4843static inline int xhlock_valid(struct hist_lock *xhlock)
4844{
4845 /*
4846 * xhlock->hlock.instance must be !NULL.
4847 */
4848 return !!xhlock->hlock.instance;
4849}
4850
4851/*
4852 * Record a hist_lock entry.
4853 *
4854 * Irq disable is only required.
4855 */
4856static void add_xhlock(struct held_lock *hlock)
4857{
4858 unsigned int idx = ++current->xhlock_idx;
4859 struct hist_lock *xhlock = &xhlock(idx);
4860
4861#ifdef CONFIG_DEBUG_LOCKDEP
4862 /*
4863 * This can be done locklessly because they are all task-local
4864 * state, we must however ensure IRQs are disabled.
4865 */
4866 WARN_ON_ONCE(!irqs_disabled());
4867#endif
4868
4869 /* Initialize hist_lock's members */
4870 xhlock->hlock = *hlock;
4871 xhlock->hist_id = ++current->hist_id;
4872
4873 xhlock->trace.nr_entries = 0;
4874 xhlock->trace.max_entries = MAX_XHLOCK_TRACE_ENTRIES;
4875 xhlock->trace.entries = xhlock->trace_entries;
4876
4877 if (crossrelease_fullstack) {
4878 xhlock->trace.skip = 3;
4879 save_stack_trace(&xhlock->trace);
4880 } else {
4881 xhlock->trace.nr_entries = 1;
4882 xhlock->trace.entries[0] = hlock->acquire_ip;
4883 }
4884}
4885
4886static inline int same_context_xhlock(struct hist_lock *xhlock)
4887{
4888 return xhlock->hlock.irq_context == task_irq_context(current);
4889}
4890
4891/*
4892 * This should be lockless as far as possible because this would be
4893 * called very frequently.
4894 */
4895static void check_add_xhlock(struct held_lock *hlock)
4896{
4897 /*
4898 * Record a hist_lock, only in case that acquisitions ahead
4899 * could depend on the held_lock. For example, if the held_lock
4900 * is trylock then acquisitions ahead never depends on that.
4901 * In that case, we don't need to record it. Just return.
4902 */
4903 if (!current->xhlocks || !depend_before(hlock))
4904 return;
4905
4906 add_xhlock(hlock);
4907}
4908
4909/*
4910 * For crosslock.
4911 */
4912static int add_xlock(struct held_lock *hlock)
4913{
4914 struct cross_lock *xlock;
4915 unsigned int gen_id;
4916
4917 if (!graph_lock())
4918 return 0;
4919
4920 xlock = &((struct lockdep_map_cross *)hlock->instance)->xlock;
4921
4922 /*
4923 * When acquisitions for a crosslock are overlapped, we use
4924 * nr_acquire to perform commit for them, based on cross_gen_id
4925 * of the first acquisition, which allows to add additional
4926 * dependencies.
4927 *
4928 * Moreover, when no acquisition of a crosslock is in progress,
4929 * we should not perform commit because the lock might not exist
4930 * any more, which might cause incorrect memory access. So we
4931 * have to track the number of acquisitions of a crosslock.
4932 *
4933 * depend_after() is necessary to initialize only the first
4934 * valid xlock so that the xlock can be used on its commit.
4935 */
4936 if (xlock->nr_acquire++ && depend_after(&xlock->hlock))
4937 goto unlock;
4938
4939 gen_id = (unsigned int)atomic_inc_return(&cross_gen_id);
4940 xlock->hlock = *hlock;
4941 xlock->hlock.gen_id = gen_id;
4942unlock:
4943 graph_unlock();
4944 return 1;
4945}
4946
4947/*
4948 * Called for both normal and crosslock acquires. Normal locks will be
4949 * pushed on the hist_lock queue. Cross locks will record state and
4950 * stop regular lock_acquire() to avoid being placed on the held_lock
4951 * stack.
4952 *
4953 * Return: 0 - failure;
4954 * 1 - crosslock, done;
4955 * 2 - normal lock, continue to held_lock[] ops.
4956 */
4957static int lock_acquire_crosslock(struct held_lock *hlock)
4958{
4959 /*
4960 * CONTEXT 1 CONTEXT 2
4961 * --------- ---------
4962 * lock A (cross)
4963 * X = atomic_inc_return(&cross_gen_id)
4964 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
4965 * Y = atomic_read_acquire(&cross_gen_id)
4966 * lock B
4967 *
4968 * atomic_read_acquire() is for ordering between A and B,
4969 * IOW, A happens before B, when CONTEXT 2 see Y >= X.
4970 *
4971 * Pairs with atomic_inc_return() in add_xlock().
4972 */
4973 hlock->gen_id = (unsigned int)atomic_read_acquire(&cross_gen_id);
4974
4975 if (cross_lock(hlock->instance))
4976 return add_xlock(hlock);
4977
4978 check_add_xhlock(hlock);
4979 return 2;
4980}
4981
4982static int copy_trace(struct stack_trace *trace)
4983{
4984 unsigned long *buf = stack_trace + nr_stack_trace_entries;
4985 unsigned int max_nr = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
4986 unsigned int nr = min(max_nr, trace->nr_entries);
4987
4988 trace->nr_entries = nr;
4989 memcpy(buf, trace->entries, nr * sizeof(trace->entries[0]));
4990 trace->entries = buf;
4991 nr_stack_trace_entries += nr;
4992
4993 if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
4994 if (!debug_locks_off_graph_unlock())
4995 return 0;
4996
4997 print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
4998 dump_stack();
4999
5000 return 0;
5001 }
5002
5003 return 1;
5004}
5005
5006static int commit_xhlock(struct cross_lock *xlock, struct hist_lock *xhlock)
5007{
5008 unsigned int xid, pid;
5009 u64 chain_key;
5010
5011 xid = xlock_class(xlock) - lock_classes;
5012 chain_key = iterate_chain_key((u64)0, xid);
5013 pid = xhlock_class(xhlock) - lock_classes;
5014 chain_key = iterate_chain_key(chain_key, pid);
5015
5016 if (lookup_chain_cache(chain_key))
5017 return 1;
5018
5019 if (!add_chain_cache_classes(xid, pid, xhlock->hlock.irq_context,
5020 chain_key))
5021 return 0;
5022
5023 if (!check_prev_add(current, &xlock->hlock, &xhlock->hlock, 1,
5024 &xhlock->trace, copy_trace))
5025 return 0;
5026
5027 return 1;
5028}
5029
5030static void commit_xhlocks(struct cross_lock *xlock)
5031{
5032 unsigned int cur = current->xhlock_idx;
5033 unsigned int prev_hist_id = xhlock(cur).hist_id;
5034 unsigned int i;
5035
5036 if (!graph_lock())
5037 return;
5038
5039 if (xlock->nr_acquire) {
5040 for (i = 0; i < MAX_XHLOCKS_NR; i++) {
5041 struct hist_lock *xhlock = &xhlock(cur - i);
5042
5043 if (!xhlock_valid(xhlock))
5044 break;
5045
5046 if (before(xhlock->hlock.gen_id, xlock->hlock.gen_id))
5047 break;
5048
5049 if (!same_context_xhlock(xhlock))
5050 break;
5051
5052 /*
5053 * Filter out the cases where the ring buffer was
5054 * overwritten and the current entry has a bigger
5055 * hist_id than the previous one, which is impossible
5056 * otherwise:
5057 */
5058 if (unlikely(before(prev_hist_id, xhlock->hist_id)))
5059 break;
5060
5061 prev_hist_id = xhlock->hist_id;
5062
5063 /*
5064 * commit_xhlock() returns 0 with graph_lock already
5065 * released if fail.
5066 */
5067 if (!commit_xhlock(xlock, xhlock))
5068 return;
5069 }
5070 }
5071
5072 graph_unlock();
5073}
5074
5075void lock_commit_crosslock(struct lockdep_map *lock)
5076{
5077 struct cross_lock *xlock;
5078 unsigned long flags;
5079
5080 if (unlikely(!debug_locks || current->lockdep_recursion))
5081 return;
5082
5083 if (!current->xhlocks)
5084 return;
5085
5086 /*
5087 * Do commit hist_locks with the cross_lock, only in case that
5088 * the cross_lock could depend on acquisitions after that.
5089 *
5090 * For example, if the cross_lock does not have the 'check' flag
5091 * then we don't need to check dependencies and commit for that.
5092 * Just skip it. In that case, of course, the cross_lock does
5093 * not depend on acquisitions ahead, either.
5094 *
5095 * WARNING: Don't do that in add_xlock() in advance. When an
5096 * acquisition context is different from the commit context,
5097 * invalid(skipped) cross_lock might be accessed.
5098 */
5099 if (!depend_after(&((struct lockdep_map_cross *)lock)->xlock.hlock))
5100 return;
5101
5102 raw_local_irq_save(flags);
5103 check_flags(flags);
5104 current->lockdep_recursion = 1;
5105 xlock = &((struct lockdep_map_cross *)lock)->xlock;
5106 commit_xhlocks(xlock);
5107 current->lockdep_recursion = 0;
5108 raw_local_irq_restore(flags);
5109}
5110EXPORT_SYMBOL_GPL(lock_commit_crosslock);
5111
5112/*
5113 * Return: 0 - failure;
5114 * 1 - crosslock, done;
5115 * 2 - normal lock, continue to held_lock[] ops.
5116 */
5117static int lock_release_crosslock(struct lockdep_map *lock)
5118{
5119 if (cross_lock(lock)) {
5120 if (!graph_lock())
5121 return 0;
5122 ((struct lockdep_map_cross *)lock)->xlock.nr_acquire--;
5123 graph_unlock();
5124 return 1;
5125 }
5126 return 2;
5127}
5128
5129static void cross_init(struct lockdep_map *lock, int cross)
5130{
5131 if (cross)
5132 ((struct lockdep_map_cross *)lock)->xlock.nr_acquire = 0;
5133
5134 lock->cross = cross;
5135
5136 /*
5137 * Crossrelease assumes that the ring buffer size of xhlocks
5138 * is aligned with power of 2. So force it on build.
5139 */
5140 BUILD_BUG_ON(MAX_XHLOCKS_NR & (MAX_XHLOCKS_NR - 1));
5141}
5142
5143void lockdep_init_task(struct task_struct *task)
5144{
5145 int i;
5146
5147 task->xhlock_idx = UINT_MAX;
5148 task->hist_id = 0;
5149
5150 for (i = 0; i < XHLOCK_CTX_NR; i++) {
5151 task->xhlock_idx_hist[i] = UINT_MAX;
5152 task->hist_id_save[i] = 0;
5153 }
5154
5155 task->xhlocks = kzalloc(sizeof(struct hist_lock) * MAX_XHLOCKS_NR,
5156 GFP_KERNEL);
5157}
5158
5159void lockdep_free_task(struct task_struct *task)
5160{
5161 if (task->xhlocks) {
5162 void *tmp = task->xhlocks;
5163 /* Diable crossrelease for current */
5164 task->xhlocks = NULL;
5165 kfree(tmp);
5166 }
5167}
5168#endif
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 6f3dba6e4e9e..65cc0cb984e6 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1290,6 +1290,19 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
1290 return ret; 1290 return ret;
1291} 1291}
1292 1292
1293static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock)
1294{
1295 int ret = try_to_take_rt_mutex(lock, current, NULL);
1296
1297 /*
1298 * try_to_take_rt_mutex() sets the lock waiters bit
1299 * unconditionally. Clean this up.
1300 */
1301 fixup_rt_mutex_waiters(lock);
1302
1303 return ret;
1304}
1305
1293/* 1306/*
1294 * Slow path try-lock function: 1307 * Slow path try-lock function:
1295 */ 1308 */
@@ -1312,13 +1325,7 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
1312 */ 1325 */
1313 raw_spin_lock_irqsave(&lock->wait_lock, flags); 1326 raw_spin_lock_irqsave(&lock->wait_lock, flags);
1314 1327
1315 ret = try_to_take_rt_mutex(lock, current, NULL); 1328 ret = __rt_mutex_slowtrylock(lock);
1316
1317 /*
1318 * try_to_take_rt_mutex() sets the lock waiters bit
1319 * unconditionally. Clean this up.
1320 */
1321 fixup_rt_mutex_waiters(lock);
1322 1329
1323 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); 1330 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1324 1331
@@ -1505,6 +1512,11 @@ int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
1505 return rt_mutex_slowtrylock(lock); 1512 return rt_mutex_slowtrylock(lock);
1506} 1513}
1507 1514
1515int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
1516{
1517 return __rt_mutex_slowtrylock(lock);
1518}
1519
1508/** 1520/**
1509 * rt_mutex_timed_lock - lock a rt_mutex interruptible 1521 * rt_mutex_timed_lock - lock a rt_mutex interruptible
1510 * the timeout structure is provided 1522 * the timeout structure is provided
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index 124e98ca0b17..68686b3ec3c1 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -148,6 +148,7 @@ extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
148 struct rt_mutex_waiter *waiter); 148 struct rt_mutex_waiter *waiter);
149 149
150extern int rt_mutex_futex_trylock(struct rt_mutex *l); 150extern int rt_mutex_futex_trylock(struct rt_mutex *l);
151extern int __rt_mutex_futex_trylock(struct rt_mutex *l);
151 152
152extern void rt_mutex_futex_unlock(struct rt_mutex *lock); 153extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
153extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock, 154extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index 1fd1a7543cdd..936f3d14dd6b 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -66,12 +66,8 @@ void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
66 break; \ 66 break; \
67 preempt_enable(); \ 67 preempt_enable(); \
68 \ 68 \
69 if (!(lock)->break_lock) \ 69 arch_##op##_relax(&lock->raw_lock); \
70 (lock)->break_lock = 1; \
71 while ((lock)->break_lock) \
72 arch_##op##_relax(&lock->raw_lock); \
73 } \ 70 } \
74 (lock)->break_lock = 0; \
75} \ 71} \
76 \ 72 \
77unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \ 73unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
@@ -86,12 +82,9 @@ unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
86 local_irq_restore(flags); \ 82 local_irq_restore(flags); \
87 preempt_enable(); \ 83 preempt_enable(); \
88 \ 84 \
89 if (!(lock)->break_lock) \ 85 arch_##op##_relax(&lock->raw_lock); \
90 (lock)->break_lock = 1; \
91 while ((lock)->break_lock) \
92 arch_##op##_relax(&lock->raw_lock); \
93 } \ 86 } \
94 (lock)->break_lock = 0; \ 87 \
95 return flags; \ 88 return flags; \
96} \ 89} \
97 \ 90 \
diff --git a/kernel/pid.c b/kernel/pid.c
index b13b624e2c49..1e8bb6550ec4 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -193,10 +193,8 @@ struct pid *alloc_pid(struct pid_namespace *ns)
193 } 193 }
194 194
195 if (unlikely(is_child_reaper(pid))) { 195 if (unlikely(is_child_reaper(pid))) {
196 if (pid_ns_prepare_proc(ns)) { 196 if (pid_ns_prepare_proc(ns))
197 disable_pid_allocation(ns);
198 goto out_free; 197 goto out_free;
199 }
200 } 198 }
201 199
202 get_pid_ns(ns); 200 get_pid_ns(ns);
@@ -226,6 +224,10 @@ out_free:
226 while (++i <= ns->level) 224 while (++i <= ns->level)
227 idr_remove(&ns->idr, (pid->numbers + i)->nr); 225 idr_remove(&ns->idr, (pid->numbers + i)->nr);
228 226
227 /* On failure to allocate the first pid, reset the state */
228 if (ns->pid_allocated == PIDNS_ADDING)
229 idr_set_cursor(&ns->idr, 0);
230
229 spin_unlock_irq(&pidmap_lock); 231 spin_unlock_irq(&pidmap_lock);
230 232
231 kmem_cache_free(ns->pid_cachep, pid); 233 kmem_cache_free(ns->pid_cachep, pid);
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 5d81206a572d..b9006617710f 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -3141,9 +3141,6 @@ void dump_stack_print_info(const char *log_lvl)
3141void show_regs_print_info(const char *log_lvl) 3141void show_regs_print_info(const char *log_lvl)
3142{ 3142{
3143 dump_stack_print_info(log_lvl); 3143 dump_stack_print_info(log_lvl);
3144
3145 printk("%stask: %p task.stack: %p\n",
3146 log_lvl, current, task_stack_page(current));
3147} 3144}
3148 3145
3149#endif 3146#endif
diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
index 2ddaec40956f..0926aef10dad 100644
--- a/kernel/sched/completion.c
+++ b/kernel/sched/completion.c
@@ -34,11 +34,6 @@ void complete(struct completion *x)
34 34
35 spin_lock_irqsave(&x->wait.lock, flags); 35 spin_lock_irqsave(&x->wait.lock, flags);
36 36
37 /*
38 * Perform commit of crossrelease here.
39 */
40 complete_release_commit(x);
41
42 if (x->done != UINT_MAX) 37 if (x->done != UINT_MAX)
43 x->done++; 38 x->done++;
44 __wake_up_locked(&x->wait, TASK_NORMAL, 1); 39 __wake_up_locked(&x->wait, TASK_NORMAL, 1);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 75554f366fd3..a7bf32aabfda 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2056,7 +2056,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
2056 p->state = TASK_WAKING; 2056 p->state = TASK_WAKING;
2057 2057
2058 if (p->in_iowait) { 2058 if (p->in_iowait) {
2059 delayacct_blkio_end(); 2059 delayacct_blkio_end(p);
2060 atomic_dec(&task_rq(p)->nr_iowait); 2060 atomic_dec(&task_rq(p)->nr_iowait);
2061 } 2061 }
2062 2062
@@ -2069,7 +2069,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
2069#else /* CONFIG_SMP */ 2069#else /* CONFIG_SMP */
2070 2070
2071 if (p->in_iowait) { 2071 if (p->in_iowait) {
2072 delayacct_blkio_end(); 2072 delayacct_blkio_end(p);
2073 atomic_dec(&task_rq(p)->nr_iowait); 2073 atomic_dec(&task_rq(p)->nr_iowait);
2074 } 2074 }
2075 2075
@@ -2122,7 +2122,7 @@ static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf)
2122 2122
2123 if (!task_on_rq_queued(p)) { 2123 if (!task_on_rq_queued(p)) {
2124 if (p->in_iowait) { 2124 if (p->in_iowait) {
2125 delayacct_blkio_end(); 2125 delayacct_blkio_end(p);
2126 atomic_dec(&rq->nr_iowait); 2126 atomic_dec(&rq->nr_iowait);
2127 } 2127 }
2128 ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK); 2128 ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK);
@@ -5097,17 +5097,6 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
5097 return ret; 5097 return ret;
5098} 5098}
5099 5099
5100/**
5101 * sys_sched_rr_get_interval - return the default timeslice of a process.
5102 * @pid: pid of the process.
5103 * @interval: userspace pointer to the timeslice value.
5104 *
5105 * this syscall writes the default timeslice value of a given process
5106 * into the user-space timespec buffer. A value of '0' means infinity.
5107 *
5108 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
5109 * an error code.
5110 */
5111static int sched_rr_get_interval(pid_t pid, struct timespec64 *t) 5100static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
5112{ 5101{
5113 struct task_struct *p; 5102 struct task_struct *p;
@@ -5144,6 +5133,17 @@ out_unlock:
5144 return retval; 5133 return retval;
5145} 5134}
5146 5135
5136/**
5137 * sys_sched_rr_get_interval - return the default timeslice of a process.
5138 * @pid: pid of the process.
5139 * @interval: userspace pointer to the timeslice value.
5140 *
5141 * this syscall writes the default timeslice value of a given process
5142 * into the user-space timespec buffer. A value of '0' means infinity.
5143 *
5144 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
5145 * an error code.
5146 */
5147SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, 5147SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
5148 struct timespec __user *, interval) 5148 struct timespec __user *, interval)
5149{ 5149{
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 2f52ec0f1539..d6717a3331a1 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -244,7 +244,7 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
244#ifdef CONFIG_NO_HZ_COMMON 244#ifdef CONFIG_NO_HZ_COMMON
245static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) 245static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
246{ 246{
247 unsigned long idle_calls = tick_nohz_get_idle_calls(); 247 unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
248 bool ret = idle_calls == sg_cpu->saved_idle_calls; 248 bool ret = idle_calls == sg_cpu->saved_idle_calls;
249 249
250 sg_cpu->saved_idle_calls = idle_calls; 250 sg_cpu->saved_idle_calls = idle_calls;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4037e19bbca2..26a71ebcd3c2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3413,9 +3413,9 @@ void set_task_rq_fair(struct sched_entity *se,
3413 * _IFF_ we look at the pure running and runnable sums. Because they 3413 * _IFF_ we look at the pure running and runnable sums. Because they
3414 * represent the very same entity, just at different points in the hierarchy. 3414 * represent the very same entity, just at different points in the hierarchy.
3415 * 3415 *
3416 * 3416 * Per the above update_tg_cfs_util() is trivial and simply copies the running
3417 * Per the above update_tg_cfs_util() is trivial (and still 'wrong') and 3417 * sum over (but still wrong, because the group entity and group rq do not have
3418 * simply copies the running sum over. 3418 * their PELT windows aligned).
3419 * 3419 *
3420 * However, update_tg_cfs_runnable() is more complex. So we have: 3420 * However, update_tg_cfs_runnable() is more complex. So we have:
3421 * 3421 *
@@ -3424,11 +3424,11 @@ void set_task_rq_fair(struct sched_entity *se,
3424 * And since, like util, the runnable part should be directly transferable, 3424 * And since, like util, the runnable part should be directly transferable,
3425 * the following would _appear_ to be the straight forward approach: 3425 * the following would _appear_ to be the straight forward approach:
3426 * 3426 *
3427 * grq->avg.load_avg = grq->load.weight * grq->avg.running_avg (3) 3427 * grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg (3)
3428 * 3428 *
3429 * And per (1) we have: 3429 * And per (1) we have:
3430 * 3430 *
3431 * ge->avg.running_avg == grq->avg.running_avg 3431 * ge->avg.runnable_avg == grq->avg.runnable_avg
3432 * 3432 *
3433 * Which gives: 3433 * Which gives:
3434 * 3434 *
@@ -3447,27 +3447,28 @@ void set_task_rq_fair(struct sched_entity *se,
3447 * to (shortly) return to us. This only works by keeping the weights as 3447 * to (shortly) return to us. This only works by keeping the weights as
3448 * integral part of the sum. We therefore cannot decompose as per (3). 3448 * integral part of the sum. We therefore cannot decompose as per (3).
3449 * 3449 *
3450 * OK, so what then? 3450 * Another reason this doesn't work is that runnable isn't a 0-sum entity.
3451 * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the
3452 * rq itself is runnable anywhere between 2/3 and 1 depending on how the
3453 * runnable section of these tasks overlap (or not). If they were to perfectly
3454 * align the rq as a whole would be runnable 2/3 of the time. If however we
3455 * always have at least 1 runnable task, the rq as a whole is always runnable.
3451 * 3456 *
3457 * So we'll have to approximate.. :/
3452 * 3458 *
3453 * Another way to look at things is: 3459 * Given the constraint:
3454 * 3460 *
3455 * grq->avg.load_avg = \Sum se->avg.load_avg 3461 * ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX
3456 * 3462 *
3457 * Therefore, per (2): 3463 * We can construct a rule that adds runnable to a rq by assuming minimal
3464 * overlap.
3458 * 3465 *
3459 * grq->avg.load_avg = \Sum se->load.weight * se->avg.runnable_avg 3466 * On removal, we'll assume each task is equally runnable; which yields:
3460 * 3467 *
3461 * And the very thing we're propagating is a change in that sum (someone 3468 * grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight
3462 * joined/left). So we can easily know the runnable change, which would be, per
3463 * (2) the already tracked se->load_avg divided by the corresponding
3464 * se->weight.
3465 * 3469 *
3466 * Basically (4) but in differential form: 3470 * XXX: only do this for the part of runnable > running ?
3467 * 3471 *
3468 * d(runnable_avg) += se->avg.load_avg / se->load.weight
3469 * (5)
3470 * ge->avg.load_avg += ge->load.weight * d(runnable_avg)
3471 */ 3472 */
3472 3473
3473static inline void 3474static inline void
@@ -3479,6 +3480,14 @@ update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
3479 if (!delta) 3480 if (!delta)
3480 return; 3481 return;
3481 3482
3483 /*
3484 * The relation between sum and avg is:
3485 *
3486 * LOAD_AVG_MAX - 1024 + sa->period_contrib
3487 *
3488 * however, the PELT windows are not aligned between grq and gse.
3489 */
3490
3482 /* Set new sched_entity's utilization */ 3491 /* Set new sched_entity's utilization */
3483 se->avg.util_avg = gcfs_rq->avg.util_avg; 3492 se->avg.util_avg = gcfs_rq->avg.util_avg;
3484 se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX; 3493 se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX;
@@ -3491,33 +3500,68 @@ update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
3491static inline void 3500static inline void
3492update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) 3501update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
3493{ 3502{
3494 long runnable_sum = gcfs_rq->prop_runnable_sum; 3503 long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
3495 long runnable_load_avg, load_avg; 3504 unsigned long runnable_load_avg, load_avg;
3496 s64 runnable_load_sum, load_sum; 3505 u64 runnable_load_sum, load_sum = 0;
3506 s64 delta_sum;
3497 3507
3498 if (!runnable_sum) 3508 if (!runnable_sum)
3499 return; 3509 return;
3500 3510
3501 gcfs_rq->prop_runnable_sum = 0; 3511 gcfs_rq->prop_runnable_sum = 0;
3502 3512
3513 if (runnable_sum >= 0) {
3514 /*
3515 * Add runnable; clip at LOAD_AVG_MAX. Reflects that until
3516 * the CPU is saturated running == runnable.
3517 */
3518 runnable_sum += se->avg.load_sum;
3519 runnable_sum = min(runnable_sum, (long)LOAD_AVG_MAX);
3520 } else {
3521 /*
3522 * Estimate the new unweighted runnable_sum of the gcfs_rq by
3523 * assuming all tasks are equally runnable.
3524 */
3525 if (scale_load_down(gcfs_rq->load.weight)) {
3526 load_sum = div_s64(gcfs_rq->avg.load_sum,
3527 scale_load_down(gcfs_rq->load.weight));
3528 }
3529
3530 /* But make sure to not inflate se's runnable */
3531 runnable_sum = min(se->avg.load_sum, load_sum);
3532 }
3533
3534 /*
3535 * runnable_sum can't be lower than running_sum
3536 * As running sum is scale with cpu capacity wehreas the runnable sum
3537 * is not we rescale running_sum 1st
3538 */
3539 running_sum = se->avg.util_sum /
3540 arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq)));
3541 runnable_sum = max(runnable_sum, running_sum);
3542
3503 load_sum = (s64)se_weight(se) * runnable_sum; 3543 load_sum = (s64)se_weight(se) * runnable_sum;
3504 load_avg = div_s64(load_sum, LOAD_AVG_MAX); 3544 load_avg = div_s64(load_sum, LOAD_AVG_MAX);
3505 3545
3506 add_positive(&se->avg.load_sum, runnable_sum); 3546 delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
3507 add_positive(&se->avg.load_avg, load_avg); 3547 delta_avg = load_avg - se->avg.load_avg;
3508 3548
3509 add_positive(&cfs_rq->avg.load_avg, load_avg); 3549 se->avg.load_sum = runnable_sum;
3510 add_positive(&cfs_rq->avg.load_sum, load_sum); 3550 se->avg.load_avg = load_avg;
3551 add_positive(&cfs_rq->avg.load_avg, delta_avg);
3552 add_positive(&cfs_rq->avg.load_sum, delta_sum);
3511 3553
3512 runnable_load_sum = (s64)se_runnable(se) * runnable_sum; 3554 runnable_load_sum = (s64)se_runnable(se) * runnable_sum;
3513 runnable_load_avg = div_s64(runnable_load_sum, LOAD_AVG_MAX); 3555 runnable_load_avg = div_s64(runnable_load_sum, LOAD_AVG_MAX);
3556 delta_sum = runnable_load_sum - se_weight(se) * se->avg.runnable_load_sum;
3557 delta_avg = runnable_load_avg - se->avg.runnable_load_avg;
3514 3558
3515 add_positive(&se->avg.runnable_load_sum, runnable_sum); 3559 se->avg.runnable_load_sum = runnable_sum;
3516 add_positive(&se->avg.runnable_load_avg, runnable_load_avg); 3560 se->avg.runnable_load_avg = runnable_load_avg;
3517 3561
3518 if (se->on_rq) { 3562 if (se->on_rq) {
3519 add_positive(&cfs_rq->avg.runnable_load_avg, runnable_load_avg); 3563 add_positive(&cfs_rq->avg.runnable_load_avg, delta_avg);
3520 add_positive(&cfs_rq->avg.runnable_load_sum, runnable_load_sum); 3564 add_positive(&cfs_rq->avg.runnable_load_sum, delta_sum);
3521 } 3565 }
3522} 3566}
3523 3567
@@ -4321,12 +4365,12 @@ static inline bool cfs_bandwidth_used(void)
4321 4365
4322void cfs_bandwidth_usage_inc(void) 4366void cfs_bandwidth_usage_inc(void)
4323{ 4367{
4324 static_key_slow_inc(&__cfs_bandwidth_used); 4368 static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used);
4325} 4369}
4326 4370
4327void cfs_bandwidth_usage_dec(void) 4371void cfs_bandwidth_usage_dec(void)
4328{ 4372{
4329 static_key_slow_dec(&__cfs_bandwidth_used); 4373 static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used);
4330} 4374}
4331#else /* HAVE_JUMP_LABEL */ 4375#else /* HAVE_JUMP_LABEL */
4332static bool cfs_bandwidth_used(void) 4376static bool cfs_bandwidth_used(void)
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index dd7908743dab..9bcbacba82a8 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -89,7 +89,9 @@ static int membarrier_private_expedited(void)
89 rcu_read_unlock(); 89 rcu_read_unlock();
90 } 90 }
91 if (!fallback) { 91 if (!fallback) {
92 preempt_disable();
92 smp_call_function_many(tmpmask, ipi_mb, NULL, 1); 93 smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
94 preempt_enable();
93 free_cpumask_var(tmpmask); 95 free_cpumask_var(tmpmask);
94 } 96 }
95 cpus_read_unlock(); 97 cpus_read_unlock();
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 4056c19ca3f0..665ace2fc558 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2034,8 +2034,9 @@ static void pull_rt_task(struct rq *this_rq)
2034 bool resched = false; 2034 bool resched = false;
2035 struct task_struct *p; 2035 struct task_struct *p;
2036 struct rq *src_rq; 2036 struct rq *src_rq;
2037 int rt_overload_count = rt_overloaded(this_rq);
2037 2038
2038 if (likely(!rt_overloaded(this_rq))) 2039 if (likely(!rt_overload_count))
2039 return; 2040 return;
2040 2041
2041 /* 2042 /*
@@ -2044,6 +2045,11 @@ static void pull_rt_task(struct rq *this_rq)
2044 */ 2045 */
2045 smp_rmb(); 2046 smp_rmb();
2046 2047
2048 /* If we are the only overloaded CPU do nothing */
2049 if (rt_overload_count == 1 &&
2050 cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2051 return;
2052
2047#ifdef HAVE_RT_PUSH_IPI 2053#ifdef HAVE_RT_PUSH_IPI
2048 if (sched_feat(RT_PUSH_IPI)) { 2054 if (sched_feat(RT_PUSH_IPI)) {
2049 tell_cpu_to_push(this_rq); 2055 tell_cpu_to_push(this_rq);
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 98feab7933c7..929ecb7d6b78 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -27,7 +27,7 @@ void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq
27 27
28 wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE; 28 wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
29 spin_lock_irqsave(&wq_head->lock, flags); 29 spin_lock_irqsave(&wq_head->lock, flags);
30 __add_wait_queue_entry_tail(wq_head, wq_entry); 30 __add_wait_queue(wq_head, wq_entry);
31 spin_unlock_irqrestore(&wq_head->lock, flags); 31 spin_unlock_irqrestore(&wq_head->lock, flags);
32} 32}
33EXPORT_SYMBOL(add_wait_queue); 33EXPORT_SYMBOL(add_wait_queue);
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index e776fc8cc1df..f6b5f19223d6 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -95,6 +95,7 @@ config NO_HZ_FULL
95 select RCU_NOCB_CPU 95 select RCU_NOCB_CPU
96 select VIRT_CPU_ACCOUNTING_GEN 96 select VIRT_CPU_ACCOUNTING_GEN
97 select IRQ_WORK 97 select IRQ_WORK
98 select CPU_ISOLATION
98 help 99 help
99 Adaptively try to shutdown the tick whenever possible, even when 100 Adaptively try to shutdown the tick whenever possible, even when
100 the CPU is running tasks. Typically this requires running a single 101 the CPU is running tasks. Typically this requires running a single
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index d32520840fde..aa9d2a2b1210 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -655,7 +655,9 @@ static void hrtimer_reprogram(struct hrtimer *timer,
655static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) 655static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
656{ 656{
657 base->expires_next = KTIME_MAX; 657 base->expires_next = KTIME_MAX;
658 base->hang_detected = 0;
658 base->hres_active = 0; 659 base->hres_active = 0;
660 base->next_timer = NULL;
659} 661}
660 662
661/* 663/*
@@ -1589,6 +1591,7 @@ int hrtimers_prepare_cpu(unsigned int cpu)
1589 timerqueue_init_head(&cpu_base->clock_base[i].active); 1591 timerqueue_init_head(&cpu_base->clock_base[i].active);
1590 } 1592 }
1591 1593
1594 cpu_base->active_bases = 0;
1592 cpu_base->cpu = cpu; 1595 cpu_base->cpu = cpu;
1593 hrtimer_init_hres(cpu_base); 1596 hrtimer_init_hres(cpu_base);
1594 return 0; 1597 return 0;
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 13d6881f908b..ec999f32c840 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -434,17 +434,22 @@ static struct pid *good_sigevent(sigevent_t * event)
434{ 434{
435 struct task_struct *rtn = current->group_leader; 435 struct task_struct *rtn = current->group_leader;
436 436
437 if ((event->sigev_notify & SIGEV_THREAD_ID ) && 437 switch (event->sigev_notify) {
438 (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) || 438 case SIGEV_SIGNAL | SIGEV_THREAD_ID:
439 !same_thread_group(rtn, current) || 439 rtn = find_task_by_vpid(event->sigev_notify_thread_id);
440 (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL)) 440 if (!rtn || !same_thread_group(rtn, current))
441 return NULL;
442 /* FALLTHRU */
443 case SIGEV_SIGNAL:
444 case SIGEV_THREAD:
445 if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
446 return NULL;
447 /* FALLTHRU */
448 case SIGEV_NONE:
449 return task_pid(rtn);
450 default:
441 return NULL; 451 return NULL;
442 452 }
443 if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
444 ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
445 return NULL;
446
447 return task_pid(rtn);
448} 453}
449 454
450static struct k_itimer * alloc_posix_timer(void) 455static struct k_itimer * alloc_posix_timer(void)
@@ -669,7 +674,7 @@ void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
669 struct timespec64 ts64; 674 struct timespec64 ts64;
670 bool sig_none; 675 bool sig_none;
671 676
672 sig_none = (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE; 677 sig_none = timr->it_sigev_notify == SIGEV_NONE;
673 iv = timr->it_interval; 678 iv = timr->it_interval;
674 679
675 /* interval timer ? */ 680 /* interval timer ? */
@@ -856,7 +861,7 @@ int common_timer_set(struct k_itimer *timr, int flags,
856 861
857 timr->it_interval = timespec64_to_ktime(new_setting->it_interval); 862 timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
858 expires = timespec64_to_ktime(new_setting->it_value); 863 expires = timespec64_to_ktime(new_setting->it_value);
859 sigev_none = (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE; 864 sigev_none = timr->it_sigev_notify == SIGEV_NONE;
860 865
861 kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none); 866 kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none);
862 timr->it_active = !sigev_none; 867 timr->it_active = !sigev_none;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 99578f06c8d4..f7cc7abfcf25 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -650,6 +650,11 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
650 ts->next_tick = 0; 650 ts->next_tick = 0;
651} 651}
652 652
653static inline bool local_timer_softirq_pending(void)
654{
655 return local_softirq_pending() & TIMER_SOFTIRQ;
656}
657
653static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, 658static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
654 ktime_t now, int cpu) 659 ktime_t now, int cpu)
655{ 660{
@@ -666,8 +671,18 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
666 } while (read_seqretry(&jiffies_lock, seq)); 671 } while (read_seqretry(&jiffies_lock, seq));
667 ts->last_jiffies = basejiff; 672 ts->last_jiffies = basejiff;
668 673
669 if (rcu_needs_cpu(basemono, &next_rcu) || 674 /*
670 arch_needs_cpu() || irq_work_needs_cpu()) { 675 * Keep the periodic tick, when RCU, architecture or irq_work
676 * requests it.
677 * Aside of that check whether the local timer softirq is
678 * pending. If so its a bad idea to call get_next_timer_interrupt()
679 * because there is an already expired timer, so it will request
680 * immeditate expiry, which rearms the hardware timer with a
681 * minimal delta which brings us back to this place
682 * immediately. Lather, rinse and repeat...
683 */
684 if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
685 irq_work_needs_cpu() || local_timer_softirq_pending()) {
671 next_tick = basemono + TICK_NSEC; 686 next_tick = basemono + TICK_NSEC;
672 } else { 687 } else {
673 /* 688 /*
@@ -986,6 +1001,19 @@ ktime_t tick_nohz_get_sleep_length(void)
986} 1001}
987 1002
988/** 1003/**
1004 * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
1005 * for a particular CPU.
1006 *
1007 * Called from the schedutil frequency scaling governor in scheduler context.
1008 */
1009unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
1010{
1011 struct tick_sched *ts = tick_get_tick_sched(cpu);
1012
1013 return ts->idle_calls;
1014}
1015
1016/**
989 * tick_nohz_get_idle_calls - return the current idle calls counter value 1017 * tick_nohz_get_idle_calls - return the current idle calls counter value
990 * 1018 *
991 * Called from the schedutil frequency scaling governor in scheduler context. 1019 * Called from the schedutil frequency scaling governor in scheduler context.
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index ffebcf878fba..0bcf00e3ce48 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -823,11 +823,10 @@ static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
823 struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu); 823 struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);
824 824
825 /* 825 /*
826 * If the timer is deferrable and nohz is active then we need to use 826 * If the timer is deferrable and NO_HZ_COMMON is set then we need
827 * the deferrable base. 827 * to use the deferrable base.
828 */ 828 */
829 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active && 829 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
830 (tflags & TIMER_DEFERRABLE))
831 base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu); 830 base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
832 return base; 831 return base;
833} 832}
@@ -837,11 +836,10 @@ static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
837 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 836 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
838 837
839 /* 838 /*
840 * If the timer is deferrable and nohz is active then we need to use 839 * If the timer is deferrable and NO_HZ_COMMON is set then we need
841 * the deferrable base. 840 * to use the deferrable base.
842 */ 841 */
843 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active && 842 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
844 (tflags & TIMER_DEFERRABLE))
845 base = this_cpu_ptr(&timer_bases[BASE_DEF]); 843 base = this_cpu_ptr(&timer_bases[BASE_DEF]);
846 return base; 844 return base;
847} 845}
@@ -1009,8 +1007,6 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option
1009 if (!ret && (options & MOD_TIMER_PENDING_ONLY)) 1007 if (!ret && (options & MOD_TIMER_PENDING_ONLY))
1010 goto out_unlock; 1008 goto out_unlock;
1011 1009
1012 debug_activate(timer, expires);
1013
1014 new_base = get_target_base(base, timer->flags); 1010 new_base = get_target_base(base, timer->flags);
1015 1011
1016 if (base != new_base) { 1012 if (base != new_base) {
@@ -1034,6 +1030,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option
1034 } 1030 }
1035 } 1031 }
1036 1032
1033 debug_activate(timer, expires);
1034
1037 timer->expires = expires; 1035 timer->expires = expires;
1038 /* 1036 /*
1039 * If 'idx' was calculated above and the base time did not advance 1037 * If 'idx' was calculated above and the base time did not advance
@@ -1684,7 +1682,7 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
1684 base->must_forward_clk = false; 1682 base->must_forward_clk = false;
1685 1683
1686 __run_timers(base); 1684 __run_timers(base);
1687 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active) 1685 if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
1688 __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF])); 1686 __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
1689} 1687}
1690 1688
@@ -1698,7 +1696,7 @@ void run_local_timers(void)
1698 hrtimer_run_queues(); 1696 hrtimer_run_queues();
1699 /* Raise the softirq only if required. */ 1697 /* Raise the softirq only if required. */
1700 if (time_before(jiffies, base->clk)) { 1698 if (time_before(jiffies, base->clk)) {
1701 if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active) 1699 if (!IS_ENABLED(CONFIG_NO_HZ_COMMON))
1702 return; 1700 return;
1703 /* CPU is awake, so check the deferrable base. */ 1701 /* CPU is awake, so check the deferrable base. */
1704 base++; 1702 base++;
@@ -1855,6 +1853,21 @@ static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *h
1855 } 1853 }
1856} 1854}
1857 1855
1856int timers_prepare_cpu(unsigned int cpu)
1857{
1858 struct timer_base *base;
1859 int b;
1860
1861 for (b = 0; b < NR_BASES; b++) {
1862 base = per_cpu_ptr(&timer_bases[b], cpu);
1863 base->clk = jiffies;
1864 base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
1865 base->is_idle = false;
1866 base->must_forward_clk = true;
1867 }
1868 return 0;
1869}
1870
1858int timers_dead_cpu(unsigned int cpu) 1871int timers_dead_cpu(unsigned int cpu)
1859{ 1872{
1860 struct timer_base *old_base; 1873 struct timer_base *old_base;
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index af7dad126c13..f54dc62b599c 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -164,6 +164,7 @@ config PREEMPTIRQ_EVENTS
164 bool "Enable trace events for preempt and irq disable/enable" 164 bool "Enable trace events for preempt and irq disable/enable"
165 select TRACE_IRQFLAGS 165 select TRACE_IRQFLAGS
166 depends on DEBUG_PREEMPT || !PROVE_LOCKING 166 depends on DEBUG_PREEMPT || !PROVE_LOCKING
167 depends on TRACING
167 default n 168 default n
168 help 169 help
169 Enable tracing of disable and enable events for preemption and irqs. 170 Enable tracing of disable and enable events for preemption and irqs.
@@ -354,7 +355,7 @@ config PROFILE_ANNOTATED_BRANCHES
354 on if you need to profile the system's use of these macros. 355 on if you need to profile the system's use of these macros.
355 356
356config PROFILE_ALL_BRANCHES 357config PROFILE_ALL_BRANCHES
357 bool "Profile all if conditionals" 358 bool "Profile all if conditionals" if !FORTIFY_SOURCE
358 select TRACE_BRANCH_PROFILING 359 select TRACE_BRANCH_PROFILING
359 help 360 help
360 This tracer profiles all branch conditions. Every if () 361 This tracer profiles all branch conditions. Every if ()
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 27d1f4ffa3de..40207c2a4113 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -343,14 +343,13 @@ static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
343 .arg4_type = ARG_CONST_SIZE, 343 .arg4_type = ARG_CONST_SIZE,
344}; 344};
345 345
346static DEFINE_PER_CPU(struct perf_sample_data, bpf_sd); 346static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd);
347 347
348static __always_inline u64 348static __always_inline u64
349__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, 349__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
350 u64 flags, struct perf_raw_record *raw) 350 u64 flags, struct perf_sample_data *sd)
351{ 351{
352 struct bpf_array *array = container_of(map, struct bpf_array, map); 352 struct bpf_array *array = container_of(map, struct bpf_array, map);
353 struct perf_sample_data *sd = this_cpu_ptr(&bpf_sd);
354 unsigned int cpu = smp_processor_id(); 353 unsigned int cpu = smp_processor_id();
355 u64 index = flags & BPF_F_INDEX_MASK; 354 u64 index = flags & BPF_F_INDEX_MASK;
356 struct bpf_event_entry *ee; 355 struct bpf_event_entry *ee;
@@ -373,8 +372,6 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
373 if (unlikely(event->oncpu != cpu)) 372 if (unlikely(event->oncpu != cpu))
374 return -EOPNOTSUPP; 373 return -EOPNOTSUPP;
375 374
376 perf_sample_data_init(sd, 0, 0);
377 sd->raw = raw;
378 perf_event_output(event, sd, regs); 375 perf_event_output(event, sd, regs);
379 return 0; 376 return 0;
380} 377}
@@ -382,6 +379,7 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
382BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, 379BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
383 u64, flags, void *, data, u64, size) 380 u64, flags, void *, data, u64, size)
384{ 381{
382 struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd);
385 struct perf_raw_record raw = { 383 struct perf_raw_record raw = {
386 .frag = { 384 .frag = {
387 .size = size, 385 .size = size,
@@ -392,7 +390,10 @@ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
392 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) 390 if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
393 return -EINVAL; 391 return -EINVAL;
394 392
395 return __bpf_perf_event_output(regs, map, flags, &raw); 393 perf_sample_data_init(sd, 0, 0);
394 sd->raw = &raw;
395
396 return __bpf_perf_event_output(regs, map, flags, sd);
396} 397}
397 398
398static const struct bpf_func_proto bpf_perf_event_output_proto = { 399static const struct bpf_func_proto bpf_perf_event_output_proto = {
@@ -407,10 +408,12 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = {
407}; 408};
408 409
409static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs); 410static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
411static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd);
410 412
411u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, 413u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
412 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) 414 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
413{ 415{
416 struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd);
414 struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs); 417 struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
415 struct perf_raw_frag frag = { 418 struct perf_raw_frag frag = {
416 .copy = ctx_copy, 419 .copy = ctx_copy,
@@ -428,8 +431,10 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
428 }; 431 };
429 432
430 perf_fetch_caller_regs(regs); 433 perf_fetch_caller_regs(regs);
434 perf_sample_data_init(sd, 0, 0);
435 sd->raw = &raw;
431 436
432 return __bpf_perf_event_output(regs, map, flags, &raw); 437 return __bpf_perf_event_output(regs, map, flags, sd);
433} 438}
434 439
435BPF_CALL_0(bpf_get_current_task) 440BPF_CALL_0(bpf_get_current_task)
@@ -759,6 +764,8 @@ const struct bpf_prog_ops perf_event_prog_ops = {
759 764
760static DEFINE_MUTEX(bpf_event_mutex); 765static DEFINE_MUTEX(bpf_event_mutex);
761 766
767#define BPF_TRACE_MAX_PROGS 64
768
762int perf_event_attach_bpf_prog(struct perf_event *event, 769int perf_event_attach_bpf_prog(struct perf_event *event,
763 struct bpf_prog *prog) 770 struct bpf_prog *prog)
764{ 771{
@@ -772,6 +779,12 @@ int perf_event_attach_bpf_prog(struct perf_event *event,
772 goto unlock; 779 goto unlock;
773 780
774 old_array = event->tp_event->prog_array; 781 old_array = event->tp_event->prog_array;
782 if (old_array &&
783 bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
784 ret = -E2BIG;
785 goto unlock;
786 }
787
775 ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array); 788 ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
776 if (ret < 0) 789 if (ret < 0)
777 goto unlock; 790 goto unlock;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ccdf3664e4a9..554b517c61a0 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1119,15 +1119,11 @@ static struct ftrace_ops global_ops = {
1119}; 1119};
1120 1120
1121/* 1121/*
1122 * This is used by __kernel_text_address() to return true if the 1122 * Used by the stack undwinder to know about dynamic ftrace trampolines.
1123 * address is on a dynamically allocated trampoline that would
1124 * not return true for either core_kernel_text() or
1125 * is_module_text_address().
1126 */ 1123 */
1127bool is_ftrace_trampoline(unsigned long addr) 1124struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
1128{ 1125{
1129 struct ftrace_ops *op; 1126 struct ftrace_ops *op = NULL;
1130 bool ret = false;
1131 1127
1132 /* 1128 /*
1133 * Some of the ops may be dynamically allocated, 1129 * Some of the ops may be dynamically allocated,
@@ -1144,15 +1140,24 @@ bool is_ftrace_trampoline(unsigned long addr)
1144 if (op->trampoline && op->trampoline_size) 1140 if (op->trampoline && op->trampoline_size)
1145 if (addr >= op->trampoline && 1141 if (addr >= op->trampoline &&
1146 addr < op->trampoline + op->trampoline_size) { 1142 addr < op->trampoline + op->trampoline_size) {
1147 ret = true; 1143 preempt_enable_notrace();
1148 goto out; 1144 return op;
1149 } 1145 }
1150 } while_for_each_ftrace_op(op); 1146 } while_for_each_ftrace_op(op);
1151
1152 out:
1153 preempt_enable_notrace(); 1147 preempt_enable_notrace();
1154 1148
1155 return ret; 1149 return NULL;
1150}
1151
1152/*
1153 * This is used by __kernel_text_address() to return true if the
1154 * address is on a dynamically allocated trampoline that would
1155 * not return true for either core_kernel_text() or
1156 * is_module_text_address().
1157 */
1158bool is_ftrace_trampoline(unsigned long addr)
1159{
1160 return ftrace_ops_trampoline(addr) != NULL;
1156} 1161}
1157 1162
1158struct ftrace_page { 1163struct ftrace_page {
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 91874a95060d..5af2842dea96 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -280,6 +280,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
280/* Missed count stored at end */ 280/* Missed count stored at end */
281#define RB_MISSED_STORED (1 << 30) 281#define RB_MISSED_STORED (1 << 30)
282 282
283#define RB_MISSED_FLAGS (RB_MISSED_EVENTS|RB_MISSED_STORED)
284
283struct buffer_data_page { 285struct buffer_data_page {
284 u64 time_stamp; /* page time stamp */ 286 u64 time_stamp; /* page time stamp */
285 local_t commit; /* write committed index */ 287 local_t commit; /* write committed index */
@@ -331,7 +333,9 @@ static void rb_init_page(struct buffer_data_page *bpage)
331 */ 333 */
332size_t ring_buffer_page_len(void *page) 334size_t ring_buffer_page_len(void *page)
333{ 335{
334 return local_read(&((struct buffer_data_page *)page)->commit) 336 struct buffer_data_page *bpage = page;
337
338 return (local_read(&bpage->commit) & ~RB_MISSED_FLAGS)
335 + BUF_PAGE_HDR_SIZE; 339 + BUF_PAGE_HDR_SIZE;
336} 340}
337 341
@@ -1799,12 +1803,6 @@ void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
1799} 1803}
1800EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite); 1804EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
1801 1805
1802static __always_inline void *
1803__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
1804{
1805 return bpage->data + index;
1806}
1807
1808static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) 1806static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
1809{ 1807{
1810 return bpage->page->data + index; 1808 return bpage->page->data + index;
@@ -2536,29 +2534,58 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
2536 * The lock and unlock are done within a preempt disable section. 2534 * The lock and unlock are done within a preempt disable section.
2537 * The current_context per_cpu variable can only be modified 2535 * The current_context per_cpu variable can only be modified
2538 * by the current task between lock and unlock. But it can 2536 * by the current task between lock and unlock. But it can
2539 * be modified more than once via an interrupt. There are four 2537 * be modified more than once via an interrupt. To pass this
2540 * different contexts that we need to consider. 2538 * information from the lock to the unlock without having to
2539 * access the 'in_interrupt()' functions again (which do show
2540 * a bit of overhead in something as critical as function tracing,
2541 * we use a bitmask trick.
2542 *
2543 * bit 0 = NMI context
2544 * bit 1 = IRQ context
2545 * bit 2 = SoftIRQ context
2546 * bit 3 = normal context.
2547 *
2548 * This works because this is the order of contexts that can
2549 * preempt other contexts. A SoftIRQ never preempts an IRQ
2550 * context.
2551 *
2552 * When the context is determined, the corresponding bit is
2553 * checked and set (if it was set, then a recursion of that context
2554 * happened).
2555 *
2556 * On unlock, we need to clear this bit. To do so, just subtract
2557 * 1 from the current_context and AND it to itself.
2541 * 2558 *
2542 * Normal context. 2559 * (binary)
2543 * SoftIRQ context 2560 * 101 - 1 = 100
2544 * IRQ context 2561 * 101 & 100 = 100 (clearing bit zero)
2545 * NMI context
2546 * 2562 *
2547 * If for some reason the ring buffer starts to recurse, we 2563 * 1010 - 1 = 1001
2548 * only allow that to happen at most 4 times (one for each 2564 * 1010 & 1001 = 1000 (clearing bit 1)
2549 * context). If it happens 5 times, then we consider this a 2565 *
2550 * recusive loop and do not let it go further. 2566 * The least significant bit can be cleared this way, and it
2567 * just so happens that it is the same bit corresponding to
2568 * the current context.
2551 */ 2569 */
2552 2570
2553static __always_inline int 2571static __always_inline int
2554trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) 2572trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
2555{ 2573{
2556 if (cpu_buffer->current_context >= 4) 2574 unsigned int val = cpu_buffer->current_context;
2575 unsigned long pc = preempt_count();
2576 int bit;
2577
2578 if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
2579 bit = RB_CTX_NORMAL;
2580 else
2581 bit = pc & NMI_MASK ? RB_CTX_NMI :
2582 pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
2583
2584 if (unlikely(val & (1 << bit)))
2557 return 1; 2585 return 1;
2558 2586
2559 cpu_buffer->current_context++; 2587 val |= (1 << bit);
2560 /* Interrupts must see this update */ 2588 cpu_buffer->current_context = val;
2561 barrier();
2562 2589
2563 return 0; 2590 return 0;
2564} 2591}
@@ -2566,9 +2593,7 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
2566static __always_inline void 2593static __always_inline void
2567trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) 2594trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
2568{ 2595{
2569 /* Don't let the dec leak out */ 2596 cpu_buffer->current_context &= cpu_buffer->current_context - 1;
2570 barrier();
2571 cpu_buffer->current_context--;
2572} 2597}
2573 2598
2574/** 2599/**
@@ -4406,8 +4431,13 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
4406{ 4431{
4407 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 4432 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4408 struct buffer_data_page *bpage = data; 4433 struct buffer_data_page *bpage = data;
4434 struct page *page = virt_to_page(bpage);
4409 unsigned long flags; 4435 unsigned long flags;
4410 4436
4437 /* If the page is still in use someplace else, we can't reuse it */
4438 if (page_ref_count(page) > 1)
4439 goto out;
4440
4411 local_irq_save(flags); 4441 local_irq_save(flags);
4412 arch_spin_lock(&cpu_buffer->lock); 4442 arch_spin_lock(&cpu_buffer->lock);
4413 4443
@@ -4419,6 +4449,7 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
4419 arch_spin_unlock(&cpu_buffer->lock); 4449 arch_spin_unlock(&cpu_buffer->lock);
4420 local_irq_restore(flags); 4450 local_irq_restore(flags);
4421 4451
4452 out:
4422 free_page((unsigned long)bpage); 4453 free_page((unsigned long)bpage);
4423} 4454}
4424EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); 4455EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 73e67b68c53b..8e3f20a18a06 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -362,7 +362,7 @@ trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct
362} 362}
363 363
364/** 364/**
365 * trace_pid_filter_add_remove - Add or remove a task from a pid_list 365 * trace_pid_filter_add_remove_task - Add or remove a task from a pid_list
366 * @pid_list: The list to modify 366 * @pid_list: The list to modify
367 * @self: The current task for fork or NULL for exit 367 * @self: The current task for fork or NULL for exit
368 * @task: The task to add or remove 368 * @task: The task to add or remove
@@ -925,7 +925,7 @@ static void tracing_snapshot_instance(struct trace_array *tr)
925} 925}
926 926
927/** 927/**
928 * trace_snapshot - take a snapshot of the current buffer. 928 * tracing_snapshot - take a snapshot of the current buffer.
929 * 929 *
930 * This causes a swap between the snapshot buffer and the current live 930 * This causes a swap between the snapshot buffer and the current live
931 * tracing buffer. You can use this to take snapshots of the live 931 * tracing buffer. You can use this to take snapshots of the live
@@ -1004,9 +1004,9 @@ int tracing_alloc_snapshot(void)
1004EXPORT_SYMBOL_GPL(tracing_alloc_snapshot); 1004EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1005 1005
1006/** 1006/**
1007 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer. 1007 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1008 * 1008 *
1009 * This is similar to trace_snapshot(), but it will allocate the 1009 * This is similar to tracing_snapshot(), but it will allocate the
1010 * snapshot buffer if it isn't already allocated. Use this only 1010 * snapshot buffer if it isn't already allocated. Use this only
1011 * where it is safe to sleep, as the allocation may sleep. 1011 * where it is safe to sleep, as the allocation may sleep.
1012 * 1012 *
@@ -1303,7 +1303,7 @@ unsigned long __read_mostly tracing_thresh;
1303/* 1303/*
1304 * Copy the new maximum trace into the separate maximum-trace 1304 * Copy the new maximum trace into the separate maximum-trace
1305 * structure. (this way the maximum trace is permanently saved, 1305 * structure. (this way the maximum trace is permanently saved,
1306 * for later retrieval via /sys/kernel/debug/tracing/latency_trace) 1306 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1307 */ 1307 */
1308static void 1308static void
1309__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) 1309__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
@@ -2374,6 +2374,15 @@ void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2374} 2374}
2375EXPORT_SYMBOL_GPL(trace_event_buffer_commit); 2375EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2376 2376
2377/*
2378 * Skip 3:
2379 *
2380 * trace_buffer_unlock_commit_regs()
2381 * trace_event_buffer_commit()
2382 * trace_event_raw_event_xxx()
2383*/
2384# define STACK_SKIP 3
2385
2377void trace_buffer_unlock_commit_regs(struct trace_array *tr, 2386void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2378 struct ring_buffer *buffer, 2387 struct ring_buffer *buffer,
2379 struct ring_buffer_event *event, 2388 struct ring_buffer_event *event,
@@ -2383,16 +2392,12 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2383 __buffer_unlock_commit(buffer, event); 2392 __buffer_unlock_commit(buffer, event);
2384 2393
2385 /* 2394 /*
2386 * If regs is not set, then skip the following callers: 2395 * If regs is not set, then skip the necessary functions.
2387 * trace_buffer_unlock_commit_regs
2388 * event_trigger_unlock_commit
2389 * trace_event_buffer_commit
2390 * trace_event_raw_event_sched_switch
2391 * Note, we can still get here via blktrace, wakeup tracer 2396 * Note, we can still get here via blktrace, wakeup tracer
2392 * and mmiotrace, but that's ok if they lose a function or 2397 * and mmiotrace, but that's ok if they lose a function or
2393 * two. They are that meaningful. 2398 * two. They are not that meaningful.
2394 */ 2399 */
2395 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs); 2400 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
2396 ftrace_trace_userstack(buffer, flags, pc); 2401 ftrace_trace_userstack(buffer, flags, pc);
2397} 2402}
2398 2403
@@ -2415,7 +2420,7 @@ trace_process_export(struct trace_export *export,
2415 2420
2416 entry = ring_buffer_event_data(event); 2421 entry = ring_buffer_event_data(event);
2417 size = ring_buffer_event_length(event); 2422 size = ring_buffer_event_length(event);
2418 export->write(entry, size); 2423 export->write(export, entry, size);
2419} 2424}
2420 2425
2421static DEFINE_MUTEX(ftrace_export_lock); 2426static DEFINE_MUTEX(ftrace_export_lock);
@@ -2579,11 +2584,13 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
2579 trace.skip = skip; 2584 trace.skip = skip;
2580 2585
2581 /* 2586 /*
2582 * Add two, for this function and the call to save_stack_trace() 2587 * Add one, for this function and the call to save_stack_trace()
2583 * If regs is set, then these functions will not be in the way. 2588 * If regs is set, then these functions will not be in the way.
2584 */ 2589 */
2590#ifndef CONFIG_UNWINDER_ORC
2585 if (!regs) 2591 if (!regs)
2586 trace.skip += 2; 2592 trace.skip++;
2593#endif
2587 2594
2588 /* 2595 /*
2589 * Since events can happen in NMIs there's no safe way to 2596 * Since events can happen in NMIs there's no safe way to
@@ -2711,11 +2718,10 @@ void trace_dump_stack(int skip)
2711 2718
2712 local_save_flags(flags); 2719 local_save_flags(flags);
2713 2720
2714 /* 2721#ifndef CONFIG_UNWINDER_ORC
2715 * Skip 3 more, seems to get us at the caller of 2722 /* Skip 1 to skip this function. */
2716 * this function. 2723 skip++;
2717 */ 2724#endif
2718 skip += 3;
2719 __ftrace_trace_stack(global_trace.trace_buffer.buffer, 2725 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2720 flags, skip, preempt_count(), NULL); 2726 flags, skip, preempt_count(), NULL);
2721} 2727}
@@ -4178,37 +4184,30 @@ static const struct file_operations show_traces_fops = {
4178 .llseek = seq_lseek, 4184 .llseek = seq_lseek,
4179}; 4185};
4180 4186
4181/*
4182 * The tracer itself will not take this lock, but still we want
4183 * to provide a consistent cpumask to user-space:
4184 */
4185static DEFINE_MUTEX(tracing_cpumask_update_lock);
4186
4187/*
4188 * Temporary storage for the character representation of the
4189 * CPU bitmask (and one more byte for the newline):
4190 */
4191static char mask_str[NR_CPUS + 1];
4192
4193static ssize_t 4187static ssize_t
4194tracing_cpumask_read(struct file *filp, char __user *ubuf, 4188tracing_cpumask_read(struct file *filp, char __user *ubuf,
4195 size_t count, loff_t *ppos) 4189 size_t count, loff_t *ppos)
4196{ 4190{
4197 struct trace_array *tr = file_inode(filp)->i_private; 4191 struct trace_array *tr = file_inode(filp)->i_private;
4192 char *mask_str;
4198 int len; 4193 int len;
4199 4194
4200 mutex_lock(&tracing_cpumask_update_lock); 4195 len = snprintf(NULL, 0, "%*pb\n",
4196 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4197 mask_str = kmalloc(len, GFP_KERNEL);
4198 if (!mask_str)
4199 return -ENOMEM;
4201 4200
4202 len = snprintf(mask_str, count, "%*pb\n", 4201 len = snprintf(mask_str, len, "%*pb\n",
4203 cpumask_pr_args(tr->tracing_cpumask)); 4202 cpumask_pr_args(tr->tracing_cpumask));
4204 if (len >= count) { 4203 if (len >= count) {
4205 count = -EINVAL; 4204 count = -EINVAL;
4206 goto out_err; 4205 goto out_err;
4207 } 4206 }
4208 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1); 4207 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4209 4208
4210out_err: 4209out_err:
4211 mutex_unlock(&tracing_cpumask_update_lock); 4210 kfree(mask_str);
4212 4211
4213 return count; 4212 return count;
4214} 4213}
@@ -4228,8 +4227,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4228 if (err) 4227 if (err)
4229 goto err_unlock; 4228 goto err_unlock;
4230 4229
4231 mutex_lock(&tracing_cpumask_update_lock);
4232
4233 local_irq_disable(); 4230 local_irq_disable();
4234 arch_spin_lock(&tr->max_lock); 4231 arch_spin_lock(&tr->max_lock);
4235 for_each_tracing_cpu(cpu) { 4232 for_each_tracing_cpu(cpu) {
@@ -4252,8 +4249,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4252 local_irq_enable(); 4249 local_irq_enable();
4253 4250
4254 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); 4251 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4255
4256 mutex_unlock(&tracing_cpumask_update_lock);
4257 free_cpumask_var(tracing_cpumask_new); 4252 free_cpumask_var(tracing_cpumask_new);
4258 4253
4259 return count; 4254 return count;
@@ -6780,7 +6775,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6780 .spd_release = buffer_spd_release, 6775 .spd_release = buffer_spd_release,
6781 }; 6776 };
6782 struct buffer_ref *ref; 6777 struct buffer_ref *ref;
6783 int entries, size, i; 6778 int entries, i;
6784 ssize_t ret = 0; 6779 ssize_t ret = 0;
6785 6780
6786#ifdef CONFIG_TRACER_MAX_TRACE 6781#ifdef CONFIG_TRACER_MAX_TRACE
@@ -6834,14 +6829,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6834 break; 6829 break;
6835 } 6830 }
6836 6831
6837 /*
6838 * zero out any left over data, this is going to
6839 * user land.
6840 */
6841 size = ring_buffer_page_len(ref->page);
6842 if (size < PAGE_SIZE)
6843 memset(ref->page + size, 0, PAGE_SIZE - size);
6844
6845 page = virt_to_page(ref->page); 6832 page = virt_to_page(ref->page);
6846 6833
6847 spd.pages[i] = page; 6834 spd.pages[i] = page;
@@ -7599,6 +7586,7 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
7599 buf->data = alloc_percpu(struct trace_array_cpu); 7586 buf->data = alloc_percpu(struct trace_array_cpu);
7600 if (!buf->data) { 7587 if (!buf->data) {
7601 ring_buffer_free(buf->buffer); 7588 ring_buffer_free(buf->buffer);
7589 buf->buffer = NULL;
7602 return -ENOMEM; 7590 return -ENOMEM;
7603 } 7591 }
7604 7592
@@ -7622,7 +7610,9 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
7622 allocate_snapshot ? size : 1); 7610 allocate_snapshot ? size : 1);
7623 if (WARN_ON(ret)) { 7611 if (WARN_ON(ret)) {
7624 ring_buffer_free(tr->trace_buffer.buffer); 7612 ring_buffer_free(tr->trace_buffer.buffer);
7613 tr->trace_buffer.buffer = NULL;
7625 free_percpu(tr->trace_buffer.data); 7614 free_percpu(tr->trace_buffer.data);
7615 tr->trace_buffer.data = NULL;
7626 return -ENOMEM; 7616 return -ENOMEM;
7627 } 7617 }
7628 tr->allocated_snapshot = allocate_snapshot; 7618 tr->allocated_snapshot = allocate_snapshot;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index ec0f9aa4e151..1b87157edbff 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2213,6 +2213,7 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
2213{ 2213{
2214 struct trace_event_call *call, *p; 2214 struct trace_event_call *call, *p;
2215 const char *last_system = NULL; 2215 const char *last_system = NULL;
2216 bool first = false;
2216 int last_i; 2217 int last_i;
2217 int i; 2218 int i;
2218 2219
@@ -2220,15 +2221,28 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
2220 list_for_each_entry_safe(call, p, &ftrace_events, list) { 2221 list_for_each_entry_safe(call, p, &ftrace_events, list) {
2221 /* events are usually grouped together with systems */ 2222 /* events are usually grouped together with systems */
2222 if (!last_system || call->class->system != last_system) { 2223 if (!last_system || call->class->system != last_system) {
2224 first = true;
2223 last_i = 0; 2225 last_i = 0;
2224 last_system = call->class->system; 2226 last_system = call->class->system;
2225 } 2227 }
2226 2228
2229 /*
2230 * Since calls are grouped by systems, the likelyhood that the
2231 * next call in the iteration belongs to the same system as the
2232 * previous call is high. As an optimization, we skip seaching
2233 * for a map[] that matches the call's system if the last call
2234 * was from the same system. That's what last_i is for. If the
2235 * call has the same system as the previous call, then last_i
2236 * will be the index of the first map[] that has a matching
2237 * system.
2238 */
2227 for (i = last_i; i < len; i++) { 2239 for (i = last_i; i < len; i++) {
2228 if (call->class->system == map[i]->system) { 2240 if (call->class->system == map[i]->system) {
2229 /* Save the first system if need be */ 2241 /* Save the first system if need be */
2230 if (!last_i) 2242 if (first) {
2231 last_i = i; 2243 last_i = i;
2244 first = false;
2245 }
2232 update_event_printk(call, map[i]); 2246 update_event_printk(call, map[i]);
2233 } 2247 }
2234 } 2248 }
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index f2ac9d44f6c4..87411482a46f 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -1123,13 +1123,22 @@ static __init int register_trigger_snapshot_cmd(void) { return 0; }
1123#endif /* CONFIG_TRACER_SNAPSHOT */ 1123#endif /* CONFIG_TRACER_SNAPSHOT */
1124 1124
1125#ifdef CONFIG_STACKTRACE 1125#ifdef CONFIG_STACKTRACE
1126#ifdef CONFIG_UNWINDER_ORC
1127/* Skip 2:
1128 * event_triggers_post_call()
1129 * trace_event_raw_event_xxx()
1130 */
1131# define STACK_SKIP 2
1132#else
1126/* 1133/*
1127 * Skip 3: 1134 * Skip 4:
1128 * stacktrace_trigger() 1135 * stacktrace_trigger()
1129 * event_triggers_post_call() 1136 * event_triggers_post_call()
1137 * trace_event_buffer_commit()
1130 * trace_event_raw_event_xxx() 1138 * trace_event_raw_event_xxx()
1131 */ 1139 */
1132#define STACK_SKIP 3 1140#define STACK_SKIP 4
1141#endif
1133 1142
1134static void 1143static void
1135stacktrace_trigger(struct event_trigger_data *data, void *rec) 1144stacktrace_trigger(struct event_trigger_data *data, void *rec)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 27f7ad12c4b1..b611cd36e22d 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -154,6 +154,24 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
154 preempt_enable_notrace(); 154 preempt_enable_notrace();
155} 155}
156 156
157#ifdef CONFIG_UNWINDER_ORC
158/*
159 * Skip 2:
160 *
161 * function_stack_trace_call()
162 * ftrace_call()
163 */
164#define STACK_SKIP 2
165#else
166/*
167 * Skip 3:
168 * __trace_stack()
169 * function_stack_trace_call()
170 * ftrace_call()
171 */
172#define STACK_SKIP 3
173#endif
174
157static void 175static void
158function_stack_trace_call(unsigned long ip, unsigned long parent_ip, 176function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
159 struct ftrace_ops *op, struct pt_regs *pt_regs) 177 struct ftrace_ops *op, struct pt_regs *pt_regs)
@@ -180,15 +198,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
180 if (likely(disabled == 1)) { 198 if (likely(disabled == 1)) {
181 pc = preempt_count(); 199 pc = preempt_count();
182 trace_function(tr, ip, parent_ip, flags, pc); 200 trace_function(tr, ip, parent_ip, flags, pc);
183 /* 201 __trace_stack(tr, flags, STACK_SKIP, pc);
184 * skip over 5 funcs:
185 * __ftrace_trace_stack,
186 * __trace_stack,
187 * function_stack_trace_call
188 * ftrace_list_func
189 * ftrace_call
190 */
191 __trace_stack(tr, flags, 5, pc);
192 } 202 }
193 203
194 atomic_dec(&data->disabled); 204 atomic_dec(&data->disabled);
@@ -367,14 +377,27 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
367 tracer_tracing_off(tr); 377 tracer_tracing_off(tr);
368} 378}
369 379
380#ifdef CONFIG_UNWINDER_ORC
370/* 381/*
371 * Skip 4: 382 * Skip 3:
383 *
384 * function_trace_probe_call()
385 * ftrace_ops_assist_func()
386 * ftrace_call()
387 */
388#define FTRACE_STACK_SKIP 3
389#else
390/*
391 * Skip 5:
392 *
393 * __trace_stack()
372 * ftrace_stacktrace() 394 * ftrace_stacktrace()
373 * function_trace_probe_call() 395 * function_trace_probe_call()
374 * ftrace_ops_list_func() 396 * ftrace_ops_assist_func()
375 * ftrace_call() 397 * ftrace_call()
376 */ 398 */
377#define STACK_SKIP 4 399#define FTRACE_STACK_SKIP 5
400#endif
378 401
379static __always_inline void trace_stack(struct trace_array *tr) 402static __always_inline void trace_stack(struct trace_array *tr)
380{ 403{
@@ -384,7 +407,7 @@ static __always_inline void trace_stack(struct trace_array *tr)
384 local_save_flags(flags); 407 local_save_flags(flags);
385 pc = preempt_count(); 408 pc = preempt_count();
386 409
387 __trace_stack(tr, flags, STACK_SKIP, pc); 410 __trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
388} 411}
389 412
390static void 413static void
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 734accc02418..3c7bfc4bf5e9 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -209,6 +209,10 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
209 if (__this_cpu_read(disable_stack_tracer) != 1) 209 if (__this_cpu_read(disable_stack_tracer) != 1)
210 goto out; 210 goto out;
211 211
212 /* If rcu is not watching, then save stack trace can fail */
213 if (!rcu_is_watching())
214 goto out;
215
212 ip += MCOUNT_INSN_SIZE; 216 ip += MCOUNT_INSN_SIZE;
213 217
214 check_stack(ip, &stack); 218 check_stack(ip, &stack);
diff --git a/kernel/uid16.c b/kernel/uid16.c
index ce74a4901d2b..ef1da2a5f9bd 100644
--- a/kernel/uid16.c
+++ b/kernel/uid16.c
@@ -192,6 +192,7 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
192 return retval; 192 return retval;
193 } 193 }
194 194
195 groups_sort(group_info);
195 retval = set_current_groups(group_info); 196 retval = set_current_groups(group_info);
196 put_group_info(group_info); 197 put_group_info(group_info);
197 198
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8fdb710bfdd7..f699122dab32 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -38,7 +38,6 @@
38#include <linux/hardirq.h> 38#include <linux/hardirq.h>
39#include <linux/mempolicy.h> 39#include <linux/mempolicy.h>
40#include <linux/freezer.h> 40#include <linux/freezer.h>
41#include <linux/kallsyms.h>
42#include <linux/debug_locks.h> 41#include <linux/debug_locks.h>
43#include <linux/lockdep.h> 42#include <linux/lockdep.h>
44#include <linux/idr.h> 43#include <linux/idr.h>
@@ -48,6 +47,8 @@
48#include <linux/nodemask.h> 47#include <linux/nodemask.h>
49#include <linux/moduleparam.h> 48#include <linux/moduleparam.h>
50#include <linux/uaccess.h> 49#include <linux/uaccess.h>
50#include <linux/sched/isolation.h>
51#include <linux/nmi.h>
51 52
52#include "workqueue_internal.h" 53#include "workqueue_internal.h"
53 54
@@ -1634,7 +1635,7 @@ static void worker_enter_idle(struct worker *worker)
1634 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); 1635 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1635 1636
1636 /* 1637 /*
1637 * Sanity check nr_running. Because wq_unbind_fn() releases 1638 * Sanity check nr_running. Because unbind_workers() releases
1638 * pool->lock between setting %WORKER_UNBOUND and zapping 1639 * pool->lock between setting %WORKER_UNBOUND and zapping
1639 * nr_running, the warning may trigger spuriously. Check iff 1640 * nr_running, the warning may trigger spuriously. Check iff
1640 * unbind is not in progress. 1641 * unbind is not in progress.
@@ -4463,6 +4464,12 @@ void show_workqueue_state(void)
4463 if (pwq->nr_active || !list_empty(&pwq->delayed_works)) 4464 if (pwq->nr_active || !list_empty(&pwq->delayed_works))
4464 show_pwq(pwq); 4465 show_pwq(pwq);
4465 spin_unlock_irqrestore(&pwq->pool->lock, flags); 4466 spin_unlock_irqrestore(&pwq->pool->lock, flags);
4467 /*
4468 * We could be printing a lot from atomic context, e.g.
4469 * sysrq-t -> show_workqueue_state(). Avoid triggering
4470 * hard lockup.
4471 */
4472 touch_nmi_watchdog();
4466 } 4473 }
4467 } 4474 }
4468 4475
@@ -4490,6 +4497,12 @@ void show_workqueue_state(void)
4490 pr_cont("\n"); 4497 pr_cont("\n");
4491 next_pool: 4498 next_pool:
4492 spin_unlock_irqrestore(&pool->lock, flags); 4499 spin_unlock_irqrestore(&pool->lock, flags);
4500 /*
4501 * We could be printing a lot from atomic context, e.g.
4502 * sysrq-t -> show_workqueue_state(). Avoid triggering
4503 * hard lockup.
4504 */
4505 touch_nmi_watchdog();
4493 } 4506 }
4494 4507
4495 rcu_read_unlock_sched(); 4508 rcu_read_unlock_sched();
@@ -4510,9 +4523,8 @@ void show_workqueue_state(void)
4510 * cpu comes back online. 4523 * cpu comes back online.
4511 */ 4524 */
4512 4525
4513static void wq_unbind_fn(struct work_struct *work) 4526static void unbind_workers(int cpu)
4514{ 4527{
4515 int cpu = smp_processor_id();
4516 struct worker_pool *pool; 4528 struct worker_pool *pool;
4517 struct worker *worker; 4529 struct worker *worker;
4518 4530
@@ -4589,16 +4601,6 @@ static void rebind_workers(struct worker_pool *pool)
4589 4601
4590 spin_lock_irq(&pool->lock); 4602 spin_lock_irq(&pool->lock);
4591 4603
4592 /*
4593 * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
4594 * w/o preceding DOWN_PREPARE. Work around it. CPU hotplug is
4595 * being reworked and this can go away in time.
4596 */
4597 if (!(pool->flags & POOL_DISASSOCIATED)) {
4598 spin_unlock_irq(&pool->lock);
4599 return;
4600 }
4601
4602 pool->flags &= ~POOL_DISASSOCIATED; 4604 pool->flags &= ~POOL_DISASSOCIATED;
4603 4605
4604 for_each_pool_worker(worker, pool) { 4606 for_each_pool_worker(worker, pool) {
@@ -4709,12 +4711,13 @@ int workqueue_online_cpu(unsigned int cpu)
4709 4711
4710int workqueue_offline_cpu(unsigned int cpu) 4712int workqueue_offline_cpu(unsigned int cpu)
4711{ 4713{
4712 struct work_struct unbind_work;
4713 struct workqueue_struct *wq; 4714 struct workqueue_struct *wq;
4714 4715
4715 /* unbinding per-cpu workers should happen on the local CPU */ 4716 /* unbinding per-cpu workers should happen on the local CPU */
4716 INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn); 4717 if (WARN_ON(cpu != smp_processor_id()))
4717 queue_work_on(cpu, system_highpri_wq, &unbind_work); 4718 return -1;
4719
4720 unbind_workers(cpu);
4718 4721
4719 /* update NUMA affinity of unbound workqueues */ 4722 /* update NUMA affinity of unbound workqueues */
4720 mutex_lock(&wq_pool_mutex); 4723 mutex_lock(&wq_pool_mutex);
@@ -4722,9 +4725,6 @@ int workqueue_offline_cpu(unsigned int cpu)
4722 wq_update_unbound_numa(wq, cpu, false); 4725 wq_update_unbound_numa(wq, cpu, false);
4723 mutex_unlock(&wq_pool_mutex); 4726 mutex_unlock(&wq_pool_mutex);
4724 4727
4725 /* wait for per-cpu unbinding to finish */
4726 flush_work(&unbind_work);
4727 destroy_work_on_stack(&unbind_work);
4728 return 0; 4728 return 0;
4729} 4729}
4730 4730
@@ -4957,6 +4957,10 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
4957 if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL)) 4957 if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
4958 return -ENOMEM; 4958 return -ENOMEM;
4959 4959
4960 /*
4961 * Not excluding isolated cpus on purpose.
4962 * If the user wishes to include them, we allow that.
4963 */
4960 cpumask_and(cpumask, cpumask, cpu_possible_mask); 4964 cpumask_and(cpumask, cpumask, cpu_possible_mask);
4961 if (!cpumask_empty(cpumask)) { 4965 if (!cpumask_empty(cpumask)) {
4962 apply_wqattrs_lock(); 4966 apply_wqattrs_lock();
@@ -5555,7 +5559,7 @@ int __init workqueue_init_early(void)
5555 WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); 5559 WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
5556 5560
5557 BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL)); 5561 BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
5558 cpumask_copy(wq_unbound_cpumask, cpu_possible_mask); 5562 cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_FLAG_DOMAIN));
5559 5563
5560 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); 5564 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
5561 5565
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 947d3e2ed5c2..9d5b78aad4c5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1099,8 +1099,6 @@ config PROVE_LOCKING
1099 select DEBUG_MUTEXES 1099 select DEBUG_MUTEXES
1100 select DEBUG_RT_MUTEXES if RT_MUTEXES 1100 select DEBUG_RT_MUTEXES if RT_MUTEXES
1101 select DEBUG_LOCK_ALLOC 1101 select DEBUG_LOCK_ALLOC
1102 select LOCKDEP_CROSSRELEASE
1103 select LOCKDEP_COMPLETIONS
1104 select TRACE_IRQFLAGS 1102 select TRACE_IRQFLAGS
1105 default n 1103 default n
1106 help 1104 help
@@ -1170,37 +1168,6 @@ config LOCK_STAT
1170 CONFIG_LOCK_STAT defines "contended" and "acquired" lock events. 1168 CONFIG_LOCK_STAT defines "contended" and "acquired" lock events.
1171 (CONFIG_LOCKDEP defines "acquire" and "release" events.) 1169 (CONFIG_LOCKDEP defines "acquire" and "release" events.)
1172 1170
1173config LOCKDEP_CROSSRELEASE
1174 bool
1175 help
1176 This makes lockdep work for crosslock which is a lock allowed to
1177 be released in a different context from the acquisition context.
1178 Normally a lock must be released in the context acquiring the lock.
1179 However, relexing this constraint helps synchronization primitives
1180 such as page locks or completions can use the lock correctness
1181 detector, lockdep.
1182
1183config LOCKDEP_COMPLETIONS
1184 bool
1185 help
1186 A deadlock caused by wait_for_completion() and complete() can be
1187 detected by lockdep using crossrelease feature.
1188
1189config BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK
1190 bool "Enable the boot parameter, crossrelease_fullstack"
1191 depends on LOCKDEP_CROSSRELEASE
1192 default n
1193 help
1194 The lockdep "cross-release" feature needs to record stack traces
1195 (of calling functions) for all acquisitions, for eventual later
1196 use during analysis. By default only a single caller is recorded,
1197 because the unwind operation can be very expensive with deeper
1198 stack chains.
1199
1200 However a boot parameter, crossrelease_fullstack, was
1201 introduced since sometimes deeper traces are required for full
1202 analysis. This option turns on the boot parameter.
1203
1204config DEBUG_LOCKDEP 1171config DEBUG_LOCKDEP
1205 bool "Lock dependency engine debugging" 1172 bool "Lock dependency engine debugging"
1206 depends on DEBUG_KERNEL && LOCKDEP 1173 depends on DEBUG_KERNEL && LOCKDEP
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 1ef0cec38d78..dc14beae2c9a 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -313,42 +313,47 @@ next_op:
313 313
314 /* Decide how to handle the operation */ 314 /* Decide how to handle the operation */
315 switch (op) { 315 switch (op) {
316 case ASN1_OP_MATCH_ANY_ACT:
317 case ASN1_OP_MATCH_ANY_ACT_OR_SKIP:
318 case ASN1_OP_COND_MATCH_ANY_ACT:
319 case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP:
320 ret = actions[machine[pc + 1]](context, hdr, tag, data + dp, len);
321 if (ret < 0)
322 return ret;
323 goto skip_data;
324
325 case ASN1_OP_MATCH_ACT:
326 case ASN1_OP_MATCH_ACT_OR_SKIP:
327 case ASN1_OP_COND_MATCH_ACT_OR_SKIP:
328 ret = actions[machine[pc + 2]](context, hdr, tag, data + dp, len);
329 if (ret < 0)
330 return ret;
331 goto skip_data;
332
333 case ASN1_OP_MATCH: 316 case ASN1_OP_MATCH:
334 case ASN1_OP_MATCH_OR_SKIP: 317 case ASN1_OP_MATCH_OR_SKIP:
318 case ASN1_OP_MATCH_ACT:
319 case ASN1_OP_MATCH_ACT_OR_SKIP:
335 case ASN1_OP_MATCH_ANY: 320 case ASN1_OP_MATCH_ANY:
336 case ASN1_OP_MATCH_ANY_OR_SKIP: 321 case ASN1_OP_MATCH_ANY_OR_SKIP:
322 case ASN1_OP_MATCH_ANY_ACT:
323 case ASN1_OP_MATCH_ANY_ACT_OR_SKIP:
337 case ASN1_OP_COND_MATCH_OR_SKIP: 324 case ASN1_OP_COND_MATCH_OR_SKIP:
325 case ASN1_OP_COND_MATCH_ACT_OR_SKIP:
338 case ASN1_OP_COND_MATCH_ANY: 326 case ASN1_OP_COND_MATCH_ANY:
339 case ASN1_OP_COND_MATCH_ANY_OR_SKIP: 327 case ASN1_OP_COND_MATCH_ANY_OR_SKIP:
340 skip_data: 328 case ASN1_OP_COND_MATCH_ANY_ACT:
329 case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP:
330
341 if (!(flags & FLAG_CONS)) { 331 if (!(flags & FLAG_CONS)) {
342 if (flags & FLAG_INDEFINITE_LENGTH) { 332 if (flags & FLAG_INDEFINITE_LENGTH) {
333 size_t tmp = dp;
334
343 ret = asn1_find_indefinite_length( 335 ret = asn1_find_indefinite_length(
344 data, datalen, &dp, &len, &errmsg); 336 data, datalen, &tmp, &len, &errmsg);
345 if (ret < 0) 337 if (ret < 0)
346 goto error; 338 goto error;
347 } else {
348 dp += len;
349 } 339 }
350 pr_debug("- LEAF: %zu\n", len); 340 pr_debug("- LEAF: %zu\n", len);
351 } 341 }
342
343 if (op & ASN1_OP_MATCH__ACT) {
344 unsigned char act;
345
346 if (op & ASN1_OP_MATCH__ANY)
347 act = machine[pc + 1];
348 else
349 act = machine[pc + 2];
350 ret = actions[act](context, hdr, tag, data + dp, len);
351 if (ret < 0)
352 return ret;
353 }
354
355 if (!(flags & FLAG_CONS))
356 dp += len;
352 pc += asn1_op_lengths[op]; 357 pc += asn1_op_lengths[op];
353 goto next_op; 358 goto next_op;
354 359
@@ -434,6 +439,8 @@ next_op:
434 else 439 else
435 act = machine[pc + 1]; 440 act = machine[pc + 1];
436 ret = actions[act](context, hdr, 0, data + tdp, len); 441 ret = actions[act](context, hdr, 0, data + tdp, len);
442 if (ret < 0)
443 return ret;
437 } 444 }
438 pc += asn1_op_lengths[op]; 445 pc += asn1_op_lengths[op];
439 goto next_op; 446 goto next_op;
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index c3e84edc47c9..2615074d3de5 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -346,7 +346,8 @@ static int kobject_uevent_net_broadcast(struct kobject *kobj,
346static void zap_modalias_env(struct kobj_uevent_env *env) 346static void zap_modalias_env(struct kobj_uevent_env *env)
347{ 347{
348 static const char modalias_prefix[] = "MODALIAS="; 348 static const char modalias_prefix[] = "MODALIAS=";
349 int i; 349 size_t len;
350 int i, j;
350 351
351 for (i = 0; i < env->envp_idx;) { 352 for (i = 0; i < env->envp_idx;) {
352 if (strncmp(env->envp[i], modalias_prefix, 353 if (strncmp(env->envp[i], modalias_prefix,
@@ -355,11 +356,18 @@ static void zap_modalias_env(struct kobj_uevent_env *env)
355 continue; 356 continue;
356 } 357 }
357 358
358 if (i != env->envp_idx - 1) 359 len = strlen(env->envp[i]) + 1;
359 memmove(&env->envp[i], &env->envp[i + 1], 360
360 sizeof(env->envp[i]) * env->envp_idx - 1); 361 if (i != env->envp_idx - 1) {
362 memmove(env->envp[i], env->envp[i + 1],
363 env->buflen - len);
364
365 for (j = i; j < env->envp_idx - 1; j++)
366 env->envp[j] = env->envp[j + 1] - len;
367 }
361 368
362 env->envp_idx--; 369 env->envp_idx--;
370 env->buflen -= len;
363 } 371 }
364} 372}
365 373
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index 57fd45ab7af1..08c60d10747f 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -671,7 +671,23 @@ do { \
671 ************** MIPS/64 ************** 671 ************** MIPS/64 **************
672 ***************************************/ 672 ***************************************/
673#if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64 673#if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
674#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4) 674#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
675/*
676 * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C
677 * code below, so we special case MIPS64r6 until the compiler can do better.
678 */
679#define umul_ppmm(w1, w0, u, v) \
680do { \
681 __asm__ ("dmulu %0,%1,%2" \
682 : "=d" ((UDItype)(w0)) \
683 : "d" ((UDItype)(u)), \
684 "d" ((UDItype)(v))); \
685 __asm__ ("dmuhu %0,%1,%2" \
686 : "=d" ((UDItype)(w1)) \
687 : "d" ((UDItype)(u)), \
688 "d" ((UDItype)(v))); \
689} while (0)
690#elif (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
675#define umul_ppmm(w1, w0, u, v) \ 691#define umul_ppmm(w1, w0, u, v) \
676do { \ 692do { \
677 typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \ 693 typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 8bf78b4b78f0..dfa55c873c13 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -15,7 +15,11 @@
15#include <linux/types.h> 15#include <linux/types.h>
16#include <net/netlink.h> 16#include <net/netlink.h>
17 17
18/* for these data types attribute length must be exactly given size */ 18/* For these data types, attribute length should be exactly the given
19 * size. However, to maintain compatibility with broken commands, if the
20 * attribute length does not match the expected size a warning is emitted
21 * to the user that the command is sending invalid data and needs to be fixed.
22 */
19static const u8 nla_attr_len[NLA_TYPE_MAX+1] = { 23static const u8 nla_attr_len[NLA_TYPE_MAX+1] = {
20 [NLA_U8] = sizeof(u8), 24 [NLA_U8] = sizeof(u8),
21 [NLA_U16] = sizeof(u16), 25 [NLA_U16] = sizeof(u16),
@@ -28,8 +32,16 @@ static const u8 nla_attr_len[NLA_TYPE_MAX+1] = {
28}; 32};
29 33
30static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = { 34static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
35 [NLA_U8] = sizeof(u8),
36 [NLA_U16] = sizeof(u16),
37 [NLA_U32] = sizeof(u32),
38 [NLA_U64] = sizeof(u64),
31 [NLA_MSECS] = sizeof(u64), 39 [NLA_MSECS] = sizeof(u64),
32 [NLA_NESTED] = NLA_HDRLEN, 40 [NLA_NESTED] = NLA_HDRLEN,
41 [NLA_S8] = sizeof(s8),
42 [NLA_S16] = sizeof(s16),
43 [NLA_S32] = sizeof(s32),
44 [NLA_S64] = sizeof(s64),
33}; 45};
34 46
35static int validate_nla_bitfield32(const struct nlattr *nla, 47static int validate_nla_bitfield32(const struct nlattr *nla,
@@ -69,11 +81,9 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
69 81
70 BUG_ON(pt->type > NLA_TYPE_MAX); 82 BUG_ON(pt->type > NLA_TYPE_MAX);
71 83
72 /* for data types NLA_U* and NLA_S* require exact length */ 84 if (nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) {
73 if (nla_attr_len[pt->type]) { 85 pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
74 if (attrlen != nla_attr_len[pt->type]) 86 current->comm, type);
75 return -ERANGE;
76 return 0;
77 } 87 }
78 88
79 switch (pt->type) { 89 switch (pt->type) {
diff --git a/lib/oid_registry.c b/lib/oid_registry.c
index 41b9e50711a7..0bcac6ccb1b2 100644
--- a/lib/oid_registry.c
+++ b/lib/oid_registry.c
@@ -116,14 +116,14 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
116 int count; 116 int count;
117 117
118 if (v >= end) 118 if (v >= end)
119 return -EBADMSG; 119 goto bad;
120 120
121 n = *v++; 121 n = *v++;
122 ret = count = snprintf(buffer, bufsize, "%u.%u", n / 40, n % 40); 122 ret = count = snprintf(buffer, bufsize, "%u.%u", n / 40, n % 40);
123 if (count >= bufsize)
124 return -ENOBUFS;
123 buffer += count; 125 buffer += count;
124 bufsize -= count; 126 bufsize -= count;
125 if (bufsize == 0)
126 return -ENOBUFS;
127 127
128 while (v < end) { 128 while (v < end) {
129 num = 0; 129 num = 0;
@@ -134,20 +134,24 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
134 num = n & 0x7f; 134 num = n & 0x7f;
135 do { 135 do {
136 if (v >= end) 136 if (v >= end)
137 return -EBADMSG; 137 goto bad;
138 n = *v++; 138 n = *v++;
139 num <<= 7; 139 num <<= 7;
140 num |= n & 0x7f; 140 num |= n & 0x7f;
141 } while (n & 0x80); 141 } while (n & 0x80);
142 } 142 }
143 ret += count = snprintf(buffer, bufsize, ".%lu", num); 143 ret += count = snprintf(buffer, bufsize, ".%lu", num);
144 buffer += count; 144 if (count >= bufsize)
145 if (bufsize <= count)
146 return -ENOBUFS; 145 return -ENOBUFS;
146 buffer += count;
147 bufsize -= count; 147 bufsize -= count;
148 } 148 }
149 149
150 return ret; 150 return ret;
151
152bad:
153 snprintf(buffer, bufsize, "(bad)");
154 return -EBADMSG;
151} 155}
152EXPORT_SYMBOL_GPL(sprint_oid); 156EXPORT_SYMBOL_GPL(sprint_oid);
153 157
diff --git a/lib/rbtree.c b/lib/rbtree.c
index ba4a9d165f1b..d3ff682fd4b8 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -603,6 +603,16 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
603} 603}
604EXPORT_SYMBOL(rb_replace_node); 604EXPORT_SYMBOL(rb_replace_node);
605 605
606void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
607 struct rb_root_cached *root)
608{
609 rb_replace_node(victim, new, &root->rb_root);
610
611 if (root->rb_leftmost == victim)
612 root->rb_leftmost = new;
613}
614EXPORT_SYMBOL(rb_replace_node_cached);
615
606void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, 616void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
607 struct rb_root *root) 617 struct rb_root *root)
608{ 618{
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index aa8812ae6776..f369889e521d 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -435,6 +435,41 @@ loop:
435 return 0; 435 return 0;
436} 436}
437 437
438static int bpf_fill_ld_abs_vlan_push_pop2(struct bpf_test *self)
439{
440 struct bpf_insn *insn;
441
442 insn = kmalloc_array(16, sizeof(*insn), GFP_KERNEL);
443 if (!insn)
444 return -ENOMEM;
445
446 /* Due to func address being non-const, we need to
447 * assemble this here.
448 */
449 insn[0] = BPF_MOV64_REG(R6, R1);
450 insn[1] = BPF_LD_ABS(BPF_B, 0);
451 insn[2] = BPF_LD_ABS(BPF_H, 0);
452 insn[3] = BPF_LD_ABS(BPF_W, 0);
453 insn[4] = BPF_MOV64_REG(R7, R6);
454 insn[5] = BPF_MOV64_IMM(R6, 0);
455 insn[6] = BPF_MOV64_REG(R1, R7);
456 insn[7] = BPF_MOV64_IMM(R2, 1);
457 insn[8] = BPF_MOV64_IMM(R3, 2);
458 insn[9] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
459 bpf_skb_vlan_push_proto.func - __bpf_call_base);
460 insn[10] = BPF_MOV64_REG(R6, R7);
461 insn[11] = BPF_LD_ABS(BPF_B, 0);
462 insn[12] = BPF_LD_ABS(BPF_H, 0);
463 insn[13] = BPF_LD_ABS(BPF_W, 0);
464 insn[14] = BPF_MOV64_IMM(R0, 42);
465 insn[15] = BPF_EXIT_INSN();
466
467 self->u.ptr.insns = insn;
468 self->u.ptr.len = 16;
469
470 return 0;
471}
472
438static int bpf_fill_jump_around_ld_abs(struct bpf_test *self) 473static int bpf_fill_jump_around_ld_abs(struct bpf_test *self)
439{ 474{
440 unsigned int len = BPF_MAXINSNS; 475 unsigned int len = BPF_MAXINSNS;
@@ -6066,6 +6101,14 @@ static struct bpf_test tests[] = {
6066 {}, 6101 {},
6067 { {0x1, 0x42 } }, 6102 { {0x1, 0x42 } },
6068 }, 6103 },
6104 {
6105 "LD_ABS with helper changing skb data",
6106 { },
6107 INTERNAL,
6108 { 0x34 },
6109 { { ETH_HLEN, 42 } },
6110 .fill_helper = bpf_fill_ld_abs_vlan_push_pop2,
6111 },
6069}; 6112};
6070 6113
6071static struct net_device dev; 6114static struct net_device dev;
@@ -6207,9 +6250,8 @@ static struct bpf_prog *generate_filter(int which, int *err)
6207 return NULL; 6250 return NULL;
6208 } 6251 }
6209 } 6252 }
6210 /* We don't expect to fail. */
6211 if (*err) { 6253 if (*err) {
6212 pr_cont("FAIL to attach err=%d len=%d\n", 6254 pr_cont("FAIL to prog_create err=%d len=%d\n",
6213 *err, fprog.len); 6255 *err, fprog.len);
6214 return NULL; 6256 return NULL;
6215 } 6257 }
@@ -6233,6 +6275,10 @@ static struct bpf_prog *generate_filter(int which, int *err)
6233 * checks. 6275 * checks.
6234 */ 6276 */
6235 fp = bpf_prog_select_runtime(fp, err); 6277 fp = bpf_prog_select_runtime(fp, err);
6278 if (*err) {
6279 pr_cont("FAIL to select_runtime err=%d\n", *err);
6280 return NULL;
6281 }
6236 break; 6282 break;
6237 } 6283 }
6238 6284
@@ -6418,8 +6464,8 @@ static __init int test_bpf(void)
6418 pass_cnt++; 6464 pass_cnt++;
6419 continue; 6465 continue;
6420 } 6466 }
6421 6467 err_cnt++;
6422 return err; 6468 continue;
6423 } 6469 }
6424 6470
6425 pr_cont("jited:%u ", fp->jited); 6471 pr_cont("jited:%u ", fp->jited);
diff --git a/lib/timerqueue.c b/lib/timerqueue.c
index 4a720ed4fdaf..0d54bcbc8170 100644
--- a/lib/timerqueue.c
+++ b/lib/timerqueue.c
@@ -33,8 +33,9 @@
33 * @head: head of timerqueue 33 * @head: head of timerqueue
34 * @node: timer node to be added 34 * @node: timer node to be added
35 * 35 *
36 * Adds the timer node to the timerqueue, sorted by the 36 * Adds the timer node to the timerqueue, sorted by the node's expires
37 * node's expires value. 37 * value. Returns true if the newly added timer is the first expiring timer in
38 * the queue.
38 */ 39 */
39bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node) 40bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
40{ 41{
@@ -70,7 +71,8 @@ EXPORT_SYMBOL_GPL(timerqueue_add);
70 * @head: head of timerqueue 71 * @head: head of timerqueue
71 * @node: timer node to be removed 72 * @node: timer node to be removed
72 * 73 *
73 * Removes the timer node from the timerqueue. 74 * Removes the timer node from the timerqueue. Returns true if the queue is
75 * not empty after the remove.
74 */ 76 */
75bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node) 77bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
76{ 78{
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 84b2dc76f140..b5f940ce0143 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -882,13 +882,10 @@ int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
882 if (IS_ERR(dev)) 882 if (IS_ERR(dev))
883 return PTR_ERR(dev); 883 return PTR_ERR(dev);
884 884
885 if (bdi_debug_register(bdi, dev_name(dev))) {
886 device_destroy(bdi_class, dev->devt);
887 return -ENOMEM;
888 }
889 cgwb_bdi_register(bdi); 885 cgwb_bdi_register(bdi);
890 bdi->dev = dev; 886 bdi->dev = dev;
891 887
888 bdi_debug_register(bdi, dev_name(dev));
892 set_bit(WB_registered, &bdi->wb.state); 889 set_bit(WB_registered, &bdi->wb.state);
893 890
894 spin_lock_bh(&bdi_lock); 891 spin_lock_bh(&bdi_lock);
diff --git a/mm/debug.c b/mm/debug.c
index d947f3e03b0d..56e2d9125ea5 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -50,7 +50,7 @@ void __dump_page(struct page *page, const char *reason)
50 */ 50 */
51 int mapcount = PageSlab(page) ? 0 : page_mapcount(page); 51 int mapcount = PageSlab(page) ? 0 : page_mapcount(page);
52 52
53 pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx", 53 pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx",
54 page, page_ref_count(page), mapcount, 54 page, page_ref_count(page), mapcount,
55 page->mapping, page_to_pgoff(page)); 55 page->mapping, page_to_pgoff(page));
56 if (PageCompound(page)) 56 if (PageCompound(page))
@@ -69,7 +69,7 @@ void __dump_page(struct page *page, const char *reason)
69 69
70#ifdef CONFIG_MEMCG 70#ifdef CONFIG_MEMCG
71 if (page->mem_cgroup) 71 if (page->mem_cgroup)
72 pr_alert("page->mem_cgroup:%p\n", page->mem_cgroup); 72 pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup);
73#endif 73#endif
74} 74}
75 75
@@ -84,10 +84,10 @@ EXPORT_SYMBOL(dump_page);
84 84
85void dump_vma(const struct vm_area_struct *vma) 85void dump_vma(const struct vm_area_struct *vma)
86{ 86{
87 pr_emerg("vma %p start %p end %p\n" 87 pr_emerg("vma %px start %px end %px\n"
88 "next %p prev %p mm %p\n" 88 "next %px prev %px mm %px\n"
89 "prot %lx anon_vma %p vm_ops %p\n" 89 "prot %lx anon_vma %px vm_ops %px\n"
90 "pgoff %lx file %p private_data %p\n" 90 "pgoff %lx file %px private_data %px\n"
91 "flags: %#lx(%pGv)\n", 91 "flags: %#lx(%pGv)\n",
92 vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next, 92 vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
93 vma->vm_prev, vma->vm_mm, 93 vma->vm_prev, vma->vm_mm,
@@ -100,27 +100,27 @@ EXPORT_SYMBOL(dump_vma);
100 100
101void dump_mm(const struct mm_struct *mm) 101void dump_mm(const struct mm_struct *mm)
102{ 102{
103 pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n" 103 pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n"
104#ifdef CONFIG_MMU 104#ifdef CONFIG_MMU
105 "get_unmapped_area %p\n" 105 "get_unmapped_area %px\n"
106#endif 106#endif
107 "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n" 107 "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
108 "pgd %p mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n" 108 "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
109 "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n" 109 "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
110 "pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n" 110 "pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
111 "start_code %lx end_code %lx start_data %lx end_data %lx\n" 111 "start_code %lx end_code %lx start_data %lx end_data %lx\n"
112 "start_brk %lx brk %lx start_stack %lx\n" 112 "start_brk %lx brk %lx start_stack %lx\n"
113 "arg_start %lx arg_end %lx env_start %lx env_end %lx\n" 113 "arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
114 "binfmt %p flags %lx core_state %p\n" 114 "binfmt %px flags %lx core_state %px\n"
115#ifdef CONFIG_AIO 115#ifdef CONFIG_AIO
116 "ioctx_table %p\n" 116 "ioctx_table %px\n"
117#endif 117#endif
118#ifdef CONFIG_MEMCG 118#ifdef CONFIG_MEMCG
119 "owner %p " 119 "owner %px "
120#endif 120#endif
121 "exe_file %p\n" 121 "exe_file %px\n"
122#ifdef CONFIG_MMU_NOTIFIER 122#ifdef CONFIG_MMU_NOTIFIER
123 "mmu_notifier_mm %p\n" 123 "mmu_notifier_mm %px\n"
124#endif 124#endif
125#ifdef CONFIG_NUMA_BALANCING 125#ifdef CONFIG_NUMA_BALANCING
126 "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n" 126 "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c
index d04ac1ec0559..1826f191e72c 100644
--- a/mm/early_ioremap.c
+++ b/mm/early_ioremap.c
@@ -111,7 +111,7 @@ __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
111 enum fixed_addresses idx; 111 enum fixed_addresses idx;
112 int i, slot; 112 int i, slot;
113 113
114 WARN_ON(system_state != SYSTEM_BOOTING); 114 WARN_ON(system_state >= SYSTEM_RUNNING);
115 115
116 slot = -1; 116 slot = -1;
117 for (i = 0; i < FIX_BTMAPS_SLOTS; i++) { 117 for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
diff --git a/mm/frame_vector.c b/mm/frame_vector.c
index 297c7238f7d4..c64dca6e27c2 100644
--- a/mm/frame_vector.c
+++ b/mm/frame_vector.c
@@ -62,8 +62,10 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
62 * get_user_pages_longterm() and disallow it for filesystem-dax 62 * get_user_pages_longterm() and disallow it for filesystem-dax
63 * mappings. 63 * mappings.
64 */ 64 */
65 if (vma_is_fsdax(vma)) 65 if (vma_is_fsdax(vma)) {
66 return -EOPNOTSUPP; 66 ret = -EOPNOTSUPP;
67 goto out;
68 }
67 69
68 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) { 70 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
69 vec->got_ref = true; 71 vec->got_ref = true;
diff --git a/mm/gup.c b/mm/gup.c
index d3fb60e5bfac..e0d82b6706d7 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -66,7 +66,7 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
66 */ 66 */
67static inline bool can_follow_write_pte(pte_t pte, unsigned int flags) 67static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
68{ 68{
69 return pte_access_permitted(pte, WRITE) || 69 return pte_write(pte) ||
70 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte)); 70 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
71} 71}
72 72
diff --git a/mm/hmm.c b/mm/hmm.c
index 3a5c172af560..ea19742a5d60 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -391,11 +391,11 @@ again:
391 if (pmd_protnone(pmd)) 391 if (pmd_protnone(pmd))
392 return hmm_vma_walk_clear(start, end, walk); 392 return hmm_vma_walk_clear(start, end, walk);
393 393
394 if (!pmd_access_permitted(pmd, write_fault)) 394 if (write_fault && !pmd_write(pmd))
395 return hmm_vma_walk_clear(start, end, walk); 395 return hmm_vma_walk_clear(start, end, walk);
396 396
397 pfn = pmd_pfn(pmd) + pte_index(addr); 397 pfn = pmd_pfn(pmd) + pte_index(addr);
398 flag |= pmd_access_permitted(pmd, WRITE) ? HMM_PFN_WRITE : 0; 398 flag |= pmd_write(pmd) ? HMM_PFN_WRITE : 0;
399 for (; addr < end; addr += PAGE_SIZE, i++, pfn++) 399 for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
400 pfns[i] = hmm_pfn_t_from_pfn(pfn) | flag; 400 pfns[i] = hmm_pfn_t_from_pfn(pfn) | flag;
401 return 0; 401 return 0;
@@ -456,11 +456,11 @@ again:
456 continue; 456 continue;
457 } 457 }
458 458
459 if (!pte_access_permitted(pte, write_fault)) 459 if (write_fault && !pte_write(pte))
460 goto fault; 460 goto fault;
461 461
462 pfns[i] = hmm_pfn_t_from_pfn(pte_pfn(pte)) | flag; 462 pfns[i] = hmm_pfn_t_from_pfn(pte_pfn(pte)) | flag;
463 pfns[i] |= pte_access_permitted(pte, WRITE) ? HMM_PFN_WRITE : 0; 463 pfns[i] |= pte_write(pte) ? HMM_PFN_WRITE : 0;
464 continue; 464 continue;
465 465
466fault: 466fault:
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 2f2f5e774902..0e7ded98d114 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -870,7 +870,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
870 */ 870 */
871 WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set"); 871 WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
872 872
873 if (!pmd_access_permitted(*pmd, flags & FOLL_WRITE)) 873 if (flags & FOLL_WRITE && !pmd_write(*pmd))
874 return NULL; 874 return NULL;
875 875
876 if (pmd_present(*pmd) && pmd_devmap(*pmd)) 876 if (pmd_present(*pmd) && pmd_devmap(*pmd))
@@ -1012,7 +1012,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
1012 1012
1013 assert_spin_locked(pud_lockptr(mm, pud)); 1013 assert_spin_locked(pud_lockptr(mm, pud));
1014 1014
1015 if (!pud_access_permitted(*pud, flags & FOLL_WRITE)) 1015 if (flags & FOLL_WRITE && !pud_write(*pud))
1016 return NULL; 1016 return NULL;
1017 1017
1018 if (pud_present(*pud) && pud_devmap(*pud)) 1018 if (pud_present(*pud) && pud_devmap(*pud))
@@ -1386,7 +1386,7 @@ out_unlock:
1386 */ 1386 */
1387static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags) 1387static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
1388{ 1388{
1389 return pmd_access_permitted(pmd, WRITE) || 1389 return pmd_write(pmd) ||
1390 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd)); 1390 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
1391} 1391}
1392 1392
diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c
deleted file mode 100644
index cec594032515..000000000000
--- a/mm/kmemcheck.c
+++ /dev/null
@@ -1 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 3d4781756d50..f656ca27f6c2 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -127,7 +127,7 @@
127/* GFP bitmask for kmemleak internal allocations */ 127/* GFP bitmask for kmemleak internal allocations */
128#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \ 128#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
129 __GFP_NORETRY | __GFP_NOMEMALLOC | \ 129 __GFP_NORETRY | __GFP_NOMEMALLOC | \
130 __GFP_NOWARN) 130 __GFP_NOWARN | __GFP_NOFAIL)
131 131
132/* scanning area inside a memory block */ 132/* scanning area inside a memory block */
133struct kmemleak_scan_area { 133struct kmemleak_scan_area {
@@ -1523,7 +1523,7 @@ static void kmemleak_scan(void)
1523 if (page_count(page) == 0) 1523 if (page_count(page) == 0)
1524 continue; 1524 continue;
1525 scan_block(page, page + 1, NULL); 1525 scan_block(page, page + 1, NULL);
1526 if (!(pfn % (MAX_SCAN_SIZE / sizeof(*page)))) 1526 if (!(pfn & 63))
1527 cond_resched(); 1527 cond_resched();
1528 } 1528 }
1529 } 1529 }
diff --git a/mm/memory.c b/mm/memory.c
index 5eb3d2524bdc..793004608332 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2857,8 +2857,11 @@ int do_swap_page(struct vm_fault *vmf)
2857 int ret = 0; 2857 int ret = 0;
2858 bool vma_readahead = swap_use_vma_readahead(); 2858 bool vma_readahead = swap_use_vma_readahead();
2859 2859
2860 if (vma_readahead) 2860 if (vma_readahead) {
2861 page = swap_readahead_detect(vmf, &swap_ra); 2861 page = swap_readahead_detect(vmf, &swap_ra);
2862 swapcache = page;
2863 }
2864
2862 if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) { 2865 if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) {
2863 if (page) 2866 if (page)
2864 put_page(page); 2867 put_page(page);
@@ -2889,9 +2892,12 @@ int do_swap_page(struct vm_fault *vmf)
2889 2892
2890 2893
2891 delayacct_set_flag(DELAYACCT_PF_SWAPIN); 2894 delayacct_set_flag(DELAYACCT_PF_SWAPIN);
2892 if (!page) 2895 if (!page) {
2893 page = lookup_swap_cache(entry, vma_readahead ? vma : NULL, 2896 page = lookup_swap_cache(entry, vma_readahead ? vma : NULL,
2894 vmf->address); 2897 vmf->address);
2898 swapcache = page;
2899 }
2900
2895 if (!page) { 2901 if (!page) {
2896 struct swap_info_struct *si = swp_swap_info(entry); 2902 struct swap_info_struct *si = swp_swap_info(entry);
2897 2903
@@ -3831,7 +3837,8 @@ static inline int create_huge_pmd(struct vm_fault *vmf)
3831 return VM_FAULT_FALLBACK; 3837 return VM_FAULT_FALLBACK;
3832} 3838}
3833 3839
3834static int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd) 3840/* `inline' is required to avoid gcc 4.1.2 build error */
3841static inline int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
3835{ 3842{
3836 if (vma_is_anonymous(vmf->vma)) 3843 if (vma_is_anonymous(vmf->vma))
3837 return do_huge_pmd_wp_page(vmf, orig_pmd); 3844 return do_huge_pmd_wp_page(vmf, orig_pmd);
@@ -3948,7 +3955,7 @@ static int handle_pte_fault(struct vm_fault *vmf)
3948 if (unlikely(!pte_same(*vmf->pte, entry))) 3955 if (unlikely(!pte_same(*vmf->pte, entry)))
3949 goto unlock; 3956 goto unlock;
3950 if (vmf->flags & FAULT_FLAG_WRITE) { 3957 if (vmf->flags & FAULT_FLAG_WRITE) {
3951 if (!pte_access_permitted(entry, WRITE)) 3958 if (!pte_write(entry))
3952 return do_wp_page(vmf); 3959 return do_wp_page(vmf);
3953 entry = pte_mkdirty(entry); 3960 entry = pte_mkdirty(entry);
3954 } 3961 }
@@ -4013,7 +4020,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
4013 4020
4014 /* NUMA case for anonymous PUDs would go here */ 4021 /* NUMA case for anonymous PUDs would go here */
4015 4022
4016 if (dirty && !pud_access_permitted(orig_pud, WRITE)) { 4023 if (dirty && !pud_write(orig_pud)) {
4017 ret = wp_huge_pud(&vmf, orig_pud); 4024 ret = wp_huge_pud(&vmf, orig_pud);
4018 if (!(ret & VM_FAULT_FALLBACK)) 4025 if (!(ret & VM_FAULT_FALLBACK))
4019 return ret; 4026 return ret;
@@ -4046,7 +4053,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
4046 if (pmd_protnone(orig_pmd) && vma_is_accessible(vma)) 4053 if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
4047 return do_huge_pmd_numa_page(&vmf, orig_pmd); 4054 return do_huge_pmd_numa_page(&vmf, orig_pmd);
4048 4055
4049 if (dirty && !pmd_access_permitted(orig_pmd, WRITE)) { 4056 if (dirty && !pmd_write(orig_pmd)) {
4050 ret = wp_huge_pmd(&vmf, orig_pmd); 4057 ret = wp_huge_pmd(&vmf, orig_pmd);
4051 if (!(ret & VM_FAULT_FALLBACK)) 4058 if (!(ret & VM_FAULT_FALLBACK))
4052 return ret; 4059 return ret;
@@ -4336,7 +4343,7 @@ int follow_phys(struct vm_area_struct *vma,
4336 goto out; 4343 goto out;
4337 pte = *ptep; 4344 pte = *ptep;
4338 4345
4339 if (!pte_access_permitted(pte, flags & FOLL_WRITE)) 4346 if ((flags & FOLL_WRITE) && !pte_write(pte))
4340 goto unlock; 4347 goto unlock;
4341 4348
4342 *prot = pgprot_val(pte_pgprot(pte)); 4349 *prot = pgprot_val(pte_pgprot(pte));
diff --git a/mm/mmap.c b/mm/mmap.c
index a4d546821214..9efdc021ad22 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3019,20 +3019,20 @@ void exit_mmap(struct mm_struct *mm)
3019 /* Use -1 here to ensure all VMAs in the mm are unmapped */ 3019 /* Use -1 here to ensure all VMAs in the mm are unmapped */
3020 unmap_vmas(&tlb, vma, 0, -1); 3020 unmap_vmas(&tlb, vma, 0, -1);
3021 3021
3022 set_bit(MMF_OOM_SKIP, &mm->flags); 3022 if (unlikely(mm_is_oom_victim(mm))) {
3023 if (unlikely(tsk_is_oom_victim(current))) {
3024 /* 3023 /*
3025 * Wait for oom_reap_task() to stop working on this 3024 * Wait for oom_reap_task() to stop working on this
3026 * mm. Because MMF_OOM_SKIP is already set before 3025 * mm. Because MMF_OOM_SKIP is already set before
3027 * calling down_read(), oom_reap_task() will not run 3026 * calling down_read(), oom_reap_task() will not run
3028 * on this "mm" post up_write(). 3027 * on this "mm" post up_write().
3029 * 3028 *
3030 * tsk_is_oom_victim() cannot be set from under us 3029 * mm_is_oom_victim() cannot be set from under us
3031 * either because current->mm is already set to NULL 3030 * either because victim->mm is already set to NULL
3032 * under task_lock before calling mmput and oom_mm is 3031 * under task_lock before calling mmput and oom_mm is
3033 * set not NULL by the OOM killer only if current->mm 3032 * set not NULL by the OOM killer only if victim->mm
3034 * is found not NULL while holding the task_lock. 3033 * is found not NULL while holding the task_lock.
3035 */ 3034 */
3035 set_bit(MMF_OOM_SKIP, &mm->flags);
3036 down_write(&mm->mmap_sem); 3036 down_write(&mm->mmap_sem);
3037 up_write(&mm->mmap_sem); 3037 up_write(&mm->mmap_sem);
3038 } 3038 }
diff --git a/mm/mprotect.c b/mm/mprotect.c
index ec39f730a0bf..58b629bb70de 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -166,7 +166,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
166 next = pmd_addr_end(addr, end); 166 next = pmd_addr_end(addr, end);
167 if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd) 167 if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
168 && pmd_none_or_clear_bad(pmd)) 168 && pmd_none_or_clear_bad(pmd))
169 continue; 169 goto next;
170 170
171 /* invoke the mmu notifier if the pmd is populated */ 171 /* invoke the mmu notifier if the pmd is populated */
172 if (!mni_start) { 172 if (!mni_start) {
@@ -188,7 +188,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
188 } 188 }
189 189
190 /* huge pmd was handled */ 190 /* huge pmd was handled */
191 continue; 191 goto next;
192 } 192 }
193 } 193 }
194 /* fall through, the trans huge pmd just split */ 194 /* fall through, the trans huge pmd just split */
@@ -196,6 +196,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
196 this_pages = change_pte_range(vma, pmd, addr, next, newprot, 196 this_pages = change_pte_range(vma, pmd, addr, next, newprot,
197 dirty_accountable, prot_numa); 197 dirty_accountable, prot_numa);
198 pages += this_pages; 198 pages += this_pages;
199next:
200 cond_resched();
199 } while (pmd++, addr = next, addr != end); 201 } while (pmd++, addr = next, addr != end);
200 202
201 if (mni_start) 203 if (mni_start)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index c957be32b27a..29f855551efe 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -683,8 +683,10 @@ static void mark_oom_victim(struct task_struct *tsk)
683 return; 683 return;
684 684
685 /* oom_mm is bound to the signal struct life time. */ 685 /* oom_mm is bound to the signal struct life time. */
686 if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) 686 if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
687 mmgrab(tsk->signal->oom_mm); 687 mmgrab(tsk->signal->oom_mm);
688 set_bit(MMF_OOM_VICTIM, &mm->flags);
689 }
688 690
689 /* 691 /*
690 * Make sure that the task is woken up from uninterruptible sleep 692 * Make sure that the task is woken up from uninterruptible sleep
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 73f5d4556b3d..76c9688b6a0a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2684,6 +2684,7 @@ void free_unref_page_list(struct list_head *list)
2684{ 2684{
2685 struct page *page, *next; 2685 struct page *page, *next;
2686 unsigned long flags, pfn; 2686 unsigned long flags, pfn;
2687 int batch_count = 0;
2687 2688
2688 /* Prepare pages for freeing */ 2689 /* Prepare pages for freeing */
2689 list_for_each_entry_safe(page, next, list, lru) { 2690 list_for_each_entry_safe(page, next, list, lru) {
@@ -2700,6 +2701,16 @@ void free_unref_page_list(struct list_head *list)
2700 set_page_private(page, 0); 2701 set_page_private(page, 0);
2701 trace_mm_page_free_batched(page); 2702 trace_mm_page_free_batched(page);
2702 free_unref_page_commit(page, pfn); 2703 free_unref_page_commit(page, pfn);
2704
2705 /*
2706 * Guard against excessive IRQ disabled times when we get
2707 * a large list of pages to free.
2708 */
2709 if (++batch_count == SWAP_CLUSTER_MAX) {
2710 local_irq_restore(flags);
2711 batch_count = 0;
2712 local_irq_save(flags);
2713 }
2703 } 2714 }
2704 local_irq_restore(flags); 2715 local_irq_restore(flags);
2705} 2716}
@@ -6249,6 +6260,8 @@ void __paginginit zero_resv_unavail(void)
6249 pgcnt = 0; 6260 pgcnt = 0;
6250 for_each_resv_unavail_range(i, &start, &end) { 6261 for_each_resv_unavail_range(i, &start, &end) {
6251 for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) { 6262 for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) {
6263 if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages)))
6264 continue;
6252 mm_zero_struct_page(pfn_to_page(pfn)); 6265 mm_zero_struct_page(pfn_to_page(pfn));
6253 pgcnt++; 6266 pgcnt++;
6254 } 6267 }
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 8592543a0f15..270a8219ccd0 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -616,7 +616,6 @@ static void init_early_allocated_pages(void)
616{ 616{
617 pg_data_t *pgdat; 617 pg_data_t *pgdat;
618 618
619 drain_all_pages(NULL);
620 for_each_online_pgdat(pgdat) 619 for_each_online_pgdat(pgdat)
621 init_zones_in_node(pgdat); 620 init_zones_in_node(pgdat);
622} 621}
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index d22b84310f6d..ae3c2a35d61b 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -30,10 +30,37 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
30 return true; 30 return true;
31} 31}
32 32
33static inline bool pfn_in_hpage(struct page *hpage, unsigned long pfn)
34{
35 unsigned long hpage_pfn = page_to_pfn(hpage);
36
37 /* THP can be referenced by any subpage */
38 return pfn >= hpage_pfn && pfn - hpage_pfn < hpage_nr_pages(hpage);
39}
40
41/**
42 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
43 *
44 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
45 * mapped. check_pte() has to validate this.
46 *
47 * @pvmw->pte may point to empty PTE, swap PTE or PTE pointing to arbitrary
48 * page.
49 *
50 * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains migration
51 * entry that points to @pvmw->page or any subpage in case of THP.
52 *
53 * If PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
54 * @pvmw->page or any subpage in case of THP.
55 *
56 * Otherwise, return false.
57 *
58 */
33static bool check_pte(struct page_vma_mapped_walk *pvmw) 59static bool check_pte(struct page_vma_mapped_walk *pvmw)
34{ 60{
61 unsigned long pfn;
62
35 if (pvmw->flags & PVMW_MIGRATION) { 63 if (pvmw->flags & PVMW_MIGRATION) {
36#ifdef CONFIG_MIGRATION
37 swp_entry_t entry; 64 swp_entry_t entry;
38 if (!is_swap_pte(*pvmw->pte)) 65 if (!is_swap_pte(*pvmw->pte))
39 return false; 66 return false;
@@ -41,38 +68,25 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
41 68
42 if (!is_migration_entry(entry)) 69 if (!is_migration_entry(entry))
43 return false; 70 return false;
44 if (migration_entry_to_page(entry) - pvmw->page >=
45 hpage_nr_pages(pvmw->page)) {
46 return false;
47 }
48 if (migration_entry_to_page(entry) < pvmw->page)
49 return false;
50#else
51 WARN_ON_ONCE(1);
52#endif
53 } else {
54 if (is_swap_pte(*pvmw->pte)) {
55 swp_entry_t entry;
56 71
57 entry = pte_to_swp_entry(*pvmw->pte); 72 pfn = migration_entry_to_pfn(entry);
58 if (is_device_private_entry(entry) && 73 } else if (is_swap_pte(*pvmw->pte)) {
59 device_private_entry_to_page(entry) == pvmw->page) 74 swp_entry_t entry;
60 return true;
61 }
62 75
63 if (!pte_present(*pvmw->pte)) 76 /* Handle un-addressable ZONE_DEVICE memory */
77 entry = pte_to_swp_entry(*pvmw->pte);
78 if (!is_device_private_entry(entry))
64 return false; 79 return false;
65 80
66 /* THP can be referenced by any subpage */ 81 pfn = device_private_entry_to_pfn(entry);
67 if (pte_page(*pvmw->pte) - pvmw->page >= 82 } else {
68 hpage_nr_pages(pvmw->page)) { 83 if (!pte_present(*pvmw->pte))
69 return false;
70 }
71 if (pte_page(*pvmw->pte) < pvmw->page)
72 return false; 84 return false;
85
86 pfn = pte_pfn(*pvmw->pte);
73 } 87 }
74 88
75 return true; 89 return pfn_in_hpage(pvmw->page, pfn);
76} 90}
77 91
78/** 92/**
diff --git a/mm/percpu.c b/mm/percpu.c
index 79e3549cab0f..50e7fdf84055 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -2719,7 +2719,11 @@ void __init setup_per_cpu_areas(void)
2719 2719
2720 if (pcpu_setup_first_chunk(ai, fc) < 0) 2720 if (pcpu_setup_first_chunk(ai, fc) < 0)
2721 panic("Failed to initialize percpu areas."); 2721 panic("Failed to initialize percpu areas.");
2722#ifdef CONFIG_CRIS
2723#warning "the CRIS architecture has physical and virtual addresses confused"
2724#else
2722 pcpu_free_alloc_info(ai); 2725 pcpu_free_alloc_info(ai);
2726#endif
2723} 2727}
2724 2728
2725#endif /* CONFIG_SMP */ 2729#endif /* CONFIG_SMP */
diff --git a/mm/slab.c b/mm/slab.c
index 183e996dde5f..4e51ef954026 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1584,11 +1584,8 @@ static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1584 *dbg_redzone2(cachep, objp)); 1584 *dbg_redzone2(cachep, objp));
1585 } 1585 }
1586 1586
1587 if (cachep->flags & SLAB_STORE_USER) { 1587 if (cachep->flags & SLAB_STORE_USER)
1588 pr_err("Last user: [<%p>](%pSR)\n", 1588 pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp));
1589 *dbg_userword(cachep, objp),
1590 *dbg_userword(cachep, objp));
1591 }
1592 realobj = (char *)objp + obj_offset(cachep); 1589 realobj = (char *)objp + obj_offset(cachep);
1593 size = cachep->object_size; 1590 size = cachep->object_size;
1594 for (i = 0; i < size && lines; i += 16, lines--) { 1591 for (i = 0; i < size && lines; i += 16, lines--) {
@@ -1621,7 +1618,7 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1621 /* Mismatch ! */ 1618 /* Mismatch ! */
1622 /* Print header */ 1619 /* Print header */
1623 if (lines == 0) { 1620 if (lines == 0) {
1624 pr_err("Slab corruption (%s): %s start=%p, len=%d\n", 1621 pr_err("Slab corruption (%s): %s start=%px, len=%d\n",
1625 print_tainted(), cachep->name, 1622 print_tainted(), cachep->name,
1626 realobj, size); 1623 realobj, size);
1627 print_objinfo(cachep, objp, 0); 1624 print_objinfo(cachep, objp, 0);
@@ -1650,13 +1647,13 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1650 if (objnr) { 1647 if (objnr) {
1651 objp = index_to_obj(cachep, page, objnr - 1); 1648 objp = index_to_obj(cachep, page, objnr - 1);
1652 realobj = (char *)objp + obj_offset(cachep); 1649 realobj = (char *)objp + obj_offset(cachep);
1653 pr_err("Prev obj: start=%p, len=%d\n", realobj, size); 1650 pr_err("Prev obj: start=%px, len=%d\n", realobj, size);
1654 print_objinfo(cachep, objp, 2); 1651 print_objinfo(cachep, objp, 2);
1655 } 1652 }
1656 if (objnr + 1 < cachep->num) { 1653 if (objnr + 1 < cachep->num) {
1657 objp = index_to_obj(cachep, page, objnr + 1); 1654 objp = index_to_obj(cachep, page, objnr + 1);
1658 realobj = (char *)objp + obj_offset(cachep); 1655 realobj = (char *)objp + obj_offset(cachep);
1659 pr_err("Next obj: start=%p, len=%d\n", realobj, size); 1656 pr_err("Next obj: start=%px, len=%d\n", realobj, size);
1660 print_objinfo(cachep, objp, 2); 1657 print_objinfo(cachep, objp, 2);
1661 } 1658 }
1662 } 1659 }
@@ -2608,7 +2605,7 @@ static void slab_put_obj(struct kmem_cache *cachep,
2608 /* Verify double free bug */ 2605 /* Verify double free bug */
2609 for (i = page->active; i < cachep->num; i++) { 2606 for (i = page->active; i < cachep->num; i++) {
2610 if (get_free_obj(page, i) == objnr) { 2607 if (get_free_obj(page, i) == objnr) {
2611 pr_err("slab: double free detected in cache '%s', objp %p\n", 2608 pr_err("slab: double free detected in cache '%s', objp %px\n",
2612 cachep->name, objp); 2609 cachep->name, objp);
2613 BUG(); 2610 BUG();
2614 } 2611 }
@@ -2772,7 +2769,7 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2772 else 2769 else
2773 slab_error(cache, "memory outside object was overwritten"); 2770 slab_error(cache, "memory outside object was overwritten");
2774 2771
2775 pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n", 2772 pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
2776 obj, redzone1, redzone2); 2773 obj, redzone1, redzone2);
2777} 2774}
2778 2775
@@ -3078,7 +3075,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3078 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || 3075 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3079 *dbg_redzone2(cachep, objp) != RED_INACTIVE) { 3076 *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3080 slab_error(cachep, "double free, or memory outside object was overwritten"); 3077 slab_error(cachep, "double free, or memory outside object was overwritten");
3081 pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n", 3078 pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
3082 objp, *dbg_redzone1(cachep, objp), 3079 objp, *dbg_redzone1(cachep, objp),
3083 *dbg_redzone2(cachep, objp)); 3080 *dbg_redzone2(cachep, objp));
3084 } 3081 }
@@ -3091,7 +3088,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3091 cachep->ctor(objp); 3088 cachep->ctor(objp);
3092 if (ARCH_SLAB_MINALIGN && 3089 if (ARCH_SLAB_MINALIGN &&
3093 ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) { 3090 ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3094 pr_err("0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", 3091 pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3095 objp, (int)ARCH_SLAB_MINALIGN); 3092 objp, (int)ARCH_SLAB_MINALIGN);
3096 } 3093 }
3097 return objp; 3094 return objp;
@@ -4283,7 +4280,7 @@ static void show_symbol(struct seq_file *m, unsigned long address)
4283 return; 4280 return;
4284 } 4281 }
4285#endif 4282#endif
4286 seq_printf(m, "%p", (void *)address); 4283 seq_printf(m, "%px", (void *)address);
4287} 4284}
4288 4285
4289static int leaks_show(struct seq_file *m, void *p) 4286static int leaks_show(struct seq_file *m, void *p)
diff --git a/mm/sparse.c b/mm/sparse.c
index 7a5dacaa06e3..2609aba121e8 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -211,7 +211,7 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
211 if (unlikely(!mem_section)) { 211 if (unlikely(!mem_section)) {
212 unsigned long size, align; 212 unsigned long size, align;
213 213
214 size = sizeof(struct mem_section) * NR_SECTION_ROOTS; 214 size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
215 align = 1 << (INTERNODE_CACHE_SHIFT); 215 align = 1 << (INTERNODE_CACHE_SHIFT);
216 mem_section = memblock_virt_alloc(size, align); 216 mem_section = memblock_virt_alloc(size, align);
217 } 217 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c02c850ea349..47d5ced51f2d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -297,10 +297,13 @@ EXPORT_SYMBOL(register_shrinker);
297 */ 297 */
298void unregister_shrinker(struct shrinker *shrinker) 298void unregister_shrinker(struct shrinker *shrinker)
299{ 299{
300 if (!shrinker->nr_deferred)
301 return;
300 down_write(&shrinker_rwsem); 302 down_write(&shrinker_rwsem);
301 list_del(&shrinker->list); 303 list_del(&shrinker->list);
302 up_write(&shrinker_rwsem); 304 up_write(&shrinker_rwsem);
303 kfree(shrinker->nr_deferred); 305 kfree(shrinker->nr_deferred);
306 shrinker->nr_deferred = NULL;
304} 307}
305EXPORT_SYMBOL(unregister_shrinker); 308EXPORT_SYMBOL(unregister_shrinker);
306 309
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 685049a9048d..683c0651098c 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -53,6 +53,7 @@
53#include <linux/mount.h> 53#include <linux/mount.h>
54#include <linux/migrate.h> 54#include <linux/migrate.h>
55#include <linux/pagemap.h> 55#include <linux/pagemap.h>
56#include <linux/fs.h>
56 57
57#define ZSPAGE_MAGIC 0x58 58#define ZSPAGE_MAGIC 0x58
58 59
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 8dfdd94e430f..bad01b14a4ad 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -111,12 +111,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
111 vlan_gvrp_uninit_applicant(real_dev); 111 vlan_gvrp_uninit_applicant(real_dev);
112 } 112 }
113 113
114 /* Take it out of our own structures, but be sure to interlock with 114 vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
115 * HW accelerating devices or SW vlan input packet processing if
116 * VLAN is not 0 (leave it there for 802.1p).
117 */
118 if (vlan_id)
119 vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
120 115
121 /* Get rid of the vlan's reference to real_dev */ 116 /* Get rid of the vlan's reference to real_dev */
122 dev_put(real_dev); 117 dev_put(real_dev);
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 985046ae4231..80f5c79053a4 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -839,7 +839,6 @@ static int p9_socket_open(struct p9_client *client, struct socket *csocket)
839 if (IS_ERR(file)) { 839 if (IS_ERR(file)) {
840 pr_err("%s (%d): failed to map fd\n", 840 pr_err("%s (%d): failed to map fd\n",
841 __func__, task_pid_nr(current)); 841 __func__, task_pid_nr(current));
842 sock_release(csocket);
843 kfree(p); 842 kfree(p);
844 return PTR_ERR(file); 843 return PTR_ERR(file);
845 } 844 }
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index 325c56043007..086a4abdfa7c 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -543,3 +543,7 @@ static void p9_trans_xen_exit(void)
543 return xenbus_unregister_driver(&xen_9pfs_front_driver); 543 return xenbus_unregister_driver(&xen_9pfs_front_driver);
544} 544}
545module_exit(p9_trans_xen_exit); 545module_exit(p9_trans_xen_exit);
546
547MODULE_AUTHOR("Stefano Stabellini <stefano@aporeto.com>");
548MODULE_DESCRIPTION("Xen Transport for 9P");
549MODULE_LICENSE("GPL");
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 1b659ab652fb..bbe8414b6ee7 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -1214,7 +1214,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
1214 orig_node->last_seen = jiffies; 1214 orig_node->last_seen = jiffies;
1215 1215
1216 /* find packet count of corresponding one hop neighbor */ 1216 /* find packet count of corresponding one hop neighbor */
1217 spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock); 1217 spin_lock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
1218 if_num = if_incoming->if_num; 1218 if_num = if_incoming->if_num;
1219 orig_eq_count = orig_neigh_node->bat_iv.bcast_own_sum[if_num]; 1219 orig_eq_count = orig_neigh_node->bat_iv.bcast_own_sum[if_num];
1220 neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing); 1220 neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing);
@@ -1224,7 +1224,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
1224 } else { 1224 } else {
1225 neigh_rq_count = 0; 1225 neigh_rq_count = 0;
1226 } 1226 }
1227 spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock); 1227 spin_unlock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
1228 1228
1229 /* pay attention to not get a value bigger than 100 % */ 1229 /* pay attention to not get a value bigger than 100 % */
1230 if (orig_eq_count > neigh_rq_count) 1230 if (orig_eq_count > neigh_rq_count)
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index 341ceab8338d..e0e2bfcd6b3e 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -814,7 +814,7 @@ static bool batadv_v_gw_is_eligible(struct batadv_priv *bat_priv,
814 } 814 }
815 815
816 orig_gw = batadv_gw_node_get(bat_priv, orig_node); 816 orig_gw = batadv_gw_node_get(bat_priv, orig_node);
817 if (!orig_node) 817 if (!orig_gw)
818 goto out; 818 goto out;
819 819
820 if (batadv_v_gw_throughput_get(orig_gw, &orig_throughput) < 0) 820 if (batadv_v_gw_throughput_get(orig_gw, &orig_throughput) < 0)
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index a98cf1104a30..ebe6e38934e4 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -499,6 +499,8 @@ int batadv_frag_send_packet(struct sk_buff *skb,
499 */ 499 */
500 if (skb->priority >= 256 && skb->priority <= 263) 500 if (skb->priority >= 256 && skb->priority <= 263)
501 frag_header.priority = skb->priority - 256; 501 frag_header.priority = skb->priority - 256;
502 else
503 frag_header.priority = 0;
502 504
503 ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr); 505 ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr);
504 ether_addr_copy(frag_header.dest, orig_node->orig); 506 ether_addr_copy(frag_header.dest, orig_node->orig);
diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c
index 15cd2139381e..ebc4e2241c77 100644
--- a/net/batman-adv/tp_meter.c
+++ b/net/batman-adv/tp_meter.c
@@ -482,7 +482,7 @@ static void batadv_tp_reset_sender_timer(struct batadv_tp_vars *tp_vars)
482 482
483/** 483/**
484 * batadv_tp_sender_timeout - timer that fires in case of packet loss 484 * batadv_tp_sender_timeout - timer that fires in case of packet loss
485 * @arg: address of the related tp_vars 485 * @t: address to timer_list inside tp_vars
486 * 486 *
487 * If fired it means that there was packet loss. 487 * If fired it means that there was packet loss.
488 * Switch to Slow Start, set the ss_threshold to half of the current cwnd and 488 * Switch to Slow Start, set the ss_threshold to half of the current cwnd and
@@ -1106,7 +1106,7 @@ static void batadv_tp_reset_receiver_timer(struct batadv_tp_vars *tp_vars)
1106/** 1106/**
1107 * batadv_tp_receiver_shutdown - stop a tp meter receiver when timeout is 1107 * batadv_tp_receiver_shutdown - stop a tp meter receiver when timeout is
1108 * reached without received ack 1108 * reached without received ack
1109 * @arg: address of the related tp_vars 1109 * @t: address to timer_list inside tp_vars
1110 */ 1110 */
1111static void batadv_tp_receiver_shutdown(struct timer_list *t) 1111static void batadv_tp_receiver_shutdown(struct timer_list *t)
1112{ 1112{
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 43ba91c440bc..fc6615d59165 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -3363,9 +3363,10 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
3363 break; 3363 break;
3364 3364
3365 case L2CAP_CONF_EFS: 3365 case L2CAP_CONF_EFS:
3366 remote_efs = 1; 3366 if (olen == sizeof(efs)) {
3367 if (olen == sizeof(efs)) 3367 remote_efs = 1;
3368 memcpy(&efs, (void *) val, olen); 3368 memcpy(&efs, (void *) val, olen);
3369 }
3369 break; 3370 break;
3370 3371
3371 case L2CAP_CONF_EWS: 3372 case L2CAP_CONF_EWS:
@@ -3584,16 +3585,17 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3584 break; 3585 break;
3585 3586
3586 case L2CAP_CONF_EFS: 3587 case L2CAP_CONF_EFS:
3587 if (olen == sizeof(efs)) 3588 if (olen == sizeof(efs)) {
3588 memcpy(&efs, (void *)val, olen); 3589 memcpy(&efs, (void *)val, olen);
3589 3590
3590 if (chan->local_stype != L2CAP_SERV_NOTRAFIC && 3591 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3591 efs.stype != L2CAP_SERV_NOTRAFIC && 3592 efs.stype != L2CAP_SERV_NOTRAFIC &&
3592 efs.stype != chan->local_stype) 3593 efs.stype != chan->local_stype)
3593 return -ECONNREFUSED; 3594 return -ECONNREFUSED;
3594 3595
3595 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs), 3596 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3596 (unsigned long) &efs, endptr - ptr); 3597 (unsigned long) &efs, endptr - ptr);
3598 }
3597 break; 3599 break;
3598 3600
3599 case L2CAP_CONF_FCS: 3601 case L2CAP_CONF_FCS:
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index d0ef0a8e8831..015f465c514b 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -1262,19 +1262,20 @@ static int br_dev_newlink(struct net *src_net, struct net_device *dev,
1262 struct net_bridge *br = netdev_priv(dev); 1262 struct net_bridge *br = netdev_priv(dev);
1263 int err; 1263 int err;
1264 1264
1265 err = register_netdevice(dev);
1266 if (err)
1267 return err;
1268
1265 if (tb[IFLA_ADDRESS]) { 1269 if (tb[IFLA_ADDRESS]) {
1266 spin_lock_bh(&br->lock); 1270 spin_lock_bh(&br->lock);
1267 br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS])); 1271 br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
1268 spin_unlock_bh(&br->lock); 1272 spin_unlock_bh(&br->lock);
1269 } 1273 }
1270 1274
1271 err = register_netdevice(dev);
1272 if (err)
1273 return err;
1274
1275 err = br_changelink(dev, tb, data, extack); 1275 err = br_changelink(dev, tb, data, extack);
1276 if (err) 1276 if (err)
1277 unregister_netdevice(dev); 1277 br_dev_delete(dev, NULL);
1278
1278 return err; 1279 return err;
1279} 1280}
1280 1281
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 2d38b6e34203..e0adcd123f48 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -334,9 +334,8 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
334 mutex_lock(&caifdevs->lock); 334 mutex_lock(&caifdevs->lock);
335 list_add_rcu(&caifd->list, &caifdevs->list); 335 list_add_rcu(&caifd->list, &caifdevs->list);
336 336
337 strncpy(caifd->layer.name, dev->name, 337 strlcpy(caifd->layer.name, dev->name,
338 sizeof(caifd->layer.name) - 1); 338 sizeof(caifd->layer.name));
339 caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
340 caifd->layer.transmit = transmit; 339 caifd->layer.transmit = transmit;
341 cfcnfg_add_phy_layer(cfg, 340 cfcnfg_add_phy_layer(cfg,
342 dev, 341 dev,
diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
index 5cd44f001f64..1a082a946045 100644
--- a/net/caif/caif_usb.c
+++ b/net/caif/caif_usb.c
@@ -176,9 +176,7 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
176 dev_add_pack(&caif_usb_type); 176 dev_add_pack(&caif_usb_type);
177 pack_added = true; 177 pack_added = true;
178 178
179 strncpy(layer->name, dev->name, 179 strlcpy(layer->name, dev->name, sizeof(layer->name));
180 sizeof(layer->name) - 1);
181 layer->name[sizeof(layer->name) - 1] = 0;
182 180
183 return 0; 181 return 0;
184} 182}
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 273cb07f57d8..8f00bea093b9 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -268,17 +268,15 @@ static int caif_connect_req_to_link_param(struct cfcnfg *cnfg,
268 case CAIFPROTO_RFM: 268 case CAIFPROTO_RFM:
269 l->linktype = CFCTRL_SRV_RFM; 269 l->linktype = CFCTRL_SRV_RFM;
270 l->u.datagram.connid = s->sockaddr.u.rfm.connection_id; 270 l->u.datagram.connid = s->sockaddr.u.rfm.connection_id;
271 strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume, 271 strlcpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
272 sizeof(l->u.rfm.volume)-1); 272 sizeof(l->u.rfm.volume));
273 l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0;
274 break; 273 break;
275 case CAIFPROTO_UTIL: 274 case CAIFPROTO_UTIL:
276 l->linktype = CFCTRL_SRV_UTIL; 275 l->linktype = CFCTRL_SRV_UTIL;
277 l->endpoint = 0x00; 276 l->endpoint = 0x00;
278 l->chtype = 0x00; 277 l->chtype = 0x00;
279 strncpy(l->u.utility.name, s->sockaddr.u.util.service, 278 strlcpy(l->u.utility.name, s->sockaddr.u.util.service,
280 sizeof(l->u.utility.name)-1); 279 sizeof(l->u.utility.name));
281 l->u.utility.name[sizeof(l->u.utility.name)-1] = 0;
282 caif_assert(sizeof(l->u.utility.name) > 10); 280 caif_assert(sizeof(l->u.utility.name) > 10);
283 l->u.utility.paramlen = s->param.size; 281 l->u.utility.paramlen = s->param.size;
284 if (l->u.utility.paramlen > sizeof(l->u.utility.params)) 282 if (l->u.utility.paramlen > sizeof(l->u.utility.params))
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index f5afda1abc76..655ed7032150 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -258,8 +258,8 @@ int cfctrl_linkup_request(struct cflayer *layer,
258 tmp16 = cpu_to_le16(param->u.utility.fifosize_bufs); 258 tmp16 = cpu_to_le16(param->u.utility.fifosize_bufs);
259 cfpkt_add_body(pkt, &tmp16, 2); 259 cfpkt_add_body(pkt, &tmp16, 2);
260 memset(utility_name, 0, sizeof(utility_name)); 260 memset(utility_name, 0, sizeof(utility_name));
261 strncpy(utility_name, param->u.utility.name, 261 strlcpy(utility_name, param->u.utility.name,
262 UTILITY_NAME_LENGTH - 1); 262 UTILITY_NAME_LENGTH);
263 cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH); 263 cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH);
264 tmp8 = param->u.utility.paramlen; 264 tmp8 = param->u.utility.paramlen;
265 cfpkt_add_body(pkt, &tmp8, 1); 265 cfpkt_add_body(pkt, &tmp8, 1);
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 003b2d6d655f..4d7f988a3130 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -721,20 +721,16 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
721{ 721{
722 struct canfd_frame *cfd = (struct canfd_frame *)skb->data; 722 struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
723 723
724 if (WARN_ONCE(dev->type != ARPHRD_CAN || 724 if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU ||
725 skb->len != CAN_MTU || 725 cfd->len > CAN_MAX_DLEN)) {
726 cfd->len > CAN_MAX_DLEN, 726 pr_warn_once("PF_CAN: dropped non conform CAN skbuf: dev type %d, len %d, datalen %d\n",
727 "PF_CAN: dropped non conform CAN skbuf: " 727 dev->type, skb->len, cfd->len);
728 "dev type %d, len %d, datalen %d\n", 728 kfree_skb(skb);
729 dev->type, skb->len, cfd->len)) 729 return NET_RX_DROP;
730 goto drop; 730 }
731 731
732 can_receive(skb, dev); 732 can_receive(skb, dev);
733 return NET_RX_SUCCESS; 733 return NET_RX_SUCCESS;
734
735drop:
736 kfree_skb(skb);
737 return NET_RX_DROP;
738} 734}
739 735
740static int canfd_rcv(struct sk_buff *skb, struct net_device *dev, 736static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
@@ -742,20 +738,16 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
742{ 738{
743 struct canfd_frame *cfd = (struct canfd_frame *)skb->data; 739 struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
744 740
745 if (WARN_ONCE(dev->type != ARPHRD_CAN || 741 if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU ||
746 skb->len != CANFD_MTU || 742 cfd->len > CANFD_MAX_DLEN)) {
747 cfd->len > CANFD_MAX_DLEN, 743 pr_warn_once("PF_CAN: dropped non conform CAN FD skbuf: dev type %d, len %d, datalen %d\n",
748 "PF_CAN: dropped non conform CAN FD skbuf: " 744 dev->type, skb->len, cfd->len);
749 "dev type %d, len %d, datalen %d\n", 745 kfree_skb(skb);
750 dev->type, skb->len, cfd->len)) 746 return NET_RX_DROP;
751 goto drop; 747 }
752 748
753 can_receive(skb, dev); 749 can_receive(skb, dev);
754 return NET_RX_SUCCESS; 750 return NET_RX_SUCCESS;
755
756drop:
757 kfree_skb(skb);
758 return NET_RX_DROP;
759} 751}
760 752
761/* 753/*
diff --git a/net/core/dev.c b/net/core/dev.c
index 07ed21d64f92..613fb4066be7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1106,7 +1106,7 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
1106 * when the name is long and there isn't enough space left 1106 * when the name is long and there isn't enough space left
1107 * for the digits, or if all bits are used. 1107 * for the digits, or if all bits are used.
1108 */ 1108 */
1109 return p ? -ENFILE : -EEXIST; 1109 return -ENFILE;
1110} 1110}
1111 1111
1112static int dev_alloc_name_ns(struct net *net, 1112static int dev_alloc_name_ns(struct net *net,
@@ -1146,7 +1146,19 @@ EXPORT_SYMBOL(dev_alloc_name);
1146int dev_get_valid_name(struct net *net, struct net_device *dev, 1146int dev_get_valid_name(struct net *net, struct net_device *dev,
1147 const char *name) 1147 const char *name)
1148{ 1148{
1149 return dev_alloc_name_ns(net, dev, name); 1149 BUG_ON(!net);
1150
1151 if (!dev_valid_name(name))
1152 return -EINVAL;
1153
1154 if (strchr(name, '%'))
1155 return dev_alloc_name_ns(net, dev, name);
1156 else if (__dev_get_by_name(net, name))
1157 return -EEXIST;
1158 else if (dev->name != name)
1159 strlcpy(dev->name, name, IFNAMSIZ);
1160
1161 return 0;
1150} 1162}
1151EXPORT_SYMBOL(dev_get_valid_name); 1163EXPORT_SYMBOL(dev_get_valid_name);
1152 1164
@@ -3139,10 +3151,21 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
3139 hdr_len = skb_transport_header(skb) - skb_mac_header(skb); 3151 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3140 3152
3141 /* + transport layer */ 3153 /* + transport layer */
3142 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) 3154 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3143 hdr_len += tcp_hdrlen(skb); 3155 const struct tcphdr *th;
3144 else 3156 struct tcphdr _tcphdr;
3145 hdr_len += sizeof(struct udphdr); 3157
3158 th = skb_header_pointer(skb, skb_transport_offset(skb),
3159 sizeof(_tcphdr), &_tcphdr);
3160 if (likely(th))
3161 hdr_len += __tcp_hdrlen(th);
3162 } else {
3163 struct udphdr _udphdr;
3164
3165 if (skb_header_pointer(skb, skb_transport_offset(skb),
3166 sizeof(_udphdr), &_udphdr))
3167 hdr_len += sizeof(struct udphdr);
3168 }
3146 3169
3147 if (shinfo->gso_type & SKB_GSO_DODGY) 3170 if (shinfo->gso_type & SKB_GSO_DODGY)
3148 gso_segs = DIV_ROUND_UP(skb->len - hdr_len, 3171 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
@@ -3904,7 +3927,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
3904 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0, 3927 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
3905 troom > 0 ? troom + 128 : 0, GFP_ATOMIC)) 3928 troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
3906 goto do_drop; 3929 goto do_drop;
3907 if (troom > 0 && __skb_linearize(skb)) 3930 if (skb_linearize(skb))
3908 goto do_drop; 3931 goto do_drop;
3909 } 3932 }
3910 3933
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index f8fcf450a36e..8225416911ae 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -770,15 +770,6 @@ static int ethtool_set_link_ksettings(struct net_device *dev,
770 return dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings); 770 return dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings);
771} 771}
772 772
773static void
774warn_incomplete_ethtool_legacy_settings_conversion(const char *details)
775{
776 char name[sizeof(current->comm)];
777
778 pr_info_once("warning: `%s' uses legacy ethtool link settings API, %s\n",
779 get_task_comm(name, current), details);
780}
781
782/* Query device for its ethtool_cmd settings. 773/* Query device for its ethtool_cmd settings.
783 * 774 *
784 * Backward compatibility note: for compatibility with legacy ethtool, 775 * Backward compatibility note: for compatibility with legacy ethtool,
@@ -805,10 +796,8 @@ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
805 &link_ksettings); 796 &link_ksettings);
806 if (err < 0) 797 if (err < 0)
807 return err; 798 return err;
808 if (!convert_link_ksettings_to_legacy_settings(&cmd, 799 convert_link_ksettings_to_legacy_settings(&cmd,
809 &link_ksettings)) 800 &link_ksettings);
810 warn_incomplete_ethtool_legacy_settings_conversion(
811 "link modes are only partially reported");
812 801
813 /* send a sensible cmd tag back to user */ 802 /* send a sensible cmd tag back to user */
814 cmd.cmd = ETHTOOL_GSET; 803 cmd.cmd = ETHTOOL_GSET;
diff --git a/net/core/filter.c b/net/core/filter.c
index 6a85e67fafce..1c0eb436671f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -458,6 +458,10 @@ do_pass:
458 convert_bpf_extensions(fp, &insn)) 458 convert_bpf_extensions(fp, &insn))
459 break; 459 break;
460 460
461 if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
462 fp->code == (BPF_ALU | BPF_MOD | BPF_X))
463 *insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
464
461 *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k); 465 *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
462 break; 466 break;
463 467
@@ -1054,11 +1058,9 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
1054 */ 1058 */
1055 goto out_err_free; 1059 goto out_err_free;
1056 1060
1057 /* We are guaranteed to never error here with cBPF to eBPF
1058 * transitions, since there's no issue with type compatibility
1059 * checks on program arrays.
1060 */
1061 fp = bpf_prog_select_runtime(fp, &err); 1061 fp = bpf_prog_select_runtime(fp, &err);
1062 if (err)
1063 goto out_err_free;
1062 1064
1063 kfree(old_prog); 1065 kfree(old_prog);
1064 return fp; 1066 return fp;
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 15ce30063765..544bddf08e13 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -976,8 +976,8 @@ ip_proto_again:
976out_good: 976out_good:
977 ret = true; 977 ret = true;
978 978
979 key_control->thoff = (u16)nhoff;
980out: 979out:
980 key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
981 key_basic->n_proto = proto; 981 key_basic->n_proto = proto;
982 key_basic->ip_proto = ip_proto; 982 key_basic->ip_proto = ip_proto;
983 983
@@ -985,7 +985,6 @@ out:
985 985
986out_bad: 986out_bad:
987 ret = false; 987 ret = false;
988 key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
989 goto out; 988 goto out;
990} 989}
991EXPORT_SYMBOL(__skb_flow_dissect); 990EXPORT_SYMBOL(__skb_flow_dissect);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index d1f5fe986edd..7f831711b6e0 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -532,7 +532,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
532 if (atomic_read(&tbl->entries) > (1 << nht->hash_shift)) 532 if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
533 nht = neigh_hash_grow(tbl, nht->hash_shift + 1); 533 nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
534 534
535 hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift); 535 hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
536 536
537 if (n->parms->dead) { 537 if (n->parms->dead) {
538 rc = ERR_PTR(-EINVAL); 538 rc = ERR_PTR(-EINVAL);
@@ -544,7 +544,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
544 n1 != NULL; 544 n1 != NULL;
545 n1 = rcu_dereference_protected(n1->next, 545 n1 = rcu_dereference_protected(n1->next,
546 lockdep_is_held(&tbl->lock))) { 546 lockdep_is_held(&tbl->lock))) {
547 if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) { 547 if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
548 if (want_ref) 548 if (want_ref)
549 neigh_hold(n1); 549 neigh_hold(n1);
550 rc = n1; 550 rc = n1;
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index b797832565d3..60a71be75aea 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -267,7 +267,7 @@ struct net *get_net_ns_by_id(struct net *net, int id)
267 spin_lock_bh(&net->nsid_lock); 267 spin_lock_bh(&net->nsid_lock);
268 peer = idr_find(&net->netns_ids, id); 268 peer = idr_find(&net->netns_ids, id);
269 if (peer) 269 if (peer)
270 get_net(peer); 270 peer = maybe_get_net(peer);
271 spin_unlock_bh(&net->nsid_lock); 271 spin_unlock_bh(&net->nsid_lock);
272 rcu_read_unlock(); 272 rcu_read_unlock();
273 273
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 1c4810919a0a..b9057478d69c 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -14,7 +14,6 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/types.h> 16#include <linux/types.h>
17#include <linux/module.h>
18#include <linux/string.h> 17#include <linux/string.h>
19#include <linux/errno.h> 18#include <linux/errno.h>
20#include <linux/skbuff.h> 19#include <linux/skbuff.h>
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index dabba2a91fc8..778d7f03404a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1681,18 +1681,18 @@ static bool link_dump_filtered(struct net_device *dev,
1681 return false; 1681 return false;
1682} 1682}
1683 1683
1684static struct net *get_target_net(struct sk_buff *skb, int netnsid) 1684static struct net *get_target_net(struct sock *sk, int netnsid)
1685{ 1685{
1686 struct net *net; 1686 struct net *net;
1687 1687
1688 net = get_net_ns_by_id(sock_net(skb->sk), netnsid); 1688 net = get_net_ns_by_id(sock_net(sk), netnsid);
1689 if (!net) 1689 if (!net)
1690 return ERR_PTR(-EINVAL); 1690 return ERR_PTR(-EINVAL);
1691 1691
1692 /* For now, the caller is required to have CAP_NET_ADMIN in 1692 /* For now, the caller is required to have CAP_NET_ADMIN in
1693 * the user namespace owning the target net ns. 1693 * the user namespace owning the target net ns.
1694 */ 1694 */
1695 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) { 1695 if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
1696 put_net(net); 1696 put_net(net);
1697 return ERR_PTR(-EACCES); 1697 return ERR_PTR(-EACCES);
1698 } 1698 }
@@ -1733,7 +1733,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1733 ifla_policy, NULL) >= 0) { 1733 ifla_policy, NULL) >= 0) {
1734 if (tb[IFLA_IF_NETNSID]) { 1734 if (tb[IFLA_IF_NETNSID]) {
1735 netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]); 1735 netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
1736 tgt_net = get_target_net(skb, netnsid); 1736 tgt_net = get_target_net(skb->sk, netnsid);
1737 if (IS_ERR(tgt_net)) { 1737 if (IS_ERR(tgt_net)) {
1738 tgt_net = net; 1738 tgt_net = net;
1739 netnsid = -1; 1739 netnsid = -1;
@@ -2883,7 +2883,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
2883 2883
2884 if (tb[IFLA_IF_NETNSID]) { 2884 if (tb[IFLA_IF_NETNSID]) {
2885 netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]); 2885 netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
2886 tgt_net = get_target_net(skb, netnsid); 2886 tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid);
2887 if (IS_ERR(tgt_net)) 2887 if (IS_ERR(tgt_net))
2888 return PTR_ERR(tgt_net); 2888 return PTR_ERR(tgt_net);
2889 } 2889 }
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 6b0ff396fa9d..08f574081315 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1177,12 +1177,12 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
1177 int i, new_frags; 1177 int i, new_frags;
1178 u32 d_off; 1178 u32 d_off;
1179 1179
1180 if (!num_frags)
1181 return 0;
1182
1183 if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) 1180 if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
1184 return -EINVAL; 1181 return -EINVAL;
1185 1182
1183 if (!num_frags)
1184 goto release;
1185
1186 new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT; 1186 new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
1187 for (i = 0; i < new_frags; i++) { 1187 for (i = 0; i < new_frags; i++) {
1188 page = alloc_page(gfp_mask); 1188 page = alloc_page(gfp_mask);
@@ -1238,6 +1238,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
1238 __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off); 1238 __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
1239 skb_shinfo(skb)->nr_frags = new_frags; 1239 skb_shinfo(skb)->nr_frags = new_frags;
1240 1240
1241release:
1241 skb_zcopy_clear(skb, false); 1242 skb_zcopy_clear(skb, false);
1242 return 0; 1243 return 0;
1243} 1244}
@@ -3654,8 +3655,6 @@ normal:
3654 3655
3655 skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags & 3656 skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
3656 SKBTX_SHARED_FRAG; 3657 SKBTX_SHARED_FRAG;
3657 if (skb_zerocopy_clone(nskb, head_skb, GFP_ATOMIC))
3658 goto err;
3659 3658
3660 while (pos < offset + len) { 3659 while (pos < offset + len) {
3661 if (i >= nfrags) { 3660 if (i >= nfrags) {
@@ -3681,6 +3680,8 @@ normal:
3681 3680
3682 if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC))) 3681 if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
3683 goto err; 3682 goto err;
3683 if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
3684 goto err;
3684 3685
3685 *nskb_frag = *frag; 3686 *nskb_frag = *frag;
3686 __skb_frag_ref(nskb_frag); 3687 __skb_frag_ref(nskb_frag);
@@ -4293,7 +4294,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
4293 struct sock *sk = skb->sk; 4294 struct sock *sk = skb->sk;
4294 4295
4295 if (!skb_may_tx_timestamp(sk, false)) 4296 if (!skb_may_tx_timestamp(sk, false))
4296 return; 4297 goto err;
4297 4298
4298 /* Take a reference to prevent skb_orphan() from freeing the socket, 4299 /* Take a reference to prevent skb_orphan() from freeing the socket,
4299 * but only if the socket refcount is not zero. 4300 * but only if the socket refcount is not zero.
@@ -4302,7 +4303,11 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
4302 *skb_hwtstamps(skb) = *hwtstamps; 4303 *skb_hwtstamps(skb) = *hwtstamps;
4303 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); 4304 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
4304 sock_put(sk); 4305 sock_put(sk);
4306 return;
4305 } 4307 }
4308
4309err:
4310 kfree_skb(skb);
4306} 4311}
4307EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 4312EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
4308 4313
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 217f4e3b82f6..146b50e30659 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -288,7 +288,7 @@ static int sock_diag_bind(struct net *net, int group)
288 case SKNLGRP_INET6_UDP_DESTROY: 288 case SKNLGRP_INET6_UDP_DESTROY:
289 if (!sock_diag_handlers[AF_INET6]) 289 if (!sock_diag_handlers[AF_INET6])
290 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, 290 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
291 NETLINK_SOCK_DIAG, AF_INET); 291 NETLINK_SOCK_DIAG, AF_INET6);
292 break; 292 break;
293 } 293 }
294 return 0; 294 return 0;
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index cbc3dde4cfcc..a47ad6cd41c0 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -325,7 +325,13 @@ static struct ctl_table net_core_table[] = {
325 .data = &bpf_jit_enable, 325 .data = &bpf_jit_enable,
326 .maxlen = sizeof(int), 326 .maxlen = sizeof(int),
327 .mode = 0644, 327 .mode = 0644,
328#ifndef CONFIG_BPF_JIT_ALWAYS_ON
328 .proc_handler = proc_dointvec 329 .proc_handler = proc_dointvec
330#else
331 .proc_handler = proc_dointvec_minmax,
332 .extra1 = &one,
333 .extra2 = &one,
334#endif
329 }, 335 },
330# ifdef CONFIG_HAVE_EBPF_JIT 336# ifdef CONFIG_HAVE_EBPF_JIT
331 { 337 {
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 1c75cd1255f6..92d016e87816 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -140,6 +140,9 @@ static void ccid2_hc_tx_rto_expire(struct timer_list *t)
140 140
141 ccid2_pr_debug("RTO_EXPIRE\n"); 141 ccid2_pr_debug("RTO_EXPIRE\n");
142 142
143 if (sk->sk_state == DCCP_CLOSED)
144 goto out;
145
143 /* back-off timer */ 146 /* back-off timer */
144 hc->tx_rto <<= 1; 147 hc->tx_rto <<= 1;
145 if (hc->tx_rto > DCCP_RTO_MAX) 148 if (hc->tx_rto > DCCP_RTO_MAX)
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index abd07a443219..178bb9833311 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -57,10 +57,16 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
57 if (state == DCCP_TIME_WAIT) 57 if (state == DCCP_TIME_WAIT)
58 timeo = DCCP_TIMEWAIT_LEN; 58 timeo = DCCP_TIMEWAIT_LEN;
59 59
60 /* tw_timer is pinned, so we need to make sure BH are disabled
61 * in following section, otherwise timer handler could run before
62 * we complete the initialization.
63 */
64 local_bh_disable();
60 inet_twsk_schedule(tw, timeo); 65 inet_twsk_schedule(tw, timeo);
61 /* Linkage updates. */ 66 /* Linkage updates. */
62 __inet_twsk_hashdance(tw, sk, &dccp_hashinfo); 67 __inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
63 inet_twsk_put(tw); 68 inet_twsk_put(tw);
69 local_bh_enable();
64 } else { 70 } else {
65 /* Sorry, if we're out of memory, just CLOSE this 71 /* Sorry, if we're out of memory, just CLOSE this
66 * socket up. We've got bigger problems than 72 * socket up. We've got bigger problems than
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index b68168fcc06a..9d43c1f40274 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -259,6 +259,7 @@ int dccp_disconnect(struct sock *sk, int flags)
259{ 259{
260 struct inet_connection_sock *icsk = inet_csk(sk); 260 struct inet_connection_sock *icsk = inet_csk(sk);
261 struct inet_sock *inet = inet_sk(sk); 261 struct inet_sock *inet = inet_sk(sk);
262 struct dccp_sock *dp = dccp_sk(sk);
262 int err = 0; 263 int err = 0;
263 const int old_state = sk->sk_state; 264 const int old_state = sk->sk_state;
264 265
@@ -278,6 +279,10 @@ int dccp_disconnect(struct sock *sk, int flags)
278 sk->sk_err = ECONNRESET; 279 sk->sk_err = ECONNRESET;
279 280
280 dccp_clear_xmit_timers(sk); 281 dccp_clear_xmit_timers(sk);
282 ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
283 ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
284 dp->dccps_hc_rx_ccid = NULL;
285 dp->dccps_hc_tx_ccid = NULL;
281 286
282 __skb_queue_purge(&sk->sk_receive_queue); 287 __skb_queue_purge(&sk->sk_receive_queue);
283 __skb_queue_purge(&sk->sk_write_queue); 288 __skb_queue_purge(&sk->sk_write_queue);
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index d6e7a642493b..a95a55f79137 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -16,7 +16,6 @@
16#include <linux/of_net.h> 16#include <linux/of_net.h>
17#include <linux/of_mdio.h> 17#include <linux/of_mdio.h>
18#include <linux/mdio.h> 18#include <linux/mdio.h>
19#include <linux/list.h>
20#include <net/rtnetlink.h> 19#include <net/rtnetlink.h>
21#include <net/pkt_cls.h> 20#include <net/pkt_cls.h>
22#include <net/tc_act/tc_mirred.h> 21#include <net/tc_act/tc_mirred.h>
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index a8d7c5a9fb05..6c231b43974d 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -223,11 +223,16 @@ static bool arp_key_eq(const struct neighbour *neigh, const void *pkey)
223 223
224static int arp_constructor(struct neighbour *neigh) 224static int arp_constructor(struct neighbour *neigh)
225{ 225{
226 __be32 addr = *(__be32 *)neigh->primary_key; 226 __be32 addr;
227 struct net_device *dev = neigh->dev; 227 struct net_device *dev = neigh->dev;
228 struct in_device *in_dev; 228 struct in_device *in_dev;
229 struct neigh_parms *parms; 229 struct neigh_parms *parms;
230 u32 inaddr_any = INADDR_ANY;
230 231
232 if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
233 memcpy(neigh->primary_key, &inaddr_any, arp_tbl.key_len);
234
235 addr = *(__be32 *)neigh->primary_key;
231 rcu_read_lock(); 236 rcu_read_lock();
232 in_dev = __in_dev_get_rcu(dev); 237 in_dev = __in_dev_get_rcu(dev);
233 if (!in_dev) { 238 if (!in_dev) {
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index a4573bccd6da..7a93359fbc72 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1428,7 +1428,7 @@ skip:
1428 1428
1429static bool inetdev_valid_mtu(unsigned int mtu) 1429static bool inetdev_valid_mtu(unsigned int mtu)
1430{ 1430{
1431 return mtu >= 68; 1431 return mtu >= IPV4_MIN_MTU;
1432} 1432}
1433 1433
1434static void inetdev_send_gratuitous_arp(struct net_device *dev, 1434static void inetdev_send_gratuitous_arp(struct net_device *dev,
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index d57aa64fa7c7..61fe6e4d23fc 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -981,6 +981,7 @@ static int esp_init_state(struct xfrm_state *x)
981 981
982 switch (encap->encap_type) { 982 switch (encap->encap_type) {
983 default: 983 default:
984 err = -EINVAL;
984 goto error; 985 goto error;
985 case UDP_ENCAP_ESPINUDP: 986 case UDP_ENCAP_ESPINUDP:
986 x->props.header_len += sizeof(struct udphdr); 987 x->props.header_len += sizeof(struct udphdr);
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index f8b918c766b0..29b333a62ab0 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -38,7 +38,8 @@ static struct sk_buff **esp4_gro_receive(struct sk_buff **head,
38 __be32 spi; 38 __be32 spi;
39 int err; 39 int err;
40 40
41 skb_pull(skb, offset); 41 if (!pskb_pull(skb, offset))
42 return NULL;
42 43
43 if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0) 44 if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
44 goto out; 45 goto out;
@@ -121,6 +122,9 @@ static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
121 if (!xo) 122 if (!xo)
122 goto out; 123 goto out;
123 124
125 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
126 goto out;
127
124 seq = xo->seq.low; 128 seq = xo->seq.low;
125 129
126 x = skb->sp->xvec[skb->sp->len - 1]; 130 x = skb->sp->xvec[skb->sp->len - 1];
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index f52d27a422c3..08259d078b1c 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1298,14 +1298,19 @@ err_table_hash_alloc:
1298 1298
1299static void ip_fib_net_exit(struct net *net) 1299static void ip_fib_net_exit(struct net *net)
1300{ 1300{
1301 unsigned int i; 1301 int i;
1302 1302
1303 rtnl_lock(); 1303 rtnl_lock();
1304#ifdef CONFIG_IP_MULTIPLE_TABLES 1304#ifdef CONFIG_IP_MULTIPLE_TABLES
1305 RCU_INIT_POINTER(net->ipv4.fib_main, NULL); 1305 RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
1306 RCU_INIT_POINTER(net->ipv4.fib_default, NULL); 1306 RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
1307#endif 1307#endif
1308 for (i = 0; i < FIB_TABLE_HASHSZ; i++) { 1308 /* Destroy the tables in reverse order to guarantee that the
1309 * local table, ID 255, is destroyed before the main table, ID
1310 * 254. This is necessary as the local table may contain
1311 * references to data contained in the main table.
1312 */
1313 for (i = FIB_TABLE_HASHSZ - 1; i >= 0; i--) {
1309 struct hlist_head *head = &net->ipv4.fib_table_hash[i]; 1314 struct hlist_head *head = &net->ipv4.fib_table_hash[i];
1310 struct hlist_node *tmp; 1315 struct hlist_node *tmp;
1311 struct fib_table *tb; 1316 struct fib_table *tb;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index f04d944f8abe..c586597da20d 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -698,7 +698,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
698 698
699 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) { 699 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
700 int type = nla_type(nla); 700 int type = nla_type(nla);
701 u32 val; 701 u32 fi_val, val;
702 702
703 if (!type) 703 if (!type)
704 continue; 704 continue;
@@ -715,7 +715,11 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
715 val = nla_get_u32(nla); 715 val = nla_get_u32(nla);
716 } 716 }
717 717
718 if (fi->fib_metrics->metrics[type - 1] != val) 718 fi_val = fi->fib_metrics->metrics[type - 1];
719 if (type == RTAX_FEATURES)
720 fi_val &= ~DST_FEATURE_ECN_CA;
721
722 if (fi_val != val)
719 return false; 723 return false;
720 } 724 }
721 725
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index d1f8f302dbf3..2d49717a7421 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -89,6 +89,7 @@
89#include <linux/rtnetlink.h> 89#include <linux/rtnetlink.h>
90#include <linux/times.h> 90#include <linux/times.h>
91#include <linux/pkt_sched.h> 91#include <linux/pkt_sched.h>
92#include <linux/byteorder/generic.h>
92 93
93#include <net/net_namespace.h> 94#include <net/net_namespace.h>
94#include <net/arp.h> 95#include <net/arp.h>
@@ -321,6 +322,23 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
321 return scount; 322 return scount;
322} 323}
323 324
325/* source address selection per RFC 3376 section 4.2.13 */
326static __be32 igmpv3_get_srcaddr(struct net_device *dev,
327 const struct flowi4 *fl4)
328{
329 struct in_device *in_dev = __in_dev_get_rcu(dev);
330
331 if (!in_dev)
332 return htonl(INADDR_ANY);
333
334 for_ifa(in_dev) {
335 if (fl4->saddr == ifa->ifa_local)
336 return fl4->saddr;
337 } endfor_ifa(in_dev);
338
339 return htonl(INADDR_ANY);
340}
341
324static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu) 342static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
325{ 343{
326 struct sk_buff *skb; 344 struct sk_buff *skb;
@@ -368,7 +386,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
368 pip->frag_off = htons(IP_DF); 386 pip->frag_off = htons(IP_DF);
369 pip->ttl = 1; 387 pip->ttl = 1;
370 pip->daddr = fl4.daddr; 388 pip->daddr = fl4.daddr;
371 pip->saddr = fl4.saddr; 389 pip->saddr = igmpv3_get_srcaddr(dev, &fl4);
372 pip->protocol = IPPROTO_IGMP; 390 pip->protocol = IPPROTO_IGMP;
373 pip->tot_len = 0; /* filled in later */ 391 pip->tot_len = 0; /* filled in later */
374 ip_select_ident(net, skb, NULL); 392 ip_select_ident(net, skb, NULL);
@@ -404,16 +422,17 @@ static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
404} 422}
405 423
406static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc, 424static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
407 int type, struct igmpv3_grec **ppgr) 425 int type, struct igmpv3_grec **ppgr, unsigned int mtu)
408{ 426{
409 struct net_device *dev = pmc->interface->dev; 427 struct net_device *dev = pmc->interface->dev;
410 struct igmpv3_report *pih; 428 struct igmpv3_report *pih;
411 struct igmpv3_grec *pgr; 429 struct igmpv3_grec *pgr;
412 430
413 if (!skb) 431 if (!skb) {
414 skb = igmpv3_newpack(dev, dev->mtu); 432 skb = igmpv3_newpack(dev, mtu);
415 if (!skb) 433 if (!skb)
416 return NULL; 434 return NULL;
435 }
417 pgr = skb_put(skb, sizeof(struct igmpv3_grec)); 436 pgr = skb_put(skb, sizeof(struct igmpv3_grec));
418 pgr->grec_type = type; 437 pgr->grec_type = type;
419 pgr->grec_auxwords = 0; 438 pgr->grec_auxwords = 0;
@@ -436,12 +455,17 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
436 struct igmpv3_grec *pgr = NULL; 455 struct igmpv3_grec *pgr = NULL;
437 struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list; 456 struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
438 int scount, stotal, first, isquery, truncate; 457 int scount, stotal, first, isquery, truncate;
458 unsigned int mtu;
439 459
440 if (pmc->multiaddr == IGMP_ALL_HOSTS) 460 if (pmc->multiaddr == IGMP_ALL_HOSTS)
441 return skb; 461 return skb;
442 if (ipv4_is_local_multicast(pmc->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports) 462 if (ipv4_is_local_multicast(pmc->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports)
443 return skb; 463 return skb;
444 464
465 mtu = READ_ONCE(dev->mtu);
466 if (mtu < IPV4_MIN_MTU)
467 return skb;
468
445 isquery = type == IGMPV3_MODE_IS_INCLUDE || 469 isquery = type == IGMPV3_MODE_IS_INCLUDE ||
446 type == IGMPV3_MODE_IS_EXCLUDE; 470 type == IGMPV3_MODE_IS_EXCLUDE;
447 truncate = type == IGMPV3_MODE_IS_EXCLUDE || 471 truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
@@ -462,7 +486,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
462 AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { 486 AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
463 if (skb) 487 if (skb)
464 igmpv3_sendpack(skb); 488 igmpv3_sendpack(skb);
465 skb = igmpv3_newpack(dev, dev->mtu); 489 skb = igmpv3_newpack(dev, mtu);
466 } 490 }
467 } 491 }
468 first = 1; 492 first = 1;
@@ -498,12 +522,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
498 pgr->grec_nsrcs = htons(scount); 522 pgr->grec_nsrcs = htons(scount);
499 if (skb) 523 if (skb)
500 igmpv3_sendpack(skb); 524 igmpv3_sendpack(skb);
501 skb = igmpv3_newpack(dev, dev->mtu); 525 skb = igmpv3_newpack(dev, mtu);
502 first = 1; 526 first = 1;
503 scount = 0; 527 scount = 0;
504 } 528 }
505 if (first) { 529 if (first) {
506 skb = add_grhead(skb, pmc, type, &pgr); 530 skb = add_grhead(skb, pmc, type, &pgr, mtu);
507 first = 0; 531 first = 0;
508 } 532 }
509 if (!skb) 533 if (!skb)
@@ -538,7 +562,7 @@ empty_source:
538 igmpv3_sendpack(skb); 562 igmpv3_sendpack(skb);
539 skb = NULL; /* add_grhead will get a new one */ 563 skb = NULL; /* add_grhead will get a new one */
540 } 564 }
541 skb = add_grhead(skb, pmc, type, &pgr); 565 skb = add_grhead(skb, pmc, type, &pgr, mtu);
542 } 566 }
543 } 567 }
544 if (pgr) 568 if (pgr)
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index c690cd0d9b3f..b563e0c46bac 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -93,7 +93,7 @@ static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
93} 93}
94 94
95/* 95/*
96 * Enter the time wait state. 96 * Enter the time wait state. This is called with locally disabled BH.
97 * Essentially we whip up a timewait bucket, copy the relevant info into it 97 * Essentially we whip up a timewait bucket, copy the relevant info into it
98 * from the SK, and mess with hash chains and list linkage. 98 * from the SK, and mess with hash chains and list linkage.
99 */ 99 */
@@ -111,7 +111,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
111 */ 111 */
112 bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num, 112 bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
113 hashinfo->bhash_size)]; 113 hashinfo->bhash_size)];
114 spin_lock_bh(&bhead->lock); 114 spin_lock(&bhead->lock);
115 tw->tw_tb = icsk->icsk_bind_hash; 115 tw->tw_tb = icsk->icsk_bind_hash;
116 WARN_ON(!icsk->icsk_bind_hash); 116 WARN_ON(!icsk->icsk_bind_hash);
117 inet_twsk_add_bind_node(tw, &tw->tw_tb->owners); 117 inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
@@ -137,7 +137,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
137 if (__sk_nulls_del_node_init_rcu(sk)) 137 if (__sk_nulls_del_node_init_rcu(sk))
138 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 138 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
139 139
140 spin_unlock_bh(lock); 140 spin_unlock(lock);
141} 141}
142EXPORT_SYMBOL_GPL(__inet_twsk_hashdance); 142EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
143 143
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index bb6239169b1a..45ffd3d045d2 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -266,7 +266,7 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
266 len = gre_hdr_len + sizeof(*ershdr); 266 len = gre_hdr_len + sizeof(*ershdr);
267 267
268 if (unlikely(!pskb_may_pull(skb, len))) 268 if (unlikely(!pskb_may_pull(skb, len)))
269 return -ENOMEM; 269 return PACKET_REJECT;
270 270
271 iph = ip_hdr(skb); 271 iph = ip_hdr(skb);
272 ershdr = (struct erspanhdr *)(skb->data + gre_hdr_len); 272 ershdr = (struct erspanhdr *)(skb->data + gre_hdr_len);
@@ -1310,6 +1310,7 @@ static const struct net_device_ops erspan_netdev_ops = {
1310static void ipgre_tap_setup(struct net_device *dev) 1310static void ipgre_tap_setup(struct net_device *dev)
1311{ 1311{
1312 ether_setup(dev); 1312 ether_setup(dev);
1313 dev->max_mtu = 0;
1313 dev->netdev_ops = &gre_tap_netdev_ops; 1314 dev->netdev_ops = &gre_tap_netdev_ops;
1314 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1315 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1315 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1316 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index fe6fee728ce4..6d21068f9b55 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -349,8 +349,8 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
349 dev->needed_headroom = t_hlen + hlen; 349 dev->needed_headroom = t_hlen + hlen;
350 mtu -= (dev->hard_header_len + t_hlen); 350 mtu -= (dev->hard_header_len + t_hlen);
351 351
352 if (mtu < 68) 352 if (mtu < IPV4_MIN_MTU)
353 mtu = 68; 353 mtu = IPV4_MIN_MTU;
354 354
355 return mtu; 355 return mtu;
356} 356}
@@ -520,8 +520,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
520 else 520 else
521 mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu; 521 mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
522 522
523 if (skb_dst(skb)) 523 skb_dst_update_pmtu(skb, mtu);
524 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
525 524
526 if (skb->protocol == htons(ETH_P_IP)) { 525 if (skb->protocol == htons(ETH_P_IP)) {
527 if (!skb_is_gso(skb) && 526 if (!skb_is_gso(skb) &&
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 949f432a5f04..51b1669334fe 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -200,7 +200,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
200 200
201 mtu = dst_mtu(dst); 201 mtu = dst_mtu(dst);
202 if (skb->len > mtu) { 202 if (skb->len > mtu) {
203 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); 203 skb_dst_update_pmtu(skb, mtu);
204 if (skb->protocol == htons(ETH_P_IP)) { 204 if (skb->protocol == htons(ETH_P_IP)) {
205 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 205 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
206 htonl(mtu)); 206 htonl(mtu));
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index f88221aebc9d..0c3c944a7b72 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -373,7 +373,6 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
373 if (!xt_find_jump_offset(offsets, newpos, 373 if (!xt_find_jump_offset(offsets, newpos,
374 newinfo->number)) 374 newinfo->number))
375 return 0; 375 return 0;
376 e = entry0 + newpos;
377 } else { 376 } else {
378 /* ... this is a fallthru */ 377 /* ... this is a fallthru */
379 newpos = pos + e->next_offset; 378 newpos = pos + e->next_offset;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 4cbe5e80f3bf..2e0d339028bb 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -439,7 +439,6 @@ mark_source_chains(const struct xt_table_info *newinfo,
439 if (!xt_find_jump_offset(offsets, newpos, 439 if (!xt_find_jump_offset(offsets, newpos,
440 newinfo->number)) 440 newinfo->number))
441 return 0; 441 return 0;
442 e = entry0 + newpos;
443 } else { 442 } else {
444 /* ... this is a fallthru */ 443 /* ... this is a fallthru */
445 newpos = pos + e->next_offset; 444 newpos = pos + e->next_offset;
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 17b4ca562944..69060e3abe85 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -813,12 +813,13 @@ static int clusterip_net_init(struct net *net)
813 813
814static void clusterip_net_exit(struct net *net) 814static void clusterip_net_exit(struct net *net)
815{ 815{
816#ifdef CONFIG_PROC_FS
817 struct clusterip_net *cn = net_generic(net, clusterip_net_id); 816 struct clusterip_net *cn = net_generic(net, clusterip_net_id);
817#ifdef CONFIG_PROC_FS
818 proc_remove(cn->procdir); 818 proc_remove(cn->procdir);
819 cn->procdir = NULL; 819 cn->procdir = NULL;
820#endif 820#endif
821 nf_unregister_net_hook(net, &cip_arp_ops); 821 nf_unregister_net_hook(net, &cip_arp_ops);
822 WARN_ON_ONCE(!list_empty(&cn->configs));
822} 823}
823 824
824static struct pernet_operations clusterip_net_ops = { 825static struct pernet_operations clusterip_net_ops = {
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 33b70bfd1122..5e570aa9e43b 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -513,11 +513,18 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
513 int err; 513 int err;
514 struct ip_options_data opt_copy; 514 struct ip_options_data opt_copy;
515 struct raw_frag_vec rfv; 515 struct raw_frag_vec rfv;
516 int hdrincl;
516 517
517 err = -EMSGSIZE; 518 err = -EMSGSIZE;
518 if (len > 0xFFFF) 519 if (len > 0xFFFF)
519 goto out; 520 goto out;
520 521
522 /* hdrincl should be READ_ONCE(inet->hdrincl)
523 * but READ_ONCE() doesn't work with bit fields.
524 * Doing this indirectly yields the same result.
525 */
526 hdrincl = inet->hdrincl;
527 hdrincl = READ_ONCE(hdrincl);
521 /* 528 /*
522 * Check the flags. 529 * Check the flags.
523 */ 530 */
@@ -593,7 +600,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
593 /* Linux does not mangle headers on raw sockets, 600 /* Linux does not mangle headers on raw sockets,
594 * so that IP options + IP_HDRINCL is non-sense. 601 * so that IP options + IP_HDRINCL is non-sense.
595 */ 602 */
596 if (inet->hdrincl) 603 if (hdrincl)
597 goto done; 604 goto done;
598 if (ipc.opt->opt.srr) { 605 if (ipc.opt->opt.srr) {
599 if (!daddr) 606 if (!daddr)
@@ -615,12 +622,12 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
615 622
616 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, 623 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
617 RT_SCOPE_UNIVERSE, 624 RT_SCOPE_UNIVERSE,
618 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, 625 hdrincl ? IPPROTO_RAW : sk->sk_protocol,
619 inet_sk_flowi_flags(sk) | 626 inet_sk_flowi_flags(sk) |
620 (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0), 627 (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
621 daddr, saddr, 0, 0, sk->sk_uid); 628 daddr, saddr, 0, 0, sk->sk_uid);
622 629
623 if (!inet->hdrincl) { 630 if (!hdrincl) {
624 rfv.msg = msg; 631 rfv.msg = msg;
625 rfv.hlen = 0; 632 rfv.hlen = 0;
626 633
@@ -645,7 +652,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
645 goto do_confirm; 652 goto do_confirm;
646back_from_confirm: 653back_from_confirm:
647 654
648 if (inet->hdrincl) 655 if (hdrincl)
649 err = raw_send_hdrinc(sk, &fl4, msg, len, 656 err = raw_send_hdrinc(sk, &fl4, msg, len,
650 &rt, msg->msg_flags, &ipc.sockc); 657 &rt, msg->msg_flags, &ipc.sockc);
651 658
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 43b69af242e1..4e153b23bcec 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2762,6 +2762,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2762 if (err == 0 && rt->dst.error) 2762 if (err == 0 && rt->dst.error)
2763 err = -rt->dst.error; 2763 err = -rt->dst.error;
2764 } else { 2764 } else {
2765 fl4.flowi4_iif = LOOPBACK_IFINDEX;
2765 rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb); 2766 rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
2766 err = 0; 2767 err = 0;
2767 if (IS_ERR(rt)) 2768 if (IS_ERR(rt))
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index bf97317e6c97..8e053ad7cae2 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2298,6 +2298,9 @@ adjudge_to_death:
2298 tcp_send_active_reset(sk, GFP_ATOMIC); 2298 tcp_send_active_reset(sk, GFP_ATOMIC);
2299 __NET_INC_STATS(sock_net(sk), 2299 __NET_INC_STATS(sock_net(sk),
2300 LINUX_MIB_TCPABORTONMEMORY); 2300 LINUX_MIB_TCPABORTONMEMORY);
2301 } else if (!check_net(sock_net(sk))) {
2302 /* Not possible to send reset; just close */
2303 tcp_set_state(sk, TCP_CLOSE);
2301 } 2304 }
2302 } 2305 }
2303 2306
@@ -2412,6 +2415,7 @@ int tcp_disconnect(struct sock *sk, int flags)
2412 tp->snd_cwnd_cnt = 0; 2415 tp->snd_cwnd_cnt = 0;
2413 tp->window_clamp = 0; 2416 tp->window_clamp = 0;
2414 tcp_set_ca_state(sk, TCP_CA_Open); 2417 tcp_set_ca_state(sk, TCP_CA_Open);
2418 tp->is_sack_reneg = 0;
2415 tcp_clear_retrans(tp); 2419 tcp_clear_retrans(tp);
2416 inet_csk_delack_init(sk); 2420 inet_csk_delack_init(sk);
2417 /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0 2421 /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 69ee877574d0..8322f26e770e 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -110,7 +110,8 @@ struct bbr {
110 u32 lt_last_lost; /* LT intvl start: tp->lost */ 110 u32 lt_last_lost; /* LT intvl start: tp->lost */
111 u32 pacing_gain:10, /* current gain for setting pacing rate */ 111 u32 pacing_gain:10, /* current gain for setting pacing rate */
112 cwnd_gain:10, /* current gain for setting cwnd */ 112 cwnd_gain:10, /* current gain for setting cwnd */
113 full_bw_cnt:3, /* number of rounds without large bw gains */ 113 full_bw_reached:1, /* reached full bw in Startup? */
114 full_bw_cnt:2, /* number of rounds without large bw gains */
114 cycle_idx:3, /* current index in pacing_gain cycle array */ 115 cycle_idx:3, /* current index in pacing_gain cycle array */
115 has_seen_rtt:1, /* have we seen an RTT sample yet? */ 116 has_seen_rtt:1, /* have we seen an RTT sample yet? */
116 unused_b:5; 117 unused_b:5;
@@ -180,7 +181,7 @@ static bool bbr_full_bw_reached(const struct sock *sk)
180{ 181{
181 const struct bbr *bbr = inet_csk_ca(sk); 182 const struct bbr *bbr = inet_csk_ca(sk);
182 183
183 return bbr->full_bw_cnt >= bbr_full_bw_cnt; 184 return bbr->full_bw_reached;
184} 185}
185 186
186/* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */ 187/* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
@@ -717,6 +718,7 @@ static void bbr_check_full_bw_reached(struct sock *sk,
717 return; 718 return;
718 } 719 }
719 ++bbr->full_bw_cnt; 720 ++bbr->full_bw_cnt;
721 bbr->full_bw_reached = bbr->full_bw_cnt >= bbr_full_bw_cnt;
720} 722}
721 723
722/* If pipe is probably full, drain the queue and then enter steady-state. */ 724/* If pipe is probably full, drain the queue and then enter steady-state. */
@@ -850,6 +852,7 @@ static void bbr_init(struct sock *sk)
850 bbr->restore_cwnd = 0; 852 bbr->restore_cwnd = 0;
851 bbr->round_start = 0; 853 bbr->round_start = 0;
852 bbr->idle_restart = 0; 854 bbr->idle_restart = 0;
855 bbr->full_bw_reached = 0;
853 bbr->full_bw = 0; 856 bbr->full_bw = 0;
854 bbr->full_bw_cnt = 0; 857 bbr->full_bw_cnt = 0;
855 bbr->cycle_mstamp = 0; 858 bbr->cycle_mstamp = 0;
@@ -871,6 +874,11 @@ static u32 bbr_sndbuf_expand(struct sock *sk)
871 */ 874 */
872static u32 bbr_undo_cwnd(struct sock *sk) 875static u32 bbr_undo_cwnd(struct sock *sk)
873{ 876{
877 struct bbr *bbr = inet_csk_ca(sk);
878
879 bbr->full_bw = 0; /* spurious slow-down; reset full pipe detection */
880 bbr->full_bw_cnt = 0;
881 bbr_reset_lt_bw_sampling(sk);
874 return tcp_sk(sk)->snd_cwnd; 882 return tcp_sk(sk)->snd_cwnd;
875} 883}
876 884
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 734cfc8ff76e..45f750e85714 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -508,9 +508,6 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
508 u32 new_sample = tp->rcv_rtt_est.rtt_us; 508 u32 new_sample = tp->rcv_rtt_est.rtt_us;
509 long m = sample; 509 long m = sample;
510 510
511 if (m == 0)
512 m = 1;
513
514 if (new_sample != 0) { 511 if (new_sample != 0) {
515 /* If we sample in larger samples in the non-timestamp 512 /* If we sample in larger samples in the non-timestamp
516 * case, we could grossly overestimate the RTT especially 513 * case, we could grossly overestimate the RTT especially
@@ -547,6 +544,8 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
547 if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) 544 if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
548 return; 545 return;
549 delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time); 546 delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time);
547 if (!delta_us)
548 delta_us = 1;
550 tcp_rcv_rtt_update(tp, delta_us, 1); 549 tcp_rcv_rtt_update(tp, delta_us, 1);
551 550
552new_measure: 551new_measure:
@@ -563,8 +562,11 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
563 (TCP_SKB_CB(skb)->end_seq - 562 (TCP_SKB_CB(skb)->end_seq -
564 TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) { 563 TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) {
565 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; 564 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
566 u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ); 565 u32 delta_us;
567 566
567 if (!delta)
568 delta = 1;
569 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
568 tcp_rcv_rtt_update(tp, delta_us, 0); 570 tcp_rcv_rtt_update(tp, delta_us, 0);
569 } 571 }
570} 572}
@@ -579,6 +581,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
579 int time; 581 int time;
580 int copied; 582 int copied;
581 583
584 tcp_mstamp_refresh(tp);
582 time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time); 585 time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
583 if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0) 586 if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0)
584 return; 587 return;
@@ -1941,6 +1944,8 @@ void tcp_enter_loss(struct sock *sk)
1941 if (is_reneg) { 1944 if (is_reneg) {
1942 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING); 1945 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
1943 tp->sacked_out = 0; 1946 tp->sacked_out = 0;
1947 /* Mark SACK reneging until we recover from this loss event. */
1948 tp->is_sack_reneg = 1;
1944 } 1949 }
1945 tcp_clear_all_retrans_hints(tp); 1950 tcp_clear_all_retrans_hints(tp);
1946 1951
@@ -2326,6 +2331,7 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
2326 } 2331 }
2327 tp->snd_cwnd_stamp = tcp_jiffies32; 2332 tp->snd_cwnd_stamp = tcp_jiffies32;
2328 tp->undo_marker = 0; 2333 tp->undo_marker = 0;
2334 tp->rack.advanced = 1; /* Force RACK to re-exam losses */
2329} 2335}
2330 2336
2331static inline bool tcp_may_undo(const struct tcp_sock *tp) 2337static inline bool tcp_may_undo(const struct tcp_sock *tp)
@@ -2364,6 +2370,7 @@ static bool tcp_try_undo_recovery(struct sock *sk)
2364 return true; 2370 return true;
2365 } 2371 }
2366 tcp_set_ca_state(sk, TCP_CA_Open); 2372 tcp_set_ca_state(sk, TCP_CA_Open);
2373 tp->is_sack_reneg = 0;
2367 return false; 2374 return false;
2368} 2375}
2369 2376
@@ -2397,8 +2404,10 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
2397 NET_INC_STATS(sock_net(sk), 2404 NET_INC_STATS(sock_net(sk),
2398 LINUX_MIB_TCPSPURIOUSRTOS); 2405 LINUX_MIB_TCPSPURIOUSRTOS);
2399 inet_csk(sk)->icsk_retransmits = 0; 2406 inet_csk(sk)->icsk_retransmits = 0;
2400 if (frto_undo || tcp_is_sack(tp)) 2407 if (frto_undo || tcp_is_sack(tp)) {
2401 tcp_set_ca_state(sk, TCP_CA_Open); 2408 tcp_set_ca_state(sk, TCP_CA_Open);
2409 tp->is_sack_reneg = 0;
2410 }
2402 return true; 2411 return true;
2403 } 2412 }
2404 return false; 2413 return false;
@@ -3495,6 +3504,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3495 struct tcp_sacktag_state sack_state; 3504 struct tcp_sacktag_state sack_state;
3496 struct rate_sample rs = { .prior_delivered = 0 }; 3505 struct rate_sample rs = { .prior_delivered = 0 };
3497 u32 prior_snd_una = tp->snd_una; 3506 u32 prior_snd_una = tp->snd_una;
3507 bool is_sack_reneg = tp->is_sack_reneg;
3498 u32 ack_seq = TCP_SKB_CB(skb)->seq; 3508 u32 ack_seq = TCP_SKB_CB(skb)->seq;
3499 u32 ack = TCP_SKB_CB(skb)->ack_seq; 3509 u32 ack = TCP_SKB_CB(skb)->ack_seq;
3500 bool is_dupack = false; 3510 bool is_dupack = false;
@@ -3611,7 +3621,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3611 3621
3612 delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */ 3622 delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */
3613 lost = tp->lost - lost; /* freshly marked lost */ 3623 lost = tp->lost - lost; /* freshly marked lost */
3614 tcp_rate_gen(sk, delivered, lost, sack_state.rate); 3624 tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate);
3615 tcp_cong_control(sk, ack, delivered, flag, sack_state.rate); 3625 tcp_cong_control(sk, ack, delivered, flag, sack_state.rate);
3616 tcp_xmit_recovery(sk, rexmit); 3626 tcp_xmit_recovery(sk, rexmit);
3617 return 1; 3627 return 1;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index c6bc0c4d19c6..94e28350f420 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -848,7 +848,7 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
848 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off, 848 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
849 req->ts_recent, 849 req->ts_recent,
850 0, 850 0,
851 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr, 851 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
852 AF_INET), 852 AF_INET),
853 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0, 853 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
854 ip_hdr(skb)->tos); 854 ip_hdr(skb)->tos);
@@ -1591,6 +1591,34 @@ int tcp_filter(struct sock *sk, struct sk_buff *skb)
1591} 1591}
1592EXPORT_SYMBOL(tcp_filter); 1592EXPORT_SYMBOL(tcp_filter);
1593 1593
1594static void tcp_v4_restore_cb(struct sk_buff *skb)
1595{
1596 memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
1597 sizeof(struct inet_skb_parm));
1598}
1599
1600static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
1601 const struct tcphdr *th)
1602{
1603 /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
1604 * barrier() makes sure compiler wont play fool^Waliasing games.
1605 */
1606 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1607 sizeof(struct inet_skb_parm));
1608 barrier();
1609
1610 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1611 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1612 skb->len - th->doff * 4);
1613 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1614 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1615 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1616 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1617 TCP_SKB_CB(skb)->sacked = 0;
1618 TCP_SKB_CB(skb)->has_rxtstamp =
1619 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1620}
1621
1594/* 1622/*
1595 * From tcp_input.c 1623 * From tcp_input.c
1596 */ 1624 */
@@ -1631,24 +1659,6 @@ int tcp_v4_rcv(struct sk_buff *skb)
1631 1659
1632 th = (const struct tcphdr *)skb->data; 1660 th = (const struct tcphdr *)skb->data;
1633 iph = ip_hdr(skb); 1661 iph = ip_hdr(skb);
1634 /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
1635 * barrier() makes sure compiler wont play fool^Waliasing games.
1636 */
1637 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1638 sizeof(struct inet_skb_parm));
1639 barrier();
1640
1641 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1642 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1643 skb->len - th->doff * 4);
1644 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1645 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1646 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1647 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1648 TCP_SKB_CB(skb)->sacked = 0;
1649 TCP_SKB_CB(skb)->has_rxtstamp =
1650 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1651
1652lookup: 1662lookup:
1653 sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source, 1663 sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
1654 th->dest, sdif, &refcounted); 1664 th->dest, sdif, &refcounted);
@@ -1679,14 +1689,19 @@ process:
1679 sock_hold(sk); 1689 sock_hold(sk);
1680 refcounted = true; 1690 refcounted = true;
1681 nsk = NULL; 1691 nsk = NULL;
1682 if (!tcp_filter(sk, skb)) 1692 if (!tcp_filter(sk, skb)) {
1693 th = (const struct tcphdr *)skb->data;
1694 iph = ip_hdr(skb);
1695 tcp_v4_fill_cb(skb, iph, th);
1683 nsk = tcp_check_req(sk, skb, req, false); 1696 nsk = tcp_check_req(sk, skb, req, false);
1697 }
1684 if (!nsk) { 1698 if (!nsk) {
1685 reqsk_put(req); 1699 reqsk_put(req);
1686 goto discard_and_relse; 1700 goto discard_and_relse;
1687 } 1701 }
1688 if (nsk == sk) { 1702 if (nsk == sk) {
1689 reqsk_put(req); 1703 reqsk_put(req);
1704 tcp_v4_restore_cb(skb);
1690 } else if (tcp_child_process(sk, nsk, skb)) { 1705 } else if (tcp_child_process(sk, nsk, skb)) {
1691 tcp_v4_send_reset(nsk, skb); 1706 tcp_v4_send_reset(nsk, skb);
1692 goto discard_and_relse; 1707 goto discard_and_relse;
@@ -1712,6 +1727,7 @@ process:
1712 goto discard_and_relse; 1727 goto discard_and_relse;
1713 th = (const struct tcphdr *)skb->data; 1728 th = (const struct tcphdr *)skb->data;
1714 iph = ip_hdr(skb); 1729 iph = ip_hdr(skb);
1730 tcp_v4_fill_cb(skb, iph, th);
1715 1731
1716 skb->dev = NULL; 1732 skb->dev = NULL;
1717 1733
@@ -1742,6 +1758,8 @@ no_tcp_socket:
1742 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 1758 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1743 goto discard_it; 1759 goto discard_it;
1744 1760
1761 tcp_v4_fill_cb(skb, iph, th);
1762
1745 if (tcp_checksum_complete(skb)) { 1763 if (tcp_checksum_complete(skb)) {
1746csum_error: 1764csum_error:
1747 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS); 1765 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
@@ -1768,6 +1786,8 @@ do_time_wait:
1768 goto discard_it; 1786 goto discard_it;
1769 } 1787 }
1770 1788
1789 tcp_v4_fill_cb(skb, iph, th);
1790
1771 if (tcp_checksum_complete(skb)) { 1791 if (tcp_checksum_complete(skb)) {
1772 inet_twsk_put(inet_twsk(sk)); 1792 inet_twsk_put(inet_twsk(sk));
1773 goto csum_error; 1793 goto csum_error;
@@ -1784,6 +1804,7 @@ do_time_wait:
1784 if (sk2) { 1804 if (sk2) {
1785 inet_twsk_deschedule_put(inet_twsk(sk)); 1805 inet_twsk_deschedule_put(inet_twsk(sk));
1786 sk = sk2; 1806 sk = sk2;
1807 tcp_v4_restore_cb(skb);
1787 refcounted = false; 1808 refcounted = false;
1788 goto process; 1809 goto process;
1789 } 1810 }
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index e36eff0403f4..b079b619b60c 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -310,10 +310,16 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
310 if (state == TCP_TIME_WAIT) 310 if (state == TCP_TIME_WAIT)
311 timeo = TCP_TIMEWAIT_LEN; 311 timeo = TCP_TIMEWAIT_LEN;
312 312
313 /* tw_timer is pinned, so we need to make sure BH are disabled
314 * in following section, otherwise timer handler could run before
315 * we complete the initialization.
316 */
317 local_bh_disable();
313 inet_twsk_schedule(tw, timeo); 318 inet_twsk_schedule(tw, timeo);
314 /* Linkage updates. */ 319 /* Linkage updates. */
315 __inet_twsk_hashdance(tw, sk, &tcp_hashinfo); 320 __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
316 inet_twsk_put(tw); 321 inet_twsk_put(tw);
322 local_bh_enable();
317 } else { 323 } else {
318 /* Sorry, if we're out of memory, just CLOSE this 324 /* Sorry, if we're out of memory, just CLOSE this
319 * socket up. We've got bigger problems than 325 * socket up. We've got bigger problems than
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index b6a2aa1dcf56..4d58e2ce0b5b 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -32,6 +32,9 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
32static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb, 32static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
33 netdev_features_t features) 33 netdev_features_t features)
34{ 34{
35 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
36 return ERR_PTR(-EINVAL);
37
35 if (!pskb_may_pull(skb, sizeof(struct tcphdr))) 38 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
36 return ERR_PTR(-EINVAL); 39 return ERR_PTR(-EINVAL);
37 40
diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
index 3330a370d306..c61240e43923 100644
--- a/net/ipv4/tcp_rate.c
+++ b/net/ipv4/tcp_rate.c
@@ -106,7 +106,7 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
106 106
107/* Update the connection delivery information and generate a rate sample. */ 107/* Update the connection delivery information and generate a rate sample. */
108void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost, 108void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
109 struct rate_sample *rs) 109 bool is_sack_reneg, struct rate_sample *rs)
110{ 110{
111 struct tcp_sock *tp = tcp_sk(sk); 111 struct tcp_sock *tp = tcp_sk(sk);
112 u32 snd_us, ack_us; 112 u32 snd_us, ack_us;
@@ -124,8 +124,12 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
124 124
125 rs->acked_sacked = delivered; /* freshly ACKed or SACKed */ 125 rs->acked_sacked = delivered; /* freshly ACKed or SACKed */
126 rs->losses = lost; /* freshly marked lost */ 126 rs->losses = lost; /* freshly marked lost */
127 /* Return an invalid sample if no timing information is available. */ 127 /* Return an invalid sample if no timing information is available or
128 if (!rs->prior_mstamp) { 128 * in recovery from loss with SACK reneging. Rate samples taken during
129 * a SACK reneging event may overestimate bw by including packets that
130 * were SACKed before the reneg.
131 */
132 if (!rs->prior_mstamp || is_sack_reneg) {
129 rs->delivered = -1; 133 rs->delivered = -1;
130 rs->interval_us = -1; 134 rs->interval_us = -1;
131 return; 135 return;
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index d3ea89020c69..3a81720ac0c4 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -55,7 +55,8 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
55 * to queuing or delayed ACKs. 55 * to queuing or delayed ACKs.
56 */ 56 */
57 reo_wnd = 1000; 57 reo_wnd = 1000;
58 if ((tp->rack.reord || !tp->lost_out) && min_rtt != ~0U) { 58 if ((tp->rack.reord || inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery) &&
59 min_rtt != ~0U) {
59 reo_wnd = max((min_rtt >> 2) * tp->rack.reo_wnd_steps, reo_wnd); 60 reo_wnd = max((min_rtt >> 2) * tp->rack.reo_wnd_steps, reo_wnd);
60 reo_wnd = min(reo_wnd, tp->srtt_us >> 3); 61 reo_wnd = min(reo_wnd, tp->srtt_us >> 3);
61 } 62 }
@@ -79,12 +80,12 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
79 */ 80 */
80 remaining = tp->rack.rtt_us + reo_wnd - 81 remaining = tp->rack.rtt_us + reo_wnd -
81 tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp); 82 tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp);
82 if (remaining < 0) { 83 if (remaining <= 0) {
83 tcp_rack_mark_skb_lost(sk, skb); 84 tcp_rack_mark_skb_lost(sk, skb);
84 list_del_init(&skb->tcp_tsorted_anchor); 85 list_del_init(&skb->tcp_tsorted_anchor);
85 } else { 86 } else {
86 /* Record maximum wait time (+1 to avoid 0) */ 87 /* Record maximum wait time */
87 *reo_timeout = max_t(u32, *reo_timeout, 1 + remaining); 88 *reo_timeout = max_t(u32, *reo_timeout, remaining);
88 } 89 }
89 } 90 }
90} 91}
@@ -116,13 +117,8 @@ void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
116{ 117{
117 u32 rtt_us; 118 u32 rtt_us;
118 119
119 if (tp->rack.mstamp &&
120 !tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
121 end_seq, tp->rack.end_seq))
122 return;
123
124 rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time); 120 rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
125 if (sacked & TCPCB_RETRANS) { 121 if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
126 /* If the sacked packet was retransmitted, it's ambiguous 122 /* If the sacked packet was retransmitted, it's ambiguous
127 * whether the retransmission or the original (or the prior 123 * whether the retransmission or the original (or the prior
128 * retransmission) was sacked. 124 * retransmission) was sacked.
@@ -133,13 +129,15 @@ void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
133 * so it's at least one RTT (i.e., retransmission is at least 129 * so it's at least one RTT (i.e., retransmission is at least
134 * an RTT later). 130 * an RTT later).
135 */ 131 */
136 if (rtt_us < tcp_min_rtt(tp)) 132 return;
137 return;
138 } 133 }
139 tp->rack.rtt_us = rtt_us;
140 tp->rack.mstamp = xmit_time;
141 tp->rack.end_seq = end_seq;
142 tp->rack.advanced = 1; 134 tp->rack.advanced = 1;
135 tp->rack.rtt_us = rtt_us;
136 if (tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
137 end_seq, tp->rack.end_seq)) {
138 tp->rack.mstamp = xmit_time;
139 tp->rack.end_seq = end_seq;
140 }
143} 141}
144 142
145/* We have waited long enough to accommodate reordering. Mark the expired 143/* We have waited long enough to accommodate reordering. Mark the expired
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 16df6dd44b98..388158c9d9f6 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -48,11 +48,19 @@ static void tcp_write_err(struct sock *sk)
48 * to prevent DoS attacks. It is called when a retransmission timeout 48 * to prevent DoS attacks. It is called when a retransmission timeout
49 * or zero probe timeout occurs on orphaned socket. 49 * or zero probe timeout occurs on orphaned socket.
50 * 50 *
51 * Also close if our net namespace is exiting; in that case there is no
52 * hope of ever communicating again since all netns interfaces are already
53 * down (or about to be down), and we need to release our dst references,
54 * which have been moved to the netns loopback interface, so the namespace
55 * can finish exiting. This condition is only possible if we are a kernel
56 * socket, as those do not hold references to the namespace.
57 *
51 * Criteria is still not confirmed experimentally and may change. 58 * Criteria is still not confirmed experimentally and may change.
52 * We kill the socket, if: 59 * We kill the socket, if:
53 * 1. If number of orphaned sockets exceeds an administratively configured 60 * 1. If number of orphaned sockets exceeds an administratively configured
54 * limit. 61 * limit.
55 * 2. If we have strong memory pressure. 62 * 2. If we have strong memory pressure.
63 * 3. If our net namespace is exiting.
56 */ 64 */
57static int tcp_out_of_resources(struct sock *sk, bool do_reset) 65static int tcp_out_of_resources(struct sock *sk, bool do_reset)
58{ 66{
@@ -81,6 +89,13 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
81 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY); 89 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
82 return 1; 90 return 1;
83 } 91 }
92
93 if (!check_net(sock_net(sk))) {
94 /* Not possible to send reset; just close */
95 tcp_done(sk);
96 return 1;
97 }
98
84 return 0; 99 return 0;
85} 100}
86 101
@@ -264,6 +279,7 @@ void tcp_delack_timer_handler(struct sock *sk)
264 icsk->icsk_ack.pingpong = 0; 279 icsk->icsk_ack.pingpong = 0;
265 icsk->icsk_ack.ato = TCP_ATO_MIN; 280 icsk->icsk_ack.ato = TCP_ATO_MIN;
266 } 281 }
282 tcp_mstamp_refresh(tcp_sk(sk));
267 tcp_send_ack(sk); 283 tcp_send_ack(sk);
268 __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS); 284 __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
269 } 285 }
@@ -632,6 +648,7 @@ static void tcp_keepalive_timer (struct timer_list *t)
632 goto out; 648 goto out;
633 } 649 }
634 650
651 tcp_mstamp_refresh(tp);
635 if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) { 652 if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
636 if (tp->linger2 >= 0) { 653 if (tp->linger2 >= 0) {
637 const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN; 654 const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 01801b77bd0d..ea6e6e7df0ee 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -203,6 +203,9 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
203 goto out; 203 goto out;
204 } 204 }
205 205
206 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
207 goto out;
208
206 if (!pskb_may_pull(skb, sizeof(struct udphdr))) 209 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
207 goto out; 210 goto out;
208 211
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index e50b7fea57ee..bcfc00e88756 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -23,6 +23,12 @@ int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb)
23 return xfrm4_extract_header(skb); 23 return xfrm4_extract_header(skb);
24} 24}
25 25
26static int xfrm4_rcv_encap_finish2(struct net *net, struct sock *sk,
27 struct sk_buff *skb)
28{
29 return dst_input(skb);
30}
31
26static inline int xfrm4_rcv_encap_finish(struct net *net, struct sock *sk, 32static inline int xfrm4_rcv_encap_finish(struct net *net, struct sock *sk,
27 struct sk_buff *skb) 33 struct sk_buff *skb)
28{ 34{
@@ -33,7 +39,11 @@ static inline int xfrm4_rcv_encap_finish(struct net *net, struct sock *sk,
33 iph->tos, skb->dev)) 39 iph->tos, skb->dev))
34 goto drop; 40 goto drop;
35 } 41 }
36 return dst_input(skb); 42
43 if (xfrm_trans_queue(skb, xfrm4_rcv_encap_finish2))
44 goto drop;
45
46 return 0;
37drop: 47drop:
38 kfree_skb(skb); 48 kfree_skb(skb);
39 return NET_RX_DROP; 49 return NET_RX_DROP;
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index e6265e2c274e..20ca486b3cad 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -92,6 +92,7 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
92 92
93 skb_reset_network_header(skb); 93 skb_reset_network_header(skb);
94 skb_mac_header_rebuild(skb); 94 skb_mac_header_rebuild(skb);
95 eth_hdr(skb)->h_proto = skb->protocol;
95 96
96 err = 0; 97 err = 0;
97 98
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index c26f71234b9c..c9441ca45399 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -210,7 +210,6 @@ lookup_protocol:
210 np->mcast_hops = IPV6_DEFAULT_MCASTHOPS; 210 np->mcast_hops = IPV6_DEFAULT_MCASTHOPS;
211 np->mc_loop = 1; 211 np->mc_loop = 1;
212 np->pmtudisc = IPV6_PMTUDISC_WANT; 212 np->pmtudisc = IPV6_PMTUDISC_WANT;
213 np->autoflowlabel = ip6_default_np_autolabel(net);
214 np->repflow = net->ipv6.sysctl.flowlabel_reflect; 213 np->repflow = net->ipv6.sysctl.flowlabel_reflect;
215 sk->sk_ipv6only = net->ipv6.sysctl.bindv6only; 214 sk->sk_ipv6only = net->ipv6.sysctl.bindv6only;
216 215
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index a902ff8f59be..1a7f00cd4803 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -890,13 +890,12 @@ static int esp6_init_state(struct xfrm_state *x)
890 x->props.header_len += IPV4_BEET_PHMAXLEN + 890 x->props.header_len += IPV4_BEET_PHMAXLEN +
891 (sizeof(struct ipv6hdr) - sizeof(struct iphdr)); 891 (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
892 break; 892 break;
893 default:
893 case XFRM_MODE_TRANSPORT: 894 case XFRM_MODE_TRANSPORT:
894 break; 895 break;
895 case XFRM_MODE_TUNNEL: 896 case XFRM_MODE_TUNNEL:
896 x->props.header_len += sizeof(struct ipv6hdr); 897 x->props.header_len += sizeof(struct ipv6hdr);
897 break; 898 break;
898 default:
899 goto error;
900 } 899 }
901 900
902 align = ALIGN(crypto_aead_blocksize(aead), 4); 901 align = ALIGN(crypto_aead_blocksize(aead), 4);
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index 333a478aa161..f52c314d4c97 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -60,7 +60,8 @@ static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
60 int nhoff; 60 int nhoff;
61 int err; 61 int err;
62 62
63 skb_pull(skb, offset); 63 if (!pskb_pull(skb, offset))
64 return NULL;
64 65
65 if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0) 66 if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
66 goto out; 67 goto out;
@@ -148,6 +149,9 @@ static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
148 if (!xo) 149 if (!xo)
149 goto out; 150 goto out;
150 151
152 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
153 goto out;
154
151 seq = xo->seq.low; 155 seq = xo->seq.low;
152 156
153 x = skb->sp->xvec[skb->sp->len - 1]; 157 x = skb->sp->xvec[skb->sp->len - 1];
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 83bd75713535..bc68eb661970 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -925,6 +925,15 @@ static void ipv6_push_rthdr4(struct sk_buff *skb, u8 *proto,
925 sr_phdr->segments[0] = **addr_p; 925 sr_phdr->segments[0] = **addr_p;
926 *addr_p = &sr_ihdr->segments[sr_ihdr->segments_left]; 926 *addr_p = &sr_ihdr->segments[sr_ihdr->segments_left];
927 927
928 if (sr_ihdr->hdrlen > hops * 2) {
929 int tlvs_offset, tlvs_length;
930
931 tlvs_offset = (1 + hops * 2) << 3;
932 tlvs_length = (sr_ihdr->hdrlen - hops * 2) << 3;
933 memcpy((char *)sr_phdr + tlvs_offset,
934 (char *)sr_ihdr + tlvs_offset, tlvs_length);
935 }
936
928#ifdef CONFIG_IPV6_SEG6_HMAC 937#ifdef CONFIG_IPV6_SEG6_HMAC
929 if (sr_has_hmac(sr_phdr)) { 938 if (sr_has_hmac(sr_phdr)) {
930 struct net *net = NULL; 939 struct net *net = NULL;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index f5285f4e1d08..217683d40f12 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -640,6 +640,11 @@ static struct fib6_node *fib6_add_1(struct net *net,
640 if (!(fn->fn_flags & RTN_RTINFO)) { 640 if (!(fn->fn_flags & RTN_RTINFO)) {
641 RCU_INIT_POINTER(fn->leaf, NULL); 641 RCU_INIT_POINTER(fn->leaf, NULL);
642 rt6_release(leaf); 642 rt6_release(leaf);
643 /* remove null_entry in the root node */
644 } else if (fn->fn_flags & RTN_TL_ROOT &&
645 rcu_access_pointer(fn->leaf) ==
646 net->ipv6.ip6_null_entry) {
647 RCU_INIT_POINTER(fn->leaf, NULL);
643 } 648 }
644 649
645 return fn; 650 return fn;
@@ -1221,8 +1226,14 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
1221 } 1226 }
1222 1227
1223 if (!rcu_access_pointer(fn->leaf)) { 1228 if (!rcu_access_pointer(fn->leaf)) {
1224 atomic_inc(&rt->rt6i_ref); 1229 if (fn->fn_flags & RTN_TL_ROOT) {
1225 rcu_assign_pointer(fn->leaf, rt); 1230 /* put back null_entry for root node */
1231 rcu_assign_pointer(fn->leaf,
1232 info->nl_net->ipv6.ip6_null_entry);
1233 } else {
1234 atomic_inc(&rt->rt6i_ref);
1235 rcu_assign_pointer(fn->leaf, rt);
1236 }
1226 } 1237 }
1227 fn = sn; 1238 fn = sn;
1228 } 1239 }
@@ -1241,23 +1252,28 @@ out:
1241 * If fib6_add_1 has cleared the old leaf pointer in the 1252 * If fib6_add_1 has cleared the old leaf pointer in the
1242 * super-tree leaf node we have to find a new one for it. 1253 * super-tree leaf node we have to find a new one for it.
1243 */ 1254 */
1244 struct rt6_info *pn_leaf = rcu_dereference_protected(pn->leaf, 1255 if (pn != fn) {
1245 lockdep_is_held(&table->tb6_lock)); 1256 struct rt6_info *pn_leaf =
1246 if (pn != fn && pn_leaf == rt) { 1257 rcu_dereference_protected(pn->leaf,
1247 pn_leaf = NULL; 1258 lockdep_is_held(&table->tb6_lock));
1248 RCU_INIT_POINTER(pn->leaf, NULL); 1259 if (pn_leaf == rt) {
1249 atomic_dec(&rt->rt6i_ref); 1260 pn_leaf = NULL;
1250 } 1261 RCU_INIT_POINTER(pn->leaf, NULL);
1251 if (pn != fn && !pn_leaf && !(pn->fn_flags & RTN_RTINFO)) { 1262 atomic_dec(&rt->rt6i_ref);
1252 pn_leaf = fib6_find_prefix(info->nl_net, table, pn);
1253#if RT6_DEBUG >= 2
1254 if (!pn_leaf) {
1255 WARN_ON(!pn_leaf);
1256 pn_leaf = info->nl_net->ipv6.ip6_null_entry;
1257 } 1263 }
1264 if (!pn_leaf && !(pn->fn_flags & RTN_RTINFO)) {
1265 pn_leaf = fib6_find_prefix(info->nl_net, table,
1266 pn);
1267#if RT6_DEBUG >= 2
1268 if (!pn_leaf) {
1269 WARN_ON(!pn_leaf);
1270 pn_leaf =
1271 info->nl_net->ipv6.ip6_null_entry;
1272 }
1258#endif 1273#endif
1259 atomic_inc(&pn_leaf->rt6i_ref); 1274 atomic_inc(&pn_leaf->rt6i_ref);
1260 rcu_assign_pointer(pn->leaf, pn_leaf); 1275 rcu_assign_pointer(pn->leaf, pn_leaf);
1276 }
1261 } 1277 }
1262#endif 1278#endif
1263 goto failure; 1279 goto failure;
@@ -1265,13 +1281,17 @@ out:
1265 return err; 1281 return err;
1266 1282
1267failure: 1283failure:
1268 /* fn->leaf could be NULL if fn is an intermediate node and we 1284 /* fn->leaf could be NULL and fib6_repair_tree() needs to be called if:
1269 * failed to add the new route to it in both subtree creation 1285 * 1. fn is an intermediate node and we failed to add the new
1270 * failure and fib6_add_rt2node() failure case. 1286 * route to it in both subtree creation failure and fib6_add_rt2node()
1271 * In both cases, fib6_repair_tree() should be called to fix 1287 * failure case.
1272 * fn->leaf. 1288 * 2. fn is the root node in the table and we fail to add the first
1289 * default route to it.
1273 */ 1290 */
1274 if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) 1291 if (fn &&
1292 (!(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)) ||
1293 (fn->fn_flags & RTN_TL_ROOT &&
1294 !rcu_access_pointer(fn->leaf))))
1275 fib6_repair_tree(info->nl_net, table, fn); 1295 fib6_repair_tree(info->nl_net, table, fn);
1276 /* Always release dst as dst->__refcnt is guaranteed 1296 /* Always release dst as dst->__refcnt is guaranteed
1277 * to be taken before entering this function 1297 * to be taken before entering this function
@@ -1526,6 +1546,12 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
1526 struct fib6_walker *w; 1546 struct fib6_walker *w;
1527 int iter = 0; 1547 int iter = 0;
1528 1548
1549 /* Set fn->leaf to null_entry for root node. */
1550 if (fn->fn_flags & RTN_TL_ROOT) {
1551 rcu_assign_pointer(fn->leaf, net->ipv6.ip6_null_entry);
1552 return fn;
1553 }
1554
1529 for (;;) { 1555 for (;;) {
1530 struct fib6_node *fn_r = rcu_dereference_protected(fn->right, 1556 struct fib6_node *fn_r = rcu_dereference_protected(fn->right,
1531 lockdep_is_held(&table->tb6_lock)); 1557 lockdep_is_held(&table->tb6_lock));
@@ -1680,10 +1706,15 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
1680 } 1706 }
1681 read_unlock(&net->ipv6.fib6_walker_lock); 1707 read_unlock(&net->ipv6.fib6_walker_lock);
1682 1708
1683 /* If it was last route, expunge its radix tree node */ 1709 /* If it was last route, call fib6_repair_tree() to:
1710 * 1. For root node, put back null_entry as how the table was created.
1711 * 2. For other nodes, expunge its radix tree node.
1712 */
1684 if (!rcu_access_pointer(fn->leaf)) { 1713 if (!rcu_access_pointer(fn->leaf)) {
1685 fn->fn_flags &= ~RTN_RTINFO; 1714 if (!(fn->fn_flags & RTN_TL_ROOT)) {
1686 net->ipv6.rt6_stats->fib_route_nodes--; 1715 fn->fn_flags &= ~RTN_RTINFO;
1716 net->ipv6.rt6_stats->fib_route_nodes--;
1717 }
1687 fn = fib6_repair_tree(net, table, fn); 1718 fn = fib6_repair_tree(net, table, fn);
1688 } 1719 }
1689 1720
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 4cfd8e0696fe..873549228ccb 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -337,11 +337,12 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
337 337
338 nt->dev = dev; 338 nt->dev = dev;
339 nt->net = dev_net(dev); 339 nt->net = dev_net(dev);
340 ip6gre_tnl_link_config(nt, 1);
341 340
342 if (register_netdevice(dev) < 0) 341 if (register_netdevice(dev) < 0)
343 goto failed_free; 342 goto failed_free;
344 343
344 ip6gre_tnl_link_config(nt, 1);
345
345 /* Can use a lockless transmit, unless we generate output sequences */ 346 /* Can use a lockless transmit, unless we generate output sequences */
346 if (!(nt->parms.o_flags & TUNNEL_SEQ)) 347 if (!(nt->parms.o_flags & TUNNEL_SEQ))
347 dev->features |= NETIF_F_LLTX; 348 dev->features |= NETIF_F_LLTX;
@@ -1014,6 +1015,36 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
1014 eth_random_addr(dev->perm_addr); 1015 eth_random_addr(dev->perm_addr);
1015} 1016}
1016 1017
1018#define GRE6_FEATURES (NETIF_F_SG | \
1019 NETIF_F_FRAGLIST | \
1020 NETIF_F_HIGHDMA | \
1021 NETIF_F_HW_CSUM)
1022
1023static void ip6gre_tnl_init_features(struct net_device *dev)
1024{
1025 struct ip6_tnl *nt = netdev_priv(dev);
1026
1027 dev->features |= GRE6_FEATURES;
1028 dev->hw_features |= GRE6_FEATURES;
1029
1030 if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
1031 /* TCP offload with GRE SEQ is not supported, nor
1032 * can we support 2 levels of outer headers requiring
1033 * an update.
1034 */
1035 if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
1036 nt->encap.type == TUNNEL_ENCAP_NONE) {
1037 dev->features |= NETIF_F_GSO_SOFTWARE;
1038 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
1039 }
1040
1041 /* Can use a lockless transmit, unless we generate
1042 * output sequences
1043 */
1044 dev->features |= NETIF_F_LLTX;
1045 }
1046}
1047
1017static int ip6gre_tunnel_init_common(struct net_device *dev) 1048static int ip6gre_tunnel_init_common(struct net_device *dev)
1018{ 1049{
1019 struct ip6_tnl *tunnel; 1050 struct ip6_tnl *tunnel;
@@ -1048,6 +1079,8 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
1048 if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 1079 if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1049 dev->mtu -= 8; 1080 dev->mtu -= 8;
1050 1081
1082 ip6gre_tnl_init_features(dev);
1083
1051 return 0; 1084 return 0;
1052} 1085}
1053 1086
@@ -1271,7 +1304,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
1271 1304
1272static int ip6gre_tap_init(struct net_device *dev) 1305static int ip6gre_tap_init(struct net_device *dev)
1273{ 1306{
1274 struct ip6_tnl *tunnel;
1275 int ret; 1307 int ret;
1276 1308
1277 ret = ip6gre_tunnel_init_common(dev); 1309 ret = ip6gre_tunnel_init_common(dev);
@@ -1280,10 +1312,6 @@ static int ip6gre_tap_init(struct net_device *dev)
1280 1312
1281 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1313 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1282 1314
1283 tunnel = netdev_priv(dev);
1284
1285 ip6gre_tnl_link_config(tunnel, 1);
1286
1287 return 0; 1315 return 0;
1288} 1316}
1289 1317
@@ -1298,16 +1326,12 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
1298 .ndo_get_iflink = ip6_tnl_get_iflink, 1326 .ndo_get_iflink = ip6_tnl_get_iflink,
1299}; 1327};
1300 1328
1301#define GRE6_FEATURES (NETIF_F_SG | \
1302 NETIF_F_FRAGLIST | \
1303 NETIF_F_HIGHDMA | \
1304 NETIF_F_HW_CSUM)
1305
1306static void ip6gre_tap_setup(struct net_device *dev) 1329static void ip6gre_tap_setup(struct net_device *dev)
1307{ 1330{
1308 1331
1309 ether_setup(dev); 1332 ether_setup(dev);
1310 1333
1334 dev->max_mtu = 0;
1311 dev->netdev_ops = &ip6gre_tap_netdev_ops; 1335 dev->netdev_ops = &ip6gre_tap_netdev_ops;
1312 dev->needs_free_netdev = true; 1336 dev->needs_free_netdev = true;
1313 dev->priv_destructor = ip6gre_dev_free; 1337 dev->priv_destructor = ip6gre_dev_free;
@@ -1380,32 +1404,16 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
1380 1404
1381 nt->dev = dev; 1405 nt->dev = dev;
1382 nt->net = dev_net(dev); 1406 nt->net = dev_net(dev);
1383 ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
1384
1385 dev->features |= GRE6_FEATURES;
1386 dev->hw_features |= GRE6_FEATURES;
1387
1388 if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
1389 /* TCP offload with GRE SEQ is not supported, nor
1390 * can we support 2 levels of outer headers requiring
1391 * an update.
1392 */
1393 if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
1394 (nt->encap.type == TUNNEL_ENCAP_NONE)) {
1395 dev->features |= NETIF_F_GSO_SOFTWARE;
1396 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
1397 }
1398
1399 /* Can use a lockless transmit, unless we generate
1400 * output sequences
1401 */
1402 dev->features |= NETIF_F_LLTX;
1403 }
1404 1407
1405 err = register_netdevice(dev); 1408 err = register_netdevice(dev);
1406 if (err) 1409 if (err)
1407 goto out; 1410 goto out;
1408 1411
1412 ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
1413
1414 if (tb[IFLA_MTU])
1415 ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
1416
1409 dev_hold(dev); 1417 dev_hold(dev);
1410 ip6gre_tunnel_link(ign, nt); 1418 ip6gre_tunnel_link(ign, nt);
1411 1419
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 5110a418cc4d..3763dc01e374 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -166,6 +166,14 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
166 !(IP6CB(skb)->flags & IP6SKB_REROUTED)); 166 !(IP6CB(skb)->flags & IP6SKB_REROUTED));
167} 167}
168 168
169bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
170{
171 if (!np->autoflowlabel_set)
172 return ip6_default_np_autolabel(net);
173 else
174 return np->autoflowlabel;
175}
176
169/* 177/*
170 * xmit an sk_buff (used by TCP, SCTP and DCCP) 178 * xmit an sk_buff (used by TCP, SCTP and DCCP)
171 * Note : socket lock is not held for SYNACK packets, but might be modified 179 * Note : socket lock is not held for SYNACK packets, but might be modified
@@ -230,7 +238,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
230 hlimit = ip6_dst_hoplimit(dst); 238 hlimit = ip6_dst_hoplimit(dst);
231 239
232 ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel, 240 ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
233 np->autoflowlabel, fl6)); 241 ip6_autoflowlabel(net, np), fl6));
234 242
235 hdr->payload_len = htons(seg_len); 243 hdr->payload_len = htons(seg_len);
236 hdr->nexthdr = proto; 244 hdr->nexthdr = proto;
@@ -1198,14 +1206,16 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
1198 v6_cork->tclass = ipc6->tclass; 1206 v6_cork->tclass = ipc6->tclass;
1199 if (rt->dst.flags & DST_XFRM_TUNNEL) 1207 if (rt->dst.flags & DST_XFRM_TUNNEL)
1200 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ? 1208 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1201 rt->dst.dev->mtu : dst_mtu(&rt->dst); 1209 READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
1202 else 1210 else
1203 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ? 1211 mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1204 rt->dst.dev->mtu : dst_mtu(rt->dst.path); 1212 READ_ONCE(rt->dst.dev->mtu) : dst_mtu(rt->dst.path);
1205 if (np->frag_size < mtu) { 1213 if (np->frag_size < mtu) {
1206 if (np->frag_size) 1214 if (np->frag_size)
1207 mtu = np->frag_size; 1215 mtu = np->frag_size;
1208 } 1216 }
1217 if (mtu < IPV6_MIN_MTU)
1218 return -EINVAL;
1209 cork->base.fragsize = mtu; 1219 cork->base.fragsize = mtu;
1210 if (dst_allfrag(rt->dst.path)) 1220 if (dst_allfrag(rt->dst.path))
1211 cork->base.flags |= IPCORK_ALLFRAG; 1221 cork->base.flags |= IPCORK_ALLFRAG;
@@ -1626,7 +1636,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
1626 1636
1627 ip6_flow_hdr(hdr, v6_cork->tclass, 1637 ip6_flow_hdr(hdr, v6_cork->tclass,
1628 ip6_make_flowlabel(net, skb, fl6->flowlabel, 1638 ip6_make_flowlabel(net, skb, fl6->flowlabel,
1629 np->autoflowlabel, fl6)); 1639 ip6_autoflowlabel(net, np), fl6));
1630 hdr->hop_limit = v6_cork->hop_limit; 1640 hdr->hop_limit = v6_cork->hop_limit;
1631 hdr->nexthdr = proto; 1641 hdr->nexthdr = proto;
1632 hdr->saddr = fl6->saddr; 1642 hdr->saddr = fl6->saddr;
@@ -1725,11 +1735,13 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
1725 cork.base.flags = 0; 1735 cork.base.flags = 0;
1726 cork.base.addr = 0; 1736 cork.base.addr = 0;
1727 cork.base.opt = NULL; 1737 cork.base.opt = NULL;
1738 cork.base.dst = NULL;
1728 v6_cork.opt = NULL; 1739 v6_cork.opt = NULL;
1729 err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6); 1740 err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6);
1730 if (err) 1741 if (err) {
1742 ip6_cork_release(&cork, &v6_cork);
1731 return ERR_PTR(err); 1743 return ERR_PTR(err);
1732 1744 }
1733 if (ipc6->dontfrag < 0) 1745 if (ipc6->dontfrag < 0)
1734 ipc6->dontfrag = inet6_sk(sk)->dontfrag; 1746 ipc6->dontfrag = inet6_sk(sk)->dontfrag;
1735 1747
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 3d3092adf1d2..1ee5584c3555 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -642,8 +642,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
642 if (rel_info > dst_mtu(skb_dst(skb2))) 642 if (rel_info > dst_mtu(skb_dst(skb2)))
643 goto out; 643 goto out;
644 644
645 skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), NULL, skb2, 645 skb_dst_update_pmtu(skb2, rel_info);
646 rel_info);
647 } 646 }
648 647
649 icmp_send(skb2, rel_type, rel_code, htonl(rel_info)); 648 icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
@@ -904,7 +903,7 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
904 if (t->parms.collect_md) { 903 if (t->parms.collect_md) {
905 tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0); 904 tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
906 if (!tun_dst) 905 if (!tun_dst)
907 return 0; 906 goto drop;
908 } 907 }
909 ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate, 908 ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
910 log_ecn_error); 909 log_ecn_error);
@@ -1074,10 +1073,11 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1074 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); 1073 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
1075 neigh_release(neigh); 1074 neigh_release(neigh);
1076 } 1075 }
1077 } else if (!(t->parms.flags & 1076 } else if (t->parms.proto != 0 && !(t->parms.flags &
1078 (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) { 1077 (IP6_TNL_F_USE_ORIG_TCLASS |
1079 /* enable the cache only only if the routing decision does 1078 IP6_TNL_F_USE_ORIG_FWMARK))) {
1080 * not depend on the current inner header value 1079 /* enable the cache only if neither the outer protocol nor the
1080 * routing decision depends on the current inner header value
1081 */ 1081 */
1082 use_cache = true; 1082 use_cache = true;
1083 } 1083 }
@@ -1123,10 +1123,14 @@ route_lookup:
1123 max_headroom += 8; 1123 max_headroom += 8;
1124 mtu -= 8; 1124 mtu -= 8;
1125 } 1125 }
1126 if (mtu < IPV6_MIN_MTU) 1126 if (skb->protocol == htons(ETH_P_IPV6)) {
1127 mtu = IPV6_MIN_MTU; 1127 if (mtu < IPV6_MIN_MTU)
1128 if (skb_dst(skb) && !t->parms.collect_md) 1128 mtu = IPV6_MIN_MTU;
1129 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); 1129 } else if (mtu < 576) {
1130 mtu = 576;
1131 }
1132
1133 skb_dst_update_pmtu(skb, mtu);
1130 if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) { 1134 if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
1131 *pmtu = mtu; 1135 *pmtu = mtu;
1132 err = -EMSGSIZE; 1136 err = -EMSGSIZE;
@@ -1671,11 +1675,11 @@ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1671{ 1675{
1672 struct ip6_tnl *tnl = netdev_priv(dev); 1676 struct ip6_tnl *tnl = netdev_priv(dev);
1673 1677
1674 if (tnl->parms.proto == IPPROTO_IPIP) { 1678 if (tnl->parms.proto == IPPROTO_IPV6) {
1675 if (new_mtu < ETH_MIN_MTU) 1679 if (new_mtu < IPV6_MIN_MTU)
1676 return -EINVAL; 1680 return -EINVAL;
1677 } else { 1681 } else {
1678 if (new_mtu < IPV6_MIN_MTU) 1682 if (new_mtu < ETH_MIN_MTU)
1679 return -EINVAL; 1683 return -EINVAL;
1680 } 1684 }
1681 if (new_mtu > 0xFFF8 - dev->hard_header_len) 1685 if (new_mtu > 0xFFF8 - dev->hard_header_len)
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index dbb74f3c57a7..8c184f84f353 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -483,7 +483,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
483 483
484 mtu = dst_mtu(dst); 484 mtu = dst_mtu(dst);
485 if (!skb->ignore_df && skb->len > mtu) { 485 if (!skb->ignore_df && skb->len > mtu) {
486 skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu); 486 skb_dst_update_pmtu(skb, mtu);
487 487
488 if (skb->protocol == htons(ETH_P_IPV6)) { 488 if (skb->protocol == htons(ETH_P_IPV6)) {
489 if (mtu < IPV6_MIN_MTU) 489 if (mtu < IPV6_MIN_MTU)
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index b9404feabd78..e8ffb5b5d84e 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -886,6 +886,7 @@ pref_skip_coa:
886 break; 886 break;
887 case IPV6_AUTOFLOWLABEL: 887 case IPV6_AUTOFLOWLABEL:
888 np->autoflowlabel = valbool; 888 np->autoflowlabel = valbool;
889 np->autoflowlabel_set = 1;
889 retv = 0; 890 retv = 0;
890 break; 891 break;
891 case IPV6_RECVFRAGSIZE: 892 case IPV6_RECVFRAGSIZE:
@@ -1335,7 +1336,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
1335 break; 1336 break;
1336 1337
1337 case IPV6_AUTOFLOWLABEL: 1338 case IPV6_AUTOFLOWLABEL:
1338 val = np->autoflowlabel; 1339 val = ip6_autoflowlabel(sock_net(sk), np);
1339 break; 1340 break;
1340 1341
1341 case IPV6_RECVFRAGSIZE: 1342 case IPV6_RECVFRAGSIZE:
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index fc6d7d143f2c..844642682b83 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1682,16 +1682,16 @@ static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
1682} 1682}
1683 1683
1684static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, 1684static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1685 int type, struct mld2_grec **ppgr) 1685 int type, struct mld2_grec **ppgr, unsigned int mtu)
1686{ 1686{
1687 struct net_device *dev = pmc->idev->dev;
1688 struct mld2_report *pmr; 1687 struct mld2_report *pmr;
1689 struct mld2_grec *pgr; 1688 struct mld2_grec *pgr;
1690 1689
1691 if (!skb) 1690 if (!skb) {
1692 skb = mld_newpack(pmc->idev, dev->mtu); 1691 skb = mld_newpack(pmc->idev, mtu);
1693 if (!skb) 1692 if (!skb)
1694 return NULL; 1693 return NULL;
1694 }
1695 pgr = skb_put(skb, sizeof(struct mld2_grec)); 1695 pgr = skb_put(skb, sizeof(struct mld2_grec));
1696 pgr->grec_type = type; 1696 pgr->grec_type = type;
1697 pgr->grec_auxwords = 0; 1697 pgr->grec_auxwords = 0;
@@ -1714,10 +1714,15 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1714 struct mld2_grec *pgr = NULL; 1714 struct mld2_grec *pgr = NULL;
1715 struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list; 1715 struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
1716 int scount, stotal, first, isquery, truncate; 1716 int scount, stotal, first, isquery, truncate;
1717 unsigned int mtu;
1717 1718
1718 if (pmc->mca_flags & MAF_NOREPORT) 1719 if (pmc->mca_flags & MAF_NOREPORT)
1719 return skb; 1720 return skb;
1720 1721
1722 mtu = READ_ONCE(dev->mtu);
1723 if (mtu < IPV6_MIN_MTU)
1724 return skb;
1725
1721 isquery = type == MLD2_MODE_IS_INCLUDE || 1726 isquery = type == MLD2_MODE_IS_INCLUDE ||
1722 type == MLD2_MODE_IS_EXCLUDE; 1727 type == MLD2_MODE_IS_EXCLUDE;
1723 truncate = type == MLD2_MODE_IS_EXCLUDE || 1728 truncate = type == MLD2_MODE_IS_EXCLUDE ||
@@ -1738,7 +1743,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1738 AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { 1743 AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
1739 if (skb) 1744 if (skb)
1740 mld_sendpack(skb); 1745 mld_sendpack(skb);
1741 skb = mld_newpack(idev, dev->mtu); 1746 skb = mld_newpack(idev, mtu);
1742 } 1747 }
1743 } 1748 }
1744 first = 1; 1749 first = 1;
@@ -1774,12 +1779,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1774 pgr->grec_nsrcs = htons(scount); 1779 pgr->grec_nsrcs = htons(scount);
1775 if (skb) 1780 if (skb)
1776 mld_sendpack(skb); 1781 mld_sendpack(skb);
1777 skb = mld_newpack(idev, dev->mtu); 1782 skb = mld_newpack(idev, mtu);
1778 first = 1; 1783 first = 1;
1779 scount = 0; 1784 scount = 0;
1780 } 1785 }
1781 if (first) { 1786 if (first) {
1782 skb = add_grhead(skb, pmc, type, &pgr); 1787 skb = add_grhead(skb, pmc, type, &pgr, mtu);
1783 first = 0; 1788 first = 0;
1784 } 1789 }
1785 if (!skb) 1790 if (!skb)
@@ -1814,7 +1819,7 @@ empty_source:
1814 mld_sendpack(skb); 1819 mld_sendpack(skb);
1815 skb = NULL; /* add_grhead will get a new one */ 1820 skb = NULL; /* add_grhead will get a new one */
1816 } 1821 }
1817 skb = add_grhead(skb, pmc, type, &pgr); 1822 skb = add_grhead(skb, pmc, type, &pgr, mtu);
1818 } 1823 }
1819 } 1824 }
1820 if (pgr) 1825 if (pgr)
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index f06e25065a34..1d7ae9366335 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -458,7 +458,6 @@ mark_source_chains(const struct xt_table_info *newinfo,
458 if (!xt_find_jump_offset(offsets, newpos, 458 if (!xt_find_jump_offset(offsets, newpos,
459 newinfo->number)) 459 newinfo->number))
460 return 0; 460 return 0;
461 e = entry0 + newpos;
462 } else { 461 } else {
463 /* ... this is a fallthru */ 462 /* ... this is a fallthru */
464 newpos = pos + e->next_offset; 463 newpos = pos + e->next_offset;
diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c
index 2b1a15846f9a..92c0047e7e33 100644
--- a/net/ipv6/netfilter/ip6t_MASQUERADE.c
+++ b/net/ipv6/netfilter/ip6t_MASQUERADE.c
@@ -33,13 +33,19 @@ static int masquerade_tg6_checkentry(const struct xt_tgchk_param *par)
33 33
34 if (range->flags & NF_NAT_RANGE_MAP_IPS) 34 if (range->flags & NF_NAT_RANGE_MAP_IPS)
35 return -EINVAL; 35 return -EINVAL;
36 return 0; 36 return nf_ct_netns_get(par->net, par->family);
37}
38
39static void masquerade_tg6_destroy(const struct xt_tgdtor_param *par)
40{
41 nf_ct_netns_put(par->net, par->family);
37} 42}
38 43
39static struct xt_target masquerade_tg6_reg __read_mostly = { 44static struct xt_target masquerade_tg6_reg __read_mostly = {
40 .name = "MASQUERADE", 45 .name = "MASQUERADE",
41 .family = NFPROTO_IPV6, 46 .family = NFPROTO_IPV6,
42 .checkentry = masquerade_tg6_checkentry, 47 .checkentry = masquerade_tg6_checkentry,
48 .destroy = masquerade_tg6_destroy,
43 .target = masquerade_tg6, 49 .target = masquerade_tg6,
44 .targetsize = sizeof(struct nf_nat_range), 50 .targetsize = sizeof(struct nf_nat_range),
45 .table = "nat", 51 .table = "nat",
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7a8d1500d374..0458b761f3c5 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2336,6 +2336,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
2336 } 2336 }
2337 2337
2338 rt->dst.flags |= DST_HOST; 2338 rt->dst.flags |= DST_HOST;
2339 rt->dst.input = ip6_input;
2339 rt->dst.output = ip6_output; 2340 rt->dst.output = ip6_output;
2340 rt->rt6i_gateway = fl6->daddr; 2341 rt->rt6i_gateway = fl6->daddr;
2341 rt->rt6i_dst.addr = fl6->daddr; 2342 rt->rt6i_dst.addr = fl6->daddr;
@@ -4297,19 +4298,13 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4297 if (!ipv6_addr_any(&fl6.saddr)) 4298 if (!ipv6_addr_any(&fl6.saddr))
4298 flags |= RT6_LOOKUP_F_HAS_SADDR; 4299 flags |= RT6_LOOKUP_F_HAS_SADDR;
4299 4300
4300 if (!fibmatch) 4301 dst = ip6_route_input_lookup(net, dev, &fl6, flags);
4301 dst = ip6_route_input_lookup(net, dev, &fl6, flags);
4302 else
4303 dst = ip6_route_lookup(net, &fl6, 0);
4304 4302
4305 rcu_read_unlock(); 4303 rcu_read_unlock();
4306 } else { 4304 } else {
4307 fl6.flowi6_oif = oif; 4305 fl6.flowi6_oif = oif;
4308 4306
4309 if (!fibmatch) 4307 dst = ip6_route_output(net, NULL, &fl6);
4310 dst = ip6_route_output(net, NULL, &fl6);
4311 else
4312 dst = ip6_route_lookup(net, &fl6, 0);
4313 } 4308 }
4314 4309
4315 4310
@@ -4326,6 +4321,15 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4326 goto errout; 4321 goto errout;
4327 } 4322 }
4328 4323
4324 if (fibmatch && rt->dst.from) {
4325 struct rt6_info *ort = container_of(rt->dst.from,
4326 struct rt6_info, dst);
4327
4328 dst_hold(&ort->dst);
4329 ip6_rt_put(rt);
4330 rt = ort;
4331 }
4332
4329 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 4333 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
4330 if (!skb) { 4334 if (!skb) {
4331 ip6_rt_put(rt); 4335 ip6_rt_put(rt);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index d60ddcb0bfe2..3873d3877135 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -934,8 +934,8 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
934 df = 0; 934 df = 0;
935 } 935 }
936 936
937 if (tunnel->parms.iph.daddr && skb_dst(skb)) 937 if (tunnel->parms.iph.daddr)
938 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); 938 skb_dst_update_pmtu(skb, mtu);
939 939
940 if (skb->len > mtu && !skb_is_gso(skb)) { 940 if (skb->len > mtu && !skb_is_gso(skb)) {
941 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 941 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
@@ -1098,6 +1098,7 @@ static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p,
1098 ipip6_tunnel_link(sitn, t); 1098 ipip6_tunnel_link(sitn, t);
1099 t->parms.iph.ttl = p->iph.ttl; 1099 t->parms.iph.ttl = p->iph.ttl;
1100 t->parms.iph.tos = p->iph.tos; 1100 t->parms.iph.tos = p->iph.tos;
1101 t->parms.iph.frag_off = p->iph.frag_off;
1101 if (t->parms.link != p->link || t->fwmark != fwmark) { 1102 if (t->parms.link != p->link || t->fwmark != fwmark) {
1102 t->parms.link = p->link; 1103 t->parms.link = p->link;
1103 t->fwmark = fwmark; 1104 t->fwmark = fwmark;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 6bb98c93edfe..7178476b3d2f 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -994,7 +994,7 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
994 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale, 994 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
995 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off, 995 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
996 req->ts_recent, sk->sk_bound_dev_if, 996 req->ts_recent, sk->sk_bound_dev_if,
997 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 997 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
998 0, 0); 998 0, 0);
999} 999}
1000 1000
@@ -1454,7 +1454,6 @@ process:
1454 struct sock *nsk; 1454 struct sock *nsk;
1455 1455
1456 sk = req->rsk_listener; 1456 sk = req->rsk_listener;
1457 tcp_v6_fill_cb(skb, hdr, th);
1458 if (tcp_v6_inbound_md5_hash(sk, skb)) { 1457 if (tcp_v6_inbound_md5_hash(sk, skb)) {
1459 sk_drops_add(sk, skb); 1458 sk_drops_add(sk, skb);
1460 reqsk_put(req); 1459 reqsk_put(req);
@@ -1467,8 +1466,12 @@ process:
1467 sock_hold(sk); 1466 sock_hold(sk);
1468 refcounted = true; 1467 refcounted = true;
1469 nsk = NULL; 1468 nsk = NULL;
1470 if (!tcp_filter(sk, skb)) 1469 if (!tcp_filter(sk, skb)) {
1470 th = (const struct tcphdr *)skb->data;
1471 hdr = ipv6_hdr(skb);
1472 tcp_v6_fill_cb(skb, hdr, th);
1471 nsk = tcp_check_req(sk, skb, req, false); 1473 nsk = tcp_check_req(sk, skb, req, false);
1474 }
1472 if (!nsk) { 1475 if (!nsk) {
1473 reqsk_put(req); 1476 reqsk_put(req);
1474 goto discard_and_relse; 1477 goto discard_and_relse;
@@ -1492,8 +1495,6 @@ process:
1492 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 1495 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1493 goto discard_and_relse; 1496 goto discard_and_relse;
1494 1497
1495 tcp_v6_fill_cb(skb, hdr, th);
1496
1497 if (tcp_v6_inbound_md5_hash(sk, skb)) 1498 if (tcp_v6_inbound_md5_hash(sk, skb))
1498 goto discard_and_relse; 1499 goto discard_and_relse;
1499 1500
@@ -1501,6 +1502,7 @@ process:
1501 goto discard_and_relse; 1502 goto discard_and_relse;
1502 th = (const struct tcphdr *)skb->data; 1503 th = (const struct tcphdr *)skb->data;
1503 hdr = ipv6_hdr(skb); 1504 hdr = ipv6_hdr(skb);
1505 tcp_v6_fill_cb(skb, hdr, th);
1504 1506
1505 skb->dev = NULL; 1507 skb->dev = NULL;
1506 1508
@@ -1590,7 +1592,6 @@ do_time_wait:
1590 tcp_v6_timewait_ack(sk, skb); 1592 tcp_v6_timewait_ack(sk, skb);
1591 break; 1593 break;
1592 case TCP_TW_RST: 1594 case TCP_TW_RST:
1593 tcp_v6_restore_cb(skb);
1594 tcp_v6_send_reset(sk, skb); 1595 tcp_v6_send_reset(sk, skb);
1595 inet_twsk_deschedule_put(inet_twsk(sk)); 1596 inet_twsk_deschedule_put(inet_twsk(sk));
1596 goto discard_it; 1597 goto discard_it;
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index d883c9204c01..278e49cd67d4 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -46,6 +46,9 @@ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
46{ 46{
47 struct tcphdr *th; 47 struct tcphdr *th;
48 48
49 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
50 return ERR_PTR(-EINVAL);
51
49 if (!pskb_may_pull(skb, sizeof(*th))) 52 if (!pskb_may_pull(skb, sizeof(*th)))
50 return ERR_PTR(-EINVAL); 53 return ERR_PTR(-EINVAL);
51 54
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index a0f89ad76f9d..2a04dc9c781b 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -42,6 +42,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
42 const struct ipv6hdr *ipv6h; 42 const struct ipv6hdr *ipv6h;
43 struct udphdr *uh; 43 struct udphdr *uh;
44 44
45 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
46 goto out;
47
45 if (!pskb_may_pull(skb, sizeof(struct udphdr))) 48 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
46 goto out; 49 goto out;
47 50
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index fe04e23af986..841f4a07438e 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -32,6 +32,14 @@ int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
32} 32}
33EXPORT_SYMBOL(xfrm6_rcv_spi); 33EXPORT_SYMBOL(xfrm6_rcv_spi);
34 34
35static int xfrm6_transport_finish2(struct net *net, struct sock *sk,
36 struct sk_buff *skb)
37{
38 if (xfrm_trans_queue(skb, ip6_rcv_finish))
39 __kfree_skb(skb);
40 return -1;
41}
42
35int xfrm6_transport_finish(struct sk_buff *skb, int async) 43int xfrm6_transport_finish(struct sk_buff *skb, int async)
36{ 44{
37 struct xfrm_offload *xo = xfrm_offload(skb); 45 struct xfrm_offload *xo = xfrm_offload(skb);
@@ -56,7 +64,7 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
56 64
57 NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, 65 NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
58 dev_net(skb->dev), NULL, skb, skb->dev, NULL, 66 dev_net(skb->dev), NULL, skb, skb->dev, NULL,
59 ip6_rcv_finish); 67 xfrm6_transport_finish2);
60 return -1; 68 return -1;
61} 69}
62 70
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 02556e356f87..dc93002ff9d1 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -92,6 +92,7 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
92 92
93 skb_reset_network_header(skb); 93 skb_reset_network_header(skb);
94 skb_mac_header_rebuild(skb); 94 skb_mac_header_rebuild(skb);
95 eth_hdr(skb)->h_proto = skb->protocol;
95 96
96 err = 0; 97 err = 0;
97 98
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 0b750a22c4b9..4a8d407f8902 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1387,8 +1387,13 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
1387 if (!csk) 1387 if (!csk)
1388 return -EINVAL; 1388 return -EINVAL;
1389 1389
1390 /* We must prevent loops or risk deadlock ! */ 1390 /* Only allow TCP sockets to be attached for now */
1391 if (csk->sk_family == PF_KCM) 1391 if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
1392 csk->sk_protocol != IPPROTO_TCP)
1393 return -EOPNOTSUPP;
1394
1395 /* Don't allow listeners or closed sockets */
1396 if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE)
1392 return -EOPNOTSUPP; 1397 return -EOPNOTSUPP;
1393 1398
1394 psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL); 1399 psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
@@ -1405,9 +1410,18 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
1405 return err; 1410 return err;
1406 } 1411 }
1407 1412
1408 sock_hold(csk);
1409
1410 write_lock_bh(&csk->sk_callback_lock); 1413 write_lock_bh(&csk->sk_callback_lock);
1414
1415 /* Check if sk_user_data is aready by KCM or someone else.
1416 * Must be done under lock to prevent race conditions.
1417 */
1418 if (csk->sk_user_data) {
1419 write_unlock_bh(&csk->sk_callback_lock);
1420 strp_done(&psock->strp);
1421 kmem_cache_free(kcm_psockp, psock);
1422 return -EALREADY;
1423 }
1424
1411 psock->save_data_ready = csk->sk_data_ready; 1425 psock->save_data_ready = csk->sk_data_ready;
1412 psock->save_write_space = csk->sk_write_space; 1426 psock->save_write_space = csk->sk_write_space;
1413 psock->save_state_change = csk->sk_state_change; 1427 psock->save_state_change = csk->sk_state_change;
@@ -1415,8 +1429,11 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
1415 csk->sk_data_ready = psock_data_ready; 1429 csk->sk_data_ready = psock_data_ready;
1416 csk->sk_write_space = psock_write_space; 1430 csk->sk_write_space = psock_write_space;
1417 csk->sk_state_change = psock_state_change; 1431 csk->sk_state_change = psock_state_change;
1432
1418 write_unlock_bh(&csk->sk_callback_lock); 1433 write_unlock_bh(&csk->sk_callback_lock);
1419 1434
1435 sock_hold(csk);
1436
1420 /* Finished initialization, now add the psock to the MUX. */ 1437 /* Finished initialization, now add the psock to the MUX. */
1421 spin_lock_bh(&mux->lock); 1438 spin_lock_bh(&mux->lock);
1422 head = &mux->psocks; 1439 head = &mux->psocks;
@@ -1625,60 +1642,30 @@ static struct proto kcm_proto = {
1625}; 1642};
1626 1643
1627/* Clone a kcm socket. */ 1644/* Clone a kcm socket. */
1628static int kcm_clone(struct socket *osock, struct kcm_clone *info, 1645static struct file *kcm_clone(struct socket *osock)
1629 struct socket **newsockp)
1630{ 1646{
1631 struct socket *newsock; 1647 struct socket *newsock;
1632 struct sock *newsk; 1648 struct sock *newsk;
1633 struct file *newfile;
1634 int err, newfd;
1635 1649
1636 err = -ENFILE;
1637 newsock = sock_alloc(); 1650 newsock = sock_alloc();
1638 if (!newsock) 1651 if (!newsock)
1639 goto out; 1652 return ERR_PTR(-ENFILE);
1640 1653
1641 newsock->type = osock->type; 1654 newsock->type = osock->type;
1642 newsock->ops = osock->ops; 1655 newsock->ops = osock->ops;
1643 1656
1644 __module_get(newsock->ops->owner); 1657 __module_get(newsock->ops->owner);
1645 1658
1646 newfd = get_unused_fd_flags(0);
1647 if (unlikely(newfd < 0)) {
1648 err = newfd;
1649 goto out_fd_fail;
1650 }
1651
1652 newfile = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
1653 if (IS_ERR(newfile)) {
1654 err = PTR_ERR(newfile);
1655 goto out_sock_alloc_fail;
1656 }
1657
1658 newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL, 1659 newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
1659 &kcm_proto, true); 1660 &kcm_proto, true);
1660 if (!newsk) { 1661 if (!newsk) {
1661 err = -ENOMEM; 1662 sock_release(newsock);
1662 goto out_sk_alloc_fail; 1663 return ERR_PTR(-ENOMEM);
1663 } 1664 }
1664
1665 sock_init_data(newsock, newsk); 1665 sock_init_data(newsock, newsk);
1666 init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux); 1666 init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
1667 1667
1668 fd_install(newfd, newfile); 1668 return sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
1669 *newsockp = newsock;
1670 info->fd = newfd;
1671
1672 return 0;
1673
1674out_sk_alloc_fail:
1675 fput(newfile);
1676out_sock_alloc_fail:
1677 put_unused_fd(newfd);
1678out_fd_fail:
1679 sock_release(newsock);
1680out:
1681 return err;
1682} 1669}
1683 1670
1684static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 1671static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
@@ -1708,17 +1695,25 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1708 } 1695 }
1709 case SIOCKCMCLONE: { 1696 case SIOCKCMCLONE: {
1710 struct kcm_clone info; 1697 struct kcm_clone info;
1711 struct socket *newsock = NULL; 1698 struct file *file;
1712
1713 err = kcm_clone(sock, &info, &newsock);
1714 if (!err) {
1715 if (copy_to_user((void __user *)arg, &info,
1716 sizeof(info))) {
1717 err = -EFAULT;
1718 sys_close(info.fd);
1719 }
1720 }
1721 1699
1700 info.fd = get_unused_fd_flags(0);
1701 if (unlikely(info.fd < 0))
1702 return info.fd;
1703
1704 file = kcm_clone(sock);
1705 if (IS_ERR(file)) {
1706 put_unused_fd(info.fd);
1707 return PTR_ERR(file);
1708 }
1709 if (copy_to_user((void __user *)arg, &info,
1710 sizeof(info))) {
1711 put_unused_fd(info.fd);
1712 fput(file);
1713 return -EFAULT;
1714 }
1715 fd_install(info.fd, file);
1716 err = 0;
1722 break; 1717 break;
1723 } 1718 }
1724 default: 1719 default:
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 3dffb892d52c..7e2e7188e7f4 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -401,6 +401,11 @@ static int verify_address_len(const void *p)
401#endif 401#endif
402 int len; 402 int len;
403 403
404 if (sp->sadb_address_len <
405 DIV_ROUND_UP(sizeof(*sp) + offsetofend(typeof(*addr), sa_family),
406 sizeof(uint64_t)))
407 return -EINVAL;
408
404 switch (addr->sa_family) { 409 switch (addr->sa_family) {
405 case AF_INET: 410 case AF_INET:
406 len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin), sizeof(uint64_t)); 411 len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin), sizeof(uint64_t));
@@ -511,6 +516,9 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void *
511 uint16_t ext_type; 516 uint16_t ext_type;
512 int ext_len; 517 int ext_len;
513 518
519 if (len < sizeof(*ehdr))
520 return -EINVAL;
521
514 ext_len = ehdr->sadb_ext_len; 522 ext_len = ehdr->sadb_ext_len;
515 ext_len *= sizeof(uint64_t); 523 ext_len *= sizeof(uint64_t);
516 ext_type = ehdr->sadb_ext_type; 524 ext_type = ehdr->sadb_ext_type;
@@ -2194,8 +2202,10 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev
2194 return PTR_ERR(out_skb); 2202 return PTR_ERR(out_skb);
2195 2203
2196 err = pfkey_xfrm_policy2msg(out_skb, xp, dir); 2204 err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
2197 if (err < 0) 2205 if (err < 0) {
2206 kfree_skb(out_skb);
2198 return err; 2207 return err;
2208 }
2199 2209
2200 out_hdr = (struct sadb_msg *) out_skb->data; 2210 out_hdr = (struct sadb_msg *) out_skb->data;
2201 out_hdr->sadb_msg_version = PF_KEY_V2; 2211 out_hdr->sadb_msg_version = PF_KEY_V2;
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 167f83b853e6..1621b6ab17ba 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -291,16 +291,15 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
291 int i; 291 int i;
292 292
293 mutex_lock(&sta->ampdu_mlme.mtx); 293 mutex_lock(&sta->ampdu_mlme.mtx);
294 for (i = 0; i < IEEE80211_NUM_TIDS; i++) { 294 for (i = 0; i < IEEE80211_NUM_TIDS; i++)
295 ___ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT, 295 ___ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
296 WLAN_REASON_QSTA_LEAVE_QBSS, 296 WLAN_REASON_QSTA_LEAVE_QBSS,
297 reason != AGG_STOP_DESTROY_STA && 297 reason != AGG_STOP_DESTROY_STA &&
298 reason != AGG_STOP_PEER_REQUEST); 298 reason != AGG_STOP_PEER_REQUEST);
299 }
300 mutex_unlock(&sta->ampdu_mlme.mtx);
301 299
302 for (i = 0; i < IEEE80211_NUM_TIDS; i++) 300 for (i = 0; i < IEEE80211_NUM_TIDS; i++)
303 ___ieee80211_stop_tx_ba_session(sta, i, reason); 301 ___ieee80211_stop_tx_ba_session(sta, i, reason);
302 mutex_unlock(&sta->ampdu_mlme.mtx);
304 303
305 /* stopping might queue the work again - so cancel only afterwards */ 304 /* stopping might queue the work again - so cancel only afterwards */
306 cancel_work_sync(&sta->ampdu_mlme.work); 305 cancel_work_sync(&sta->ampdu_mlme.work);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 70e9d2ca8bbe..4daafb07602f 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -3632,6 +3632,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
3632 } 3632 }
3633 return true; 3633 return true;
3634 case NL80211_IFTYPE_MESH_POINT: 3634 case NL80211_IFTYPE_MESH_POINT:
3635 if (ether_addr_equal(sdata->vif.addr, hdr->addr2))
3636 return false;
3635 if (multicast) 3637 if (multicast)
3636 return true; 3638 return true;
3637 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 3639 return ether_addr_equal(sdata->vif.addr, hdr->addr1);
diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c
index cf1bf2605c10..dc6347342e34 100644
--- a/net/netfilter/nf_conntrack_h323_asn1.c
+++ b/net/netfilter/nf_conntrack_h323_asn1.c
@@ -103,7 +103,6 @@ struct bitstr {
103#define INC_BIT(bs) if((++(bs)->bit)>7){(bs)->cur++;(bs)->bit=0;} 103#define INC_BIT(bs) if((++(bs)->bit)>7){(bs)->cur++;(bs)->bit=0;}
104#define INC_BITS(bs,b) if(((bs)->bit+=(b))>7){(bs)->cur+=(bs)->bit>>3;(bs)->bit&=7;} 104#define INC_BITS(bs,b) if(((bs)->bit+=(b))>7){(bs)->cur+=(bs)->bit>>3;(bs)->bit&=7;}
105#define BYTE_ALIGN(bs) if((bs)->bit){(bs)->cur++;(bs)->bit=0;} 105#define BYTE_ALIGN(bs) if((bs)->bit){(bs)->cur++;(bs)->bit=0;}
106#define CHECK_BOUND(bs,n) if((bs)->cur+(n)>(bs)->end)return(H323_ERROR_BOUND)
107static unsigned int get_len(struct bitstr *bs); 106static unsigned int get_len(struct bitstr *bs);
108static unsigned int get_bit(struct bitstr *bs); 107static unsigned int get_bit(struct bitstr *bs);
109static unsigned int get_bits(struct bitstr *bs, unsigned int b); 108static unsigned int get_bits(struct bitstr *bs, unsigned int b);
@@ -165,6 +164,19 @@ static unsigned int get_len(struct bitstr *bs)
165 return v; 164 return v;
166} 165}
167 166
167static int nf_h323_error_boundary(struct bitstr *bs, size_t bytes, size_t bits)
168{
169 bits += bs->bit;
170 bytes += bits / BITS_PER_BYTE;
171 if (bits % BITS_PER_BYTE > 0)
172 bytes++;
173
174 if (*bs->cur + bytes > *bs->end)
175 return 1;
176
177 return 0;
178}
179
168/****************************************************************************/ 180/****************************************************************************/
169static unsigned int get_bit(struct bitstr *bs) 181static unsigned int get_bit(struct bitstr *bs)
170{ 182{
@@ -279,8 +291,8 @@ static int decode_bool(struct bitstr *bs, const struct field_t *f,
279 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); 291 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
280 292
281 INC_BIT(bs); 293 INC_BIT(bs);
282 294 if (nf_h323_error_boundary(bs, 0, 0))
283 CHECK_BOUND(bs, 0); 295 return H323_ERROR_BOUND;
284 return H323_ERROR_NONE; 296 return H323_ERROR_NONE;
285} 297}
286 298
@@ -293,11 +305,14 @@ static int decode_oid(struct bitstr *bs, const struct field_t *f,
293 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); 305 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
294 306
295 BYTE_ALIGN(bs); 307 BYTE_ALIGN(bs);
296 CHECK_BOUND(bs, 1); 308 if (nf_h323_error_boundary(bs, 1, 0))
309 return H323_ERROR_BOUND;
310
297 len = *bs->cur++; 311 len = *bs->cur++;
298 bs->cur += len; 312 bs->cur += len;
313 if (nf_h323_error_boundary(bs, 0, 0))
314 return H323_ERROR_BOUND;
299 315
300 CHECK_BOUND(bs, 0);
301 return H323_ERROR_NONE; 316 return H323_ERROR_NONE;
302} 317}
303 318
@@ -319,6 +334,8 @@ static int decode_int(struct bitstr *bs, const struct field_t *f,
319 bs->cur += 2; 334 bs->cur += 2;
320 break; 335 break;
321 case CONS: /* 64K < Range < 4G */ 336 case CONS: /* 64K < Range < 4G */
337 if (nf_h323_error_boundary(bs, 0, 2))
338 return H323_ERROR_BOUND;
322 len = get_bits(bs, 2) + 1; 339 len = get_bits(bs, 2) + 1;
323 BYTE_ALIGN(bs); 340 BYTE_ALIGN(bs);
324 if (base && (f->attr & DECODE)) { /* timeToLive */ 341 if (base && (f->attr & DECODE)) { /* timeToLive */
@@ -330,7 +347,8 @@ static int decode_int(struct bitstr *bs, const struct field_t *f,
330 break; 347 break;
331 case UNCO: 348 case UNCO:
332 BYTE_ALIGN(bs); 349 BYTE_ALIGN(bs);
333 CHECK_BOUND(bs, 2); 350 if (nf_h323_error_boundary(bs, 2, 0))
351 return H323_ERROR_BOUND;
334 len = get_len(bs); 352 len = get_len(bs);
335 bs->cur += len; 353 bs->cur += len;
336 break; 354 break;
@@ -341,7 +359,8 @@ static int decode_int(struct bitstr *bs, const struct field_t *f,
341 359
342 PRINT("\n"); 360 PRINT("\n");
343 361
344 CHECK_BOUND(bs, 0); 362 if (nf_h323_error_boundary(bs, 0, 0))
363 return H323_ERROR_BOUND;
345 return H323_ERROR_NONE; 364 return H323_ERROR_NONE;
346} 365}
347 366
@@ -357,7 +376,8 @@ static int decode_enum(struct bitstr *bs, const struct field_t *f,
357 INC_BITS(bs, f->sz); 376 INC_BITS(bs, f->sz);
358 } 377 }
359 378
360 CHECK_BOUND(bs, 0); 379 if (nf_h323_error_boundary(bs, 0, 0))
380 return H323_ERROR_BOUND;
361 return H323_ERROR_NONE; 381 return H323_ERROR_NONE;
362} 382}
363 383
@@ -375,12 +395,14 @@ static int decode_bitstr(struct bitstr *bs, const struct field_t *f,
375 len = f->lb; 395 len = f->lb;
376 break; 396 break;
377 case WORD: /* 2-byte length */ 397 case WORD: /* 2-byte length */
378 CHECK_BOUND(bs, 2); 398 if (nf_h323_error_boundary(bs, 2, 0))
399 return H323_ERROR_BOUND;
379 len = (*bs->cur++) << 8; 400 len = (*bs->cur++) << 8;
380 len += (*bs->cur++) + f->lb; 401 len += (*bs->cur++) + f->lb;
381 break; 402 break;
382 case SEMI: 403 case SEMI:
383 CHECK_BOUND(bs, 2); 404 if (nf_h323_error_boundary(bs, 2, 0))
405 return H323_ERROR_BOUND;
384 len = get_len(bs); 406 len = get_len(bs);
385 break; 407 break;
386 default: 408 default:
@@ -391,7 +413,8 @@ static int decode_bitstr(struct bitstr *bs, const struct field_t *f,
391 bs->cur += len >> 3; 413 bs->cur += len >> 3;
392 bs->bit = len & 7; 414 bs->bit = len & 7;
393 415
394 CHECK_BOUND(bs, 0); 416 if (nf_h323_error_boundary(bs, 0, 0))
417 return H323_ERROR_BOUND;
395 return H323_ERROR_NONE; 418 return H323_ERROR_NONE;
396} 419}
397 420
@@ -404,12 +427,15 @@ static int decode_numstr(struct bitstr *bs, const struct field_t *f,
404 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); 427 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
405 428
406 /* 2 <= Range <= 255 */ 429 /* 2 <= Range <= 255 */
430 if (nf_h323_error_boundary(bs, 0, f->sz))
431 return H323_ERROR_BOUND;
407 len = get_bits(bs, f->sz) + f->lb; 432 len = get_bits(bs, f->sz) + f->lb;
408 433
409 BYTE_ALIGN(bs); 434 BYTE_ALIGN(bs);
410 INC_BITS(bs, (len << 2)); 435 INC_BITS(bs, (len << 2));
411 436
412 CHECK_BOUND(bs, 0); 437 if (nf_h323_error_boundary(bs, 0, 0))
438 return H323_ERROR_BOUND;
413 return H323_ERROR_NONE; 439 return H323_ERROR_NONE;
414} 440}
415 441
@@ -440,15 +466,19 @@ static int decode_octstr(struct bitstr *bs, const struct field_t *f,
440 break; 466 break;
441 case BYTE: /* Range == 256 */ 467 case BYTE: /* Range == 256 */
442 BYTE_ALIGN(bs); 468 BYTE_ALIGN(bs);
443 CHECK_BOUND(bs, 1); 469 if (nf_h323_error_boundary(bs, 1, 0))
470 return H323_ERROR_BOUND;
444 len = (*bs->cur++) + f->lb; 471 len = (*bs->cur++) + f->lb;
445 break; 472 break;
446 case SEMI: 473 case SEMI:
447 BYTE_ALIGN(bs); 474 BYTE_ALIGN(bs);
448 CHECK_BOUND(bs, 2); 475 if (nf_h323_error_boundary(bs, 2, 0))
476 return H323_ERROR_BOUND;
449 len = get_len(bs) + f->lb; 477 len = get_len(bs) + f->lb;
450 break; 478 break;
451 default: /* 2 <= Range <= 255 */ 479 default: /* 2 <= Range <= 255 */
480 if (nf_h323_error_boundary(bs, 0, f->sz))
481 return H323_ERROR_BOUND;
452 len = get_bits(bs, f->sz) + f->lb; 482 len = get_bits(bs, f->sz) + f->lb;
453 BYTE_ALIGN(bs); 483 BYTE_ALIGN(bs);
454 break; 484 break;
@@ -458,7 +488,8 @@ static int decode_octstr(struct bitstr *bs, const struct field_t *f,
458 488
459 PRINT("\n"); 489 PRINT("\n");
460 490
461 CHECK_BOUND(bs, 0); 491 if (nf_h323_error_boundary(bs, 0, 0))
492 return H323_ERROR_BOUND;
462 return H323_ERROR_NONE; 493 return H323_ERROR_NONE;
463} 494}
464 495
@@ -473,10 +504,13 @@ static int decode_bmpstr(struct bitstr *bs, const struct field_t *f,
473 switch (f->sz) { 504 switch (f->sz) {
474 case BYTE: /* Range == 256 */ 505 case BYTE: /* Range == 256 */
475 BYTE_ALIGN(bs); 506 BYTE_ALIGN(bs);
476 CHECK_BOUND(bs, 1); 507 if (nf_h323_error_boundary(bs, 1, 0))
508 return H323_ERROR_BOUND;
477 len = (*bs->cur++) + f->lb; 509 len = (*bs->cur++) + f->lb;
478 break; 510 break;
479 default: /* 2 <= Range <= 255 */ 511 default: /* 2 <= Range <= 255 */
512 if (nf_h323_error_boundary(bs, 0, f->sz))
513 return H323_ERROR_BOUND;
480 len = get_bits(bs, f->sz) + f->lb; 514 len = get_bits(bs, f->sz) + f->lb;
481 BYTE_ALIGN(bs); 515 BYTE_ALIGN(bs);
482 break; 516 break;
@@ -484,7 +518,8 @@ static int decode_bmpstr(struct bitstr *bs, const struct field_t *f,
484 518
485 bs->cur += len << 1; 519 bs->cur += len << 1;
486 520
487 CHECK_BOUND(bs, 0); 521 if (nf_h323_error_boundary(bs, 0, 0))
522 return H323_ERROR_BOUND;
488 return H323_ERROR_NONE; 523 return H323_ERROR_NONE;
489} 524}
490 525
@@ -503,9 +538,13 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
503 base = (base && (f->attr & DECODE)) ? base + f->offset : NULL; 538 base = (base && (f->attr & DECODE)) ? base + f->offset : NULL;
504 539
505 /* Extensible? */ 540 /* Extensible? */
541 if (nf_h323_error_boundary(bs, 0, 1))
542 return H323_ERROR_BOUND;
506 ext = (f->attr & EXT) ? get_bit(bs) : 0; 543 ext = (f->attr & EXT) ? get_bit(bs) : 0;
507 544
508 /* Get fields bitmap */ 545 /* Get fields bitmap */
546 if (nf_h323_error_boundary(bs, 0, f->sz))
547 return H323_ERROR_BOUND;
509 bmp = get_bitmap(bs, f->sz); 548 bmp = get_bitmap(bs, f->sz);
510 if (base) 549 if (base)
511 *(unsigned int *)base = bmp; 550 *(unsigned int *)base = bmp;
@@ -525,9 +564,11 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
525 564
526 /* Decode */ 565 /* Decode */
527 if (son->attr & OPEN) { /* Open field */ 566 if (son->attr & OPEN) { /* Open field */
528 CHECK_BOUND(bs, 2); 567 if (nf_h323_error_boundary(bs, 2, 0))
568 return H323_ERROR_BOUND;
529 len = get_len(bs); 569 len = get_len(bs);
530 CHECK_BOUND(bs, len); 570 if (nf_h323_error_boundary(bs, len, 0))
571 return H323_ERROR_BOUND;
531 if (!base || !(son->attr & DECODE)) { 572 if (!base || !(son->attr & DECODE)) {
532 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, 573 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE,
533 " ", son->name); 574 " ", son->name);
@@ -555,8 +596,11 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
555 return H323_ERROR_NONE; 596 return H323_ERROR_NONE;
556 597
557 /* Get the extension bitmap */ 598 /* Get the extension bitmap */
599 if (nf_h323_error_boundary(bs, 0, 7))
600 return H323_ERROR_BOUND;
558 bmp2_len = get_bits(bs, 7) + 1; 601 bmp2_len = get_bits(bs, 7) + 1;
559 CHECK_BOUND(bs, (bmp2_len + 7) >> 3); 602 if (nf_h323_error_boundary(bs, 0, bmp2_len))
603 return H323_ERROR_BOUND;
560 bmp2 = get_bitmap(bs, bmp2_len); 604 bmp2 = get_bitmap(bs, bmp2_len);
561 bmp |= bmp2 >> f->sz; 605 bmp |= bmp2 >> f->sz;
562 if (base) 606 if (base)
@@ -567,9 +611,11 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
567 for (opt = 0; opt < bmp2_len; opt++, i++, son++) { 611 for (opt = 0; opt < bmp2_len; opt++, i++, son++) {
568 /* Check Range */ 612 /* Check Range */
569 if (i >= f->ub) { /* Newer Version? */ 613 if (i >= f->ub) { /* Newer Version? */
570 CHECK_BOUND(bs, 2); 614 if (nf_h323_error_boundary(bs, 2, 0))
615 return H323_ERROR_BOUND;
571 len = get_len(bs); 616 len = get_len(bs);
572 CHECK_BOUND(bs, len); 617 if (nf_h323_error_boundary(bs, len, 0))
618 return H323_ERROR_BOUND;
573 bs->cur += len; 619 bs->cur += len;
574 continue; 620 continue;
575 } 621 }
@@ -583,9 +629,11 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
583 if (!((0x80000000 >> opt) & bmp2)) /* Not present */ 629 if (!((0x80000000 >> opt) & bmp2)) /* Not present */
584 continue; 630 continue;
585 631
586 CHECK_BOUND(bs, 2); 632 if (nf_h323_error_boundary(bs, 2, 0))
633 return H323_ERROR_BOUND;
587 len = get_len(bs); 634 len = get_len(bs);
588 CHECK_BOUND(bs, len); 635 if (nf_h323_error_boundary(bs, len, 0))
636 return H323_ERROR_BOUND;
589 if (!base || !(son->attr & DECODE)) { 637 if (!base || !(son->attr & DECODE)) {
590 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ", 638 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ",
591 son->name); 639 son->name);
@@ -623,22 +671,27 @@ static int decode_seqof(struct bitstr *bs, const struct field_t *f,
623 switch (f->sz) { 671 switch (f->sz) {
624 case BYTE: 672 case BYTE:
625 BYTE_ALIGN(bs); 673 BYTE_ALIGN(bs);
626 CHECK_BOUND(bs, 1); 674 if (nf_h323_error_boundary(bs, 1, 0))
675 return H323_ERROR_BOUND;
627 count = *bs->cur++; 676 count = *bs->cur++;
628 break; 677 break;
629 case WORD: 678 case WORD:
630 BYTE_ALIGN(bs); 679 BYTE_ALIGN(bs);
631 CHECK_BOUND(bs, 2); 680 if (nf_h323_error_boundary(bs, 2, 0))
681 return H323_ERROR_BOUND;
632 count = *bs->cur++; 682 count = *bs->cur++;
633 count <<= 8; 683 count <<= 8;
634 count += *bs->cur++; 684 count += *bs->cur++;
635 break; 685 break;
636 case SEMI: 686 case SEMI:
637 BYTE_ALIGN(bs); 687 BYTE_ALIGN(bs);
638 CHECK_BOUND(bs, 2); 688 if (nf_h323_error_boundary(bs, 2, 0))
689 return H323_ERROR_BOUND;
639 count = get_len(bs); 690 count = get_len(bs);
640 break; 691 break;
641 default: 692 default:
693 if (nf_h323_error_boundary(bs, 0, f->sz))
694 return H323_ERROR_BOUND;
642 count = get_bits(bs, f->sz); 695 count = get_bits(bs, f->sz);
643 break; 696 break;
644 } 697 }
@@ -658,8 +711,11 @@ static int decode_seqof(struct bitstr *bs, const struct field_t *f,
658 for (i = 0; i < count; i++) { 711 for (i = 0; i < count; i++) {
659 if (son->attr & OPEN) { 712 if (son->attr & OPEN) {
660 BYTE_ALIGN(bs); 713 BYTE_ALIGN(bs);
714 if (nf_h323_error_boundary(bs, 2, 0))
715 return H323_ERROR_BOUND;
661 len = get_len(bs); 716 len = get_len(bs);
662 CHECK_BOUND(bs, len); 717 if (nf_h323_error_boundary(bs, len, 0))
718 return H323_ERROR_BOUND;
663 if (!base || !(son->attr & DECODE)) { 719 if (!base || !(son->attr & DECODE)) {
664 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, 720 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE,
665 " ", son->name); 721 " ", son->name);
@@ -710,11 +766,17 @@ static int decode_choice(struct bitstr *bs, const struct field_t *f,
710 base = (base && (f->attr & DECODE)) ? base + f->offset : NULL; 766 base = (base && (f->attr & DECODE)) ? base + f->offset : NULL;
711 767
712 /* Decode the choice index number */ 768 /* Decode the choice index number */
769 if (nf_h323_error_boundary(bs, 0, 1))
770 return H323_ERROR_BOUND;
713 if ((f->attr & EXT) && get_bit(bs)) { 771 if ((f->attr & EXT) && get_bit(bs)) {
714 ext = 1; 772 ext = 1;
773 if (nf_h323_error_boundary(bs, 0, 7))
774 return H323_ERROR_BOUND;
715 type = get_bits(bs, 7) + f->lb; 775 type = get_bits(bs, 7) + f->lb;
716 } else { 776 } else {
717 ext = 0; 777 ext = 0;
778 if (nf_h323_error_boundary(bs, 0, f->sz))
779 return H323_ERROR_BOUND;
718 type = get_bits(bs, f->sz); 780 type = get_bits(bs, f->sz);
719 if (type >= f->lb) 781 if (type >= f->lb)
720 return H323_ERROR_RANGE; 782 return H323_ERROR_RANGE;
@@ -727,8 +789,11 @@ static int decode_choice(struct bitstr *bs, const struct field_t *f,
727 /* Check Range */ 789 /* Check Range */
728 if (type >= f->ub) { /* Newer version? */ 790 if (type >= f->ub) { /* Newer version? */
729 BYTE_ALIGN(bs); 791 BYTE_ALIGN(bs);
792 if (nf_h323_error_boundary(bs, 2, 0))
793 return H323_ERROR_BOUND;
730 len = get_len(bs); 794 len = get_len(bs);
731 CHECK_BOUND(bs, len); 795 if (nf_h323_error_boundary(bs, len, 0))
796 return H323_ERROR_BOUND;
732 bs->cur += len; 797 bs->cur += len;
733 return H323_ERROR_NONE; 798 return H323_ERROR_NONE;
734 } 799 }
@@ -742,8 +807,11 @@ static int decode_choice(struct bitstr *bs, const struct field_t *f,
742 807
743 if (ext || (son->attr & OPEN)) { 808 if (ext || (son->attr & OPEN)) {
744 BYTE_ALIGN(bs); 809 BYTE_ALIGN(bs);
810 if (nf_h323_error_boundary(bs, len, 0))
811 return H323_ERROR_BOUND;
745 len = get_len(bs); 812 len = get_len(bs);
746 CHECK_BOUND(bs, len); 813 if (nf_h323_error_boundary(bs, len, 0))
814 return H323_ERROR_BOUND;
747 if (!base || !(son->attr & DECODE)) { 815 if (!base || !(son->attr & DECODE)) {
748 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ", 816 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ",
749 son->name); 817 son->name);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 59c08997bfdf..382d49792f42 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -45,7 +45,6 @@
45#include <net/netfilter/nf_conntrack_zones.h> 45#include <net/netfilter/nf_conntrack_zones.h>
46#include <net/netfilter/nf_conntrack_timestamp.h> 46#include <net/netfilter/nf_conntrack_timestamp.h>
47#include <net/netfilter/nf_conntrack_labels.h> 47#include <net/netfilter/nf_conntrack_labels.h>
48#include <net/netfilter/nf_conntrack_seqadj.h>
49#include <net/netfilter/nf_conntrack_synproxy.h> 48#include <net/netfilter/nf_conntrack_synproxy.h>
50#ifdef CONFIG_NF_NAT_NEEDED 49#ifdef CONFIG_NF_NAT_NEEDED
51#include <net/netfilter/nf_nat_core.h> 50#include <net/netfilter/nf_nat_core.h>
@@ -1566,9 +1565,11 @@ static int ctnetlink_change_helper(struct nf_conn *ct,
1566static int ctnetlink_change_timeout(struct nf_conn *ct, 1565static int ctnetlink_change_timeout(struct nf_conn *ct,
1567 const struct nlattr * const cda[]) 1566 const struct nlattr * const cda[])
1568{ 1567{
1569 u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT])); 1568 u64 timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
1570 1569
1571 ct->timeout = nfct_time_stamp + timeout * HZ; 1570 if (timeout > INT_MAX)
1571 timeout = INT_MAX;
1572 ct->timeout = nfct_time_stamp + (u32)timeout;
1572 1573
1573 if (test_bit(IPS_DYING_BIT, &ct->status)) 1574 if (test_bit(IPS_DYING_BIT, &ct->status))
1574 return -ETIME; 1575 return -ETIME;
@@ -1768,6 +1769,7 @@ ctnetlink_create_conntrack(struct net *net,
1768 int err = -EINVAL; 1769 int err = -EINVAL;
1769 struct nf_conntrack_helper *helper; 1770 struct nf_conntrack_helper *helper;
1770 struct nf_conn_tstamp *tstamp; 1771 struct nf_conn_tstamp *tstamp;
1772 u64 timeout;
1771 1773
1772 ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC); 1774 ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
1773 if (IS_ERR(ct)) 1775 if (IS_ERR(ct))
@@ -1776,7 +1778,10 @@ ctnetlink_create_conntrack(struct net *net,
1776 if (!cda[CTA_TIMEOUT]) 1778 if (!cda[CTA_TIMEOUT])
1777 goto err1; 1779 goto err1;
1778 1780
1779 ct->timeout = nfct_time_stamp + ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ; 1781 timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
1782 if (timeout > INT_MAX)
1783 timeout = INT_MAX;
1784 ct->timeout = (u32)timeout + nfct_time_stamp;
1780 1785
1781 rcu_read_lock(); 1786 rcu_read_lock();
1782 if (cda[CTA_HELP]) { 1787 if (cda[CTA_HELP]) {
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index b12fc07111d0..37ef35b861f2 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -1039,6 +1039,9 @@ static int tcp_packet(struct nf_conn *ct,
1039 IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED && 1039 IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
1040 timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK]) 1040 timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
1041 timeout = timeouts[TCP_CONNTRACK_UNACK]; 1041 timeout = timeouts[TCP_CONNTRACK_UNACK];
1042 else if (ct->proto.tcp.last_win == 0 &&
1043 timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
1044 timeout = timeouts[TCP_CONNTRACK_RETRANS];
1042 else 1045 else
1043 timeout = timeouts[new_state]; 1046 timeout = timeouts[new_state];
1044 spin_unlock_bh(&ct->lock); 1047 spin_unlock_bh(&ct->lock);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index d8327b43e4dc..07bd4138c84e 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2072,7 +2072,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
2072 continue; 2072 continue;
2073 2073
2074 list_for_each_entry_rcu(chain, &table->chains, list) { 2074 list_for_each_entry_rcu(chain, &table->chains, list) {
2075 if (ctx && ctx->chain[0] && 2075 if (ctx && ctx->chain &&
2076 strcmp(ctx->chain, chain->name) != 0) 2076 strcmp(ctx->chain, chain->name) != 0)
2077 continue; 2077 continue;
2078 2078
@@ -4665,8 +4665,10 @@ static int nf_tables_dump_obj_done(struct netlink_callback *cb)
4665{ 4665{
4666 struct nft_obj_filter *filter = cb->data; 4666 struct nft_obj_filter *filter = cb->data;
4667 4667
4668 kfree(filter->table); 4668 if (filter) {
4669 kfree(filter); 4669 kfree(filter->table);
4670 kfree(filter);
4671 }
4670 4672
4671 return 0; 4673 return 0;
4672} 4674}
@@ -5847,6 +5849,12 @@ static int __net_init nf_tables_init_net(struct net *net)
5847 return 0; 5849 return 0;
5848} 5850}
5849 5851
5852static void __net_exit nf_tables_exit_net(struct net *net)
5853{
5854 WARN_ON_ONCE(!list_empty(&net->nft.af_info));
5855 WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
5856}
5857
5850int __nft_release_basechain(struct nft_ctx *ctx) 5858int __nft_release_basechain(struct nft_ctx *ctx)
5851{ 5859{
5852 struct nft_rule *rule, *nr; 5860 struct nft_rule *rule, *nr;
@@ -5917,6 +5925,7 @@ static void __nft_release_afinfo(struct net *net, struct nft_af_info *afi)
5917 5925
5918static struct pernet_operations nf_tables_net_ops = { 5926static struct pernet_operations nf_tables_net_ops = {
5919 .init = nf_tables_init_net, 5927 .init = nf_tables_init_net,
5928 .exit = nf_tables_exit_net,
5920}; 5929};
5921 5930
5922static int __init nf_tables_module_init(void) 5931static int __init nf_tables_module_init(void)
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index 41628b393673..d33ce6d5ebce 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -17,6 +17,7 @@
17#include <linux/types.h> 17#include <linux/types.h>
18#include <linux/list.h> 18#include <linux/list.h>
19#include <linux/errno.h> 19#include <linux/errno.h>
20#include <linux/capability.h>
20#include <net/netlink.h> 21#include <net/netlink.h>
21#include <net/sock.h> 22#include <net/sock.h>
22 23
@@ -407,6 +408,9 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
407 struct nfnl_cthelper *nlcth; 408 struct nfnl_cthelper *nlcth;
408 int ret = 0; 409 int ret = 0;
409 410
411 if (!capable(CAP_NET_ADMIN))
412 return -EPERM;
413
410 if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE]) 414 if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
411 return -EINVAL; 415 return -EINVAL;
412 416
@@ -611,6 +615,9 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
611 struct nfnl_cthelper *nlcth; 615 struct nfnl_cthelper *nlcth;
612 bool tuple_set = false; 616 bool tuple_set = false;
613 617
618 if (!capable(CAP_NET_ADMIN))
619 return -EPERM;
620
614 if (nlh->nlmsg_flags & NLM_F_DUMP) { 621 if (nlh->nlmsg_flags & NLM_F_DUMP) {
615 struct netlink_dump_control c = { 622 struct netlink_dump_control c = {
616 .dump = nfnl_cthelper_dump_table, 623 .dump = nfnl_cthelper_dump_table,
@@ -678,6 +685,9 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
678 struct nfnl_cthelper *nlcth, *n; 685 struct nfnl_cthelper *nlcth, *n;
679 int j = 0, ret; 686 int j = 0, ret;
680 687
688 if (!capable(CAP_NET_ADMIN))
689 return -EPERM;
690
681 if (tb[NFCTH_NAME]) 691 if (tb[NFCTH_NAME])
682 helper_name = nla_data(tb[NFCTH_NAME]); 692 helper_name = nla_data(tb[NFCTH_NAME]);
683 693
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index e5afab86381c..e955bec0acc6 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -1093,10 +1093,15 @@ static int __net_init nfnl_log_net_init(struct net *net)
1093 1093
1094static void __net_exit nfnl_log_net_exit(struct net *net) 1094static void __net_exit nfnl_log_net_exit(struct net *net)
1095{ 1095{
1096 struct nfnl_log_net *log = nfnl_log_pernet(net);
1097 unsigned int i;
1098
1096#ifdef CONFIG_PROC_FS 1099#ifdef CONFIG_PROC_FS
1097 remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter); 1100 remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter);
1098#endif 1101#endif
1099 nf_log_unset(net, &nfulnl_logger); 1102 nf_log_unset(net, &nfulnl_logger);
1103 for (i = 0; i < INSTANCE_BUCKETS; i++)
1104 WARN_ON_ONCE(!hlist_empty(&log->instance_table[i]));
1100} 1105}
1101 1106
1102static struct pernet_operations nfnl_log_net_ops = { 1107static struct pernet_operations nfnl_log_net_ops = {
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index a16356cacec3..c09b36755ed7 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -1512,10 +1512,15 @@ static int __net_init nfnl_queue_net_init(struct net *net)
1512 1512
1513static void __net_exit nfnl_queue_net_exit(struct net *net) 1513static void __net_exit nfnl_queue_net_exit(struct net *net)
1514{ 1514{
1515 struct nfnl_queue_net *q = nfnl_queue_pernet(net);
1516 unsigned int i;
1517
1515 nf_unregister_queue_handler(net); 1518 nf_unregister_queue_handler(net);
1516#ifdef CONFIG_PROC_FS 1519#ifdef CONFIG_PROC_FS
1517 remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter); 1520 remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
1518#endif 1521#endif
1522 for (i = 0; i < INSTANCE_BUCKETS; i++)
1523 WARN_ON_ONCE(!hlist_empty(&q->instance_table[i]));
1519} 1524}
1520 1525
1521static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list) 1526static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list)
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index a0a93d987a3b..47ec1046ad11 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -214,6 +214,8 @@ static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = {
214 [NFTA_EXTHDR_OFFSET] = { .type = NLA_U32 }, 214 [NFTA_EXTHDR_OFFSET] = { .type = NLA_U32 },
215 [NFTA_EXTHDR_LEN] = { .type = NLA_U32 }, 215 [NFTA_EXTHDR_LEN] = { .type = NLA_U32 },
216 [NFTA_EXTHDR_FLAGS] = { .type = NLA_U32 }, 216 [NFTA_EXTHDR_FLAGS] = { .type = NLA_U32 },
217 [NFTA_EXTHDR_OP] = { .type = NLA_U32 },
218 [NFTA_EXTHDR_SREG] = { .type = NLA_U32 },
217}; 219};
218 220
219static int nft_exthdr_init(const struct nft_ctx *ctx, 221static int nft_exthdr_init(const struct nft_ctx *ctx,
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index a77dd514297c..55802e97f906 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1729,8 +1729,17 @@ static int __net_init xt_net_init(struct net *net)
1729 return 0; 1729 return 0;
1730} 1730}
1731 1731
1732static void __net_exit xt_net_exit(struct net *net)
1733{
1734 int i;
1735
1736 for (i = 0; i < NFPROTO_NUMPROTO; i++)
1737 WARN_ON_ONCE(!list_empty(&net->xt.tables[i]));
1738}
1739
1732static struct pernet_operations xt_net_ops = { 1740static struct pernet_operations xt_net_ops = {
1733 .init = xt_net_init, 1741 .init = xt_net_init,
1742 .exit = xt_net_exit,
1734}; 1743};
1735 1744
1736static int __init xt_init(void) 1745static int __init xt_init(void)
diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c
index 041da0d9c06f..06b090d8e901 100644
--- a/net/netfilter/xt_bpf.c
+++ b/net/netfilter/xt_bpf.c
@@ -27,6 +27,9 @@ static int __bpf_mt_check_bytecode(struct sock_filter *insns, __u16 len,
27{ 27{
28 struct sock_fprog_kern program; 28 struct sock_fprog_kern program;
29 29
30 if (len > XT_BPF_MAX_NUM_INSTR)
31 return -EINVAL;
32
30 program.len = len; 33 program.len = len;
31 program.filter = insns; 34 program.filter = insns;
32 35
@@ -52,18 +55,11 @@ static int __bpf_mt_check_fd(int fd, struct bpf_prog **ret)
52 55
53static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret) 56static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret)
54{ 57{
55 mm_segment_t oldfs = get_fs(); 58 if (strnlen(path, XT_BPF_PATH_MAX) == XT_BPF_PATH_MAX)
56 int retval, fd; 59 return -EINVAL;
57 60
58 set_fs(KERNEL_DS); 61 *ret = bpf_prog_get_type_path(path, BPF_PROG_TYPE_SOCKET_FILTER);
59 fd = bpf_obj_get_user(path, 0); 62 return PTR_ERR_OR_ZERO(*ret);
60 set_fs(oldfs);
61 if (fd < 0)
62 return fd;
63
64 retval = __bpf_mt_check_fd(fd, ret);
65 sys_close(fd);
66 return retval;
67} 63}
68 64
69static int bpf_mt_check(const struct xt_mtchk_param *par) 65static int bpf_mt_check(const struct xt_mtchk_param *par)
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index 36e14b1f061d..a34f314a8c23 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -19,6 +19,7 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21 21
22#include <linux/capability.h>
22#include <linux/if.h> 23#include <linux/if.h>
23#include <linux/inetdevice.h> 24#include <linux/inetdevice.h>
24#include <linux/ip.h> 25#include <linux/ip.h>
@@ -70,6 +71,9 @@ static int xt_osf_add_callback(struct net *net, struct sock *ctnl,
70 struct xt_osf_finger *kf = NULL, *sf; 71 struct xt_osf_finger *kf = NULL, *sf;
71 int err = 0; 72 int err = 0;
72 73
74 if (!capable(CAP_NET_ADMIN))
75 return -EPERM;
76
73 if (!osf_attrs[OSF_ATTR_FINGER]) 77 if (!osf_attrs[OSF_ATTR_FINGER])
74 return -EINVAL; 78 return -EINVAL;
75 79
@@ -115,6 +119,9 @@ static int xt_osf_remove_callback(struct net *net, struct sock *ctnl,
115 struct xt_osf_finger *sf; 119 struct xt_osf_finger *sf;
116 int err = -ENOENT; 120 int err = -ENOENT;
117 121
122 if (!capable(CAP_NET_ADMIN))
123 return -EPERM;
124
118 if (!osf_attrs[OSF_ATTR_FINGER]) 125 if (!osf_attrs[OSF_ATTR_FINGER])
119 return -EINVAL; 126 return -EINVAL;
120 127
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index b9e0ee4e22f5..84a4e4c3be4b 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -253,6 +253,9 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
253 struct sock *sk = skb->sk; 253 struct sock *sk = skb->sk;
254 int ret = -ENOMEM; 254 int ret = -ENOMEM;
255 255
256 if (!net_eq(dev_net(dev), sock_net(sk)))
257 return 0;
258
256 dev_hold(dev); 259 dev_hold(dev);
257 260
258 if (is_vmalloc_addr(skb->head)) 261 if (is_vmalloc_addr(skb->head))
@@ -2381,13 +2384,14 @@ int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
2381 struct nlmsghdr *, 2384 struct nlmsghdr *,
2382 struct netlink_ext_ack *)) 2385 struct netlink_ext_ack *))
2383{ 2386{
2384 struct netlink_ext_ack extack = {}; 2387 struct netlink_ext_ack extack;
2385 struct nlmsghdr *nlh; 2388 struct nlmsghdr *nlh;
2386 int err; 2389 int err;
2387 2390
2388 while (skb->len >= nlmsg_total_size(0)) { 2391 while (skb->len >= nlmsg_total_size(0)) {
2389 int msglen; 2392 int msglen;
2390 2393
2394 memset(&extack, 0, sizeof(extack));
2391 nlh = nlmsg_hdr(skb); 2395 nlh = nlmsg_hdr(skb);
2392 err = 0; 2396 err = 0;
2393 2397
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index dbe2379329c5..f039064ce922 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -579,6 +579,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
579 return -EINVAL; 579 return -EINVAL;
580 580
581 skb_reset_network_header(skb); 581 skb_reset_network_header(skb);
582 key->eth.type = skb->protocol;
582 } else { 583 } else {
583 eth = eth_hdr(skb); 584 eth = eth_hdr(skb);
584 ether_addr_copy(key->eth.src, eth->h_source); 585 ether_addr_copy(key->eth.src, eth->h_source);
@@ -592,15 +593,23 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
592 if (unlikely(parse_vlan(skb, key))) 593 if (unlikely(parse_vlan(skb, key)))
593 return -ENOMEM; 594 return -ENOMEM;
594 595
595 skb->protocol = parse_ethertype(skb); 596 key->eth.type = parse_ethertype(skb);
596 if (unlikely(skb->protocol == htons(0))) 597 if (unlikely(key->eth.type == htons(0)))
597 return -ENOMEM; 598 return -ENOMEM;
598 599
600 /* Multiple tagged packets need to retain TPID to satisfy
601 * skb_vlan_pop(), which will later shift the ethertype into
602 * skb->protocol.
603 */
604 if (key->eth.cvlan.tci & htons(VLAN_TAG_PRESENT))
605 skb->protocol = key->eth.cvlan.tpid;
606 else
607 skb->protocol = key->eth.type;
608
599 skb_reset_network_header(skb); 609 skb_reset_network_header(skb);
600 __skb_push(skb, skb->data - skb_mac_header(skb)); 610 __skb_push(skb, skb->data - skb_mac_header(skb));
601 } 611 }
602 skb_reset_mac_len(skb); 612 skb_reset_mac_len(skb);
603 key->eth.type = skb->protocol;
604 613
605 /* Network layer. */ 614 /* Network layer. */
606 if (key->eth.type == htons(ETH_P_IP)) { 615 if (key->eth.type == htons(ETH_P_IP)) {
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 624ea74353dd..f143908b651d 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -49,7 +49,6 @@
49#include <net/mpls.h> 49#include <net/mpls.h>
50#include <net/vxlan.h> 50#include <net/vxlan.h>
51#include <net/tun_proto.h> 51#include <net/tun_proto.h>
52#include <net/erspan.h>
53 52
54#include "flow_netlink.h" 53#include "flow_netlink.h"
55 54
@@ -334,8 +333,7 @@ size_t ovs_tun_key_attr_size(void)
334 * OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS and covered by it. 333 * OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS and covered by it.
335 */ 334 */
336 + nla_total_size(2) /* OVS_TUNNEL_KEY_ATTR_TP_SRC */ 335 + nla_total_size(2) /* OVS_TUNNEL_KEY_ATTR_TP_SRC */
337 + nla_total_size(2) /* OVS_TUNNEL_KEY_ATTR_TP_DST */ 336 + nla_total_size(2); /* OVS_TUNNEL_KEY_ATTR_TP_DST */
338 + nla_total_size(4); /* OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS */
339} 337}
340 338
341static size_t ovs_nsh_key_attr_size(void) 339static size_t ovs_nsh_key_attr_size(void)
@@ -402,7 +400,6 @@ static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1]
402 .next = ovs_vxlan_ext_key_lens }, 400 .next = ovs_vxlan_ext_key_lens },
403 [OVS_TUNNEL_KEY_ATTR_IPV6_SRC] = { .len = sizeof(struct in6_addr) }, 401 [OVS_TUNNEL_KEY_ATTR_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
404 [OVS_TUNNEL_KEY_ATTR_IPV6_DST] = { .len = sizeof(struct in6_addr) }, 402 [OVS_TUNNEL_KEY_ATTR_IPV6_DST] = { .len = sizeof(struct in6_addr) },
405 [OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS] = { .len = sizeof(u32) },
406}; 403};
407 404
408static const struct ovs_len_tbl 405static const struct ovs_len_tbl
@@ -634,33 +631,6 @@ static int vxlan_tun_opt_from_nlattr(const struct nlattr *attr,
634 return 0; 631 return 0;
635} 632}
636 633
637static int erspan_tun_opt_from_nlattr(const struct nlattr *attr,
638 struct sw_flow_match *match, bool is_mask,
639 bool log)
640{
641 unsigned long opt_key_offset;
642 struct erspan_metadata opts;
643
644 BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));
645
646 memset(&opts, 0, sizeof(opts));
647 opts.index = nla_get_be32(attr);
648
649 /* Index has only 20-bit */
650 if (ntohl(opts.index) & ~INDEX_MASK) {
651 OVS_NLERR(log, "ERSPAN index number %x too large.",
652 ntohl(opts.index));
653 return -EINVAL;
654 }
655
656 SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), is_mask);
657 opt_key_offset = TUN_METADATA_OFFSET(sizeof(opts));
658 SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, &opts, sizeof(opts),
659 is_mask);
660
661 return 0;
662}
663
664static int ip_tun_from_nlattr(const struct nlattr *attr, 634static int ip_tun_from_nlattr(const struct nlattr *attr,
665 struct sw_flow_match *match, bool is_mask, 635 struct sw_flow_match *match, bool is_mask,
666 bool log) 636 bool log)
@@ -768,19 +738,6 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
768 break; 738 break;
769 case OVS_TUNNEL_KEY_ATTR_PAD: 739 case OVS_TUNNEL_KEY_ATTR_PAD:
770 break; 740 break;
771 case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
772 if (opts_type) {
773 OVS_NLERR(log, "Multiple metadata blocks provided");
774 return -EINVAL;
775 }
776
777 err = erspan_tun_opt_from_nlattr(a, match, is_mask, log);
778 if (err)
779 return err;
780
781 tun_flags |= TUNNEL_ERSPAN_OPT;
782 opts_type = type;
783 break;
784 default: 741 default:
785 OVS_NLERR(log, "Unknown IP tunnel attribute %d", 742 OVS_NLERR(log, "Unknown IP tunnel attribute %d",
786 type); 743 type);
@@ -905,10 +862,6 @@ static int __ip_tun_to_nlattr(struct sk_buff *skb,
905 else if (output->tun_flags & TUNNEL_VXLAN_OPT && 862 else if (output->tun_flags & TUNNEL_VXLAN_OPT &&
906 vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len)) 863 vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len))
907 return -EMSGSIZE; 864 return -EMSGSIZE;
908 else if (output->tun_flags & TUNNEL_ERSPAN_OPT &&
909 nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
910 ((struct erspan_metadata *)tun_opts)->index))
911 return -EMSGSIZE;
912 } 865 }
913 866
914 return 0; 867 return 0;
@@ -2533,8 +2486,6 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
2533 break; 2486 break;
2534 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS: 2487 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
2535 break; 2488 break;
2536 case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
2537 break;
2538 } 2489 }
2539 }; 2490 };
2540 2491
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 8886f15abe90..634cfcb7bba6 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -183,7 +183,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
183 long i; 183 long i;
184 int ret; 184 int ret;
185 185
186 if (rs->rs_bound_addr == 0) { 186 if (rs->rs_bound_addr == 0 || !rs->rs_transport) {
187 ret = -ENOTCONN; /* XXX not a great errno */ 187 ret = -ENOTCONN; /* XXX not a great errno */
188 goto out; 188 goto out;
189 } 189 }
@@ -525,6 +525,9 @@ int rds_rdma_extra_size(struct rds_rdma_args *args)
525 525
526 local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr; 526 local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
527 527
528 if (args->nr_local == 0)
529 return -EINVAL;
530
528 /* figure out the number of pages in the vector */ 531 /* figure out the number of pages in the vector */
529 for (i = 0; i < args->nr_local; i++) { 532 for (i = 0; i < args->nr_local; i++) {
530 if (copy_from_user(&vec, &local_vec[i], 533 if (copy_from_user(&vec, &local_vec[i],
@@ -874,6 +877,7 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
874err: 877err:
875 if (page) 878 if (page)
876 put_page(page); 879 put_page(page);
880 rm->atomic.op_active = 0;
877 kfree(rm->atomic.op_notifier); 881 kfree(rm->atomic.op_notifier);
878 882
879 return ret; 883 return ret;
diff --git a/net/rds/send.c b/net/rds/send.c
index b52cdc8ae428..f72466c63f0c 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1009,6 +1009,9 @@ static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes)
1009 continue; 1009 continue;
1010 1010
1011 if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) { 1011 if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) {
1012 if (cmsg->cmsg_len <
1013 CMSG_LEN(sizeof(struct rds_rdma_args)))
1014 return -EINVAL;
1012 args = CMSG_DATA(cmsg); 1015 args = CMSG_DATA(cmsg);
1013 *rdma_bytes += args->remote_vec.bytes; 1016 *rdma_bytes += args->remote_vec.bytes;
1014 } 1017 }
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 6b7ee71f40c6..ab7356e0ba83 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -90,9 +90,10 @@ void rds_tcp_nonagle(struct socket *sock)
90 sizeof(val)); 90 sizeof(val));
91} 91}
92 92
93u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc) 93u32 rds_tcp_write_seq(struct rds_tcp_connection *tc)
94{ 94{
95 return tcp_sk(tc->t_sock->sk)->snd_nxt; 95 /* seq# of the last byte of data in tcp send buffer */
96 return tcp_sk(tc->t_sock->sk)->write_seq;
96} 97}
97 98
98u32 rds_tcp_snd_una(struct rds_tcp_connection *tc) 99u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 1aafbf7c3011..864ca7d8f019 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -54,7 +54,7 @@ void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp);
54void rds_tcp_reset_callbacks(struct socket *sock, struct rds_conn_path *cp); 54void rds_tcp_reset_callbacks(struct socket *sock, struct rds_conn_path *cp);
55void rds_tcp_restore_callbacks(struct socket *sock, 55void rds_tcp_restore_callbacks(struct socket *sock,
56 struct rds_tcp_connection *tc); 56 struct rds_tcp_connection *tc);
57u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc); 57u32 rds_tcp_write_seq(struct rds_tcp_connection *tc);
58u32 rds_tcp_snd_una(struct rds_tcp_connection *tc); 58u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
59u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq); 59u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
60extern struct rds_transport rds_tcp_transport; 60extern struct rds_transport rds_tcp_transport;
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index dc860d1bb608..9b76e0fa1722 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -86,7 +86,7 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
86 * m_ack_seq is set to the sequence number of the last byte of 86 * m_ack_seq is set to the sequence number of the last byte of
87 * header and data. see rds_tcp_is_acked(). 87 * header and data. see rds_tcp_is_acked().
88 */ 88 */
89 tc->t_last_sent_nxt = rds_tcp_snd_nxt(tc); 89 tc->t_last_sent_nxt = rds_tcp_write_seq(tc);
90 rm->m_ack_seq = tc->t_last_sent_nxt + 90 rm->m_ack_seq = tc->t_last_sent_nxt +
91 sizeof(struct rds_header) + 91 sizeof(struct rds_header) +
92 be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1; 92 be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1;
@@ -98,7 +98,7 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
98 rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED; 98 rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;
99 99
100 rdsdebug("rm %p tcp nxt %u ack_seq %llu\n", 100 rdsdebug("rm %p tcp nxt %u ack_seq %llu\n",
101 rm, rds_tcp_snd_nxt(tc), 101 rm, rds_tcp_write_seq(tc),
102 (unsigned long long)rm->m_ack_seq); 102 (unsigned long long)rm->m_ack_seq);
103 } 103 }
104 104
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 8f7cf4c042be..dcd818fa837e 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -860,6 +860,7 @@ static void rxrpc_sock_destructor(struct sock *sk)
860static int rxrpc_release_sock(struct sock *sk) 860static int rxrpc_release_sock(struct sock *sk)
861{ 861{
862 struct rxrpc_sock *rx = rxrpc_sk(sk); 862 struct rxrpc_sock *rx = rxrpc_sk(sk);
863 struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
863 864
864 _enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt)); 865 _enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));
865 866
@@ -895,8 +896,8 @@ static int rxrpc_release_sock(struct sock *sk)
895 rxrpc_release_calls_on_socket(rx); 896 rxrpc_release_calls_on_socket(rx);
896 flush_workqueue(rxrpc_workqueue); 897 flush_workqueue(rxrpc_workqueue);
897 rxrpc_purge_queue(&sk->sk_receive_queue); 898 rxrpc_purge_queue(&sk->sk_receive_queue);
898 rxrpc_queue_work(&rx->local->rxnet->service_conn_reaper); 899 rxrpc_queue_work(&rxnet->service_conn_reaper);
899 rxrpc_queue_work(&rx->local->rxnet->client_conn_reaper); 900 rxrpc_queue_work(&rxnet->client_conn_reaper);
900 901
901 rxrpc_put_local(rx->local); 902 rxrpc_put_local(rx->local);
902 rx->local = NULL; 903 rx->local = NULL;
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index bda952ffe6a6..ad2ab1103189 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -123,7 +123,7 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
123 else 123 else
124 ack_at = expiry; 124 ack_at = expiry;
125 125
126 ack_at = jiffies + expiry; 126 ack_at += now;
127 if (time_before(ack_at, call->ack_at)) { 127 if (time_before(ack_at, call->ack_at)) {
128 WRITE_ONCE(call->ack_at, ack_at); 128 WRITE_ONCE(call->ack_at, ack_at);
129 rxrpc_reduce_call_timer(call, ack_at, now, 129 rxrpc_reduce_call_timer(call, ack_at, now,
@@ -426,7 +426,7 @@ recheck_state:
426 next = call->expect_rx_by; 426 next = call->expect_rx_by;
427 427
428#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; } 428#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }
429 429
430 set(call->expect_req_by); 430 set(call->expect_req_by);
431 set(call->expect_term_by); 431 set(call->expect_term_by);
432 set(call->ack_at); 432 set(call->ack_at);
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 9e9a8db1bc9c..4ca11be6be3c 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -30,22 +30,18 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
30 struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL; 30 struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL;
31 struct rxrpc_channel *chan; 31 struct rxrpc_channel *chan;
32 struct msghdr msg; 32 struct msghdr msg;
33 struct kvec iov; 33 struct kvec iov[3];
34 struct { 34 struct {
35 struct rxrpc_wire_header whdr; 35 struct rxrpc_wire_header whdr;
36 union { 36 union {
37 struct { 37 __be32 abort_code;
38 __be32 code; 38 struct rxrpc_ackpacket ack;
39 } abort;
40 struct {
41 struct rxrpc_ackpacket ack;
42 u8 padding[3];
43 struct rxrpc_ackinfo info;
44 };
45 }; 39 };
46 } __attribute__((packed)) pkt; 40 } __attribute__((packed)) pkt;
41 struct rxrpc_ackinfo ack_info;
47 size_t len; 42 size_t len;
48 u32 serial, mtu, call_id; 43 int ioc;
44 u32 serial, mtu, call_id, padding;
49 45
50 _enter("%d", conn->debug_id); 46 _enter("%d", conn->debug_id);
51 47
@@ -66,6 +62,13 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
66 msg.msg_controllen = 0; 62 msg.msg_controllen = 0;
67 msg.msg_flags = 0; 63 msg.msg_flags = 0;
68 64
65 iov[0].iov_base = &pkt;
66 iov[0].iov_len = sizeof(pkt.whdr);
67 iov[1].iov_base = &padding;
68 iov[1].iov_len = 3;
69 iov[2].iov_base = &ack_info;
70 iov[2].iov_len = sizeof(ack_info);
71
69 pkt.whdr.epoch = htonl(conn->proto.epoch); 72 pkt.whdr.epoch = htonl(conn->proto.epoch);
70 pkt.whdr.cid = htonl(conn->proto.cid); 73 pkt.whdr.cid = htonl(conn->proto.cid);
71 pkt.whdr.callNumber = htonl(call_id); 74 pkt.whdr.callNumber = htonl(call_id);
@@ -80,8 +83,10 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
80 len = sizeof(pkt.whdr); 83 len = sizeof(pkt.whdr);
81 switch (chan->last_type) { 84 switch (chan->last_type) {
82 case RXRPC_PACKET_TYPE_ABORT: 85 case RXRPC_PACKET_TYPE_ABORT:
83 pkt.abort.code = htonl(chan->last_abort); 86 pkt.abort_code = htonl(chan->last_abort);
84 len += sizeof(pkt.abort); 87 iov[0].iov_len += sizeof(pkt.abort_code);
88 len += sizeof(pkt.abort_code);
89 ioc = 1;
85 break; 90 break;
86 91
87 case RXRPC_PACKET_TYPE_ACK: 92 case RXRPC_PACKET_TYPE_ACK:
@@ -94,13 +99,19 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
94 pkt.ack.serial = htonl(skb ? sp->hdr.serial : 0); 99 pkt.ack.serial = htonl(skb ? sp->hdr.serial : 0);
95 pkt.ack.reason = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE; 100 pkt.ack.reason = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE;
96 pkt.ack.nAcks = 0; 101 pkt.ack.nAcks = 0;
97 pkt.info.rxMTU = htonl(rxrpc_rx_mtu); 102 ack_info.rxMTU = htonl(rxrpc_rx_mtu);
98 pkt.info.maxMTU = htonl(mtu); 103 ack_info.maxMTU = htonl(mtu);
99 pkt.info.rwind = htonl(rxrpc_rx_window_size); 104 ack_info.rwind = htonl(rxrpc_rx_window_size);
100 pkt.info.jumbo_max = htonl(rxrpc_rx_jumbo_max); 105 ack_info.jumbo_max = htonl(rxrpc_rx_jumbo_max);
101 pkt.whdr.flags |= RXRPC_SLOW_START_OK; 106 pkt.whdr.flags |= RXRPC_SLOW_START_OK;
102 len += sizeof(pkt.ack) + sizeof(pkt.info); 107 padding = 0;
108 iov[0].iov_len += sizeof(pkt.ack);
109 len += sizeof(pkt.ack) + 3 + sizeof(ack_info);
110 ioc = 3;
103 break; 111 break;
112
113 default:
114 return;
104 } 115 }
105 116
106 /* Resync with __rxrpc_disconnect_call() and check that the last call 117 /* Resync with __rxrpc_disconnect_call() and check that the last call
@@ -110,9 +121,6 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
110 if (READ_ONCE(chan->last_call) != call_id) 121 if (READ_ONCE(chan->last_call) != call_id)
111 return; 122 return;
112 123
113 iov.iov_base = &pkt;
114 iov.iov_len = len;
115
116 serial = atomic_inc_return(&conn->serial); 124 serial = atomic_inc_return(&conn->serial);
117 pkt.whdr.serial = htonl(serial); 125 pkt.whdr.serial = htonl(serial);
118 126
@@ -127,7 +135,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
127 break; 135 break;
128 } 136 }
129 137
130 kernel_sendmsg(conn->params.local->socket, &msg, &iov, 1, len); 138 kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len);
131 _leave(""); 139 _leave("");
132 return; 140 return;
133} 141}
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 1aad04a32d5e..c628351eb900 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -424,7 +424,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
424 if (earliest != now + MAX_JIFFY_OFFSET) { 424 if (earliest != now + MAX_JIFFY_OFFSET) {
425 _debug("reschedule reaper %ld", (long)earliest - (long)now); 425 _debug("reschedule reaper %ld", (long)earliest - (long)now);
426 ASSERT(time_after(earliest, now)); 426 ASSERT(time_after(earliest, now));
427 rxrpc_set_service_reap_timer(rxnet, earliest); 427 rxrpc_set_service_reap_timer(rxnet, earliest);
428 } 428 }
429 429
430 while (!list_empty(&graveyard)) { 430 while (!list_empty(&graveyard)) {
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 23a5e61d8f79..6fc61400337f 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -976,7 +976,7 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
976 rxrpc_reduce_call_timer(call, expect_rx_by, now, 976 rxrpc_reduce_call_timer(call, expect_rx_by, now,
977 rxrpc_timer_set_for_normal); 977 rxrpc_timer_set_for_normal);
978 } 978 }
979 979
980 switch (sp->hdr.type) { 980 switch (sp->hdr.type) {
981 case RXRPC_PACKET_TYPE_DATA: 981 case RXRPC_PACKET_TYPE_DATA:
982 rxrpc_input_data(call, skb, skew); 982 rxrpc_input_data(call, skb, skew);
@@ -1213,7 +1213,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
1213 goto reupgrade; 1213 goto reupgrade;
1214 conn->service_id = sp->hdr.serviceId; 1214 conn->service_id = sp->hdr.serviceId;
1215 } 1215 }
1216 1216
1217 if (sp->hdr.callNumber == 0) { 1217 if (sp->hdr.callNumber == 0) {
1218 /* Connection-level packet */ 1218 /* Connection-level packet */
1219 _debug("CONN %p {%d}", conn, conn->debug_id); 1219 _debug("CONN %p {%d}", conn, conn->debug_id);
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index a1c53ac066a1..09f2a3e05221 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -233,7 +233,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
233 if (resend_at < 1) 233 if (resend_at < 1)
234 resend_at = 1; 234 resend_at = 1;
235 235
236 resend_at = now + rxrpc_resend_timeout; 236 resend_at += now;
237 WRITE_ONCE(call->resend_at, resend_at); 237 WRITE_ONCE(call->resend_at, resend_at);
238 rxrpc_reduce_call_timer(call, resend_at, now, 238 rxrpc_reduce_call_timer(call, resend_at, now,
239 rxrpc_timer_set_for_send); 239 rxrpc_timer_set_for_send);
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index e29a48ef7fc3..a0ac42b3ed06 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -159,7 +159,7 @@ static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets,
159 if (action == TC_ACT_SHOT) 159 if (action == TC_ACT_SHOT)
160 this_cpu_ptr(gact->common.cpu_qstats)->drops += packets; 160 this_cpu_ptr(gact->common.cpu_qstats)->drops += packets;
161 161
162 tm->lastuse = lastuse; 162 tm->lastuse = max_t(u64, tm->lastuse, lastuse);
163} 163}
164 164
165static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, 165static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a,
diff --git a/net/sched/act_meta_mark.c b/net/sched/act_meta_mark.c
index 1e3f10e5da99..6445184b2759 100644
--- a/net/sched/act_meta_mark.c
+++ b/net/sched/act_meta_mark.c
@@ -22,7 +22,6 @@
22#include <net/pkt_sched.h> 22#include <net/pkt_sched.h>
23#include <uapi/linux/tc_act/tc_ife.h> 23#include <uapi/linux/tc_act/tc_ife.h>
24#include <net/tc_act/tc_ife.h> 24#include <net/tc_act/tc_ife.h>
25#include <linux/rtnetlink.h>
26 25
27static int skbmark_encode(struct sk_buff *skb, void *skbdata, 26static int skbmark_encode(struct sk_buff *skb, void *skbdata,
28 struct tcf_meta_info *e) 27 struct tcf_meta_info *e)
diff --git a/net/sched/act_meta_skbtcindex.c b/net/sched/act_meta_skbtcindex.c
index 2ea1f26c9e96..7221437ca3a6 100644
--- a/net/sched/act_meta_skbtcindex.c
+++ b/net/sched/act_meta_skbtcindex.c
@@ -22,7 +22,6 @@
22#include <net/pkt_sched.h> 22#include <net/pkt_sched.h>
23#include <uapi/linux/tc_act/tc_ife.h> 23#include <uapi/linux/tc_act/tc_ife.h>
24#include <net/tc_act/tc_ife.h> 24#include <net/tc_act/tc_ife.h>
25#include <linux/rtnetlink.h>
26 25
27static int skbtcindex_encode(struct sk_buff *skb, void *skbdata, 26static int skbtcindex_encode(struct sk_buff *skb, void *skbdata,
28 struct tcf_meta_info *e) 27 struct tcf_meta_info *e)
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 8b3e59388480..08b61849c2a2 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -239,7 +239,7 @@ static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
239 struct tcf_t *tm = &m->tcf_tm; 239 struct tcf_t *tm = &m->tcf_tm;
240 240
241 _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets); 241 _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
242 tm->lastuse = lastuse; 242 tm->lastuse = max_t(u64, tm->lastuse, lastuse);
243} 243}
244 244
245static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, 245static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 8b5abcd2f32f..9438969290a6 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -96,23 +96,16 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
96 return ret; 96 return ret;
97} 97}
98 98
99static void tcf_sample_cleanup_rcu(struct rcu_head *rcu) 99static void tcf_sample_cleanup(struct tc_action *a, int bind)
100{ 100{
101 struct tcf_sample *s = container_of(rcu, struct tcf_sample, rcu); 101 struct tcf_sample *s = to_sample(a);
102 struct psample_group *psample_group; 102 struct psample_group *psample_group;
103 103
104 psample_group = rcu_dereference_protected(s->psample_group, 1); 104 psample_group = rtnl_dereference(s->psample_group);
105 RCU_INIT_POINTER(s->psample_group, NULL); 105 RCU_INIT_POINTER(s->psample_group, NULL);
106 psample_group_put(psample_group); 106 psample_group_put(psample_group);
107} 107}
108 108
109static void tcf_sample_cleanup(struct tc_action *a, int bind)
110{
111 struct tcf_sample *s = to_sample(a);
112
113 call_rcu(&s->rcu, tcf_sample_cleanup_rcu);
114}
115
116static bool tcf_sample_dev_ok_push(struct net_device *dev) 109static bool tcf_sample_dev_ok_push(struct net_device *dev)
117{ 110{
118 switch (dev->type) { 111 switch (dev->type) {
@@ -264,7 +257,6 @@ static int __init sample_init_module(void)
264 257
265static void __exit sample_cleanup_module(void) 258static void __exit sample_cleanup_module(void)
266{ 259{
267 rcu_barrier();
268 tcf_unregister_action(&act_sample_ops, &sample_net_ops); 260 tcf_unregister_action(&act_sample_ops, &sample_net_ops);
269} 261}
270 262
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index ddcf04b4ab43..b9d63d2246e6 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -23,7 +23,6 @@
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/kmod.h> 25#include <linux/kmod.h>
26#include <linux/err.h>
27#include <linux/slab.h> 26#include <linux/slab.h>
28#include <net/net_namespace.h> 27#include <net/net_namespace.h>
29#include <net/sock.h> 28#include <net/sock.h>
@@ -352,6 +351,8 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
352{ 351{
353 struct tcf_chain *chain; 352 struct tcf_chain *chain;
354 353
354 if (!block)
355 return;
355 /* Hold a refcnt for all chains, except 0, so that they don't disappear 356 /* Hold a refcnt for all chains, except 0, so that they don't disappear
356 * while we are iterating. 357 * while we are iterating.
357 */ 358 */
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 6fe798c2df1a..a62586e2dbdb 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -42,7 +42,6 @@ struct cls_bpf_prog {
42 struct list_head link; 42 struct list_head link;
43 struct tcf_result res; 43 struct tcf_result res;
44 bool exts_integrated; 44 bool exts_integrated;
45 bool offloaded;
46 u32 gen_flags; 45 u32 gen_flags;
47 struct tcf_exts exts; 46 struct tcf_exts exts;
48 u32 handle; 47 u32 handle;
@@ -148,73 +147,63 @@ static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
148} 147}
149 148
150static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog, 149static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
151 enum tc_clsbpf_command cmd) 150 struct cls_bpf_prog *oldprog)
152{ 151{
153 bool addorrep = cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE;
154 struct tcf_block *block = tp->chain->block; 152 struct tcf_block *block = tp->chain->block;
155 bool skip_sw = tc_skip_sw(prog->gen_flags);
156 struct tc_cls_bpf_offload cls_bpf = {}; 153 struct tc_cls_bpf_offload cls_bpf = {};
154 struct cls_bpf_prog *obj;
155 bool skip_sw;
157 int err; 156 int err;
158 157
158 skip_sw = prog && tc_skip_sw(prog->gen_flags);
159 obj = prog ?: oldprog;
160
159 tc_cls_common_offload_init(&cls_bpf.common, tp); 161 tc_cls_common_offload_init(&cls_bpf.common, tp);
160 cls_bpf.command = cmd; 162 cls_bpf.command = TC_CLSBPF_OFFLOAD;
161 cls_bpf.exts = &prog->exts; 163 cls_bpf.exts = &obj->exts;
162 cls_bpf.prog = prog->filter; 164 cls_bpf.prog = prog ? prog->filter : NULL;
163 cls_bpf.name = prog->bpf_name; 165 cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
164 cls_bpf.exts_integrated = prog->exts_integrated; 166 cls_bpf.name = obj->bpf_name;
165 cls_bpf.gen_flags = prog->gen_flags; 167 cls_bpf.exts_integrated = obj->exts_integrated;
168 cls_bpf.gen_flags = obj->gen_flags;
166 169
167 err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw); 170 err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
168 if (addorrep) { 171 if (prog) {
169 if (err < 0) { 172 if (err < 0) {
170 cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY); 173 cls_bpf_offload_cmd(tp, oldprog, prog);
171 return err; 174 return err;
172 } else if (err > 0) { 175 } else if (err > 0) {
173 prog->gen_flags |= TCA_CLS_FLAGS_IN_HW; 176 prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;
174 } 177 }
175 } 178 }
176 179
177 if (addorrep && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW)) 180 if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
178 return -EINVAL; 181 return -EINVAL;
179 182
180 return 0; 183 return 0;
181} 184}
182 185
186static u32 cls_bpf_flags(u32 flags)
187{
188 return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
189}
190
183static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog, 191static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
184 struct cls_bpf_prog *oldprog) 192 struct cls_bpf_prog *oldprog)
185{ 193{
186 struct cls_bpf_prog *obj = prog; 194 if (prog && oldprog &&
187 enum tc_clsbpf_command cmd; 195 cls_bpf_flags(prog->gen_flags) !=
188 bool skip_sw; 196 cls_bpf_flags(oldprog->gen_flags))
189 int ret; 197 return -EINVAL;
190
191 skip_sw = tc_skip_sw(prog->gen_flags) ||
192 (oldprog && tc_skip_sw(oldprog->gen_flags));
193
194 if (oldprog && oldprog->offloaded) {
195 if (!tc_skip_hw(prog->gen_flags)) {
196 cmd = TC_CLSBPF_REPLACE;
197 } else if (!tc_skip_sw(prog->gen_flags)) {
198 obj = oldprog;
199 cmd = TC_CLSBPF_DESTROY;
200 } else {
201 return -EINVAL;
202 }
203 } else {
204 if (tc_skip_hw(prog->gen_flags))
205 return skip_sw ? -EINVAL : 0;
206 cmd = TC_CLSBPF_ADD;
207 }
208
209 ret = cls_bpf_offload_cmd(tp, obj, cmd);
210 if (ret)
211 return ret;
212 198
213 obj->offloaded = true; 199 if (prog && tc_skip_hw(prog->gen_flags))
214 if (oldprog) 200 prog = NULL;
215 oldprog->offloaded = false; 201 if (oldprog && tc_skip_hw(oldprog->gen_flags))
202 oldprog = NULL;
203 if (!prog && !oldprog)
204 return 0;
216 205
217 return 0; 206 return cls_bpf_offload_cmd(tp, prog, oldprog);
218} 207}
219 208
220static void cls_bpf_stop_offload(struct tcf_proto *tp, 209static void cls_bpf_stop_offload(struct tcf_proto *tp,
@@ -222,25 +211,26 @@ static void cls_bpf_stop_offload(struct tcf_proto *tp,
222{ 211{
223 int err; 212 int err;
224 213
225 if (!prog->offloaded) 214 err = cls_bpf_offload_cmd(tp, NULL, prog);
226 return; 215 if (err)
227
228 err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
229 if (err) {
230 pr_err("Stopping hardware offload failed: %d\n", err); 216 pr_err("Stopping hardware offload failed: %d\n", err);
231 return;
232 }
233
234 prog->offloaded = false;
235} 217}
236 218
237static void cls_bpf_offload_update_stats(struct tcf_proto *tp, 219static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
238 struct cls_bpf_prog *prog) 220 struct cls_bpf_prog *prog)
239{ 221{
240 if (!prog->offloaded) 222 struct tcf_block *block = tp->chain->block;
241 return; 223 struct tc_cls_bpf_offload cls_bpf = {};
224
225 tc_cls_common_offload_init(&cls_bpf.common, tp);
226 cls_bpf.command = TC_CLSBPF_STATS;
227 cls_bpf.exts = &prog->exts;
228 cls_bpf.prog = prog->filter;
229 cls_bpf.name = prog->bpf_name;
230 cls_bpf.exts_integrated = prog->exts_integrated;
231 cls_bpf.gen_flags = prog->gen_flags;
242 232
243 cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS); 233 tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, false);
244} 234}
245 235
246static int cls_bpf_init(struct tcf_proto *tp) 236static int cls_bpf_init(struct tcf_proto *tp)
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index ac152b4f4247..507859cdd1cb 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -45,7 +45,6 @@
45#include <net/netlink.h> 45#include <net/netlink.h>
46#include <net/act_api.h> 46#include <net/act_api.h>
47#include <net/pkt_cls.h> 47#include <net/pkt_cls.h>
48#include <linux/netdevice.h>
49#include <linux/idr.h> 48#include <linux/idr.h>
50 49
51struct tc_u_knode { 50struct tc_u_knode {
diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c
index df3110d69585..07c10bac06a0 100644
--- a/net/sched/em_nbyte.c
+++ b/net/sched/em_nbyte.c
@@ -51,7 +51,7 @@ static int em_nbyte_match(struct sk_buff *skb, struct tcf_ematch *em,
51 if (!tcf_valid_offset(skb, ptr, nbyte->hdr.len)) 51 if (!tcf_valid_offset(skb, ptr, nbyte->hdr.len))
52 return 0; 52 return 0;
53 53
54 return !memcmp(ptr + nbyte->hdr.off, nbyte->pattern, nbyte->hdr.len); 54 return !memcmp(ptr, nbyte->pattern, nbyte->hdr.len);
55} 55}
56 56
57static struct tcf_ematch_ops em_nbyte_ops = { 57static struct tcf_ematch_ops em_nbyte_ops = {
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b6c4f536876b..52529b7f8d96 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -795,6 +795,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
795 tcm->tcm_info = refcount_read(&q->refcnt); 795 tcm->tcm_info = refcount_read(&q->refcnt);
796 if (nla_put_string(skb, TCA_KIND, q->ops->id)) 796 if (nla_put_string(skb, TCA_KIND, q->ops->id))
797 goto nla_put_failure; 797 goto nla_put_failure;
798 if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
799 goto nla_put_failure;
798 if (q->ops->dump && q->ops->dump(q, skb) < 0) 800 if (q->ops->dump && q->ops->dump(q, skb) < 0)
799 goto nla_put_failure; 801 goto nla_put_failure;
800 qlen = q->q.qlen; 802 qlen = q->q.qlen;
@@ -1061,17 +1063,6 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
1061 } 1063 }
1062 1064
1063 if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) { 1065 if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
1064 if (qdisc_is_percpu_stats(sch)) {
1065 sch->cpu_bstats =
1066 netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
1067 if (!sch->cpu_bstats)
1068 goto err_out4;
1069
1070 sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
1071 if (!sch->cpu_qstats)
1072 goto err_out4;
1073 }
1074
1075 if (tca[TCA_STAB]) { 1066 if (tca[TCA_STAB]) {
1076 stab = qdisc_get_stab(tca[TCA_STAB]); 1067 stab = qdisc_get_stab(tca[TCA_STAB]);
1077 if (IS_ERR(stab)) { 1068 if (IS_ERR(stab)) {
@@ -1113,7 +1104,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
1113 ops->destroy(sch); 1104 ops->destroy(sch);
1114err_out3: 1105err_out3:
1115 dev_put(dev); 1106 dev_put(dev);
1116 kfree((char *) sch - sch->padded); 1107 qdisc_free(sch);
1117err_out2: 1108err_out2:
1118 module_put(ops->owner); 1109 module_put(ops->owner);
1119err_out: 1110err_out:
@@ -1121,8 +1112,6 @@ err_out:
1121 return NULL; 1112 return NULL;
1122 1113
1123err_out4: 1114err_out4:
1124 free_percpu(sch->cpu_bstats);
1125 free_percpu(sch->cpu_qstats);
1126 /* 1115 /*
1127 * Any broken qdiscs that would require a ops->reset() here? 1116 * Any broken qdiscs that would require a ops->reset() here?
1128 * The qdisc was never in action so it shouldn't be necessary. 1117 * The qdisc was never in action so it shouldn't be necessary.
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index b30a2c70bd48..531250fceb9e 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -369,6 +369,9 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
369 369
370 ctl = nla_data(tb[TCA_CHOKE_PARMS]); 370 ctl = nla_data(tb[TCA_CHOKE_PARMS]);
371 371
372 if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
373 return -EINVAL;
374
372 if (ctl->limit > CHOKE_MAX_QUEUE) 375 if (ctl->limit > CHOKE_MAX_QUEUE)
373 return -EINVAL; 376 return -EINVAL;
374 377
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 3839cbbdc32b..cac003fddf3e 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -26,6 +26,7 @@
26#include <linux/list.h> 26#include <linux/list.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/if_vlan.h> 28#include <linux/if_vlan.h>
29#include <linux/if_macvlan.h>
29#include <net/sch_generic.h> 30#include <net/sch_generic.h>
30#include <net/pkt_sched.h> 31#include <net/pkt_sched.h>
31#include <net/dst.h> 32#include <net/dst.h>
@@ -277,6 +278,8 @@ unsigned long dev_trans_start(struct net_device *dev)
277 278
278 if (is_vlan_dev(dev)) 279 if (is_vlan_dev(dev))
279 dev = vlan_dev_real_dev(dev); 280 dev = vlan_dev_real_dev(dev);
281 else if (netif_is_macvlan(dev))
282 dev = macvlan_dev_real_dev(dev);
280 res = netdev_get_tx_queue(dev, 0)->trans_start; 283 res = netdev_get_tx_queue(dev, 0)->trans_start;
281 for (i = 1; i < dev->num_tx_queues; i++) { 284 for (i = 1; i < dev->num_tx_queues; i++) {
282 val = netdev_get_tx_queue(dev, i)->trans_start; 285 val = netdev_get_tx_queue(dev, i)->trans_start;
@@ -630,6 +633,19 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
630 qdisc_skb_head_init(&sch->q); 633 qdisc_skb_head_init(&sch->q);
631 spin_lock_init(&sch->q.lock); 634 spin_lock_init(&sch->q.lock);
632 635
636 if (ops->static_flags & TCQ_F_CPUSTATS) {
637 sch->cpu_bstats =
638 netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
639 if (!sch->cpu_bstats)
640 goto errout1;
641
642 sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
643 if (!sch->cpu_qstats) {
644 free_percpu(sch->cpu_bstats);
645 goto errout1;
646 }
647 }
648
633 spin_lock_init(&sch->busylock); 649 spin_lock_init(&sch->busylock);
634 lockdep_set_class(&sch->busylock, 650 lockdep_set_class(&sch->busylock,
635 dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); 651 dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
@@ -639,6 +655,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
639 dev->qdisc_running_key ?: &qdisc_running_key); 655 dev->qdisc_running_key ?: &qdisc_running_key);
640 656
641 sch->ops = ops; 657 sch->ops = ops;
658 sch->flags = ops->static_flags;
642 sch->enqueue = ops->enqueue; 659 sch->enqueue = ops->enqueue;
643 sch->dequeue = ops->dequeue; 660 sch->dequeue = ops->dequeue;
644 sch->dev_queue = dev_queue; 661 sch->dev_queue = dev_queue;
@@ -646,6 +663,8 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
646 refcount_set(&sch->refcnt, 1); 663 refcount_set(&sch->refcnt, 1);
647 664
648 return sch; 665 return sch;
666errout1:
667 kfree(p);
649errout: 668errout:
650 return ERR_PTR(err); 669 return ERR_PTR(err);
651} 670}
@@ -695,7 +714,7 @@ void qdisc_reset(struct Qdisc *qdisc)
695} 714}
696EXPORT_SYMBOL(qdisc_reset); 715EXPORT_SYMBOL(qdisc_reset);
697 716
698static void qdisc_free(struct Qdisc *qdisc) 717void qdisc_free(struct Qdisc *qdisc)
699{ 718{
700 if (qdisc_is_percpu_stats(qdisc)) { 719 if (qdisc_is_percpu_stats(qdisc)) {
701 free_percpu(qdisc->cpu_bstats); 720 free_percpu(qdisc->cpu_bstats);
@@ -1037,6 +1056,8 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
1037 1056
1038 if (!tp_head) { 1057 if (!tp_head) {
1039 RCU_INIT_POINTER(*miniqp->p_miniq, NULL); 1058 RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
1059 /* Wait for flying RCU callback before it is freed. */
1060 rcu_barrier_bh();
1040 return; 1061 return;
1041 } 1062 }
1042 1063
@@ -1052,7 +1073,7 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
1052 rcu_assign_pointer(*miniqp->p_miniq, miniq); 1073 rcu_assign_pointer(*miniqp->p_miniq, miniq);
1053 1074
1054 if (miniq_old) 1075 if (miniq_old)
1055 /* This is counterpart of the rcu barrier above. We need to 1076 /* This is counterpart of the rcu barriers above. We need to
1056 * block potential new user of miniq_old until all readers 1077 * block potential new user of miniq_old until all readers
1057 * are not seeing it. 1078 * are not seeing it.
1058 */ 1079 */
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 17c7130454bd..bc30f9186ac6 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -356,6 +356,9 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
356 struct gred_sched *table = qdisc_priv(sch); 356 struct gred_sched *table = qdisc_priv(sch);
357 struct gred_sched_data *q = table->tab[dp]; 357 struct gred_sched_data *q = table->tab[dp];
358 358
359 if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
360 return -EINVAL;
361
359 if (!q) { 362 if (!q) {
360 table->tab[dp] = q = *prealloc; 363 table->tab[dp] = q = *prealloc;
361 *prealloc = NULL; 364 *prealloc = NULL;
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 5ecc38f35d47..003e1b063447 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -66,7 +66,8 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
66{ 66{
67 struct ingress_sched_data *q = qdisc_priv(sch); 67 struct ingress_sched_data *q = qdisc_priv(sch);
68 struct net_device *dev = qdisc_dev(sch); 68 struct net_device *dev = qdisc_dev(sch);
69 int err; 69
70 net_inc_ingress_queue();
70 71
71 mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress); 72 mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);
72 73
@@ -74,14 +75,7 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
74 q->block_info.chain_head_change = clsact_chain_head_change; 75 q->block_info.chain_head_change = clsact_chain_head_change;
75 q->block_info.chain_head_change_priv = &q->miniqp; 76 q->block_info.chain_head_change_priv = &q->miniqp;
76 77
77 err = tcf_block_get_ext(&q->block, sch, &q->block_info); 78 return tcf_block_get_ext(&q->block, sch, &q->block_info);
78 if (err)
79 return err;
80
81 net_inc_ingress_queue();
82 sch->flags |= TCQ_F_CPUSTATS;
83
84 return 0;
85} 79}
86 80
87static void ingress_destroy(struct Qdisc *sch) 81static void ingress_destroy(struct Qdisc *sch)
@@ -120,6 +114,7 @@ static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
120 .cl_ops = &ingress_class_ops, 114 .cl_ops = &ingress_class_ops,
121 .id = "ingress", 115 .id = "ingress",
122 .priv_size = sizeof(struct ingress_sched_data), 116 .priv_size = sizeof(struct ingress_sched_data),
117 .static_flags = TCQ_F_CPUSTATS,
123 .init = ingress_init, 118 .init = ingress_init,
124 .destroy = ingress_destroy, 119 .destroy = ingress_destroy,
125 .dump = ingress_dump, 120 .dump = ingress_dump,
@@ -172,6 +167,9 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
172 struct net_device *dev = qdisc_dev(sch); 167 struct net_device *dev = qdisc_dev(sch);
173 int err; 168 int err;
174 169
170 net_inc_ingress_queue();
171 net_inc_egress_queue();
172
175 mini_qdisc_pair_init(&q->miniqp_ingress, sch, &dev->miniq_ingress); 173 mini_qdisc_pair_init(&q->miniqp_ingress, sch, &dev->miniq_ingress);
176 174
177 q->ingress_block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS; 175 q->ingress_block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
@@ -188,20 +186,7 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
188 q->egress_block_info.chain_head_change = clsact_chain_head_change; 186 q->egress_block_info.chain_head_change = clsact_chain_head_change;
189 q->egress_block_info.chain_head_change_priv = &q->miniqp_egress; 187 q->egress_block_info.chain_head_change_priv = &q->miniqp_egress;
190 188
191 err = tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info); 189 return tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info);
192 if (err)
193 goto err_egress_block_get;
194
195 net_inc_ingress_queue();
196 net_inc_egress_queue();
197
198 sch->flags |= TCQ_F_CPUSTATS;
199
200 return 0;
201
202err_egress_block_get:
203 tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info);
204 return err;
205} 190}
206 191
207static void clsact_destroy(struct Qdisc *sch) 192static void clsact_destroy(struct Qdisc *sch)
@@ -228,6 +213,7 @@ static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
228 .cl_ops = &clsact_class_ops, 213 .cl_ops = &clsact_class_ops,
229 .id = "clsact", 214 .id = "clsact",
230 .priv_size = sizeof(struct clsact_sched_data), 215 .priv_size = sizeof(struct clsact_sched_data),
216 .static_flags = TCQ_F_CPUSTATS,
231 .init = clsact_init, 217 .init = clsact_init,
232 .destroy = clsact_destroy, 218 .destroy = clsact_destroy,
233 .dump = ingress_dump, 219 .dump = ingress_dump,
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 7f8ea9e297c3..f0747eb87dc4 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -157,6 +157,7 @@ static int red_offload(struct Qdisc *sch, bool enable)
157 .handle = sch->handle, 157 .handle = sch->handle,
158 .parent = sch->parent, 158 .parent = sch->parent,
159 }; 159 };
160 int err;
160 161
161 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) 162 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
162 return -EOPNOTSUPP; 163 return -EOPNOTSUPP;
@@ -171,7 +172,14 @@ static int red_offload(struct Qdisc *sch, bool enable)
171 opt.command = TC_RED_DESTROY; 172 opt.command = TC_RED_DESTROY;
172 } 173 }
173 174
174 return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt); 175 err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
176
177 if (!err && enable)
178 sch->flags |= TCQ_F_OFFLOADED;
179 else
180 sch->flags &= ~TCQ_F_OFFLOADED;
181
182 return err;
175} 183}
176 184
177static void red_destroy(struct Qdisc *sch) 185static void red_destroy(struct Qdisc *sch)
@@ -212,6 +220,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
212 max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0; 220 max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
213 221
214 ctl = nla_data(tb[TCA_RED_PARMS]); 222 ctl = nla_data(tb[TCA_RED_PARMS]);
223 if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
224 return -EINVAL;
215 225
216 if (ctl->limit > 0) { 226 if (ctl->limit > 0) {
217 child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit); 227 child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
@@ -272,7 +282,7 @@ static int red_init(struct Qdisc *sch, struct nlattr *opt)
272 return red_change(sch, opt); 282 return red_change(sch, opt);
273} 283}
274 284
275static int red_dump_offload(struct Qdisc *sch, struct tc_red_qopt *opt) 285static int red_dump_offload_stats(struct Qdisc *sch, struct tc_red_qopt *opt)
276{ 286{
277 struct net_device *dev = qdisc_dev(sch); 287 struct net_device *dev = qdisc_dev(sch);
278 struct tc_red_qopt_offload hw_stats = { 288 struct tc_red_qopt_offload hw_stats = {
@@ -284,21 +294,12 @@ static int red_dump_offload(struct Qdisc *sch, struct tc_red_qopt *opt)
284 .stats.qstats = &sch->qstats, 294 .stats.qstats = &sch->qstats,
285 }, 295 },
286 }; 296 };
287 int err;
288 297
289 opt->flags &= ~TC_RED_OFFLOADED; 298 if (!(sch->flags & TCQ_F_OFFLOADED))
290 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
291 return 0;
292
293 err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
294 &hw_stats);
295 if (err == -EOPNOTSUPP)
296 return 0; 299 return 0;
297 300
298 if (!err) 301 return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
299 opt->flags |= TC_RED_OFFLOADED; 302 &hw_stats);
300
301 return err;
302} 303}
303 304
304static int red_dump(struct Qdisc *sch, struct sk_buff *skb) 305static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -317,7 +318,7 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
317 int err; 318 int err;
318 319
319 sch->qstats.backlog = q->qdisc->qstats.backlog; 320 sch->qstats.backlog = q->qdisc->qstats.backlog;
320 err = red_dump_offload(sch, &opt); 321 err = red_dump_offload_stats(sch, &opt);
321 if (err) 322 if (err)
322 goto nla_put_failure; 323 goto nla_put_failure;
323 324
@@ -345,7 +346,7 @@ static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
345 .marked = q->stats.prob_mark + q->stats.forced_mark, 346 .marked = q->stats.prob_mark + q->stats.forced_mark,
346 }; 347 };
347 348
348 if (tc_can_offload(dev) && dev->netdev_ops->ndo_setup_tc) { 349 if (sch->flags & TCQ_F_OFFLOADED) {
349 struct red_stats hw_stats = {0}; 350 struct red_stats hw_stats = {0};
350 struct tc_red_qopt_offload hw_stats_request = { 351 struct tc_red_qopt_offload hw_stats_request = {
351 .command = TC_RED_XSTATS, 352 .command = TC_RED_XSTATS,
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 09c1203c1711..930e5bd26d3d 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -639,6 +639,9 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
639 if (ctl->divisor && 639 if (ctl->divisor &&
640 (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536)) 640 (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
641 return -EINVAL; 641 return -EINVAL;
642 if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
643 ctl_v1->Wlog))
644 return -EINVAL;
642 if (ctl_v1 && ctl_v1->qth_min) { 645 if (ctl_v1 && ctl_v1->qth_min) {
643 p = kmalloc(sizeof(*p), GFP_KERNEL); 646 p = kmalloc(sizeof(*p), GFP_KERNEL);
644 if (!p) 647 if (!p)
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 7b261afc47b9..7f8baa48e7c2 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -53,6 +53,7 @@ static void sctp_datamsg_init(struct sctp_datamsg *msg)
53 msg->send_failed = 0; 53 msg->send_failed = 0;
54 msg->send_error = 0; 54 msg->send_error = 0;
55 msg->can_delay = 1; 55 msg->can_delay = 1;
56 msg->abandoned = 0;
56 msg->expires_at = 0; 57 msg->expires_at = 0;
57 INIT_LIST_HEAD(&msg->chunks); 58 INIT_LIST_HEAD(&msg->chunks);
58} 59}
@@ -304,6 +305,13 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)
304 if (!chunk->asoc->peer.prsctp_capable) 305 if (!chunk->asoc->peer.prsctp_capable)
305 return 0; 306 return 0;
306 307
308 if (chunk->msg->abandoned)
309 return 1;
310
311 if (!chunk->has_tsn &&
312 !(chunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG))
313 return 0;
314
307 if (SCTP_PR_TTL_ENABLED(chunk->sinfo.sinfo_flags) && 315 if (SCTP_PR_TTL_ENABLED(chunk->sinfo.sinfo_flags) &&
308 time_after(jiffies, chunk->msg->expires_at)) { 316 time_after(jiffies, chunk->msg->expires_at)) {
309 struct sctp_stream_out *streamout = 317 struct sctp_stream_out *streamout =
@@ -316,6 +324,7 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)
316 chunk->asoc->abandoned_unsent[SCTP_PR_INDEX(TTL)]++; 324 chunk->asoc->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
317 streamout->ext->abandoned_unsent[SCTP_PR_INDEX(TTL)]++; 325 streamout->ext->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
318 } 326 }
327 chunk->msg->abandoned = 1;
319 return 1; 328 return 1;
320 } else if (SCTP_PR_RTX_ENABLED(chunk->sinfo.sinfo_flags) && 329 } else if (SCTP_PR_RTX_ENABLED(chunk->sinfo.sinfo_flags) &&
321 chunk->sent_count > chunk->sinfo.sinfo_timetolive) { 330 chunk->sent_count > chunk->sinfo.sinfo_timetolive) {
@@ -324,10 +333,12 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)
324 333
325 chunk->asoc->abandoned_sent[SCTP_PR_INDEX(RTX)]++; 334 chunk->asoc->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
326 streamout->ext->abandoned_sent[SCTP_PR_INDEX(RTX)]++; 335 streamout->ext->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
336 chunk->msg->abandoned = 1;
327 return 1; 337 return 1;
328 } else if (!SCTP_PR_POLICY(chunk->sinfo.sinfo_flags) && 338 } else if (!SCTP_PR_POLICY(chunk->sinfo.sinfo_flags) &&
329 chunk->msg->expires_at && 339 chunk->msg->expires_at &&
330 time_after(jiffies, chunk->msg->expires_at)) { 340 time_after(jiffies, chunk->msg->expires_at)) {
341 chunk->msg->abandoned = 1;
331 return 1; 342 return 1;
332 } 343 }
333 /* PRIO policy is processed by sendmsg, not here */ 344 /* PRIO policy is processed by sendmsg, not here */
diff --git a/net/sctp/debug.c b/net/sctp/debug.c
index 3f619fdcbf0a..291c97b07058 100644
--- a/net/sctp/debug.c
+++ b/net/sctp/debug.c
@@ -78,6 +78,9 @@ const char *sctp_cname(const union sctp_subtype cid)
78 case SCTP_CID_AUTH: 78 case SCTP_CID_AUTH:
79 return "AUTH"; 79 return "AUTH";
80 80
81 case SCTP_CID_RECONF:
82 return "RECONF";
83
81 default: 84 default:
82 break; 85 break;
83 } 86 }
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 621b5ca3fd1c..141c9c466ec1 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -399,20 +399,24 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
399 return; 399 return;
400 } 400 }
401 401
402 if (t->param_flags & SPP_PMTUD_ENABLE) { 402 if (!(t->param_flags & SPP_PMTUD_ENABLE))
403 /* Update transports view of the MTU */ 403 /* We can't allow retransmitting in such case, as the
404 sctp_transport_update_pmtu(t, pmtu); 404 * retransmission would be sized just as before, and thus we
405 405 * would get another icmp, and retransmit again.
406 /* Update association pmtu. */ 406 */
407 sctp_assoc_sync_pmtu(asoc); 407 return;
408 }
409 408
410 /* Retransmit with the new pmtu setting. 409 /* Update transports view of the MTU. Return if no update was needed.
411 * Normally, if PMTU discovery is disabled, an ICMP Fragmentation 410 * If an update wasn't needed/possible, it also doesn't make sense to
412 * Needed will never be sent, but if a message was sent before 411 * try to retransmit now.
413 * PMTU discovery was disabled that was larger than the PMTU, it
414 * would not be fragmented, so it must be re-transmitted fragmented.
415 */ 412 */
413 if (!sctp_transport_update_pmtu(t, pmtu))
414 return;
415
416 /* Update association pmtu. */
417 sctp_assoc_sync_pmtu(asoc);
418
419 /* Retransmit with the new pmtu setting. */
416 sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD); 420 sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
417} 421}
418 422
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 3b18085e3b10..5d4c15bf66d2 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -826,6 +826,7 @@ static int sctp_inet6_af_supported(sa_family_t family, struct sctp_sock *sp)
826 case AF_INET: 826 case AF_INET:
827 if (!__ipv6_only_sock(sctp_opt2sk(sp))) 827 if (!__ipv6_only_sock(sctp_opt2sk(sp)))
828 return 1; 828 return 1;
829 /* fallthru */
829 default: 830 default:
830 return 0; 831 return 0;
831 } 832 }
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 275925b93b29..35bc7106d182 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -45,6 +45,9 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
45 struct sk_buff *segs = ERR_PTR(-EINVAL); 45 struct sk_buff *segs = ERR_PTR(-EINVAL);
46 struct sctphdr *sh; 46 struct sctphdr *sh;
47 47
48 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP))
49 goto out;
50
48 sh = sctp_hdr(skb); 51 sh = sctp_hdr(skb);
49 if (!pskb_may_pull(skb, sizeof(*sh))) 52 if (!pskb_may_pull(skb, sizeof(*sh)))
50 goto out; 53 goto out;
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 4db012aa25f7..c4ec99b20150 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -364,10 +364,12 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
364 list_for_each_entry_safe(chk, temp, queue, transmitted_list) { 364 list_for_each_entry_safe(chk, temp, queue, transmitted_list) {
365 struct sctp_stream_out *streamout; 365 struct sctp_stream_out *streamout;
366 366
367 if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) || 367 if (!chk->msg->abandoned &&
368 chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive) 368 (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
369 chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive))
369 continue; 370 continue;
370 371
372 chk->msg->abandoned = 1;
371 list_del_init(&chk->transmitted_list); 373 list_del_init(&chk->transmitted_list);
372 sctp_insert_list(&asoc->outqueue.abandoned, 374 sctp_insert_list(&asoc->outqueue.abandoned,
373 &chk->transmitted_list); 375 &chk->transmitted_list);
@@ -377,7 +379,8 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
377 asoc->abandoned_sent[SCTP_PR_INDEX(PRIO)]++; 379 asoc->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
378 streamout->ext->abandoned_sent[SCTP_PR_INDEX(PRIO)]++; 380 streamout->ext->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
379 381
380 if (!chk->tsn_gap_acked) { 382 if (queue != &asoc->outqueue.retransmit &&
383 !chk->tsn_gap_acked) {
381 if (chk->transport) 384 if (chk->transport)
382 chk->transport->flight_size -= 385 chk->transport->flight_size -=
383 sctp_data_size(chk); 386 sctp_data_size(chk);
@@ -403,10 +406,13 @@ static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
403 q->sched->unsched_all(&asoc->stream); 406 q->sched->unsched_all(&asoc->stream);
404 407
405 list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) { 408 list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
406 if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) || 409 if (!chk->msg->abandoned &&
407 chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive) 410 (!(chk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG) ||
411 !SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
412 chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive))
408 continue; 413 continue;
409 414
415 chk->msg->abandoned = 1;
410 sctp_sched_dequeue_common(q, chk); 416 sctp_sched_dequeue_common(q, chk);
411 asoc->sent_cnt_removable--; 417 asoc->sent_cnt_removable--;
412 asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++; 418 asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
@@ -912,9 +918,9 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
912 break; 918 break;
913 919
914 case SCTP_CID_ABORT: 920 case SCTP_CID_ABORT:
915 if (sctp_test_T_bit(chunk)) { 921 if (sctp_test_T_bit(chunk))
916 packet->vtag = asoc->c.my_vtag; 922 packet->vtag = asoc->c.my_vtag;
917 } 923 /* fallthru */
918 /* The following chunks are "response" chunks, i.e. 924 /* The following chunks are "response" chunks, i.e.
919 * they are generated in response to something we 925 * they are generated in response to something we
920 * received. If we are sending these, then we can 926 * received. If we are sending these, then we can
@@ -1434,7 +1440,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1434 /* If this chunk has not been acked, stop 1440 /* If this chunk has not been acked, stop
1435 * considering it as 'outstanding'. 1441 * considering it as 'outstanding'.
1436 */ 1442 */
1437 if (!tchunk->tsn_gap_acked) { 1443 if (transmitted_queue != &q->retransmit &&
1444 !tchunk->tsn_gap_acked) {
1438 if (tchunk->transport) 1445 if (tchunk->transport)
1439 tchunk->transport->flight_size -= 1446 tchunk->transport->flight_size -=
1440 sctp_data_size(tchunk); 1447 sctp_data_size(tchunk);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 014847e25648..039fcb618c34 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -85,7 +85,7 @@
85static int sctp_writeable(struct sock *sk); 85static int sctp_writeable(struct sock *sk);
86static void sctp_wfree(struct sk_buff *skb); 86static void sctp_wfree(struct sk_buff *skb);
87static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, 87static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
88 size_t msg_len, struct sock **orig_sk); 88 size_t msg_len);
89static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p); 89static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
90static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p); 90static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
91static int sctp_wait_for_accept(struct sock *sk, long timeo); 91static int sctp_wait_for_accept(struct sock *sk, long timeo);
@@ -335,16 +335,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
335 if (len < sizeof (struct sockaddr)) 335 if (len < sizeof (struct sockaddr))
336 return NULL; 336 return NULL;
337 337
338 if (!opt->pf->af_supported(addr->sa.sa_family, opt))
339 return NULL;
340
338 /* V4 mapped address are really of AF_INET family */ 341 /* V4 mapped address are really of AF_INET family */
339 if (addr->sa.sa_family == AF_INET6 && 342 if (addr->sa.sa_family == AF_INET6 &&
340 ipv6_addr_v4mapped(&addr->v6.sin6_addr)) { 343 ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
341 if (!opt->pf->af_supported(AF_INET, opt)) 344 !opt->pf->af_supported(AF_INET, opt))
342 return NULL; 345 return NULL;
343 } else {
344 /* Does this PF support this AF? */
345 if (!opt->pf->af_supported(addr->sa.sa_family, opt))
346 return NULL;
347 }
348 346
349 /* If we get this far, af is valid. */ 347 /* If we get this far, af is valid. */
350 af = sctp_get_af_specific(addr->sa.sa_family); 348 af = sctp_get_af_specific(addr->sa.sa_family);
@@ -1883,8 +1881,14 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
1883 */ 1881 */
1884 if (sinit) { 1882 if (sinit) {
1885 if (sinit->sinit_num_ostreams) { 1883 if (sinit->sinit_num_ostreams) {
1886 asoc->c.sinit_num_ostreams = 1884 __u16 outcnt = sinit->sinit_num_ostreams;
1887 sinit->sinit_num_ostreams; 1885
1886 asoc->c.sinit_num_ostreams = outcnt;
1887 /* outcnt has been changed, so re-init stream */
1888 err = sctp_stream_init(&asoc->stream, outcnt, 0,
1889 GFP_KERNEL);
1890 if (err)
1891 goto out_free;
1888 } 1892 }
1889 if (sinit->sinit_max_instreams) { 1893 if (sinit->sinit_max_instreams) {
1890 asoc->c.sinit_max_instreams = 1894 asoc->c.sinit_max_instreams =
@@ -1971,7 +1975,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
1971 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 1975 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1972 if (!sctp_wspace(asoc)) { 1976 if (!sctp_wspace(asoc)) {
1973 /* sk can be changed by peel off when waiting for buf. */ 1977 /* sk can be changed by peel off when waiting for buf. */
1974 err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk); 1978 err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
1975 if (err) { 1979 if (err) {
1976 if (err == -ESRCH) { 1980 if (err == -ESRCH) {
1977 /* asoc is already dead. */ 1981 /* asoc is already dead. */
@@ -2277,7 +2281,7 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
2277 2281
2278 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) { 2282 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
2279 event = sctp_ulpevent_make_sender_dry_event(asoc, 2283 event = sctp_ulpevent_make_sender_dry_event(asoc,
2280 GFP_ATOMIC); 2284 GFP_USER | __GFP_NOWARN);
2281 if (!event) 2285 if (!event)
2282 return -ENOMEM; 2286 return -ENOMEM;
2283 2287
@@ -3498,6 +3502,8 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
3498 3502
3499 if (optlen < sizeof(struct sctp_hmacalgo)) 3503 if (optlen < sizeof(struct sctp_hmacalgo))
3500 return -EINVAL; 3504 return -EINVAL;
3505 optlen = min_t(unsigned int, optlen, sizeof(struct sctp_hmacalgo) +
3506 SCTP_AUTH_NUM_HMACS * sizeof(u16));
3501 3507
3502 hmacs = memdup_user(optval, optlen); 3508 hmacs = memdup_user(optval, optlen);
3503 if (IS_ERR(hmacs)) 3509 if (IS_ERR(hmacs))
@@ -3536,6 +3542,11 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
3536 3542
3537 if (optlen <= sizeof(struct sctp_authkey)) 3543 if (optlen <= sizeof(struct sctp_authkey))
3538 return -EINVAL; 3544 return -EINVAL;
3545 /* authkey->sca_keylength is u16, so optlen can't be bigger than
3546 * this.
3547 */
3548 optlen = min_t(unsigned int, optlen, USHRT_MAX +
3549 sizeof(struct sctp_authkey));
3539 3550
3540 authkey = memdup_user(optval, optlen); 3551 authkey = memdup_user(optval, optlen);
3541 if (IS_ERR(authkey)) 3552 if (IS_ERR(authkey))
@@ -3891,13 +3902,20 @@ static int sctp_setsockopt_reset_streams(struct sock *sk,
3891 struct sctp_association *asoc; 3902 struct sctp_association *asoc;
3892 int retval = -EINVAL; 3903 int retval = -EINVAL;
3893 3904
3894 if (optlen < sizeof(struct sctp_reset_streams)) 3905 if (optlen < sizeof(*params))
3895 return -EINVAL; 3906 return -EINVAL;
3907 /* srs_number_streams is u16, so optlen can't be bigger than this. */
3908 optlen = min_t(unsigned int, optlen, USHRT_MAX +
3909 sizeof(__u16) * sizeof(*params));
3896 3910
3897 params = memdup_user(optval, optlen); 3911 params = memdup_user(optval, optlen);
3898 if (IS_ERR(params)) 3912 if (IS_ERR(params))
3899 return PTR_ERR(params); 3913 return PTR_ERR(params);
3900 3914
3915 if (params->srs_number_streams * sizeof(__u16) >
3916 optlen - sizeof(*params))
3917 goto out;
3918
3901 asoc = sctp_id2assoc(sk, params->srs_assoc_id); 3919 asoc = sctp_id2assoc(sk, params->srs_assoc_id);
3902 if (!asoc) 3920 if (!asoc)
3903 goto out; 3921 goto out;
@@ -4494,7 +4512,7 @@ static int sctp_init_sock(struct sock *sk)
4494 SCTP_DBG_OBJCNT_INC(sock); 4512 SCTP_DBG_OBJCNT_INC(sock);
4495 4513
4496 local_bh_disable(); 4514 local_bh_disable();
4497 percpu_counter_inc(&sctp_sockets_allocated); 4515 sk_sockets_allocated_inc(sk);
4498 sock_prot_inuse_add(net, sk->sk_prot, 1); 4516 sock_prot_inuse_add(net, sk->sk_prot, 1);
4499 4517
4500 /* Nothing can fail after this block, otherwise 4518 /* Nothing can fail after this block, otherwise
@@ -4538,7 +4556,7 @@ static void sctp_destroy_sock(struct sock *sk)
4538 } 4556 }
4539 sctp_endpoint_free(sp->ep); 4557 sctp_endpoint_free(sp->ep);
4540 local_bh_disable(); 4558 local_bh_disable();
4541 percpu_counter_dec(&sctp_sockets_allocated); 4559 sk_sockets_allocated_dec(sk);
4542 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 4560 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
4543 local_bh_enable(); 4561 local_bh_enable();
4544} 4562}
@@ -5011,7 +5029,7 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
5011 len = sizeof(int); 5029 len = sizeof(int);
5012 if (put_user(len, optlen)) 5030 if (put_user(len, optlen))
5013 return -EFAULT; 5031 return -EFAULT;
5014 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) 5032 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, len))
5015 return -EFAULT; 5033 return -EFAULT;
5016 return 0; 5034 return 0;
5017} 5035}
@@ -5080,7 +5098,6 @@ static int sctp_getsockopt_peeloff_common(struct sock *sk, sctp_peeloff_arg_t *p
5080 *newfile = sock_alloc_file(newsock, 0, NULL); 5098 *newfile = sock_alloc_file(newsock, 0, NULL);
5081 if (IS_ERR(*newfile)) { 5099 if (IS_ERR(*newfile)) {
5082 put_unused_fd(retval); 5100 put_unused_fd(retval);
5083 sock_release(newsock);
5084 retval = PTR_ERR(*newfile); 5101 retval = PTR_ERR(*newfile);
5085 *newfile = NULL; 5102 *newfile = NULL;
5086 return retval; 5103 return retval;
@@ -5642,6 +5659,9 @@ copy_getaddrs:
5642 err = -EFAULT; 5659 err = -EFAULT;
5643 goto out; 5660 goto out;
5644 } 5661 }
5662 /* XXX: We should have accounted for sizeof(struct sctp_getaddrs) too,
5663 * but we can't change it anymore.
5664 */
5645 if (put_user(bytes_copied, optlen)) 5665 if (put_user(bytes_copied, optlen))
5646 err = -EFAULT; 5666 err = -EFAULT;
5647out: 5667out:
@@ -6078,7 +6098,7 @@ static int sctp_getsockopt_maxseg(struct sock *sk, int len,
6078 params.assoc_id = 0; 6098 params.assoc_id = 0;
6079 } else if (len >= sizeof(struct sctp_assoc_value)) { 6099 } else if (len >= sizeof(struct sctp_assoc_value)) {
6080 len = sizeof(struct sctp_assoc_value); 6100 len = sizeof(struct sctp_assoc_value);
6081 if (copy_from_user(&params, optval, sizeof(params))) 6101 if (copy_from_user(&params, optval, len))
6082 return -EFAULT; 6102 return -EFAULT;
6083 } else 6103 } else
6084 return -EINVAL; 6104 return -EINVAL;
@@ -6248,7 +6268,9 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
6248 6268
6249 if (len < sizeof(struct sctp_authkeyid)) 6269 if (len < sizeof(struct sctp_authkeyid))
6250 return -EINVAL; 6270 return -EINVAL;
6251 if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid))) 6271
6272 len = sizeof(struct sctp_authkeyid);
6273 if (copy_from_user(&val, optval, len))
6252 return -EFAULT; 6274 return -EFAULT;
6253 6275
6254 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 6276 asoc = sctp_id2assoc(sk, val.scact_assoc_id);
@@ -6260,7 +6282,6 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
6260 else 6282 else
6261 val.scact_keynumber = ep->active_key_id; 6283 val.scact_keynumber = ep->active_key_id;
6262 6284
6263 len = sizeof(struct sctp_authkeyid);
6264 if (put_user(len, optlen)) 6285 if (put_user(len, optlen))
6265 return -EFAULT; 6286 return -EFAULT;
6266 if (copy_to_user(optval, &val, len)) 6287 if (copy_to_user(optval, &val, len))
@@ -6286,7 +6307,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
6286 if (len < sizeof(struct sctp_authchunks)) 6307 if (len < sizeof(struct sctp_authchunks))
6287 return -EINVAL; 6308 return -EINVAL;
6288 6309
6289 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 6310 if (copy_from_user(&val, optval, sizeof(val)))
6290 return -EFAULT; 6311 return -EFAULT;
6291 6312
6292 to = p->gauth_chunks; 6313 to = p->gauth_chunks;
@@ -6331,7 +6352,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
6331 if (len < sizeof(struct sctp_authchunks)) 6352 if (len < sizeof(struct sctp_authchunks))
6332 return -EINVAL; 6353 return -EINVAL;
6333 6354
6334 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 6355 if (copy_from_user(&val, optval, sizeof(val)))
6335 return -EFAULT; 6356 return -EFAULT;
6336 6357
6337 to = p->gauth_chunks; 6358 to = p->gauth_chunks;
@@ -7999,12 +8020,12 @@ void sctp_sock_rfree(struct sk_buff *skb)
7999 8020
8000/* Helper function to wait for space in the sndbuf. */ 8021/* Helper function to wait for space in the sndbuf. */
8001static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, 8022static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
8002 size_t msg_len, struct sock **orig_sk) 8023 size_t msg_len)
8003{ 8024{
8004 struct sock *sk = asoc->base.sk; 8025 struct sock *sk = asoc->base.sk;
8005 int err = 0;
8006 long current_timeo = *timeo_p; 8026 long current_timeo = *timeo_p;
8007 DEFINE_WAIT(wait); 8027 DEFINE_WAIT(wait);
8028 int err = 0;
8008 8029
8009 pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, 8030 pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
8010 *timeo_p, msg_len); 8031 *timeo_p, msg_len);
@@ -8033,17 +8054,13 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
8033 release_sock(sk); 8054 release_sock(sk);
8034 current_timeo = schedule_timeout(current_timeo); 8055 current_timeo = schedule_timeout(current_timeo);
8035 lock_sock(sk); 8056 lock_sock(sk);
8036 if (sk != asoc->base.sk) { 8057 if (sk != asoc->base.sk)
8037 release_sock(sk); 8058 goto do_error;
8038 sk = asoc->base.sk;
8039 lock_sock(sk);
8040 }
8041 8059
8042 *timeo_p = current_timeo; 8060 *timeo_p = current_timeo;
8043 } 8061 }
8044 8062
8045out: 8063out:
8046 *orig_sk = sk;
8047 finish_wait(&asoc->wait, &wait); 8064 finish_wait(&asoc->wait, &wait);
8048 8065
8049 /* Release the association's refcnt. */ 8066 /* Release the association's refcnt. */
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 76ea66be0bbe..524dfeb94c41 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -156,9 +156,9 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
156 sctp_stream_outq_migrate(stream, NULL, outcnt); 156 sctp_stream_outq_migrate(stream, NULL, outcnt);
157 sched->sched_all(stream); 157 sched->sched_all(stream);
158 158
159 i = sctp_stream_alloc_out(stream, outcnt, gfp); 159 ret = sctp_stream_alloc_out(stream, outcnt, gfp);
160 if (i) 160 if (ret)
161 return i; 161 goto out;
162 162
163 stream->outcnt = outcnt; 163 stream->outcnt = outcnt;
164 for (i = 0; i < stream->outcnt; i++) 164 for (i = 0; i < stream->outcnt; i++)
@@ -170,19 +170,17 @@ in:
170 if (!incnt) 170 if (!incnt)
171 goto out; 171 goto out;
172 172
173 i = sctp_stream_alloc_in(stream, incnt, gfp); 173 ret = sctp_stream_alloc_in(stream, incnt, gfp);
174 if (i) { 174 if (ret) {
175 ret = -ENOMEM; 175 sched->free(stream);
176 goto free; 176 kfree(stream->out);
177 stream->out = NULL;
178 stream->outcnt = 0;
179 goto out;
177 } 180 }
178 181
179 stream->incnt = incnt; 182 stream->incnt = incnt;
180 goto out;
181 183
182free:
183 sched->free(stream);
184 kfree(stream->out);
185 stream->out = NULL;
186out: 184out:
187 return ret; 185 return ret;
188} 186}
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 1e5a22430cf5..47f82bd794d9 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -248,28 +248,37 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
248 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; 248 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
249} 249}
250 250
251void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) 251bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
252{ 252{
253 struct dst_entry *dst = sctp_transport_dst_check(t); 253 struct dst_entry *dst = sctp_transport_dst_check(t);
254 bool change = true;
254 255
255 if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { 256 if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
256 pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n", 257 pr_warn_ratelimited("%s: Reported pmtu %d too low, using default minimum of %d\n",
257 __func__, pmtu, SCTP_DEFAULT_MINSEGMENT); 258 __func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
258 /* Use default minimum segment size and disable 259 /* Use default minimum segment instead */
259 * pmtu discovery on this transport. 260 pmtu = SCTP_DEFAULT_MINSEGMENT;
260 */
261 t->pathmtu = SCTP_DEFAULT_MINSEGMENT;
262 } else {
263 t->pathmtu = pmtu;
264 } 261 }
262 pmtu = SCTP_TRUNC4(pmtu);
265 263
266 if (dst) { 264 if (dst) {
267 dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu); 265 dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu);
268 dst = sctp_transport_dst_check(t); 266 dst = sctp_transport_dst_check(t);
269 } 267 }
270 268
271 if (!dst) 269 if (!dst) {
272 t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk); 270 t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk);
271 dst = t->dst;
272 }
273
274 if (dst) {
275 /* Re-fetch, as under layers may have a higher minimum size */
276 pmtu = SCTP_TRUNC4(dst_mtu(dst));
277 change = t->pathmtu != pmtu;
278 }
279 t->pathmtu = pmtu;
280
281 return change;
273} 282}
274 283
275/* Caches the dst entry and source address for a transport's destination 284/* Caches the dst entry and source address for a transport's destination
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index a71be33f3afe..e36ec5dd64c6 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -1084,29 +1084,21 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
1084void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, 1084void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
1085 gfp_t gfp) 1085 gfp_t gfp)
1086{ 1086{
1087 struct sctp_association *asoc; 1087 struct sctp_association *asoc = ulpq->asoc;
1088 __u16 needed, freed; 1088 __u32 freed = 0;
1089 1089 __u16 needed;
1090 asoc = ulpq->asoc;
1091 1090
1092 if (chunk) { 1091 needed = ntohs(chunk->chunk_hdr->length) -
1093 needed = ntohs(chunk->chunk_hdr->length); 1092 sizeof(struct sctp_data_chunk);
1094 needed -= sizeof(struct sctp_data_chunk);
1095 } else
1096 needed = SCTP_DEFAULT_MAXWINDOW;
1097
1098 freed = 0;
1099 1093
1100 if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) { 1094 if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
1101 freed = sctp_ulpq_renege_order(ulpq, needed); 1095 freed = sctp_ulpq_renege_order(ulpq, needed);
1102 if (freed < needed) { 1096 if (freed < needed)
1103 freed += sctp_ulpq_renege_frags(ulpq, needed - freed); 1097 freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
1104 }
1105 } 1098 }
1106 /* If able to free enough room, accept this chunk. */ 1099 /* If able to free enough room, accept this chunk. */
1107 if (chunk && (freed >= needed)) { 1100 if (freed >= needed) {
1108 int retval; 1101 int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
1109 retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
1110 /* 1102 /*
1111 * Enter partial delivery if chunk has not been 1103 * Enter partial delivery if chunk has not been
1112 * delivered; otherwise, drain the reassembly queue. 1104 * delivered; otherwise, drain the reassembly queue.
diff --git a/net/socket.c b/net/socket.c
index 42d8e9c9ccd5..6f05d5c4bf30 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -406,8 +406,10 @@ struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
406 name.len = strlen(name.name); 406 name.len = strlen(name.name);
407 } 407 }
408 path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name); 408 path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name);
409 if (unlikely(!path.dentry)) 409 if (unlikely(!path.dentry)) {
410 sock_release(sock);
410 return ERR_PTR(-ENOMEM); 411 return ERR_PTR(-ENOMEM);
412 }
411 path.mnt = mntget(sock_mnt); 413 path.mnt = mntget(sock_mnt);
412 414
413 d_instantiate(path.dentry, SOCK_INODE(sock)); 415 d_instantiate(path.dentry, SOCK_INODE(sock));
@@ -415,9 +417,11 @@ struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
415 file = alloc_file(&path, FMODE_READ | FMODE_WRITE, 417 file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
416 &socket_file_ops); 418 &socket_file_ops);
417 if (IS_ERR(file)) { 419 if (IS_ERR(file)) {
418 /* drop dentry, keep inode */ 420 /* drop dentry, keep inode for a bit */
419 ihold(d_inode(path.dentry)); 421 ihold(d_inode(path.dentry));
420 path_put(&path); 422 path_put(&path);
423 /* ... and now kill it properly */
424 sock_release(sock);
421 return file; 425 return file;
422 } 426 }
423 427
@@ -432,8 +436,10 @@ static int sock_map_fd(struct socket *sock, int flags)
432{ 436{
433 struct file *newfile; 437 struct file *newfile;
434 int fd = get_unused_fd_flags(flags); 438 int fd = get_unused_fd_flags(flags);
435 if (unlikely(fd < 0)) 439 if (unlikely(fd < 0)) {
440 sock_release(sock);
436 return fd; 441 return fd;
442 }
437 443
438 newfile = sock_alloc_file(sock, flags, NULL); 444 newfile = sock_alloc_file(sock, flags, NULL);
439 if (likely(!IS_ERR(newfile))) { 445 if (likely(!IS_ERR(newfile))) {
@@ -1330,19 +1336,9 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
1330 1336
1331 retval = sock_create(family, type, protocol, &sock); 1337 retval = sock_create(family, type, protocol, &sock);
1332 if (retval < 0) 1338 if (retval < 0)
1333 goto out; 1339 return retval;
1334
1335 retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
1336 if (retval < 0)
1337 goto out_release;
1338
1339out:
1340 /* It may be already another descriptor 8) Not kernel problem. */
1341 return retval;
1342 1340
1343out_release: 1341 return sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
1344 sock_release(sock);
1345 return retval;
1346} 1342}
1347 1343
1348/* 1344/*
@@ -1366,87 +1362,72 @@ SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
1366 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK; 1362 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
1367 1363
1368 /* 1364 /*
1365 * reserve descriptors and make sure we won't fail
1366 * to return them to userland.
1367 */
1368 fd1 = get_unused_fd_flags(flags);
1369 if (unlikely(fd1 < 0))
1370 return fd1;
1371
1372 fd2 = get_unused_fd_flags(flags);
1373 if (unlikely(fd2 < 0)) {
1374 put_unused_fd(fd1);
1375 return fd2;
1376 }
1377
1378 err = put_user(fd1, &usockvec[0]);
1379 if (err)
1380 goto out;
1381
1382 err = put_user(fd2, &usockvec[1]);
1383 if (err)
1384 goto out;
1385
1386 /*
1369 * Obtain the first socket and check if the underlying protocol 1387 * Obtain the first socket and check if the underlying protocol
1370 * supports the socketpair call. 1388 * supports the socketpair call.
1371 */ 1389 */
1372 1390
1373 err = sock_create(family, type, protocol, &sock1); 1391 err = sock_create(family, type, protocol, &sock1);
1374 if (err < 0) 1392 if (unlikely(err < 0))
1375 goto out; 1393 goto out;
1376 1394
1377 err = sock_create(family, type, protocol, &sock2); 1395 err = sock_create(family, type, protocol, &sock2);
1378 if (err < 0) 1396 if (unlikely(err < 0)) {
1379 goto out_release_1; 1397 sock_release(sock1);
1380 1398 goto out;
1381 err = sock1->ops->socketpair(sock1, sock2);
1382 if (err < 0)
1383 goto out_release_both;
1384
1385 fd1 = get_unused_fd_flags(flags);
1386 if (unlikely(fd1 < 0)) {
1387 err = fd1;
1388 goto out_release_both;
1389 } 1399 }
1390 1400
1391 fd2 = get_unused_fd_flags(flags); 1401 err = sock1->ops->socketpair(sock1, sock2);
1392 if (unlikely(fd2 < 0)) { 1402 if (unlikely(err < 0)) {
1393 err = fd2; 1403 sock_release(sock2);
1394 goto out_put_unused_1; 1404 sock_release(sock1);
1405 goto out;
1395 } 1406 }
1396 1407
1397 newfile1 = sock_alloc_file(sock1, flags, NULL); 1408 newfile1 = sock_alloc_file(sock1, flags, NULL);
1398 if (IS_ERR(newfile1)) { 1409 if (IS_ERR(newfile1)) {
1399 err = PTR_ERR(newfile1); 1410 err = PTR_ERR(newfile1);
1400 goto out_put_unused_both; 1411 sock_release(sock2);
1412 goto out;
1401 } 1413 }
1402 1414
1403 newfile2 = sock_alloc_file(sock2, flags, NULL); 1415 newfile2 = sock_alloc_file(sock2, flags, NULL);
1404 if (IS_ERR(newfile2)) { 1416 if (IS_ERR(newfile2)) {
1405 err = PTR_ERR(newfile2); 1417 err = PTR_ERR(newfile2);
1406 goto out_fput_1; 1418 fput(newfile1);
1419 goto out;
1407 } 1420 }
1408 1421
1409 err = put_user(fd1, &usockvec[0]);
1410 if (err)
1411 goto out_fput_both;
1412
1413 err = put_user(fd2, &usockvec[1]);
1414 if (err)
1415 goto out_fput_both;
1416
1417 audit_fd_pair(fd1, fd2); 1422 audit_fd_pair(fd1, fd2);
1418 1423
1419 fd_install(fd1, newfile1); 1424 fd_install(fd1, newfile1);
1420 fd_install(fd2, newfile2); 1425 fd_install(fd2, newfile2);
1421 /* fd1 and fd2 may be already another descriptors.
1422 * Not kernel problem.
1423 */
1424
1425 return 0; 1426 return 0;
1426 1427
1427out_fput_both: 1428out:
1428 fput(newfile2);
1429 fput(newfile1);
1430 put_unused_fd(fd2);
1431 put_unused_fd(fd1);
1432 goto out;
1433
1434out_fput_1:
1435 fput(newfile1);
1436 put_unused_fd(fd2);
1437 put_unused_fd(fd1);
1438 sock_release(sock2);
1439 goto out;
1440
1441out_put_unused_both:
1442 put_unused_fd(fd2); 1429 put_unused_fd(fd2);
1443out_put_unused_1:
1444 put_unused_fd(fd1); 1430 put_unused_fd(fd1);
1445out_release_both:
1446 sock_release(sock2);
1447out_release_1:
1448 sock_release(sock1);
1449out:
1450 return err; 1431 return err;
1451} 1432}
1452 1433
@@ -1562,7 +1543,6 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
1562 if (IS_ERR(newfile)) { 1543 if (IS_ERR(newfile)) {
1563 err = PTR_ERR(newfile); 1544 err = PTR_ERR(newfile);
1564 put_unused_fd(newfd); 1545 put_unused_fd(newfd);
1565 sock_release(newsock);
1566 goto out_put; 1546 goto out_put;
1567 } 1547 }
1568 1548
@@ -2641,6 +2621,15 @@ out_fs:
2641 2621
2642core_initcall(sock_init); /* early initcall */ 2622core_initcall(sock_init); /* early initcall */
2643 2623
2624static int __init jit_init(void)
2625{
2626#ifdef CONFIG_BPF_JIT_ALWAYS_ON
2627 bpf_jit_enable = 1;
2628#endif
2629 return 0;
2630}
2631pure_initcall(jit_init);
2632
2644#ifdef CONFIG_PROC_FS 2633#ifdef CONFIG_PROC_FS
2645void socket_seq_show(struct seq_file *seq) 2634void socket_seq_show(struct seq_file *seq)
2646{ 2635{
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index c5fda15ba319..1fdab5c4eda8 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -401,7 +401,7 @@ void strp_data_ready(struct strparser *strp)
401 * allows a thread in BH context to safely check if the process 401 * allows a thread in BH context to safely check if the process
402 * lock is held. In this case, if the lock is held, queue work. 402 * lock is held. In this case, if the lock is held, queue work.
403 */ 403 */
404 if (sock_owned_by_user(strp->sk)) { 404 if (sock_owned_by_user_nocheck(strp->sk)) {
405 queue_work(strp_wq, &strp->work); 405 queue_work(strp_wq, &strp->work);
406 return; 406 return;
407 } 407 }
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index c4778cae58ef..444380f968f1 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -231,6 +231,7 @@ static int gssx_dec_linux_creds(struct xdr_stream *xdr,
231 goto out_free_groups; 231 goto out_free_groups;
232 creds->cr_group_info->gid[i] = kgid; 232 creds->cr_group_info->gid[i] = kgid;
233 } 233 }
234 groups_sort(creds->cr_group_info);
234 235
235 return 0; 236 return 0;
236out_free_groups: 237out_free_groups:
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 5dd4e6c9fef2..26531193fce4 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -481,6 +481,7 @@ static int rsc_parse(struct cache_detail *cd,
481 goto out; 481 goto out;
482 rsci.cred.cr_group_info->gid[i] = kgid; 482 rsci.cred.cr_group_info->gid[i] = kgid;
483 } 483 }
484 groups_sort(rsci.cred.cr_group_info);
484 485
485 /* mech name */ 486 /* mech name */
486 len = qword_get(&mesg, buf, mlen); 487 len = qword_get(&mesg, buf, mlen);
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 740b67d5a733..af7f28fb8102 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -520,6 +520,7 @@ static int unix_gid_parse(struct cache_detail *cd,
520 ug.gi->gid[i] = kgid; 520 ug.gi->gid[i] = kgid;
521 } 521 }
522 522
523 groups_sort(ug.gi);
523 ugp = unix_gid_lookup(cd, uid); 524 ugp = unix_gid_lookup(cd, uid);
524 if (ugp) { 525 if (ugp) {
525 struct cache_head *ch; 526 struct cache_head *ch;
@@ -819,6 +820,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
819 kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv)); 820 kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv));
820 cred->cr_group_info->gid[i] = kgid; 821 cred->cr_group_info->gid[i] = kgid;
821 } 822 }
823 groups_sort(cred->cr_group_info);
822 if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) { 824 if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
823 *authp = rpc_autherr_badverf; 825 *authp = rpc_autherr_badverf;
824 return SVC_DENIED; 826 return SVC_DENIED;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 333b9d697ae5..33b74fd84051 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1001,6 +1001,7 @@ void xprt_transmit(struct rpc_task *task)
1001{ 1001{
1002 struct rpc_rqst *req = task->tk_rqstp; 1002 struct rpc_rqst *req = task->tk_rqstp;
1003 struct rpc_xprt *xprt = req->rq_xprt; 1003 struct rpc_xprt *xprt = req->rq_xprt;
1004 unsigned int connect_cookie;
1004 int status, numreqs; 1005 int status, numreqs;
1005 1006
1006 dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen); 1007 dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
@@ -1024,6 +1025,7 @@ void xprt_transmit(struct rpc_task *task)
1024 } else if (!req->rq_bytes_sent) 1025 } else if (!req->rq_bytes_sent)
1025 return; 1026 return;
1026 1027
1028 connect_cookie = xprt->connect_cookie;
1027 req->rq_xtime = ktime_get(); 1029 req->rq_xtime = ktime_get();
1028 status = xprt->ops->send_request(task); 1030 status = xprt->ops->send_request(task);
1029 trace_xprt_transmit(xprt, req->rq_xid, status); 1031 trace_xprt_transmit(xprt, req->rq_xid, status);
@@ -1047,20 +1049,28 @@ void xprt_transmit(struct rpc_task *task)
1047 xprt->stat.bklog_u += xprt->backlog.qlen; 1049 xprt->stat.bklog_u += xprt->backlog.qlen;
1048 xprt->stat.sending_u += xprt->sending.qlen; 1050 xprt->stat.sending_u += xprt->sending.qlen;
1049 xprt->stat.pending_u += xprt->pending.qlen; 1051 xprt->stat.pending_u += xprt->pending.qlen;
1052 spin_unlock_bh(&xprt->transport_lock);
1050 1053
1051 /* Don't race with disconnect */ 1054 req->rq_connect_cookie = connect_cookie;
1052 if (!xprt_connected(xprt)) 1055 if (rpc_reply_expected(task) && !READ_ONCE(req->rq_reply_bytes_recvd)) {
1053 task->tk_status = -ENOTCONN;
1054 else {
1055 /* 1056 /*
1056 * Sleep on the pending queue since 1057 * Sleep on the pending queue if we're expecting a reply.
1057 * we're expecting a reply. 1058 * The spinlock ensures atomicity between the test of
1059 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
1058 */ 1060 */
1059 if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task)) 1061 spin_lock(&xprt->recv_lock);
1062 if (!req->rq_reply_bytes_recvd) {
1060 rpc_sleep_on(&xprt->pending, task, xprt_timer); 1063 rpc_sleep_on(&xprt->pending, task, xprt_timer);
1061 req->rq_connect_cookie = xprt->connect_cookie; 1064 /*
1065 * Send an extra queue wakeup call if the
1066 * connection was dropped in case the call to
1067 * rpc_sleep_on() raced.
1068 */
1069 if (!xprt_connected(xprt))
1070 xprt_wake_pending_tasks(xprt, -ENOTCONN);
1071 }
1072 spin_unlock(&xprt->recv_lock);
1062 } 1073 }
1063 spin_unlock_bh(&xprt->transport_lock);
1064} 1074}
1065 1075
1066static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task) 1076static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index ed34dc0f144c..a3f2ab283aeb 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -1408,11 +1408,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
1408 dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n", 1408 dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n",
1409 __func__, rep, req, be32_to_cpu(rep->rr_xid)); 1409 __func__, rep, req, be32_to_cpu(rep->rr_xid));
1410 1410
1411 if (list_empty(&req->rl_registered) && 1411 queue_work_on(req->rl_cpu, rpcrdma_receive_wq, &rep->rr_work);
1412 !test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags))
1413 rpcrdma_complete_rqst(rep);
1414 else
1415 queue_work(rpcrdma_receive_wq, &rep->rr_work);
1416 return; 1412 return;
1417 1413
1418out_badstatus: 1414out_badstatus:
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 646c24494ea7..6ee1ad8978f3 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -52,6 +52,7 @@
52#include <linux/slab.h> 52#include <linux/slab.h>
53#include <linux/seq_file.h> 53#include <linux/seq_file.h>
54#include <linux/sunrpc/addr.h> 54#include <linux/sunrpc/addr.h>
55#include <linux/smp.h>
55 56
56#include "xprt_rdma.h" 57#include "xprt_rdma.h"
57 58
@@ -656,6 +657,7 @@ xprt_rdma_allocate(struct rpc_task *task)
656 task->tk_pid, __func__, rqst->rq_callsize, 657 task->tk_pid, __func__, rqst->rq_callsize,
657 rqst->rq_rcvsize, req); 658 rqst->rq_rcvsize, req);
658 659
660 req->rl_cpu = smp_processor_id();
659 req->rl_connect_cookie = 0; /* our reserved value */ 661 req->rl_connect_cookie = 0; /* our reserved value */
660 rpcrdma_set_xprtdata(rqst, req); 662 rpcrdma_set_xprtdata(rqst, req);
661 rqst->rq_buffer = req->rl_sendbuf->rg_base; 663 rqst->rq_buffer = req->rl_sendbuf->rg_base;
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 710b3f77db82..8607c029c0dd 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -83,7 +83,7 @@ rpcrdma_alloc_wq(void)
83 struct workqueue_struct *recv_wq; 83 struct workqueue_struct *recv_wq;
84 84
85 recv_wq = alloc_workqueue("xprtrdma_receive", 85 recv_wq = alloc_workqueue("xprtrdma_receive",
86 WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_HIGHPRI, 86 WQ_MEM_RECLAIM | WQ_HIGHPRI,
87 0); 87 0);
88 if (!recv_wq) 88 if (!recv_wq)
89 return -ENOMEM; 89 return -ENOMEM;
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 51686d9eac5f..1342f743f1c4 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -342,6 +342,7 @@ enum {
342struct rpcrdma_buffer; 342struct rpcrdma_buffer;
343struct rpcrdma_req { 343struct rpcrdma_req {
344 struct list_head rl_list; 344 struct list_head rl_list;
345 int rl_cpu;
345 unsigned int rl_connect_cookie; 346 unsigned int rl_connect_cookie;
346 struct rpcrdma_buffer *rl_buffer; 347 struct rpcrdma_buffer *rl_buffer;
347 struct rpcrdma_rep *rl_reply; 348 struct rpcrdma_rep *rl_reply;
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 47ec121574ce..c8001471da6c 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -324,6 +324,7 @@ restart:
324 if (res) { 324 if (res) {
325 pr_warn("Bearer <%s> rejected, enable failure (%d)\n", 325 pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
326 name, -res); 326 name, -res);
327 kfree(b);
327 return -EINVAL; 328 return -EINVAL;
328 } 329 }
329 330
@@ -347,8 +348,10 @@ restart:
347 if (skb) 348 if (skb)
348 tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr); 349 tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr);
349 350
350 if (tipc_mon_create(net, bearer_id)) 351 if (tipc_mon_create(net, bearer_id)) {
352 bearer_disable(net, b);
351 return -ENOMEM; 353 return -ENOMEM;
354 }
352 355
353 pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n", 356 pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
354 name, 357 name,
diff --git a/net/tipc/group.c b/net/tipc/group.c
index 95fec2c057d6..5f4ffae807ee 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -109,7 +109,8 @@ static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
109static void tipc_group_decr_active(struct tipc_group *grp, 109static void tipc_group_decr_active(struct tipc_group *grp,
110 struct tipc_member *m) 110 struct tipc_member *m)
111{ 111{
112 if (m->state == MBR_ACTIVE || m->state == MBR_RECLAIMING) 112 if (m->state == MBR_ACTIVE || m->state == MBR_RECLAIMING ||
113 m->state == MBR_REMITTED)
113 grp->active_cnt--; 114 grp->active_cnt--;
114} 115}
115 116
@@ -351,8 +352,7 @@ void tipc_group_update_member(struct tipc_member *m, int len)
351 if (m->window >= ADV_IDLE) 352 if (m->window >= ADV_IDLE)
352 return; 353 return;
353 354
354 if (!list_empty(&m->congested)) 355 list_del_init(&m->congested);
355 return;
356 356
357 /* Sort member into congested members' list */ 357 /* Sort member into congested members' list */
358 list_for_each_entry_safe(_m, tmp, &grp->congested, congested) { 358 list_for_each_entry_safe(_m, tmp, &grp->congested, congested) {
@@ -369,18 +369,20 @@ void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack)
369 u16 prev = grp->bc_snd_nxt - 1; 369 u16 prev = grp->bc_snd_nxt - 1;
370 struct tipc_member *m; 370 struct tipc_member *m;
371 struct rb_node *n; 371 struct rb_node *n;
372 u16 ackers = 0;
372 373
373 for (n = rb_first(&grp->members); n; n = rb_next(n)) { 374 for (n = rb_first(&grp->members); n; n = rb_next(n)) {
374 m = container_of(n, struct tipc_member, tree_node); 375 m = container_of(n, struct tipc_member, tree_node);
375 if (tipc_group_is_enabled(m)) { 376 if (tipc_group_is_enabled(m)) {
376 tipc_group_update_member(m, len); 377 tipc_group_update_member(m, len);
377 m->bc_acked = prev; 378 m->bc_acked = prev;
379 ackers++;
378 } 380 }
379 } 381 }
380 382
381 /* Mark number of acknowledges to expect, if any */ 383 /* Mark number of acknowledges to expect, if any */
382 if (ack) 384 if (ack)
383 grp->bc_ackers = grp->member_cnt; 385 grp->bc_ackers = ackers;
384 grp->bc_snd_nxt++; 386 grp->bc_snd_nxt++;
385} 387}
386 388
@@ -561,7 +563,7 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
561 int max_active = grp->max_active; 563 int max_active = grp->max_active;
562 int reclaim_limit = max_active * 3 / 4; 564 int reclaim_limit = max_active * 3 / 4;
563 int active_cnt = grp->active_cnt; 565 int active_cnt = grp->active_cnt;
564 struct tipc_member *m, *rm; 566 struct tipc_member *m, *rm, *pm;
565 567
566 m = tipc_group_find_member(grp, node, port); 568 m = tipc_group_find_member(grp, node, port);
567 if (!m) 569 if (!m)
@@ -604,6 +606,17 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
604 pr_warn_ratelimited("Rcv unexpected msg after REMIT\n"); 606 pr_warn_ratelimited("Rcv unexpected msg after REMIT\n");
605 tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq); 607 tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
606 } 608 }
609 grp->active_cnt--;
610 list_del_init(&m->list);
611 if (list_empty(&grp->pending))
612 return;
613
614 /* Set oldest pending member to active and advertise */
615 pm = list_first_entry(&grp->pending, struct tipc_member, list);
616 pm->state = MBR_ACTIVE;
617 list_move_tail(&pm->list, &grp->active);
618 grp->active_cnt++;
619 tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
607 break; 620 break;
608 case MBR_RECLAIMING: 621 case MBR_RECLAIMING:
609 case MBR_DISCOVERED: 622 case MBR_DISCOVERED:
@@ -648,6 +661,7 @@ static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
648 } else if (mtyp == GRP_REMIT_MSG) { 661 } else if (mtyp == GRP_REMIT_MSG) {
649 msg_set_grp_remitted(hdr, m->window); 662 msg_set_grp_remitted(hdr, m->window);
650 } 663 }
664 msg_set_dest_droppable(hdr, true);
651 __skb_queue_tail(xmitq, skb); 665 __skb_queue_tail(xmitq, skb);
652} 666}
653 667
@@ -689,15 +703,16 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
689 msg_set_grp_bc_seqno(ehdr, m->bc_syncpt); 703 msg_set_grp_bc_seqno(ehdr, m->bc_syncpt);
690 __skb_queue_tail(inputq, m->event_msg); 704 __skb_queue_tail(inputq, m->event_msg);
691 } 705 }
692 if (m->window < ADV_IDLE) 706 list_del_init(&m->congested);
693 tipc_group_update_member(m, 0); 707 tipc_group_update_member(m, 0);
694 else
695 list_del_init(&m->congested);
696 return; 708 return;
697 case GRP_LEAVE_MSG: 709 case GRP_LEAVE_MSG:
698 if (!m) 710 if (!m)
699 return; 711 return;
700 m->bc_syncpt = msg_grp_bc_syncpt(hdr); 712 m->bc_syncpt = msg_grp_bc_syncpt(hdr);
713 list_del_init(&m->list);
714 list_del_init(&m->congested);
715 *usr_wakeup = true;
701 716
702 /* Wait until WITHDRAW event is received */ 717 /* Wait until WITHDRAW event is received */
703 if (m->state != MBR_LEAVING) { 718 if (m->state != MBR_LEAVING) {
@@ -709,8 +724,6 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
709 ehdr = buf_msg(m->event_msg); 724 ehdr = buf_msg(m->event_msg);
710 msg_set_grp_bc_seqno(ehdr, m->bc_syncpt); 725 msg_set_grp_bc_seqno(ehdr, m->bc_syncpt);
711 __skb_queue_tail(inputq, m->event_msg); 726 __skb_queue_tail(inputq, m->event_msg);
712 *usr_wakeup = true;
713 list_del_init(&m->congested);
714 return; 727 return;
715 case GRP_ADV_MSG: 728 case GRP_ADV_MSG:
716 if (!m) 729 if (!m)
@@ -741,14 +754,14 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
741 if (!m || m->state != MBR_RECLAIMING) 754 if (!m || m->state != MBR_RECLAIMING)
742 return; 755 return;
743 756
744 list_del_init(&m->list);
745 grp->active_cnt--;
746 remitted = msg_grp_remitted(hdr); 757 remitted = msg_grp_remitted(hdr);
747 758
748 /* Messages preceding the REMIT still in receive queue */ 759 /* Messages preceding the REMIT still in receive queue */
749 if (m->advertised > remitted) { 760 if (m->advertised > remitted) {
750 m->state = MBR_REMITTED; 761 m->state = MBR_REMITTED;
751 in_flight = m->advertised - remitted; 762 in_flight = m->advertised - remitted;
763 m->advertised = ADV_IDLE + in_flight;
764 return;
752 } 765 }
753 /* All messages preceding the REMIT have been read */ 766 /* All messages preceding the REMIT have been read */
754 if (m->advertised <= remitted) { 767 if (m->advertised <= remitted) {
@@ -760,6 +773,8 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
760 tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq); 773 tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
761 774
762 m->advertised = ADV_IDLE + in_flight; 775 m->advertised = ADV_IDLE + in_flight;
776 grp->active_cnt--;
777 list_del_init(&m->list);
763 778
764 /* Set oldest pending member to active and advertise */ 779 /* Set oldest pending member to active and advertise */
765 if (list_empty(&grp->pending)) 780 if (list_empty(&grp->pending))
@@ -849,19 +864,29 @@ void tipc_group_member_evt(struct tipc_group *grp,
849 *usr_wakeup = true; 864 *usr_wakeup = true;
850 m->usr_pending = false; 865 m->usr_pending = false;
851 node_up = tipc_node_is_up(net, node); 866 node_up = tipc_node_is_up(net, node);
852 867 m->event_msg = NULL;
853 /* Hold back event if more messages might be expected */ 868
854 if (m->state != MBR_LEAVING && node_up) { 869 if (node_up) {
855 m->event_msg = skb; 870 /* Hold back event if a LEAVE msg should be expected */
856 tipc_group_decr_active(grp, m); 871 if (m->state != MBR_LEAVING) {
857 m->state = MBR_LEAVING; 872 m->event_msg = skb;
858 } else { 873 tipc_group_decr_active(grp, m);
859 if (node_up) 874 m->state = MBR_LEAVING;
875 } else {
860 msg_set_grp_bc_seqno(hdr, m->bc_syncpt); 876 msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
861 else 877 __skb_queue_tail(inputq, skb);
878 }
879 } else {
880 if (m->state != MBR_LEAVING) {
881 tipc_group_decr_active(grp, m);
882 m->state = MBR_LEAVING;
862 msg_set_grp_bc_seqno(hdr, m->bc_rcv_nxt); 883 msg_set_grp_bc_seqno(hdr, m->bc_rcv_nxt);
884 } else {
885 msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
886 }
863 __skb_queue_tail(inputq, skb); 887 __skb_queue_tail(inputq, skb);
864 } 888 }
889 list_del_init(&m->list);
865 list_del_init(&m->congested); 890 list_del_init(&m->congested);
866 } 891 }
867 *sk_rcvbuf = tipc_group_rcvbuf_limit(grp); 892 *sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index 8e884ed06d4b..32dc33a94bc7 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -642,9 +642,13 @@ void tipc_mon_delete(struct net *net, int bearer_id)
642{ 642{
643 struct tipc_net *tn = tipc_net(net); 643 struct tipc_net *tn = tipc_net(net);
644 struct tipc_monitor *mon = tipc_monitor(net, bearer_id); 644 struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
645 struct tipc_peer *self = get_self(net, bearer_id); 645 struct tipc_peer *self;
646 struct tipc_peer *peer, *tmp; 646 struct tipc_peer *peer, *tmp;
647 647
648 if (!mon)
649 return;
650
651 self = get_self(net, bearer_id);
648 write_lock_bh(&mon->lock); 652 write_lock_bh(&mon->lock);
649 tn->monitors[bearer_id] = NULL; 653 tn->monitors[bearer_id] = NULL;
650 list_for_each_entry_safe(peer, tmp, &self->list, list) { 654 list_for_each_entry_safe(peer, tmp, &self->list, list) {
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 507017fe0f1b..9036d8756e73 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1880,36 +1880,38 @@ int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
1880 1880
1881 if (strcmp(name, tipc_bclink_name) == 0) { 1881 if (strcmp(name, tipc_bclink_name) == 0) {
1882 err = tipc_nl_add_bc_link(net, &msg); 1882 err = tipc_nl_add_bc_link(net, &msg);
1883 if (err) { 1883 if (err)
1884 nlmsg_free(msg.skb); 1884 goto err_free;
1885 return err;
1886 }
1887 } else { 1885 } else {
1888 int bearer_id; 1886 int bearer_id;
1889 struct tipc_node *node; 1887 struct tipc_node *node;
1890 struct tipc_link *link; 1888 struct tipc_link *link;
1891 1889
1892 node = tipc_node_find_by_name(net, name, &bearer_id); 1890 node = tipc_node_find_by_name(net, name, &bearer_id);
1893 if (!node) 1891 if (!node) {
1894 return -EINVAL; 1892 err = -EINVAL;
1893 goto err_free;
1894 }
1895 1895
1896 tipc_node_read_lock(node); 1896 tipc_node_read_lock(node);
1897 link = node->links[bearer_id].link; 1897 link = node->links[bearer_id].link;
1898 if (!link) { 1898 if (!link) {
1899 tipc_node_read_unlock(node); 1899 tipc_node_read_unlock(node);
1900 nlmsg_free(msg.skb); 1900 err = -EINVAL;
1901 return -EINVAL; 1901 goto err_free;
1902 } 1902 }
1903 1903
1904 err = __tipc_nl_add_link(net, &msg, link, 0); 1904 err = __tipc_nl_add_link(net, &msg, link, 0);
1905 tipc_node_read_unlock(node); 1905 tipc_node_read_unlock(node);
1906 if (err) { 1906 if (err)
1907 nlmsg_free(msg.skb); 1907 goto err_free;
1908 return err;
1909 }
1910 } 1908 }
1911 1909
1912 return genlmsg_reply(msg.skb, info); 1910 return genlmsg_reply(msg.skb, info);
1911
1912err_free:
1913 nlmsg_free(msg.skb);
1914 return err;
1913} 1915}
1914 1916
1915int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info) 1917int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
diff --git a/net/tipc/server.c b/net/tipc/server.c
index acaef80fb88c..d60c30342327 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -314,6 +314,7 @@ static int tipc_accept_from_sock(struct tipc_conn *con)
314 newcon->usr_data = s->tipc_conn_new(newcon->conid); 314 newcon->usr_data = s->tipc_conn_new(newcon->conid);
315 if (!newcon->usr_data) { 315 if (!newcon->usr_data) {
316 sock_release(newsock); 316 sock_release(newsock);
317 conn_put(newcon);
317 return -ENOMEM; 318 return -ENOMEM;
318 } 319 }
319 320
@@ -511,7 +512,7 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type,
511 s = con->server; 512 s = con->server;
512 scbr = s->tipc_conn_new(*conid); 513 scbr = s->tipc_conn_new(*conid);
513 if (!scbr) { 514 if (!scbr) {
514 tipc_close_conn(con); 515 conn_put(con);
515 return false; 516 return false;
516 } 517 }
517 518
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 5d18c0caa92b..3b4084480377 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -727,11 +727,11 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
727 727
728 switch (sk->sk_state) { 728 switch (sk->sk_state) {
729 case TIPC_ESTABLISHED: 729 case TIPC_ESTABLISHED:
730 case TIPC_CONNECTING:
730 if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk)) 731 if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
731 revents |= POLLOUT; 732 revents |= POLLOUT;
732 /* fall thru' */ 733 /* fall thru' */
733 case TIPC_LISTEN: 734 case TIPC_LISTEN:
734 case TIPC_CONNECTING:
735 if (!skb_queue_empty(&sk->sk_receive_queue)) 735 if (!skb_queue_empty(&sk->sk_receive_queue))
736 revents |= POLLIN | POLLRDNORM; 736 revents |= POLLIN | POLLRDNORM;
737 break; 737 break;
@@ -1140,7 +1140,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
1140 __skb_dequeue(arrvq); 1140 __skb_dequeue(arrvq);
1141 __skb_queue_tail(inputq, skb); 1141 __skb_queue_tail(inputq, skb);
1142 } 1142 }
1143 refcount_dec(&skb->users); 1143 kfree_skb(skb);
1144 spin_unlock_bh(&inputq->lock); 1144 spin_unlock_bh(&inputq->lock);
1145 continue; 1145 continue;
1146 } 1146 }
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index ecca64fc6a6f..3deabcab4882 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -371,10 +371,6 @@ static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
371 goto rcu_out; 371 goto rcu_out;
372 } 372 }
373 373
374 tipc_rcv(sock_net(sk), skb, b);
375 rcu_read_unlock();
376 return 0;
377
378rcu_out: 374rcu_out:
379 rcu_read_unlock(); 375 rcu_read_unlock();
380out: 376out:
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index e07ee3ae0023..736719c8314e 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -367,8 +367,10 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
367 367
368 crypto_info = &ctx->crypto_send; 368 crypto_info = &ctx->crypto_send;
369 /* Currently we don't support set crypto info more than one time */ 369 /* Currently we don't support set crypto info more than one time */
370 if (TLS_CRYPTO_INFO_READY(crypto_info)) 370 if (TLS_CRYPTO_INFO_READY(crypto_info)) {
371 rc = -EBUSY;
371 goto out; 372 goto out;
373 }
372 374
373 rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info)); 375 rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
374 if (rc) { 376 if (rc) {
@@ -386,7 +388,7 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
386 case TLS_CIPHER_AES_GCM_128: { 388 case TLS_CIPHER_AES_GCM_128: {
387 if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) { 389 if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) {
388 rc = -EINVAL; 390 rc = -EINVAL;
389 goto out; 391 goto err_crypto_info;
390 } 392 }
391 rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info), 393 rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info),
392 optlen - sizeof(*crypto_info)); 394 optlen - sizeof(*crypto_info));
@@ -398,7 +400,7 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
398 } 400 }
399 default: 401 default:
400 rc = -EINVAL; 402 rc = -EINVAL;
401 goto out; 403 goto err_crypto_info;
402 } 404 }
403 405
404 /* currently SW is default, we will have ethtool in future */ 406 /* currently SW is default, we will have ethtool in future */
@@ -454,6 +456,15 @@ static int tls_init(struct sock *sk)
454 struct tls_context *ctx; 456 struct tls_context *ctx;
455 int rc = 0; 457 int rc = 0;
456 458
459 /* The TLS ulp is currently supported only for TCP sockets
460 * in ESTABLISHED state.
461 * Supporting sockets in LISTEN state will require us
462 * to modify the accept implementation to clone rather then
463 * share the ulp context.
464 */
465 if (sk->sk_state != TCP_ESTABLISHED)
466 return -ENOTSUPP;
467
457 /* allocate tls context */ 468 /* allocate tls context */
458 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 469 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
459 if (!ctx) { 470 if (!ctx) {
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 73d19210dd49..0a9b72fbd761 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -391,7 +391,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
391 391
392 while (msg_data_left(msg)) { 392 while (msg_data_left(msg)) {
393 if (sk->sk_err) { 393 if (sk->sk_err) {
394 ret = sk->sk_err; 394 ret = -sk->sk_err;
395 goto send_end; 395 goto send_end;
396 } 396 }
397 397
@@ -544,7 +544,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
544 size_t copy, required_size; 544 size_t copy, required_size;
545 545
546 if (sk->sk_err) { 546 if (sk->sk_err) {
547 ret = sk->sk_err; 547 ret = -sk->sk_err;
548 goto sendpage_end; 548 goto sendpage_end;
549 } 549 }
550 550
@@ -577,6 +577,8 @@ alloc_payload:
577 get_page(page); 577 get_page(page);
578 sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem; 578 sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
579 sg_set_page(sg, page, copy, offset); 579 sg_set_page(sg, page, copy, offset);
580 sg_unmark_end(sg);
581
580 ctx->sg_plaintext_num_elem++; 582 ctx->sg_plaintext_num_elem++;
581 583
582 sk_mem_charge(sk, copy); 584 sk_mem_charge(sk, copy);
@@ -681,18 +683,17 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx)
681 } 683 }
682 default: 684 default:
683 rc = -EINVAL; 685 rc = -EINVAL;
684 goto out; 686 goto free_priv;
685 } 687 }
686 688
687 ctx->prepend_size = TLS_HEADER_SIZE + nonce_size; 689 ctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
688 ctx->tag_size = tag_size; 690 ctx->tag_size = tag_size;
689 ctx->overhead_size = ctx->prepend_size + ctx->tag_size; 691 ctx->overhead_size = ctx->prepend_size + ctx->tag_size;
690 ctx->iv_size = iv_size; 692 ctx->iv_size = iv_size;
691 ctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE, 693 ctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE, GFP_KERNEL);
692 GFP_KERNEL);
693 if (!ctx->iv) { 694 if (!ctx->iv) {
694 rc = -ENOMEM; 695 rc = -ENOMEM;
695 goto out; 696 goto free_priv;
696 } 697 }
697 memcpy(ctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE); 698 memcpy(ctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
698 memcpy(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size); 699 memcpy(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
@@ -740,7 +741,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx)
740 741
741 rc = crypto_aead_setauthsize(sw_ctx->aead_send, ctx->tag_size); 742 rc = crypto_aead_setauthsize(sw_ctx->aead_send, ctx->tag_size);
742 if (!rc) 743 if (!rc)
743 goto out; 744 return 0;
744 745
745free_aead: 746free_aead:
746 crypto_free_aead(sw_ctx->aead_send); 747 crypto_free_aead(sw_ctx->aead_send);
@@ -751,6 +752,9 @@ free_rec_seq:
751free_iv: 752free_iv:
752 kfree(ctx->iv); 753 kfree(ctx->iv);
753 ctx->iv = NULL; 754 ctx->iv = NULL;
755free_priv:
756 kfree(ctx->priv_ctx);
757 ctx->priv_ctx = NULL;
754out: 758out:
755 return rc; 759 return rc;
756} 760}
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 5d28abf87fbf..c9473d698525 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -951,7 +951,7 @@ static unsigned int vsock_poll(struct file *file, struct socket *sock,
951 * POLLOUT|POLLWRNORM when peer is closed and nothing to read, 951 * POLLOUT|POLLWRNORM when peer is closed and nothing to read,
952 * but local send is not shutdown. 952 * but local send is not shutdown.
953 */ 953 */
954 if (sk->sk_state == TCP_CLOSE) { 954 if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) {
955 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) 955 if (!(sk->sk_shutdown & SEND_SHUTDOWN))
956 mask |= POLLOUT | POLLWRNORM; 956 mask |= POLLOUT | POLLWRNORM;
957 957
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index 5583df708b8c..a827547aa102 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -487,7 +487,7 @@ static void hvs_release(struct vsock_sock *vsk)
487 487
488 lock_sock(sk); 488 lock_sock(sk);
489 489
490 sk->sk_state = SS_DISCONNECTING; 490 sk->sk_state = TCP_CLOSING;
491 vsock_remove_sock(vsk); 491 vsock_remove_sock(vsk);
492 492
493 release_sock(sk); 493 release_sock(sk);
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index 278d979c211a..1d84f91bbfb0 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -23,19 +23,36 @@ ifneq ($(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR),)
23cfg80211-y += extra-certs.o 23cfg80211-y += extra-certs.o
24endif 24endif
25 25
26$(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.x509) 26$(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.hex)
27 @$(kecho) " GEN $@" 27 @$(kecho) " GEN $@"
28 @echo '#include "reg.h"' > $@ 28 @(echo '#include "reg.h"'; \
29 @echo 'const u8 shipped_regdb_certs[] = {' >> $@ 29 echo 'const u8 shipped_regdb_certs[] = {'; \
30 @for f in $^ ; do hexdump -v -e '1/1 "0x%.2x," "\n"' < $$f >> $@ ; done 30 cat $^ ; \
31 @echo '};' >> $@ 31 echo '};'; \
32 @echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);' >> $@ 32 echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \
33 ) > $@
33 34
34$(obj)/extra-certs.c: $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%) \ 35$(obj)/extra-certs.c: $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%) \
35 $(wildcard $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%)/*.x509) 36 $(wildcard $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%)/*.x509)
36 @$(kecho) " GEN $@" 37 @$(kecho) " GEN $@"
37 @echo '#include "reg.h"' > $@ 38 @(set -e; \
38 @echo 'const u8 extra_regdb_certs[] = {' >> $@ 39 allf=""; \
39 @for f in $^ ; do test -f $$f && hexdump -v -e '1/1 "0x%.2x," "\n"' < $$f >> $@ || true ; done 40 for f in $^ ; do \
40 @echo '};' >> $@ 41 # similar to hexdump -v -e '1/1 "0x%.2x," "\n"' \
41 @echo 'unsigned int extra_regdb_certs_len = sizeof(extra_regdb_certs);' >> $@ 42 thisf=$$(od -An -v -tx1 < $$f | \
43 sed -e 's/ /\n/g' | \
44 sed -e 's/^[0-9a-f]\+$$/\0/;t;d' | \
45 sed -e 's/^/0x/;s/$$/,/'); \
46 # file should not be empty - maybe command substitution failed? \
47 test ! -z "$$thisf";\
48 allf=$$allf$$thisf;\
49 done; \
50 ( \
51 echo '#include "reg.h"'; \
52 echo 'const u8 extra_regdb_certs[] = {'; \
53 echo "$$allf"; \
54 echo '};'; \
55 echo 'unsigned int extra_regdb_certs_len = sizeof(extra_regdb_certs);'; \
56 ) > $@)
57
58clean-files += shipped-certs.c extra-certs.c
diff --git a/net/wireless/certs/sforshee.hex b/net/wireless/certs/sforshee.hex
new file mode 100644
index 000000000000..14ea66643ffa
--- /dev/null
+++ b/net/wireless/certs/sforshee.hex
@@ -0,0 +1,86 @@
1/* Seth Forshee's regdb certificate */
20x30, 0x82, 0x02, 0xa4, 0x30, 0x82, 0x01, 0x8c,
30x02, 0x09, 0x00, 0xb2, 0x8d, 0xdf, 0x47, 0xae,
40xf9, 0xce, 0xa7, 0x30, 0x0d, 0x06, 0x09, 0x2a,
50x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0b,
60x05, 0x00, 0x30, 0x13, 0x31, 0x11, 0x30, 0x0f,
70x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x08, 0x73,
80x66, 0x6f, 0x72, 0x73, 0x68, 0x65, 0x65, 0x30,
90x20, 0x17, 0x0d, 0x31, 0x37, 0x31, 0x30, 0x30,
100x36, 0x31, 0x39, 0x34, 0x30, 0x33, 0x35, 0x5a,
110x18, 0x0f, 0x32, 0x31, 0x31, 0x37, 0x30, 0x39,
120x31, 0x32, 0x31, 0x39, 0x34, 0x30, 0x33, 0x35,
130x5a, 0x30, 0x13, 0x31, 0x11, 0x30, 0x0f, 0x06,
140x03, 0x55, 0x04, 0x03, 0x0c, 0x08, 0x73, 0x66,
150x6f, 0x72, 0x73, 0x68, 0x65, 0x65, 0x30, 0x82,
160x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86,
170x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05,
180x00, 0x03, 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82,
190x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, 0xb5,
200x40, 0xe3, 0x9c, 0x28, 0x84, 0x39, 0x03, 0xf2,
210x39, 0xd7, 0x66, 0x2c, 0x41, 0x38, 0x15, 0xac,
220x7e, 0xa5, 0x83, 0x71, 0x25, 0x7e, 0x90, 0x7c,
230x68, 0xdd, 0x6f, 0x3f, 0xd9, 0xd7, 0x59, 0x38,
240x9f, 0x7c, 0x6a, 0x52, 0xc2, 0x03, 0x2a, 0x2d,
250x7e, 0x66, 0xf4, 0x1e, 0xb3, 0x12, 0x70, 0x20,
260x5b, 0xd4, 0x97, 0x32, 0x3d, 0x71, 0x8b, 0x3b,
270x1b, 0x08, 0x17, 0x14, 0x6b, 0x61, 0xc4, 0x57,
280x8b, 0x96, 0x16, 0x1c, 0xfd, 0x24, 0xd5, 0x0b,
290x09, 0xf9, 0x68, 0x11, 0x84, 0xfb, 0xca, 0x51,
300x0c, 0xd1, 0x45, 0x19, 0xda, 0x10, 0x44, 0x8a,
310xd9, 0xfe, 0x76, 0xa9, 0xfd, 0x60, 0x2d, 0x18,
320x0b, 0x28, 0x95, 0xb2, 0x2d, 0xea, 0x88, 0x98,
330xb8, 0xd1, 0x56, 0x21, 0xf0, 0x53, 0x1f, 0xf1,
340x02, 0x6f, 0xe9, 0x46, 0x9b, 0x93, 0x5f, 0x28,
350x90, 0x0f, 0xac, 0x36, 0xfa, 0x68, 0x23, 0x71,
360x57, 0x56, 0xf6, 0xcc, 0xd3, 0xdf, 0x7d, 0x2a,
370xd9, 0x1b, 0x73, 0x45, 0xeb, 0xba, 0x27, 0x85,
380xef, 0x7a, 0x7f, 0xa5, 0xcb, 0x80, 0xc7, 0x30,
390x36, 0xd2, 0x53, 0xee, 0xec, 0xac, 0x1e, 0xe7,
400x31, 0xf1, 0x36, 0xa2, 0x9c, 0x63, 0xc6, 0x65,
410x5b, 0x7f, 0x25, 0x75, 0x68, 0xa1, 0xea, 0xd3,
420x7e, 0x00, 0x5c, 0x9a, 0x5e, 0xd8, 0x20, 0x18,
430x32, 0x77, 0x07, 0x29, 0x12, 0x66, 0x1e, 0x36,
440x73, 0xe7, 0x97, 0x04, 0x41, 0x37, 0xb1, 0xb1,
450x72, 0x2b, 0xf4, 0xa1, 0x29, 0x20, 0x7c, 0x96,
460x79, 0x0b, 0x2b, 0xd0, 0xd8, 0xde, 0xc8, 0x6c,
470x3f, 0x93, 0xfb, 0xc5, 0xee, 0x78, 0x52, 0x11,
480x15, 0x1b, 0x7a, 0xf6, 0xe2, 0x68, 0x99, 0xe7,
490xfb, 0x46, 0x16, 0x84, 0xe3, 0xc7, 0xa1, 0xe6,
500xe0, 0xd2, 0x46, 0xd5, 0xe1, 0xc4, 0x5f, 0xa0,
510x66, 0xf4, 0xda, 0xc4, 0xff, 0x95, 0x1d, 0x02,
520x03, 0x01, 0x00, 0x01, 0x30, 0x0d, 0x06, 0x09,
530x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01,
540x0b, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00,
550x87, 0x03, 0xda, 0xf2, 0x82, 0xc2, 0xdd, 0xaf,
560x7c, 0x44, 0x2f, 0x86, 0xd3, 0x5f, 0x4c, 0x93,
570x48, 0xb9, 0xfe, 0x07, 0x17, 0xbb, 0x21, 0xf7,
580x25, 0x23, 0x4e, 0xaa, 0x22, 0x0c, 0x16, 0xb9,
590x73, 0xae, 0x9d, 0x46, 0x7c, 0x75, 0xd9, 0xc3,
600x49, 0x57, 0x47, 0xbf, 0x33, 0xb7, 0x97, 0xec,
610xf5, 0x40, 0x75, 0xc0, 0x46, 0x22, 0xf0, 0xa0,
620x5d, 0x9c, 0x79, 0x13, 0xa1, 0xff, 0xb8, 0xa3,
630x2f, 0x7b, 0x8e, 0x06, 0x3f, 0xc8, 0xb6, 0xe4,
640x6a, 0x28, 0xf2, 0x34, 0x5c, 0x23, 0x3f, 0x32,
650xc0, 0xe6, 0xad, 0x0f, 0xac, 0xcf, 0x55, 0x74,
660x47, 0x73, 0xd3, 0x01, 0x85, 0xb7, 0x0b, 0x22,
670x56, 0x24, 0x7d, 0x9f, 0x09, 0xa9, 0x0e, 0x86,
680x9e, 0x37, 0x5b, 0x9c, 0x6d, 0x02, 0xd9, 0x8c,
690xc8, 0x50, 0x6a, 0xe2, 0x59, 0xf3, 0x16, 0x06,
700xea, 0xb2, 0x42, 0xb5, 0x58, 0xfe, 0xba, 0xd1,
710x81, 0x57, 0x1a, 0xef, 0xb2, 0x38, 0x88, 0x58,
720xf6, 0xaa, 0xc4, 0x2e, 0x8b, 0x5a, 0x27, 0xe4,
730xa5, 0xe8, 0xa4, 0xca, 0x67, 0x5c, 0xac, 0x72,
740x67, 0xc3, 0x6f, 0x13, 0xc3, 0x2d, 0x35, 0x79,
750xd7, 0x8a, 0xe7, 0xf5, 0xd4, 0x21, 0x30, 0x4a,
760xd5, 0xf6, 0xa3, 0xd9, 0x79, 0x56, 0xf2, 0x0f,
770x10, 0xf7, 0x7d, 0xd0, 0x51, 0x93, 0x2f, 0x47,
780xf8, 0x7d, 0x4b, 0x0a, 0x84, 0x55, 0x12, 0x0a,
790x7d, 0x4e, 0x3b, 0x1f, 0x2b, 0x2f, 0xfc, 0x28,
800xb3, 0x69, 0x34, 0xe1, 0x80, 0x80, 0xbb, 0xe2,
810xaf, 0xb9, 0xd6, 0x30, 0xf1, 0x1d, 0x54, 0x87,
820x23, 0x99, 0x9f, 0x51, 0x03, 0x4c, 0x45, 0x7d,
830x02, 0x65, 0x73, 0xab, 0xfd, 0xcf, 0x94, 0xcc,
840x0d, 0x3a, 0x60, 0xfd, 0x3c, 0x14, 0x2f, 0x16,
850x33, 0xa9, 0x21, 0x1f, 0xcb, 0x50, 0xb1, 0x8f,
860x03, 0xee, 0xa0, 0x66, 0xa9, 0x16, 0x79, 0x14,
diff --git a/net/wireless/certs/sforshee.x509 b/net/wireless/certs/sforshee.x509
deleted file mode 100644
index c6f8f9d6b988..000000000000
--- a/net/wireless/certs/sforshee.x509
+++ /dev/null
Binary files differ
diff --git a/net/wireless/core.c b/net/wireless/core.c
index fdde0d98fde1..a6f3cac8c640 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -439,6 +439,8 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
439 if (rv) 439 if (rv)
440 goto use_default_name; 440 goto use_default_name;
441 } else { 441 } else {
442 int rv;
443
442use_default_name: 444use_default_name:
443 /* NOTE: This is *probably* safe w/out holding rtnl because of 445 /* NOTE: This is *probably* safe w/out holding rtnl because of
444 * the restrictions on phy names. Probably this call could 446 * the restrictions on phy names. Probably this call could
@@ -446,7 +448,11 @@ use_default_name:
446 * phyX. But, might should add some locking and check return 448 * phyX. But, might should add some locking and check return
447 * value, and use a different name if this one exists? 449 * value, and use a different name if this one exists?
448 */ 450 */
449 dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx); 451 rv = dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx);
452 if (rv < 0) {
453 kfree(rdev);
454 return NULL;
455 }
450 } 456 }
451 457
452 INIT_LIST_HEAD(&rdev->wiphy.wdev_list); 458 INIT_LIST_HEAD(&rdev->wiphy.wdev_list);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index d2f7e8b8a097..eaff636169c2 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -507,8 +507,6 @@ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
507void cfg80211_stop_nan(struct cfg80211_registered_device *rdev, 507void cfg80211_stop_nan(struct cfg80211_registered_device *rdev,
508 struct wireless_dev *wdev); 508 struct wireless_dev *wdev);
509 509
510#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
511
512#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS 510#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
513#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond) 511#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond)
514#else 512#else
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index b1ac23ca20c8..542a4fc0a8d7 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2610,7 +2610,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
2610 case NL80211_IFTYPE_AP: 2610 case NL80211_IFTYPE_AP:
2611 if (wdev->ssid_len && 2611 if (wdev->ssid_len &&
2612 nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid)) 2612 nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid))
2613 goto nla_put_failure; 2613 goto nla_put_failure_locked;
2614 break; 2614 break;
2615 case NL80211_IFTYPE_STATION: 2615 case NL80211_IFTYPE_STATION:
2616 case NL80211_IFTYPE_P2P_CLIENT: 2616 case NL80211_IFTYPE_P2P_CLIENT:
@@ -2618,12 +2618,13 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
2618 const u8 *ssid_ie; 2618 const u8 *ssid_ie;
2619 if (!wdev->current_bss) 2619 if (!wdev->current_bss)
2620 break; 2620 break;
2621 rcu_read_lock();
2621 ssid_ie = ieee80211_bss_get_ie(&wdev->current_bss->pub, 2622 ssid_ie = ieee80211_bss_get_ie(&wdev->current_bss->pub,
2622 WLAN_EID_SSID); 2623 WLAN_EID_SSID);
2623 if (!ssid_ie) 2624 if (ssid_ie &&
2624 break; 2625 nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2))
2625 if (nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2)) 2626 goto nla_put_failure_rcu_locked;
2626 goto nla_put_failure; 2627 rcu_read_unlock();
2627 break; 2628 break;
2628 } 2629 }
2629 default: 2630 default:
@@ -2635,6 +2636,10 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
2635 genlmsg_end(msg, hdr); 2636 genlmsg_end(msg, hdr);
2636 return 0; 2637 return 0;
2637 2638
2639 nla_put_failure_rcu_locked:
2640 rcu_read_unlock();
2641 nla_put_failure_locked:
2642 wdev_unlock(wdev);
2638 nla_put_failure: 2643 nla_put_failure:
2639 genlmsg_cancel(msg, hdr); 2644 genlmsg_cancel(msg, hdr);
2640 return -EMSGSIZE; 2645 return -EMSGSIZE;
@@ -9804,7 +9809,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
9804 */ 9809 */
9805 if (!wdev->cqm_config->last_rssi_event_value && wdev->current_bss && 9810 if (!wdev->cqm_config->last_rssi_event_value && wdev->current_bss &&
9806 rdev->ops->get_station) { 9811 rdev->ops->get_station) {
9807 struct station_info sinfo; 9812 struct station_info sinfo = {};
9808 u8 *mac_addr; 9813 u8 *mac_addr;
9809 9814
9810 mac_addr = wdev->current_bss->pub.bssid; 9815 mac_addr = wdev->current_bss->pub.bssid;
@@ -11359,7 +11364,8 @@ static int nl80211_nan_add_func(struct sk_buff *skb,
11359 break; 11364 break;
11360 case NL80211_NAN_FUNC_FOLLOW_UP: 11365 case NL80211_NAN_FUNC_FOLLOW_UP:
11361 if (!tb[NL80211_NAN_FUNC_FOLLOW_UP_ID] || 11366 if (!tb[NL80211_NAN_FUNC_FOLLOW_UP_ID] ||
11362 !tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID]) { 11367 !tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID] ||
11368 !tb[NL80211_NAN_FUNC_FOLLOW_UP_DEST]) {
11363 err = -EINVAL; 11369 err = -EINVAL;
11364 goto out; 11370 goto out;
11365 } 11371 }
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 78e71b0390be..7b42f0bacfd8 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1769,8 +1769,7 @@ static void handle_reg_beacon(struct wiphy *wiphy, unsigned int chan_idx,
1769 if (wiphy->regulatory_flags & REGULATORY_DISABLE_BEACON_HINTS) 1769 if (wiphy->regulatory_flags & REGULATORY_DISABLE_BEACON_HINTS)
1770 return; 1770 return;
1771 1771
1772 chan_before.center_freq = chan->center_freq; 1772 chan_before = *chan;
1773 chan_before.flags = chan->flags;
1774 1773
1775 if (chan->flags & IEEE80211_CHAN_NO_IR) { 1774 if (chan->flags & IEEE80211_CHAN_NO_IR) {
1776 chan->flags &= ~IEEE80211_CHAN_NO_IR; 1775 chan->flags &= ~IEEE80211_CHAN_NO_IR;
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 7ca04a7de85a..05186a47878f 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -1254,8 +1254,7 @@ static int cfg80211_wext_giwrate(struct net_device *dev,
1254{ 1254{
1255 struct wireless_dev *wdev = dev->ieee80211_ptr; 1255 struct wireless_dev *wdev = dev->ieee80211_ptr;
1256 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); 1256 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
1257 /* we are under RTNL - globally locked - so can use a static struct */ 1257 struct station_info sinfo = {};
1258 static struct station_info sinfo;
1259 u8 addr[ETH_ALEN]; 1258 u8 addr[ETH_ALEN];
1260 int err; 1259 int err;
1261 1260
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 30e5746085b8..ac9477189d1c 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -102,6 +102,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
102 102
103 err = dev->xfrmdev_ops->xdo_dev_state_add(x); 103 err = dev->xfrmdev_ops->xdo_dev_state_add(x);
104 if (err) { 104 if (err) {
105 xso->dev = NULL;
105 dev_put(dev); 106 dev_put(dev);
106 return err; 107 return err;
107 } 108 }
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 347ab31574d5..5b2409746ae0 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -8,15 +8,29 @@
8 * 8 *
9 */ 9 */
10 10
11#include <linux/bottom_half.h>
12#include <linux/interrupt.h>
11#include <linux/slab.h> 13#include <linux/slab.h>
12#include <linux/module.h> 14#include <linux/module.h>
13#include <linux/netdevice.h> 15#include <linux/netdevice.h>
16#include <linux/percpu.h>
14#include <net/dst.h> 17#include <net/dst.h>
15#include <net/ip.h> 18#include <net/ip.h>
16#include <net/xfrm.h> 19#include <net/xfrm.h>
17#include <net/ip_tunnels.h> 20#include <net/ip_tunnels.h>
18#include <net/ip6_tunnel.h> 21#include <net/ip6_tunnel.h>
19 22
23struct xfrm_trans_tasklet {
24 struct tasklet_struct tasklet;
25 struct sk_buff_head queue;
26};
27
28struct xfrm_trans_cb {
29 int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
30};
31
32#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))
33
20static struct kmem_cache *secpath_cachep __read_mostly; 34static struct kmem_cache *secpath_cachep __read_mostly;
21 35
22static DEFINE_SPINLOCK(xfrm_input_afinfo_lock); 36static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
@@ -25,6 +39,8 @@ static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[AF_INET6 + 1];
25static struct gro_cells gro_cells; 39static struct gro_cells gro_cells;
26static struct net_device xfrm_napi_dev; 40static struct net_device xfrm_napi_dev;
27 41
42static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet);
43
28int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo) 44int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo)
29{ 45{
30 int err = 0; 46 int err = 0;
@@ -207,7 +223,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
207 xfrm_address_t *daddr; 223 xfrm_address_t *daddr;
208 struct xfrm_mode *inner_mode; 224 struct xfrm_mode *inner_mode;
209 u32 mark = skb->mark; 225 u32 mark = skb->mark;
210 unsigned int family; 226 unsigned int family = AF_UNSPEC;
211 int decaps = 0; 227 int decaps = 0;
212 int async = 0; 228 int async = 0;
213 bool xfrm_gro = false; 229 bool xfrm_gro = false;
@@ -216,6 +232,16 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
216 232
217 if (encap_type < 0) { 233 if (encap_type < 0) {
218 x = xfrm_input_state(skb); 234 x = xfrm_input_state(skb);
235
236 if (unlikely(x->km.state != XFRM_STATE_VALID)) {
237 if (x->km.state == XFRM_STATE_ACQ)
238 XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
239 else
240 XFRM_INC_STATS(net,
241 LINUX_MIB_XFRMINSTATEINVALID);
242 goto drop;
243 }
244
219 family = x->outer_mode->afinfo->family; 245 family = x->outer_mode->afinfo->family;
220 246
221 /* An encap_type of -1 indicates async resumption. */ 247 /* An encap_type of -1 indicates async resumption. */
@@ -467,9 +493,41 @@ int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
467} 493}
468EXPORT_SYMBOL(xfrm_input_resume); 494EXPORT_SYMBOL(xfrm_input_resume);
469 495
496static void xfrm_trans_reinject(unsigned long data)
497{
498 struct xfrm_trans_tasklet *trans = (void *)data;
499 struct sk_buff_head queue;
500 struct sk_buff *skb;
501
502 __skb_queue_head_init(&queue);
503 skb_queue_splice_init(&trans->queue, &queue);
504
505 while ((skb = __skb_dequeue(&queue)))
506 XFRM_TRANS_SKB_CB(skb)->finish(dev_net(skb->dev), NULL, skb);
507}
508
509int xfrm_trans_queue(struct sk_buff *skb,
510 int (*finish)(struct net *, struct sock *,
511 struct sk_buff *))
512{
513 struct xfrm_trans_tasklet *trans;
514
515 trans = this_cpu_ptr(&xfrm_trans_tasklet);
516
517 if (skb_queue_len(&trans->queue) >= netdev_max_backlog)
518 return -ENOBUFS;
519
520 XFRM_TRANS_SKB_CB(skb)->finish = finish;
521 __skb_queue_tail(&trans->queue, skb);
522 tasklet_schedule(&trans->tasklet);
523 return 0;
524}
525EXPORT_SYMBOL(xfrm_trans_queue);
526
470void __init xfrm_input_init(void) 527void __init xfrm_input_init(void)
471{ 528{
472 int err; 529 int err;
530 int i;
473 531
474 init_dummy_netdev(&xfrm_napi_dev); 532 init_dummy_netdev(&xfrm_napi_dev);
475 err = gro_cells_init(&gro_cells, &xfrm_napi_dev); 533 err = gro_cells_init(&gro_cells, &xfrm_napi_dev);
@@ -480,4 +538,13 @@ void __init xfrm_input_init(void)
480 sizeof(struct sec_path), 538 sizeof(struct sec_path),
481 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 539 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
482 NULL); 540 NULL);
541
542 for_each_possible_cpu(i) {
543 struct xfrm_trans_tasklet *trans;
544
545 trans = &per_cpu(xfrm_trans_tasklet, i);
546 __skb_queue_head_init(&trans->queue);
547 tasklet_init(&trans->tasklet, xfrm_trans_reinject,
548 (unsigned long)trans);
549 }
483} 550}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 9542975eb2f9..bd6b0e7a0ee4 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -609,7 +609,8 @@ static void xfrm_hash_rebuild(struct work_struct *work)
609 609
610 /* re-insert all policies by order of creation */ 610 /* re-insert all policies by order of creation */
611 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) { 611 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
612 if (xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) { 612 if (policy->walk.dead ||
613 xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
613 /* skip socket policies */ 614 /* skip socket policies */
614 continue; 615 continue;
615 } 616 }
@@ -974,8 +975,6 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
974 } 975 }
975 if (!cnt) 976 if (!cnt)
976 err = -ESRCH; 977 err = -ESRCH;
977 else
978 xfrm_policy_cache_flush();
979out: 978out:
980 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 979 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
981 return err; 980 return err;
@@ -1168,9 +1167,15 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
1168 again: 1167 again:
1169 pol = rcu_dereference(sk->sk_policy[dir]); 1168 pol = rcu_dereference(sk->sk_policy[dir]);
1170 if (pol != NULL) { 1169 if (pol != NULL) {
1171 bool match = xfrm_selector_match(&pol->selector, fl, family); 1170 bool match;
1172 int err = 0; 1171 int err = 0;
1173 1172
1173 if (pol->family != family) {
1174 pol = NULL;
1175 goto out;
1176 }
1177
1178 match = xfrm_selector_match(&pol->selector, fl, family);
1174 if (match) { 1179 if (match) {
1175 if ((sk->sk_mark & pol->mark.m) != pol->mark.v) { 1180 if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
1176 pol = NULL; 1181 pol = NULL;
@@ -1737,6 +1742,8 @@ void xfrm_policy_cache_flush(void)
1737 bool found = 0; 1742 bool found = 0;
1738 int cpu; 1743 int cpu;
1739 1744
1745 might_sleep();
1746
1740 local_bh_disable(); 1747 local_bh_disable();
1741 rcu_read_lock(); 1748 rcu_read_lock();
1742 for_each_possible_cpu(cpu) { 1749 for_each_possible_cpu(cpu) {
@@ -1833,6 +1840,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
1833 sizeof(struct xfrm_policy *) * num_pols) == 0 && 1840 sizeof(struct xfrm_policy *) * num_pols) == 0 &&
1834 xfrm_xdst_can_reuse(xdst, xfrm, err)) { 1841 xfrm_xdst_can_reuse(xdst, xfrm, err)) {
1835 dst_hold(&xdst->u.dst); 1842 dst_hold(&xdst->u.dst);
1843 xfrm_pols_put(pols, num_pols);
1836 while (err > 0) 1844 while (err > 0)
1837 xfrm_state_put(xfrm[--err]); 1845 xfrm_state_put(xfrm[--err]);
1838 return xdst; 1846 return xdst;
@@ -2055,8 +2063,11 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
2055 if (num_xfrms <= 0) 2063 if (num_xfrms <= 0)
2056 goto make_dummy_bundle; 2064 goto make_dummy_bundle;
2057 2065
2066 local_bh_disable();
2058 xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, 2067 xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
2059 xflo->dst_orig); 2068 xflo->dst_orig);
2069 local_bh_enable();
2070
2060 if (IS_ERR(xdst)) { 2071 if (IS_ERR(xdst)) {
2061 err = PTR_ERR(xdst); 2072 err = PTR_ERR(xdst);
2062 if (err != -EAGAIN) 2073 if (err != -EAGAIN)
@@ -2143,9 +2154,12 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2143 goto no_transform; 2154 goto no_transform;
2144 } 2155 }
2145 2156
2157 local_bh_disable();
2146 xdst = xfrm_resolve_and_create_bundle( 2158 xdst = xfrm_resolve_and_create_bundle(
2147 pols, num_pols, fl, 2159 pols, num_pols, fl,
2148 family, dst_orig); 2160 family, dst_orig);
2161 local_bh_enable();
2162
2149 if (IS_ERR(xdst)) { 2163 if (IS_ERR(xdst)) {
2150 xfrm_pols_put(pols, num_pols); 2164 xfrm_pols_put(pols, num_pols);
2151 err = PTR_ERR(xdst); 2165 err = PTR_ERR(xdst);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 065d89606888..a3785f538018 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -313,13 +313,14 @@ retry:
313 if ((type && !try_module_get(type->owner))) 313 if ((type && !try_module_get(type->owner)))
314 type = NULL; 314 type = NULL;
315 315
316 rcu_read_unlock();
317
316 if (!type && try_load) { 318 if (!type && try_load) {
317 request_module("xfrm-offload-%d-%d", family, proto); 319 request_module("xfrm-offload-%d-%d", family, proto);
318 try_load = 0; 320 try_load = false;
319 goto retry; 321 goto retry;
320 } 322 }
321 323
322 rcu_read_unlock();
323 return type; 324 return type;
324} 325}
325 326
@@ -1343,6 +1344,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
1343 1344
1344 if (orig->aead) { 1345 if (orig->aead) {
1345 x->aead = xfrm_algo_aead_clone(orig->aead); 1346 x->aead = xfrm_algo_aead_clone(orig->aead);
1347 x->geniv = orig->geniv;
1346 if (!x->aead) 1348 if (!x->aead)
1347 goto error; 1349 goto error;
1348 } 1350 }
@@ -1533,8 +1535,12 @@ out:
1533 err = -EINVAL; 1535 err = -EINVAL;
1534 spin_lock_bh(&x1->lock); 1536 spin_lock_bh(&x1->lock);
1535 if (likely(x1->km.state == XFRM_STATE_VALID)) { 1537 if (likely(x1->km.state == XFRM_STATE_VALID)) {
1536 if (x->encap && x1->encap) 1538 if (x->encap && x1->encap &&
1539 x->encap->encap_type == x1->encap->encap_type)
1537 memcpy(x1->encap, x->encap, sizeof(*x1->encap)); 1540 memcpy(x1->encap, x->encap, sizeof(*x1->encap));
1541 else if (x->encap || x1->encap)
1542 goto fail;
1543
1538 if (x->coaddr && x1->coaddr) { 1544 if (x->coaddr && x1->coaddr) {
1539 memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr)); 1545 memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
1540 } 1546 }
@@ -1551,6 +1557,8 @@ out:
1551 x->km.state = XFRM_STATE_DEAD; 1557 x->km.state = XFRM_STATE_DEAD;
1552 __xfrm_state_put(x); 1558 __xfrm_state_put(x);
1553 } 1559 }
1560
1561fail:
1554 spin_unlock_bh(&x1->lock); 1562 spin_unlock_bh(&x1->lock);
1555 1563
1556 xfrm_state_put(x1); 1564 xfrm_state_put(x1);
@@ -2264,8 +2272,6 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
2264 goto error; 2272 goto error;
2265 } 2273 }
2266 2274
2267 x->km.state = XFRM_STATE_VALID;
2268
2269error: 2275error:
2270 return err; 2276 return err;
2271} 2277}
@@ -2274,7 +2280,13 @@ EXPORT_SYMBOL(__xfrm_init_state);
2274 2280
2275int xfrm_init_state(struct xfrm_state *x) 2281int xfrm_init_state(struct xfrm_state *x)
2276{ 2282{
2277 return __xfrm_init_state(x, true, false); 2283 int err;
2284
2285 err = __xfrm_init_state(x, true, false);
2286 if (!err)
2287 x->km.state = XFRM_STATE_VALID;
2288
2289 return err;
2278} 2290}
2279 2291
2280EXPORT_SYMBOL(xfrm_init_state); 2292EXPORT_SYMBOL(xfrm_init_state);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 983b0233767b..7f52b8eb177d 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -598,13 +598,6 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
598 goto error; 598 goto error;
599 } 599 }
600 600
601 if (attrs[XFRMA_OFFLOAD_DEV]) {
602 err = xfrm_dev_state_add(net, x,
603 nla_data(attrs[XFRMA_OFFLOAD_DEV]));
604 if (err)
605 goto error;
606 }
607
608 if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn, 601 if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
609 attrs[XFRMA_REPLAY_ESN_VAL]))) 602 attrs[XFRMA_REPLAY_ESN_VAL])))
610 goto error; 603 goto error;
@@ -620,6 +613,14 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
620 /* override default values from above */ 613 /* override default values from above */
621 xfrm_update_ae_params(x, attrs, 0); 614 xfrm_update_ae_params(x, attrs, 0);
622 615
616 /* configure the hardware if offload is requested */
617 if (attrs[XFRMA_OFFLOAD_DEV]) {
618 err = xfrm_dev_state_add(net, x,
619 nla_data(attrs[XFRMA_OFFLOAD_DEV]));
620 if (err)
621 goto error;
622 }
623
623 return x; 624 return x;
624 625
625error: 626error:
@@ -662,6 +663,9 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
662 goto out; 663 goto out;
663 } 664 }
664 665
666 if (x->km.state == XFRM_STATE_VOID)
667 x->km.state = XFRM_STATE_VALID;
668
665 c.seq = nlh->nlmsg_seq; 669 c.seq = nlh->nlmsg_seq;
666 c.portid = nlh->nlmsg_pid; 670 c.portid = nlh->nlmsg_pid;
667 c.event = nlh->nlmsg_type; 671 c.event = nlh->nlmsg_type;
@@ -1419,11 +1423,14 @@ static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
1419 1423
1420static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family) 1424static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
1421{ 1425{
1426 u16 prev_family;
1422 int i; 1427 int i;
1423 1428
1424 if (nr > XFRM_MAX_DEPTH) 1429 if (nr > XFRM_MAX_DEPTH)
1425 return -EINVAL; 1430 return -EINVAL;
1426 1431
1432 prev_family = family;
1433
1427 for (i = 0; i < nr; i++) { 1434 for (i = 0; i < nr; i++) {
1428 /* We never validated the ut->family value, so many 1435 /* We never validated the ut->family value, so many
1429 * applications simply leave it at zero. The check was 1436 * applications simply leave it at zero. The check was
@@ -1435,6 +1442,12 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
1435 if (!ut[i].family) 1442 if (!ut[i].family)
1436 ut[i].family = family; 1443 ut[i].family = family;
1437 1444
1445 if ((ut[i].mode == XFRM_MODE_TRANSPORT) &&
1446 (ut[i].family != prev_family))
1447 return -EINVAL;
1448
1449 prev_family = ut[i].family;
1450
1438 switch (ut[i].family) { 1451 switch (ut[i].family) {
1439 case AF_INET: 1452 case AF_INET:
1440 break; 1453 break;
@@ -1445,6 +1458,21 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
1445 default: 1458 default:
1446 return -EINVAL; 1459 return -EINVAL;
1447 } 1460 }
1461
1462 switch (ut[i].id.proto) {
1463 case IPPROTO_AH:
1464 case IPPROTO_ESP:
1465 case IPPROTO_COMP:
1466#if IS_ENABLED(CONFIG_IPV6)
1467 case IPPROTO_ROUTING:
1468 case IPPROTO_DSTOPTS:
1469#endif
1470 case IPSEC_PROTO_ANY:
1471 break;
1472 default:
1473 return -EINVAL;
1474 }
1475
1448 } 1476 }
1449 1477
1450 return 0; 1478 return 0;
@@ -2470,7 +2498,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
2470 [XFRMA_PROTO] = { .type = NLA_U8 }, 2498 [XFRMA_PROTO] = { .type = NLA_U8 },
2471 [XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) }, 2499 [XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) },
2472 [XFRMA_OFFLOAD_DEV] = { .len = sizeof(struct xfrm_user_offload) }, 2500 [XFRMA_OFFLOAD_DEV] = { .len = sizeof(struct xfrm_user_offload) },
2473 [XFRMA_OUTPUT_MARK] = { .len = NLA_U32 }, 2501 [XFRMA_OUTPUT_MARK] = { .type = NLA_U32 },
2474}; 2502};
2475 2503
2476static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = { 2504static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
index 522ca9252d6c..242631aa4ea2 100644
--- a/samples/bpf/bpf_load.c
+++ b/samples/bpf/bpf_load.c
@@ -193,8 +193,18 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
193 return -1; 193 return -1;
194 } 194 }
195 event_fd[prog_cnt - 1] = efd; 195 event_fd[prog_cnt - 1] = efd;
196 ioctl(efd, PERF_EVENT_IOC_ENABLE, 0); 196 err = ioctl(efd, PERF_EVENT_IOC_ENABLE, 0);
197 ioctl(efd, PERF_EVENT_IOC_SET_BPF, fd); 197 if (err < 0) {
198 printf("ioctl PERF_EVENT_IOC_ENABLE failed err %s\n",
199 strerror(errno));
200 return -1;
201 }
202 err = ioctl(efd, PERF_EVENT_IOC_SET_BPF, fd);
203 if (err < 0) {
204 printf("ioctl PERF_EVENT_IOC_SET_BPF failed err %s\n",
205 strerror(errno));
206 return -1;
207 }
198 208
199 return 0; 209 return 0;
200} 210}
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index cb8997ed0149..47cddf32aeba 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -265,12 +265,18 @@ else
265objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable) 265objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable)
266endif 266endif
267 267
268ifdef CONFIG_MODVERSIONS
269objtool_o = $(@D)/.tmp_$(@F)
270else
271objtool_o = $(@)
272endif
273
268# 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory 274# 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory
269# 'OBJECT_FILES_NON_STANDARD_foo.o := 'y': skip objtool checking for a file 275# 'OBJECT_FILES_NON_STANDARD_foo.o := 'y': skip objtool checking for a file
270# 'OBJECT_FILES_NON_STANDARD_foo.o := 'n': override directory skip for a file 276# 'OBJECT_FILES_NON_STANDARD_foo.o := 'n': override directory skip for a file
271cmd_objtool = $(if $(patsubst y%,, \ 277cmd_objtool = $(if $(patsubst y%,, \
272 $(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \ 278 $(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \
273 $(__objtool_obj) $(objtool_args) "$(@)";) 279 $(__objtool_obj) $(objtool_args) "$(objtool_o)";)
274objtool_obj = $(if $(patsubst y%,, \ 280objtool_obj = $(if $(patsubst y%,, \
275 $(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \ 281 $(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \
276 $(__objtool_obj)) 282 $(__objtool_obj))
@@ -286,16 +292,16 @@ objtool_dep = $(objtool_obj) \
286define rule_cc_o_c 292define rule_cc_o_c
287 $(call echo-cmd,checksrc) $(cmd_checksrc) \ 293 $(call echo-cmd,checksrc) $(cmd_checksrc) \
288 $(call cmd_and_fixdep,cc_o_c) \ 294 $(call cmd_and_fixdep,cc_o_c) \
289 $(cmd_modversions_c) \
290 $(cmd_checkdoc) \ 295 $(cmd_checkdoc) \
291 $(call echo-cmd,objtool) $(cmd_objtool) \ 296 $(call echo-cmd,objtool) $(cmd_objtool) \
297 $(cmd_modversions_c) \
292 $(call echo-cmd,record_mcount) $(cmd_record_mcount) 298 $(call echo-cmd,record_mcount) $(cmd_record_mcount)
293endef 299endef
294 300
295define rule_as_o_S 301define rule_as_o_S
296 $(call cmd_and_fixdep,as_o_S) \ 302 $(call cmd_and_fixdep,as_o_S) \
297 $(cmd_modversions_S) \ 303 $(call echo-cmd,objtool) $(cmd_objtool) \
298 $(call echo-cmd,objtool) $(cmd_objtool) 304 $(cmd_modversions_S)
299endef 305endef
300 306
301# List module undefined symbols (or empty line if not enabled) 307# List module undefined symbols (or empty line if not enabled)
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 040aa79e1d9d..31031f10fe56 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -6233,28 +6233,6 @@ sub process {
6233 } 6233 }
6234 } 6234 }
6235 6235
6236# whine about ACCESS_ONCE
6237 if ($^V && $^V ge 5.10.0 &&
6238 $line =~ /\bACCESS_ONCE\s*$balanced_parens\s*(=(?!=))?\s*($FuncArg)?/) {
6239 my $par = $1;
6240 my $eq = $2;
6241 my $fun = $3;
6242 $par =~ s/^\(\s*(.*)\s*\)$/$1/;
6243 if (defined($eq)) {
6244 if (WARN("PREFER_WRITE_ONCE",
6245 "Prefer WRITE_ONCE(<FOO>, <BAR>) over ACCESS_ONCE(<FOO>) = <BAR>\n" . $herecurr) &&
6246 $fix) {
6247 $fixed[$fixlinenr] =~ s/\bACCESS_ONCE\s*\(\s*\Q$par\E\s*\)\s*$eq\s*\Q$fun\E/WRITE_ONCE($par, $fun)/;
6248 }
6249 } else {
6250 if (WARN("PREFER_READ_ONCE",
6251 "Prefer READ_ONCE(<FOO>) over ACCESS_ONCE(<FOO>)\n" . $herecurr) &&
6252 $fix) {
6253 $fixed[$fixlinenr] =~ s/\bACCESS_ONCE\s*\(\s*\Q$par\E\s*\)/READ_ONCE($par)/;
6254 }
6255 }
6256 }
6257
6258# check for mutex_trylock_recursive usage 6236# check for mutex_trylock_recursive usage
6259 if ($line =~ /mutex_trylock_recursive/) { 6237 if ($line =~ /mutex_trylock_recursive/) {
6260 ERROR("LOCKING", 6238 ERROR("LOCKING",
diff --git a/scripts/decodecode b/scripts/decodecode
index 438120da1361..5ea071099330 100755
--- a/scripts/decodecode
+++ b/scripts/decodecode
@@ -59,6 +59,14 @@ disas() {
59 ${CROSS_COMPILE}strip $1.o 59 ${CROSS_COMPILE}strip $1.o
60 fi 60 fi
61 61
62 if [ "$ARCH" = "arm64" ]; then
63 if [ $width -eq 4 ]; then
64 type=inst
65 fi
66
67 ${CROSS_COMPILE}strip $1.o
68 fi
69
62 ${CROSS_COMPILE}objdump $OBJDUMPFLAGS -S $1.o | \ 70 ${CROSS_COMPILE}objdump $OBJDUMPFLAGS -S $1.o | \
63 grep -v "/tmp\|Disassembly\|\.text\|^$" > $1.dis 2>&1 71 grep -v "/tmp\|Disassembly\|\.text\|^$" > $1.dis 2>&1
64} 72}
diff --git a/scripts/faddr2line b/scripts/faddr2line
index 39e07d8574dd..7721d5b2b0c0 100755
--- a/scripts/faddr2line
+++ b/scripts/faddr2line
@@ -44,10 +44,10 @@
44set -o errexit 44set -o errexit
45set -o nounset 45set -o nounset
46 46
47READELF="${CROSS_COMPILE}readelf" 47READELF="${CROSS_COMPILE:-}readelf"
48ADDR2LINE="${CROSS_COMPILE}addr2line" 48ADDR2LINE="${CROSS_COMPILE:-}addr2line"
49SIZE="${CROSS_COMPILE}size" 49SIZE="${CROSS_COMPILE:-}size"
50NM="${CROSS_COMPILE}nm" 50NM="${CROSS_COMPILE:-}nm"
51 51
52command -v awk >/dev/null 2>&1 || die "awk isn't installed" 52command -v awk >/dev/null 2>&1 || die "awk isn't installed"
53command -v ${READELF} >/dev/null 2>&1 || die "readelf isn't installed" 53command -v ${READELF} >/dev/null 2>&1 || die "readelf isn't installed"
diff --git a/scripts/gdb/linux/tasks.py b/scripts/gdb/linux/tasks.py
index 1bf949c43b76..f6ab3ccf698f 100644
--- a/scripts/gdb/linux/tasks.py
+++ b/scripts/gdb/linux/tasks.py
@@ -96,6 +96,8 @@ def get_thread_info(task):
96 thread_info_addr = task.address + ia64_task_size 96 thread_info_addr = task.address + ia64_task_size
97 thread_info = thread_info_addr.cast(thread_info_ptr_type) 97 thread_info = thread_info_addr.cast(thread_info_ptr_type)
98 else: 98 else:
99 if task.type.fields()[0].type == thread_info_type.get_type():
100 return task['thread_info']
99 thread_info = task['stack'].cast(thread_info_ptr_type) 101 thread_info = task['stack'].cast(thread_info_ptr_type)
100 return thread_info.dereference() 102 return thread_info.dereference()
101 103
diff --git a/scripts/genksyms/.gitignore b/scripts/genksyms/.gitignore
index 86dc07a01b43..e7836b47f060 100644
--- a/scripts/genksyms/.gitignore
+++ b/scripts/genksyms/.gitignore
@@ -1,4 +1,3 @@
1*.hash.c
2*.lex.c 1*.lex.c
3*.tab.c 2*.tab.c
4*.tab.h 3*.tab.h
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index cbf4996dd9c1..8cee597d33a5 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -893,7 +893,10 @@ static enum string_value_kind expr_parse_string(const char *str,
893 switch (type) { 893 switch (type) {
894 case S_BOOLEAN: 894 case S_BOOLEAN:
895 case S_TRISTATE: 895 case S_TRISTATE:
896 return k_string; 896 val->s = !strcmp(str, "n") ? 0 :
897 !strcmp(str, "m") ? 1 :
898 !strcmp(str, "y") ? 2 : -1;
899 return k_signed;
897 case S_INT: 900 case S_INT:
898 val->s = strtoll(str, &tail, 10); 901 val->s = strtoll(str, &tail, 10);
899 kind = k_signed; 902 kind = k_signed;
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index bd29a92b4b48..df0f045a9a89 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -3248,4 +3248,4 @@ if ($verbose && $warnings) {
3248 print STDERR "$warnings warnings\n"; 3248 print STDERR "$warnings warnings\n";
3249} 3249}
3250 3250
3251exit($errors); 3251exit($output_mode eq "none" ? 0 : $errors);
diff --git a/security/Kconfig b/security/Kconfig
index e8e449444e65..b0cb9a5f9448 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -54,6 +54,17 @@ config SECURITY_NETWORK
54 implement socket and networking access controls. 54 implement socket and networking access controls.
55 If you are unsure how to answer this question, answer N. 55 If you are unsure how to answer this question, answer N.
56 56
57config PAGE_TABLE_ISOLATION
58 bool "Remove the kernel mapping in user mode"
59 default y
60 depends on X86_64 && !UML
61 help
62 This feature reduces the number of hardware side channels by
63 ensuring that the majority of kernel addresses are not mapped
64 into userspace.
65
66 See Documentation/x86/pti.txt for more details.
67
57config SECURITY_INFINIBAND 68config SECURITY_INFINIBAND
58 bool "Infiniband Security Hooks" 69 bool "Infiniband Security Hooks"
59 depends on SECURITY && INFINIBAND 70 depends on SECURITY && INFINIBAND
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 04ba9d0718ea..6a54d2ffa840 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -330,10 +330,7 @@ static struct aa_profile *__attach_match(const char *name,
330 continue; 330 continue;
331 331
332 if (profile->xmatch) { 332 if (profile->xmatch) {
333 if (profile->xmatch_len == len) { 333 if (profile->xmatch_len >= len) {
334 conflict = true;
335 continue;
336 } else if (profile->xmatch_len > len) {
337 unsigned int state; 334 unsigned int state;
338 u32 perm; 335 u32 perm;
339 336
@@ -342,6 +339,10 @@ static struct aa_profile *__attach_match(const char *name,
342 perm = dfa_user_allow(profile->xmatch, state); 339 perm = dfa_user_allow(profile->xmatch, state);
343 /* any accepting state means a valid match. */ 340 /* any accepting state means a valid match. */
344 if (perm & MAY_EXEC) { 341 if (perm & MAY_EXEC) {
342 if (profile->xmatch_len == len) {
343 conflict = true;
344 continue;
345 }
345 candidate = profile; 346 candidate = profile;
346 len = profile->xmatch_len; 347 len = profile->xmatch_len;
347 conflict = false; 348 conflict = false;
diff --git a/security/apparmor/include/perms.h b/security/apparmor/include/perms.h
index 2b27bb79aec4..d7b7e7115160 100644
--- a/security/apparmor/include/perms.h
+++ b/security/apparmor/include/perms.h
@@ -133,6 +133,9 @@ extern struct aa_perms allperms;
133#define xcheck_labels_profiles(L1, L2, FN, args...) \ 133#define xcheck_labels_profiles(L1, L2, FN, args...) \
134 xcheck_ns_labels((L1), (L2), xcheck_ns_profile_label, (FN), args) 134 xcheck_ns_labels((L1), (L2), xcheck_ns_profile_label, (FN), args)
135 135
136#define xcheck_labels(L1, L2, P, FN1, FN2) \
137 xcheck(fn_for_each((L1), (P), (FN1)), fn_for_each((L2), (P), (FN2)))
138
136 139
137void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask); 140void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask);
138void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask); 141void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask);
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
index 7ca0032e7ba9..b40678f3c1d5 100644
--- a/security/apparmor/ipc.c
+++ b/security/apparmor/ipc.c
@@ -64,40 +64,48 @@ static void audit_ptrace_cb(struct audit_buffer *ab, void *va)
64 FLAGS_NONE, GFP_ATOMIC); 64 FLAGS_NONE, GFP_ATOMIC);
65} 65}
66 66
67/* assumes check for PROFILE_MEDIATES is already done */
67/* TODO: conditionals */ 68/* TODO: conditionals */
68static int profile_ptrace_perm(struct aa_profile *profile, 69static int profile_ptrace_perm(struct aa_profile *profile,
69 struct aa_profile *peer, u32 request, 70 struct aa_label *peer, u32 request,
70 struct common_audit_data *sa) 71 struct common_audit_data *sa)
71{ 72{
72 struct aa_perms perms = { }; 73 struct aa_perms perms = { };
73 74
74 /* need because of peer in cross check */ 75 aad(sa)->peer = peer;
75 if (profile_unconfined(profile) || 76 aa_profile_match_label(profile, peer, AA_CLASS_PTRACE, request,
76 !PROFILE_MEDIATES(profile, AA_CLASS_PTRACE))
77 return 0;
78
79 aad(sa)->peer = &peer->label;
80 aa_profile_match_label(profile, &peer->label, AA_CLASS_PTRACE, request,
81 &perms); 77 &perms);
82 aa_apply_modes_to_perms(profile, &perms); 78 aa_apply_modes_to_perms(profile, &perms);
83 return aa_check_perms(profile, &perms, request, sa, audit_ptrace_cb); 79 return aa_check_perms(profile, &perms, request, sa, audit_ptrace_cb);
84} 80}
85 81
86static int cross_ptrace_perm(struct aa_profile *tracer, 82static int profile_tracee_perm(struct aa_profile *tracee,
87 struct aa_profile *tracee, u32 request, 83 struct aa_label *tracer, u32 request,
88 struct common_audit_data *sa) 84 struct common_audit_data *sa)
89{ 85{
86 if (profile_unconfined(tracee) || unconfined(tracer) ||
87 !PROFILE_MEDIATES(tracee, AA_CLASS_PTRACE))
88 return 0;
89
90 return profile_ptrace_perm(tracee, tracer, request, sa);
91}
92
93static int profile_tracer_perm(struct aa_profile *tracer,
94 struct aa_label *tracee, u32 request,
95 struct common_audit_data *sa)
96{
97 if (profile_unconfined(tracer))
98 return 0;
99
90 if (PROFILE_MEDIATES(tracer, AA_CLASS_PTRACE)) 100 if (PROFILE_MEDIATES(tracer, AA_CLASS_PTRACE))
91 return xcheck(profile_ptrace_perm(tracer, tracee, request, sa), 101 return profile_ptrace_perm(tracer, tracee, request, sa);
92 profile_ptrace_perm(tracee, tracer, 102
93 request << PTRACE_PERM_SHIFT, 103 /* profile uses the old style capability check for ptrace */
94 sa)); 104 if (&tracer->label == tracee)
95 /* policy uses the old style capability check for ptrace */
96 if (profile_unconfined(tracer) || tracer == tracee)
97 return 0; 105 return 0;
98 106
99 aad(sa)->label = &tracer->label; 107 aad(sa)->label = &tracer->label;
100 aad(sa)->peer = &tracee->label; 108 aad(sa)->peer = tracee;
101 aad(sa)->request = 0; 109 aad(sa)->request = 0;
102 aad(sa)->error = aa_capable(&tracer->label, CAP_SYS_PTRACE, 1); 110 aad(sa)->error = aa_capable(&tracer->label, CAP_SYS_PTRACE, 1);
103 111
@@ -115,10 +123,13 @@ static int cross_ptrace_perm(struct aa_profile *tracer,
115int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee, 123int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
116 u32 request) 124 u32 request)
117{ 125{
126 struct aa_profile *profile;
127 u32 xrequest = request << PTRACE_PERM_SHIFT;
118 DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, OP_PTRACE); 128 DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, OP_PTRACE);
119 129
120 return xcheck_labels_profiles(tracer, tracee, cross_ptrace_perm, 130 return xcheck_labels(tracer, tracee, profile,
121 request, &sa); 131 profile_tracer_perm(profile, tracee, request, &sa),
132 profile_tracee_perm(profile, tracer, xrequest, &sa));
122} 133}
123 134
124 135
diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c
index ed9b4d0f9f7e..8c558cbce930 100644
--- a/security/apparmor/mount.c
+++ b/security/apparmor/mount.c
@@ -329,6 +329,9 @@ static int match_mnt_path_str(struct aa_profile *profile,
329 AA_BUG(!mntpath); 329 AA_BUG(!mntpath);
330 AA_BUG(!buffer); 330 AA_BUG(!buffer);
331 331
332 if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
333 return 0;
334
332 error = aa_path_name(mntpath, path_flags(profile, mntpath), buffer, 335 error = aa_path_name(mntpath, path_flags(profile, mntpath), buffer,
333 &mntpnt, &info, profile->disconnected); 336 &mntpnt, &info, profile->disconnected);
334 if (error) 337 if (error)
@@ -380,6 +383,9 @@ static int match_mnt(struct aa_profile *profile, const struct path *path,
380 AA_BUG(!profile); 383 AA_BUG(!profile);
381 AA_BUG(devpath && !devbuffer); 384 AA_BUG(devpath && !devbuffer);
382 385
386 if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
387 return 0;
388
383 if (devpath) { 389 if (devpath) {
384 error = aa_path_name(devpath, path_flags(profile, devpath), 390 error = aa_path_name(devpath, path_flags(profile, devpath),
385 devbuffer, &devname, &info, 391 devbuffer, &devname, &info,
@@ -558,6 +564,9 @@ static int profile_umount(struct aa_profile *profile, struct path *path,
558 AA_BUG(!profile); 564 AA_BUG(!profile);
559 AA_BUG(!path); 565 AA_BUG(!path);
560 566
567 if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
568 return 0;
569
561 error = aa_path_name(path, path_flags(profile, path), buffer, &name, 570 error = aa_path_name(path, path_flags(profile, path), buffer, &name,
562 &info, profile->disconnected); 571 &info, profile->disconnected);
563 if (error) 572 if (error)
@@ -613,7 +622,8 @@ static struct aa_label *build_pivotroot(struct aa_profile *profile,
613 AA_BUG(!new_path); 622 AA_BUG(!new_path);
614 AA_BUG(!old_path); 623 AA_BUG(!old_path);
615 624
616 if (profile_unconfined(profile)) 625 if (profile_unconfined(profile) ||
626 !PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
617 return aa_get_newest_label(&profile->label); 627 return aa_get_newest_label(&profile->label);
618 628
619 error = aa_path_name(old_path, path_flags(profile, old_path), 629 error = aa_path_name(old_path, path_flags(profile, old_path),
diff --git a/security/commoncap.c b/security/commoncap.c
index 4f8e09340956..48620c93d697 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -348,21 +348,18 @@ static __u32 sansflags(__u32 m)
348 return m & ~VFS_CAP_FLAGS_EFFECTIVE; 348 return m & ~VFS_CAP_FLAGS_EFFECTIVE;
349} 349}
350 350
351static bool is_v2header(size_t size, __le32 magic) 351static bool is_v2header(size_t size, const struct vfs_cap_data *cap)
352{ 352{
353 __u32 m = le32_to_cpu(magic);
354 if (size != XATTR_CAPS_SZ_2) 353 if (size != XATTR_CAPS_SZ_2)
355 return false; 354 return false;
356 return sansflags(m) == VFS_CAP_REVISION_2; 355 return sansflags(le32_to_cpu(cap->magic_etc)) == VFS_CAP_REVISION_2;
357} 356}
358 357
359static bool is_v3header(size_t size, __le32 magic) 358static bool is_v3header(size_t size, const struct vfs_cap_data *cap)
360{ 359{
361 __u32 m = le32_to_cpu(magic);
362
363 if (size != XATTR_CAPS_SZ_3) 360 if (size != XATTR_CAPS_SZ_3)
364 return false; 361 return false;
365 return sansflags(m) == VFS_CAP_REVISION_3; 362 return sansflags(le32_to_cpu(cap->magic_etc)) == VFS_CAP_REVISION_3;
366} 363}
367 364
368/* 365/*
@@ -405,7 +402,7 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
405 402
406 fs_ns = inode->i_sb->s_user_ns; 403 fs_ns = inode->i_sb->s_user_ns;
407 cap = (struct vfs_cap_data *) tmpbuf; 404 cap = (struct vfs_cap_data *) tmpbuf;
408 if (is_v2header((size_t) ret, cap->magic_etc)) { 405 if (is_v2header((size_t) ret, cap)) {
409 /* If this is sizeof(vfs_cap_data) then we're ok with the 406 /* If this is sizeof(vfs_cap_data) then we're ok with the
410 * on-disk value, so return that. */ 407 * on-disk value, so return that. */
411 if (alloc) 408 if (alloc)
@@ -413,7 +410,7 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
413 else 410 else
414 kfree(tmpbuf); 411 kfree(tmpbuf);
415 return ret; 412 return ret;
416 } else if (!is_v3header((size_t) ret, cap->magic_etc)) { 413 } else if (!is_v3header((size_t) ret, cap)) {
417 kfree(tmpbuf); 414 kfree(tmpbuf);
418 return -EINVAL; 415 return -EINVAL;
419 } 416 }
@@ -470,9 +467,9 @@ static kuid_t rootid_from_xattr(const void *value, size_t size,
470 return make_kuid(task_ns, rootid); 467 return make_kuid(task_ns, rootid);
471} 468}
472 469
473static bool validheader(size_t size, __le32 magic) 470static bool validheader(size_t size, const struct vfs_cap_data *cap)
474{ 471{
475 return is_v2header(size, magic) || is_v3header(size, magic); 472 return is_v2header(size, cap) || is_v3header(size, cap);
476} 473}
477 474
478/* 475/*
@@ -495,7 +492,7 @@ int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size)
495 492
496 if (!*ivalue) 493 if (!*ivalue)
497 return -EINVAL; 494 return -EINVAL;
498 if (!validheader(size, cap->magic_etc)) 495 if (!validheader(size, cap))
499 return -EINVAL; 496 return -EINVAL;
500 if (!capable_wrt_inode_uidgid(inode, CAP_SETFCAP)) 497 if (!capable_wrt_inode_uidgid(inode, CAP_SETFCAP))
501 return -EPERM; 498 return -EPERM;
diff --git a/security/keys/key.c b/security/keys/key.c
index 66049183ad89..d97c9394b5dd 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -833,7 +833,6 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
833 833
834 key_check(keyring); 834 key_check(keyring);
835 835
836 key_ref = ERR_PTR(-EPERM);
837 if (!(flags & KEY_ALLOC_BYPASS_RESTRICTION)) 836 if (!(flags & KEY_ALLOC_BYPASS_RESTRICTION))
838 restrict_link = keyring->restrict_link; 837 restrict_link = keyring->restrict_link;
839 838
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 76d22f726ae4..1ffe60bb2845 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1588,9 +1588,8 @@ error_keyring:
1588 * The caller must have Setattr permission to change keyring restrictions. 1588 * The caller must have Setattr permission to change keyring restrictions.
1589 * 1589 *
1590 * The requested type name may be a NULL pointer to reject all attempts 1590 * The requested type name may be a NULL pointer to reject all attempts
1591 * to link to the keyring. If _type is non-NULL, _restriction can be 1591 * to link to the keyring. In this case, _restriction must also be NULL.
1592 * NULL or a pointer to a string describing the restriction. If _type is 1592 * Otherwise, both _type and _restriction must be non-NULL.
1593 * NULL, _restriction must also be NULL.
1594 * 1593 *
1595 * Returns 0 if successful. 1594 * Returns 0 if successful.
1596 */ 1595 */
@@ -1598,7 +1597,6 @@ long keyctl_restrict_keyring(key_serial_t id, const char __user *_type,
1598 const char __user *_restriction) 1597 const char __user *_restriction)
1599{ 1598{
1600 key_ref_t key_ref; 1599 key_ref_t key_ref;
1601 bool link_reject = !_type;
1602 char type[32]; 1600 char type[32];
1603 char *restriction = NULL; 1601 char *restriction = NULL;
1604 long ret; 1602 long ret;
@@ -1607,31 +1605,29 @@ long keyctl_restrict_keyring(key_serial_t id, const char __user *_type,
1607 if (IS_ERR(key_ref)) 1605 if (IS_ERR(key_ref))
1608 return PTR_ERR(key_ref); 1606 return PTR_ERR(key_ref);
1609 1607
1608 ret = -EINVAL;
1610 if (_type) { 1609 if (_type) {
1611 ret = key_get_type_from_user(type, _type, sizeof(type)); 1610 if (!_restriction)
1612 if (ret < 0)
1613 goto error; 1611 goto error;
1614 }
1615 1612
1616 if (_restriction) { 1613 ret = key_get_type_from_user(type, _type, sizeof(type));
1617 if (!_type) { 1614 if (ret < 0)
1618 ret = -EINVAL;
1619 goto error; 1615 goto error;
1620 }
1621 1616
1622 restriction = strndup_user(_restriction, PAGE_SIZE); 1617 restriction = strndup_user(_restriction, PAGE_SIZE);
1623 if (IS_ERR(restriction)) { 1618 if (IS_ERR(restriction)) {
1624 ret = PTR_ERR(restriction); 1619 ret = PTR_ERR(restriction);
1625 goto error; 1620 goto error;
1626 } 1621 }
1622 } else {
1623 if (_restriction)
1624 goto error;
1627 } 1625 }
1628 1626
1629 ret = keyring_restrict(key_ref, link_reject ? NULL : type, restriction); 1627 ret = keyring_restrict(key_ref, _type ? type : NULL, restriction);
1630 kfree(restriction); 1628 kfree(restriction);
1631
1632error: 1629error:
1633 key_ref_put(key_ref); 1630 key_ref_put(key_ref);
1634
1635 return ret; 1631 return ret;
1636} 1632}
1637 1633
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index e8036cd0ad54..114f7408feee 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -251,11 +251,12 @@ static int construct_key(struct key *key, const void *callout_info,
251 * The keyring selected is returned with an extra reference upon it which the 251 * The keyring selected is returned with an extra reference upon it which the
252 * caller must release. 252 * caller must release.
253 */ 253 */
254static void construct_get_dest_keyring(struct key **_dest_keyring) 254static int construct_get_dest_keyring(struct key **_dest_keyring)
255{ 255{
256 struct request_key_auth *rka; 256 struct request_key_auth *rka;
257 const struct cred *cred = current_cred(); 257 const struct cred *cred = current_cred();
258 struct key *dest_keyring = *_dest_keyring, *authkey; 258 struct key *dest_keyring = *_dest_keyring, *authkey;
259 int ret;
259 260
260 kenter("%p", dest_keyring); 261 kenter("%p", dest_keyring);
261 262
@@ -264,6 +265,8 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
264 /* the caller supplied one */ 265 /* the caller supplied one */
265 key_get(dest_keyring); 266 key_get(dest_keyring);
266 } else { 267 } else {
268 bool do_perm_check = true;
269
267 /* use a default keyring; falling through the cases until we 270 /* use a default keyring; falling through the cases until we
268 * find one that we actually have */ 271 * find one that we actually have */
269 switch (cred->jit_keyring) { 272 switch (cred->jit_keyring) {
@@ -278,8 +281,10 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
278 dest_keyring = 281 dest_keyring =
279 key_get(rka->dest_keyring); 282 key_get(rka->dest_keyring);
280 up_read(&authkey->sem); 283 up_read(&authkey->sem);
281 if (dest_keyring) 284 if (dest_keyring) {
285 do_perm_check = false;
282 break; 286 break;
287 }
283 } 288 }
284 289
285 case KEY_REQKEY_DEFL_THREAD_KEYRING: 290 case KEY_REQKEY_DEFL_THREAD_KEYRING:
@@ -314,11 +319,29 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
314 default: 319 default:
315 BUG(); 320 BUG();
316 } 321 }
322
323 /*
324 * Require Write permission on the keyring. This is essential
325 * because the default keyring may be the session keyring, and
326 * joining a keyring only requires Search permission.
327 *
328 * However, this check is skipped for the "requestor keyring" so
329 * that /sbin/request-key can itself use request_key() to add
330 * keys to the original requestor's destination keyring.
331 */
332 if (dest_keyring && do_perm_check) {
333 ret = key_permission(make_key_ref(dest_keyring, 1),
334 KEY_NEED_WRITE);
335 if (ret) {
336 key_put(dest_keyring);
337 return ret;
338 }
339 }
317 } 340 }
318 341
319 *_dest_keyring = dest_keyring; 342 *_dest_keyring = dest_keyring;
320 kleave(" [dk %d]", key_serial(dest_keyring)); 343 kleave(" [dk %d]", key_serial(dest_keyring));
321 return; 344 return 0;
322} 345}
323 346
324/* 347/*
@@ -444,11 +467,15 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
444 if (ctx->index_key.type == &key_type_keyring) 467 if (ctx->index_key.type == &key_type_keyring)
445 return ERR_PTR(-EPERM); 468 return ERR_PTR(-EPERM);
446 469
447 user = key_user_lookup(current_fsuid()); 470 ret = construct_get_dest_keyring(&dest_keyring);
448 if (!user) 471 if (ret)
449 return ERR_PTR(-ENOMEM); 472 goto error;
450 473
451 construct_get_dest_keyring(&dest_keyring); 474 user = key_user_lookup(current_fsuid());
475 if (!user) {
476 ret = -ENOMEM;
477 goto error_put_dest_keyring;
478 }
452 479
453 ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key); 480 ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key);
454 key_user_put(user); 481 key_user_put(user);
@@ -463,7 +490,7 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
463 } else if (ret == -EINPROGRESS) { 490 } else if (ret == -EINPROGRESS) {
464 ret = 0; 491 ret = 0;
465 } else { 492 } else {
466 goto couldnt_alloc_key; 493 goto error_put_dest_keyring;
467 } 494 }
468 495
469 key_put(dest_keyring); 496 key_put(dest_keyring);
@@ -473,8 +500,9 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
473construction_failed: 500construction_failed:
474 key_negate_and_link(key, key_negative_timeout, NULL, NULL); 501 key_negate_and_link(key, key_negative_timeout, NULL, NULL);
475 key_put(key); 502 key_put(key);
476couldnt_alloc_key: 503error_put_dest_keyring:
477 key_put(dest_keyring); 504 key_put(dest_keyring);
505error:
478 kleave(" = %d", ret); 506 kleave(" = %d", ret);
479 return ERR_PTR(ret); 507 return ERR_PTR(ret);
480} 508}
@@ -546,9 +574,7 @@ struct key *request_key_and_link(struct key_type *type,
546 if (!IS_ERR(key_ref)) { 574 if (!IS_ERR(key_ref)) {
547 key = key_ref_to_ptr(key_ref); 575 key = key_ref_to_ptr(key_ref);
548 if (dest_keyring) { 576 if (dest_keyring) {
549 construct_get_dest_keyring(&dest_keyring);
550 ret = key_link(dest_keyring, key); 577 ret = key_link(dest_keyring, key);
551 key_put(dest_keyring);
552 if (ret < 0) { 578 if (ret < 0) {
553 key_put(key); 579 key_put(key);
554 key = ERR_PTR(ret); 580 key = ERR_PTR(ret);
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index e49f448ee04f..c2db7e905f7d 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -455,7 +455,6 @@ static int snd_pcm_hw_param_near(struct snd_pcm_substream *pcm,
455 v = snd_pcm_hw_param_last(pcm, params, var, dir); 455 v = snd_pcm_hw_param_last(pcm, params, var, dir);
456 else 456 else
457 v = snd_pcm_hw_param_first(pcm, params, var, dir); 457 v = snd_pcm_hw_param_first(pcm, params, var, dir);
458 snd_BUG_ON(v < 0);
459 return v; 458 return v;
460} 459}
461 460
@@ -1335,8 +1334,11 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
1335 1334
1336 if ((tmp = snd_pcm_oss_make_ready(substream)) < 0) 1335 if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
1337 return tmp; 1336 return tmp;
1338 mutex_lock(&runtime->oss.params_lock);
1339 while (bytes > 0) { 1337 while (bytes > 0) {
1338 if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
1339 tmp = -ERESTARTSYS;
1340 break;
1341 }
1340 if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) { 1342 if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
1341 tmp = bytes; 1343 tmp = bytes;
1342 if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes) 1344 if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes)
@@ -1380,14 +1382,18 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
1380 xfer += tmp; 1382 xfer += tmp;
1381 if ((substream->f_flags & O_NONBLOCK) != 0 && 1383 if ((substream->f_flags & O_NONBLOCK) != 0 &&
1382 tmp != runtime->oss.period_bytes) 1384 tmp != runtime->oss.period_bytes)
1383 break; 1385 tmp = -EAGAIN;
1384 } 1386 }
1385 }
1386 mutex_unlock(&runtime->oss.params_lock);
1387 return xfer;
1388
1389 err: 1387 err:
1390 mutex_unlock(&runtime->oss.params_lock); 1388 mutex_unlock(&runtime->oss.params_lock);
1389 if (tmp < 0)
1390 break;
1391 if (signal_pending(current)) {
1392 tmp = -ERESTARTSYS;
1393 break;
1394 }
1395 tmp = 0;
1396 }
1391 return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp; 1397 return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
1392} 1398}
1393 1399
@@ -1435,8 +1441,11 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
1435 1441
1436 if ((tmp = snd_pcm_oss_make_ready(substream)) < 0) 1442 if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
1437 return tmp; 1443 return tmp;
1438 mutex_lock(&runtime->oss.params_lock);
1439 while (bytes > 0) { 1444 while (bytes > 0) {
1445 if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
1446 tmp = -ERESTARTSYS;
1447 break;
1448 }
1440 if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) { 1449 if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
1441 if (runtime->oss.buffer_used == 0) { 1450 if (runtime->oss.buffer_used == 0) {
1442 tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1); 1451 tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1);
@@ -1467,12 +1476,16 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
1467 bytes -= tmp; 1476 bytes -= tmp;
1468 xfer += tmp; 1477 xfer += tmp;
1469 } 1478 }
1470 }
1471 mutex_unlock(&runtime->oss.params_lock);
1472 return xfer;
1473
1474 err: 1479 err:
1475 mutex_unlock(&runtime->oss.params_lock); 1480 mutex_unlock(&runtime->oss.params_lock);
1481 if (tmp < 0)
1482 break;
1483 if (signal_pending(current)) {
1484 tmp = -ERESTARTSYS;
1485 break;
1486 }
1487 tmp = 0;
1488 }
1476 return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp; 1489 return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
1477} 1490}
1478 1491
diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c
index cadc93792868..85a56af104bd 100644
--- a/sound/core/oss/pcm_plugin.c
+++ b/sound/core/oss/pcm_plugin.c
@@ -592,18 +592,26 @@ snd_pcm_sframes_t snd_pcm_plug_write_transfer(struct snd_pcm_substream *plug, st
592 snd_pcm_sframes_t frames = size; 592 snd_pcm_sframes_t frames = size;
593 593
594 plugin = snd_pcm_plug_first(plug); 594 plugin = snd_pcm_plug_first(plug);
595 while (plugin && frames > 0) { 595 while (plugin) {
596 if (frames <= 0)
597 return frames;
596 if ((next = plugin->next) != NULL) { 598 if ((next = plugin->next) != NULL) {
597 snd_pcm_sframes_t frames1 = frames; 599 snd_pcm_sframes_t frames1 = frames;
598 if (plugin->dst_frames) 600 if (plugin->dst_frames) {
599 frames1 = plugin->dst_frames(plugin, frames); 601 frames1 = plugin->dst_frames(plugin, frames);
602 if (frames1 <= 0)
603 return frames1;
604 }
600 if ((err = next->client_channels(next, frames1, &dst_channels)) < 0) { 605 if ((err = next->client_channels(next, frames1, &dst_channels)) < 0) {
601 return err; 606 return err;
602 } 607 }
603 if (err != frames1) { 608 if (err != frames1) {
604 frames = err; 609 frames = err;
605 if (plugin->src_frames) 610 if (plugin->src_frames) {
606 frames = plugin->src_frames(plugin, frames1); 611 frames = plugin->src_frames(plugin, frames1);
612 if (frames <= 0)
613 return frames;
614 }
607 } 615 }
608 } else 616 } else
609 dst_channels = NULL; 617 dst_channels = NULL;
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 9070f277f8db..09ee8c6b9f75 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -153,7 +153,9 @@ static int snd_pcm_control_ioctl(struct snd_card *card,
153 err = -ENXIO; 153 err = -ENXIO;
154 goto _error; 154 goto _error;
155 } 155 }
156 mutex_lock(&pcm->open_mutex);
156 err = snd_pcm_info_user(substream, info); 157 err = snd_pcm_info_user(substream, info);
158 mutex_unlock(&pcm->open_mutex);
157 _error: 159 _error:
158 mutex_unlock(&register_mutex); 160 mutex_unlock(&register_mutex);
159 return err; 161 return err;
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 10e7ef7a8804..faa67861cbc1 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -560,7 +560,6 @@ static inline unsigned int muldiv32(unsigned int a, unsigned int b,
560{ 560{
561 u_int64_t n = (u_int64_t) a * b; 561 u_int64_t n = (u_int64_t) a * b;
562 if (c == 0) { 562 if (c == 0) {
563 snd_BUG_ON(!n);
564 *r = 0; 563 *r = 0;
565 return UINT_MAX; 564 return UINT_MAX;
566 } 565 }
@@ -1632,7 +1631,7 @@ int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
1632 return changed; 1631 return changed;
1633 if (params->rmask) { 1632 if (params->rmask) {
1634 int err = snd_pcm_hw_refine(pcm, params); 1633 int err = snd_pcm_hw_refine(pcm, params);
1635 if (snd_BUG_ON(err < 0)) 1634 if (err < 0)
1636 return err; 1635 return err;
1637 } 1636 }
1638 return snd_pcm_hw_param_value(params, var, dir); 1637 return snd_pcm_hw_param_value(params, var, dir);
@@ -1678,7 +1677,7 @@ int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
1678 return changed; 1677 return changed;
1679 if (params->rmask) { 1678 if (params->rmask) {
1680 int err = snd_pcm_hw_refine(pcm, params); 1679 int err = snd_pcm_hw_refine(pcm, params);
1681 if (snd_BUG_ON(err < 0)) 1680 if (err < 0)
1682 return err; 1681 return err;
1683 } 1682 }
1684 return snd_pcm_hw_param_value(params, var, dir); 1683 return snd_pcm_hw_param_value(params, var, dir);
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index a4d92e46c459..f08772568c17 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2580,7 +2580,7 @@ static snd_pcm_sframes_t forward_appl_ptr(struct snd_pcm_substream *substream,
2580 return ret < 0 ? ret : frames; 2580 return ret < 0 ? ret : frames;
2581} 2581}
2582 2582
2583/* decrease the appl_ptr; returns the processed frames or a negative error */ 2583/* decrease the appl_ptr; returns the processed frames or zero for error */
2584static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream, 2584static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
2585 snd_pcm_uframes_t frames, 2585 snd_pcm_uframes_t frames,
2586 snd_pcm_sframes_t avail) 2586 snd_pcm_sframes_t avail)
@@ -2597,7 +2597,12 @@ static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
2597 if (appl_ptr < 0) 2597 if (appl_ptr < 0)
2598 appl_ptr += runtime->boundary; 2598 appl_ptr += runtime->boundary;
2599 ret = pcm_lib_apply_appl_ptr(substream, appl_ptr); 2599 ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2600 return ret < 0 ? ret : frames; 2600 /* NOTE: we return zero for errors because PulseAudio gets depressed
2601 * upon receiving an error from rewind ioctl and stops processing
2602 * any longer. Returning zero means that no rewind is done, so
2603 * it's not absolutely wrong to answer like that.
2604 */
2605 return ret < 0 ? 0 : frames;
2601} 2606}
2602 2607
2603static snd_pcm_sframes_t snd_pcm_playback_rewind(struct snd_pcm_substream *substream, 2608static snd_pcm_sframes_t snd_pcm_playback_rewind(struct snd_pcm_substream *substream,
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index b3b353d72527..f055ca10bbc1 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -579,15 +579,14 @@ static int snd_rawmidi_info_user(struct snd_rawmidi_substream *substream,
579 return 0; 579 return 0;
580} 580}
581 581
582int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info) 582static int __snd_rawmidi_info_select(struct snd_card *card,
583 struct snd_rawmidi_info *info)
583{ 584{
584 struct snd_rawmidi *rmidi; 585 struct snd_rawmidi *rmidi;
585 struct snd_rawmidi_str *pstr; 586 struct snd_rawmidi_str *pstr;
586 struct snd_rawmidi_substream *substream; 587 struct snd_rawmidi_substream *substream;
587 588
588 mutex_lock(&register_mutex);
589 rmidi = snd_rawmidi_search(card, info->device); 589 rmidi = snd_rawmidi_search(card, info->device);
590 mutex_unlock(&register_mutex);
591 if (!rmidi) 590 if (!rmidi)
592 return -ENXIO; 591 return -ENXIO;
593 if (info->stream < 0 || info->stream > 1) 592 if (info->stream < 0 || info->stream > 1)
@@ -603,6 +602,16 @@ int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info
603 } 602 }
604 return -ENXIO; 603 return -ENXIO;
605} 604}
605
606int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info)
607{
608 int ret;
609
610 mutex_lock(&register_mutex);
611 ret = __snd_rawmidi_info_select(card, info);
612 mutex_unlock(&register_mutex);
613 return ret;
614}
606EXPORT_SYMBOL(snd_rawmidi_info_select); 615EXPORT_SYMBOL(snd_rawmidi_info_select);
607 616
608static int snd_rawmidi_info_select_user(struct snd_card *card, 617static int snd_rawmidi_info_select_user(struct snd_card *card,
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 6e22eea72654..d01913404581 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -221,6 +221,7 @@ static struct snd_seq_client *seq_create_client1(int client_index, int poolsize)
221 rwlock_init(&client->ports_lock); 221 rwlock_init(&client->ports_lock);
222 mutex_init(&client->ports_mutex); 222 mutex_init(&client->ports_mutex);
223 INIT_LIST_HEAD(&client->ports_list_head); 223 INIT_LIST_HEAD(&client->ports_list_head);
224 mutex_init(&client->ioctl_mutex);
224 225
225 /* find free slot in the client table */ 226 /* find free slot in the client table */
226 spin_lock_irqsave(&clients_lock, flags); 227 spin_lock_irqsave(&clients_lock, flags);
@@ -2130,7 +2131,9 @@ static long snd_seq_ioctl(struct file *file, unsigned int cmd,
2130 return -EFAULT; 2131 return -EFAULT;
2131 } 2132 }
2132 2133
2134 mutex_lock(&client->ioctl_mutex);
2133 err = handler->func(client, &buf); 2135 err = handler->func(client, &buf);
2136 mutex_unlock(&client->ioctl_mutex);
2134 if (err >= 0) { 2137 if (err >= 0) {
2135 /* Some commands includes a bug in 'dir' field. */ 2138 /* Some commands includes a bug in 'dir' field. */
2136 if (handler->cmd == SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT || 2139 if (handler->cmd == SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT ||
diff --git a/sound/core/seq/seq_clientmgr.h b/sound/core/seq/seq_clientmgr.h
index c6614254ef8a..0611e1e0ed5b 100644
--- a/sound/core/seq/seq_clientmgr.h
+++ b/sound/core/seq/seq_clientmgr.h
@@ -61,6 +61,7 @@ struct snd_seq_client {
61 struct list_head ports_list_head; 61 struct list_head ports_list_head;
62 rwlock_t ports_lock; 62 rwlock_t ports_lock;
63 struct mutex ports_mutex; 63 struct mutex ports_mutex;
64 struct mutex ioctl_mutex;
64 int convert32; /* convert 32->64bit */ 65 int convert32; /* convert 32->64bit */
65 66
66 /* output pool */ 67 /* output pool */
diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
index 37d9cfbc29f9..b80985fbc334 100644
--- a/sound/core/seq/seq_timer.c
+++ b/sound/core/seq/seq_timer.c
@@ -355,7 +355,7 @@ static int initialize_timer(struct snd_seq_timer *tmr)
355 unsigned long freq; 355 unsigned long freq;
356 356
357 t = tmr->timeri->timer; 357 t = tmr->timeri->timer;
358 if (snd_BUG_ON(!t)) 358 if (!t)
359 return -EINVAL; 359 return -EINVAL;
360 360
361 freq = tmr->preferred_resolution; 361 freq = tmr->preferred_resolution;
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index afac886ffa28..0333143a1fa7 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -39,6 +39,7 @@
39#include <sound/core.h> 39#include <sound/core.h>
40#include <sound/control.h> 40#include <sound/control.h>
41#include <sound/pcm.h> 41#include <sound/pcm.h>
42#include <sound/pcm_params.h>
42#include <sound/info.h> 43#include <sound/info.h>
43#include <sound/initval.h> 44#include <sound/initval.h>
44 45
@@ -305,19 +306,6 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
305 return 0; 306 return 0;
306} 307}
307 308
308static void params_change_substream(struct loopback_pcm *dpcm,
309 struct snd_pcm_runtime *runtime)
310{
311 struct snd_pcm_runtime *dst_runtime;
312
313 if (dpcm == NULL || dpcm->substream == NULL)
314 return;
315 dst_runtime = dpcm->substream->runtime;
316 if (dst_runtime == NULL)
317 return;
318 dst_runtime->hw = dpcm->cable->hw;
319}
320
321static void params_change(struct snd_pcm_substream *substream) 309static void params_change(struct snd_pcm_substream *substream)
322{ 310{
323 struct snd_pcm_runtime *runtime = substream->runtime; 311 struct snd_pcm_runtime *runtime = substream->runtime;
@@ -329,10 +317,6 @@ static void params_change(struct snd_pcm_substream *substream)
329 cable->hw.rate_max = runtime->rate; 317 cable->hw.rate_max = runtime->rate;
330 cable->hw.channels_min = runtime->channels; 318 cable->hw.channels_min = runtime->channels;
331 cable->hw.channels_max = runtime->channels; 319 cable->hw.channels_max = runtime->channels;
332 params_change_substream(cable->streams[SNDRV_PCM_STREAM_PLAYBACK],
333 runtime);
334 params_change_substream(cable->streams[SNDRV_PCM_STREAM_CAPTURE],
335 runtime);
336} 320}
337 321
338static int loopback_prepare(struct snd_pcm_substream *substream) 322static int loopback_prepare(struct snd_pcm_substream *substream)
@@ -620,26 +604,29 @@ static unsigned int get_cable_index(struct snd_pcm_substream *substream)
620static int rule_format(struct snd_pcm_hw_params *params, 604static int rule_format(struct snd_pcm_hw_params *params,
621 struct snd_pcm_hw_rule *rule) 605 struct snd_pcm_hw_rule *rule)
622{ 606{
607 struct loopback_pcm *dpcm = rule->private;
608 struct loopback_cable *cable = dpcm->cable;
609 struct snd_mask m;
623 610
624 struct snd_pcm_hardware *hw = rule->private; 611 snd_mask_none(&m);
625 struct snd_mask *maskp = hw_param_mask(params, rule->var); 612 mutex_lock(&dpcm->loopback->cable_lock);
626 613 m.bits[0] = (u_int32_t)cable->hw.formats;
627 maskp->bits[0] &= (u_int32_t)hw->formats; 614 m.bits[1] = (u_int32_t)(cable->hw.formats >> 32);
628 maskp->bits[1] &= (u_int32_t)(hw->formats >> 32); 615 mutex_unlock(&dpcm->loopback->cable_lock);
629 memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */ 616 return snd_mask_refine(hw_param_mask(params, rule->var), &m);
630 if (! maskp->bits[0] && ! maskp->bits[1])
631 return -EINVAL;
632 return 0;
633} 617}
634 618
635static int rule_rate(struct snd_pcm_hw_params *params, 619static int rule_rate(struct snd_pcm_hw_params *params,
636 struct snd_pcm_hw_rule *rule) 620 struct snd_pcm_hw_rule *rule)
637{ 621{
638 struct snd_pcm_hardware *hw = rule->private; 622 struct loopback_pcm *dpcm = rule->private;
623 struct loopback_cable *cable = dpcm->cable;
639 struct snd_interval t; 624 struct snd_interval t;
640 625
641 t.min = hw->rate_min; 626 mutex_lock(&dpcm->loopback->cable_lock);
642 t.max = hw->rate_max; 627 t.min = cable->hw.rate_min;
628 t.max = cable->hw.rate_max;
629 mutex_unlock(&dpcm->loopback->cable_lock);
643 t.openmin = t.openmax = 0; 630 t.openmin = t.openmax = 0;
644 t.integer = 0; 631 t.integer = 0;
645 return snd_interval_refine(hw_param_interval(params, rule->var), &t); 632 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
@@ -648,22 +635,44 @@ static int rule_rate(struct snd_pcm_hw_params *params,
648static int rule_channels(struct snd_pcm_hw_params *params, 635static int rule_channels(struct snd_pcm_hw_params *params,
649 struct snd_pcm_hw_rule *rule) 636 struct snd_pcm_hw_rule *rule)
650{ 637{
651 struct snd_pcm_hardware *hw = rule->private; 638 struct loopback_pcm *dpcm = rule->private;
639 struct loopback_cable *cable = dpcm->cable;
652 struct snd_interval t; 640 struct snd_interval t;
653 641
654 t.min = hw->channels_min; 642 mutex_lock(&dpcm->loopback->cable_lock);
655 t.max = hw->channels_max; 643 t.min = cable->hw.channels_min;
644 t.max = cable->hw.channels_max;
645 mutex_unlock(&dpcm->loopback->cable_lock);
656 t.openmin = t.openmax = 0; 646 t.openmin = t.openmax = 0;
657 t.integer = 0; 647 t.integer = 0;
658 return snd_interval_refine(hw_param_interval(params, rule->var), &t); 648 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
659} 649}
660 650
651static void free_cable(struct snd_pcm_substream *substream)
652{
653 struct loopback *loopback = substream->private_data;
654 int dev = get_cable_index(substream);
655 struct loopback_cable *cable;
656
657 cable = loopback->cables[substream->number][dev];
658 if (!cable)
659 return;
660 if (cable->streams[!substream->stream]) {
661 /* other stream is still alive */
662 cable->streams[substream->stream] = NULL;
663 } else {
664 /* free the cable */
665 loopback->cables[substream->number][dev] = NULL;
666 kfree(cable);
667 }
668}
669
661static int loopback_open(struct snd_pcm_substream *substream) 670static int loopback_open(struct snd_pcm_substream *substream)
662{ 671{
663 struct snd_pcm_runtime *runtime = substream->runtime; 672 struct snd_pcm_runtime *runtime = substream->runtime;
664 struct loopback *loopback = substream->private_data; 673 struct loopback *loopback = substream->private_data;
665 struct loopback_pcm *dpcm; 674 struct loopback_pcm *dpcm;
666 struct loopback_cable *cable; 675 struct loopback_cable *cable = NULL;
667 int err = 0; 676 int err = 0;
668 int dev = get_cable_index(substream); 677 int dev = get_cable_index(substream);
669 678
@@ -681,7 +690,6 @@ static int loopback_open(struct snd_pcm_substream *substream)
681 if (!cable) { 690 if (!cable) {
682 cable = kzalloc(sizeof(*cable), GFP_KERNEL); 691 cable = kzalloc(sizeof(*cable), GFP_KERNEL);
683 if (!cable) { 692 if (!cable) {
684 kfree(dpcm);
685 err = -ENOMEM; 693 err = -ENOMEM;
686 goto unlock; 694 goto unlock;
687 } 695 }
@@ -699,19 +707,19 @@ static int loopback_open(struct snd_pcm_substream *substream)
699 /* are cached -> they do not reflect the actual state */ 707 /* are cached -> they do not reflect the actual state */
700 err = snd_pcm_hw_rule_add(runtime, 0, 708 err = snd_pcm_hw_rule_add(runtime, 0,
701 SNDRV_PCM_HW_PARAM_FORMAT, 709 SNDRV_PCM_HW_PARAM_FORMAT,
702 rule_format, &runtime->hw, 710 rule_format, dpcm,
703 SNDRV_PCM_HW_PARAM_FORMAT, -1); 711 SNDRV_PCM_HW_PARAM_FORMAT, -1);
704 if (err < 0) 712 if (err < 0)
705 goto unlock; 713 goto unlock;
706 err = snd_pcm_hw_rule_add(runtime, 0, 714 err = snd_pcm_hw_rule_add(runtime, 0,
707 SNDRV_PCM_HW_PARAM_RATE, 715 SNDRV_PCM_HW_PARAM_RATE,
708 rule_rate, &runtime->hw, 716 rule_rate, dpcm,
709 SNDRV_PCM_HW_PARAM_RATE, -1); 717 SNDRV_PCM_HW_PARAM_RATE, -1);
710 if (err < 0) 718 if (err < 0)
711 goto unlock; 719 goto unlock;
712 err = snd_pcm_hw_rule_add(runtime, 0, 720 err = snd_pcm_hw_rule_add(runtime, 0,
713 SNDRV_PCM_HW_PARAM_CHANNELS, 721 SNDRV_PCM_HW_PARAM_CHANNELS,
714 rule_channels, &runtime->hw, 722 rule_channels, dpcm,
715 SNDRV_PCM_HW_PARAM_CHANNELS, -1); 723 SNDRV_PCM_HW_PARAM_CHANNELS, -1);
716 if (err < 0) 724 if (err < 0)
717 goto unlock; 725 goto unlock;
@@ -723,6 +731,10 @@ static int loopback_open(struct snd_pcm_substream *substream)
723 else 731 else
724 runtime->hw = cable->hw; 732 runtime->hw = cable->hw;
725 unlock: 733 unlock:
734 if (err < 0) {
735 free_cable(substream);
736 kfree(dpcm);
737 }
726 mutex_unlock(&loopback->cable_lock); 738 mutex_unlock(&loopback->cable_lock);
727 return err; 739 return err;
728} 740}
@@ -731,20 +743,10 @@ static int loopback_close(struct snd_pcm_substream *substream)
731{ 743{
732 struct loopback *loopback = substream->private_data; 744 struct loopback *loopback = substream->private_data;
733 struct loopback_pcm *dpcm = substream->runtime->private_data; 745 struct loopback_pcm *dpcm = substream->runtime->private_data;
734 struct loopback_cable *cable;
735 int dev = get_cable_index(substream);
736 746
737 loopback_timer_stop(dpcm); 747 loopback_timer_stop(dpcm);
738 mutex_lock(&loopback->cable_lock); 748 mutex_lock(&loopback->cable_lock);
739 cable = loopback->cables[substream->number][dev]; 749 free_cable(substream);
740 if (cable->streams[!substream->stream]) {
741 /* other stream is still alive */
742 cable->streams[substream->stream] = NULL;
743 } else {
744 /* free the cable */
745 loopback->cables[substream->number][dev] = NULL;
746 kfree(cable);
747 }
748 mutex_unlock(&loopback->cable_lock); 750 mutex_unlock(&loopback->cable_lock);
749 return 0; 751 return 0;
750} 752}
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index 038a180d3f81..cbe818eda336 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -325,7 +325,7 @@ static int hdac_component_master_match(struct device *dev, void *data)
325 */ 325 */
326int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *aops) 326int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *aops)
327{ 327{
328 if (WARN_ON(!hdac_acomp)) 328 if (!hdac_acomp)
329 return -ENODEV; 329 return -ENODEV;
330 330
331 hdac_acomp->audio_ops = aops; 331 hdac_acomp->audio_ops = aops;
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 80bbadc83721..d6e079f4ec09 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -408,6 +408,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
408 /*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/ 408 /*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/
409 409
410 /* codec SSID */ 410 /* codec SSID */
411 SND_PCI_QUIRK(0x106b, 0x0600, "iMac 14,1", CS420X_IMAC27_122),
411 SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81), 412 SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
412 SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122), 413 SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
413 SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101), 414 SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index a81aacf684b2..37e1cf8218ff 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -271,6 +271,8 @@ enum {
271 CXT_FIXUP_HP_SPECTRE, 271 CXT_FIXUP_HP_SPECTRE,
272 CXT_FIXUP_HP_GATE_MIC, 272 CXT_FIXUP_HP_GATE_MIC,
273 CXT_FIXUP_MUTE_LED_GPIO, 273 CXT_FIXUP_MUTE_LED_GPIO,
274 CXT_FIXUP_HEADSET_MIC,
275 CXT_FIXUP_HP_MIC_NO_PRESENCE,
274}; 276};
275 277
276/* for hda_fixup_thinkpad_acpi() */ 278/* for hda_fixup_thinkpad_acpi() */
@@ -350,6 +352,18 @@ static void cxt_fixup_headphone_mic(struct hda_codec *codec,
350 } 352 }
351} 353}
352 354
355static void cxt_fixup_headset_mic(struct hda_codec *codec,
356 const struct hda_fixup *fix, int action)
357{
358 struct conexant_spec *spec = codec->spec;
359
360 switch (action) {
361 case HDA_FIXUP_ACT_PRE_PROBE:
362 spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
363 break;
364 }
365}
366
353/* OPLC XO 1.5 fixup */ 367/* OPLC XO 1.5 fixup */
354 368
355/* OLPC XO-1.5 supports DC input mode (e.g. for use with analog sensors) 369/* OLPC XO-1.5 supports DC input mode (e.g. for use with analog sensors)
@@ -880,6 +894,19 @@ static const struct hda_fixup cxt_fixups[] = {
880 .type = HDA_FIXUP_FUNC, 894 .type = HDA_FIXUP_FUNC,
881 .v.func = cxt_fixup_mute_led_gpio, 895 .v.func = cxt_fixup_mute_led_gpio,
882 }, 896 },
897 [CXT_FIXUP_HEADSET_MIC] = {
898 .type = HDA_FIXUP_FUNC,
899 .v.func = cxt_fixup_headset_mic,
900 },
901 [CXT_FIXUP_HP_MIC_NO_PRESENCE] = {
902 .type = HDA_FIXUP_PINS,
903 .v.pins = (const struct hda_pintbl[]) {
904 { 0x1a, 0x02a1113c },
905 { }
906 },
907 .chained = true,
908 .chain_id = CXT_FIXUP_HEADSET_MIC,
909 },
883}; 910};
884 911
885static const struct snd_pci_quirk cxt5045_fixups[] = { 912static const struct snd_pci_quirk cxt5045_fixups[] = {
@@ -934,6 +961,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
934 SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), 961 SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
935 SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO), 962 SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
936 SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO), 963 SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
964 SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
965 SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
937 SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), 966 SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
938 SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO), 967 SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
939 SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410), 968 SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index c19c81d230bd..b4f1b6e88305 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -55,10 +55,11 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
55#define is_kabylake(codec) ((codec)->core.vendor_id == 0x8086280b) 55#define is_kabylake(codec) ((codec)->core.vendor_id == 0x8086280b)
56#define is_geminilake(codec) (((codec)->core.vendor_id == 0x8086280d) || \ 56#define is_geminilake(codec) (((codec)->core.vendor_id == 0x8086280d) || \
57 ((codec)->core.vendor_id == 0x80862800)) 57 ((codec)->core.vendor_id == 0x80862800))
58#define is_cannonlake(codec) ((codec)->core.vendor_id == 0x8086280c)
58#define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \ 59#define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
59 || is_skylake(codec) || is_broxton(codec) \ 60 || is_skylake(codec) || is_broxton(codec) \
60 || is_kabylake(codec)) || is_geminilake(codec) 61 || is_kabylake(codec)) || is_geminilake(codec) \
61 62 || is_cannonlake(codec)
62#define is_valleyview(codec) ((codec)->core.vendor_id == 0x80862882) 63#define is_valleyview(codec) ((codec)->core.vendor_id == 0x80862882)
63#define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883) 64#define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883)
64#define is_valleyview_plus(codec) (is_valleyview(codec) || is_cherryview(codec)) 65#define is_valleyview_plus(codec) (is_valleyview(codec) || is_cherryview(codec))
@@ -3841,6 +3842,7 @@ HDA_CODEC_ENTRY(0x80862808, "Broadwell HDMI", patch_i915_hsw_hdmi),
3841HDA_CODEC_ENTRY(0x80862809, "Skylake HDMI", patch_i915_hsw_hdmi), 3842HDA_CODEC_ENTRY(0x80862809, "Skylake HDMI", patch_i915_hsw_hdmi),
3842HDA_CODEC_ENTRY(0x8086280a, "Broxton HDMI", patch_i915_hsw_hdmi), 3843HDA_CODEC_ENTRY(0x8086280a, "Broxton HDMI", patch_i915_hsw_hdmi),
3843HDA_CODEC_ENTRY(0x8086280b, "Kabylake HDMI", patch_i915_hsw_hdmi), 3844HDA_CODEC_ENTRY(0x8086280b, "Kabylake HDMI", patch_i915_hsw_hdmi),
3845HDA_CODEC_ENTRY(0x8086280c, "Cannonlake HDMI", patch_i915_glk_hdmi),
3844HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_glk_hdmi), 3846HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_glk_hdmi),
3845HDA_CODEC_ENTRY(0x80862800, "Geminilake HDMI", patch_i915_glk_hdmi), 3847HDA_CODEC_ENTRY(0x80862800, "Geminilake HDMI", patch_i915_glk_hdmi),
3846HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi), 3848HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 921a10eff43a..9aafc6c86132 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -324,21 +324,24 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
324 case 0x10ec0292: 324 case 0x10ec0292:
325 alc_update_coef_idx(codec, 0x4, 1<<15, 0); 325 alc_update_coef_idx(codec, 0x4, 1<<15, 0);
326 break; 326 break;
327 case 0x10ec0215:
328 case 0x10ec0225: 327 case 0x10ec0225:
328 case 0x10ec0295:
329 case 0x10ec0299:
330 alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000);
331 /* fallthrough */
332 case 0x10ec0215:
329 case 0x10ec0233: 333 case 0x10ec0233:
330 case 0x10ec0236: 334 case 0x10ec0236:
331 case 0x10ec0255: 335 case 0x10ec0255:
332 case 0x10ec0256: 336 case 0x10ec0256:
337 case 0x10ec0257:
333 case 0x10ec0282: 338 case 0x10ec0282:
334 case 0x10ec0283: 339 case 0x10ec0283:
335 case 0x10ec0286: 340 case 0x10ec0286:
336 case 0x10ec0288: 341 case 0x10ec0288:
337 case 0x10ec0285: 342 case 0x10ec0285:
338 case 0x10ec0295:
339 case 0x10ec0298: 343 case 0x10ec0298:
340 case 0x10ec0289: 344 case 0x10ec0289:
341 case 0x10ec0299:
342 alc_update_coef_idx(codec, 0x10, 1<<9, 0); 345 alc_update_coef_idx(codec, 0x10, 1<<9, 0);
343 break; 346 break;
344 case 0x10ec0275: 347 case 0x10ec0275:
@@ -2772,6 +2775,7 @@ enum {
2772 ALC269_TYPE_ALC298, 2775 ALC269_TYPE_ALC298,
2773 ALC269_TYPE_ALC255, 2776 ALC269_TYPE_ALC255,
2774 ALC269_TYPE_ALC256, 2777 ALC269_TYPE_ALC256,
2778 ALC269_TYPE_ALC257,
2775 ALC269_TYPE_ALC215, 2779 ALC269_TYPE_ALC215,
2776 ALC269_TYPE_ALC225, 2780 ALC269_TYPE_ALC225,
2777 ALC269_TYPE_ALC294, 2781 ALC269_TYPE_ALC294,
@@ -2805,6 +2809,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
2805 case ALC269_TYPE_ALC298: 2809 case ALC269_TYPE_ALC298:
2806 case ALC269_TYPE_ALC255: 2810 case ALC269_TYPE_ALC255:
2807 case ALC269_TYPE_ALC256: 2811 case ALC269_TYPE_ALC256:
2812 case ALC269_TYPE_ALC257:
2808 case ALC269_TYPE_ALC215: 2813 case ALC269_TYPE_ALC215:
2809 case ALC269_TYPE_ALC225: 2814 case ALC269_TYPE_ALC225:
2810 case ALC269_TYPE_ALC294: 2815 case ALC269_TYPE_ALC294:
@@ -5182,6 +5187,22 @@ static void alc233_alc662_fixup_lenovo_dual_codecs(struct hda_codec *codec,
5182 } 5187 }
5183} 5188}
5184 5189
5190/* Forcibly assign NID 0x03 to HP/LO while NID 0x02 to SPK for EQ */
5191static void alc274_fixup_bind_dacs(struct hda_codec *codec,
5192 const struct hda_fixup *fix, int action)
5193{
5194 struct alc_spec *spec = codec->spec;
5195 static hda_nid_t preferred_pairs[] = {
5196 0x21, 0x03, 0x1b, 0x03, 0x16, 0x02,
5197 0
5198 };
5199
5200 if (action != HDA_FIXUP_ACT_PRE_PROBE)
5201 return;
5202
5203 spec->gen.preferred_dacs = preferred_pairs;
5204}
5205
5185/* for hda_fixup_thinkpad_acpi() */ 5206/* for hda_fixup_thinkpad_acpi() */
5186#include "thinkpad_helper.c" 5207#include "thinkpad_helper.c"
5187 5208
@@ -5299,6 +5320,8 @@ enum {
5299 ALC233_FIXUP_LENOVO_MULTI_CODECS, 5320 ALC233_FIXUP_LENOVO_MULTI_CODECS,
5300 ALC294_FIXUP_LENOVO_MIC_LOCATION, 5321 ALC294_FIXUP_LENOVO_MIC_LOCATION,
5301 ALC700_FIXUP_INTEL_REFERENCE, 5322 ALC700_FIXUP_INTEL_REFERENCE,
5323 ALC274_FIXUP_DELL_BIND_DACS,
5324 ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
5302}; 5325};
5303 5326
5304static const struct hda_fixup alc269_fixups[] = { 5327static const struct hda_fixup alc269_fixups[] = {
@@ -6109,6 +6132,21 @@ static const struct hda_fixup alc269_fixups[] = {
6109 {} 6132 {}
6110 } 6133 }
6111 }, 6134 },
6135 [ALC274_FIXUP_DELL_BIND_DACS] = {
6136 .type = HDA_FIXUP_FUNC,
6137 .v.func = alc274_fixup_bind_dacs,
6138 .chained = true,
6139 .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
6140 },
6141 [ALC274_FIXUP_DELL_AIO_LINEOUT_VERB] = {
6142 .type = HDA_FIXUP_PINS,
6143 .v.pins = (const struct hda_pintbl[]) {
6144 { 0x1b, 0x0401102f },
6145 { }
6146 },
6147 .chained = true,
6148 .chain_id = ALC274_FIXUP_DELL_BIND_DACS
6149 },
6112}; 6150};
6113 6151
6114static const struct snd_pci_quirk alc269_fixup_tbl[] = { 6152static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6158,6 +6196,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6158 SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), 6196 SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
6159 SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME), 6197 SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
6160 SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), 6198 SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
6199 SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
6161 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 6200 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
6162 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 6201 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
6163 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), 6202 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -6292,6 +6331,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6292 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 6331 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6293 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 6332 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6294 SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 6333 SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
6334 SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
6295 SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 6335 SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6296 SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), 6336 SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
6297 SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), 6337 SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
@@ -6550,6 +6590,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
6550 {0x1b, 0x01011020}, 6590 {0x1b, 0x01011020},
6551 {0x21, 0x02211010}), 6591 {0x21, 0x02211010}),
6552 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 6592 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
6593 {0x12, 0x90a60130},
6594 {0x14, 0x90170110},
6595 {0x1b, 0x01011020},
6596 {0x21, 0x0221101f}),
6597 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
6553 {0x12, 0x90a60160}, 6598 {0x12, 0x90a60160},
6554 {0x14, 0x90170120}, 6599 {0x14, 0x90170120},
6555 {0x21, 0x02211030}), 6600 {0x21, 0x02211030}),
@@ -6575,7 +6620,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
6575 {0x14, 0x90170110}, 6620 {0x14, 0x90170110},
6576 {0x1b, 0x90a70130}, 6621 {0x1b, 0x90a70130},
6577 {0x21, 0x03211020}), 6622 {0x21, 0x03211020}),
6578 SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, 6623 SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
6579 {0x12, 0xb7a60130}, 6624 {0x12, 0xb7a60130},
6580 {0x13, 0xb8a61140}, 6625 {0x13, 0xb8a61140},
6581 {0x16, 0x90170110}, 6626 {0x16, 0x90170110},
@@ -6867,6 +6912,10 @@ static int patch_alc269(struct hda_codec *codec)
6867 spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */ 6912 spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
6868 alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/ 6913 alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
6869 break; 6914 break;
6915 case 0x10ec0257:
6916 spec->codec_variant = ALC269_TYPE_ALC257;
6917 spec->gen.mixer_nid = 0;
6918 break;
6870 case 0x10ec0215: 6919 case 0x10ec0215:
6871 case 0x10ec0285: 6920 case 0x10ec0285:
6872 case 0x10ec0289: 6921 case 0x10ec0289:
@@ -7914,6 +7963,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
7914 HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269), 7963 HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269),
7915 HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269), 7964 HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
7916 HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269), 7965 HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
7966 HDA_CODEC_ENTRY(0x10ec0257, "ALC257", patch_alc269),
7917 HDA_CODEC_ENTRY(0x10ec0260, "ALC260", patch_alc260), 7967 HDA_CODEC_ENTRY(0x10ec0260, "ALC260", patch_alc260),
7918 HDA_CODEC_ENTRY(0x10ec0262, "ALC262", patch_alc262), 7968 HDA_CODEC_ENTRY(0x10ec0262, "ALC262", patch_alc262),
7919 HDA_CODEC_ENTRY(0x10ec0267, "ALC267", patch_alc268), 7969 HDA_CODEC_ENTRY(0x10ec0267, "ALC267", patch_alc268),
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index 9f521a55d610..b5e41df6bb3a 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -1051,6 +1051,11 @@ static int acp_audio_probe(struct platform_device *pdev)
1051 struct resource *res; 1051 struct resource *res;
1052 const u32 *pdata = pdev->dev.platform_data; 1052 const u32 *pdata = pdev->dev.platform_data;
1053 1053
1054 if (!pdata) {
1055 dev_err(&pdev->dev, "Missing platform data\n");
1056 return -ENODEV;
1057 }
1058
1054 audio_drv_data = devm_kzalloc(&pdev->dev, sizeof(struct audio_drv_data), 1059 audio_drv_data = devm_kzalloc(&pdev->dev, sizeof(struct audio_drv_data),
1055 GFP_KERNEL); 1060 GFP_KERNEL);
1056 if (audio_drv_data == NULL) 1061 if (audio_drv_data == NULL)
@@ -1058,6 +1063,8 @@ static int acp_audio_probe(struct platform_device *pdev)
1058 1063
1059 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1064 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1060 audio_drv_data->acp_mmio = devm_ioremap_resource(&pdev->dev, res); 1065 audio_drv_data->acp_mmio = devm_ioremap_resource(&pdev->dev, res);
1066 if (IS_ERR(audio_drv_data->acp_mmio))
1067 return PTR_ERR(audio_drv_data->acp_mmio);
1061 1068
1062 /* The following members gets populated in device 'open' 1069 /* The following members gets populated in device 'open'
1063 * function. Till then interrupts are disabled in 'acp_init' 1070 * function. Till then interrupts are disabled in 'acp_init'
diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
index 4a56f3dfba51..dcee145dd179 100644
--- a/sound/soc/atmel/Kconfig
+++ b/sound/soc/atmel/Kconfig
@@ -64,7 +64,7 @@ config SND_AT91_SOC_SAM9X5_WM8731
64config SND_ATMEL_SOC_CLASSD 64config SND_ATMEL_SOC_CLASSD
65 tristate "Atmel ASoC driver for boards using CLASSD" 65 tristate "Atmel ASoC driver for boards using CLASSD"
66 depends on ARCH_AT91 || COMPILE_TEST 66 depends on ARCH_AT91 || COMPILE_TEST
67 select SND_ATMEL_SOC_DMA 67 select SND_SOC_GENERIC_DMAENGINE_PCM
68 select REGMAP_MMIO 68 select REGMAP_MMIO
69 help 69 help
70 Say Y if you want to add support for Atmel ASoC driver for boards using 70 Say Y if you want to add support for Atmel ASoC driver for boards using
diff --git a/sound/soc/codecs/da7218.c b/sound/soc/codecs/da7218.c
index b2d42ec1dcd9..56564ce90cb6 100644
--- a/sound/soc/codecs/da7218.c
+++ b/sound/soc/codecs/da7218.c
@@ -2520,7 +2520,7 @@ static struct da7218_pdata *da7218_of_to_pdata(struct snd_soc_codec *codec)
2520 } 2520 }
2521 2521
2522 if (da7218->dev_id == DA7218_DEV_ID) { 2522 if (da7218->dev_id == DA7218_DEV_ID) {
2523 hpldet_np = of_find_node_by_name(np, "da7218_hpldet"); 2523 hpldet_np = of_get_child_by_name(np, "da7218_hpldet");
2524 if (!hpldet_np) 2524 if (!hpldet_np)
2525 return pdata; 2525 return pdata;
2526 2526
diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
index 5f3c42c4f74a..066ea2f4ce7b 100644
--- a/sound/soc/codecs/msm8916-wcd-analog.c
+++ b/sound/soc/codecs/msm8916-wcd-analog.c
@@ -267,7 +267,7 @@
267#define MSM8916_WCD_ANALOG_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\ 267#define MSM8916_WCD_ANALOG_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
268 SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000) 268 SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000)
269#define MSM8916_WCD_ANALOG_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\ 269#define MSM8916_WCD_ANALOG_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
270 SNDRV_PCM_FMTBIT_S24_LE) 270 SNDRV_PCM_FMTBIT_S32_LE)
271 271
272static int btn_mask = SND_JACK_BTN_0 | SND_JACK_BTN_1 | 272static int btn_mask = SND_JACK_BTN_0 | SND_JACK_BTN_1 |
273 SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_BTN_4; 273 SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_BTN_4;
diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c
index a10a724eb448..13354d6304a8 100644
--- a/sound/soc/codecs/msm8916-wcd-digital.c
+++ b/sound/soc/codecs/msm8916-wcd-digital.c
@@ -194,7 +194,7 @@
194 SNDRV_PCM_RATE_32000 | \ 194 SNDRV_PCM_RATE_32000 | \
195 SNDRV_PCM_RATE_48000) 195 SNDRV_PCM_RATE_48000)
196#define MSM8916_WCD_DIGITAL_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\ 196#define MSM8916_WCD_DIGITAL_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
197 SNDRV_PCM_FMTBIT_S24_LE) 197 SNDRV_PCM_FMTBIT_S32_LE)
198 198
199struct msm8916_wcd_digital_priv { 199struct msm8916_wcd_digital_priv {
200 struct clk *ahbclk, *mclk; 200 struct clk *ahbclk, *mclk;
@@ -645,7 +645,7 @@ static int msm8916_wcd_digital_hw_params(struct snd_pcm_substream *substream,
645 RX_I2S_CTL_RX_I2S_MODE_MASK, 645 RX_I2S_CTL_RX_I2S_MODE_MASK,
646 RX_I2S_CTL_RX_I2S_MODE_16); 646 RX_I2S_CTL_RX_I2S_MODE_16);
647 break; 647 break;
648 case SNDRV_PCM_FORMAT_S24_LE: 648 case SNDRV_PCM_FORMAT_S32_LE:
649 snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_TX_I2S_CTL, 649 snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_TX_I2S_CTL,
650 TX_I2S_CTL_TX_I2S_MODE_MASK, 650 TX_I2S_CTL_TX_I2S_MODE_MASK,
651 TX_I2S_CTL_TX_I2S_MODE_32); 651 TX_I2S_CTL_TX_I2S_MODE_32);
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index 714ce17da717..e853a6dfd33b 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -905,6 +905,7 @@ static int nau8825_adc_event(struct snd_soc_dapm_widget *w,
905 905
906 switch (event) { 906 switch (event) {
907 case SND_SOC_DAPM_POST_PMU: 907 case SND_SOC_DAPM_POST_PMU:
908 msleep(125);
908 regmap_update_bits(nau8825->regmap, NAU8825_REG_ENA_CTRL, 909 regmap_update_bits(nau8825->regmap, NAU8825_REG_ENA_CTRL,
909 NAU8825_ENABLE_ADC, NAU8825_ENABLE_ADC); 910 NAU8825_ENABLE_ADC, NAU8825_ENABLE_ADC);
910 break; 911 break;
diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
index 2df91db765ac..64bf26cec20d 100644
--- a/sound/soc/codecs/rt5514-spi.c
+++ b/sound/soc/codecs/rt5514-spi.c
@@ -289,6 +289,8 @@ static int rt5514_spi_pcm_probe(struct snd_soc_platform *platform)
289 dev_err(&rt5514_spi->dev, 289 dev_err(&rt5514_spi->dev,
290 "%s Failed to reguest IRQ: %d\n", __func__, 290 "%s Failed to reguest IRQ: %d\n", __func__,
291 ret); 291 ret);
292 else
293 device_init_wakeup(rt5514_dsp->dev, true);
292 } 294 }
293 295
294 return 0; 296 return 0;
@@ -456,8 +458,6 @@ static int rt5514_spi_probe(struct spi_device *spi)
456 return ret; 458 return ret;
457 } 459 }
458 460
459 device_init_wakeup(&spi->dev, true);
460
461 return 0; 461 return 0;
462} 462}
463 463
@@ -482,10 +482,13 @@ static int __maybe_unused rt5514_resume(struct device *dev)
482 if (device_may_wakeup(dev)) 482 if (device_may_wakeup(dev))
483 disable_irq_wake(irq); 483 disable_irq_wake(irq);
484 484
485 if (rt5514_dsp->substream) { 485 if (rt5514_dsp) {
486 rt5514_spi_burst_read(RT5514_IRQ_CTRL, (u8 *)&buf, sizeof(buf)); 486 if (rt5514_dsp->substream) {
487 if (buf[0] & RT5514_IRQ_STATUS_BIT) 487 rt5514_spi_burst_read(RT5514_IRQ_CTRL, (u8 *)&buf,
488 rt5514_schedule_copy(rt5514_dsp); 488 sizeof(buf));
489 if (buf[0] & RT5514_IRQ_STATUS_BIT)
490 rt5514_schedule_copy(rt5514_dsp);
491 }
489 } 492 }
490 493
491 return 0; 494 return 0;
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
index 2a5b5d74e697..2dd6e9f990a4 100644
--- a/sound/soc/codecs/rt5514.c
+++ b/sound/soc/codecs/rt5514.c
@@ -496,7 +496,7 @@ static const struct snd_soc_dapm_widget rt5514_dapm_widgets[] = {
496 SND_SOC_DAPM_PGA("DMIC1", SND_SOC_NOPM, 0, 0, NULL, 0), 496 SND_SOC_DAPM_PGA("DMIC1", SND_SOC_NOPM, 0, 0, NULL, 0),
497 SND_SOC_DAPM_PGA("DMIC2", SND_SOC_NOPM, 0, 0, NULL, 0), 497 SND_SOC_DAPM_PGA("DMIC2", SND_SOC_NOPM, 0, 0, NULL, 0),
498 498
499 SND_SOC_DAPM_SUPPLY("DMIC CLK", SND_SOC_NOPM, 0, 0, 499 SND_SOC_DAPM_SUPPLY_S("DMIC CLK", 1, SND_SOC_NOPM, 0, 0,
500 rt5514_set_dmic_clk, SND_SOC_DAPM_PRE_PMU), 500 rt5514_set_dmic_clk, SND_SOC_DAPM_PRE_PMU),
501 501
502 SND_SOC_DAPM_SUPPLY("ADC CLK", RT5514_CLK_CTRL1, 502 SND_SOC_DAPM_SUPPLY("ADC CLK", RT5514_CLK_CTRL1,
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index f020d2d1eef4..edc152c8a1fe 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3823,6 +3823,8 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
3823 regmap_read(regmap, RT5645_VENDOR_ID, &val); 3823 regmap_read(regmap, RT5645_VENDOR_ID, &val);
3824 rt5645->v_id = val & 0xff; 3824 rt5645->v_id = val & 0xff;
3825 3825
3826 regmap_write(rt5645->regmap, RT5645_AD_DA_MIXER, 0x8080);
3827
3826 ret = regmap_register_patch(rt5645->regmap, init_list, 3828 ret = regmap_register_patch(rt5645->regmap, init_list,
3827 ARRAY_SIZE(init_list)); 3829 ARRAY_SIZE(init_list));
3828 if (ret != 0) 3830 if (ret != 0)
diff --git a/sound/soc/codecs/rt5663.c b/sound/soc/codecs/rt5663.c
index b036c9dc0c8c..d329bf719d80 100644
--- a/sound/soc/codecs/rt5663.c
+++ b/sound/soc/codecs/rt5663.c
@@ -1560,6 +1560,10 @@ static int rt5663_jack_detect(struct snd_soc_codec *codec, int jack_insert)
1560 RT5663_IRQ_POW_SAV_MASK, RT5663_IRQ_POW_SAV_EN); 1560 RT5663_IRQ_POW_SAV_MASK, RT5663_IRQ_POW_SAV_EN);
1561 snd_soc_update_bits(codec, RT5663_IRQ_1, 1561 snd_soc_update_bits(codec, RT5663_IRQ_1,
1562 RT5663_EN_IRQ_JD1_MASK, RT5663_EN_IRQ_JD1_EN); 1562 RT5663_EN_IRQ_JD1_MASK, RT5663_EN_IRQ_JD1_EN);
1563 snd_soc_update_bits(codec, RT5663_EM_JACK_TYPE_1,
1564 RT5663_EM_JD_MASK, RT5663_EM_JD_RST);
1565 snd_soc_update_bits(codec, RT5663_EM_JACK_TYPE_1,
1566 RT5663_EM_JD_MASK, RT5663_EM_JD_NOR);
1563 1567
1564 while (true) { 1568 while (true) {
1565 regmap_read(rt5663->regmap, RT5663_INT_ST_2, &val); 1569 regmap_read(rt5663->regmap, RT5663_INT_ST_2, &val);
diff --git a/sound/soc/codecs/rt5663.h b/sound/soc/codecs/rt5663.h
index c5a9b69579ad..03adc8004ba9 100644
--- a/sound/soc/codecs/rt5663.h
+++ b/sound/soc/codecs/rt5663.h
@@ -1029,6 +1029,10 @@
1029#define RT5663_POL_EXT_JD_SHIFT 10 1029#define RT5663_POL_EXT_JD_SHIFT 10
1030#define RT5663_POL_EXT_JD_EN (0x1 << 10) 1030#define RT5663_POL_EXT_JD_EN (0x1 << 10)
1031#define RT5663_POL_EXT_JD_DIS (0x0 << 10) 1031#define RT5663_POL_EXT_JD_DIS (0x0 << 10)
1032#define RT5663_EM_JD_MASK (0x1 << 7)
1033#define RT5663_EM_JD_SHIFT 7
1034#define RT5663_EM_JD_NOR (0x1 << 7)
1035#define RT5663_EM_JD_RST (0x0 << 7)
1032 1036
1033/* DACREF LDO Control (0x0112)*/ 1037/* DACREF LDO Control (0x0112)*/
1034#define RT5663_PWR_LDO_DACREFL_MASK (0x1 << 9) 1038#define RT5663_PWR_LDO_DACREFL_MASK (0x1 << 9)
diff --git a/sound/soc/codecs/tlv320aic31xx.h b/sound/soc/codecs/tlv320aic31xx.h
index 730fb2058869..1ff3edb7bbb6 100644
--- a/sound/soc/codecs/tlv320aic31xx.h
+++ b/sound/soc/codecs/tlv320aic31xx.h
@@ -116,7 +116,7 @@ struct aic31xx_pdata {
116/* INT2 interrupt control */ 116/* INT2 interrupt control */
117#define AIC31XX_INT2CTRL AIC31XX_REG(0, 49) 117#define AIC31XX_INT2CTRL AIC31XX_REG(0, 49)
118/* GPIO1 control */ 118/* GPIO1 control */
119#define AIC31XX_GPIO1 AIC31XX_REG(0, 50) 119#define AIC31XX_GPIO1 AIC31XX_REG(0, 51)
120 120
121#define AIC31XX_DACPRB AIC31XX_REG(0, 60) 121#define AIC31XX_DACPRB AIC31XX_REG(0, 60)
122/* ADC Instruction Set Register */ 122/* ADC Instruction Set Register */
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index c482b2e7a7d2..cfe72b9d4356 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -232,7 +232,7 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec)
232 struct twl4030_codec_data *pdata = dev_get_platdata(codec->dev); 232 struct twl4030_codec_data *pdata = dev_get_platdata(codec->dev);
233 struct device_node *twl4030_codec_node = NULL; 233 struct device_node *twl4030_codec_node = NULL;
234 234
235 twl4030_codec_node = of_find_node_by_name(codec->dev->parent->of_node, 235 twl4030_codec_node = of_get_child_by_name(codec->dev->parent->of_node,
236 "codec"); 236 "codec");
237 237
238 if (!pdata && twl4030_codec_node) { 238 if (!pdata && twl4030_codec_node) {
@@ -241,9 +241,11 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec)
241 GFP_KERNEL); 241 GFP_KERNEL);
242 if (!pdata) { 242 if (!pdata) {
243 dev_err(codec->dev, "Can not allocate memory\n"); 243 dev_err(codec->dev, "Can not allocate memory\n");
244 of_node_put(twl4030_codec_node);
244 return NULL; 245 return NULL;
245 } 246 }
246 twl4030_setup_pdata_of(pdata, twl4030_codec_node); 247 twl4030_setup_pdata_of(pdata, twl4030_codec_node);
248 of_node_put(twl4030_codec_node);
247 } 249 }
248 250
249 return pdata; 251 return pdata;
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 65c059b5ffd7..66e32f5d2917 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -1733,7 +1733,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
1733 le64_to_cpu(footer->timestamp)); 1733 le64_to_cpu(footer->timestamp));
1734 1734
1735 while (pos < firmware->size && 1735 while (pos < firmware->size &&
1736 pos - firmware->size > sizeof(*region)) { 1736 sizeof(*region) < firmware->size - pos) {
1737 region = (void *)&(firmware->data[pos]); 1737 region = (void *)&(firmware->data[pos]);
1738 region_name = "Unknown"; 1738 region_name = "Unknown";
1739 reg = 0; 1739 reg = 0;
@@ -1782,8 +1782,8 @@ static int wm_adsp_load(struct wm_adsp *dsp)
1782 regions, le32_to_cpu(region->len), offset, 1782 regions, le32_to_cpu(region->len), offset,
1783 region_name); 1783 region_name);
1784 1784
1785 if ((pos + le32_to_cpu(region->len) + sizeof(*region)) > 1785 if (le32_to_cpu(region->len) >
1786 firmware->size) { 1786 firmware->size - pos - sizeof(*region)) {
1787 adsp_err(dsp, 1787 adsp_err(dsp,
1788 "%s.%d: %s region len %d bytes exceeds file length %zu\n", 1788 "%s.%d: %s region len %d bytes exceeds file length %zu\n",
1789 file, regions, region_name, 1789 file, regions, region_name,
@@ -2253,7 +2253,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
2253 2253
2254 blocks = 0; 2254 blocks = 0;
2255 while (pos < firmware->size && 2255 while (pos < firmware->size &&
2256 pos - firmware->size > sizeof(*blk)) { 2256 sizeof(*blk) < firmware->size - pos) {
2257 blk = (void *)(&firmware->data[pos]); 2257 blk = (void *)(&firmware->data[pos]);
2258 2258
2259 type = le16_to_cpu(blk->type); 2259 type = le16_to_cpu(blk->type);
@@ -2327,8 +2327,8 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
2327 } 2327 }
2328 2328
2329 if (reg) { 2329 if (reg) {
2330 if ((pos + le32_to_cpu(blk->len) + sizeof(*blk)) > 2330 if (le32_to_cpu(blk->len) >
2331 firmware->size) { 2331 firmware->size - pos - sizeof(*blk)) {
2332 adsp_err(dsp, 2332 adsp_err(dsp,
2333 "%s.%d: %s region len %d bytes exceeds file length %zu\n", 2333 "%s.%d: %s region len %d bytes exceeds file length %zu\n",
2334 file, blocks, region_name, 2334 file, blocks, region_name,
diff --git a/sound/soc/fsl/fsl_asrc.h b/sound/soc/fsl/fsl_asrc.h
index 0f163abe4ba3..52c27a358933 100644
--- a/sound/soc/fsl/fsl_asrc.h
+++ b/sound/soc/fsl/fsl_asrc.h
@@ -260,8 +260,8 @@
260#define ASRFSTi_OUTPUT_FIFO_SHIFT 12 260#define ASRFSTi_OUTPUT_FIFO_SHIFT 12
261#define ASRFSTi_OUTPUT_FIFO_MASK (((1 << ASRFSTi_OUTPUT_FIFO_WIDTH) - 1) << ASRFSTi_OUTPUT_FIFO_SHIFT) 261#define ASRFSTi_OUTPUT_FIFO_MASK (((1 << ASRFSTi_OUTPUT_FIFO_WIDTH) - 1) << ASRFSTi_OUTPUT_FIFO_SHIFT)
262#define ASRFSTi_IAEi_SHIFT 11 262#define ASRFSTi_IAEi_SHIFT 11
263#define ASRFSTi_IAEi_MASK (1 << ASRFSTi_OAFi_SHIFT) 263#define ASRFSTi_IAEi_MASK (1 << ASRFSTi_IAEi_SHIFT)
264#define ASRFSTi_IAEi (1 << ASRFSTi_OAFi_SHIFT) 264#define ASRFSTi_IAEi (1 << ASRFSTi_IAEi_SHIFT)
265#define ASRFSTi_INPUT_FIFO_WIDTH 7 265#define ASRFSTi_INPUT_FIFO_WIDTH 7
266#define ASRFSTi_INPUT_FIFO_SHIFT 0 266#define ASRFSTi_INPUT_FIFO_SHIFT 0
267#define ASRFSTi_INPUT_FIFO_MASK ((1 << ASRFSTi_INPUT_FIFO_WIDTH) - 1) 267#define ASRFSTi_INPUT_FIFO_MASK ((1 << ASRFSTi_INPUT_FIFO_WIDTH) - 1)
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index f2f51e06e22c..424bafaf51ef 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -38,6 +38,7 @@
38#include <linux/ctype.h> 38#include <linux/ctype.h>
39#include <linux/device.h> 39#include <linux/device.h>
40#include <linux/delay.h> 40#include <linux/delay.h>
41#include <linux/mutex.h>
41#include <linux/slab.h> 42#include <linux/slab.h>
42#include <linux/spinlock.h> 43#include <linux/spinlock.h>
43#include <linux/of.h> 44#include <linux/of.h>
@@ -265,6 +266,8 @@ struct fsl_ssi_private {
265 266
266 u32 fifo_watermark; 267 u32 fifo_watermark;
267 u32 dma_maxburst; 268 u32 dma_maxburst;
269
270 struct mutex ac97_reg_lock;
268}; 271};
269 272
270/* 273/*
@@ -1260,11 +1263,13 @@ static void fsl_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
1260 if (reg > 0x7f) 1263 if (reg > 0x7f)
1261 return; 1264 return;
1262 1265
1266 mutex_lock(&fsl_ac97_data->ac97_reg_lock);
1267
1263 ret = clk_prepare_enable(fsl_ac97_data->clk); 1268 ret = clk_prepare_enable(fsl_ac97_data->clk);
1264 if (ret) { 1269 if (ret) {
1265 pr_err("ac97 write clk_prepare_enable failed: %d\n", 1270 pr_err("ac97 write clk_prepare_enable failed: %d\n",
1266 ret); 1271 ret);
1267 return; 1272 goto ret_unlock;
1268 } 1273 }
1269 1274
1270 lreg = reg << 12; 1275 lreg = reg << 12;
@@ -1278,6 +1283,9 @@ static void fsl_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
1278 udelay(100); 1283 udelay(100);
1279 1284
1280 clk_disable_unprepare(fsl_ac97_data->clk); 1285 clk_disable_unprepare(fsl_ac97_data->clk);
1286
1287ret_unlock:
1288 mutex_unlock(&fsl_ac97_data->ac97_reg_lock);
1281} 1289}
1282 1290
1283static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97, 1291static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97,
@@ -1285,16 +1293,18 @@ static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97,
1285{ 1293{
1286 struct regmap *regs = fsl_ac97_data->regs; 1294 struct regmap *regs = fsl_ac97_data->regs;
1287 1295
1288 unsigned short val = -1; 1296 unsigned short val = 0;
1289 u32 reg_val; 1297 u32 reg_val;
1290 unsigned int lreg; 1298 unsigned int lreg;
1291 int ret; 1299 int ret;
1292 1300
1301 mutex_lock(&fsl_ac97_data->ac97_reg_lock);
1302
1293 ret = clk_prepare_enable(fsl_ac97_data->clk); 1303 ret = clk_prepare_enable(fsl_ac97_data->clk);
1294 if (ret) { 1304 if (ret) {
1295 pr_err("ac97 read clk_prepare_enable failed: %d\n", 1305 pr_err("ac97 read clk_prepare_enable failed: %d\n",
1296 ret); 1306 ret);
1297 return -1; 1307 goto ret_unlock;
1298 } 1308 }
1299 1309
1300 lreg = (reg & 0x7f) << 12; 1310 lreg = (reg & 0x7f) << 12;
@@ -1309,6 +1319,8 @@ static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97,
1309 1319
1310 clk_disable_unprepare(fsl_ac97_data->clk); 1320 clk_disable_unprepare(fsl_ac97_data->clk);
1311 1321
1322ret_unlock:
1323 mutex_unlock(&fsl_ac97_data->ac97_reg_lock);
1312 return val; 1324 return val;
1313} 1325}
1314 1326
@@ -1458,12 +1470,6 @@ static int fsl_ssi_probe(struct platform_device *pdev)
1458 sizeof(fsl_ssi_ac97_dai)); 1470 sizeof(fsl_ssi_ac97_dai));
1459 1471
1460 fsl_ac97_data = ssi_private; 1472 fsl_ac97_data = ssi_private;
1461
1462 ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
1463 if (ret) {
1464 dev_err(&pdev->dev, "could not set AC'97 ops\n");
1465 return ret;
1466 }
1467 } else { 1473 } else {
1468 /* Initialize this copy of the CPU DAI driver structure */ 1474 /* Initialize this copy of the CPU DAI driver structure */
1469 memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template, 1475 memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template,
@@ -1574,6 +1580,15 @@ static int fsl_ssi_probe(struct platform_device *pdev)
1574 return ret; 1580 return ret;
1575 } 1581 }
1576 1582
1583 if (fsl_ssi_is_ac97(ssi_private)) {
1584 mutex_init(&ssi_private->ac97_reg_lock);
1585 ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
1586 if (ret) {
1587 dev_err(&pdev->dev, "could not set AC'97 ops\n");
1588 goto error_ac97_ops;
1589 }
1590 }
1591
1577 ret = devm_snd_soc_register_component(&pdev->dev, &fsl_ssi_component, 1592 ret = devm_snd_soc_register_component(&pdev->dev, &fsl_ssi_component,
1578 &ssi_private->cpu_dai_drv, 1); 1593 &ssi_private->cpu_dai_drv, 1);
1579 if (ret) { 1594 if (ret) {
@@ -1657,6 +1672,13 @@ error_sound_card:
1657 fsl_ssi_debugfs_remove(&ssi_private->dbg_stats); 1672 fsl_ssi_debugfs_remove(&ssi_private->dbg_stats);
1658 1673
1659error_asoc_register: 1674error_asoc_register:
1675 if (fsl_ssi_is_ac97(ssi_private))
1676 snd_soc_set_ac97_ops(NULL);
1677
1678error_ac97_ops:
1679 if (fsl_ssi_is_ac97(ssi_private))
1680 mutex_destroy(&ssi_private->ac97_reg_lock);
1681
1660 if (ssi_private->soc->imx) 1682 if (ssi_private->soc->imx)
1661 fsl_ssi_imx_clean(pdev, ssi_private); 1683 fsl_ssi_imx_clean(pdev, ssi_private);
1662 1684
@@ -1675,8 +1697,10 @@ static int fsl_ssi_remove(struct platform_device *pdev)
1675 if (ssi_private->soc->imx) 1697 if (ssi_private->soc->imx)
1676 fsl_ssi_imx_clean(pdev, ssi_private); 1698 fsl_ssi_imx_clean(pdev, ssi_private);
1677 1699
1678 if (fsl_ssi_is_ac97(ssi_private)) 1700 if (fsl_ssi_is_ac97(ssi_private)) {
1679 snd_soc_set_ac97_ops(NULL); 1701 snd_soc_set_ac97_ops(NULL);
1702 mutex_destroy(&ssi_private->ac97_reg_lock);
1703 }
1680 1704
1681 return 0; 1705 return 0;
1682} 1706}
diff --git a/sound/soc/intel/boards/kbl_rt5663_max98927.c b/sound/soc/intel/boards/kbl_rt5663_max98927.c
index 6f9a8bcf20f3..6dcad0a8a0d0 100644
--- a/sound/soc/intel/boards/kbl_rt5663_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_max98927.c
@@ -101,7 +101,7 @@ static const struct snd_soc_dapm_route kabylake_map[] = {
101 { "ssp0 Tx", NULL, "spk_out" }, 101 { "ssp0 Tx", NULL, "spk_out" },
102 102
103 { "AIF Playback", NULL, "ssp1 Tx" }, 103 { "AIF Playback", NULL, "ssp1 Tx" },
104 { "ssp1 Tx", NULL, "hs_out" }, 104 { "ssp1 Tx", NULL, "codec1_out" },
105 105
106 { "hs_in", NULL, "ssp1 Rx" }, 106 { "hs_in", NULL, "ssp1 Rx" },
107 { "ssp1 Rx", NULL, "AIF Capture" }, 107 { "ssp1 Rx", NULL, "AIF Capture" },
diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
index 6072164f2d43..271ae3c2c535 100644
--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
@@ -109,7 +109,7 @@ static const struct snd_soc_dapm_route kabylake_map[] = {
109 { "ssp0 Tx", NULL, "spk_out" }, 109 { "ssp0 Tx", NULL, "spk_out" },
110 110
111 { "AIF Playback", NULL, "ssp1 Tx" }, 111 { "AIF Playback", NULL, "ssp1 Tx" },
112 { "ssp1 Tx", NULL, "hs_out" }, 112 { "ssp1 Tx", NULL, "codec1_out" },
113 113
114 { "hs_in", NULL, "ssp1 Rx" }, 114 { "hs_in", NULL, "ssp1 Rx" },
115 { "ssp1 Rx", NULL, "AIF Capture" }, 115 { "ssp1 Rx", NULL, "AIF Capture" },
diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
index d14c50a60289..3eaac41090ca 100644
--- a/sound/soc/intel/skylake/skl-nhlt.c
+++ b/sound/soc/intel/skylake/skl-nhlt.c
@@ -119,11 +119,16 @@ static bool skl_check_ep_match(struct device *dev, struct nhlt_endpoint *epnt,
119 119
120 if ((epnt->virtual_bus_id == instance_id) && 120 if ((epnt->virtual_bus_id == instance_id) &&
121 (epnt->linktype == link_type) && 121 (epnt->linktype == link_type) &&
122 (epnt->direction == dirn) && 122 (epnt->direction == dirn)) {
123 (epnt->device_type == dev_type)) 123 /* do not check dev_type for DMIC link type */
124 return true; 124 if (epnt->linktype == NHLT_LINK_DMIC)
125 else 125 return true;
126 return false; 126
127 if (epnt->device_type == dev_type)
128 return true;
129 }
130
131 return false;
127} 132}
128 133
129struct nhlt_specific_cfg 134struct nhlt_specific_cfg
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index a072bcf209d2..81923da18ac2 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -2908,7 +2908,7 @@ static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
2908 break; 2908 break;
2909 2909
2910 default: 2910 default:
2911 dev_warn(bus->dev, "Control load not supported %d:%d:%d\n", 2911 dev_dbg(bus->dev, "Control load not supported %d:%d:%d\n",
2912 hdr->ops.get, hdr->ops.put, hdr->ops.info); 2912 hdr->ops.get, hdr->ops.put, hdr->ops.info);
2913 break; 2913 break;
2914 } 2914 }
diff --git a/sound/soc/rockchip/rockchip_spdif.c b/sound/soc/rockchip/rockchip_spdif.c
index ee5055d47d13..a89fe9b6463b 100644
--- a/sound/soc/rockchip/rockchip_spdif.c
+++ b/sound/soc/rockchip/rockchip_spdif.c
@@ -322,26 +322,30 @@ static int rk_spdif_probe(struct platform_device *pdev)
322 spdif->mclk = devm_clk_get(&pdev->dev, "mclk"); 322 spdif->mclk = devm_clk_get(&pdev->dev, "mclk");
323 if (IS_ERR(spdif->mclk)) { 323 if (IS_ERR(spdif->mclk)) {
324 dev_err(&pdev->dev, "Can't retrieve rk_spdif master clock\n"); 324 dev_err(&pdev->dev, "Can't retrieve rk_spdif master clock\n");
325 return PTR_ERR(spdif->mclk); 325 ret = PTR_ERR(spdif->mclk);
326 goto err_disable_hclk;
326 } 327 }
327 328
328 ret = clk_prepare_enable(spdif->mclk); 329 ret = clk_prepare_enable(spdif->mclk);
329 if (ret) { 330 if (ret) {
330 dev_err(spdif->dev, "clock enable failed %d\n", ret); 331 dev_err(spdif->dev, "clock enable failed %d\n", ret);
331 return ret; 332 goto err_disable_clocks;
332 } 333 }
333 334
334 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 335 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
335 regs = devm_ioremap_resource(&pdev->dev, res); 336 regs = devm_ioremap_resource(&pdev->dev, res);
336 if (IS_ERR(regs)) 337 if (IS_ERR(regs)) {
337 return PTR_ERR(regs); 338 ret = PTR_ERR(regs);
339 goto err_disable_clocks;
340 }
338 341
339 spdif->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "hclk", regs, 342 spdif->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "hclk", regs,
340 &rk_spdif_regmap_config); 343 &rk_spdif_regmap_config);
341 if (IS_ERR(spdif->regmap)) { 344 if (IS_ERR(spdif->regmap)) {
342 dev_err(&pdev->dev, 345 dev_err(&pdev->dev,
343 "Failed to initialise managed register map\n"); 346 "Failed to initialise managed register map\n");
344 return PTR_ERR(spdif->regmap); 347 ret = PTR_ERR(spdif->regmap);
348 goto err_disable_clocks;
345 } 349 }
346 350
347 spdif->playback_dma_data.addr = res->start + SPDIF_SMPDR; 351 spdif->playback_dma_data.addr = res->start + SPDIF_SMPDR;
@@ -373,6 +377,10 @@ static int rk_spdif_probe(struct platform_device *pdev)
373 377
374err_pm_runtime: 378err_pm_runtime:
375 pm_runtime_disable(&pdev->dev); 379 pm_runtime_disable(&pdev->dev);
380err_disable_clocks:
381 clk_disable_unprepare(spdif->mclk);
382err_disable_hclk:
383 clk_disable_unprepare(spdif->hclk);
376 384
377 return ret; 385 return ret;
378} 386}
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 8ddb08714faa..4672688cac32 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -222,7 +222,7 @@ int rsnd_adg_set_cmd_timsel_gen2(struct rsnd_mod *cmd_mod,
222 NULL, &val, NULL); 222 NULL, &val, NULL);
223 223
224 val = val << shift; 224 val = val << shift;
225 mask = 0xffff << shift; 225 mask = 0x0f1f << shift;
226 226
227 rsnd_mod_bset(adg_mod, CMDOUT_TIMSEL, mask, val); 227 rsnd_mod_bset(adg_mod, CMDOUT_TIMSEL, mask, val);
228 228
@@ -250,7 +250,7 @@ int rsnd_adg_set_src_timesel_gen2(struct rsnd_mod *src_mod,
250 250
251 in = in << shift; 251 in = in << shift;
252 out = out << shift; 252 out = out << shift;
253 mask = 0xffff << shift; 253 mask = 0x0f1f << shift;
254 254
255 switch (id / 2) { 255 switch (id / 2) {
256 case 0: 256 case 0:
@@ -380,7 +380,7 @@ int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *ssi_mod, unsigned int rate)
380 ckr = 0x80000000; 380 ckr = 0x80000000;
381 } 381 }
382 382
383 rsnd_mod_bset(adg_mod, BRGCKR, 0x80FF0000, adg->ckr | ckr); 383 rsnd_mod_bset(adg_mod, BRGCKR, 0x80770000, adg->ckr | ckr);
384 rsnd_mod_write(adg_mod, BRRA, adg->rbga); 384 rsnd_mod_write(adg_mod, BRRA, adg->rbga);
385 rsnd_mod_write(adg_mod, BRRB, adg->rbgb); 385 rsnd_mod_write(adg_mod, BRRB, adg->rbgb);
386 386
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index c70eb2097816..f12a88a21dfa 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -1332,8 +1332,8 @@ static int rsnd_pcm_new(struct snd_soc_pcm_runtime *rtd)
1332 1332
1333 return snd_pcm_lib_preallocate_pages_for_all( 1333 return snd_pcm_lib_preallocate_pages_for_all(
1334 rtd->pcm, 1334 rtd->pcm,
1335 SNDRV_DMA_TYPE_CONTINUOUS, 1335 SNDRV_DMA_TYPE_DEV,
1336 snd_dma_continuous_data(GFP_KERNEL), 1336 rtd->card->snd_card->dev,
1337 PREALLOC_BUFFER, PREALLOC_BUFFER_MAX); 1337 PREALLOC_BUFFER, PREALLOC_BUFFER_MAX);
1338} 1338}
1339 1339
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index fd557abfe390..4d750bdf8e24 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -26,10 +26,7 @@
26struct rsnd_dmaen { 26struct rsnd_dmaen {
27 struct dma_chan *chan; 27 struct dma_chan *chan;
28 dma_cookie_t cookie; 28 dma_cookie_t cookie;
29 dma_addr_t dma_buf;
30 unsigned int dma_len; 29 unsigned int dma_len;
31 unsigned int dma_period;
32 unsigned int dma_cnt;
33}; 30};
34 31
35struct rsnd_dmapp { 32struct rsnd_dmapp {
@@ -71,38 +68,10 @@ static struct rsnd_mod mem = {
71/* 68/*
72 * Audio DMAC 69 * Audio DMAC
73 */ 70 */
74#define rsnd_dmaen_sync(dmaen, io, i) __rsnd_dmaen_sync(dmaen, io, i, 1)
75#define rsnd_dmaen_unsync(dmaen, io, i) __rsnd_dmaen_sync(dmaen, io, i, 0)
76static void __rsnd_dmaen_sync(struct rsnd_dmaen *dmaen, struct rsnd_dai_stream *io,
77 int i, int sync)
78{
79 struct device *dev = dmaen->chan->device->dev;
80 enum dma_data_direction dir;
81 int is_play = rsnd_io_is_play(io);
82 dma_addr_t buf;
83 int len, max;
84 size_t period;
85
86 len = dmaen->dma_len;
87 period = dmaen->dma_period;
88 max = len / period;
89 i = i % max;
90 buf = dmaen->dma_buf + (period * i);
91
92 dir = is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
93
94 if (sync)
95 dma_sync_single_for_device(dev, buf, period, dir);
96 else
97 dma_sync_single_for_cpu(dev, buf, period, dir);
98}
99
100static void __rsnd_dmaen_complete(struct rsnd_mod *mod, 71static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
101 struct rsnd_dai_stream *io) 72 struct rsnd_dai_stream *io)
102{ 73{
103 struct rsnd_priv *priv = rsnd_mod_to_priv(mod); 74 struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
104 struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
105 struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
106 bool elapsed = false; 75 bool elapsed = false;
107 unsigned long flags; 76 unsigned long flags;
108 77
@@ -115,22 +84,9 @@ static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
115 */ 84 */
116 spin_lock_irqsave(&priv->lock, flags); 85 spin_lock_irqsave(&priv->lock, flags);
117 86
118 if (rsnd_io_is_working(io)) { 87 if (rsnd_io_is_working(io))
119 rsnd_dmaen_unsync(dmaen, io, dmaen->dma_cnt);
120
121 /*
122 * Next period is already started.
123 * Let's sync Next Next period
124 * see
125 * rsnd_dmaen_start()
126 */
127 rsnd_dmaen_sync(dmaen, io, dmaen->dma_cnt + 2);
128
129 elapsed = true; 88 elapsed = true;
130 89
131 dmaen->dma_cnt++;
132 }
133
134 spin_unlock_irqrestore(&priv->lock, flags); 90 spin_unlock_irqrestore(&priv->lock, flags);
135 91
136 if (elapsed) 92 if (elapsed)
@@ -165,14 +121,8 @@ static int rsnd_dmaen_stop(struct rsnd_mod *mod,
165 struct rsnd_dma *dma = rsnd_mod_to_dma(mod); 121 struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
166 struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma); 122 struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
167 123
168 if (dmaen->chan) { 124 if (dmaen->chan)
169 int is_play = rsnd_io_is_play(io);
170
171 dmaengine_terminate_all(dmaen->chan); 125 dmaengine_terminate_all(dmaen->chan);
172 dma_unmap_single(dmaen->chan->device->dev,
173 dmaen->dma_buf, dmaen->dma_len,
174 is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
175 }
176 126
177 return 0; 127 return 0;
178} 128}
@@ -237,11 +187,7 @@ static int rsnd_dmaen_start(struct rsnd_mod *mod,
237 struct device *dev = rsnd_priv_to_dev(priv); 187 struct device *dev = rsnd_priv_to_dev(priv);
238 struct dma_async_tx_descriptor *desc; 188 struct dma_async_tx_descriptor *desc;
239 struct dma_slave_config cfg = {}; 189 struct dma_slave_config cfg = {};
240 dma_addr_t buf;
241 size_t len;
242 size_t period;
243 int is_play = rsnd_io_is_play(io); 190 int is_play = rsnd_io_is_play(io);
244 int i;
245 int ret; 191 int ret;
246 192
247 cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; 193 cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
@@ -258,19 +204,10 @@ static int rsnd_dmaen_start(struct rsnd_mod *mod,
258 if (ret < 0) 204 if (ret < 0)
259 return ret; 205 return ret;
260 206
261 len = snd_pcm_lib_buffer_bytes(substream);
262 period = snd_pcm_lib_period_bytes(substream);
263 buf = dma_map_single(dmaen->chan->device->dev,
264 substream->runtime->dma_area,
265 len,
266 is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
267 if (dma_mapping_error(dmaen->chan->device->dev, buf)) {
268 dev_err(dev, "dma map failed\n");
269 return -EIO;
270 }
271
272 desc = dmaengine_prep_dma_cyclic(dmaen->chan, 207 desc = dmaengine_prep_dma_cyclic(dmaen->chan,
273 buf, len, period, 208 substream->runtime->dma_addr,
209 snd_pcm_lib_buffer_bytes(substream),
210 snd_pcm_lib_period_bytes(substream),
274 is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, 211 is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
275 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 212 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
276 213
@@ -282,18 +219,7 @@ static int rsnd_dmaen_start(struct rsnd_mod *mod,
282 desc->callback = rsnd_dmaen_complete; 219 desc->callback = rsnd_dmaen_complete;
283 desc->callback_param = rsnd_mod_get(dma); 220 desc->callback_param = rsnd_mod_get(dma);
284 221
285 dmaen->dma_buf = buf; 222 dmaen->dma_len = snd_pcm_lib_buffer_bytes(substream);
286 dmaen->dma_len = len;
287 dmaen->dma_period = period;
288 dmaen->dma_cnt = 0;
289
290 /*
291 * synchronize this and next period
292 * see
293 * __rsnd_dmaen_complete()
294 */
295 for (i = 0; i < 2; i++)
296 rsnd_dmaen_sync(dmaen, io, i);
297 223
298 dmaen->cookie = dmaengine_submit(desc); 224 dmaen->cookie = dmaengine_submit(desc);
299 if (dmaen->cookie < 0) { 225 if (dmaen->cookie < 0) {
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index fece1e5f582f..cbf3bf312d23 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -446,25 +446,29 @@ static bool rsnd_ssi_pointer_update(struct rsnd_mod *mod,
446 int byte) 446 int byte)
447{ 447{
448 struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod); 448 struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
449 bool ret = false;
450 int byte_pos;
449 451
450 ssi->byte_pos += byte; 452 byte_pos = ssi->byte_pos + byte;
451 453
452 if (ssi->byte_pos >= ssi->next_period_byte) { 454 if (byte_pos >= ssi->next_period_byte) {
453 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); 455 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
454 456
455 ssi->period_pos++; 457 ssi->period_pos++;
456 ssi->next_period_byte += ssi->byte_per_period; 458 ssi->next_period_byte += ssi->byte_per_period;
457 459
458 if (ssi->period_pos >= runtime->periods) { 460 if (ssi->period_pos >= runtime->periods) {
459 ssi->byte_pos = 0; 461 byte_pos = 0;
460 ssi->period_pos = 0; 462 ssi->period_pos = 0;
461 ssi->next_period_byte = ssi->byte_per_period; 463 ssi->next_period_byte = ssi->byte_per_period;
462 } 464 }
463 465
464 return true; 466 ret = true;
465 } 467 }
466 468
467 return false; 469 WRITE_ONCE(ssi->byte_pos, byte_pos);
470
471 return ret;
468} 472}
469 473
470/* 474/*
@@ -838,7 +842,7 @@ static int rsnd_ssi_pointer(struct rsnd_mod *mod,
838 struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod); 842 struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
839 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); 843 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
840 844
841 *pointer = bytes_to_frames(runtime, ssi->byte_pos); 845 *pointer = bytes_to_frames(runtime, READ_ONCE(ssi->byte_pos));
842 846
843 return 0; 847 return 0;
844} 848}
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
index 4d948757d300..6ff8a36c2c82 100644
--- a/sound/soc/sh/rcar/ssiu.c
+++ b/sound/soc/sh/rcar/ssiu.c
@@ -125,6 +125,7 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
125{ 125{
126 int hdmi = rsnd_ssi_hdmi_port(io); 126 int hdmi = rsnd_ssi_hdmi_port(io);
127 int ret; 127 int ret;
128 u32 mode = 0;
128 129
129 ret = rsnd_ssiu_init(mod, io, priv); 130 ret = rsnd_ssiu_init(mod, io, priv);
130 if (ret < 0) 131 if (ret < 0)
@@ -136,9 +137,11 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
136 * see 137 * see
137 * rsnd_ssi_config_init() 138 * rsnd_ssi_config_init()
138 */ 139 */
139 rsnd_mod_write(mod, SSI_MODE, 0x1); 140 mode = 0x1;
140 } 141 }
141 142
143 rsnd_mod_write(mod, SSI_MODE, mode);
144
142 if (rsnd_ssi_use_busif(io)) { 145 if (rsnd_ssi_use_busif(io)) {
143 rsnd_mod_write(mod, SSI_BUSIF_ADINR, 146 rsnd_mod_write(mod, SSI_BUSIF_ADINR,
144 rsnd_get_adinr_bit(mod, io) | 147 rsnd_get_adinr_bit(mod, io) |
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 61b348383de8..2b4ceda36291 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -204,6 +204,10 @@ static int snd_usb_copy_string_desc(struct mixer_build *state,
204 int index, char *buf, int maxlen) 204 int index, char *buf, int maxlen)
205{ 205{
206 int len = usb_string(state->chip->dev, index, buf, maxlen - 1); 206 int len = usb_string(state->chip->dev, index, buf, maxlen - 1);
207
208 if (len < 0)
209 return 0;
210
207 buf[len] = 0; 211 buf[len] = 0;
208 return len; 212 return len;
209} 213}
@@ -2169,19 +2173,25 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
2169 kctl->private_value = (unsigned long)namelist; 2173 kctl->private_value = (unsigned long)namelist;
2170 kctl->private_free = usb_mixer_selector_elem_free; 2174 kctl->private_free = usb_mixer_selector_elem_free;
2171 2175
2172 nameid = uac_selector_unit_iSelector(desc); 2176 /* check the static mapping table at first */
2173 len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name)); 2177 len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
2174 if (len) 2178 if (!len) {
2175 ; 2179 /* no mapping ? */
2176 else if (nameid) 2180 /* if iSelector is given, use it */
2177 snd_usb_copy_string_desc(state, nameid, kctl->id.name, 2181 nameid = uac_selector_unit_iSelector(desc);
2178 sizeof(kctl->id.name)); 2182 if (nameid)
2179 else { 2183 len = snd_usb_copy_string_desc(state, nameid,
2180 len = get_term_name(state, &state->oterm, 2184 kctl->id.name,
2185 sizeof(kctl->id.name));
2186 /* ... or pick up the terminal name at next */
2187 if (!len)
2188 len = get_term_name(state, &state->oterm,
2181 kctl->id.name, sizeof(kctl->id.name), 0); 2189 kctl->id.name, sizeof(kctl->id.name), 0);
2190 /* ... or use the fixed string "USB" as the last resort */
2182 if (!len) 2191 if (!len)
2183 strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name)); 2192 strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
2184 2193
2194 /* and add the proper suffix */
2185 if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR) 2195 if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR)
2186 append_ctl_name(kctl, " Clock Source"); 2196 append_ctl_name(kctl, " Clock Source");
2187 else if ((state->oterm.type & 0xff00) == 0x0100) 2197 else if ((state->oterm.type & 0xff00) == 0x0100)
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 77eecaa4db1f..a66ef5777887 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1166,10 +1166,11 @@ static bool is_marantz_denon_dac(unsigned int id)
1166/* TEAC UD-501/UD-503/NT-503 USB DACs need a vendor cmd to switch 1166/* TEAC UD-501/UD-503/NT-503 USB DACs need a vendor cmd to switch
1167 * between PCM/DOP and native DSD mode 1167 * between PCM/DOP and native DSD mode
1168 */ 1168 */
1169static bool is_teac_50X_dac(unsigned int id) 1169static bool is_teac_dsd_dac(unsigned int id)
1170{ 1170{
1171 switch (id) { 1171 switch (id) {
1172 case USB_ID(0x0644, 0x8043): /* TEAC UD-501/UD-503/NT-503 */ 1172 case USB_ID(0x0644, 0x8043): /* TEAC UD-501/UD-503/NT-503 */
1173 case USB_ID(0x0644, 0x8044): /* Esoteric D-05X */
1173 return true; 1174 return true;
1174 } 1175 }
1175 return false; 1176 return false;
@@ -1202,7 +1203,7 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
1202 break; 1203 break;
1203 } 1204 }
1204 mdelay(20); 1205 mdelay(20);
1205 } else if (is_teac_50X_dac(subs->stream->chip->usb_id)) { 1206 } else if (is_teac_dsd_dac(subs->stream->chip->usb_id)) {
1206 /* Vendor mode switch cmd is required. */ 1207 /* Vendor mode switch cmd is required. */
1207 switch (fmt->altsetting) { 1208 switch (fmt->altsetting) {
1208 case 3: /* DSD mode (DSD_U32) requested */ 1209 case 3: /* DSD mode (DSD_U32) requested */
@@ -1392,7 +1393,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
1392 } 1393 }
1393 1394
1394 /* TEAC devices with USB DAC functionality */ 1395 /* TEAC devices with USB DAC functionality */
1395 if (is_teac_50X_dac(chip->usb_id)) { 1396 if (is_teac_dsd_dac(chip->usb_id)) {
1396 if (fp->altsetting == 3) 1397 if (fp->altsetting == 3)
1397 return SNDRV_PCM_FMTBIT_DSD_U32_BE; 1398 return SNDRV_PCM_FMTBIT_DSD_U32_BE;
1398 } 1399 }
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h
index 1f57bbe82b6f..6edd177bb1c7 100644
--- a/tools/arch/arm/include/uapi/asm/kvm.h
+++ b/tools/arch/arm/include/uapi/asm/kvm.h
@@ -152,6 +152,12 @@ struct kvm_arch_memory_slot {
152 (__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64) 152 (__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64)
153#define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__) 153#define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__)
154 154
155/* PL1 Physical Timer Registers */
156#define KVM_REG_ARM_PTIMER_CTL ARM_CP15_REG32(0, 14, 2, 1)
157#define KVM_REG_ARM_PTIMER_CNT ARM_CP15_REG64(0, 14)
158#define KVM_REG_ARM_PTIMER_CVAL ARM_CP15_REG64(2, 14)
159
160/* Virtual Timer Registers */
155#define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1) 161#define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1)
156#define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14) 162#define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14)
157#define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14) 163#define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14)
@@ -216,6 +222,7 @@ struct kvm_arch_memory_slot {
216#define KVM_DEV_ARM_ITS_SAVE_TABLES 1 222#define KVM_DEV_ARM_ITS_SAVE_TABLES 1
217#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2 223#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
218#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 224#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
225#define KVM_DEV_ARM_ITS_CTRL_RESET 4
219 226
220/* KVM_IRQ_LINE irq field index values */ 227/* KVM_IRQ_LINE irq field index values */
221#define KVM_ARM_IRQ_TYPE_SHIFT 24 228#define KVM_ARM_IRQ_TYPE_SHIFT 24
diff --git a/tools/arch/arm64/include/uapi/asm/bpf_perf_event.h b/tools/arch/arm64/include/uapi/asm/bpf_perf_event.h
new file mode 100644
index 000000000000..b551b741653d
--- /dev/null
+++ b/tools/arch/arm64/include/uapi/asm/bpf_perf_event.h
@@ -0,0 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
3#define _UAPI__ASM_BPF_PERF_EVENT_H__
4
5#include <asm/ptrace.h>
6
7typedef struct user_pt_regs bpf_user_pt_regs_t;
8
9#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index 51149ec75fe4..9abbf3044654 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -196,6 +196,12 @@ struct kvm_arch_memory_slot {
196 196
197#define ARM64_SYS_REG(...) (__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64) 197#define ARM64_SYS_REG(...) (__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64)
198 198
199/* Physical Timer EL0 Registers */
200#define KVM_REG_ARM_PTIMER_CTL ARM64_SYS_REG(3, 3, 14, 2, 1)
201#define KVM_REG_ARM_PTIMER_CVAL ARM64_SYS_REG(3, 3, 14, 2, 2)
202#define KVM_REG_ARM_PTIMER_CNT ARM64_SYS_REG(3, 3, 14, 0, 1)
203
204/* EL0 Virtual Timer Registers */
199#define KVM_REG_ARM_TIMER_CTL ARM64_SYS_REG(3, 3, 14, 3, 1) 205#define KVM_REG_ARM_TIMER_CTL ARM64_SYS_REG(3, 3, 14, 3, 1)
200#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2) 206#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
201#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2) 207#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
@@ -228,6 +234,7 @@ struct kvm_arch_memory_slot {
228#define KVM_DEV_ARM_ITS_SAVE_TABLES 1 234#define KVM_DEV_ARM_ITS_SAVE_TABLES 1
229#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2 235#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
230#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 236#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
237#define KVM_DEV_ARM_ITS_CTRL_RESET 4
231 238
232/* Device Control API on vcpu fd */ 239/* Device Control API on vcpu fd */
233#define KVM_ARM_VCPU_PMU_V3_CTRL 0 240#define KVM_ARM_VCPU_PMU_V3_CTRL 0
diff --git a/tools/arch/s390/include/uapi/asm/bpf_perf_event.h b/tools/arch/s390/include/uapi/asm/bpf_perf_event.h
new file mode 100644
index 000000000000..0a8e37a519f2
--- /dev/null
+++ b/tools/arch/s390/include/uapi/asm/bpf_perf_event.h
@@ -0,0 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
3#define _UAPI__ASM_BPF_PERF_EVENT_H__
4
5#include "ptrace.h"
6
7typedef user_pt_regs bpf_user_pt_regs_t;
8
9#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h
index 9ad172dcd912..38535a57fef8 100644
--- a/tools/arch/s390/include/uapi/asm/kvm.h
+++ b/tools/arch/s390/include/uapi/asm/kvm.h
@@ -6,10 +6,6 @@
6 * 6 *
7 * Copyright IBM Corp. 2008 7 * Copyright IBM Corp. 2008
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License (version 2 only)
11 * as published by the Free Software Foundation.
12 *
13 * Author(s): Carsten Otte <cotte@de.ibm.com> 9 * Author(s): Carsten Otte <cotte@de.ibm.com>
14 * Christian Borntraeger <borntraeger@de.ibm.com> 10 * Christian Borntraeger <borntraeger@de.ibm.com>
15 */ 11 */
diff --git a/tools/arch/s390/include/uapi/asm/kvm_perf.h b/tools/arch/s390/include/uapi/asm/kvm_perf.h
index c36c97ffdc6f..84606b8cc49e 100644
--- a/tools/arch/s390/include/uapi/asm/kvm_perf.h
+++ b/tools/arch/s390/include/uapi/asm/kvm_perf.h
@@ -4,10 +4,6 @@
4 * 4 *
5 * Copyright 2014 IBM Corp. 5 * Copyright 2014 IBM Corp.
6 * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com> 6 * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License (version 2 only)
10 * as published by the Free Software Foundation.
11 */ 7 */
12 8
13#ifndef __LINUX_KVM_PERF_S390_H 9#ifndef __LINUX_KVM_PERF_S390_H
diff --git a/tools/arch/s390/include/uapi/asm/perf_regs.h b/tools/arch/s390/include/uapi/asm/perf_regs.h
new file mode 100644
index 000000000000..d17dd9e5d516
--- /dev/null
+++ b/tools/arch/s390/include/uapi/asm/perf_regs.h
@@ -0,0 +1,44 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef _ASM_S390_PERF_REGS_H
3#define _ASM_S390_PERF_REGS_H
4
5enum perf_event_s390_regs {
6 PERF_REG_S390_R0,
7 PERF_REG_S390_R1,
8 PERF_REG_S390_R2,
9 PERF_REG_S390_R3,
10 PERF_REG_S390_R4,
11 PERF_REG_S390_R5,
12 PERF_REG_S390_R6,
13 PERF_REG_S390_R7,
14 PERF_REG_S390_R8,
15 PERF_REG_S390_R9,
16 PERF_REG_S390_R10,
17 PERF_REG_S390_R11,
18 PERF_REG_S390_R12,
19 PERF_REG_S390_R13,
20 PERF_REG_S390_R14,
21 PERF_REG_S390_R15,
22 PERF_REG_S390_FP0,
23 PERF_REG_S390_FP1,
24 PERF_REG_S390_FP2,
25 PERF_REG_S390_FP3,
26 PERF_REG_S390_FP4,
27 PERF_REG_S390_FP5,
28 PERF_REG_S390_FP6,
29 PERF_REG_S390_FP7,
30 PERF_REG_S390_FP8,
31 PERF_REG_S390_FP9,
32 PERF_REG_S390_FP10,
33 PERF_REG_S390_FP11,
34 PERF_REG_S390_FP12,
35 PERF_REG_S390_FP13,
36 PERF_REG_S390_FP14,
37 PERF_REG_S390_FP15,
38 PERF_REG_S390_MASK,
39 PERF_REG_S390_PC,
40
41 PERF_REG_S390_MAX
42};
43
44#endif /* _ASM_S390_PERF_REGS_H */
diff --git a/tools/arch/s390/include/uapi/asm/ptrace.h b/tools/arch/s390/include/uapi/asm/ptrace.h
new file mode 100644
index 000000000000..543dd70e12c8
--- /dev/null
+++ b/tools/arch/s390/include/uapi/asm/ptrace.h
@@ -0,0 +1,457 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2/*
3 * S390 version
4 * Copyright IBM Corp. 1999, 2000
5 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
6 */
7
8#ifndef _UAPI_S390_PTRACE_H
9#define _UAPI_S390_PTRACE_H
10
11/*
12 * Offsets in the user_regs_struct. They are used for the ptrace
13 * system call and in entry.S
14 */
15#ifndef __s390x__
16
17#define PT_PSWMASK 0x00
18#define PT_PSWADDR 0x04
19#define PT_GPR0 0x08
20#define PT_GPR1 0x0C
21#define PT_GPR2 0x10
22#define PT_GPR3 0x14
23#define PT_GPR4 0x18
24#define PT_GPR5 0x1C
25#define PT_GPR6 0x20
26#define PT_GPR7 0x24
27#define PT_GPR8 0x28
28#define PT_GPR9 0x2C
29#define PT_GPR10 0x30
30#define PT_GPR11 0x34
31#define PT_GPR12 0x38
32#define PT_GPR13 0x3C
33#define PT_GPR14 0x40
34#define PT_GPR15 0x44
35#define PT_ACR0 0x48
36#define PT_ACR1 0x4C
37#define PT_ACR2 0x50
38#define PT_ACR3 0x54
39#define PT_ACR4 0x58
40#define PT_ACR5 0x5C
41#define PT_ACR6 0x60
42#define PT_ACR7 0x64
43#define PT_ACR8 0x68
44#define PT_ACR9 0x6C
45#define PT_ACR10 0x70
46#define PT_ACR11 0x74
47#define PT_ACR12 0x78
48#define PT_ACR13 0x7C
49#define PT_ACR14 0x80
50#define PT_ACR15 0x84
51#define PT_ORIGGPR2 0x88
52#define PT_FPC 0x90
53/*
54 * A nasty fact of life that the ptrace api
55 * only supports passing of longs.
56 */
57#define PT_FPR0_HI 0x98
58#define PT_FPR0_LO 0x9C
59#define PT_FPR1_HI 0xA0
60#define PT_FPR1_LO 0xA4
61#define PT_FPR2_HI 0xA8
62#define PT_FPR2_LO 0xAC
63#define PT_FPR3_HI 0xB0
64#define PT_FPR3_LO 0xB4
65#define PT_FPR4_HI 0xB8
66#define PT_FPR4_LO 0xBC
67#define PT_FPR5_HI 0xC0
68#define PT_FPR5_LO 0xC4
69#define PT_FPR6_HI 0xC8
70#define PT_FPR6_LO 0xCC
71#define PT_FPR7_HI 0xD0
72#define PT_FPR7_LO 0xD4
73#define PT_FPR8_HI 0xD8
74#define PT_FPR8_LO 0XDC
75#define PT_FPR9_HI 0xE0
76#define PT_FPR9_LO 0xE4
77#define PT_FPR10_HI 0xE8
78#define PT_FPR10_LO 0xEC
79#define PT_FPR11_HI 0xF0
80#define PT_FPR11_LO 0xF4
81#define PT_FPR12_HI 0xF8
82#define PT_FPR12_LO 0xFC
83#define PT_FPR13_HI 0x100
84#define PT_FPR13_LO 0x104
85#define PT_FPR14_HI 0x108
86#define PT_FPR14_LO 0x10C
87#define PT_FPR15_HI 0x110
88#define PT_FPR15_LO 0x114
89#define PT_CR_9 0x118
90#define PT_CR_10 0x11C
91#define PT_CR_11 0x120
92#define PT_IEEE_IP 0x13C
93#define PT_LASTOFF PT_IEEE_IP
94#define PT_ENDREGS 0x140-1
95
96#define GPR_SIZE 4
97#define CR_SIZE 4
98
99#define STACK_FRAME_OVERHEAD 96 /* size of minimum stack frame */
100
101#else /* __s390x__ */
102
103#define PT_PSWMASK 0x00
104#define PT_PSWADDR 0x08
105#define PT_GPR0 0x10
106#define PT_GPR1 0x18
107#define PT_GPR2 0x20
108#define PT_GPR3 0x28
109#define PT_GPR4 0x30
110#define PT_GPR5 0x38
111#define PT_GPR6 0x40
112#define PT_GPR7 0x48
113#define PT_GPR8 0x50
114#define PT_GPR9 0x58
115#define PT_GPR10 0x60
116#define PT_GPR11 0x68
117#define PT_GPR12 0x70
118#define PT_GPR13 0x78
119#define PT_GPR14 0x80
120#define PT_GPR15 0x88
121#define PT_ACR0 0x90
122#define PT_ACR1 0x94
123#define PT_ACR2 0x98
124#define PT_ACR3 0x9C
125#define PT_ACR4 0xA0
126#define PT_ACR5 0xA4
127#define PT_ACR6 0xA8
128#define PT_ACR7 0xAC
129#define PT_ACR8 0xB0
130#define PT_ACR9 0xB4
131#define PT_ACR10 0xB8
132#define PT_ACR11 0xBC
133#define PT_ACR12 0xC0
134#define PT_ACR13 0xC4
135#define PT_ACR14 0xC8
136#define PT_ACR15 0xCC
137#define PT_ORIGGPR2 0xD0
138#define PT_FPC 0xD8
139#define PT_FPR0 0xE0
140#define PT_FPR1 0xE8
141#define PT_FPR2 0xF0
142#define PT_FPR3 0xF8
143#define PT_FPR4 0x100
144#define PT_FPR5 0x108
145#define PT_FPR6 0x110
146#define PT_FPR7 0x118
147#define PT_FPR8 0x120
148#define PT_FPR9 0x128
149#define PT_FPR10 0x130
150#define PT_FPR11 0x138
151#define PT_FPR12 0x140
152#define PT_FPR13 0x148
153#define PT_FPR14 0x150
154#define PT_FPR15 0x158
155#define PT_CR_9 0x160
156#define PT_CR_10 0x168
157#define PT_CR_11 0x170
158#define PT_IEEE_IP 0x1A8
159#define PT_LASTOFF PT_IEEE_IP
160#define PT_ENDREGS 0x1B0-1
161
162#define GPR_SIZE 8
163#define CR_SIZE 8
164
165#define STACK_FRAME_OVERHEAD 160 /* size of minimum stack frame */
166
167#endif /* __s390x__ */
168
169#define NUM_GPRS 16
170#define NUM_FPRS 16
171#define NUM_CRS 16
172#define NUM_ACRS 16
173
174#define NUM_CR_WORDS 3
175
176#define FPR_SIZE 8
177#define FPC_SIZE 4
178#define FPC_PAD_SIZE 4 /* gcc insists on aligning the fpregs */
179#define ACR_SIZE 4
180
181
182#define PTRACE_OLDSETOPTIONS 21
183
184#ifndef __ASSEMBLY__
185#include <linux/stddef.h>
186#include <linux/types.h>
187
188typedef union {
189 float f;
190 double d;
191 __u64 ui;
192 struct
193 {
194 __u32 hi;
195 __u32 lo;
196 } fp;
197} freg_t;
198
199typedef struct {
200 __u32 fpc;
201 __u32 pad;
202 freg_t fprs[NUM_FPRS];
203} s390_fp_regs;
204
205#define FPC_EXCEPTION_MASK 0xF8000000
206#define FPC_FLAGS_MASK 0x00F80000
207#define FPC_DXC_MASK 0x0000FF00
208#define FPC_RM_MASK 0x00000003
209
210/* this typedef defines how a Program Status Word looks like */
211typedef struct {
212 unsigned long mask;
213 unsigned long addr;
214} __attribute__ ((aligned(8))) psw_t;
215
216#ifndef __s390x__
217
218#define PSW_MASK_PER 0x40000000UL
219#define PSW_MASK_DAT 0x04000000UL
220#define PSW_MASK_IO 0x02000000UL
221#define PSW_MASK_EXT 0x01000000UL
222#define PSW_MASK_KEY 0x00F00000UL
223#define PSW_MASK_BASE 0x00080000UL /* always one */
224#define PSW_MASK_MCHECK 0x00040000UL
225#define PSW_MASK_WAIT 0x00020000UL
226#define PSW_MASK_PSTATE 0x00010000UL
227#define PSW_MASK_ASC 0x0000C000UL
228#define PSW_MASK_CC 0x00003000UL
229#define PSW_MASK_PM 0x00000F00UL
230#define PSW_MASK_RI 0x00000000UL
231#define PSW_MASK_EA 0x00000000UL
232#define PSW_MASK_BA 0x00000000UL
233
234#define PSW_MASK_USER 0x0000FF00UL
235
236#define PSW_ADDR_AMODE 0x80000000UL
237#define PSW_ADDR_INSN 0x7FFFFFFFUL
238
239#define PSW_DEFAULT_KEY (((unsigned long) PAGE_DEFAULT_ACC) << 20)
240
241#define PSW_ASC_PRIMARY 0x00000000UL
242#define PSW_ASC_ACCREG 0x00004000UL
243#define PSW_ASC_SECONDARY 0x00008000UL
244#define PSW_ASC_HOME 0x0000C000UL
245
246#else /* __s390x__ */
247
248#define PSW_MASK_PER 0x4000000000000000UL
249#define PSW_MASK_DAT 0x0400000000000000UL
250#define PSW_MASK_IO 0x0200000000000000UL
251#define PSW_MASK_EXT 0x0100000000000000UL
252#define PSW_MASK_BASE 0x0000000000000000UL
253#define PSW_MASK_KEY 0x00F0000000000000UL
254#define PSW_MASK_MCHECK 0x0004000000000000UL
255#define PSW_MASK_WAIT 0x0002000000000000UL
256#define PSW_MASK_PSTATE 0x0001000000000000UL
257#define PSW_MASK_ASC 0x0000C00000000000UL
258#define PSW_MASK_CC 0x0000300000000000UL
259#define PSW_MASK_PM 0x00000F0000000000UL
260#define PSW_MASK_RI 0x0000008000000000UL
261#define PSW_MASK_EA 0x0000000100000000UL
262#define PSW_MASK_BA 0x0000000080000000UL
263
264#define PSW_MASK_USER 0x0000FF0180000000UL
265
266#define PSW_ADDR_AMODE 0x0000000000000000UL
267#define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL
268
269#define PSW_DEFAULT_KEY (((unsigned long) PAGE_DEFAULT_ACC) << 52)
270
271#define PSW_ASC_PRIMARY 0x0000000000000000UL
272#define PSW_ASC_ACCREG 0x0000400000000000UL
273#define PSW_ASC_SECONDARY 0x0000800000000000UL
274#define PSW_ASC_HOME 0x0000C00000000000UL
275
276#endif /* __s390x__ */
277
278
279/*
280 * The s390_regs structure is used to define the elf_gregset_t.
281 */
282typedef struct {
283 psw_t psw;
284 unsigned long gprs[NUM_GPRS];
285 unsigned int acrs[NUM_ACRS];
286 unsigned long orig_gpr2;
287} s390_regs;
288
289/*
290 * The user_pt_regs structure exports the beginning of
291 * the in-kernel pt_regs structure to user space.
292 */
293typedef struct {
294 unsigned long args[1];
295 psw_t psw;
296 unsigned long gprs[NUM_GPRS];
297} user_pt_regs;
298
299/*
300 * Now for the user space program event recording (trace) definitions.
301 * The following structures are used only for the ptrace interface, don't
302 * touch or even look at it if you don't want to modify the user-space
303 * ptrace interface. In particular stay away from it for in-kernel PER.
304 */
305typedef struct {
306 unsigned long cr[NUM_CR_WORDS];
307} per_cr_words;
308
309#define PER_EM_MASK 0xE8000000UL
310
311typedef struct {
312#ifdef __s390x__
313 unsigned : 32;
314#endif /* __s390x__ */
315 unsigned em_branching : 1;
316 unsigned em_instruction_fetch : 1;
317 /*
318 * Switching on storage alteration automatically fixes
319 * the storage alteration event bit in the users std.
320 */
321 unsigned em_storage_alteration : 1;
322 unsigned em_gpr_alt_unused : 1;
323 unsigned em_store_real_address : 1;
324 unsigned : 3;
325 unsigned branch_addr_ctl : 1;
326 unsigned : 1;
327 unsigned storage_alt_space_ctl : 1;
328 unsigned : 21;
329 unsigned long starting_addr;
330 unsigned long ending_addr;
331} per_cr_bits;
332
333typedef struct {
334 unsigned short perc_atmid;
335 unsigned long address;
336 unsigned char access_id;
337} per_lowcore_words;
338
339typedef struct {
340 unsigned perc_branching : 1;
341 unsigned perc_instruction_fetch : 1;
342 unsigned perc_storage_alteration : 1;
343 unsigned perc_gpr_alt_unused : 1;
344 unsigned perc_store_real_address : 1;
345 unsigned : 3;
346 unsigned atmid_psw_bit_31 : 1;
347 unsigned atmid_validity_bit : 1;
348 unsigned atmid_psw_bit_32 : 1;
349 unsigned atmid_psw_bit_5 : 1;
350 unsigned atmid_psw_bit_16 : 1;
351 unsigned atmid_psw_bit_17 : 1;
352 unsigned si : 2;
353 unsigned long address;
354 unsigned : 4;
355 unsigned access_id : 4;
356} per_lowcore_bits;
357
358typedef struct {
359 union {
360 per_cr_words words;
361 per_cr_bits bits;
362 } control_regs;
363 /*
364 * The single_step and instruction_fetch bits are obsolete,
365 * the kernel always sets them to zero. To enable single
366 * stepping use ptrace(PTRACE_SINGLESTEP) instead.
367 */
368 unsigned single_step : 1;
369 unsigned instruction_fetch : 1;
370 unsigned : 30;
371 /*
372 * These addresses are copied into cr10 & cr11 if single
373 * stepping is switched off
374 */
375 unsigned long starting_addr;
376 unsigned long ending_addr;
377 union {
378 per_lowcore_words words;
379 per_lowcore_bits bits;
380 } lowcore;
381} per_struct;
382
383typedef struct {
384 unsigned int len;
385 unsigned long kernel_addr;
386 unsigned long process_addr;
387} ptrace_area;
388
389/*
390 * S/390 specific non posix ptrace requests. I chose unusual values so
391 * they are unlikely to clash with future ptrace definitions.
392 */
393#define PTRACE_PEEKUSR_AREA 0x5000
394#define PTRACE_POKEUSR_AREA 0x5001
395#define PTRACE_PEEKTEXT_AREA 0x5002
396#define PTRACE_PEEKDATA_AREA 0x5003
397#define PTRACE_POKETEXT_AREA 0x5004
398#define PTRACE_POKEDATA_AREA 0x5005
399#define PTRACE_GET_LAST_BREAK 0x5006
400#define PTRACE_PEEK_SYSTEM_CALL 0x5007
401#define PTRACE_POKE_SYSTEM_CALL 0x5008
402#define PTRACE_ENABLE_TE 0x5009
403#define PTRACE_DISABLE_TE 0x5010
404#define PTRACE_TE_ABORT_RAND 0x5011
405
406/*
407 * The numbers chosen here are somewhat arbitrary but absolutely MUST
408 * not overlap with any of the number assigned in <linux/ptrace.h>.
409 */
410#define PTRACE_SINGLEBLOCK 12 /* resume execution until next branch */
411
412/*
413 * PT_PROT definition is loosely based on hppa bsd definition in
414 * gdb/hppab-nat.c
415 */
416#define PTRACE_PROT 21
417
418typedef enum {
419 ptprot_set_access_watchpoint,
420 ptprot_set_write_watchpoint,
421 ptprot_disable_watchpoint
422} ptprot_flags;
423
424typedef struct {
425 unsigned long lowaddr;
426 unsigned long hiaddr;
427 ptprot_flags prot;
428} ptprot_area;
429
430/* Sequence of bytes for breakpoint illegal instruction. */
431#define S390_BREAKPOINT {0x0,0x1}
432#define S390_BREAKPOINT_U16 ((__u16)0x0001)
433#define S390_SYSCALL_OPCODE ((__u16)0x0a00)
434#define S390_SYSCALL_SIZE 2
435
436/*
437 * The user_regs_struct defines the way the user registers are
438 * store on the stack for signal handling.
439 */
440struct user_regs_struct {
441 psw_t psw;
442 unsigned long gprs[NUM_GPRS];
443 unsigned int acrs[NUM_ACRS];
444 unsigned long orig_gpr2;
445 s390_fp_regs fp_regs;
446 /*
447 * These per registers are in here so that gdb can modify them
448 * itself as there is no "official" ptrace interface for hardware
449 * watchpoints. This is the way intel does it.
450 */
451 per_struct per_info;
452 unsigned long ieee_instruction_pointer; /* obsolete, always 0 */
453};
454
455#endif /* __ASSEMBLY__ */
456
457#endif /* _UAPI_S390_PTRACE_H */
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 793690fbda36..800104c8a3ed 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -13,173 +13,176 @@
13/* 13/*
14 * Defines x86 CPU feature bits 14 * Defines x86 CPU feature bits
15 */ 15 */
16#define NCAPINTS 18 /* N 32-bit words worth of info */ 16#define NCAPINTS 18 /* N 32-bit words worth of info */
17#define NBUGINTS 1 /* N 32-bit bug flags */ 17#define NBUGINTS 1 /* N 32-bit bug flags */
18 18
19/* 19/*
20 * Note: If the comment begins with a quoted string, that string is used 20 * Note: If the comment begins with a quoted string, that string is used
21 * in /proc/cpuinfo instead of the macro name. If the string is "", 21 * in /proc/cpuinfo instead of the macro name. If the string is "",
22 * this feature bit is not displayed in /proc/cpuinfo at all. 22 * this feature bit is not displayed in /proc/cpuinfo at all.
23 *
24 * When adding new features here that depend on other features,
25 * please update the table in kernel/cpu/cpuid-deps.c as well.
23 */ 26 */
24 27
25/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ 28/* Intel-defined CPU features, CPUID level 0x00000001 (EDX), word 0 */
26#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */ 29#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
27#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */ 30#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
28#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */ 31#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
29#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */ 32#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
30#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */ 33#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
31#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */ 34#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
32#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */ 35#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
33#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */ 36#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
34#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */ 37#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
35#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */ 38#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
36#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */ 39#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
37#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */ 40#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
38#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */ 41#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
39#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */ 42#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
40#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */ 43#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions (plus FCMOVcc, FCOMI with FPU) */
41 /* (plus FCMOVcc, FCOMI with FPU) */ 44#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
42#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */ 45#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
43#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */ 46#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
44#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */ 47#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
45#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */ 48#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
46#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */ 49#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
47#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */ 50#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
48#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */ 51#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
49#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ 52#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
50#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */ 53#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
51#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */ 54#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
52#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */ 55#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
53#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */ 56#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
54#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */ 57#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
55#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */ 58#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
56#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
57 59
58/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ 60/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
59/* Don't duplicate feature flags which are redundant with Intel! */ 61/* Don't duplicate feature flags which are redundant with Intel! */
60#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */ 62#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
61#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */ 63#define X86_FEATURE_MP ( 1*32+19) /* MP Capable */
62#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */ 64#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
63#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */ 65#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
64#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */ 66#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
65#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */ 67#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
66#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */ 68#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
67#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */ 69#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64, 64-bit support) */
68#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */ 70#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow extensions */
69#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */ 71#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow */
70 72
71/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ 73/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
72#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */ 74#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
73#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */ 75#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
74#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */ 76#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
75 77
76/* Other features, Linux-defined mapping, word 3 */ 78/* Other features, Linux-defined mapping, word 3 */
77/* This range is used for feature bits which conflict or are synthesized */ 79/* This range is used for feature bits which conflict or are synthesized */
78#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */ 80#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
79#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */ 81#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
80#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */ 82#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
81#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */ 83#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
82/* cpu types for specific tunings: */ 84
83#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */ 85/* CPU types for specific tunings: */
84#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */ 86#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
85#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */ 87#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
86#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */ 88#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
87#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ 89#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
88#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */ 90#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
89#define X86_FEATURE_ART ( 3*32+10) /* Platform has always running timer (ART) */ 91#define X86_FEATURE_UP ( 3*32+ 9) /* SMP kernel running on UP */
90#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */ 92#define X86_FEATURE_ART ( 3*32+10) /* Always running timer (ART) */
91#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */ 93#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
92#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */ 94#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
93#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */ 95#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
94#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */ 96#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */
95#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */ 97#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
96#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */ 98#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
97#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */ 99#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" MFENCE synchronizes RDTSC */
98#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */ 100#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
99#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ 101#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
100#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */ 102#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
101#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */ 103#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
102#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */ 104#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* CPU topology enum extensions */
103#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */ 105#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
104#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */ 106#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
105#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */ 107#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */
106#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */ 108#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* Extended APICID (8 bits) */
107#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */ 109#define X86_FEATURE_AMD_DCM ( 3*32+27) /* AMD multi-node processor */
108#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */ 110#define X86_FEATURE_APERFMPERF ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
109#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */ 111#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
112#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
110 113
111/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ 114/* Intel-defined CPU features, CPUID level 0x00000001 (ECX), word 4 */
112#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */ 115#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
113#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */ 116#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
114#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */ 117#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
115#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */ 118#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" MONITOR/MWAIT support */
116#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */ 119#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL-qualified (filtered) Debug Store */
117#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */ 120#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
118#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */ 121#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer Mode eXtensions */
119#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */ 122#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
120#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */ 123#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
121#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */ 124#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
122#define X86_FEATURE_CID ( 4*32+10) /* Context ID */ 125#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
123#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */ 126#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
124#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */ 127#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
125#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */ 128#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B instruction */
126#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */ 129#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
127#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */ 130#define X86_FEATURE_PDCM ( 4*32+15) /* Perf/Debug Capabilities MSR */
128#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */ 131#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
129#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */ 132#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
130#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */ 133#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
131#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */ 134#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
132#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */ 135#define X86_FEATURE_X2APIC ( 4*32+21) /* X2APIC */
133#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */ 136#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
134#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */ 137#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
135#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */ 138#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* TSC deadline timer */
136#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */ 139#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
137#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ 140#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV instructions */
138#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */ 141#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE instruction enabled in the OS */
139#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */ 142#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
140#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */ 143#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit FP conversions */
141#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */ 144#define X86_FEATURE_RDRAND ( 4*32+30) /* RDRAND instruction */
142#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */ 145#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
143 146
144/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ 147/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
145#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ 148#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
146#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */ 149#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
147#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ 150#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
148#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ 151#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
149#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */ 152#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
150#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */ 153#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
151#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */ 154#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
152#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */ 155#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
153#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */ 156#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
154#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */ 157#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
155 158
156/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ 159/* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */
157#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */ 160#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
158#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */ 161#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
159#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */ 162#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure Virtual Machine */
160#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */ 163#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
161#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */ 164#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
162#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */ 165#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
163#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */ 166#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
164#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */ 167#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
165#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */ 168#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
166#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */ 169#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
167#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */ 170#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
168#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */ 171#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
169#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */ 172#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
170#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */ 173#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
171#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */ 174#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
172#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */ 175#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
173#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */ 176#define X86_FEATURE_TCE ( 6*32+17) /* Translation Cache Extension */
174#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */ 177#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
175#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */ 178#define X86_FEATURE_TBM ( 6*32+21) /* Trailing Bit Manipulations */
176#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */ 179#define X86_FEATURE_TOPOEXT ( 6*32+22) /* Topology extensions CPUID leafs */
177#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */ 180#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* Core performance counter extensions */
178#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ 181#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
179#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */ 182#define X86_FEATURE_BPEXT ( 6*32+26) /* Data breakpoint extension */
180#define X86_FEATURE_PTSC ( 6*32+27) /* performance time-stamp counter */ 183#define X86_FEATURE_PTSC ( 6*32+27) /* Performance time-stamp counter */
181#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */ 184#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */
182#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */ 185#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX instructions) */
183 186
184/* 187/*
185 * Auxiliary flags: Linux defined - For features scattered in various 188 * Auxiliary flags: Linux defined - For features scattered in various
@@ -187,146 +190,155 @@
187 * 190 *
188 * Reuse free bits when adding new feature flags! 191 * Reuse free bits when adding new feature flags!
189 */ 192 */
190#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT */ 193#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT instructions */
191#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */ 194#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */
192#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ 195#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
193#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ 196#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
194#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */ 197#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */
195#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */ 198#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
196#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */ 199#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
197 200
198#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ 201#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
199#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ 202#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
200#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ 203#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
201 204
202#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ 205#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
203#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ 206#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
204#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */ 207#define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */
205#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */ 208#define X86_FEATURE_AVX512_4FMAPS ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */
206 209
207#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ 210#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
208 211
209/* Virtualization flags: Linux defined, word 8 */ 212/* Virtualization flags: Linux defined, word 8 */
210#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ 213#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
211#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ 214#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
212#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */ 215#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
213#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */ 216#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
214#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */ 217#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
215 218
216#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */ 219#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */
217#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */ 220#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
218 221
219 222
220/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ 223/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
221#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ 224#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
222#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */ 225#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3B */
223#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */ 226#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
224#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */ 227#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
225#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */ 228#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
226#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */ 229#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
227#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */ 230#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
228#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */ 231#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */
229#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ 232#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
230#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ 233#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
231#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */ 234#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
232#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ 235#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
233#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */ 236#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */
234#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ 237#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
235#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */ 238#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
236#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ 239#define X86_FEATURE_RDSEED ( 9*32+18) /* RDSEED instruction */
237#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */ 240#define X86_FEATURE_ADX ( 9*32+19) /* ADCX and ADOX instructions */
238#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ 241#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
239#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */ 242#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
240#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ 243#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
241#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ 244#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
242#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ 245#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
243#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ 246#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
244#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ 247#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
245#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */ 248#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
246#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */ 249#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
247#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */ 250#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */
248 251
249/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */ 252/* Extended state features, CPUID level 0x0000000d:1 (EAX), word 10 */
250#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */ 253#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT instruction */
251#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */ 254#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC instruction */
252#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */ 255#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */
253#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */ 256#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */
254 257
255/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */ 258/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 11 */
256#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */ 259#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
257 260
258/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */ 261/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */
259#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */ 262#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring */
260#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */ 263#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */
261#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */ 264#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */
262 265
263/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */ 266/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
264#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */ 267#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
265#define X86_FEATURE_IRPERF (13*32+1) /* Instructions Retired Count */ 268#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
269#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
266 270
267/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */ 271/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
268#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ 272#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
269#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */ 273#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
270#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */ 274#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
271#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */ 275#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
272#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */ 276#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
273#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */ 277#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
274#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */ 278#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
275#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */ 279#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
276#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */ 280#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
277#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */ 281#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
278 282
279/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */ 283/* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */
280#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */ 284#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
281#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */ 285#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
282#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */ 286#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
283#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */ 287#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
284#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */ 288#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
285#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */ 289#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
286#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */ 290#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
287#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */ 291#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
288#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */ 292#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
289#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */ 293#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
290#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */ 294#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
291#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */ 295#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
292#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */ 296#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */
293 297
294/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */ 298/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
295#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ 299#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
296#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ 300#define X86_FEATURE_UMIP (16*32+ 2) /* User Mode Instruction Protection */
297#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ 301#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
298#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ 302#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
299#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ 303#define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */
300#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ 304#define X86_FEATURE_GFNI (16*32+ 8) /* Galois Field New Instructions */
305#define X86_FEATURE_VAES (16*32+ 9) /* Vector AES */
306#define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */
307#define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */
308#define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
309#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
310#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
311#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
301 312
302/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */ 313/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
303#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */ 314#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */
304#define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */ 315#define X86_FEATURE_SUCCOR (17*32+ 1) /* Uncorrectable error containment and recovery */
305#define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */ 316#define X86_FEATURE_SMCA (17*32+ 3) /* Scalable MCA */
306 317
307/* 318/*
308 * BUG word(s) 319 * BUG word(s)
309 */ 320 */
310#define X86_BUG(x) (NCAPINTS*32 + (x)) 321#define X86_BUG(x) (NCAPINTS*32 + (x))
311 322
312#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */ 323#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
313#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */ 324#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
314#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */ 325#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
315#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */ 326#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
316#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */ 327#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
317#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */ 328#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
318#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ 329#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
319#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ 330#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
320#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ 331#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
321#ifdef CONFIG_X86_32 332#ifdef CONFIG_X86_32
322/* 333/*
323 * 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional 334 * 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional
324 * to avoid confusion. 335 * to avoid confusion.
325 */ 336 */
326#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */ 337#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
327#endif 338#endif
328#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */ 339#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */
329#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ 340#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
330#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ 341#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
331#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ 342#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
343
332#endif /* _ASM_X86_CPUFEATURES_H */ 344#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index c10c9128f54e..14d6d5007314 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -16,6 +16,12 @@
16# define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31)) 16# define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31))
17#endif 17#endif
18 18
19#ifdef CONFIG_X86_INTEL_UMIP
20# define DISABLE_UMIP 0
21#else
22# define DISABLE_UMIP (1<<(X86_FEATURE_UMIP & 31))
23#endif
24
19#ifdef CONFIG_X86_64 25#ifdef CONFIG_X86_64
20# define DISABLE_VME (1<<(X86_FEATURE_VME & 31)) 26# define DISABLE_VME (1<<(X86_FEATURE_VME & 31))
21# define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31)) 27# define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31))
@@ -63,7 +69,7 @@
63#define DISABLED_MASK13 0 69#define DISABLED_MASK13 0
64#define DISABLED_MASK14 0 70#define DISABLED_MASK14 0
65#define DISABLED_MASK15 0 71#define DISABLED_MASK15 0
66#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57) 72#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP)
67#define DISABLED_MASK17 0 73#define DISABLED_MASK17 0
68#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) 74#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
69 75
diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile
index bde77d7c4390..37292bb5ce60 100644
--- a/tools/bpf/bpftool/Documentation/Makefile
+++ b/tools/bpf/bpftool/Documentation/Makefile
@@ -6,7 +6,7 @@ RM ?= rm -f
6 6
7# Make the path relative to DESTDIR, not prefix 7# Make the path relative to DESTDIR, not prefix
8ifndef DESTDIR 8ifndef DESTDIR
9prefix?=$(HOME) 9prefix ?= /usr/local
10endif 10endif
11mandir ?= $(prefix)/share/man 11mandir ?= $(prefix)/share/man
12man8dir = $(mandir)/man8 12man8dir = $(mandir)/man8
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index 813826c50936..ec3052c0b004 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -45,8 +45,8 @@ $(LIBBPF)-clean:
45 $(call QUIET_CLEAN, libbpf) 45 $(call QUIET_CLEAN, libbpf)
46 $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) clean >/dev/null 46 $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) clean >/dev/null
47 47
48prefix = /usr 48prefix = /usr/local
49bash_compdir ?= $(prefix)/share/bash-completion/completions 49bash_compdir ?= /usr/share/bash-completion/completions
50 50
51CC = gcc 51CC = gcc
52 52
@@ -76,6 +76,7 @@ clean: $(LIBBPF)-clean
76 $(Q)rm -rf $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d 76 $(Q)rm -rf $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d
77 77
78install: 78install:
79 install -m 0755 -d $(prefix)/sbin
79 install $(OUTPUT)bpftool $(prefix)/sbin/bpftool 80 install $(OUTPUT)bpftool $(prefix)/sbin/bpftool
80 install -m 0755 -d $(bash_compdir) 81 install -m 0755 -d $(bash_compdir)
81 install -m 0644 bash-completion/bpftool $(bash_compdir) 82 install -m 0644 bash-completion/bpftool $(bash_compdir)
@@ -88,5 +89,5 @@ doc-install:
88 89
89FORCE: 90FORCE:
90 91
91.PHONY: all clean FORCE 92.PHONY: all clean FORCE install doc doc-install
92.DEFAULT_GOAL := all 93.DEFAULT_GOAL := all
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index d6e4762170a4..d294bc8168be 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -58,11 +58,19 @@ bool show_pinned;
58struct pinned_obj_table prog_table; 58struct pinned_obj_table prog_table;
59struct pinned_obj_table map_table; 59struct pinned_obj_table map_table;
60 60
61static void __noreturn clean_and_exit(int i)
62{
63 if (json_output)
64 jsonw_destroy(&json_wtr);
65
66 exit(i);
67}
68
61void usage(void) 69void usage(void)
62{ 70{
63 last_do_help(last_argc - 1, last_argv + 1); 71 last_do_help(last_argc - 1, last_argv + 1);
64 72
65 exit(-1); 73 clean_and_exit(-1);
66} 74}
67 75
68static int do_help(int argc, char **argv) 76static int do_help(int argc, char **argv)
@@ -280,6 +288,7 @@ int main(int argc, char **argv)
280 hash_init(prog_table.table); 288 hash_init(prog_table.table);
281 hash_init(map_table.table); 289 hash_init(map_table.table);
282 290
291 opterr = 0;
283 while ((opt = getopt_long(argc, argv, "Vhpjf", 292 while ((opt = getopt_long(argc, argv, "Vhpjf",
284 options, NULL)) >= 0) { 293 options, NULL)) >= 0) {
285 switch (opt) { 294 switch (opt) {
@@ -291,13 +300,25 @@ int main(int argc, char **argv)
291 pretty_output = true; 300 pretty_output = true;
292 /* fall through */ 301 /* fall through */
293 case 'j': 302 case 'j':
294 json_output = true; 303 if (!json_output) {
304 json_wtr = jsonw_new(stdout);
305 if (!json_wtr) {
306 p_err("failed to create JSON writer");
307 return -1;
308 }
309 json_output = true;
310 }
311 jsonw_pretty(json_wtr, pretty_output);
295 break; 312 break;
296 case 'f': 313 case 'f':
297 show_pinned = true; 314 show_pinned = true;
298 break; 315 break;
299 default: 316 default:
300 usage(); 317 p_err("unrecognized option '%s'", argv[optind - 1]);
318 if (json_output)
319 clean_and_exit(-1);
320 else
321 usage();
301 } 322 }
302 } 323 }
303 324
@@ -306,15 +327,6 @@ int main(int argc, char **argv)
306 if (argc < 0) 327 if (argc < 0)
307 usage(); 328 usage();
308 329
309 if (json_output) {
310 json_wtr = jsonw_new(stdout);
311 if (!json_wtr) {
312 p_err("failed to create JSON writer");
313 return -1;
314 }
315 jsonw_pretty(json_wtr, pretty_output);
316 }
317
318 bfd_init(); 330 bfd_init();
319 331
320 ret = cmd_select(cmds, argc, argv, do_help); 332 ret = cmd_select(cmds, argc, argv, do_help);
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 9c191e222d6f..bff330b49791 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -41,6 +41,7 @@
41#include <stdbool.h> 41#include <stdbool.h>
42#include <stdio.h> 42#include <stdio.h>
43#include <linux/bpf.h> 43#include <linux/bpf.h>
44#include <linux/compiler.h>
44#include <linux/kernel.h> 45#include <linux/kernel.h>
45#include <linux/hashtable.h> 46#include <linux/hashtable.h>
46 47
@@ -50,7 +51,7 @@
50 51
51#define NEXT_ARG() ({ argc--; argv++; if (argc < 0) usage(); }) 52#define NEXT_ARG() ({ argc--; argv++; if (argc < 0) usage(); })
52#define NEXT_ARGP() ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); }) 53#define NEXT_ARGP() ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); })
53#define BAD_ARG() ({ p_err("what is '%s'?\n", *argv); -1; }) 54#define BAD_ARG() ({ p_err("what is '%s'?", *argv); -1; })
54 55
55#define ERR_MAX_LEN 1024 56#define ERR_MAX_LEN 1024
56 57
@@ -80,7 +81,7 @@ void p_info(const char *fmt, ...);
80 81
81bool is_prefix(const char *pfx, const char *str); 82bool is_prefix(const char *pfx, const char *str);
82void fprint_hex(FILE *f, void *arg, unsigned int n, const char *sep); 83void fprint_hex(FILE *f, void *arg, unsigned int n, const char *sep);
83void usage(void) __attribute__((noreturn)); 84void usage(void) __noreturn;
84 85
85struct pinned_obj_table { 86struct pinned_obj_table {
86 DECLARE_HASHTABLE(table, 16); 87 DECLARE_HASHTABLE(table, 16);
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index e2450c8e88e6..a8c3a33dd185 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -523,21 +523,23 @@ static int do_show(int argc, char **argv)
523 break; 523 break;
524 p_err("can't get next map: %s%s", strerror(errno), 524 p_err("can't get next map: %s%s", strerror(errno),
525 errno == EINVAL ? " -- kernel too old?" : ""); 525 errno == EINVAL ? " -- kernel too old?" : "");
526 return -1; 526 break;
527 } 527 }
528 528
529 fd = bpf_map_get_fd_by_id(id); 529 fd = bpf_map_get_fd_by_id(id);
530 if (fd < 0) { 530 if (fd < 0) {
531 if (errno == ENOENT)
532 continue;
531 p_err("can't get map by id (%u): %s", 533 p_err("can't get map by id (%u): %s",
532 id, strerror(errno)); 534 id, strerror(errno));
533 return -1; 535 break;
534 } 536 }
535 537
536 err = bpf_obj_get_info_by_fd(fd, &info, &len); 538 err = bpf_obj_get_info_by_fd(fd, &info, &len);
537 if (err) { 539 if (err) {
538 p_err("can't get map info: %s", strerror(errno)); 540 p_err("can't get map info: %s", strerror(errno));
539 close(fd); 541 close(fd);
540 return -1; 542 break;
541 } 543 }
542 544
543 if (json_output) 545 if (json_output)
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index ad619b96c276..dded77345bfb 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -382,6 +382,8 @@ static int do_show(int argc, char **argv)
382 382
383 fd = bpf_prog_get_fd_by_id(id); 383 fd = bpf_prog_get_fd_by_id(id);
384 if (fd < 0) { 384 if (fd < 0) {
385 if (errno == ENOENT)
386 continue;
385 p_err("can't get prog by id (%u): %s", 387 p_err("can't get prog by id (%u): %s",
386 id, strerror(errno)); 388 id, strerror(errno));
387 err = -1; 389 err = -1;
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index eaa3bec273c8..4c99c57736ce 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -193,11 +193,14 @@ static void kvp_update_mem_state(int pool)
193 for (;;) { 193 for (;;) {
194 readp = &record[records_read]; 194 readp = &record[records_read];
195 records_read += fread(readp, sizeof(struct kvp_record), 195 records_read += fread(readp, sizeof(struct kvp_record),
196 ENTRIES_PER_BLOCK * num_blocks, 196 ENTRIES_PER_BLOCK * num_blocks - records_read,
197 filep); 197 filep);
198 198
199 if (ferror(filep)) { 199 if (ferror(filep)) {
200 syslog(LOG_ERR, "Failed to read file, pool: %d", pool); 200 syslog(LOG_ERR,
201 "Failed to read file, pool: %d; error: %d %s",
202 pool, errno, strerror(errno));
203 kvp_release_lock(pool);
201 exit(EXIT_FAILURE); 204 exit(EXIT_FAILURE);
202 } 205 }
203 206
@@ -210,6 +213,7 @@ static void kvp_update_mem_state(int pool)
210 213
211 if (record == NULL) { 214 if (record == NULL) {
212 syslog(LOG_ERR, "malloc failed"); 215 syslog(LOG_ERR, "malloc failed");
216 kvp_release_lock(pool);
213 exit(EXIT_FAILURE); 217 exit(EXIT_FAILURE);
214 } 218 }
215 continue; 219 continue;
@@ -224,15 +228,11 @@ static void kvp_update_mem_state(int pool)
224 fclose(filep); 228 fclose(filep);
225 kvp_release_lock(pool); 229 kvp_release_lock(pool);
226} 230}
231
227static int kvp_file_init(void) 232static int kvp_file_init(void)
228{ 233{
229 int fd; 234 int fd;
230 FILE *filep;
231 size_t records_read;
232 char *fname; 235 char *fname;
233 struct kvp_record *record;
234 struct kvp_record *readp;
235 int num_blocks;
236 int i; 236 int i;
237 int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK; 237 int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK;
238 238
@@ -246,61 +246,19 @@ static int kvp_file_init(void)
246 246
247 for (i = 0; i < KVP_POOL_COUNT; i++) { 247 for (i = 0; i < KVP_POOL_COUNT; i++) {
248 fname = kvp_file_info[i].fname; 248 fname = kvp_file_info[i].fname;
249 records_read = 0;
250 num_blocks = 1;
251 sprintf(fname, "%s/.kvp_pool_%d", KVP_CONFIG_LOC, i); 249 sprintf(fname, "%s/.kvp_pool_%d", KVP_CONFIG_LOC, i);
252 fd = open(fname, O_RDWR | O_CREAT | O_CLOEXEC, 0644 /* rw-r--r-- */); 250 fd = open(fname, O_RDWR | O_CREAT | O_CLOEXEC, 0644 /* rw-r--r-- */);
253 251
254 if (fd == -1) 252 if (fd == -1)
255 return 1; 253 return 1;
256 254
257
258 filep = fopen(fname, "re");
259 if (!filep) {
260 close(fd);
261 return 1;
262 }
263
264 record = malloc(alloc_unit * num_blocks);
265 if (record == NULL) {
266 fclose(filep);
267 close(fd);
268 return 1;
269 }
270 for (;;) {
271 readp = &record[records_read];
272 records_read += fread(readp, sizeof(struct kvp_record),
273 ENTRIES_PER_BLOCK,
274 filep);
275
276 if (ferror(filep)) {
277 syslog(LOG_ERR, "Failed to read file, pool: %d",
278 i);
279 exit(EXIT_FAILURE);
280 }
281
282 if (!feof(filep)) {
283 /*
284 * We have more data to read.
285 */
286 num_blocks++;
287 record = realloc(record, alloc_unit *
288 num_blocks);
289 if (record == NULL) {
290 fclose(filep);
291 close(fd);
292 return 1;
293 }
294 continue;
295 }
296 break;
297 }
298 kvp_file_info[i].fd = fd; 255 kvp_file_info[i].fd = fd;
299 kvp_file_info[i].num_blocks = num_blocks; 256 kvp_file_info[i].num_blocks = 1;
300 kvp_file_info[i].records = record; 257 kvp_file_info[i].records = malloc(alloc_unit);
301 kvp_file_info[i].num_records = records_read; 258 if (kvp_file_info[i].records == NULL)
302 fclose(filep); 259 return 1;
303 260 kvp_file_info[i].num_records = 0;
261 kvp_update_mem_state(i);
304 } 262 }
305 263
306 return 0; 264 return 0;
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index 07fd03c74a77..04e32f965ad7 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -84,8 +84,6 @@
84 84
85#define uninitialized_var(x) x = *(&(x)) 85#define uninitialized_var(x) x = *(&(x))
86 86
87#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
88
89#include <linux/types.h> 87#include <linux/types.h>
90 88
91/* 89/*
@@ -135,20 +133,19 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
135/* 133/*
136 * Prevent the compiler from merging or refetching reads or writes. The 134 * Prevent the compiler from merging or refetching reads or writes. The
137 * compiler is also forbidden from reordering successive instances of 135 * compiler is also forbidden from reordering successive instances of
138 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the 136 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
139 * compiler is aware of some particular ordering. One way to make the 137 * particular ordering. One way to make the compiler aware of ordering is to
140 * compiler aware of ordering is to put the two invocations of READ_ONCE, 138 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
141 * WRITE_ONCE or ACCESS_ONCE() in different C statements. 139 * statements.
142 * 140 *
143 * In contrast to ACCESS_ONCE these two macros will also work on aggregate 141 * These two macros will also work on aggregate data types like structs or
144 * data types like structs or unions. If the size of the accessed data 142 * unions. If the size of the accessed data type exceeds the word size of
145 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) 143 * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
146 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a 144 * fall back to memcpy and print a compile-time warning.
147 * compile-time warning.
148 * 145 *
149 * Their two major use cases are: (1) Mediating communication between 146 * Their two major use cases are: (1) Mediating communication between
150 * process-level code and irq/NMI handlers, all running on the same CPU, 147 * process-level code and irq/NMI handlers, all running on the same CPU,
151 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise 148 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
152 * mutilate accesses that either do not require ordering or that interact 149 * mutilate accesses that either do not require ordering or that interact
153 * with an explicit memory barrier or atomic instruction that provides the 150 * with an explicit memory barrier or atomic instruction that provides the
154 * required ordering. 151 * required ordering.
diff --git a/tools/include/linux/kmemcheck.h b/tools/include/linux/kmemcheck.h
deleted file mode 100644
index ea32a7d3cf1b..000000000000
--- a/tools/include/linux/kmemcheck.h
+++ /dev/null
@@ -1 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
diff --git a/tools/include/linux/lockdep.h b/tools/include/linux/lockdep.h
index 940c1b075659..6b0c36a58fcb 100644
--- a/tools/include/linux/lockdep.h
+++ b/tools/include/linux/lockdep.h
@@ -48,6 +48,7 @@ static inline int debug_locks_off(void)
48#define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__) 48#define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__)
49#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__) 49#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__)
50#define pr_warn pr_err 50#define pr_warn pr_err
51#define pr_cont pr_err
51 52
52#define list_del_rcu list_del 53#define list_del_rcu list_del
53 54
diff --git a/tools/include/uapi/asm-generic/bpf_perf_event.h b/tools/include/uapi/asm-generic/bpf_perf_event.h
new file mode 100644
index 000000000000..53815d2cd047
--- /dev/null
+++ b/tools/include/uapi/asm-generic/bpf_perf_event.h
@@ -0,0 +1,9 @@
1#ifndef _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__
2#define _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__
3
4#include <linux/ptrace.h>
5
6/* Export kernel pt_regs structure */
7typedef struct pt_regs bpf_user_pt_regs_t;
8
9#endif /* _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__ */
diff --git a/tools/include/uapi/asm-generic/mman.h b/tools/include/uapi/asm-generic/mman.h
index 2dffcbf705b3..653687d9771b 100644
--- a/tools/include/uapi/asm-generic/mman.h
+++ b/tools/include/uapi/asm-generic/mman.h
@@ -13,6 +13,7 @@
13#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 13#define MAP_NONBLOCK 0x10000 /* do not block on IO */
14#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ 14#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
15#define MAP_HUGETLB 0x40000 /* create a huge page mapping */ 15#define MAP_HUGETLB 0x40000 /* create a huge page mapping */
16#define MAP_SYNC 0x80000 /* perform synchronous page faults for the mapping */
16 17
17/* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */ 18/* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */
18 19
diff --git a/tools/include/uapi/asm/bpf_perf_event.h b/tools/include/uapi/asm/bpf_perf_event.h
new file mode 100644
index 000000000000..13a58531e6fa
--- /dev/null
+++ b/tools/include/uapi/asm/bpf_perf_event.h
@@ -0,0 +1,7 @@
1#if defined(__aarch64__)
2#include "../../arch/arm64/include/uapi/asm/bpf_perf_event.h"
3#elif defined(__s390__)
4#include "../../arch/s390/include/uapi/asm/bpf_perf_event.h"
5#else
6#include <uapi/asm-generic/bpf_perf_event.h>
7#endif
diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h
index 97677cd6964d..6fdff5945c8a 100644
--- a/tools/include/uapi/drm/drm.h
+++ b/tools/include/uapi/drm/drm.h
@@ -737,6 +737,28 @@ struct drm_syncobj_array {
737 __u32 pad; 737 __u32 pad;
738}; 738};
739 739
740/* Query current scanout sequence number */
741struct drm_crtc_get_sequence {
742 __u32 crtc_id; /* requested crtc_id */
743 __u32 active; /* return: crtc output is active */
744 __u64 sequence; /* return: most recent vblank sequence */
745 __s64 sequence_ns; /* return: most recent time of first pixel out */
746};
747
748/* Queue event to be delivered at specified sequence. Time stamp marks
749 * when the first pixel of the refresh cycle leaves the display engine
750 * for the display
751 */
752#define DRM_CRTC_SEQUENCE_RELATIVE 0x00000001 /* sequence is relative to current */
753#define DRM_CRTC_SEQUENCE_NEXT_ON_MISS 0x00000002 /* Use next sequence if we've missed */
754
755struct drm_crtc_queue_sequence {
756 __u32 crtc_id;
757 __u32 flags;
758 __u64 sequence; /* on input, target sequence. on output, actual sequence */
759 __u64 user_data; /* user data passed to event */
760};
761
740#if defined(__cplusplus) 762#if defined(__cplusplus)
741} 763}
742#endif 764#endif
@@ -819,6 +841,9 @@ extern "C" {
819 841
820#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank) 842#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
821 843
844#define DRM_IOCTL_CRTC_GET_SEQUENCE DRM_IOWR(0x3b, struct drm_crtc_get_sequence)
845#define DRM_IOCTL_CRTC_QUEUE_SEQUENCE DRM_IOWR(0x3c, struct drm_crtc_queue_sequence)
846
822#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw) 847#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
823 848
824#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res) 849#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
@@ -863,6 +888,11 @@ extern "C" {
863#define DRM_IOCTL_SYNCOBJ_RESET DRM_IOWR(0xC4, struct drm_syncobj_array) 888#define DRM_IOCTL_SYNCOBJ_RESET DRM_IOWR(0xC4, struct drm_syncobj_array)
864#define DRM_IOCTL_SYNCOBJ_SIGNAL DRM_IOWR(0xC5, struct drm_syncobj_array) 889#define DRM_IOCTL_SYNCOBJ_SIGNAL DRM_IOWR(0xC5, struct drm_syncobj_array)
865 890
891#define DRM_IOCTL_MODE_CREATE_LEASE DRM_IOWR(0xC6, struct drm_mode_create_lease)
892#define DRM_IOCTL_MODE_LIST_LESSEES DRM_IOWR(0xC7, struct drm_mode_list_lessees)
893#define DRM_IOCTL_MODE_GET_LEASE DRM_IOWR(0xC8, struct drm_mode_get_lease)
894#define DRM_IOCTL_MODE_REVOKE_LEASE DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
895
866/** 896/**
867 * Device specific ioctls should only be in their respective headers 897 * Device specific ioctls should only be in their respective headers
868 * The device specific ioctl range is from 0x40 to 0x9f. 898 * The device specific ioctl range is from 0x40 to 0x9f.
@@ -893,6 +923,7 @@ struct drm_event {
893 923
894#define DRM_EVENT_VBLANK 0x01 924#define DRM_EVENT_VBLANK 0x01
895#define DRM_EVENT_FLIP_COMPLETE 0x02 925#define DRM_EVENT_FLIP_COMPLETE 0x02
926#define DRM_EVENT_CRTC_SEQUENCE 0x03
896 927
897struct drm_event_vblank { 928struct drm_event_vblank {
898 struct drm_event base; 929 struct drm_event base;
@@ -903,6 +934,16 @@ struct drm_event_vblank {
903 __u32 crtc_id; /* 0 on older kernels that do not support this */ 934 __u32 crtc_id; /* 0 on older kernels that do not support this */
904}; 935};
905 936
937/* Event delivered at sequence. Time stamp marks when the first pixel
938 * of the refresh cycle leaves the display engine for the display
939 */
940struct drm_event_crtc_sequence {
941 struct drm_event base;
942 __u64 user_data;
943 __s64 time_ns;
944 __u64 sequence;
945};
946
906/* typedef area */ 947/* typedef area */
907#ifndef __KERNEL__ 948#ifndef __KERNEL__
908typedef struct drm_clip_rect drm_clip_rect_t; 949typedef struct drm_clip_rect drm_clip_rect_t;
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 9816590d3ad2..ac3c6503ca27 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -397,10 +397,20 @@ typedef struct drm_i915_irq_wait {
397#define I915_PARAM_MIN_EU_IN_POOL 39 397#define I915_PARAM_MIN_EU_IN_POOL 39
398#define I915_PARAM_MMAP_GTT_VERSION 40 398#define I915_PARAM_MMAP_GTT_VERSION 40
399 399
400/* Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution 400/*
401 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
401 * priorities and the driver will attempt to execute batches in priority order. 402 * priorities and the driver will attempt to execute batches in priority order.
403 * The param returns a capability bitmask, nonzero implies that the scheduler
404 * is enabled, with different features present according to the mask.
405 *
406 * The initial priority for each batch is supplied by the context and is
407 * controlled via I915_CONTEXT_PARAM_PRIORITY.
402 */ 408 */
403#define I915_PARAM_HAS_SCHEDULER 41 409#define I915_PARAM_HAS_SCHEDULER 41
410#define I915_SCHEDULER_CAP_ENABLED (1ul << 0)
411#define I915_SCHEDULER_CAP_PRIORITY (1ul << 1)
412#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
413
404#define I915_PARAM_HUC_STATUS 42 414#define I915_PARAM_HUC_STATUS 42
405 415
406/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of 416/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
@@ -1309,14 +1319,16 @@ struct drm_i915_reg_read {
1309 * be specified 1319 * be specified
1310 */ 1320 */
1311 __u64 offset; 1321 __u64 offset;
1322#define I915_REG_READ_8B_WA (1ul << 0)
1323
1312 __u64 val; /* Return value */ 1324 __u64 val; /* Return value */
1313}; 1325};
1314/* Known registers: 1326/* Known registers:
1315 * 1327 *
1316 * Render engine timestamp - 0x2358 + 64bit - gen7+ 1328 * Render engine timestamp - 0x2358 + 64bit - gen7+
1317 * - Note this register returns an invalid value if using the default 1329 * - Note this register returns an invalid value if using the default
1318 * single instruction 8byte read, in order to workaround that use 1330 * single instruction 8byte read, in order to workaround that pass
1319 * offset (0x2538 | 1) instead. 1331 * flag I915_REG_READ_8B_WA in offset field.
1320 * 1332 *
1321 */ 1333 */
1322 1334
@@ -1359,6 +1371,10 @@ struct drm_i915_gem_context_param {
1359#define I915_CONTEXT_PARAM_GTT_SIZE 0x3 1371#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
1360#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4 1372#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
1361#define I915_CONTEXT_PARAM_BANNABLE 0x5 1373#define I915_CONTEXT_PARAM_BANNABLE 0x5
1374#define I915_CONTEXT_PARAM_PRIORITY 0x6
1375#define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */
1376#define I915_CONTEXT_DEFAULT_PRIORITY 0
1377#define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */
1362 __u64 value; 1378 __u64 value;
1363}; 1379};
1364 1380
@@ -1510,9 +1526,14 @@ struct drm_i915_perf_oa_config {
1510 __u32 n_boolean_regs; 1526 __u32 n_boolean_regs;
1511 __u32 n_flex_regs; 1527 __u32 n_flex_regs;
1512 1528
1513 __u64 __user mux_regs_ptr; 1529 /*
1514 __u64 __user boolean_regs_ptr; 1530 * These fields are pointers to tuples of u32 values (register
1515 __u64 __user flex_regs_ptr; 1531 * address, value). For example the expected length of the buffer
1532 * pointed by mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
1533 */
1534 __u64 mux_regs_ptr;
1535 __u64 boolean_regs_ptr;
1536 __u64 flex_regs_ptr;
1516}; 1537};
1517 1538
1518#if defined(__cplusplus) 1539#if defined(__cplusplus)
diff --git a/tools/include/uapi/linux/bpf_perf_event.h b/tools/include/uapi/linux/bpf_perf_event.h
index 067427259820..8f95303f9d80 100644
--- a/tools/include/uapi/linux/bpf_perf_event.h
+++ b/tools/include/uapi/linux/bpf_perf_event.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
1/* Copyright (c) 2016 Facebook 2/* Copyright (c) 2016 Facebook
2 * 3 *
3 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
@@ -7,11 +8,10 @@
7#ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__ 8#ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__
8#define _UAPI__LINUX_BPF_PERF_EVENT_H__ 9#define _UAPI__LINUX_BPF_PERF_EVENT_H__
9 10
10#include <linux/types.h> 11#include <asm/bpf_perf_event.h>
11#include <linux/ptrace.h>
12 12
13struct bpf_perf_event_data { 13struct bpf_perf_event_data {
14 struct pt_regs regs; 14 bpf_user_pt_regs_t regs;
15 __u64 sample_period; 15 __u64 sample_period;
16}; 16};
17 17
diff --git a/tools/include/uapi/linux/kcmp.h b/tools/include/uapi/linux/kcmp.h
index 481e103da78e..ef1305010925 100644
--- a/tools/include/uapi/linux/kcmp.h
+++ b/tools/include/uapi/linux/kcmp.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
1#ifndef _UAPI_LINUX_KCMP_H 2#ifndef _UAPI_LINUX_KCMP_H
2#define _UAPI_LINUX_KCMP_H 3#define _UAPI_LINUX_KCMP_H
3 4
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 7e99999d6236..496e59a2738b 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -630,9 +630,9 @@ struct kvm_s390_irq {
630 630
631struct kvm_s390_irq_state { 631struct kvm_s390_irq_state {
632 __u64 buf; 632 __u64 buf;
633 __u32 flags; 633 __u32 flags; /* will stay unused for compatibility reasons */
634 __u32 len; 634 __u32 len;
635 __u32 reserved[4]; 635 __u32 reserved[4]; /* will stay unused for compatibility reasons */
636}; 636};
637 637
638/* for KVM_SET_GUEST_DEBUG */ 638/* for KVM_SET_GUEST_DEBUG */
@@ -931,6 +931,7 @@ struct kvm_ppc_resize_hpt {
931#define KVM_CAP_PPC_SMT_POSSIBLE 147 931#define KVM_CAP_PPC_SMT_POSSIBLE 147
932#define KVM_CAP_HYPERV_SYNIC2 148 932#define KVM_CAP_HYPERV_SYNIC2 148
933#define KVM_CAP_HYPERV_VP_INDEX 149 933#define KVM_CAP_HYPERV_VP_INDEX 149
934#define KVM_CAP_S390_AIS_MIGRATION 150
934 935
935#ifdef KVM_CAP_IRQ_ROUTING 936#ifdef KVM_CAP_IRQ_ROUTING
936 937
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index 362493a2f950..b9a4953018ed 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -942,6 +942,7 @@ enum perf_callchain_context {
942#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */ 942#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
943#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */ 943#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
944#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */ 944#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */
945#define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */
945 946
946#define PERF_FLAG_FD_NO_GROUP (1UL << 0) 947#define PERF_FLAG_FD_NO_GROUP (1UL << 0)
947#define PERF_FLAG_FD_OUTPUT (1UL << 1) 948#define PERF_FLAG_FD_OUTPUT (1UL << 1)
diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
index a8d0759a9e40..af5f8c2df87a 100644
--- a/tools/include/uapi/linux/prctl.h
+++ b/tools/include/uapi/linux/prctl.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
1#ifndef _LINUX_PRCTL_H 2#ifndef _LINUX_PRCTL_H
2#define _LINUX_PRCTL_H 3#define _LINUX_PRCTL_H
3 4
@@ -197,4 +198,13 @@ struct prctl_mm_map {
197# define PR_CAP_AMBIENT_LOWER 3 198# define PR_CAP_AMBIENT_LOWER 3
198# define PR_CAP_AMBIENT_CLEAR_ALL 4 199# define PR_CAP_AMBIENT_CLEAR_ALL 4
199 200
201/* arm64 Scalable Vector Extension controls */
202/* Flag values must be kept in sync with ptrace NT_ARM_SVE interface */
203#define PR_SVE_SET_VL 50 /* set task vector length */
204# define PR_SVE_SET_VL_ONEXEC (1 << 18) /* defer effect until exec */
205#define PR_SVE_GET_VL 51 /* get task vector length */
206/* Bits common to PR_SVE_SET_VL and PR_SVE_GET_VL */
207# define PR_SVE_VL_LEN_MASK 0xffff
208# define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */
209
200#endif /* _LINUX_PRCTL_H */ 210#endif /* _LINUX_PRCTL_H */
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
index 217cf6f95c36..a5684d0968b4 100755
--- a/tools/kvm/kvm_stat/kvm_stat
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -478,7 +478,7 @@ class Provider(object):
478 @staticmethod 478 @staticmethod
479 def is_field_wanted(fields_filter, field): 479 def is_field_wanted(fields_filter, field):
480 """Indicate whether field is valid according to fields_filter.""" 480 """Indicate whether field is valid according to fields_filter."""
481 if not fields_filter or fields_filter == "help": 481 if not fields_filter:
482 return True 482 return True
483 return re.match(fields_filter, field) is not None 483 return re.match(fields_filter, field) is not None
484 484
@@ -549,8 +549,8 @@ class TracepointProvider(Provider):
549 549
550 def update_fields(self, fields_filter): 550 def update_fields(self, fields_filter):
551 """Refresh fields, applying fields_filter""" 551 """Refresh fields, applying fields_filter"""
552 self._fields = [field for field in self.get_available_fields() 552 self.fields = [field for field in self.get_available_fields()
553 if self.is_field_wanted(fields_filter, field)] 553 if self.is_field_wanted(fields_filter, field)]
554 554
555 @staticmethod 555 @staticmethod
556 def get_online_cpus(): 556 def get_online_cpus():
@@ -950,7 +950,8 @@ class Tui(object):
950 curses.nocbreak() 950 curses.nocbreak()
951 curses.endwin() 951 curses.endwin()
952 952
953 def get_all_gnames(self): 953 @staticmethod
954 def get_all_gnames():
954 """Returns a list of (pid, gname) tuples of all running guests""" 955 """Returns a list of (pid, gname) tuples of all running guests"""
955 res = [] 956 res = []
956 try: 957 try:
@@ -963,7 +964,7 @@ class Tui(object):
963 # perform a sanity check before calling the more expensive 964 # perform a sanity check before calling the more expensive
964 # function to possibly extract the guest name 965 # function to possibly extract the guest name
965 if ' -name ' in line[1]: 966 if ' -name ' in line[1]:
966 res.append((line[0], self.get_gname_from_pid(line[0]))) 967 res.append((line[0], Tui.get_gname_from_pid(line[0])))
967 child.stdout.close() 968 child.stdout.close()
968 969
969 return res 970 return res
@@ -984,7 +985,8 @@ class Tui(object):
984 except Exception: 985 except Exception:
985 self.screen.addstr(row + 1, 2, 'Not available') 986 self.screen.addstr(row + 1, 2, 'Not available')
986 987
987 def get_pid_from_gname(self, gname): 988 @staticmethod
989 def get_pid_from_gname(gname):
988 """Fuzzy function to convert guest name to QEMU process pid. 990 """Fuzzy function to convert guest name to QEMU process pid.
989 991
990 Returns a list of potential pids, can be empty if no match found. 992 Returns a list of potential pids, can be empty if no match found.
@@ -992,7 +994,7 @@ class Tui(object):
992 994
993 """ 995 """
994 pids = [] 996 pids = []
995 for line in self.get_all_gnames(): 997 for line in Tui.get_all_gnames():
996 if gname == line[1]: 998 if gname == line[1]:
997 pids.append(int(line[0])) 999 pids.append(int(line[0]))
998 1000
@@ -1090,15 +1092,16 @@ class Tui(object):
1090 # sort by totals 1092 # sort by totals
1091 return (0, -stats[x][0]) 1093 return (0, -stats[x][0])
1092 total = 0. 1094 total = 0.
1093 for val in stats.values(): 1095 for key in stats.keys():
1094 total += val[0] 1096 if key.find('(') is -1:
1097 total += stats[key][0]
1095 if self._sorting == SORT_DEFAULT: 1098 if self._sorting == SORT_DEFAULT:
1096 sortkey = sortCurAvg 1099 sortkey = sortCurAvg
1097 else: 1100 else:
1098 sortkey = sortTotal 1101 sortkey = sortTotal
1102 tavg = 0
1099 for key in sorted(stats.keys(), key=sortkey): 1103 for key in sorted(stats.keys(), key=sortkey):
1100 1104 if row >= self.screen.getmaxyx()[0] - 1:
1101 if row >= self.screen.getmaxyx()[0]:
1102 break 1105 break
1103 values = stats[key] 1106 values = stats[key]
1104 if not values[0] and not values[1]: 1107 if not values[0] and not values[1]:
@@ -1110,9 +1113,15 @@ class Tui(object):
1110 self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' % 1113 self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' %
1111 (key, values[0], values[0] * 100 / total, 1114 (key, values[0], values[0] * 100 / total,
1112 cur)) 1115 cur))
1116 if cur is not '' and key.find('(') is -1:
1117 tavg += cur
1113 row += 1 1118 row += 1
1114 if row == 3: 1119 if row == 3:
1115 self.screen.addstr(4, 1, 'No matching events reported yet') 1120 self.screen.addstr(4, 1, 'No matching events reported yet')
1121 else:
1122 self.screen.addstr(row, 1, '%-40s %10d %8s' %
1123 ('Total', total, tavg if tavg else ''),
1124 curses.A_BOLD)
1116 self.screen.refresh() 1125 self.screen.refresh()
1117 1126
1118 def show_msg(self, text): 1127 def show_msg(self, text):
@@ -1358,7 +1367,7 @@ class Tui(object):
1358 if char == 'x': 1367 if char == 'x':
1359 self.update_drilldown() 1368 self.update_drilldown()
1360 # prevents display of current values on next refresh 1369 # prevents display of current values on next refresh
1361 self.stats.get() 1370 self.stats.get(self._display_guests)
1362 except KeyboardInterrupt: 1371 except KeyboardInterrupt:
1363 break 1372 break
1364 except curses.error: 1373 except curses.error:
@@ -1451,16 +1460,13 @@ Press any other key to refresh statistics immediately.
1451 try: 1460 try:
1452 pids = Tui.get_pid_from_gname(val) 1461 pids = Tui.get_pid_from_gname(val)
1453 except: 1462 except:
1454 raise optparse.OptionValueError('Error while searching for guest ' 1463 sys.exit('Error while searching for guest "{}". Use "-p" to '
1455 '"{}", use "-p" to specify a pid ' 1464 'specify a pid instead?'.format(val))
1456 'instead'.format(val))
1457 if len(pids) == 0: 1465 if len(pids) == 0:
1458 raise optparse.OptionValueError('No guest by the name "{}" ' 1466 sys.exit('Error: No guest by the name "{}" found'.format(val))
1459 'found'.format(val))
1460 if len(pids) > 1: 1467 if len(pids) > 1:
1461 raise optparse.OptionValueError('Multiple processes found (pids: ' 1468 sys.exit('Error: Multiple processes found (pids: {}). Use "-p" '
1462 '{}) - use "-p" to specify a pid ' 1469 'to specify the desired pid'.format(" ".join(pids)))
1463 'instead'.format(" ".join(pids)))
1464 parser.values.pid = pids[0] 1470 parser.values.pid = pids[0]
1465 1471
1466 optparser = optparse.OptionParser(description=description_text, 1472 optparser = optparse.OptionParser(description=description_text,
@@ -1518,7 +1524,16 @@ Press any other key to refresh statistics immediately.
1518 help='restrict statistics to guest by name', 1524 help='restrict statistics to guest by name',
1519 callback=cb_guest_to_pid, 1525 callback=cb_guest_to_pid,
1520 ) 1526 )
1521 (options, _) = optparser.parse_args(sys.argv) 1527 options, unkn = optparser.parse_args(sys.argv)
1528 if len(unkn) != 1:
1529 sys.exit('Error: Extra argument(s): ' + ' '.join(unkn[1:]))
1530 try:
1531 # verify that we were passed a valid regex up front
1532 re.compile(options.fields)
1533 except re.error:
1534 sys.exit('Error: "' + options.fields + '" is not a valid regular '
1535 'expression')
1536
1522 return options 1537 return options
1523 1538
1524 1539
@@ -1564,16 +1579,13 @@ def main():
1564 1579
1565 stats = Stats(options) 1580 stats = Stats(options)
1566 1581
1567 if options.fields == "help": 1582 if options.fields == 'help':
1568 event_list = "\n" 1583 stats.fields_filter = None
1569 s = stats.get() 1584 event_list = []
1570 for key in s.keys(): 1585 for key in stats.get().keys():
1571 if key.find('(') != -1: 1586 event_list.append(key.split('(', 1)[0])
1572 key = key[0:key.find('(')] 1587 sys.stdout.write(' ' + '\n '.join(sorted(set(event_list))) + '\n')
1573 if event_list.find('\n' + key + '\n') == -1: 1588 sys.exit(0)
1574 event_list += key + '\n'
1575 sys.stdout.write(event_list)
1576 return ""
1577 1589
1578 if options.log: 1590 if options.log:
1579 log(stats) 1591 log(stats)
diff --git a/tools/kvm/kvm_stat/kvm_stat.txt b/tools/kvm/kvm_stat/kvm_stat.txt
index e5cf836be8a1..b5b3810c9e94 100644
--- a/tools/kvm/kvm_stat/kvm_stat.txt
+++ b/tools/kvm/kvm_stat/kvm_stat.txt
@@ -50,6 +50,8 @@ INTERACTIVE COMMANDS
50*s*:: set update interval 50*s*:: set update interval
51 51
52*x*:: toggle reporting of stats for child trace events 52*x*:: toggle reporting of stats for child trace events
53 :: *Note*: The stats for the parents summarize the respective child trace
54 events
53 55
54Press any other key to refresh statistics immediately. 56Press any other key to refresh statistics immediately.
55 57
@@ -86,7 +88,7 @@ OPTIONS
86 88
87-f<fields>:: 89-f<fields>::
88--fields=<fields>:: 90--fields=<fields>::
89 fields to display (regex) 91 fields to display (regex), "-f help" for a list of available events
90 92
91-h:: 93-h::
92--help:: 94--help::
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 0f94af3ccaaa..e6acc281dd37 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -7,9 +7,11 @@ ARCH := x86
7endif 7endif
8 8
9# always use the host compiler 9# always use the host compiler
10CC = gcc 10HOSTCC ?= gcc
11LD = ld 11HOSTLD ?= ld
12AR = ar 12CC = $(HOSTCC)
13LD = $(HOSTLD)
14AR = ar
13 15
14ifeq ($(srctree),) 16ifeq ($(srctree),)
15srctree := $(patsubst %/,%,$(dir $(CURDIR))) 17srctree := $(patsubst %/,%,$(dir $(CURDIR)))
@@ -44,7 +46,7 @@ $(OBJTOOL_IN): fixdep FORCE
44 @$(MAKE) $(build)=objtool 46 @$(MAKE) $(build)=objtool
45 47
46$(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN) 48$(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN)
47 @./sync-check.sh 49 @$(CONFIG_SHELL) ./sync-check.sh
48 $(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@ 50 $(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@
49 51
50 52
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 8acfc47af70e..540a209b78ab 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -138,7 +138,7 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
138 *type = INSN_STACK; 138 *type = INSN_STACK;
139 op->src.type = OP_SRC_ADD; 139 op->src.type = OP_SRC_ADD;
140 op->src.reg = op_to_cfi_reg[modrm_reg][rex_r]; 140 op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
141 op->dest.type = OP_SRC_REG; 141 op->dest.type = OP_DEST_REG;
142 op->dest.reg = CFI_SP; 142 op->dest.reg = CFI_SP;
143 } 143 }
144 break; 144 break;
diff --git a/tools/objtool/arch/x86/lib/x86-opcode-map.txt b/tools/objtool/arch/x86/lib/x86-opcode-map.txt
index 12e377184ee4..e0b85930dd77 100644
--- a/tools/objtool/arch/x86/lib/x86-opcode-map.txt
+++ b/tools/objtool/arch/x86/lib/x86-opcode-map.txt
@@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
607fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1) 607fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
608fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1) 608fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
609fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) 609fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
610ff: 610ff: UD0
611EndTable 611EndTable
612 612
613Table: 3-byte opcode 1 (0x0f 0x38) 613Table: 3-byte opcode 1 (0x0f 0x38)
@@ -717,7 +717,7 @@ AVXcode: 2
7177e: vpermt2d/q Vx,Hx,Wx (66),(ev) 7177e: vpermt2d/q Vx,Hx,Wx (66),(ev)
7187f: vpermt2ps/d Vx,Hx,Wx (66),(ev) 7187f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
71980: INVEPT Gy,Mdq (66) 71980: INVEPT Gy,Mdq (66)
72081: INVPID Gy,Mdq (66) 72081: INVVPID Gy,Mdq (66)
72182: INVPCID Gy,Mdq (66) 72182: INVPCID Gy,Mdq (66)
72283: vpmultishiftqb Vx,Hx,Wx (66),(ev) 72283: vpmultishiftqb Vx,Hx,Wx (66),(ev)
72388: vexpandps/d Vpd,Wpd (66),(ev) 72388: vexpandps/d Vpd,Wpd (66),(ev)
@@ -896,7 +896,7 @@ EndTable
896 896
897GrpTable: Grp3_1 897GrpTable: Grp3_1
8980: TEST Eb,Ib 8980: TEST Eb,Ib
8991: 8991: TEST Eb,Ib
9002: NOT Eb 9002: NOT Eb
9013: NEG Eb 9013: NEG Eb
9024: MUL AL,Eb 9024: MUL AL,Eb
@@ -970,6 +970,15 @@ GrpTable: Grp9
970EndTable 970EndTable
971 971
972GrpTable: Grp10 972GrpTable: Grp10
973# all are UD1
9740: UD1
9751: UD1
9762: UD1
9773: UD1
9784: UD1
9795: UD1
9806: UD1
9817: UD1
973EndTable 982EndTable
974 983
975# Grp11A and Grp11B are expressed as Grp11 in Intel SDM 984# Grp11A and Grp11B are expressed as Grp11 in Intel SDM
diff --git a/tools/objtool/builtin-orc.c b/tools/objtool/builtin-orc.c
index 4c6b5c9ef073..91e8e19ff5e0 100644
--- a/tools/objtool/builtin-orc.c
+++ b/tools/objtool/builtin-orc.c
@@ -44,6 +44,9 @@ int cmd_orc(int argc, const char **argv)
44 const char *objname; 44 const char *objname;
45 45
46 argc--; argv++; 46 argc--; argv++;
47 if (argc <= 0)
48 usage_with_options(orc_usage, check_options);
49
47 if (!strncmp(argv[0], "gen", 3)) { 50 if (!strncmp(argv[0], "gen", 3)) {
48 argc = parse_options(argc, argv, check_options, orc_usage, 0); 51 argc = parse_options(argc, argv, check_options, orc_usage, 0);
49 if (argc != 1) 52 if (argc != 1)
@@ -52,7 +55,6 @@ int cmd_orc(int argc, const char **argv)
52 objname = argv[0]; 55 objname = argv[0];
53 56
54 return check(objname, no_fp, no_unreachable, true); 57 return check(objname, no_fp, no_unreachable, true);
55
56 } 58 }
57 59
58 if (!strcmp(argv[0], "dump")) { 60 if (!strcmp(argv[0], "dump")) {
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 9b341584eb1b..f40d46e24bcc 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -428,6 +428,40 @@ static void add_ignores(struct objtool_file *file)
428} 428}
429 429
430/* 430/*
431 * FIXME: For now, just ignore any alternatives which add retpolines. This is
432 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
433 * But it at least allows objtool to understand the control flow *around* the
434 * retpoline.
435 */
436static int add_nospec_ignores(struct objtool_file *file)
437{
438 struct section *sec;
439 struct rela *rela;
440 struct instruction *insn;
441
442 sec = find_section_by_name(file->elf, ".rela.discard.nospec");
443 if (!sec)
444 return 0;
445
446 list_for_each_entry(rela, &sec->rela_list, list) {
447 if (rela->sym->type != STT_SECTION) {
448 WARN("unexpected relocation symbol type in %s", sec->name);
449 return -1;
450 }
451
452 insn = find_insn(file, rela->sym->sec, rela->addend);
453 if (!insn) {
454 WARN("bad .discard.nospec entry");
455 return -1;
456 }
457
458 insn->ignore_alts = true;
459 }
460
461 return 0;
462}
463
464/*
431 * Find the destination instructions for all jumps. 465 * Find the destination instructions for all jumps.
432 */ 466 */
433static int add_jump_destinations(struct objtool_file *file) 467static int add_jump_destinations(struct objtool_file *file)
@@ -456,6 +490,13 @@ static int add_jump_destinations(struct objtool_file *file)
456 } else if (rela->sym->sec->idx) { 490 } else if (rela->sym->sec->idx) {
457 dest_sec = rela->sym->sec; 491 dest_sec = rela->sym->sec;
458 dest_off = rela->sym->sym.st_value + rela->addend + 4; 492 dest_off = rela->sym->sym.st_value + rela->addend + 4;
493 } else if (strstr(rela->sym->name, "_indirect_thunk_")) {
494 /*
495 * Retpoline jumps are really dynamic jumps in
496 * disguise, so convert them accordingly.
497 */
498 insn->type = INSN_JUMP_DYNAMIC;
499 continue;
459 } else { 500 } else {
460 /* sibling call */ 501 /* sibling call */
461 insn->jump_dest = 0; 502 insn->jump_dest = 0;
@@ -502,11 +543,18 @@ static int add_call_destinations(struct objtool_file *file)
502 dest_off = insn->offset + insn->len + insn->immediate; 543 dest_off = insn->offset + insn->len + insn->immediate;
503 insn->call_dest = find_symbol_by_offset(insn->sec, 544 insn->call_dest = find_symbol_by_offset(insn->sec,
504 dest_off); 545 dest_off);
546 /*
547 * FIXME: Thanks to retpolines, it's now considered
548 * normal for a function to call within itself. So
549 * disable this warning for now.
550 */
551#if 0
505 if (!insn->call_dest) { 552 if (!insn->call_dest) {
506 WARN_FUNC("can't find call dest symbol at offset 0x%lx", 553 WARN_FUNC("can't find call dest symbol at offset 0x%lx",
507 insn->sec, insn->offset, dest_off); 554 insn->sec, insn->offset, dest_off);
508 return -1; 555 return -1;
509 } 556 }
557#endif
510 } else if (rela->sym->type == STT_SECTION) { 558 } else if (rela->sym->type == STT_SECTION) {
511 insn->call_dest = find_symbol_by_offset(rela->sym->sec, 559 insn->call_dest = find_symbol_by_offset(rela->sym->sec,
512 rela->addend+4); 560 rela->addend+4);
@@ -671,12 +719,6 @@ static int add_special_section_alts(struct objtool_file *file)
671 return ret; 719 return ret;
672 720
673 list_for_each_entry_safe(special_alt, tmp, &special_alts, list) { 721 list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
674 alt = malloc(sizeof(*alt));
675 if (!alt) {
676 WARN("malloc failed");
677 ret = -1;
678 goto out;
679 }
680 722
681 orig_insn = find_insn(file, special_alt->orig_sec, 723 orig_insn = find_insn(file, special_alt->orig_sec,
682 special_alt->orig_off); 724 special_alt->orig_off);
@@ -687,6 +729,10 @@ static int add_special_section_alts(struct objtool_file *file)
687 goto out; 729 goto out;
688 } 730 }
689 731
732 /* Ignore retpoline alternatives. */
733 if (orig_insn->ignore_alts)
734 continue;
735
690 new_insn = NULL; 736 new_insn = NULL;
691 if (!special_alt->group || special_alt->new_len) { 737 if (!special_alt->group || special_alt->new_len) {
692 new_insn = find_insn(file, special_alt->new_sec, 738 new_insn = find_insn(file, special_alt->new_sec,
@@ -712,6 +758,13 @@ static int add_special_section_alts(struct objtool_file *file)
712 goto out; 758 goto out;
713 } 759 }
714 760
761 alt = malloc(sizeof(*alt));
762 if (!alt) {
763 WARN("malloc failed");
764 ret = -1;
765 goto out;
766 }
767
715 alt->insn = new_insn; 768 alt->insn = new_insn;
716 list_add_tail(&alt->list, &orig_insn->alts); 769 list_add_tail(&alt->list, &orig_insn->alts);
717 770
@@ -1028,6 +1081,10 @@ static int decode_sections(struct objtool_file *file)
1028 1081
1029 add_ignores(file); 1082 add_ignores(file);
1030 1083
1084 ret = add_nospec_ignores(file);
1085 if (ret)
1086 return ret;
1087
1031 ret = add_jump_destinations(file); 1088 ret = add_jump_destinations(file);
1032 if (ret) 1089 if (ret)
1033 return ret; 1090 return ret;
diff --git a/tools/objtool/check.h b/tools/objtool/check.h
index 47d9ea70a83d..dbadb304a410 100644
--- a/tools/objtool/check.h
+++ b/tools/objtool/check.h
@@ -44,7 +44,7 @@ struct instruction {
44 unsigned int len; 44 unsigned int len;
45 unsigned char type; 45 unsigned char type;
46 unsigned long immediate; 46 unsigned long immediate;
47 bool alt_group, visited, dead_end, ignore, hint, save, restore; 47 bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;
48 struct symbol *call_dest; 48 struct symbol *call_dest;
49 struct instruction *jump_dest; 49 struct instruction *jump_dest;
50 struct list_head alts; 50 struct list_head alts;
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index 24460155c82c..c1c338661699 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -26,6 +26,7 @@
26#include <stdlib.h> 26#include <stdlib.h>
27#include <string.h> 27#include <string.h>
28#include <unistd.h> 28#include <unistd.h>
29#include <errno.h>
29 30
30#include "elf.h" 31#include "elf.h"
31#include "warn.h" 32#include "warn.h"
@@ -358,7 +359,8 @@ struct elf *elf_open(const char *name, int flags)
358 359
359 elf->fd = open(name, flags); 360 elf->fd = open(name, flags);
360 if (elf->fd == -1) { 361 if (elf->fd == -1) {
361 perror("open"); 362 fprintf(stderr, "objtool: Can't open '%s': %s\n",
363 name, strerror(errno));
362 goto err; 364 goto err;
363 } 365 }
364 366
diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c
index 36c5bf6a2675..c3343820916a 100644
--- a/tools/objtool/orc_dump.c
+++ b/tools/objtool/orc_dump.c
@@ -76,7 +76,8 @@ int orc_dump(const char *_objname)
76 int fd, nr_entries, i, *orc_ip = NULL, orc_size = 0; 76 int fd, nr_entries, i, *orc_ip = NULL, orc_size = 0;
77 struct orc_entry *orc = NULL; 77 struct orc_entry *orc = NULL;
78 char *name; 78 char *name;
79 unsigned long nr_sections, orc_ip_addr = 0; 79 size_t nr_sections;
80 Elf64_Addr orc_ip_addr = 0;
80 size_t shstrtab_idx; 81 size_t shstrtab_idx;
81 Elf *elf; 82 Elf *elf;
82 Elf_Scn *scn; 83 Elf_Scn *scn;
@@ -187,10 +188,10 @@ int orc_dump(const char *_objname)
187 return -1; 188 return -1;
188 } 189 }
189 190
190 printf("%s+%lx:", name, rela.r_addend); 191 printf("%s+%llx:", name, (unsigned long long)rela.r_addend);
191 192
192 } else { 193 } else {
193 printf("%lx:", orc_ip_addr + (i * sizeof(int)) + orc_ip[i]); 194 printf("%llx:", (unsigned long long)(orc_ip_addr + (i * sizeof(int)) + orc_ip[i]));
194 } 195 }
195 196
196 197
diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c
index e5ca31429c9b..e61fe703197b 100644
--- a/tools/objtool/orc_gen.c
+++ b/tools/objtool/orc_gen.c
@@ -165,6 +165,8 @@ int create_orc_sections(struct objtool_file *file)
165 165
166 /* create .orc_unwind_ip and .rela.orc_unwind_ip sections */ 166 /* create .orc_unwind_ip and .rela.orc_unwind_ip sections */
167 sec = elf_create_section(file->elf, ".orc_unwind_ip", sizeof(int), idx); 167 sec = elf_create_section(file->elf, ".orc_unwind_ip", sizeof(int), idx);
168 if (!sec)
169 return -1;
168 170
169 ip_relasec = elf_create_rela_section(file->elf, sec); 171 ip_relasec = elf_create_rela_section(file->elf, sec);
170 if (!ip_relasec) 172 if (!ip_relasec)
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index ed65e82f034e..0294bfb6c5f8 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -188,9 +188,7 @@ ifdef PYTHON_CONFIG
188 PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) 188 PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS))
189 PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil 189 PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil
190 PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null) 190 PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null)
191 ifeq ($(CC_NO_CLANG), 1) 191 PYTHON_EMBED_CCOPTS := $(filter-out -specs=%,$(PYTHON_EMBED_CCOPTS))
192 PYTHON_EMBED_CCOPTS := $(filter-out -specs=%,$(PYTHON_EMBED_CCOPTS))
193 endif
194 FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS) 192 FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
195endif 193endif
196 194
@@ -576,14 +574,15 @@ ifndef NO_GTK2
576 endif 574 endif
577endif 575endif
578 576
579
580ifdef NO_LIBPERL 577ifdef NO_LIBPERL
581 CFLAGS += -DNO_LIBPERL 578 CFLAGS += -DNO_LIBPERL
582else 579else
583 PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null) 580 PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null)
584 PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS)) 581 PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS))
585 PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS)) 582 PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS))
586 PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null` 583 PERL_EMBED_CCOPTS = $(shell perl -MExtUtils::Embed -e ccopts 2>/dev/null)
584 PERL_EMBED_CCOPTS := $(filter-out -specs=%,$(PERL_EMBED_CCOPTS))
585 PERL_EMBED_LDOPTS := $(filter-out -specs=%,$(PERL_EMBED_LDOPTS))
587 FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS) 586 FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
588 587
589 ifneq ($(feature-libperl), 1) 588 ifneq ($(feature-libperl), 1)
diff --git a/tools/perf/arch/s390/Makefile b/tools/perf/arch/s390/Makefile
index 21322e0385b8..09ba923debe8 100644
--- a/tools/perf/arch/s390/Makefile
+++ b/tools/perf/arch/s390/Makefile
@@ -2,3 +2,4 @@ ifndef NO_DWARF
2PERF_HAVE_DWARF_REGS := 1 2PERF_HAVE_DWARF_REGS := 1
3endif 3endif
4HAVE_KVM_STAT_SUPPORT := 1 4HAVE_KVM_STAT_SUPPORT := 1
5PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
diff --git a/tools/perf/arch/s390/include/perf_regs.h b/tools/perf/arch/s390/include/perf_regs.h
index d2df54a6bc5a..bcfbaed78cc2 100644
--- a/tools/perf/arch/s390/include/perf_regs.h
+++ b/tools/perf/arch/s390/include/perf_regs.h
@@ -3,7 +3,7 @@
3 3
4#include <stdlib.h> 4#include <stdlib.h>
5#include <linux/types.h> 5#include <linux/types.h>
6#include <../../../../arch/s390/include/uapi/asm/perf_regs.h> 6#include <asm/perf_regs.h>
7 7
8void perf_regs_load(u64 *regs); 8void perf_regs_load(u64 *regs);
9 9
diff --git a/tools/perf/arch/s390/util/dwarf-regs.c b/tools/perf/arch/s390/util/dwarf-regs.c
index f47576ce13ea..a8ace5cc6301 100644
--- a/tools/perf/arch/s390/util/dwarf-regs.c
+++ b/tools/perf/arch/s390/util/dwarf-regs.c
@@ -2,17 +2,43 @@
2/* 2/*
3 * Mapping of DWARF debug register numbers into register names. 3 * Mapping of DWARF debug register numbers into register names.
4 * 4 *
5 * Copyright IBM Corp. 2010 5 * Copyright IBM Corp. 2010, 2017
6 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, 6 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
7 * Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
7 * 8 *
8 */ 9 */
9 10
11#include <errno.h>
10#include <stddef.h> 12#include <stddef.h>
11#include <dwarf-regs.h> 13#include <stdlib.h>
12#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <asm/ptrace.h>
16#include <string.h>
17#include <dwarf-regs.h>
13#include "dwarf-regs-table.h" 18#include "dwarf-regs-table.h"
14 19
15const char *get_arch_regstr(unsigned int n) 20const char *get_arch_regstr(unsigned int n)
16{ 21{
17 return (n >= ARRAY_SIZE(s390_dwarf_regs)) ? NULL : s390_dwarf_regs[n]; 22 return (n >= ARRAY_SIZE(s390_dwarf_regs)) ? NULL : s390_dwarf_regs[n];
18} 23}
24
25/*
26 * Convert the register name into an offset to struct pt_regs (kernel).
27 * This is required by the BPF prologue generator. The BPF
28 * program is called in the BPF overflow handler in the perf
29 * core.
30 */
31int regs_query_register_offset(const char *name)
32{
33 unsigned long gpr;
34
35 if (!name || strncmp(name, "%r", 2))
36 return -EINVAL;
37
38 errno = 0;
39 gpr = strtoul(name + 2, NULL, 10);
40 if (errno || gpr >= 16)
41 return -EINVAL;
42
43 return offsetof(user_pt_regs, gprs) + 8 * gpr;
44}
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index d95fdcc26f4b..944070e98a2c 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -216,6 +216,47 @@ static const char * const numa_usage[] = {
216 NULL 216 NULL
217}; 217};
218 218
219/*
220 * To get number of numa nodes present.
221 */
222static int nr_numa_nodes(void)
223{
224 int i, nr_nodes = 0;
225
226 for (i = 0; i < g->p.nr_nodes; i++) {
227 if (numa_bitmask_isbitset(numa_nodes_ptr, i))
228 nr_nodes++;
229 }
230
231 return nr_nodes;
232}
233
234/*
235 * To check if given numa node is present.
236 */
237static int is_node_present(int node)
238{
239 return numa_bitmask_isbitset(numa_nodes_ptr, node);
240}
241
242/*
243 * To check given numa node has cpus.
244 */
245static bool node_has_cpus(int node)
246{
247 struct bitmask *cpu = numa_allocate_cpumask();
248 unsigned int i;
249
250 if (cpu && !numa_node_to_cpus(node, cpu)) {
251 for (i = 0; i < cpu->size; i++) {
252 if (numa_bitmask_isbitset(cpu, i))
253 return true;
254 }
255 }
256
257 return false; /* lets fall back to nocpus safely */
258}
259
219static cpu_set_t bind_to_cpu(int target_cpu) 260static cpu_set_t bind_to_cpu(int target_cpu)
220{ 261{
221 cpu_set_t orig_mask, mask; 262 cpu_set_t orig_mask, mask;
@@ -244,12 +285,12 @@ static cpu_set_t bind_to_cpu(int target_cpu)
244 285
245static cpu_set_t bind_to_node(int target_node) 286static cpu_set_t bind_to_node(int target_node)
246{ 287{
247 int cpus_per_node = g->p.nr_cpus/g->p.nr_nodes; 288 int cpus_per_node = g->p.nr_cpus / nr_numa_nodes();
248 cpu_set_t orig_mask, mask; 289 cpu_set_t orig_mask, mask;
249 int cpu; 290 int cpu;
250 int ret; 291 int ret;
251 292
252 BUG_ON(cpus_per_node*g->p.nr_nodes != g->p.nr_cpus); 293 BUG_ON(cpus_per_node * nr_numa_nodes() != g->p.nr_cpus);
253 BUG_ON(!cpus_per_node); 294 BUG_ON(!cpus_per_node);
254 295
255 ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask); 296 ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
@@ -649,7 +690,7 @@ static int parse_setup_node_list(void)
649 int i; 690 int i;
650 691
651 for (i = 0; i < mul; i++) { 692 for (i = 0; i < mul; i++) {
652 if (t >= g->p.nr_tasks) { 693 if (t >= g->p.nr_tasks || !node_has_cpus(bind_node)) {
653 printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node); 694 printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node);
654 goto out; 695 goto out;
655 } 696 }
@@ -964,6 +1005,8 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
964 sum = 0; 1005 sum = 0;
965 1006
966 for (node = 0; node < g->p.nr_nodes; node++) { 1007 for (node = 0; node < g->p.nr_nodes; node++) {
1008 if (!is_node_present(node))
1009 continue;
967 nr = nodes[node]; 1010 nr = nodes[node];
968 nr_min = min(nr, nr_min); 1011 nr_min = min(nr, nr_min);
969 nr_max = max(nr, nr_max); 1012 nr_max = max(nr, nr_max);
@@ -984,8 +1027,11 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
984 process_groups = 0; 1027 process_groups = 0;
985 1028
986 for (node = 0; node < g->p.nr_nodes; node++) { 1029 for (node = 0; node < g->p.nr_nodes; node++) {
987 int processes = count_node_processes(node); 1030 int processes;
988 1031
1032 if (!is_node_present(node))
1033 continue;
1034 processes = count_node_processes(node);
989 nr = nodes[node]; 1035 nr = nodes[node];
990 tprintf(" %2d/%-2d", nr, processes); 1036 tprintf(" %2d/%-2d", nr, processes);
991 1037
@@ -1291,7 +1337,7 @@ static void print_summary(void)
1291 1337
1292 printf("\n ###\n"); 1338 printf("\n ###\n");
1293 printf(" # %d %s will execute (on %d nodes, %d CPUs):\n", 1339 printf(" # %d %s will execute (on %d nodes, %d CPUs):\n",
1294 g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", g->p.nr_nodes, g->p.nr_cpus); 1340 g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", nr_numa_nodes(), g->p.nr_cpus);
1295 printf(" # %5dx %5ldMB global shared mem operations\n", 1341 printf(" # %5dx %5ldMB global shared mem operations\n",
1296 g->p.nr_loops, g->p.bytes_global/1024/1024); 1342 g->p.nr_loops, g->p.bytes_global/1024/1024);
1297 printf(" # %5dx %5ldMB process shared mem operations\n", 1343 printf(" # %5dx %5ldMB process shared mem operations\n",
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index bd1fedef3d1c..a0f7ed2b869b 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -284,7 +284,7 @@ static int perf_help_config(const char *var, const char *value, void *cb)
284 add_man_viewer(value); 284 add_man_viewer(value);
285 return 0; 285 return 0;
286 } 286 }
287 if (!strstarts(var, "man.")) 287 if (strstarts(var, "man."))
288 return add_man_viewer_info(var, value); 288 return add_man_viewer_info(var, value);
289 289
290 return 0; 290 return 0;
@@ -314,7 +314,7 @@ static const char *cmd_to_page(const char *perf_cmd)
314 314
315 if (!perf_cmd) 315 if (!perf_cmd)
316 return "perf"; 316 return "perf";
317 else if (!strstarts(perf_cmd, "perf")) 317 else if (strstarts(perf_cmd, "perf"))
318 return perf_cmd; 318 return perf_cmd;
319 319
320 return asprintf(&s, "perf-%s", perf_cmd) < 0 ? NULL : s; 320 return asprintf(&s, "perf-%s", perf_cmd) < 0 ? NULL : s;
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 3d7f33e19df2..003255910c05 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -339,6 +339,22 @@ static int record__open(struct record *rec)
339 struct perf_evsel_config_term *err_term; 339 struct perf_evsel_config_term *err_term;
340 int rc = 0; 340 int rc = 0;
341 341
342 /*
343 * For initial_delay we need to add a dummy event so that we can track
344 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
345 * real events, the ones asked by the user.
346 */
347 if (opts->initial_delay) {
348 if (perf_evlist__add_dummy(evlist))
349 return -ENOMEM;
350
351 pos = perf_evlist__first(evlist);
352 pos->tracking = 0;
353 pos = perf_evlist__last(evlist);
354 pos->tracking = 1;
355 pos->attr.enable_on_exec = 1;
356 }
357
342 perf_evlist__config(evlist, opts, &callchain_param); 358 perf_evlist__config(evlist, opts, &callchain_param);
343 359
344 evlist__for_each_entry(evlist, pos) { 360 evlist__for_each_entry(evlist, pos) {
@@ -749,17 +765,19 @@ static int record__synthesize(struct record *rec, bool tail)
749 goto out; 765 goto out;
750 } 766 }
751 767
752 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event, 768 if (!perf_evlist__exclude_kernel(rec->evlist)) {
753 machine); 769 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
754 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n" 770 machine);
755 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n" 771 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
756 "Check /proc/kallsyms permission or run as root.\n"); 772 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
757 773 "Check /proc/kallsyms permission or run as root.\n");
758 err = perf_event__synthesize_modules(tool, process_synthesized_event, 774
759 machine); 775 err = perf_event__synthesize_modules(tool, process_synthesized_event,
760 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n" 776 machine);
761 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n" 777 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
762 "Check /proc/modules permission or run as root.\n"); 778 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
779 "Check /proc/modules permission or run as root.\n");
780 }
763 781
764 if (perf_guest) { 782 if (perf_guest) {
765 machines__process_guests(&session->machines, 783 machines__process_guests(&session->machines,
@@ -1693,7 +1711,7 @@ int cmd_record(int argc, const char **argv)
1693 1711
1694 err = -ENOMEM; 1712 err = -ENOMEM;
1695 1713
1696 if (symbol_conf.kptr_restrict) 1714 if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
1697 pr_warning( 1715 pr_warning(
1698"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n" 1716"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
1699"check /proc/sys/kernel/kptr_restrict.\n\n" 1717"check /proc/sys/kernel/kptr_restrict.\n\n"
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 1394cd8d96f7..af5dd038195e 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -441,6 +441,9 @@ static void report__warn_kptr_restrict(const struct report *rep)
441 struct map *kernel_map = machine__kernel_map(&rep->session->machines.host); 441 struct map *kernel_map = machine__kernel_map(&rep->session->machines.host);
442 struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL; 442 struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;
443 443
444 if (perf_evlist__exclude_kernel(rep->session->evlist))
445 return;
446
444 if (kernel_map == NULL || 447 if (kernel_map == NULL ||
445 (kernel_map->dso->hit && 448 (kernel_map->dso->hit &&
446 (kernel_kmap->ref_reloc_sym == NULL || 449 (kernel_kmap->ref_reloc_sym == NULL ||
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 68f36dc0344f..9b43bda45a41 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -1955,6 +1955,16 @@ static int perf_script__fopen_per_event_dump(struct perf_script *script)
1955 struct perf_evsel *evsel; 1955 struct perf_evsel *evsel;
1956 1956
1957 evlist__for_each_entry(script->session->evlist, evsel) { 1957 evlist__for_each_entry(script->session->evlist, evsel) {
1958 /*
1959 * Already setup? I.e. we may be called twice in cases like
1960 * Intel PT, one for the intel_pt// and dummy events, then
1961 * for the evsels syntheized from the auxtrace info.
1962 *
1963 * Ses perf_script__process_auxtrace_info.
1964 */
1965 if (evsel->priv != NULL)
1966 continue;
1967
1958 evsel->priv = perf_evsel_script__new(evsel, script->session->data); 1968 evsel->priv = perf_evsel_script__new(evsel, script->session->data);
1959 if (evsel->priv == NULL) 1969 if (evsel->priv == NULL)
1960 goto out_err_fclose; 1970 goto out_err_fclose;
@@ -2838,6 +2848,25 @@ int process_cpu_map_event(struct perf_tool *tool __maybe_unused,
2838 return set_maps(script); 2848 return set_maps(script);
2839} 2849}
2840 2850
2851#ifdef HAVE_AUXTRACE_SUPPORT
2852static int perf_script__process_auxtrace_info(struct perf_tool *tool,
2853 union perf_event *event,
2854 struct perf_session *session)
2855{
2856 int ret = perf_event__process_auxtrace_info(tool, event, session);
2857
2858 if (ret == 0) {
2859 struct perf_script *script = container_of(tool, struct perf_script, tool);
2860
2861 ret = perf_script__setup_per_event_dump(script);
2862 }
2863
2864 return ret;
2865}
2866#else
2867#define perf_script__process_auxtrace_info 0
2868#endif
2869
2841int cmd_script(int argc, const char **argv) 2870int cmd_script(int argc, const char **argv)
2842{ 2871{
2843 bool show_full_info = false; 2872 bool show_full_info = false;
@@ -2866,7 +2895,7 @@ int cmd_script(int argc, const char **argv)
2866 .feature = perf_event__process_feature, 2895 .feature = perf_event__process_feature,
2867 .build_id = perf_event__process_build_id, 2896 .build_id = perf_event__process_build_id,
2868 .id_index = perf_event__process_id_index, 2897 .id_index = perf_event__process_id_index,
2869 .auxtrace_info = perf_event__process_auxtrace_info, 2898 .auxtrace_info = perf_script__process_auxtrace_info,
2870 .auxtrace = perf_event__process_auxtrace, 2899 .auxtrace = perf_event__process_auxtrace,
2871 .auxtrace_error = perf_event__process_auxtrace_error, 2900 .auxtrace_error = perf_event__process_auxtrace_error,
2872 .stat = perf_event__process_stat_event, 2901 .stat = perf_event__process_stat_event,
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 477a8699f0b5..9e0d2645ae13 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -77,6 +77,7 @@
77#include "sane_ctype.h" 77#include "sane_ctype.h"
78 78
79static volatile int done; 79static volatile int done;
80static volatile int resize;
80 81
81#define HEADER_LINE_NR 5 82#define HEADER_LINE_NR 5
82 83
@@ -85,11 +86,13 @@ static void perf_top__update_print_entries(struct perf_top *top)
85 top->print_entries = top->winsize.ws_row - HEADER_LINE_NR; 86 top->print_entries = top->winsize.ws_row - HEADER_LINE_NR;
86} 87}
87 88
88static void perf_top__sig_winch(int sig __maybe_unused, 89static void winch_sig(int sig __maybe_unused)
89 siginfo_t *info __maybe_unused, void *arg)
90{ 90{
91 struct perf_top *top = arg; 91 resize = 1;
92}
92 93
94static void perf_top__resize(struct perf_top *top)
95{
93 get_term_dimensions(&top->winsize); 96 get_term_dimensions(&top->winsize);
94 perf_top__update_print_entries(top); 97 perf_top__update_print_entries(top);
95} 98}
@@ -473,12 +476,8 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
473 case 'e': 476 case 'e':
474 prompt_integer(&top->print_entries, "Enter display entries (lines)"); 477 prompt_integer(&top->print_entries, "Enter display entries (lines)");
475 if (top->print_entries == 0) { 478 if (top->print_entries == 0) {
476 struct sigaction act = { 479 perf_top__resize(top);
477 .sa_sigaction = perf_top__sig_winch, 480 signal(SIGWINCH, winch_sig);
478 .sa_flags = SA_SIGINFO,
479 };
480 perf_top__sig_winch(SIGWINCH, NULL, top);
481 sigaction(SIGWINCH, &act, NULL);
482 } else { 481 } else {
483 signal(SIGWINCH, SIG_DFL); 482 signal(SIGWINCH, SIG_DFL);
484 } 483 }
@@ -732,14 +731,16 @@ static void perf_event__process_sample(struct perf_tool *tool,
732 if (!machine->kptr_restrict_warned && 731 if (!machine->kptr_restrict_warned &&
733 symbol_conf.kptr_restrict && 732 symbol_conf.kptr_restrict &&
734 al.cpumode == PERF_RECORD_MISC_KERNEL) { 733 al.cpumode == PERF_RECORD_MISC_KERNEL) {
735 ui__warning( 734 if (!perf_evlist__exclude_kernel(top->session->evlist)) {
735 ui__warning(
736"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n" 736"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
737"Check /proc/sys/kernel/kptr_restrict.\n\n" 737"Check /proc/sys/kernel/kptr_restrict.\n\n"
738"Kernel%s samples will not be resolved.\n", 738"Kernel%s samples will not be resolved.\n",
739 al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ? 739 al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
740 " modules" : ""); 740 " modules" : "");
741 if (use_browser <= 0) 741 if (use_browser <= 0)
742 sleep(5); 742 sleep(5);
743 }
743 machine->kptr_restrict_warned = true; 744 machine->kptr_restrict_warned = true;
744 } 745 }
745 746
@@ -1030,6 +1031,11 @@ static int __cmd_top(struct perf_top *top)
1030 1031
1031 if (hits == top->samples) 1032 if (hits == top->samples)
1032 ret = perf_evlist__poll(top->evlist, 100); 1033 ret = perf_evlist__poll(top->evlist, 100);
1034
1035 if (resize) {
1036 perf_top__resize(top);
1037 resize = 0;
1038 }
1033 } 1039 }
1034 1040
1035 ret = 0; 1041 ret = 0;
@@ -1352,12 +1358,8 @@ int cmd_top(int argc, const char **argv)
1352 1358
1353 get_term_dimensions(&top.winsize); 1359 get_term_dimensions(&top.winsize);
1354 if (top.print_entries == 0) { 1360 if (top.print_entries == 0) {
1355 struct sigaction act = {
1356 .sa_sigaction = perf_top__sig_winch,
1357 .sa_flags = SA_SIGINFO,
1358 };
1359 perf_top__update_print_entries(&top); 1361 perf_top__update_print_entries(&top);
1360 sigaction(SIGWINCH, &act, NULL); 1362 signal(SIGWINCH, winch_sig);
1361 } 1363 }
1362 1364
1363 status = __cmd_top(&top); 1365 status = __cmd_top(&top);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index f2757d38c7d7..84debdbad327 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1152,12 +1152,14 @@ static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
1152 if (trace->host == NULL) 1152 if (trace->host == NULL)
1153 return -ENOMEM; 1153 return -ENOMEM;
1154 1154
1155 if (trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr) < 0) 1155 err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
1156 return -errno; 1156 if (err < 0)
1157 goto out;
1157 1158
1158 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target, 1159 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
1159 evlist->threads, trace__tool_process, false, 1160 evlist->threads, trace__tool_process, false,
1160 trace->opts.proc_map_timeout, 1); 1161 trace->opts.proc_map_timeout, 1);
1162out:
1161 if (err) 1163 if (err)
1162 symbol__exit(); 1164 symbol__exit();
1163 1165
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 77406d25e521..3e64f10b6d66 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -21,6 +21,7 @@ arch/x86/include/asm/cpufeatures.h
21arch/arm/include/uapi/asm/perf_regs.h 21arch/arm/include/uapi/asm/perf_regs.h
22arch/arm64/include/uapi/asm/perf_regs.h 22arch/arm64/include/uapi/asm/perf_regs.h
23arch/powerpc/include/uapi/asm/perf_regs.h 23arch/powerpc/include/uapi/asm/perf_regs.h
24arch/s390/include/uapi/asm/perf_regs.h
24arch/x86/include/uapi/asm/perf_regs.h 25arch/x86/include/uapi/asm/perf_regs.h
25arch/x86/include/uapi/asm/kvm.h 26arch/x86/include/uapi/asm/kvm.h
26arch/x86/include/uapi/asm/kvm_perf.h 27arch/x86/include/uapi/asm/kvm_perf.h
@@ -30,6 +31,7 @@ arch/x86/include/uapi/asm/vmx.h
30arch/powerpc/include/uapi/asm/kvm.h 31arch/powerpc/include/uapi/asm/kvm.h
31arch/s390/include/uapi/asm/kvm.h 32arch/s390/include/uapi/asm/kvm.h
32arch/s390/include/uapi/asm/kvm_perf.h 33arch/s390/include/uapi/asm/kvm_perf.h
34arch/s390/include/uapi/asm/ptrace.h
33arch/s390/include/uapi/asm/sie.h 35arch/s390/include/uapi/asm/sie.h
34arch/arm/include/uapi/asm/kvm.h 36arch/arm/include/uapi/asm/kvm.h
35arch/arm64/include/uapi/asm/kvm.h 37arch/arm64/include/uapi/asm/kvm.h
diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c
index cf36de7ea255..0c6d1002b524 100644
--- a/tools/perf/jvmti/jvmti_agent.c
+++ b/tools/perf/jvmti/jvmti_agent.c
@@ -384,13 +384,13 @@ jvmti_write_code(void *agent, char const *sym,
384} 384}
385 385
386int 386int
387jvmti_write_debug_info(void *agent, uint64_t code, const char *file, 387jvmti_write_debug_info(void *agent, uint64_t code,
388 jvmti_line_info_t *li, int nr_lines) 388 int nr_lines, jvmti_line_info_t *li,
389 const char * const * file_names)
389{ 390{
390 struct jr_code_debug_info rec; 391 struct jr_code_debug_info rec;
391 size_t sret, len, size, flen; 392 size_t sret, len, size, flen = 0;
392 uint64_t addr; 393 uint64_t addr;
393 const char *fn = file;
394 FILE *fp = agent; 394 FILE *fp = agent;
395 int i; 395 int i;
396 396
@@ -405,7 +405,9 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
405 return -1; 405 return -1;
406 } 406 }
407 407
408 flen = strlen(file) + 1; 408 for (i = 0; i < nr_lines; ++i) {
409 flen += strlen(file_names[i]) + 1;
410 }
409 411
410 rec.p.id = JIT_CODE_DEBUG_INFO; 412 rec.p.id = JIT_CODE_DEBUG_INFO;
411 size = sizeof(rec); 413 size = sizeof(rec);
@@ -421,7 +423,7 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
421 * file[] : source file name 423 * file[] : source file name
422 */ 424 */
423 size += nr_lines * sizeof(struct debug_entry); 425 size += nr_lines * sizeof(struct debug_entry);
424 size += flen * nr_lines; 426 size += flen;
425 rec.p.total_size = size; 427 rec.p.total_size = size;
426 428
427 /* 429 /*
@@ -452,7 +454,7 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
452 if (sret != 1) 454 if (sret != 1)
453 goto error; 455 goto error;
454 456
455 sret = fwrite_unlocked(fn, flen, 1, fp); 457 sret = fwrite_unlocked(file_names[i], strlen(file_names[i]) + 1, 1, fp);
456 if (sret != 1) 458 if (sret != 1)
457 goto error; 459 goto error;
458 } 460 }
diff --git a/tools/perf/jvmti/jvmti_agent.h b/tools/perf/jvmti/jvmti_agent.h
index fe32d8344a82..6ed82f6c06dd 100644
--- a/tools/perf/jvmti/jvmti_agent.h
+++ b/tools/perf/jvmti/jvmti_agent.h
@@ -14,6 +14,7 @@ typedef struct {
14 unsigned long pc; 14 unsigned long pc;
15 int line_number; 15 int line_number;
16 int discrim; /* discriminator -- 0 for now */ 16 int discrim; /* discriminator -- 0 for now */
17 jmethodID methodID;
17} jvmti_line_info_t; 18} jvmti_line_info_t;
18 19
19void *jvmti_open(void); 20void *jvmti_open(void);
@@ -22,11 +23,9 @@ int jvmti_write_code(void *agent, char const *symbol_name,
22 uint64_t vma, void const *code, 23 uint64_t vma, void const *code,
23 const unsigned int code_size); 24 const unsigned int code_size);
24 25
25int jvmti_write_debug_info(void *agent, 26int jvmti_write_debug_info(void *agent, uint64_t code, int nr_lines,
26 uint64_t code,
27 const char *file,
28 jvmti_line_info_t *li, 27 jvmti_line_info_t *li,
29 int nr_lines); 28 const char * const * file_names);
30 29
31#if defined(__cplusplus) 30#if defined(__cplusplus)
32} 31}
diff --git a/tools/perf/jvmti/libjvmti.c b/tools/perf/jvmti/libjvmti.c
index c62c9fc9a525..6add3e982614 100644
--- a/tools/perf/jvmti/libjvmti.c
+++ b/tools/perf/jvmti/libjvmti.c
@@ -47,6 +47,7 @@ do_get_line_numbers(jvmtiEnv *jvmti, void *pc, jmethodID m, jint bci,
47 tab[lines].pc = (unsigned long)pc; 47 tab[lines].pc = (unsigned long)pc;
48 tab[lines].line_number = loc_tab[i].line_number; 48 tab[lines].line_number = loc_tab[i].line_number;
49 tab[lines].discrim = 0; /* not yet used */ 49 tab[lines].discrim = 0; /* not yet used */
50 tab[lines].methodID = m;
50 lines++; 51 lines++;
51 } else { 52 } else {
52 break; 53 break;
@@ -125,6 +126,99 @@ get_line_numbers(jvmtiEnv *jvmti, const void *compile_info, jvmti_line_info_t **
125 return JVMTI_ERROR_NONE; 126 return JVMTI_ERROR_NONE;
126} 127}
127 128
129static void
130copy_class_filename(const char * class_sign, const char * file_name, char * result, size_t max_length)
131{
132 /*
133 * Assume path name is class hierarchy, this is a common practice with Java programs
134 */
135 if (*class_sign == 'L') {
136 int j, i = 0;
137 char *p = strrchr(class_sign, '/');
138 if (p) {
139 /* drop the 'L' prefix and copy up to the final '/' */
140 for (i = 0; i < (p - class_sign); i++)
141 result[i] = class_sign[i+1];
142 }
143 /*
144 * append file name, we use loops and not string ops to avoid modifying
145 * class_sign which is used later for the symbol name
146 */
147 for (j = 0; i < (max_length - 1) && file_name && j < strlen(file_name); j++, i++)
148 result[i] = file_name[j];
149
150 result[i] = '\0';
151 } else {
152 /* fallback case */
153 size_t file_name_len = strlen(file_name);
154 strncpy(result, file_name, file_name_len < max_length ? file_name_len : max_length);
155 }
156}
157
158static jvmtiError
159get_source_filename(jvmtiEnv *jvmti, jmethodID methodID, char ** buffer)
160{
161 jvmtiError ret;
162 jclass decl_class;
163 char *file_name = NULL;
164 char *class_sign = NULL;
165 char fn[PATH_MAX];
166 size_t len;
167
168 ret = (*jvmti)->GetMethodDeclaringClass(jvmti, methodID, &decl_class);
169 if (ret != JVMTI_ERROR_NONE) {
170 print_error(jvmti, "GetMethodDeclaringClass", ret);
171 return ret;
172 }
173
174 ret = (*jvmti)->GetSourceFileName(jvmti, decl_class, &file_name);
175 if (ret != JVMTI_ERROR_NONE) {
176 print_error(jvmti, "GetSourceFileName", ret);
177 return ret;
178 }
179
180 ret = (*jvmti)->GetClassSignature(jvmti, decl_class, &class_sign, NULL);
181 if (ret != JVMTI_ERROR_NONE) {
182 print_error(jvmti, "GetClassSignature", ret);
183 goto free_file_name_error;
184 }
185
186 copy_class_filename(class_sign, file_name, fn, PATH_MAX);
187 len = strlen(fn);
188 *buffer = malloc((len + 1) * sizeof(char));
189 if (!*buffer) {
190 print_error(jvmti, "GetClassSignature", ret);
191 ret = JVMTI_ERROR_OUT_OF_MEMORY;
192 goto free_class_sign_error;
193 }
194 strcpy(*buffer, fn);
195 ret = JVMTI_ERROR_NONE;
196
197free_class_sign_error:
198 (*jvmti)->Deallocate(jvmti, (unsigned char *)class_sign);
199free_file_name_error:
200 (*jvmti)->Deallocate(jvmti, (unsigned char *)file_name);
201
202 return ret;
203}
204
205static jvmtiError
206fill_source_filenames(jvmtiEnv *jvmti, int nr_lines,
207 const jvmti_line_info_t * line_tab,
208 char ** file_names)
209{
210 int index;
211 jvmtiError ret;
212
213 for (index = 0; index < nr_lines; ++index) {
214 ret = get_source_filename(jvmti, line_tab[index].methodID, &(file_names[index]));
215 if (ret != JVMTI_ERROR_NONE)
216 return ret;
217 }
218
219 return JVMTI_ERROR_NONE;
220}
221
128static void JNICALL 222static void JNICALL
129compiled_method_load_cb(jvmtiEnv *jvmti, 223compiled_method_load_cb(jvmtiEnv *jvmti,
130 jmethodID method, 224 jmethodID method,
@@ -135,16 +229,18 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
135 const void *compile_info) 229 const void *compile_info)
136{ 230{
137 jvmti_line_info_t *line_tab = NULL; 231 jvmti_line_info_t *line_tab = NULL;
232 char ** line_file_names = NULL;
138 jclass decl_class; 233 jclass decl_class;
139 char *class_sign = NULL; 234 char *class_sign = NULL;
140 char *func_name = NULL; 235 char *func_name = NULL;
141 char *func_sign = NULL; 236 char *func_sign = NULL;
142 char *file_name= NULL; 237 char *file_name = NULL;
143 char fn[PATH_MAX]; 238 char fn[PATH_MAX];
144 uint64_t addr = (uint64_t)(uintptr_t)code_addr; 239 uint64_t addr = (uint64_t)(uintptr_t)code_addr;
145 jvmtiError ret; 240 jvmtiError ret;
146 int nr_lines = 0; /* in line_tab[] */ 241 int nr_lines = 0; /* in line_tab[] */
147 size_t len; 242 size_t len;
243 int output_debug_info = 0;
148 244
149 ret = (*jvmti)->GetMethodDeclaringClass(jvmti, method, 245 ret = (*jvmti)->GetMethodDeclaringClass(jvmti, method,
150 &decl_class); 246 &decl_class);
@@ -158,6 +254,19 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
158 if (ret != JVMTI_ERROR_NONE) { 254 if (ret != JVMTI_ERROR_NONE) {
159 warnx("jvmti: cannot get line table for method"); 255 warnx("jvmti: cannot get line table for method");
160 nr_lines = 0; 256 nr_lines = 0;
257 } else if (nr_lines > 0) {
258 line_file_names = malloc(sizeof(char*) * nr_lines);
259 if (!line_file_names) {
260 warnx("jvmti: cannot allocate space for line table method names");
261 } else {
262 memset(line_file_names, 0, sizeof(char*) * nr_lines);
263 ret = fill_source_filenames(jvmti, nr_lines, line_tab, line_file_names);
264 if (ret != JVMTI_ERROR_NONE) {
265 warnx("jvmti: fill_source_filenames failed");
266 } else {
267 output_debug_info = 1;
268 }
269 }
161 } 270 }
162 } 271 }
163 272
@@ -181,33 +290,14 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
181 goto error; 290 goto error;
182 } 291 }
183 292
184 /* 293 copy_class_filename(class_sign, file_name, fn, PATH_MAX);
185 * Assume path name is class hierarchy, this is a common practice with Java programs 294
186 */
187 if (*class_sign == 'L') {
188 int j, i = 0;
189 char *p = strrchr(class_sign, '/');
190 if (p) {
191 /* drop the 'L' prefix and copy up to the final '/' */
192 for (i = 0; i < (p - class_sign); i++)
193 fn[i] = class_sign[i+1];
194 }
195 /*
196 * append file name, we use loops and not string ops to avoid modifying
197 * class_sign which is used later for the symbol name
198 */
199 for (j = 0; i < (PATH_MAX - 1) && file_name && j < strlen(file_name); j++, i++)
200 fn[i] = file_name[j];
201 fn[i] = '\0';
202 } else {
203 /* fallback case */
204 strcpy(fn, file_name);
205 }
206 /* 295 /*
207 * write source line info record if we have it 296 * write source line info record if we have it
208 */ 297 */
209 if (jvmti_write_debug_info(jvmti_agent, addr, fn, line_tab, nr_lines)) 298 if (output_debug_info)
210 warnx("jvmti: write_debug_info() failed"); 299 if (jvmti_write_debug_info(jvmti_agent, addr, nr_lines, line_tab, (const char * const *) line_file_names))
300 warnx("jvmti: write_debug_info() failed");
211 301
212 len = strlen(func_name) + strlen(class_sign) + strlen(func_sign) + 2; 302 len = strlen(func_name) + strlen(class_sign) + strlen(func_sign) + 2;
213 { 303 {
@@ -223,6 +313,13 @@ error:
223 (*jvmti)->Deallocate(jvmti, (unsigned char *)class_sign); 313 (*jvmti)->Deallocate(jvmti, (unsigned char *)class_sign);
224 (*jvmti)->Deallocate(jvmti, (unsigned char *)file_name); 314 (*jvmti)->Deallocate(jvmti, (unsigned char *)file_name);
225 free(line_tab); 315 free(line_tab);
316 while (line_file_names && (nr_lines > 0)) {
317 if (line_file_names[nr_lines - 1]) {
318 free(line_file_names[nr_lines - 1]);
319 }
320 nr_lines -= 1;
321 }
322 free(line_file_names);
226} 323}
227 324
228static void JNICALL 325static void JNICALL
diff --git a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
index 7a84d73324e3..8b3da21a08f1 100755
--- a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
@@ -10,8 +10,8 @@
10 10
11. $(dirname $0)/lib/probe.sh 11. $(dirname $0)/lib/probe.sh
12 12
13ld=$(realpath /lib64/ld*.so.* | uniq) 13libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1/g')
14libc=$(echo $ld | sed 's/ld/libc/g') 14nm -g $libc 2>/dev/null | fgrep -q inet_pton || exit 254
15 15
16trace_libc_inet_pton_backtrace() { 16trace_libc_inet_pton_backtrace() {
17 idx=0 17 idx=0
@@ -37,6 +37,9 @@ trace_libc_inet_pton_backtrace() {
37 done 37 done
38} 38}
39 39
40# Check for IPv6 interface existence
41ip a sh lo | fgrep -q inet6 || exit 2
42
40skip_if_no_perf_probe && \ 43skip_if_no_perf_probe && \
41perf probe -q $libc inet_pton && \ 44perf probe -q $libc inet_pton && \
42trace_libc_inet_pton_backtrace 45trace_libc_inet_pton_backtrace
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
index 2e68c5f120da..2a9ef080efd0 100755
--- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
@@ -17,8 +17,10 @@ skip_if_no_perf_probe || exit 2
17file=$(mktemp /tmp/temporary_file.XXXXX) 17file=$(mktemp /tmp/temporary_file.XXXXX)
18 18
19trace_open_vfs_getname() { 19trace_open_vfs_getname() {
20 perf trace -e open touch $file 2>&1 | \ 20 test "$(uname -m)" = s390x && { svc="openat"; txt="dfd: +CWD, +"; }
21 egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ open\(filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$" 21
22 perf trace -e ${svc:-open} touch $file 2>&1 | \
23 egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ ${svc:-open}\(${txt}filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
22} 24}
23 25
24 26
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index bc4a7344e274..89c8e1604ca7 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -84,7 +84,11 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
84 84
85 evsel = perf_evlist__first(evlist); 85 evsel = perf_evlist__first(evlist);
86 evsel->attr.task = 1; 86 evsel->attr.task = 1;
87#ifdef __s390x__
88 evsel->attr.sample_freq = 1000000;
89#else
87 evsel->attr.sample_freq = 1; 90 evsel->attr.sample_freq = 1;
91#endif
88 evsel->attr.inherit = 0; 92 evsel->attr.inherit = 0;
89 evsel->attr.watermark = 0; 93 evsel->attr.watermark = 0;
90 evsel->attr.wakeup_events = 1; 94 evsel->attr.wakeup_events = 1;
diff --git a/tools/perf/trace/beauty/mmap.c b/tools/perf/trace/beauty/mmap.c
index 9e1668b2c5d7..417e3ecfe9d7 100644
--- a/tools/perf/trace/beauty/mmap.c
+++ b/tools/perf/trace/beauty/mmap.c
@@ -62,6 +62,9 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
62 P_MMAP_FLAG(POPULATE); 62 P_MMAP_FLAG(POPULATE);
63 P_MMAP_FLAG(STACK); 63 P_MMAP_FLAG(STACK);
64 P_MMAP_FLAG(UNINITIALIZED); 64 P_MMAP_FLAG(UNINITIALIZED);
65#ifdef MAP_SYNC
66 P_MMAP_FLAG(SYNC);
67#endif
65#undef P_MMAP_FLAG 68#undef P_MMAP_FLAG
66 69
67 if (flags) 70 if (flags)
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index da1c4c4a0dd8..3369c7830260 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -165,7 +165,7 @@ static void ins__delete(struct ins_operands *ops)
165static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size, 165static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
166 struct ins_operands *ops) 166 struct ins_operands *ops)
167{ 167{
168 return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->raw); 168 return scnprintf(bf, size, "%-6s %s", ins->name, ops->raw);
169} 169}
170 170
171int ins__scnprintf(struct ins *ins, char *bf, size_t size, 171int ins__scnprintf(struct ins *ins, char *bf, size_t size,
@@ -230,12 +230,12 @@ static int call__scnprintf(struct ins *ins, char *bf, size_t size,
230 struct ins_operands *ops) 230 struct ins_operands *ops)
231{ 231{
232 if (ops->target.name) 232 if (ops->target.name)
233 return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->target.name); 233 return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.name);
234 234
235 if (ops->target.addr == 0) 235 if (ops->target.addr == 0)
236 return ins__raw_scnprintf(ins, bf, size, ops); 236 return ins__raw_scnprintf(ins, bf, size, ops);
237 237
238 return scnprintf(bf, size, "%-6.6s *%" PRIx64, ins->name, ops->target.addr); 238 return scnprintf(bf, size, "%-6s *%" PRIx64, ins->name, ops->target.addr);
239} 239}
240 240
241static struct ins_ops call_ops = { 241static struct ins_ops call_ops = {
@@ -299,7 +299,7 @@ static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
299 c++; 299 c++;
300 } 300 }
301 301
302 return scnprintf(bf, size, "%-6.6s %.*s%" PRIx64, 302 return scnprintf(bf, size, "%-6s %.*s%" PRIx64,
303 ins->name, c ? c - ops->raw : 0, ops->raw, 303 ins->name, c ? c - ops->raw : 0, ops->raw,
304 ops->target.offset); 304 ops->target.offset);
305} 305}
@@ -372,7 +372,7 @@ static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
372 if (ops->locked.ins.ops == NULL) 372 if (ops->locked.ins.ops == NULL)
373 return ins__raw_scnprintf(ins, bf, size, ops); 373 return ins__raw_scnprintf(ins, bf, size, ops);
374 374
375 printed = scnprintf(bf, size, "%-6.6s ", ins->name); 375 printed = scnprintf(bf, size, "%-6s ", ins->name);
376 return printed + ins__scnprintf(&ops->locked.ins, bf + printed, 376 return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
377 size - printed, ops->locked.ops); 377 size - printed, ops->locked.ops);
378} 378}
@@ -448,7 +448,7 @@ out_free_source:
448static int mov__scnprintf(struct ins *ins, char *bf, size_t size, 448static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
449 struct ins_operands *ops) 449 struct ins_operands *ops)
450{ 450{
451 return scnprintf(bf, size, "%-6.6s %s,%s", ins->name, 451 return scnprintf(bf, size, "%-6s %s,%s", ins->name,
452 ops->source.name ?: ops->source.raw, 452 ops->source.name ?: ops->source.raw,
453 ops->target.name ?: ops->target.raw); 453 ops->target.name ?: ops->target.raw);
454} 454}
@@ -488,7 +488,7 @@ static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops
488static int dec__scnprintf(struct ins *ins, char *bf, size_t size, 488static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
489 struct ins_operands *ops) 489 struct ins_operands *ops)
490{ 490{
491 return scnprintf(bf, size, "%-6.6s %s", ins->name, 491 return scnprintf(bf, size, "%-6s %s", ins->name,
492 ops->target.name ?: ops->target.raw); 492 ops->target.name ?: ops->target.raw);
493} 493}
494 494
@@ -500,7 +500,7 @@ static struct ins_ops dec_ops = {
500static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size, 500static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
501 struct ins_operands *ops __maybe_unused) 501 struct ins_operands *ops __maybe_unused)
502{ 502{
503 return scnprintf(bf, size, "%-6.6s", "nop"); 503 return scnprintf(bf, size, "%-6s", "nop");
504} 504}
505 505
506static struct ins_ops nop_ops = { 506static struct ins_ops nop_ops = {
@@ -924,7 +924,7 @@ void disasm_line__free(struct disasm_line *dl)
924int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw) 924int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
925{ 925{
926 if (raw || !dl->ins.ops) 926 if (raw || !dl->ins.ops)
927 return scnprintf(bf, size, "%-6.6s %s", dl->ins.name, dl->ops.raw); 927 return scnprintf(bf, size, "%-6s %s", dl->ins.name, dl->ops.raw);
928 928
929 return ins__scnprintf(&dl->ins, bf, size, &dl->ops); 929 return ins__scnprintf(&dl->ins, bf, size, &dl->ops);
930} 930}
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index c6c891e154a6..b62e523a7035 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -257,7 +257,7 @@ int perf_evlist__add_dummy(struct perf_evlist *evlist)
257 .config = PERF_COUNT_SW_DUMMY, 257 .config = PERF_COUNT_SW_DUMMY,
258 .size = sizeof(attr), /* to capture ABI version */ 258 .size = sizeof(attr), /* to capture ABI version */
259 }; 259 };
260 struct perf_evsel *evsel = perf_evsel__new(&attr); 260 struct perf_evsel *evsel = perf_evsel__new_idx(&attr, evlist->nr_entries);
261 261
262 if (evsel == NULL) 262 if (evsel == NULL)
263 return -ENOMEM; 263 return -ENOMEM;
@@ -1786,3 +1786,15 @@ void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
1786state_err: 1786state_err:
1787 return; 1787 return;
1788} 1788}
1789
1790bool perf_evlist__exclude_kernel(struct perf_evlist *evlist)
1791{
1792 struct perf_evsel *evsel;
1793
1794 evlist__for_each_entry(evlist, evsel) {
1795 if (!evsel->attr.exclude_kernel)
1796 return false;
1797 }
1798
1799 return true;
1800}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index e72ae64c11ac..491f69542920 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -312,4 +312,6 @@ perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str);
312 312
313struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist, 313struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
314 union perf_event *event); 314 union perf_event *event);
315
316bool perf_evlist__exclude_kernel(struct perf_evlist *evlist);
315#endif /* __PERF_EVLIST_H */ 317#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index f894893c203d..d5fbcf8c7aa7 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -733,12 +733,16 @@ static void apply_config_terms(struct perf_evsel *evsel,
733 list_for_each_entry(term, config_terms, list) { 733 list_for_each_entry(term, config_terms, list) {
734 switch (term->type) { 734 switch (term->type) {
735 case PERF_EVSEL__CONFIG_TERM_PERIOD: 735 case PERF_EVSEL__CONFIG_TERM_PERIOD:
736 attr->sample_period = term->val.period; 736 if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
737 attr->freq = 0; 737 attr->sample_period = term->val.period;
738 attr->freq = 0;
739 }
738 break; 740 break;
739 case PERF_EVSEL__CONFIG_TERM_FREQ: 741 case PERF_EVSEL__CONFIG_TERM_FREQ:
740 attr->sample_freq = term->val.freq; 742 if (!(term->weak && opts->user_freq != UINT_MAX)) {
741 attr->freq = 1; 743 attr->sample_freq = term->val.freq;
744 attr->freq = 1;
745 }
742 break; 746 break;
743 case PERF_EVSEL__CONFIG_TERM_TIME: 747 case PERF_EVSEL__CONFIG_TERM_TIME:
744 if (term->val.time) 748 if (term->val.time)
@@ -1371,7 +1375,7 @@ perf_evsel__process_group_data(struct perf_evsel *leader,
1371static int 1375static int
1372perf_evsel__read_group(struct perf_evsel *leader, int cpu, int thread) 1376perf_evsel__read_group(struct perf_evsel *leader, int cpu, int thread)
1373{ 1377{
1374 struct perf_stat_evsel *ps = leader->priv; 1378 struct perf_stat_evsel *ps = leader->stats;
1375 u64 read_format = leader->attr.read_format; 1379 u64 read_format = leader->attr.read_format;
1376 int size = perf_evsel__read_size(leader); 1380 int size = perf_evsel__read_size(leader);
1377 u64 *data = ps->group_data; 1381 u64 *data = ps->group_data;
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 9277df96ffda..157f49e8a772 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -67,6 +67,7 @@ struct perf_evsel_config_term {
67 bool overwrite; 67 bool overwrite;
68 char *branch; 68 char *branch;
69 } val; 69 } val;
70 bool weak;
70}; 71};
71 72
72struct perf_stat_evsel; 73struct perf_stat_evsel;
diff --git a/tools/perf/util/intel-pt-decoder/inat.h b/tools/perf/util/intel-pt-decoder/inat.h
index 125ecd2a300d..52dc8d911173 100644
--- a/tools/perf/util/intel-pt-decoder/inat.h
+++ b/tools/perf/util/intel-pt-decoder/inat.h
@@ -97,6 +97,16 @@
97#define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM) 97#define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM)
98#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS) 98#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS)
99 99
100/* Identifiers for segment registers */
101#define INAT_SEG_REG_IGNORE 0
102#define INAT_SEG_REG_DEFAULT 1
103#define INAT_SEG_REG_CS 2
104#define INAT_SEG_REG_SS 3
105#define INAT_SEG_REG_DS 4
106#define INAT_SEG_REG_ES 5
107#define INAT_SEG_REG_FS 6
108#define INAT_SEG_REG_GS 7
109
100/* Attribute search APIs */ 110/* Attribute search APIs */
101extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode); 111extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
102extern int inat_get_last_prefix_id(insn_byte_t last_pfx); 112extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
diff --git a/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt b/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
index 12e377184ee4..e0b85930dd77 100644
--- a/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
+++ b/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
@@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
607fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1) 607fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
608fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1) 608fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
609fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) 609fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
610ff: 610ff: UD0
611EndTable 611EndTable
612 612
613Table: 3-byte opcode 1 (0x0f 0x38) 613Table: 3-byte opcode 1 (0x0f 0x38)
@@ -717,7 +717,7 @@ AVXcode: 2
7177e: vpermt2d/q Vx,Hx,Wx (66),(ev) 7177e: vpermt2d/q Vx,Hx,Wx (66),(ev)
7187f: vpermt2ps/d Vx,Hx,Wx (66),(ev) 7187f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
71980: INVEPT Gy,Mdq (66) 71980: INVEPT Gy,Mdq (66)
72081: INVPID Gy,Mdq (66) 72081: INVVPID Gy,Mdq (66)
72182: INVPCID Gy,Mdq (66) 72182: INVPCID Gy,Mdq (66)
72283: vpmultishiftqb Vx,Hx,Wx (66),(ev) 72283: vpmultishiftqb Vx,Hx,Wx (66),(ev)
72388: vexpandps/d Vpd,Wpd (66),(ev) 72388: vexpandps/d Vpd,Wpd (66),(ev)
@@ -896,7 +896,7 @@ EndTable
896 896
897GrpTable: Grp3_1 897GrpTable: Grp3_1
8980: TEST Eb,Ib 8980: TEST Eb,Ib
8991: 8991: TEST Eb,Ib
9002: NOT Eb 9002: NOT Eb
9013: NEG Eb 9013: NEG Eb
9024: MUL AL,Eb 9024: MUL AL,Eb
@@ -970,6 +970,15 @@ GrpTable: Grp9
970EndTable 970EndTable
971 971
972GrpTable: Grp10 972GrpTable: Grp10
973# all are UD1
9740: UD1
9751: UD1
9762: UD1
9773: UD1
9784: UD1
9795: UD1
9806: UD1
9817: UD1
973EndTable 982EndTable
974 983
975# Grp11A and Grp11B are expressed as Grp11 in Intel SDM 984# Grp11A and Grp11B are expressed as Grp11 in Intel SDM
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 6a8d03c3d9b7..270f3223c6df 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -172,6 +172,9 @@ void machine__exit(struct machine *machine)
172{ 172{
173 int i; 173 int i;
174 174
175 if (machine == NULL)
176 return;
177
175 machine__destroy_kernel_maps(machine); 178 machine__destroy_kernel_maps(machine);
176 map_groups__exit(&machine->kmaps); 179 map_groups__exit(&machine->kmaps);
177 dsos__exit(&machine->dsos); 180 dsos__exit(&machine->dsos);
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index efd78b827b05..3a5cb5a6e94a 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -70,7 +70,7 @@ void perf_mmap__read_catchup(struct perf_mmap *md);
70static inline u64 perf_mmap__read_head(struct perf_mmap *mm) 70static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
71{ 71{
72 struct perf_event_mmap_page *pc = mm->base; 72 struct perf_event_mmap_page *pc = mm->base;
73 u64 head = ACCESS_ONCE(pc->data_head); 73 u64 head = READ_ONCE(pc->data_head);
74 rmb(); 74 rmb();
75 return head; 75 return head;
76} 76}
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index a7fcd95961ef..170316795a18 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1116,6 +1116,7 @@ do { \
1116 INIT_LIST_HEAD(&__t->list); \ 1116 INIT_LIST_HEAD(&__t->list); \
1117 __t->type = PERF_EVSEL__CONFIG_TERM_ ## __type; \ 1117 __t->type = PERF_EVSEL__CONFIG_TERM_ ## __type; \
1118 __t->val.__name = __val; \ 1118 __t->val.__name = __val; \
1119 __t->weak = term->weak; \
1119 list_add_tail(&__t->list, head_terms); \ 1120 list_add_tail(&__t->list, head_terms); \
1120} while (0) 1121} while (0)
1121 1122
@@ -2410,6 +2411,7 @@ static int new_term(struct parse_events_term **_term,
2410 2411
2411 *term = *temp; 2412 *term = *temp;
2412 INIT_LIST_HEAD(&term->list); 2413 INIT_LIST_HEAD(&term->list);
2414 term->weak = false;
2413 2415
2414 switch (term->type_val) { 2416 switch (term->type_val) {
2415 case PARSE_EVENTS__TERM_TYPE_NUM: 2417 case PARSE_EVENTS__TERM_TYPE_NUM:
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index be337c266697..88108cd11b4c 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -101,6 +101,9 @@ struct parse_events_term {
101 /* error string indexes for within parsed string */ 101 /* error string indexes for within parsed string */
102 int err_term; 102 int err_term;
103 int err_val; 103 int err_val;
104
105 /* Coming from implicit alias */
106 bool weak;
104}; 107};
105 108
106struct parse_events_error { 109struct parse_events_error {
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 07cb2ac041d7..80fb1593913a 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -405,6 +405,11 @@ static int pmu_alias_terms(struct perf_pmu_alias *alias,
405 parse_events_terms__purge(&list); 405 parse_events_terms__purge(&list);
406 return ret; 406 return ret;
407 } 407 }
408 /*
409 * Weak terms don't override command line options,
410 * which we don't want for implicit terms in aliases.
411 */
412 cloned->weak = true;
408 list_add_tail(&cloned->list, &list); 413 list_add_tail(&cloned->list, &list);
409 } 414 }
410 list_splice(&list, terms); 415 list_splice(&list, terms);
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 333a48655ee0..9316e648a880 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -1,4 +1,5 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2
2LIBDIR := ../../../lib 3LIBDIR := ../../../lib
3BPFDIR := $(LIBDIR)/bpf 4BPFDIR := $(LIBDIR)/bpf
4APIDIR := ../../../include/uapi 5APIDIR := ../../../include/uapi
@@ -10,7 +11,7 @@ ifneq ($(wildcard $(GENHDR)),)
10endif 11endif
11 12
12CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include 13CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
13LDLIBS += -lcap -lelf 14LDLIBS += -lcap -lelf -lrt
14 15
15TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ 16TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
16 test_align test_verifier_log test_dev_cgroup 17 test_align test_verifier_log test_dev_cgroup
@@ -38,7 +39,7 @@ $(BPFOBJ): force
38CLANG ?= clang 39CLANG ?= clang
39LLC ?= llc 40LLC ?= llc
40 41
41PROBE := $(shell llc -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1) 42PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
42 43
43# Let newer LLVM versions transparently probe the kernel for availability 44# Let newer LLVM versions transparently probe the kernel for availability
44# of full BPF instruction set. 45# of full BPF instruction set.
diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c
index 8591c89c0828..471bbbdb94db 100644
--- a/tools/testing/selftests/bpf/test_align.c
+++ b/tools/testing/selftests/bpf/test_align.c
@@ -474,27 +474,7 @@ static struct bpf_align_test tests[] = {
474 .result = REJECT, 474 .result = REJECT,
475 .matches = { 475 .matches = {
476 {4, "R5=pkt(id=0,off=0,r=0,imm=0)"}, 476 {4, "R5=pkt(id=0,off=0,r=0,imm=0)"},
477 /* ptr & 0x40 == either 0 or 0x40 */ 477 /* R5 bitwise operator &= on pointer prohibited */
478 {5, "R5=inv(id=0,umax_value=64,var_off=(0x0; 0x40))"},
479 /* ptr << 2 == unknown, (4n) */
480 {7, "R5=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
481 /* (4n) + 14 == (4n+2). We blow our bounds, because
482 * the add could overflow.
483 */
484 {8, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
485 /* Checked s>=0 */
486 {10, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
487 /* packet pointer + nonnegative (4n+2) */
488 {12, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
489 {14, "R4=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
490 /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
491 * We checked the bounds, but it might have been able
492 * to overflow if the packet pointer started in the
493 * upper half of the address space.
494 * So we did not get a 'range' on R6, and the access
495 * attempt will fail.
496 */
497 {16, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
498 } 478 }
499 }, 479 },
500 { 480 {
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 69427531408d..6761be18a91f 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -351,7 +351,7 @@ static void test_bpf_obj_id(void)
351 info_len != sizeof(struct bpf_map_info) || 351 info_len != sizeof(struct bpf_map_info) ||
352 strcmp((char *)map_infos[i].name, expected_map_name), 352 strcmp((char *)map_infos[i].name, expected_map_name),
353 "get-map-info(fd)", 353 "get-map-info(fd)",
354 "err %d errno %d type %d(%d) info_len %u(%lu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n", 354 "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
355 err, errno, 355 err, errno,
356 map_infos[i].type, BPF_MAP_TYPE_ARRAY, 356 map_infos[i].type, BPF_MAP_TYPE_ARRAY,
357 info_len, sizeof(struct bpf_map_info), 357 info_len, sizeof(struct bpf_map_info),
@@ -395,7 +395,7 @@ static void test_bpf_obj_id(void)
395 *(int *)prog_infos[i].map_ids != map_infos[i].id || 395 *(int *)prog_infos[i].map_ids != map_infos[i].id ||
396 strcmp((char *)prog_infos[i].name, expected_prog_name), 396 strcmp((char *)prog_infos[i].name, expected_prog_name),
397 "get-prog-info(fd)", 397 "get-prog-info(fd)",
398 "err %d errno %d i %d type %d(%d) info_len %u(%lu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n", 398 "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
399 err, errno, i, 399 err, errno, i,
400 prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER, 400 prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
401 info_len, sizeof(struct bpf_prog_info), 401 info_len, sizeof(struct bpf_prog_info),
@@ -463,7 +463,7 @@ static void test_bpf_obj_id(void)
463 memcmp(&prog_info, &prog_infos[i], info_len) || 463 memcmp(&prog_info, &prog_infos[i], info_len) ||
464 *(int *)prog_info.map_ids != saved_map_id, 464 *(int *)prog_info.map_ids != saved_map_id,
465 "get-prog-info(next_id->fd)", 465 "get-prog-info(next_id->fd)",
466 "err %d errno %d info_len %u(%lu) memcmp %d map_id %u(%u)\n", 466 "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
467 err, errno, info_len, sizeof(struct bpf_prog_info), 467 err, errno, info_len, sizeof(struct bpf_prog_info),
468 memcmp(&prog_info, &prog_infos[i], info_len), 468 memcmp(&prog_info, &prog_infos[i], info_len),
469 *(int *)prog_info.map_ids, saved_map_id); 469 *(int *)prog_info.map_ids, saved_map_id);
@@ -509,7 +509,7 @@ static void test_bpf_obj_id(void)
509 memcmp(&map_info, &map_infos[i], info_len) || 509 memcmp(&map_info, &map_infos[i], info_len) ||
510 array_value != array_magic_value, 510 array_value != array_magic_value,
511 "check get-map-info(next_id->fd)", 511 "check get-map-info(next_id->fd)",
512 "err %d errno %d info_len %u(%lu) memcmp %d array_value %llu(%llu)\n", 512 "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
513 err, errno, info_len, sizeof(struct bpf_map_info), 513 err, errno, info_len, sizeof(struct bpf_map_info),
514 memcmp(&map_info, &map_infos[i], info_len), 514 memcmp(&map_info, &map_infos[i], info_len),
515 array_value, array_magic_value); 515 array_value, array_magic_value);
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 3c64f30cf63c..5ed4175c4ff8 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -273,6 +273,46 @@ static struct bpf_test tests[] = {
273 .result = REJECT, 273 .result = REJECT,
274 }, 274 },
275 { 275 {
276 "arsh32 on imm",
277 .insns = {
278 BPF_MOV64_IMM(BPF_REG_0, 1),
279 BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
280 BPF_EXIT_INSN(),
281 },
282 .result = REJECT,
283 .errstr = "BPF_ARSH not supported for 32 bit ALU",
284 },
285 {
286 "arsh32 on reg",
287 .insns = {
288 BPF_MOV64_IMM(BPF_REG_0, 1),
289 BPF_MOV64_IMM(BPF_REG_1, 5),
290 BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
291 BPF_EXIT_INSN(),
292 },
293 .result = REJECT,
294 .errstr = "BPF_ARSH not supported for 32 bit ALU",
295 },
296 {
297 "arsh64 on imm",
298 .insns = {
299 BPF_MOV64_IMM(BPF_REG_0, 1),
300 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
301 BPF_EXIT_INSN(),
302 },
303 .result = ACCEPT,
304 },
305 {
306 "arsh64 on reg",
307 .insns = {
308 BPF_MOV64_IMM(BPF_REG_0, 1),
309 BPF_MOV64_IMM(BPF_REG_1, 5),
310 BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
311 BPF_EXIT_INSN(),
312 },
313 .result = ACCEPT,
314 },
315 {
276 "no bpf_exit", 316 "no bpf_exit",
277 .insns = { 317 .insns = {
278 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2), 318 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
@@ -422,9 +462,7 @@ static struct bpf_test tests[] = {
422 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), 462 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
423 BPF_EXIT_INSN(), 463 BPF_EXIT_INSN(),
424 }, 464 },
425 .errstr_unpriv = "R1 subtraction from stack pointer", 465 .errstr = "R1 subtraction from stack pointer",
426 .result_unpriv = REJECT,
427 .errstr = "R1 invalid mem access",
428 .result = REJECT, 466 .result = REJECT,
429 }, 467 },
430 { 468 {
@@ -606,7 +644,6 @@ static struct bpf_test tests[] = {
606 }, 644 },
607 .errstr = "misaligned stack access", 645 .errstr = "misaligned stack access",
608 .result = REJECT, 646 .result = REJECT,
609 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
610 }, 647 },
611 { 648 {
612 "invalid map_fd for function call", 649 "invalid map_fd for function call",
@@ -1797,7 +1834,6 @@ static struct bpf_test tests[] = {
1797 }, 1834 },
1798 .result = REJECT, 1835 .result = REJECT,
1799 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8", 1836 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
1800 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1801 }, 1837 },
1802 { 1838 {
1803 "PTR_TO_STACK store/load - bad alignment on reg", 1839 "PTR_TO_STACK store/load - bad alignment on reg",
@@ -1810,7 +1846,6 @@ static struct bpf_test tests[] = {
1810 }, 1846 },
1811 .result = REJECT, 1847 .result = REJECT,
1812 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8", 1848 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
1813 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1814 }, 1849 },
1815 { 1850 {
1816 "PTR_TO_STACK store/load - out of bounds low", 1851 "PTR_TO_STACK store/load - out of bounds low",
@@ -1862,9 +1897,8 @@ static struct bpf_test tests[] = {
1862 BPF_MOV64_IMM(BPF_REG_0, 0), 1897 BPF_MOV64_IMM(BPF_REG_0, 0),
1863 BPF_EXIT_INSN(), 1898 BPF_EXIT_INSN(),
1864 }, 1899 },
1865 .result = ACCEPT, 1900 .result = REJECT,
1866 .result_unpriv = REJECT, 1901 .errstr = "R1 pointer += pointer",
1867 .errstr_unpriv = "R1 pointer += pointer",
1868 }, 1902 },
1869 { 1903 {
1870 "unpriv: neg pointer", 1904 "unpriv: neg pointer",
@@ -2559,6 +2593,29 @@ static struct bpf_test tests[] = {
2559 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2593 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2560 }, 2594 },
2561 { 2595 {
2596 "context stores via ST",
2597 .insns = {
2598 BPF_MOV64_IMM(BPF_REG_0, 0),
2599 BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
2600 BPF_EXIT_INSN(),
2601 },
2602 .errstr = "BPF_ST stores into R1 context is not allowed",
2603 .result = REJECT,
2604 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2605 },
2606 {
2607 "context stores via XADD",
2608 .insns = {
2609 BPF_MOV64_IMM(BPF_REG_0, 0),
2610 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
2611 BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
2612 BPF_EXIT_INSN(),
2613 },
2614 .errstr = "BPF_XADD stores into R1 context is not allowed",
2615 .result = REJECT,
2616 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2617 },
2618 {
2562 "direct packet access: test1", 2619 "direct packet access: test1",
2563 .insns = { 2620 .insns = {
2564 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2621 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -2592,7 +2649,8 @@ static struct bpf_test tests[] = {
2592 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2649 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2593 offsetof(struct __sk_buff, data)), 2650 offsetof(struct __sk_buff, data)),
2594 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4), 2651 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
2595 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), 2652 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2653 offsetof(struct __sk_buff, len)),
2596 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49), 2654 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
2597 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49), 2655 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
2598 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2), 2656 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
@@ -2899,7 +2957,7 @@ static struct bpf_test tests[] = {
2899 BPF_MOV64_IMM(BPF_REG_0, 0), 2957 BPF_MOV64_IMM(BPF_REG_0, 0),
2900 BPF_EXIT_INSN(), 2958 BPF_EXIT_INSN(),
2901 }, 2959 },
2902 .errstr = "invalid access to packet", 2960 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
2903 .result = REJECT, 2961 .result = REJECT,
2904 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2962 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2905 }, 2963 },
@@ -3885,9 +3943,7 @@ static struct bpf_test tests[] = {
3885 BPF_EXIT_INSN(), 3943 BPF_EXIT_INSN(),
3886 }, 3944 },
3887 .fixup_map2 = { 3, 11 }, 3945 .fixup_map2 = { 3, 11 },
3888 .errstr_unpriv = "R0 pointer += pointer", 3946 .errstr = "R0 pointer += pointer",
3889 .errstr = "R0 invalid mem access 'inv'",
3890 .result_unpriv = REJECT,
3891 .result = REJECT, 3947 .result = REJECT,
3892 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 3948 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3893 }, 3949 },
@@ -3928,7 +3984,7 @@ static struct bpf_test tests[] = {
3928 BPF_EXIT_INSN(), 3984 BPF_EXIT_INSN(),
3929 }, 3985 },
3930 .fixup_map1 = { 4 }, 3986 .fixup_map1 = { 4 },
3931 .errstr = "R4 invalid mem access", 3987 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
3932 .result = REJECT, 3988 .result = REJECT,
3933 .prog_type = BPF_PROG_TYPE_SCHED_CLS 3989 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3934 }, 3990 },
@@ -3949,7 +4005,7 @@ static struct bpf_test tests[] = {
3949 BPF_EXIT_INSN(), 4005 BPF_EXIT_INSN(),
3950 }, 4006 },
3951 .fixup_map1 = { 4 }, 4007 .fixup_map1 = { 4 },
3952 .errstr = "R4 invalid mem access", 4008 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
3953 .result = REJECT, 4009 .result = REJECT,
3954 .prog_type = BPF_PROG_TYPE_SCHED_CLS 4010 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3955 }, 4011 },
@@ -3970,7 +4026,7 @@ static struct bpf_test tests[] = {
3970 BPF_EXIT_INSN(), 4026 BPF_EXIT_INSN(),
3971 }, 4027 },
3972 .fixup_map1 = { 4 }, 4028 .fixup_map1 = { 4 },
3973 .errstr = "R4 invalid mem access", 4029 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
3974 .result = REJECT, 4030 .result = REJECT,
3975 .prog_type = BPF_PROG_TYPE_SCHED_CLS 4031 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3976 }, 4032 },
@@ -4279,7 +4335,8 @@ static struct bpf_test tests[] = {
4279 .fixup_map1 = { 2 }, 4335 .fixup_map1 = { 2 },
4280 .errstr_unpriv = "R2 leaks addr into mem", 4336 .errstr_unpriv = "R2 leaks addr into mem",
4281 .result_unpriv = REJECT, 4337 .result_unpriv = REJECT,
4282 .result = ACCEPT, 4338 .result = REJECT,
4339 .errstr = "BPF_XADD stores into R1 context is not allowed",
4283 }, 4340 },
4284 { 4341 {
4285 "leak pointer into ctx 2", 4342 "leak pointer into ctx 2",
@@ -4293,7 +4350,8 @@ static struct bpf_test tests[] = {
4293 }, 4350 },
4294 .errstr_unpriv = "R10 leaks addr into mem", 4351 .errstr_unpriv = "R10 leaks addr into mem",
4295 .result_unpriv = REJECT, 4352 .result_unpriv = REJECT,
4296 .result = ACCEPT, 4353 .result = REJECT,
4354 .errstr = "BPF_XADD stores into R1 context is not allowed",
4297 }, 4355 },
4298 { 4356 {
4299 "leak pointer into ctx 3", 4357 "leak pointer into ctx 3",
@@ -5195,10 +5253,8 @@ static struct bpf_test tests[] = {
5195 BPF_EXIT_INSN(), 5253 BPF_EXIT_INSN(),
5196 }, 5254 },
5197 .fixup_map2 = { 3 }, 5255 .fixup_map2 = { 3 },
5198 .errstr_unpriv = "R0 bitwise operator &= on pointer", 5256 .errstr = "R0 bitwise operator &= on pointer",
5199 .errstr = "invalid mem access 'inv'",
5200 .result = REJECT, 5257 .result = REJECT,
5201 .result_unpriv = REJECT,
5202 }, 5258 },
5203 { 5259 {
5204 "map element value illegal alu op, 2", 5260 "map element value illegal alu op, 2",
@@ -5214,10 +5270,8 @@ static struct bpf_test tests[] = {
5214 BPF_EXIT_INSN(), 5270 BPF_EXIT_INSN(),
5215 }, 5271 },
5216 .fixup_map2 = { 3 }, 5272 .fixup_map2 = { 3 },
5217 .errstr_unpriv = "R0 32-bit pointer arithmetic prohibited", 5273 .errstr = "R0 32-bit pointer arithmetic prohibited",
5218 .errstr = "invalid mem access 'inv'",
5219 .result = REJECT, 5274 .result = REJECT,
5220 .result_unpriv = REJECT,
5221 }, 5275 },
5222 { 5276 {
5223 "map element value illegal alu op, 3", 5277 "map element value illegal alu op, 3",
@@ -5233,10 +5287,8 @@ static struct bpf_test tests[] = {
5233 BPF_EXIT_INSN(), 5287 BPF_EXIT_INSN(),
5234 }, 5288 },
5235 .fixup_map2 = { 3 }, 5289 .fixup_map2 = { 3 },
5236 .errstr_unpriv = "R0 pointer arithmetic with /= operator", 5290 .errstr = "R0 pointer arithmetic with /= operator",
5237 .errstr = "invalid mem access 'inv'",
5238 .result = REJECT, 5291 .result = REJECT,
5239 .result_unpriv = REJECT,
5240 }, 5292 },
5241 { 5293 {
5242 "map element value illegal alu op, 4", 5294 "map element value illegal alu op, 4",
@@ -6019,8 +6071,7 @@ static struct bpf_test tests[] = {
6019 BPF_EXIT_INSN(), 6071 BPF_EXIT_INSN(),
6020 }, 6072 },
6021 .fixup_map_in_map = { 3 }, 6073 .fixup_map_in_map = { 3 },
6022 .errstr = "R1 type=inv expected=map_ptr", 6074 .errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
6023 .errstr_unpriv = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
6024 .result = REJECT, 6075 .result = REJECT,
6025 }, 6076 },
6026 { 6077 {
@@ -6117,6 +6168,30 @@ static struct bpf_test tests[] = {
6117 .result = ACCEPT, 6168 .result = ACCEPT,
6118 }, 6169 },
6119 { 6170 {
6171 "ld_abs: tests on r6 and skb data reload helper",
6172 .insns = {
6173 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6174 BPF_LD_ABS(BPF_B, 0),
6175 BPF_LD_ABS(BPF_H, 0),
6176 BPF_LD_ABS(BPF_W, 0),
6177 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
6178 BPF_MOV64_IMM(BPF_REG_6, 0),
6179 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
6180 BPF_MOV64_IMM(BPF_REG_2, 1),
6181 BPF_MOV64_IMM(BPF_REG_3, 2),
6182 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6183 BPF_FUNC_skb_vlan_push),
6184 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
6185 BPF_LD_ABS(BPF_B, 0),
6186 BPF_LD_ABS(BPF_H, 0),
6187 BPF_LD_ABS(BPF_W, 0),
6188 BPF_MOV64_IMM(BPF_REG_0, 42),
6189 BPF_EXIT_INSN(),
6190 },
6191 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6192 .result = ACCEPT,
6193 },
6194 {
6120 "ld_ind: check calling conv, r1", 6195 "ld_ind: check calling conv, r1",
6121 .insns = { 6196 .insns = {
6122 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 6197 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
@@ -6300,7 +6375,7 @@ static struct bpf_test tests[] = {
6300 BPF_EXIT_INSN(), 6375 BPF_EXIT_INSN(),
6301 }, 6376 },
6302 .fixup_map1 = { 3 }, 6377 .fixup_map1 = { 3 },
6303 .errstr = "R0 min value is negative", 6378 .errstr = "unbounded min value",
6304 .result = REJECT, 6379 .result = REJECT,
6305 }, 6380 },
6306 { 6381 {
@@ -6324,7 +6399,7 @@ static struct bpf_test tests[] = {
6324 BPF_EXIT_INSN(), 6399 BPF_EXIT_INSN(),
6325 }, 6400 },
6326 .fixup_map1 = { 3 }, 6401 .fixup_map1 = { 3 },
6327 .errstr = "R0 min value is negative", 6402 .errstr = "unbounded min value",
6328 .result = REJECT, 6403 .result = REJECT,
6329 }, 6404 },
6330 { 6405 {
@@ -6350,7 +6425,7 @@ static struct bpf_test tests[] = {
6350 BPF_EXIT_INSN(), 6425 BPF_EXIT_INSN(),
6351 }, 6426 },
6352 .fixup_map1 = { 3 }, 6427 .fixup_map1 = { 3 },
6353 .errstr = "R8 invalid mem access 'inv'", 6428 .errstr = "unbounded min value",
6354 .result = REJECT, 6429 .result = REJECT,
6355 }, 6430 },
6356 { 6431 {
@@ -6375,7 +6450,7 @@ static struct bpf_test tests[] = {
6375 BPF_EXIT_INSN(), 6450 BPF_EXIT_INSN(),
6376 }, 6451 },
6377 .fixup_map1 = { 3 }, 6452 .fixup_map1 = { 3 },
6378 .errstr = "R8 invalid mem access 'inv'", 6453 .errstr = "unbounded min value",
6379 .result = REJECT, 6454 .result = REJECT,
6380 }, 6455 },
6381 { 6456 {
@@ -6423,7 +6498,7 @@ static struct bpf_test tests[] = {
6423 BPF_EXIT_INSN(), 6498 BPF_EXIT_INSN(),
6424 }, 6499 },
6425 .fixup_map1 = { 3 }, 6500 .fixup_map1 = { 3 },
6426 .errstr = "R0 min value is negative", 6501 .errstr = "unbounded min value",
6427 .result = REJECT, 6502 .result = REJECT,
6428 }, 6503 },
6429 { 6504 {
@@ -6494,7 +6569,7 @@ static struct bpf_test tests[] = {
6494 BPF_EXIT_INSN(), 6569 BPF_EXIT_INSN(),
6495 }, 6570 },
6496 .fixup_map1 = { 3 }, 6571 .fixup_map1 = { 3 },
6497 .errstr = "R0 min value is negative", 6572 .errstr = "unbounded min value",
6498 .result = REJECT, 6573 .result = REJECT,
6499 }, 6574 },
6500 { 6575 {
@@ -6545,7 +6620,7 @@ static struct bpf_test tests[] = {
6545 BPF_EXIT_INSN(), 6620 BPF_EXIT_INSN(),
6546 }, 6621 },
6547 .fixup_map1 = { 3 }, 6622 .fixup_map1 = { 3 },
6548 .errstr = "R0 min value is negative", 6623 .errstr = "unbounded min value",
6549 .result = REJECT, 6624 .result = REJECT,
6550 }, 6625 },
6551 { 6626 {
@@ -6572,7 +6647,7 @@ static struct bpf_test tests[] = {
6572 BPF_EXIT_INSN(), 6647 BPF_EXIT_INSN(),
6573 }, 6648 },
6574 .fixup_map1 = { 3 }, 6649 .fixup_map1 = { 3 },
6575 .errstr = "R0 min value is negative", 6650 .errstr = "unbounded min value",
6576 .result = REJECT, 6651 .result = REJECT,
6577 }, 6652 },
6578 { 6653 {
@@ -6598,7 +6673,7 @@ static struct bpf_test tests[] = {
6598 BPF_EXIT_INSN(), 6673 BPF_EXIT_INSN(),
6599 }, 6674 },
6600 .fixup_map1 = { 3 }, 6675 .fixup_map1 = { 3 },
6601 .errstr = "R0 min value is negative", 6676 .errstr = "unbounded min value",
6602 .result = REJECT, 6677 .result = REJECT,
6603 }, 6678 },
6604 { 6679 {
@@ -6627,7 +6702,7 @@ static struct bpf_test tests[] = {
6627 BPF_EXIT_INSN(), 6702 BPF_EXIT_INSN(),
6628 }, 6703 },
6629 .fixup_map1 = { 3 }, 6704 .fixup_map1 = { 3 },
6630 .errstr = "R0 min value is negative", 6705 .errstr = "unbounded min value",
6631 .result = REJECT, 6706 .result = REJECT,
6632 }, 6707 },
6633 { 6708 {
@@ -6657,7 +6732,7 @@ static struct bpf_test tests[] = {
6657 BPF_JMP_IMM(BPF_JA, 0, 0, -7), 6732 BPF_JMP_IMM(BPF_JA, 0, 0, -7),
6658 }, 6733 },
6659 .fixup_map1 = { 4 }, 6734 .fixup_map1 = { 4 },
6660 .errstr = "R0 min value is negative", 6735 .errstr = "R0 invalid mem access 'inv'",
6661 .result = REJECT, 6736 .result = REJECT,
6662 }, 6737 },
6663 { 6738 {
@@ -6685,8 +6760,7 @@ static struct bpf_test tests[] = {
6685 BPF_EXIT_INSN(), 6760 BPF_EXIT_INSN(),
6686 }, 6761 },
6687 .fixup_map1 = { 3 }, 6762 .fixup_map1 = { 3 },
6688 .errstr_unpriv = "R0 pointer comparison prohibited", 6763 .errstr = "unbounded min value",
6689 .errstr = "R0 min value is negative",
6690 .result = REJECT, 6764 .result = REJECT,
6691 .result_unpriv = REJECT, 6765 .result_unpriv = REJECT,
6692 }, 6766 },
@@ -6742,6 +6816,462 @@ static struct bpf_test tests[] = {
6742 .result = REJECT, 6816 .result = REJECT,
6743 }, 6817 },
6744 { 6818 {
6819 "bounds check based on zero-extended MOV",
6820 .insns = {
6821 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6822 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6823 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6824 BPF_LD_MAP_FD(BPF_REG_1, 0),
6825 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6826 BPF_FUNC_map_lookup_elem),
6827 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6828 /* r2 = 0x0000'0000'ffff'ffff */
6829 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
6830 /* r2 = 0 */
6831 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
6832 /* no-op */
6833 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
6834 /* access at offset 0 */
6835 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6836 /* exit */
6837 BPF_MOV64_IMM(BPF_REG_0, 0),
6838 BPF_EXIT_INSN(),
6839 },
6840 .fixup_map1 = { 3 },
6841 .result = ACCEPT
6842 },
6843 {
6844 "bounds check based on sign-extended MOV. test1",
6845 .insns = {
6846 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6847 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6848 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6849 BPF_LD_MAP_FD(BPF_REG_1, 0),
6850 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6851 BPF_FUNC_map_lookup_elem),
6852 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6853 /* r2 = 0xffff'ffff'ffff'ffff */
6854 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
6855 /* r2 = 0xffff'ffff */
6856 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
6857 /* r0 = <oob pointer> */
6858 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
6859 /* access to OOB pointer */
6860 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6861 /* exit */
6862 BPF_MOV64_IMM(BPF_REG_0, 0),
6863 BPF_EXIT_INSN(),
6864 },
6865 .fixup_map1 = { 3 },
6866 .errstr = "map_value pointer and 4294967295",
6867 .result = REJECT
6868 },
6869 {
6870 "bounds check based on sign-extended MOV. test2",
6871 .insns = {
6872 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6873 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6874 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6875 BPF_LD_MAP_FD(BPF_REG_1, 0),
6876 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6877 BPF_FUNC_map_lookup_elem),
6878 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6879 /* r2 = 0xffff'ffff'ffff'ffff */
6880 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
6881 /* r2 = 0xfff'ffff */
6882 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
6883 /* r0 = <oob pointer> */
6884 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
6885 /* access to OOB pointer */
6886 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6887 /* exit */
6888 BPF_MOV64_IMM(BPF_REG_0, 0),
6889 BPF_EXIT_INSN(),
6890 },
6891 .fixup_map1 = { 3 },
6892 .errstr = "R0 min value is outside of the array range",
6893 .result = REJECT
6894 },
6895 {
6896 "bounds check based on reg_off + var_off + insn_off. test1",
6897 .insns = {
6898 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
6899 offsetof(struct __sk_buff, mark)),
6900 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6901 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6902 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6903 BPF_LD_MAP_FD(BPF_REG_1, 0),
6904 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6905 BPF_FUNC_map_lookup_elem),
6906 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6907 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
6908 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
6909 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
6910 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
6911 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
6912 BPF_MOV64_IMM(BPF_REG_0, 0),
6913 BPF_EXIT_INSN(),
6914 },
6915 .fixup_map1 = { 4 },
6916 .errstr = "value_size=8 off=1073741825",
6917 .result = REJECT,
6918 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6919 },
6920 {
6921 "bounds check based on reg_off + var_off + insn_off. test2",
6922 .insns = {
6923 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
6924 offsetof(struct __sk_buff, mark)),
6925 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6926 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6927 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6928 BPF_LD_MAP_FD(BPF_REG_1, 0),
6929 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6930 BPF_FUNC_map_lookup_elem),
6931 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6932 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
6933 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
6934 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
6935 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
6936 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
6937 BPF_MOV64_IMM(BPF_REG_0, 0),
6938 BPF_EXIT_INSN(),
6939 },
6940 .fixup_map1 = { 4 },
6941 .errstr = "value 1073741823",
6942 .result = REJECT,
6943 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6944 },
6945 {
6946 "bounds check after truncation of non-boundary-crossing range",
6947 .insns = {
6948 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6949 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6950 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6951 BPF_LD_MAP_FD(BPF_REG_1, 0),
6952 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6953 BPF_FUNC_map_lookup_elem),
6954 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6955 /* r1 = [0x00, 0xff] */
6956 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6957 BPF_MOV64_IMM(BPF_REG_2, 1),
6958 /* r2 = 0x10'0000'0000 */
6959 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
6960 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
6961 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
6962 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
6963 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
6964 /* r1 = [0x00, 0xff] */
6965 BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
6966 /* r1 = 0 */
6967 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6968 /* no-op */
6969 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6970 /* access at offset 0 */
6971 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6972 /* exit */
6973 BPF_MOV64_IMM(BPF_REG_0, 0),
6974 BPF_EXIT_INSN(),
6975 },
6976 .fixup_map1 = { 3 },
6977 .result = ACCEPT
6978 },
6979 {
6980 "bounds check after truncation of boundary-crossing range (1)",
6981 .insns = {
6982 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6983 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6984 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6985 BPF_LD_MAP_FD(BPF_REG_1, 0),
6986 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6987 BPF_FUNC_map_lookup_elem),
6988 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6989 /* r1 = [0x00, 0xff] */
6990 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6991 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6992 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
6993 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6994 /* r1 = [0xffff'ff80, 0xffff'ffff] or
6995 * [0x0000'0000, 0x0000'007f]
6996 */
6997 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
6998 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6999 /* r1 = [0x00, 0xff] or
7000 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
7001 */
7002 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7003 /* r1 = 0 or
7004 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
7005 */
7006 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7007 /* no-op or OOB pointer computation */
7008 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7009 /* potentially OOB access */
7010 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7011 /* exit */
7012 BPF_MOV64_IMM(BPF_REG_0, 0),
7013 BPF_EXIT_INSN(),
7014 },
7015 .fixup_map1 = { 3 },
7016 /* not actually fully unbounded, but the bound is very high */
7017 .errstr = "R0 unbounded memory access",
7018 .result = REJECT
7019 },
7020 {
7021 "bounds check after truncation of boundary-crossing range (2)",
7022 .insns = {
7023 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7024 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7025 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7026 BPF_LD_MAP_FD(BPF_REG_1, 0),
7027 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7028 BPF_FUNC_map_lookup_elem),
7029 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7030 /* r1 = [0x00, 0xff] */
7031 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7032 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7033 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
7034 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7035 /* r1 = [0xffff'ff80, 0xffff'ffff] or
7036 * [0x0000'0000, 0x0000'007f]
7037 * difference to previous test: truncation via MOV32
7038 * instead of ALU32.
7039 */
7040 BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
7041 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7042 /* r1 = [0x00, 0xff] or
7043 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
7044 */
7045 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7046 /* r1 = 0 or
7047 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
7048 */
7049 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7050 /* no-op or OOB pointer computation */
7051 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7052 /* potentially OOB access */
7053 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7054 /* exit */
7055 BPF_MOV64_IMM(BPF_REG_0, 0),
7056 BPF_EXIT_INSN(),
7057 },
7058 .fixup_map1 = { 3 },
7059 /* not actually fully unbounded, but the bound is very high */
7060 .errstr = "R0 unbounded memory access",
7061 .result = REJECT
7062 },
7063 {
7064 "bounds check after wrapping 32-bit addition",
7065 .insns = {
7066 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7067 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7068 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7069 BPF_LD_MAP_FD(BPF_REG_1, 0),
7070 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7071 BPF_FUNC_map_lookup_elem),
7072 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7073 /* r1 = 0x7fff'ffff */
7074 BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
7075 /* r1 = 0xffff'fffe */
7076 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7077 /* r1 = 0 */
7078 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
7079 /* no-op */
7080 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7081 /* access at offset 0 */
7082 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7083 /* exit */
7084 BPF_MOV64_IMM(BPF_REG_0, 0),
7085 BPF_EXIT_INSN(),
7086 },
7087 .fixup_map1 = { 3 },
7088 .result = ACCEPT
7089 },
7090 {
7091 "bounds check after shift with oversized count operand",
7092 .insns = {
7093 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7094 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7095 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7096 BPF_LD_MAP_FD(BPF_REG_1, 0),
7097 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7098 BPF_FUNC_map_lookup_elem),
7099 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7100 BPF_MOV64_IMM(BPF_REG_2, 32),
7101 BPF_MOV64_IMM(BPF_REG_1, 1),
7102 /* r1 = (u32)1 << (u32)32 = ? */
7103 BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
7104 /* r1 = [0x0000, 0xffff] */
7105 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
7106 /* computes unknown pointer, potentially OOB */
7107 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7108 /* potentially OOB access */
7109 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7110 /* exit */
7111 BPF_MOV64_IMM(BPF_REG_0, 0),
7112 BPF_EXIT_INSN(),
7113 },
7114 .fixup_map1 = { 3 },
7115 .errstr = "R0 max value is outside of the array range",
7116 .result = REJECT
7117 },
7118 {
7119 "bounds check after right shift of maybe-negative number",
7120 .insns = {
7121 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7122 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7123 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7124 BPF_LD_MAP_FD(BPF_REG_1, 0),
7125 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7126 BPF_FUNC_map_lookup_elem),
7127 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7128 /* r1 = [0x00, 0xff] */
7129 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7130 /* r1 = [-0x01, 0xfe] */
7131 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
7132 /* r1 = 0 or 0xff'ffff'ffff'ffff */
7133 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7134 /* r1 = 0 or 0xffff'ffff'ffff */
7135 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7136 /* computes unknown pointer, potentially OOB */
7137 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7138 /* potentially OOB access */
7139 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7140 /* exit */
7141 BPF_MOV64_IMM(BPF_REG_0, 0),
7142 BPF_EXIT_INSN(),
7143 },
7144 .fixup_map1 = { 3 },
7145 .errstr = "R0 unbounded memory access",
7146 .result = REJECT
7147 },
7148 {
7149 "bounds check map access with off+size signed 32bit overflow. test1",
7150 .insns = {
7151 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7152 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7153 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7154 BPF_LD_MAP_FD(BPF_REG_1, 0),
7155 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7156 BPF_FUNC_map_lookup_elem),
7157 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7158 BPF_EXIT_INSN(),
7159 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
7160 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7161 BPF_JMP_A(0),
7162 BPF_EXIT_INSN(),
7163 },
7164 .fixup_map1 = { 3 },
7165 .errstr = "map_value pointer and 2147483646",
7166 .result = REJECT
7167 },
7168 {
7169 "bounds check map access with off+size signed 32bit overflow. test2",
7170 .insns = {
7171 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7172 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7173 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7174 BPF_LD_MAP_FD(BPF_REG_1, 0),
7175 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7176 BPF_FUNC_map_lookup_elem),
7177 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7178 BPF_EXIT_INSN(),
7179 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7180 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7181 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7182 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7183 BPF_JMP_A(0),
7184 BPF_EXIT_INSN(),
7185 },
7186 .fixup_map1 = { 3 },
7187 .errstr = "pointer offset 1073741822",
7188 .result = REJECT
7189 },
7190 {
7191 "bounds check map access with off+size signed 32bit overflow. test3",
7192 .insns = {
7193 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7194 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7195 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7196 BPF_LD_MAP_FD(BPF_REG_1, 0),
7197 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7198 BPF_FUNC_map_lookup_elem),
7199 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7200 BPF_EXIT_INSN(),
7201 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7202 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7203 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7204 BPF_JMP_A(0),
7205 BPF_EXIT_INSN(),
7206 },
7207 .fixup_map1 = { 3 },
7208 .errstr = "pointer offset -1073741822",
7209 .result = REJECT
7210 },
7211 {
7212 "bounds check map access with off+size signed 32bit overflow. test4",
7213 .insns = {
7214 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7215 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7216 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7217 BPF_LD_MAP_FD(BPF_REG_1, 0),
7218 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7219 BPF_FUNC_map_lookup_elem),
7220 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7221 BPF_EXIT_INSN(),
7222 BPF_MOV64_IMM(BPF_REG_1, 1000000),
7223 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
7224 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7225 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7226 BPF_JMP_A(0),
7227 BPF_EXIT_INSN(),
7228 },
7229 .fixup_map1 = { 3 },
7230 .errstr = "map_value pointer and 1000000000000",
7231 .result = REJECT
7232 },
7233 {
7234 "pointer/scalar confusion in state equality check (way 1)",
7235 .insns = {
7236 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7237 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7238 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7239 BPF_LD_MAP_FD(BPF_REG_1, 0),
7240 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7241 BPF_FUNC_map_lookup_elem),
7242 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7243 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7244 BPF_JMP_A(1),
7245 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7246 BPF_JMP_A(0),
7247 BPF_EXIT_INSN(),
7248 },
7249 .fixup_map1 = { 3 },
7250 .result = ACCEPT,
7251 .result_unpriv = REJECT,
7252 .errstr_unpriv = "R0 leaks addr as return value"
7253 },
7254 {
7255 "pointer/scalar confusion in state equality check (way 2)",
7256 .insns = {
7257 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7258 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7259 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7260 BPF_LD_MAP_FD(BPF_REG_1, 0),
7261 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7262 BPF_FUNC_map_lookup_elem),
7263 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
7264 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7265 BPF_JMP_A(1),
7266 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7267 BPF_EXIT_INSN(),
7268 },
7269 .fixup_map1 = { 3 },
7270 .result = ACCEPT,
7271 .result_unpriv = REJECT,
7272 .errstr_unpriv = "R0 leaks addr as return value"
7273 },
7274 {
6745 "variable-offset ctx access", 7275 "variable-offset ctx access",
6746 .insns = { 7276 .insns = {
6747 /* Get an unknown value */ 7277 /* Get an unknown value */
@@ -6783,6 +7313,71 @@ static struct bpf_test tests[] = {
6783 .prog_type = BPF_PROG_TYPE_LWT_IN, 7313 .prog_type = BPF_PROG_TYPE_LWT_IN,
6784 }, 7314 },
6785 { 7315 {
7316 "indirect variable-offset stack access",
7317 .insns = {
7318 /* Fill the top 8 bytes of the stack */
7319 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7320 /* Get an unknown value */
7321 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7322 /* Make it small and 4-byte aligned */
7323 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7324 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
7325 /* add it to fp. We now have either fp-4 or fp-8, but
7326 * we don't know which
7327 */
7328 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
7329 /* dereference it indirectly */
7330 BPF_LD_MAP_FD(BPF_REG_1, 0),
7331 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7332 BPF_FUNC_map_lookup_elem),
7333 BPF_MOV64_IMM(BPF_REG_0, 0),
7334 BPF_EXIT_INSN(),
7335 },
7336 .fixup_map1 = { 5 },
7337 .errstr = "variable stack read R2",
7338 .result = REJECT,
7339 .prog_type = BPF_PROG_TYPE_LWT_IN,
7340 },
7341 {
7342 "direct stack access with 32-bit wraparound. test1",
7343 .insns = {
7344 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7345 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7346 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7347 BPF_MOV32_IMM(BPF_REG_0, 0),
7348 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7349 BPF_EXIT_INSN()
7350 },
7351 .errstr = "fp pointer and 2147483647",
7352 .result = REJECT
7353 },
7354 {
7355 "direct stack access with 32-bit wraparound. test2",
7356 .insns = {
7357 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7358 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7359 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7360 BPF_MOV32_IMM(BPF_REG_0, 0),
7361 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7362 BPF_EXIT_INSN()
7363 },
7364 .errstr = "fp pointer and 1073741823",
7365 .result = REJECT
7366 },
7367 {
7368 "direct stack access with 32-bit wraparound. test3",
7369 .insns = {
7370 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7371 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7372 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7373 BPF_MOV32_IMM(BPF_REG_0, 0),
7374 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7375 BPF_EXIT_INSN()
7376 },
7377 .errstr = "fp pointer offset 1073741822",
7378 .result = REJECT
7379 },
7380 {
6786 "liveness pruning and write screening", 7381 "liveness pruning and write screening",
6787 .insns = { 7382 .insns = {
6788 /* Get an unknown value */ 7383 /* Get an unknown value */
@@ -7104,6 +7699,19 @@ static struct bpf_test tests[] = {
7104 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 7699 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7105 }, 7700 },
7106 { 7701 {
7702 "pkt_end - pkt_start is allowed",
7703 .insns = {
7704 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7705 offsetof(struct __sk_buff, data_end)),
7706 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7707 offsetof(struct __sk_buff, data)),
7708 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
7709 BPF_EXIT_INSN(),
7710 },
7711 .result = ACCEPT,
7712 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7713 },
7714 {
7107 "XDP pkt read, pkt_end mangling, bad access 1", 7715 "XDP pkt read, pkt_end mangling, bad access 1",
7108 .insns = { 7716 .insns = {
7109 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 7717 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -7118,7 +7726,7 @@ static struct bpf_test tests[] = {
7118 BPF_MOV64_IMM(BPF_REG_0, 0), 7726 BPF_MOV64_IMM(BPF_REG_0, 0),
7119 BPF_EXIT_INSN(), 7727 BPF_EXIT_INSN(),
7120 }, 7728 },
7121 .errstr = "R1 offset is outside of the packet", 7729 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
7122 .result = REJECT, 7730 .result = REJECT,
7123 .prog_type = BPF_PROG_TYPE_XDP, 7731 .prog_type = BPF_PROG_TYPE_XDP,
7124 }, 7732 },
@@ -7137,7 +7745,7 @@ static struct bpf_test tests[] = {
7137 BPF_MOV64_IMM(BPF_REG_0, 0), 7745 BPF_MOV64_IMM(BPF_REG_0, 0),
7138 BPF_EXIT_INSN(), 7746 BPF_EXIT_INSN(),
7139 }, 7747 },
7140 .errstr = "R1 offset is outside of the packet", 7748 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
7141 .result = REJECT, 7749 .result = REJECT,
7142 .prog_type = BPF_PROG_TYPE_XDP, 7750 .prog_type = BPF_PROG_TYPE_XDP,
7143 }, 7751 },
@@ -8026,6 +8634,127 @@ static struct bpf_test tests[] = {
8026 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 8634 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8027 }, 8635 },
8028 { 8636 {
8637 "check deducing bounds from const, 1",
8638 .insns = {
8639 BPF_MOV64_IMM(BPF_REG_0, 1),
8640 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
8641 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8642 BPF_EXIT_INSN(),
8643 },
8644 .result = REJECT,
8645 .errstr = "R0 tried to subtract pointer from scalar",
8646 },
8647 {
8648 "check deducing bounds from const, 2",
8649 .insns = {
8650 BPF_MOV64_IMM(BPF_REG_0, 1),
8651 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
8652 BPF_EXIT_INSN(),
8653 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
8654 BPF_EXIT_INSN(),
8655 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
8656 BPF_EXIT_INSN(),
8657 },
8658 .result = ACCEPT,
8659 },
8660 {
8661 "check deducing bounds from const, 3",
8662 .insns = {
8663 BPF_MOV64_IMM(BPF_REG_0, 0),
8664 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
8665 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8666 BPF_EXIT_INSN(),
8667 },
8668 .result = REJECT,
8669 .errstr = "R0 tried to subtract pointer from scalar",
8670 },
8671 {
8672 "check deducing bounds from const, 4",
8673 .insns = {
8674 BPF_MOV64_IMM(BPF_REG_0, 0),
8675 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
8676 BPF_EXIT_INSN(),
8677 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
8678 BPF_EXIT_INSN(),
8679 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
8680 BPF_EXIT_INSN(),
8681 },
8682 .result = ACCEPT,
8683 },
8684 {
8685 "check deducing bounds from const, 5",
8686 .insns = {
8687 BPF_MOV64_IMM(BPF_REG_0, 0),
8688 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
8689 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8690 BPF_EXIT_INSN(),
8691 },
8692 .result = REJECT,
8693 .errstr = "R0 tried to subtract pointer from scalar",
8694 },
8695 {
8696 "check deducing bounds from const, 6",
8697 .insns = {
8698 BPF_MOV64_IMM(BPF_REG_0, 0),
8699 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
8700 BPF_EXIT_INSN(),
8701 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8702 BPF_EXIT_INSN(),
8703 },
8704 .result = REJECT,
8705 .errstr = "R0 tried to subtract pointer from scalar",
8706 },
8707 {
8708 "check deducing bounds from const, 7",
8709 .insns = {
8710 BPF_MOV64_IMM(BPF_REG_0, ~0),
8711 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
8712 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
8713 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8714 offsetof(struct __sk_buff, mark)),
8715 BPF_EXIT_INSN(),
8716 },
8717 .result = REJECT,
8718 .errstr = "dereference of modified ctx ptr",
8719 },
8720 {
8721 "check deducing bounds from const, 8",
8722 .insns = {
8723 BPF_MOV64_IMM(BPF_REG_0, ~0),
8724 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
8725 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
8726 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8727 offsetof(struct __sk_buff, mark)),
8728 BPF_EXIT_INSN(),
8729 },
8730 .result = REJECT,
8731 .errstr = "dereference of modified ctx ptr",
8732 },
8733 {
8734 "check deducing bounds from const, 9",
8735 .insns = {
8736 BPF_MOV64_IMM(BPF_REG_0, 0),
8737 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
8738 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8739 BPF_EXIT_INSN(),
8740 },
8741 .result = REJECT,
8742 .errstr = "R0 tried to subtract pointer from scalar",
8743 },
8744 {
8745 "check deducing bounds from const, 10",
8746 .insns = {
8747 BPF_MOV64_IMM(BPF_REG_0, 0),
8748 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
8749 /* Marks reg as unknown. */
8750 BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
8751 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8752 BPF_EXIT_INSN(),
8753 },
8754 .result = REJECT,
8755 .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
8756 },
8757 {
8029 "bpf_exit with invalid return code. test1", 8758 "bpf_exit with invalid return code. test1",
8030 .insns = { 8759 .insns = {
8031 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), 8760 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
diff --git a/tools/testing/selftests/bpf/test_verifier_log.c b/tools/testing/selftests/bpf/test_verifier_log.c
index 3cc0b561489e..e9626cf5607a 100644
--- a/tools/testing/selftests/bpf/test_verifier_log.c
+++ b/tools/testing/selftests/bpf/test_verifier_log.c
@@ -3,6 +3,8 @@
3#include <stdio.h> 3#include <stdio.h>
4#include <string.h> 4#include <string.h>
5#include <unistd.h> 5#include <unistd.h>
6#include <sys/time.h>
7#include <sys/resource.h>
6 8
7#include <linux/bpf.h> 9#include <linux/bpf.h>
8#include <linux/filter.h> 10#include <linux/filter.h>
@@ -131,11 +133,16 @@ static void test_log_bad(char *log, size_t log_len, int log_level)
131 133
132int main(int argc, char **argv) 134int main(int argc, char **argv)
133{ 135{
136 struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
134 char full_log[LOG_SIZE]; 137 char full_log[LOG_SIZE];
135 char log[LOG_SIZE]; 138 char log[LOG_SIZE];
136 size_t want_len; 139 size_t want_len;
137 int i; 140 int i;
138 141
142 /* allow unlimited locked memory to have more consistent error code */
143 if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0)
144 perror("Unable to lift memlock rlimit");
145
139 memset(log, 1, LOG_SIZE); 146 memset(log, 1, LOG_SIZE);
140 147
141 /* Test incorrect attr */ 148 /* Test incorrect attr */
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index e57b4ac40e72..7177bea1fdfa 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -1,3 +1,4 @@
1CONFIG_USER_NS=y 1CONFIG_USER_NS=y
2CONFIG_BPF_SYSCALL=y 2CONFIG_BPF_SYSCALL=y
3CONFIG_TEST_BPF=m 3CONFIG_TEST_BPF=m
4CONFIG_NUMA=y
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 939a337128db..5d4f10ac2af2 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -7,7 +7,7 @@ include ../lib.mk
7 7
8TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \ 8TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \
9 check_initial_reg_state sigreturn ldt_gdt iopl mpx-mini-test ioperm \ 9 check_initial_reg_state sigreturn ldt_gdt iopl mpx-mini-test ioperm \
10 protection_keys test_vdso 10 protection_keys test_vdso test_vsyscall
11TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \ 11TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
12 test_FCMOV test_FCOMI test_FISTTP \ 12 test_FCMOV test_FCOMI test_FISTTP \
13 vdso_restorer 13 vdso_restorer
diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c
index 66e5ce5b91f0..1aef72df20a1 100644
--- a/tools/testing/selftests/x86/ldt_gdt.c
+++ b/tools/testing/selftests/x86/ldt_gdt.c
@@ -122,8 +122,7 @@ static void check_valid_segment(uint16_t index, int ldt,
122 * NB: Different Linux versions do different things with the 122 * NB: Different Linux versions do different things with the
123 * accessed bit in set_thread_area(). 123 * accessed bit in set_thread_area().
124 */ 124 */
125 if (ar != expected_ar && 125 if (ar != expected_ar && ar != (expected_ar | AR_ACCESSED)) {
126 (ldt || ar != (expected_ar | AR_ACCESSED))) {
127 printf("[FAIL]\t%s entry %hu has AR 0x%08X but expected 0x%08X\n", 126 printf("[FAIL]\t%s entry %hu has AR 0x%08X but expected 0x%08X\n",
128 (ldt ? "LDT" : "GDT"), index, ar, expected_ar); 127 (ldt ? "LDT" : "GDT"), index, ar, expected_ar);
129 nerrs++; 128 nerrs++;
@@ -627,13 +626,10 @@ static void do_multicpu_tests(void)
627static int finish_exec_test(void) 626static int finish_exec_test(void)
628{ 627{
629 /* 628 /*
630 * In a sensible world, this would be check_invalid_segment(0, 1); 629 * Older kernel versions did inherit the LDT on exec() which is
631 * For better or for worse, though, the LDT is inherited across exec. 630 * wrong because exec() starts from a clean state.
632 * We can probably change this safely, but for now we test it.
633 */ 631 */
634 check_valid_segment(0, 1, 632 check_invalid_segment(0, 1);
635 AR_DPL3 | AR_TYPE_XRCODE | AR_S | AR_P | AR_DB,
636 42, true);
637 633
638 return nerrs ? 1 : 0; 634 return nerrs ? 1 : 0;
639} 635}
diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
new file mode 100644
index 000000000000..7a744fa7b786
--- /dev/null
+++ b/tools/testing/selftests/x86/test_vsyscall.c
@@ -0,0 +1,500 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#define _GNU_SOURCE
4
5#include <stdio.h>
6#include <sys/time.h>
7#include <time.h>
8#include <stdlib.h>
9#include <sys/syscall.h>
10#include <unistd.h>
11#include <dlfcn.h>
12#include <string.h>
13#include <inttypes.h>
14#include <signal.h>
15#include <sys/ucontext.h>
16#include <errno.h>
17#include <err.h>
18#include <sched.h>
19#include <stdbool.h>
20#include <setjmp.h>
21
22#ifdef __x86_64__
23# define VSYS(x) (x)
24#else
25# define VSYS(x) 0
26#endif
27
28#ifndef SYS_getcpu
29# ifdef __x86_64__
30# define SYS_getcpu 309
31# else
32# define SYS_getcpu 318
33# endif
34#endif
35
36static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
37 int flags)
38{
39 struct sigaction sa;
40 memset(&sa, 0, sizeof(sa));
41 sa.sa_sigaction = handler;
42 sa.sa_flags = SA_SIGINFO | flags;
43 sigemptyset(&sa.sa_mask);
44 if (sigaction(sig, &sa, 0))
45 err(1, "sigaction");
46}
47
48/* vsyscalls and vDSO */
49bool should_read_vsyscall = false;
50
51typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz);
52gtod_t vgtod = (gtod_t)VSYS(0xffffffffff600000);
53gtod_t vdso_gtod;
54
55typedef int (*vgettime_t)(clockid_t, struct timespec *);
56vgettime_t vdso_gettime;
57
58typedef long (*time_func_t)(time_t *t);
59time_func_t vtime = (time_func_t)VSYS(0xffffffffff600400);
60time_func_t vdso_time;
61
62typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
63getcpu_t vgetcpu = (getcpu_t)VSYS(0xffffffffff600800);
64getcpu_t vdso_getcpu;
65
66static void init_vdso(void)
67{
68 void *vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
69 if (!vdso)
70 vdso = dlopen("linux-gate.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
71 if (!vdso) {
72 printf("[WARN]\tfailed to find vDSO\n");
73 return;
74 }
75
76 vdso_gtod = (gtod_t)dlsym(vdso, "__vdso_gettimeofday");
77 if (!vdso_gtod)
78 printf("[WARN]\tfailed to find gettimeofday in vDSO\n");
79
80 vdso_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
81 if (!vdso_gettime)
82 printf("[WARN]\tfailed to find clock_gettime in vDSO\n");
83
84 vdso_time = (time_func_t)dlsym(vdso, "__vdso_time");
85 if (!vdso_time)
86 printf("[WARN]\tfailed to find time in vDSO\n");
87
88 vdso_getcpu = (getcpu_t)dlsym(vdso, "__vdso_getcpu");
89 if (!vdso_getcpu) {
90 /* getcpu() was never wired up in the 32-bit vDSO. */
91 printf("[%s]\tfailed to find getcpu in vDSO\n",
92 sizeof(long) == 8 ? "WARN" : "NOTE");
93 }
94}
95
96static int init_vsys(void)
97{
98#ifdef __x86_64__
99 int nerrs = 0;
100 FILE *maps;
101 char line[128];
102 bool found = false;
103
104 maps = fopen("/proc/self/maps", "r");
105 if (!maps) {
106 printf("[WARN]\tCould not open /proc/self/maps -- assuming vsyscall is r-x\n");
107 should_read_vsyscall = true;
108 return 0;
109 }
110
111 while (fgets(line, sizeof(line), maps)) {
112 char r, x;
113 void *start, *end;
114 char name[128];
115 if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
116 &start, &end, &r, &x, name) != 5)
117 continue;
118
119 if (strcmp(name, "[vsyscall]"))
120 continue;
121
122 printf("\tvsyscall map: %s", line);
123
124 if (start != (void *)0xffffffffff600000 ||
125 end != (void *)0xffffffffff601000) {
126 printf("[FAIL]\taddress range is nonsense\n");
127 nerrs++;
128 }
129
130 printf("\tvsyscall permissions are %c-%c\n", r, x);
131 should_read_vsyscall = (r == 'r');
132 if (x != 'x') {
133 vgtod = NULL;
134 vtime = NULL;
135 vgetcpu = NULL;
136 }
137
138 found = true;
139 break;
140 }
141
142 fclose(maps);
143
144 if (!found) {
145 printf("\tno vsyscall map in /proc/self/maps\n");
146 should_read_vsyscall = false;
147 vgtod = NULL;
148 vtime = NULL;
149 vgetcpu = NULL;
150 }
151
152 return nerrs;
153#else
154 return 0;
155#endif
156}
157
158/* syscalls */
159static inline long sys_gtod(struct timeval *tv, struct timezone *tz)
160{
161 return syscall(SYS_gettimeofday, tv, tz);
162}
163
164static inline int sys_clock_gettime(clockid_t id, struct timespec *ts)
165{
166 return syscall(SYS_clock_gettime, id, ts);
167}
168
169static inline long sys_time(time_t *t)
170{
171 return syscall(SYS_time, t);
172}
173
174static inline long sys_getcpu(unsigned * cpu, unsigned * node,
175 void* cache)
176{
177 return syscall(SYS_getcpu, cpu, node, cache);
178}
179
180static jmp_buf jmpbuf;
181
182static void sigsegv(int sig, siginfo_t *info, void *ctx_void)
183{
184 siglongjmp(jmpbuf, 1);
185}
186
187static double tv_diff(const struct timeval *a, const struct timeval *b)
188{
189 return (double)(a->tv_sec - b->tv_sec) +
190 (double)((int)a->tv_usec - (int)b->tv_usec) * 1e-6;
191}
192
193static int check_gtod(const struct timeval *tv_sys1,
194 const struct timeval *tv_sys2,
195 const struct timezone *tz_sys,
196 const char *which,
197 const struct timeval *tv_other,
198 const struct timezone *tz_other)
199{
200 int nerrs = 0;
201 double d1, d2;
202
203 if (tz_other && (tz_sys->tz_minuteswest != tz_other->tz_minuteswest || tz_sys->tz_dsttime != tz_other->tz_dsttime)) {
204 printf("[FAIL] %s tz mismatch\n", which);
205 nerrs++;
206 }
207
208 d1 = tv_diff(tv_other, tv_sys1);
209 d2 = tv_diff(tv_sys2, tv_other);
210 printf("\t%s time offsets: %lf %lf\n", which, d1, d2);
211
212 if (d1 < 0 || d2 < 0) {
213 printf("[FAIL]\t%s time was inconsistent with the syscall\n", which);
214 nerrs++;
215 } else {
216 printf("[OK]\t%s gettimeofday()'s timeval was okay\n", which);
217 }
218
219 return nerrs;
220}
221
222static int test_gtod(void)
223{
224 struct timeval tv_sys1, tv_sys2, tv_vdso, tv_vsys;
225 struct timezone tz_sys, tz_vdso, tz_vsys;
226 long ret_vdso = -1;
227 long ret_vsys = -1;
228 int nerrs = 0;
229
230 printf("[RUN]\ttest gettimeofday()\n");
231
232 if (sys_gtod(&tv_sys1, &tz_sys) != 0)
233 err(1, "syscall gettimeofday");
234 if (vdso_gtod)
235 ret_vdso = vdso_gtod(&tv_vdso, &tz_vdso);
236 if (vgtod)
237 ret_vsys = vgtod(&tv_vsys, &tz_vsys);
238 if (sys_gtod(&tv_sys2, &tz_sys) != 0)
239 err(1, "syscall gettimeofday");
240
241 if (vdso_gtod) {
242 if (ret_vdso == 0) {
243 nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vDSO", &tv_vdso, &tz_vdso);
244 } else {
245 printf("[FAIL]\tvDSO gettimeofday() failed: %ld\n", ret_vdso);
246 nerrs++;
247 }
248 }
249
250 if (vgtod) {
251 if (ret_vsys == 0) {
252 nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vsyscall", &tv_vsys, &tz_vsys);
253 } else {
254 printf("[FAIL]\tvsys gettimeofday() failed: %ld\n", ret_vsys);
255 nerrs++;
256 }
257 }
258
259 return nerrs;
260}
261
262static int test_time(void) {
263 int nerrs = 0;
264
265 printf("[RUN]\ttest time()\n");
266 long t_sys1, t_sys2, t_vdso = 0, t_vsys = 0;
267 long t2_sys1 = -1, t2_sys2 = -1, t2_vdso = -1, t2_vsys = -1;
268 t_sys1 = sys_time(&t2_sys1);
269 if (vdso_time)
270 t_vdso = vdso_time(&t2_vdso);
271 if (vtime)
272 t_vsys = vtime(&t2_vsys);
273 t_sys2 = sys_time(&t2_sys2);
274 if (t_sys1 < 0 || t_sys1 != t2_sys1 || t_sys2 < 0 || t_sys2 != t2_sys2) {
275 printf("[FAIL]\tsyscall failed (ret1:%ld output1:%ld ret2:%ld output2:%ld)\n", t_sys1, t2_sys1, t_sys2, t2_sys2);
276 nerrs++;
277 return nerrs;
278 }
279
280 if (vdso_time) {
281 if (t_vdso < 0 || t_vdso != t2_vdso) {
282 printf("[FAIL]\tvDSO failed (ret:%ld output:%ld)\n", t_vdso, t2_vdso);
283 nerrs++;
284 } else if (t_vdso < t_sys1 || t_vdso > t_sys2) {
285 printf("[FAIL]\tvDSO returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vdso, t_sys2);
286 nerrs++;
287 } else {
288 printf("[OK]\tvDSO time() is okay\n");
289 }
290 }
291
292 if (vtime) {
293 if (t_vsys < 0 || t_vsys != t2_vsys) {
294 printf("[FAIL]\tvsyscall failed (ret:%ld output:%ld)\n", t_vsys, t2_vsys);
295 nerrs++;
296 } else if (t_vsys < t_sys1 || t_vsys > t_sys2) {
297 printf("[FAIL]\tvsyscall returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vsys, t_sys2);
298 nerrs++;
299 } else {
300 printf("[OK]\tvsyscall time() is okay\n");
301 }
302 }
303
304 return nerrs;
305}
306
307static int test_getcpu(int cpu)
308{
309 int nerrs = 0;
310 long ret_sys, ret_vdso = -1, ret_vsys = -1;
311
312 printf("[RUN]\tgetcpu() on CPU %d\n", cpu);
313
314 cpu_set_t cpuset;
315 CPU_ZERO(&cpuset);
316 CPU_SET(cpu, &cpuset);
317 if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) {
318 printf("[SKIP]\tfailed to force CPU %d\n", cpu);
319 return nerrs;
320 }
321
322 unsigned cpu_sys, cpu_vdso, cpu_vsys, node_sys, node_vdso, node_vsys;
323 unsigned node = 0;
324 bool have_node = false;
325 ret_sys = sys_getcpu(&cpu_sys, &node_sys, 0);
326 if (vdso_getcpu)
327 ret_vdso = vdso_getcpu(&cpu_vdso, &node_vdso, 0);
328 if (vgetcpu)
329 ret_vsys = vgetcpu(&cpu_vsys, &node_vsys, 0);
330
331 if (ret_sys == 0) {
332 if (cpu_sys != cpu) {
333 printf("[FAIL]\tsyscall reported CPU %hu but should be %d\n", cpu_sys, cpu);
334 nerrs++;
335 }
336
337 have_node = true;
338 node = node_sys;
339 }
340
341 if (vdso_getcpu) {
342 if (ret_vdso) {
343 printf("[FAIL]\tvDSO getcpu() failed\n");
344 nerrs++;
345 } else {
346 if (!have_node) {
347 have_node = true;
348 node = node_vdso;
349 }
350
351 if (cpu_vdso != cpu) {
352 printf("[FAIL]\tvDSO reported CPU %hu but should be %d\n", cpu_vdso, cpu);
353 nerrs++;
354 } else {
355 printf("[OK]\tvDSO reported correct CPU\n");
356 }
357
358 if (node_vdso != node) {
359 printf("[FAIL]\tvDSO reported node %hu but should be %hu\n", node_vdso, node);
360 nerrs++;
361 } else {
362 printf("[OK]\tvDSO reported correct node\n");
363 }
364 }
365 }
366
367 if (vgetcpu) {
368 if (ret_vsys) {
369 printf("[FAIL]\tvsyscall getcpu() failed\n");
370 nerrs++;
371 } else {
372 if (!have_node) {
373 have_node = true;
374 node = node_vsys;
375 }
376
377 if (cpu_vsys != cpu) {
378 printf("[FAIL]\tvsyscall reported CPU %hu but should be %d\n", cpu_vsys, cpu);
379 nerrs++;
380 } else {
381 printf("[OK]\tvsyscall reported correct CPU\n");
382 }
383
384 if (node_vsys != node) {
385 printf("[FAIL]\tvsyscall reported node %hu but should be %hu\n", node_vsys, node);
386 nerrs++;
387 } else {
388 printf("[OK]\tvsyscall reported correct node\n");
389 }
390 }
391 }
392
393 return nerrs;
394}
395
396static int test_vsys_r(void)
397{
398#ifdef __x86_64__
399 printf("[RUN]\tChecking read access to the vsyscall page\n");
400 bool can_read;
401 if (sigsetjmp(jmpbuf, 1) == 0) {
402 *(volatile int *)0xffffffffff600000;
403 can_read = true;
404 } else {
405 can_read = false;
406 }
407
408 if (can_read && !should_read_vsyscall) {
409 printf("[FAIL]\tWe have read access, but we shouldn't\n");
410 return 1;
411 } else if (!can_read && should_read_vsyscall) {
412 printf("[FAIL]\tWe don't have read access, but we should\n");
413 return 1;
414 } else {
415 printf("[OK]\tgot expected result\n");
416 }
417#endif
418
419 return 0;
420}
421
422
423#ifdef __x86_64__
424#define X86_EFLAGS_TF (1UL << 8)
425static volatile sig_atomic_t num_vsyscall_traps;
426
427static unsigned long get_eflags(void)
428{
429 unsigned long eflags;
430 asm volatile ("pushfq\n\tpopq %0" : "=rm" (eflags));
431 return eflags;
432}
433
434static void set_eflags(unsigned long eflags)
435{
436 asm volatile ("pushq %0\n\tpopfq" : : "rm" (eflags) : "flags");
437}
438
439static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
440{
441 ucontext_t *ctx = (ucontext_t *)ctx_void;
442 unsigned long ip = ctx->uc_mcontext.gregs[REG_RIP];
443
444 if (((ip ^ 0xffffffffff600000UL) & ~0xfffUL) == 0)
445 num_vsyscall_traps++;
446}
447
448static int test_native_vsyscall(void)
449{
450 time_t tmp;
451 bool is_native;
452
453 if (!vtime)
454 return 0;
455
456 printf("[RUN]\tchecking for native vsyscall\n");
457 sethandler(SIGTRAP, sigtrap, 0);
458 set_eflags(get_eflags() | X86_EFLAGS_TF);
459 vtime(&tmp);
460 set_eflags(get_eflags() & ~X86_EFLAGS_TF);
461
462 /*
463 * If vsyscalls are emulated, we expect a single trap in the
464 * vsyscall page -- the call instruction will trap with RIP
465 * pointing to the entry point before emulation takes over.
466 * In native mode, we expect two traps, since whatever code
467 * the vsyscall page contains will be more than just a ret
468 * instruction.
469 */
470 is_native = (num_vsyscall_traps > 1);
471
472 printf("\tvsyscalls are %s (%d instructions in vsyscall page)\n",
473 (is_native ? "native" : "emulated"),
474 (int)num_vsyscall_traps);
475
476 return 0;
477}
478#endif
479
480int main(int argc, char **argv)
481{
482 int nerrs = 0;
483
484 init_vdso();
485 nerrs += init_vsys();
486
487 nerrs += test_gtod();
488 nerrs += test_time();
489 nerrs += test_getcpu(0);
490 nerrs += test_getcpu(1);
491
492 sethandler(SIGSEGV, sigsegv, 0);
493 nerrs += test_vsys_r();
494
495#ifdef __x86_64__
496 nerrs += test_native_vsyscall();
497#endif
498
499 return nerrs ? 1 : 0;
500}
diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c
index 5727dfb15a83..c9c81614a66a 100644
--- a/tools/usb/usbip/libsrc/vhci_driver.c
+++ b/tools/usb/usbip/libsrc/vhci_driver.c
@@ -50,14 +50,14 @@ static int parse_status(const char *value)
50 50
51 while (*c != '\0') { 51 while (*c != '\0') {
52 int port, status, speed, devid; 52 int port, status, speed, devid;
53 unsigned long socket; 53 int sockfd;
54 char lbusid[SYSFS_BUS_ID_SIZE]; 54 char lbusid[SYSFS_BUS_ID_SIZE];
55 struct usbip_imported_device *idev; 55 struct usbip_imported_device *idev;
56 char hub[3]; 56 char hub[3];
57 57
58 ret = sscanf(c, "%2s %d %d %d %x %lx %31s\n", 58 ret = sscanf(c, "%2s %d %d %d %x %u %31s\n",
59 hub, &port, &status, &speed, 59 hub, &port, &status, &speed,
60 &devid, &socket, lbusid); 60 &devid, &sockfd, lbusid);
61 61
62 if (ret < 5) { 62 if (ret < 5) {
63 dbg("sscanf failed: %d", ret); 63 dbg("sscanf failed: %d", ret);
@@ -66,7 +66,7 @@ static int parse_status(const char *value)
66 66
67 dbg("hub %s port %d status %d speed %d devid %x", 67 dbg("hub %s port %d status %d speed %d devid %x",
68 hub, port, status, speed, devid); 68 hub, port, status, speed, devid);
69 dbg("socket %lx lbusid %s", socket, lbusid); 69 dbg("sockfd %u lbusid %s", sockfd, lbusid);
70 70
71 /* if a device is connected, look at it */ 71 /* if a device is connected, look at it */
72 idev = &vhci_driver->idev[port]; 72 idev = &vhci_driver->idev[port];
@@ -106,7 +106,7 @@ static int parse_status(const char *value)
106 return 0; 106 return 0;
107} 107}
108 108
109#define MAX_STATUS_NAME 16 109#define MAX_STATUS_NAME 18
110 110
111static int refresh_imported_device_list(void) 111static int refresh_imported_device_list(void)
112{ 112{
@@ -329,9 +329,17 @@ err:
329int usbip_vhci_get_free_port(uint32_t speed) 329int usbip_vhci_get_free_port(uint32_t speed)
330{ 330{
331 for (int i = 0; i < vhci_driver->nports; i++) { 331 for (int i = 0; i < vhci_driver->nports; i++) {
332 if (speed == USB_SPEED_SUPER && 332
333 vhci_driver->idev[i].hub != HUB_SPEED_SUPER) 333 switch (speed) {
334 continue; 334 case USB_SPEED_SUPER:
335 if (vhci_driver->idev[i].hub != HUB_SPEED_SUPER)
336 continue;
337 break;
338 default:
339 if (vhci_driver->idev[i].hub != HUB_SPEED_HIGH)
340 continue;
341 break;
342 }
335 343
336 if (vhci_driver->idev[i].status == VDEV_ST_NULL) 344 if (vhci_driver->idev[i].status == VDEV_ST_NULL)
337 return vhci_driver->idev[i].port; 345 return vhci_driver->idev[i].port;
diff --git a/tools/usb/usbip/src/utils.c b/tools/usb/usbip/src/utils.c
index 2b3d6d235015..3d7b42e77299 100644
--- a/tools/usb/usbip/src/utils.c
+++ b/tools/usb/usbip/src/utils.c
@@ -30,6 +30,7 @@ int modify_match_busid(char *busid, int add)
30 char command[SYSFS_BUS_ID_SIZE + 4]; 30 char command[SYSFS_BUS_ID_SIZE + 4];
31 char match_busid_attr_path[SYSFS_PATH_MAX]; 31 char match_busid_attr_path[SYSFS_PATH_MAX];
32 int rc; 32 int rc;
33 int cmd_size;
33 34
34 snprintf(match_busid_attr_path, sizeof(match_busid_attr_path), 35 snprintf(match_busid_attr_path, sizeof(match_busid_attr_path),
35 "%s/%s/%s/%s/%s/%s", SYSFS_MNT_PATH, SYSFS_BUS_NAME, 36 "%s/%s/%s/%s/%s/%s", SYSFS_MNT_PATH, SYSFS_BUS_NAME,
@@ -37,12 +38,14 @@ int modify_match_busid(char *busid, int add)
37 attr_name); 38 attr_name);
38 39
39 if (add) 40 if (add)
40 snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s", busid); 41 cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s",
42 busid);
41 else 43 else
42 snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s", busid); 44 cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s",
45 busid);
43 46
44 rc = write_sysfs_attribute(match_busid_attr_path, command, 47 rc = write_sysfs_attribute(match_busid_attr_path, command,
45 sizeof(command)); 48 cmd_size);
46 if (rc < 0) { 49 if (rc < 0) {
47 dbg("failed to write match_busid: %s", strerror(errno)); 50 dbg("failed to write match_busid: %s", strerror(errno));
48 return -1; 51 return -1;
diff --git a/tools/virtio/ringtest/ptr_ring.c b/tools/virtio/ringtest/ptr_ring.c
index 38bb171aceba..e6e81305ef46 100644
--- a/tools/virtio/ringtest/ptr_ring.c
+++ b/tools/virtio/ringtest/ptr_ring.c
@@ -16,24 +16,41 @@
16#define unlikely(x) (__builtin_expect(!!(x), 0)) 16#define unlikely(x) (__builtin_expect(!!(x), 0))
17#define likely(x) (__builtin_expect(!!(x), 1)) 17#define likely(x) (__builtin_expect(!!(x), 1))
18#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a)) 18#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
19#define SIZE_MAX (~(size_t)0)
20
19typedef pthread_spinlock_t spinlock_t; 21typedef pthread_spinlock_t spinlock_t;
20 22
21typedef int gfp_t; 23typedef int gfp_t;
22static void *kmalloc(unsigned size, gfp_t gfp) 24#define __GFP_ZERO 0x1
23{
24 return memalign(64, size);
25}
26 25
27static void *kzalloc(unsigned size, gfp_t gfp) 26static void *kmalloc(unsigned size, gfp_t gfp)
28{ 27{
29 void *p = memalign(64, size); 28 void *p = memalign(64, size);
30 if (!p) 29 if (!p)
31 return p; 30 return p;
32 memset(p, 0, size);
33 31
32 if (gfp & __GFP_ZERO)
33 memset(p, 0, size);
34 return p; 34 return p;
35} 35}
36 36
37static inline void *kzalloc(unsigned size, gfp_t flags)
38{
39 return kmalloc(size, flags | __GFP_ZERO);
40}
41
42static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
43{
44 if (size != 0 && n > SIZE_MAX / size)
45 return NULL;
46 return kmalloc(n * size, flags);
47}
48
49static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
50{
51 return kmalloc_array(n, size, flags | __GFP_ZERO);
52}
53
37static void kfree(void *p) 54static void kfree(void *p)
38{ 55{
39 if (p) 56 if (p)
diff --git a/tools/vm/slabinfo-gnuplot.sh b/tools/vm/slabinfo-gnuplot.sh
index 35b039864b77..0cf28aa6f21c 100644
--- a/tools/vm/slabinfo-gnuplot.sh
+++ b/tools/vm/slabinfo-gnuplot.sh
@@ -1,4 +1,4 @@
1#!/bin/sh 1#!/bin/bash
2 2
3# Sergey Senozhatsky, 2015 3# Sergey Senozhatsky, 2015
4# sergey.senozhatsky.work@gmail.com 4# sergey.senozhatsky.work@gmail.com
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 4151250ce8da..cc29a8148328 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -92,16 +92,23 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
92{ 92{
93 struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id; 93 struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
94 struct arch_timer_context *vtimer; 94 struct arch_timer_context *vtimer;
95 u32 cnt_ctl;
95 96
96 if (!vcpu) { 97 /*
97 pr_warn_once("Spurious arch timer IRQ on non-VCPU thread\n"); 98 * We may see a timer interrupt after vcpu_put() has been called which
98 return IRQ_NONE; 99 * sets the CPU's vcpu pointer to NULL, because even though the timer
99 } 100 * has been disabled in vtimer_save_state(), the hardware interrupt
100 vtimer = vcpu_vtimer(vcpu); 101 * signal may not have been retired from the interrupt controller yet.
102 */
103 if (!vcpu)
104 return IRQ_HANDLED;
101 105
106 vtimer = vcpu_vtimer(vcpu);
102 if (!vtimer->irq.level) { 107 if (!vtimer->irq.level) {
103 vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl); 108 cnt_ctl = read_sysreg_el0(cntv_ctl);
104 if (kvm_timer_irq_can_fire(vtimer)) 109 cnt_ctl &= ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT |
110 ARCH_TIMER_CTRL_IT_MASK;
111 if (cnt_ctl == (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT))
105 kvm_timer_update_irq(vcpu, true, vtimer); 112 kvm_timer_update_irq(vcpu, true, vtimer);
106 } 113 }
107 114
@@ -355,6 +362,7 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu)
355 362
356 /* Disable the virtual timer */ 363 /* Disable the virtual timer */
357 write_sysreg_el0(0, cntv_ctl); 364 write_sysreg_el0(0, cntv_ctl);
365 isb();
358 366
359 vtimer->loaded = false; 367 vtimer->loaded = false;
360out: 368out:
@@ -479,9 +487,6 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
479 487
480 vtimer_restore_state(vcpu); 488 vtimer_restore_state(vcpu);
481 489
482 if (has_vhe())
483 disable_el1_phys_timer_access();
484
485 /* Set the background timer for the physical timer emulation. */ 490 /* Set the background timer for the physical timer emulation. */
486 phys_timer_emulate(vcpu); 491 phys_timer_emulate(vcpu);
487} 492}
@@ -510,9 +515,6 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
510 if (unlikely(!timer->enabled)) 515 if (unlikely(!timer->enabled))
511 return; 516 return;
512 517
513 if (has_vhe())
514 enable_el1_phys_timer_access();
515
516 vtimer_save_state(vcpu); 518 vtimer_save_state(vcpu);
517 519
518 /* 520 /*
@@ -726,7 +728,7 @@ static int kvm_timer_dying_cpu(unsigned int cpu)
726 return 0; 728 return 0;
727} 729}
728 730
729int kvm_timer_hyp_init(void) 731int kvm_timer_hyp_init(bool has_gic)
730{ 732{
731 struct arch_timer_kvm_info *info; 733 struct arch_timer_kvm_info *info;
732 int err; 734 int err;
@@ -762,10 +764,13 @@ int kvm_timer_hyp_init(void)
762 return err; 764 return err;
763 } 765 }
764 766
765 err = irq_set_vcpu_affinity(host_vtimer_irq, kvm_get_running_vcpus()); 767 if (has_gic) {
766 if (err) { 768 err = irq_set_vcpu_affinity(host_vtimer_irq,
767 kvm_err("kvm_arch_timer: error setting vcpu affinity\n"); 769 kvm_get_running_vcpus());
768 goto out_free_irq; 770 if (err) {
771 kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
772 goto out_free_irq;
773 }
769 } 774 }
770 775
771 kvm_info("virtual timer IRQ%d\n", host_vtimer_irq); 776 kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
@@ -841,7 +846,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
841no_vgic: 846no_vgic:
842 preempt_disable(); 847 preempt_disable();
843 timer->enabled = 1; 848 timer->enabled = 1;
844 kvm_timer_vcpu_load_vgic(vcpu); 849 kvm_timer_vcpu_load(vcpu);
845 preempt_enable(); 850 preempt_enable();
846 851
847 return 0; 852 return 0;
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index a67c106d73f5..2e43f9d42bd5 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -188,6 +188,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
188 kvm->vcpus[i] = NULL; 188 kvm->vcpus[i] = NULL;
189 } 189 }
190 } 190 }
191 atomic_set(&kvm->online_vcpus, 0);
191} 192}
192 193
193int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) 194int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@@ -296,7 +297,6 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
296{ 297{
297 kvm_mmu_free_memory_caches(vcpu); 298 kvm_mmu_free_memory_caches(vcpu);
298 kvm_timer_vcpu_terminate(vcpu); 299 kvm_timer_vcpu_terminate(vcpu);
299 kvm_vgic_vcpu_destroy(vcpu);
300 kvm_pmu_vcpu_destroy(vcpu); 300 kvm_pmu_vcpu_destroy(vcpu);
301 kvm_vcpu_uninit(vcpu); 301 kvm_vcpu_uninit(vcpu);
302 kmem_cache_free(kvm_vcpu_cache, vcpu); 302 kmem_cache_free(kvm_vcpu_cache, vcpu);
@@ -627,6 +627,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
627 ret = kvm_handle_mmio_return(vcpu, vcpu->run); 627 ret = kvm_handle_mmio_return(vcpu, vcpu->run);
628 if (ret) 628 if (ret)
629 return ret; 629 return ret;
630 if (kvm_arm_handle_step_debug(vcpu, vcpu->run))
631 return 0;
632
630 } 633 }
631 634
632 if (run->immediate_exit) 635 if (run->immediate_exit)
@@ -1323,7 +1326,7 @@ static int init_subsystems(void)
1323 /* 1326 /*
1324 * Init HYP architected timer support 1327 * Init HYP architected timer support
1325 */ 1328 */
1326 err = kvm_timer_hyp_init(); 1329 err = kvm_timer_hyp_init(vgic_present);
1327 if (err) 1330 if (err)
1328 goto out; 1331 goto out;
1329 1332
@@ -1502,7 +1505,7 @@ int kvm_arch_init(void *opaque)
1502 bool in_hyp_mode; 1505 bool in_hyp_mode;
1503 1506
1504 if (!is_hyp_mode_available()) { 1507 if (!is_hyp_mode_available()) {
1505 kvm_err("HYP mode not available\n"); 1508 kvm_info("HYP mode not available\n");
1506 return -ENODEV; 1509 return -ENODEV;
1507 } 1510 }
1508 1511
diff --git a/virt/kvm/arm/hyp/timer-sr.c b/virt/kvm/arm/hyp/timer-sr.c
index f39861639f08..f24404b3c8df 100644
--- a/virt/kvm/arm/hyp/timer-sr.c
+++ b/virt/kvm/arm/hyp/timer-sr.c
@@ -27,42 +27,34 @@ void __hyp_text __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high)
27 write_sysreg(cntvoff, cntvoff_el2); 27 write_sysreg(cntvoff, cntvoff_el2);
28} 28}
29 29
30void __hyp_text enable_el1_phys_timer_access(void)
31{
32 u64 val;
33
34 /* Allow physical timer/counter access for the host */
35 val = read_sysreg(cnthctl_el2);
36 val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
37 write_sysreg(val, cnthctl_el2);
38}
39
40void __hyp_text disable_el1_phys_timer_access(void)
41{
42 u64 val;
43
44 /*
45 * Disallow physical timer access for the guest
46 * Physical counter access is allowed
47 */
48 val = read_sysreg(cnthctl_el2);
49 val &= ~CNTHCTL_EL1PCEN;
50 val |= CNTHCTL_EL1PCTEN;
51 write_sysreg(val, cnthctl_el2);
52}
53
54void __hyp_text __timer_disable_traps(struct kvm_vcpu *vcpu) 30void __hyp_text __timer_disable_traps(struct kvm_vcpu *vcpu)
55{ 31{
56 /* 32 /*
57 * We don't need to do this for VHE since the host kernel runs in EL2 33 * We don't need to do this for VHE since the host kernel runs in EL2
58 * with HCR_EL2.TGE ==1, which makes those bits have no impact. 34 * with HCR_EL2.TGE ==1, which makes those bits have no impact.
59 */ 35 */
60 if (!has_vhe()) 36 if (!has_vhe()) {
61 enable_el1_phys_timer_access(); 37 u64 val;
38
39 /* Allow physical timer/counter access for the host */
40 val = read_sysreg(cnthctl_el2);
41 val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
42 write_sysreg(val, cnthctl_el2);
43 }
62} 44}
63 45
64void __hyp_text __timer_enable_traps(struct kvm_vcpu *vcpu) 46void __hyp_text __timer_enable_traps(struct kvm_vcpu *vcpu)
65{ 47{
66 if (!has_vhe()) 48 if (!has_vhe()) {
67 disable_el1_phys_timer_access(); 49 u64 val;
50
51 /*
52 * Disallow physical timer access for the guest
53 * Physical counter access is allowed
54 */
55 val = read_sysreg(cnthctl_el2);
56 val &= ~CNTHCTL_EL1PCEN;
57 val |= CNTHCTL_EL1PCTEN;
58 write_sysreg(val, cnthctl_el2);
59 }
68} 60}
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
index a3f18d362366..d7fd46fe9efb 100644
--- a/virt/kvm/arm/hyp/vgic-v2-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
@@ -34,11 +34,7 @@ static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
34 else 34 else
35 elrsr1 = 0; 35 elrsr1 = 0;
36 36
37#ifdef CONFIG_CPU_BIG_ENDIAN
38 cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
39#else
40 cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0; 37 cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
41#endif
42} 38}
43 39
44static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base) 40static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
index b6e715fd3c90..dac7ceb1a677 100644
--- a/virt/kvm/arm/mmio.c
+++ b/virt/kvm/arm/mmio.c
@@ -112,7 +112,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
112 } 112 }
113 113
114 trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, 114 trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
115 data); 115 &data);
116 data = vcpu_data_host_to_guest(vcpu, data, len); 116 data = vcpu_data_host_to_guest(vcpu, data, len);
117 vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data); 117 vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
118 } 118 }
@@ -182,14 +182,14 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
182 data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt), 182 data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
183 len); 183 len);
184 184
185 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data); 185 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
186 kvm_mmio_write_buf(data_buf, len, data); 186 kvm_mmio_write_buf(data_buf, len, data);
187 187
188 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len, 188 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
189 data_buf); 189 data_buf);
190 } else { 190 } else {
191 trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len, 191 trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
192 fault_ipa, 0); 192 fault_ipa, NULL);
193 193
194 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len, 194 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
195 data_buf); 195 data_buf);
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index b36945d49986..9dea96380339 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -509,8 +509,6 @@ static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
509 */ 509 */
510void free_hyp_pgds(void) 510void free_hyp_pgds(void)
511{ 511{
512 unsigned long addr;
513
514 mutex_lock(&kvm_hyp_pgd_mutex); 512 mutex_lock(&kvm_hyp_pgd_mutex);
515 513
516 if (boot_hyp_pgd) { 514 if (boot_hyp_pgd) {
@@ -521,10 +519,10 @@ void free_hyp_pgds(void)
521 519
522 if (hyp_pgd) { 520 if (hyp_pgd) {
523 unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE); 521 unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE);
524 for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE) 522 unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET),
525 unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE); 523 (uintptr_t)high_memory - PAGE_OFFSET);
526 for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE) 524 unmap_hyp_range(hyp_pgd, kern_hyp_va(VMALLOC_START),
527 unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE); 525 VMALLOC_END - VMALLOC_START);
528 526
529 free_pages((unsigned long)hyp_pgd, hyp_pgd_order); 527 free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
530 hyp_pgd = NULL; 528 hyp_pgd = NULL;
@@ -1312,7 +1310,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1312 return -EFAULT; 1310 return -EFAULT;
1313 } 1311 }
1314 1312
1315 if (is_vm_hugetlb_page(vma) && !logging_active) { 1313 if (vma_kernel_pagesize(vma) == PMD_SIZE && !logging_active) {
1316 hugetlb = true; 1314 hugetlb = true;
1317 gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT; 1315 gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
1318 } else { 1316 } else {
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 62310122ee78..743ca5cb05ef 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -285,9 +285,11 @@ int vgic_init(struct kvm *kvm)
285 if (ret) 285 if (ret)
286 goto out; 286 goto out;
287 287
288 ret = vgic_v4_init(kvm); 288 if (vgic_has_its(kvm)) {
289 if (ret) 289 ret = vgic_v4_init(kvm);
290 goto out; 290 if (ret)
291 goto out;
292 }
291 293
292 kvm_for_each_vcpu(i, vcpu, kvm) 294 kvm_for_each_vcpu(i, vcpu, kvm)
293 kvm_vgic_vcpu_enable(vcpu); 295 kvm_vgic_vcpu_enable(vcpu);
diff --git a/virt/kvm/arm/vgic/vgic-irqfd.c b/virt/kvm/arm/vgic/vgic-irqfd.c
index b7baf581611a..99e026d2dade 100644
--- a/virt/kvm/arm/vgic/vgic-irqfd.c
+++ b/virt/kvm/arm/vgic/vgic-irqfd.c
@@ -112,8 +112,7 @@ int kvm_vgic_setup_default_irq_routing(struct kvm *kvm)
112 u32 nr = dist->nr_spis; 112 u32 nr = dist->nr_spis;
113 int i, ret; 113 int i, ret;
114 114
115 entries = kcalloc(nr, sizeof(struct kvm_kernel_irq_routing_entry), 115 entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL);
116 GFP_KERNEL);
117 if (!entries) 116 if (!entries)
118 return -ENOMEM; 117 return -ENOMEM;
119 118
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 1f761a9991e7..8e633bd9cc1e 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -421,6 +421,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
421 u32 *intids; 421 u32 *intids;
422 int nr_irqs, i; 422 int nr_irqs, i;
423 unsigned long flags; 423 unsigned long flags;
424 u8 pendmask;
424 425
425 nr_irqs = vgic_copy_lpi_list(vcpu, &intids); 426 nr_irqs = vgic_copy_lpi_list(vcpu, &intids);
426 if (nr_irqs < 0) 427 if (nr_irqs < 0)
@@ -428,7 +429,6 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
428 429
429 for (i = 0; i < nr_irqs; i++) { 430 for (i = 0; i < nr_irqs; i++) {
430 int byte_offset, bit_nr; 431 int byte_offset, bit_nr;
431 u8 pendmask;
432 432
433 byte_offset = intids[i] / BITS_PER_BYTE; 433 byte_offset = intids[i] / BITS_PER_BYTE;
434 bit_nr = intids[i] % BITS_PER_BYTE; 434 bit_nr = intids[i] % BITS_PER_BYTE;
@@ -821,6 +821,8 @@ static int vgic_its_alloc_collection(struct vgic_its *its,
821 return E_ITS_MAPC_COLLECTION_OOR; 821 return E_ITS_MAPC_COLLECTION_OOR;
822 822
823 collection = kzalloc(sizeof(*collection), GFP_KERNEL); 823 collection = kzalloc(sizeof(*collection), GFP_KERNEL);
824 if (!collection)
825 return -ENOMEM;
824 826
825 collection->collection_id = coll_id; 827 collection->collection_id = coll_id;
826 collection->target_addr = COLLECTION_NOT_MAPPED; 828 collection->target_addr = COLLECTION_NOT_MAPPED;
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 2f05f732d3fd..f47e8481fa45 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -327,13 +327,13 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
327 int last_byte_offset = -1; 327 int last_byte_offset = -1;
328 struct vgic_irq *irq; 328 struct vgic_irq *irq;
329 int ret; 329 int ret;
330 u8 val;
330 331
331 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { 332 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
332 int byte_offset, bit_nr; 333 int byte_offset, bit_nr;
333 struct kvm_vcpu *vcpu; 334 struct kvm_vcpu *vcpu;
334 gpa_t pendbase, ptr; 335 gpa_t pendbase, ptr;
335 bool stored; 336 bool stored;
336 u8 val;
337 337
338 vcpu = irq->target_vcpu; 338 vcpu = irq->target_vcpu;
339 if (!vcpu) 339 if (!vcpu)
diff --git a/virt/kvm/arm/vgic/vgic-v4.c b/virt/kvm/arm/vgic/vgic-v4.c
index 53c324aa44ef..bc4265154bac 100644
--- a/virt/kvm/arm/vgic/vgic-v4.c
+++ b/virt/kvm/arm/vgic/vgic-v4.c
@@ -118,7 +118,7 @@ int vgic_v4_init(struct kvm *kvm)
118 struct kvm_vcpu *vcpu; 118 struct kvm_vcpu *vcpu;
119 int i, nr_vcpus, ret; 119 int i, nr_vcpus, ret;
120 120
121 if (!vgic_supports_direct_msis(kvm)) 121 if (!kvm_vgic_global_state.has_gicv4)
122 return 0; /* Nothing to see here... move along. */ 122 return 0; /* Nothing to see here... move along. */
123 123
124 if (dist->its_vm.vpes) 124 if (dist->its_vm.vpes)
@@ -337,8 +337,10 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
337 goto out; 337 goto out;
338 338
339 WARN_ON(!(irq->hw && irq->host_irq == virq)); 339 WARN_ON(!(irq->hw && irq->host_irq == virq));
340 irq->hw = false; 340 if (irq->hw) {
341 ret = its_unmap_vlpi(virq); 341 irq->hw = false;
342 ret = its_unmap_vlpi(virq);
343 }
342 344
343out: 345out:
344 mutex_unlock(&its->its_lock); 346 mutex_unlock(&its->its_lock);
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index b168a328a9e0..ecb8e25f5fe5 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -492,6 +492,7 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
492int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner) 492int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
493{ 493{
494 struct vgic_irq *irq; 494 struct vgic_irq *irq;
495 unsigned long flags;
495 int ret = 0; 496 int ret = 0;
496 497
497 if (!vgic_initialized(vcpu->kvm)) 498 if (!vgic_initialized(vcpu->kvm))
@@ -502,12 +503,12 @@ int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
502 return -EINVAL; 503 return -EINVAL;
503 504
504 irq = vgic_get_irq(vcpu->kvm, vcpu, intid); 505 irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
505 spin_lock(&irq->irq_lock); 506 spin_lock_irqsave(&irq->irq_lock, flags);
506 if (irq->owner && irq->owner != owner) 507 if (irq->owner && irq->owner != owner)
507 ret = -EEXIST; 508 ret = -EEXIST;
508 else 509 else
509 irq->owner = owner; 510 irq->owner = owner;
510 spin_unlock(&irq->irq_lock); 511 spin_unlock_irqrestore(&irq->irq_lock, flags);
511 512
512 return ret; 513 return ret;
513} 514}
@@ -823,13 +824,14 @@ void vgic_kick_vcpus(struct kvm *kvm)
823 824
824bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid) 825bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
825{ 826{
826 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); 827 struct vgic_irq *irq;
827 bool map_is_active; 828 bool map_is_active;
828 unsigned long flags; 829 unsigned long flags;
829 830
830 if (!vgic_initialized(vcpu->kvm)) 831 if (!vgic_initialized(vcpu->kvm))
831 return false; 832 return false;
832 833
834 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
833 spin_lock_irqsave(&irq->irq_lock, flags); 835 spin_lock_irqsave(&irq->irq_lock, flags);
834 map_is_active = irq->hw && irq->active; 836 map_is_active = irq->hw && irq->active;
835 spin_unlock_irqrestore(&irq->irq_lock, flags); 837 spin_unlock_irqrestore(&irq->irq_lock, flags);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c422c10cd1dd..210bf820385a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -135,6 +135,11 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
135static unsigned long long kvm_createvm_count; 135static unsigned long long kvm_createvm_count;
136static unsigned long long kvm_active_vms; 136static unsigned long long kvm_active_vms;
137 137
138__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
139 unsigned long start, unsigned long end)
140{
141}
142
138bool kvm_is_reserved_pfn(kvm_pfn_t pfn) 143bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
139{ 144{
140 if (pfn_valid(pfn)) 145 if (pfn_valid(pfn))
@@ -360,6 +365,9 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
360 kvm_flush_remote_tlbs(kvm); 365 kvm_flush_remote_tlbs(kvm);
361 366
362 spin_unlock(&kvm->mmu_lock); 367 spin_unlock(&kvm->mmu_lock);
368
369 kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
370
363 srcu_read_unlock(&kvm->srcu, idx); 371 srcu_read_unlock(&kvm->srcu, idx);
364} 372}
365 373