author    Ross Zwisler <ross.zwisler@linux.intel.com>  2018-02-03 02:26:26 -0500
committer Ross Zwisler <ross.zwisler@linux.intel.com>  2018-02-03 02:26:26 -0500
commit    ee95f4059a833839bf52972191b2d4c3d3cec552 (patch)
tree      a1c8587d9b82e64a75dde376a90a3d69b0f4847a
parent    d121f07691415df824e6b60520f782f6d13b3c81 (diff)
parent    f81e1d35a6e36d30888c46283b8dd1022e847124 (diff)
Merge branch 'for-4.16/nfit' into libnvdimm-for-next
-rw-r--r--.mailmap1
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu16
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt59
-rw-r--r--Documentation/filesystems/nilfs2.txt4
-rw-r--r--Documentation/gpu/i915.rst5
-rw-r--r--Documentation/kbuild/kconfig-language.txt23
-rw-r--r--Documentation/networking/index.rst2
-rw-r--r--Documentation/networking/msg_zerocopy.rst4
-rw-r--r--Documentation/usb/gadget-testing.txt2
-rw-r--r--Documentation/x86/pti.txt186
-rw-r--r--Documentation/x86/x86_64/mm.txt18
-rw-r--r--MAINTAINERS15
-rw-r--r--Makefile45
-rw-r--r--arch/arc/boot/dts/axc003.dtsi8
-rw-r--r--arch/arc/boot/dts/axc003_idu.dtsi8
-rw-r--r--arch/arc/boot/dts/hsdk.dts8
-rw-r--r--arch/arc/configs/hsdk_defconfig5
-rw-r--r--arch/arc/include/asm/uaccess.h5
-rw-r--r--arch/arc/kernel/setup.c2
-rw-r--r--arch/arc/kernel/stacktrace.c2
-rw-r--r--arch/arc/kernel/traps.c14
-rw-r--r--arch/arc/kernel/troubleshoot.c3
-rw-r--r--arch/arc/plat-axs10x/axs10x.c18
-rw-r--r--arch/arc/plat-hsdk/platform.c42
-rw-r--r--arch/arm/boot/dts/aspeed-g4.dtsi2
-rw-r--r--arch/arm/boot/dts/at91-tse850-3.dts1
-rw-r--r--arch/arm/boot/dts/da850-lego-ev3.dts4
-rw-r--r--arch/arm/boot/dts/exynos5800-peach-pi.dts4
-rw-r--r--arch/arm/boot/dts/ls1021a-qds.dts2
-rw-r--r--arch/arm/boot/dts/ls1021a-twr.dts2
-rw-r--r--arch/arm/boot/dts/rk3066a-marsboard.dts4
-rw-r--r--arch/arm/boot/dts/rk3288.dtsi2
-rw-r--r--arch/arm/boot/dts/sun4i-a10.dtsi4
-rw-r--r--arch/arm/boot/dts/sun5i-a10s.dtsi4
-rw-r--r--arch/arm/boot/dts/sun6i-a31.dtsi4
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi4
-rw-r--r--arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts1
-rw-r--r--arch/arm/boot/dts/tango4-common.dtsi1
-rw-r--r--arch/arm/kernel/traps.c1
-rw-r--r--arch/arm/mach-davinci/dm365.c29
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts1
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts1
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts3
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi11
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts2
-rw-r--r--arch/arm64/boot/dts/renesas/salvator-common.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/ulcb.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-rock64.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi11
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi4
-rw-r--r--arch/ia64/kernel/acpi.c5
-rw-r--r--arch/ia64/kernel/time.c2
-rw-r--r--arch/m32r/kernel/traps.c1
-rw-r--r--arch/mips/kernel/cps-vec.S2
-rw-r--r--arch/mips/kernel/process.c12
-rw-r--r--arch/mips/kernel/ptrace.c147
-rw-r--r--arch/parisc/include/asm/ldcw.h2
-rw-r--r--arch/parisc/kernel/drivers.c2
-rw-r--r--arch/parisc/kernel/entry.S13
-rw-r--r--arch/parisc/kernel/pacache.S9
-rw-r--r--arch/parisc/kernel/process.c39
-rw-r--r--arch/parisc/mm/init.c10
-rw-r--r--arch/powerpc/include/asm/exception-64e.h6
-rw-r--r--arch/powerpc/include/asm/exception-64s.h57
-rw-r--r--arch/powerpc/include/asm/feature-fixups.h13
-rw-r--r--arch/powerpc/include/asm/hvcall.h17
-rw-r--r--arch/powerpc/include/asm/paca.h10
-rw-r--r--arch/powerpc/include/asm/plpar_wrappers.h14
-rw-r--r--arch/powerpc/include/asm/setup.h13
-rw-r--r--arch/powerpc/kernel/asm-offsets.c5
-rw-r--r--arch/powerpc/kernel/entry_64.S44
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S137
-rw-r--r--arch/powerpc/kernel/setup_64.c101
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S9
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu.c1
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c90
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S9
-rw-r--r--arch/powerpc/kvm/book3s_pr.c2
-rw-r--r--arch/powerpc/kvm/book3s_rmhandlers.S7
-rw-r--r--arch/powerpc/kvm/book3s_segment.S4
-rw-r--r--arch/powerpc/lib/feature-fixups.c41
-rw-r--r--arch/powerpc/mm/fault.c7
-rw-r--r--arch/powerpc/platforms/powernv/setup.c49
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c21
-rw-r--r--arch/powerpc/platforms/pseries/pseries.h2
-rw-r--r--arch/powerpc/platforms/pseries/ras.c3
-rw-r--r--arch/powerpc/platforms/pseries/setup.c35
-rw-r--r--arch/riscv/configs/defconfig75
-rw-r--r--arch/riscv/include/asm/csr.h8
-rw-r--r--arch/riscv/include/asm/io.h4
-rw-r--r--arch/riscv/include/asm/irqflags.h10
-rw-r--r--arch/riscv/include/asm/pgtable.h4
-rw-r--r--arch/riscv/include/asm/ptrace.h2
-rw-r--r--arch/riscv/include/asm/tlbflush.h4
-rw-r--r--arch/riscv/include/asm/uaccess.h12
-rw-r--r--arch/riscv/include/asm/unistd.h1
-rw-r--r--arch/riscv/include/asm/vdso-syscalls.h28
-rw-r--r--arch/riscv/include/uapi/asm/syscalls.h26
-rw-r--r--arch/riscv/kernel/entry.S8
-rw-r--r--arch/riscv/kernel/process.c4
-rw-r--r--arch/riscv/kernel/syscall_table.c1
-rw-r--r--arch/riscv/kernel/vdso/flush_icache.S1
-rw-r--r--arch/riscv/mm/fault.c2
-rw-r--r--arch/s390/kvm/kvm-s390.c9
-rw-r--r--arch/s390/kvm/priv.c2
-rw-r--r--arch/s390/lib/uaccess.c2
-rw-r--r--arch/s390/pci/pci_dma.c21
-rw-r--r--arch/s390/pci/pci_insn.c3
-rw-r--r--arch/sh/boards/mach-se/770x/setup.c24
-rw-r--r--arch/sh/include/mach-se/mach/se.h1
-rw-r--r--arch/unicore32/kernel/traps.c1
-rw-r--r--arch/x86/Kconfig15
-rw-r--r--arch/x86/Makefile8
-rw-r--r--arch/x86/crypto/aesni-intel_asm.S5
-rw-r--r--arch/x86/crypto/camellia-aesni-avx-asm_64.S3
-rw-r--r--arch/x86/crypto/camellia-aesni-avx2-asm_64.S3
-rw-r--r--arch/x86/crypto/crc32c-pcl-intel-asm_64.S3
-rw-r--r--arch/x86/entry/calling.h36
-rw-r--r--arch/x86/entry/entry_32.S5
-rw-r--r--arch/x86/entry/entry_64.S12
-rw-r--r--arch/x86/entry/entry_64_compat.S13
-rw-r--r--arch/x86/events/intel/bts.c18
-rw-r--r--arch/x86/events/intel/ds.c16
-rw-r--r--arch/x86/include/asm/acpi.h2
-rw-r--r--arch/x86/include/asm/alternative.h4
-rw-r--r--arch/x86/include/asm/asm-prototypes.h25
-rw-r--r--arch/x86/include/asm/cpufeatures.h6
-rw-r--r--arch/x86/include/asm/mshyperv.h18
-rw-r--r--arch/x86/include/asm/msr-index.h3
-rw-r--r--arch/x86/include/asm/nospec-branch.h214
-rw-r--r--arch/x86/include/asm/pci_x86.h1
-rw-r--r--arch/x86/include/asm/pgtable_64_types.h14
-rw-r--r--arch/x86/include/asm/processor-flags.h2
-rw-r--r--arch/x86/include/asm/tlbflush.h6
-rw-r--r--arch/x86/include/asm/unwind.h17
-rw-r--r--arch/x86/include/asm/xen/hypercall.h5
-rw-r--r--arch/x86/kernel/acpi/boot.c35
-rw-r--r--arch/x86/kernel/alternative.c7
-rw-r--r--arch/x86/kernel/cpu/amd.c28
-rw-r--r--arch/x86/kernel/cpu/bugs.c185
-rw-r--r--arch/x86/kernel/cpu/common.c7
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c13
-rw-r--r--arch/x86/kernel/dumpstack.c31
-rw-r--r--arch/x86/kernel/ftrace_32.S6
-rw-r--r--arch/x86/kernel/ftrace_64.S8
-rw-r--r--arch/x86/kernel/irq_32.c9
-rw-r--r--arch/x86/kernel/process.c2
-rw-r--r--arch/x86/kernel/setup.c5
-rw-r--r--arch/x86/kernel/stacktrace.c2
-rw-r--r--arch/x86/kernel/tboot.c11
-rw-r--r--arch/x86/kvm/mmu.c19
-rw-r--r--arch/x86/kvm/svm.c32
-rw-r--r--arch/x86/kvm/vmx.c35
-rw-r--r--arch/x86/lib/Makefile1
-rw-r--r--arch/x86/lib/checksum_32.S7
-rw-r--r--arch/x86/lib/retpoline.S48
-rw-r--r--arch/x86/mm/dump_pagetables.c2
-rw-r--r--arch/x86/mm/init.c2
-rw-r--r--arch/x86/mm/kaslr.c32
-rw-r--r--arch/x86/mm/pti.c41
-rw-r--r--arch/x86/pci/common.c5
-rw-r--r--arch/x86/pci/fixup.c29
-rw-r--r--arch/x86/platform/efi/efi_64.c2
-rw-r--r--arch/x86/platform/efi/quirks.c13
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_bt.c2
-rw-r--r--arch/x86/xen/mmu_pv.c8
-rw-r--r--arch/x86/xen/xen-ops.h2
-rw-r--r--block/blk-core.c9
-rw-r--r--block/blk-mq.c2
-rw-r--r--block/blk.h2
-rw-r--r--crypto/af_alg.c4
-rw-r--r--crypto/algapi.c12
-rw-r--r--crypto/algif_aead.c2
-rw-r--r--crypto/algif_skcipher.c2
-rw-r--r--crypto/chacha20poly1305.c6
-rw-r--r--crypto/pcrypt.c19
-rw-r--r--drivers/acpi/acpi_lpss.c139
-rw-r--r--drivers/acpi/acpi_video.c14
-rw-r--r--drivers/acpi/acpica/acapps.h3
-rw-r--r--drivers/acpi/acpica/acdebug.h4
-rw-r--r--drivers/acpi/acpica/acglobal.h82
-rw-r--r--drivers/acpi/acpica/aclocal.h15
-rw-r--r--drivers/acpi/acpica/acmacros.h2
-rw-r--r--drivers/acpi/acpica/acnamesp.h3
-rw-r--r--drivers/acpi/acpica/acutils.h23
-rw-r--r--drivers/acpi/acpica/dbexec.c110
-rw-r--r--drivers/acpi/acpica/dbfileio.c4
-rw-r--r--drivers/acpi/acpica/dbinput.c145
-rw-r--r--drivers/acpi/acpica/dscontrol.c18
-rw-r--r--drivers/acpi/acpica/dsfield.c28
-rw-r--r--drivers/acpi/acpica/dsobject.c4
-rw-r--r--drivers/acpi/acpica/dspkginit.c21
-rw-r--r--drivers/acpi/acpica/dsutils.c3
-rw-r--r--drivers/acpi/acpica/dswload.c6
-rw-r--r--drivers/acpi/acpica/dswload2.c13
-rw-r--r--drivers/acpi/acpica/evregion.c10
-rw-r--r--drivers/acpi/acpica/exdump.c11
-rw-r--r--drivers/acpi/acpica/hwtimer.c29
-rw-r--r--drivers/acpi/acpica/hwvalid.c14
-rw-r--r--drivers/acpi/acpica/nsaccess.c13
-rw-r--r--drivers/acpi/acpica/nsconvert.c3
-rw-r--r--drivers/acpi/acpica/nsnames.c149
-rw-r--r--drivers/acpi/acpica/nssearch.c1
-rw-r--r--drivers/acpi/acpica/nsxfeval.c9
-rw-r--r--drivers/acpi/acpica/psargs.c2
-rw-r--r--drivers/acpi/acpica/psobject.c10
-rw-r--r--drivers/acpi/acpica/psutils.c14
-rw-r--r--drivers/acpi/acpica/utdebug.c18
-rw-r--r--drivers/acpi/acpica/utdecode.c11
-rw-r--r--drivers/acpi/acpica/uterror.c73
-rw-r--r--drivers/acpi/acpica/utinit.c1
-rw-r--r--drivers/acpi/acpica/utmath.c4
-rw-r--r--drivers/acpi/acpica/utmutex.c9
-rw-r--r--drivers/acpi/acpica/utnonansi.c11
-rw-r--r--drivers/acpi/acpica/utosi.c2
-rw-r--r--drivers/acpi/acpica/utstrsuppt.c42
-rw-r--r--drivers/acpi/acpica/uttrack.c6
-rw-r--r--drivers/acpi/acpica/utxferror.c8
-rw-r--r--drivers/acpi/apei/ghes.c79
-rw-r--r--drivers/acpi/battery.c36
-rw-r--r--drivers/acpi/button.c22
-rw-r--r--drivers/acpi/ec.c2
-rw-r--r--drivers/acpi/ec_sys.c2
-rw-r--r--drivers/acpi/evged.c47
-rw-r--r--drivers/acpi/internal.h2
-rw-r--r--drivers/acpi/nfit/core.c23
-rw-r--r--drivers/acpi/nfit/nfit.h1
-rw-r--r--drivers/acpi/numa.c3
-rw-r--r--drivers/acpi/pci_link.c2
-rw-r--r--drivers/acpi/pmic/intel_pmic_bxtwc.c9
-rw-r--r--drivers/acpi/pmic/intel_pmic_chtdc_ti.c5
-rw-r--r--drivers/acpi/pmic/intel_pmic_chtwc.c9
-rw-r--r--drivers/acpi/pmic/intel_pmic_crc.c7
-rw-r--r--drivers/acpi/pmic/intel_pmic_xpower.c7
-rw-r--r--drivers/acpi/sysfs.c26
-rw-r--r--drivers/acpi/utils.c41
-rw-r--r--drivers/base/Kconfig3
-rw-r--r--drivers/base/cpu.c48
-rw-r--r--drivers/block/loop.c10
-rw-r--r--drivers/block/rbd.c18
-rw-r--r--drivers/bus/sunxi-rsb.c1
-rw-r--r--drivers/crypto/chelsio/Kconfig1
-rw-r--r--drivers/crypto/inside-secure/safexcel.c1
-rw-r--r--drivers/crypto/inside-secure/safexcel_cipher.c85
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c89
-rw-r--r--drivers/crypto/n2_core.c3
-rw-r--r--drivers/firmware/efi/capsule-loader.c45
-rw-r--r--drivers/gpio/gpio-merrifield.c11
-rw-r--r--drivers/gpio/gpiolib.c35
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h2
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c47
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.h2
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c38
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c5
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h3
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h4
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c35
-rw-r--r--drivers/gpu/drm/i915/intel_display.c14
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c5
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c3
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c16
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c46
-rw-r--r--drivers/gpu/drm/tegra/sor.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c41
-rw-r--r--drivers/infiniband/core/core_priv.h2
-rw-r--r--drivers/infiniband/core/device.c18
-rw-r--r--drivers/infiniband/core/nldev.c54
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c25
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c5
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c5
-rw-r--r--drivers/input/joystick/analog.c2
-rw-r--r--drivers/input/misc/ims-pcu.c2
-rw-r--r--drivers/input/misc/xen-kbdfront.c2
-rw-r--r--drivers/input/mouse/elantech.c2
-rw-r--r--drivers/input/touchscreen/elants_i2c.c10
-rw-r--r--drivers/input/touchscreen/hideep.c3
-rw-r--r--drivers/iommu/arm-smmu-v3.c17
-rw-r--r--drivers/leds/led-core.c3
-rw-r--r--drivers/mfd/rtsx_pcr.c3
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c3
-rw-r--r--drivers/mmc/host/s3cmci.c6
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c1
-rw-r--r--drivers/mux/core.c4
-rw-r--r--drivers/net/can/flexcan.c2
-rw-r--r--drivers/net/can/usb/ems_usb.c1
-rw-r--r--drivers/net/can/usb/gs_usb.c2
-rw-r--r--drivers/net/can/vxcan.c2
-rw-r--r--drivers/net/dsa/b53/b53_common.c9
-rw-r--r--drivers/net/ethernet/3com/3c59x.c90
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c45
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c17
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c7
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c3
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h3
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c27
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c37
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c2
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c33
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c6
-rw-r--r--drivers/net/geneve.c14
-rw-r--r--drivers/net/macvlan.c7
-rw-r--r--drivers/net/phy/mdio-sun4i.c6
-rw-r--r--drivers/net/phy/phylink.c5
-rw-r--r--drivers/net/phy/sfp-bus.c6
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c23
-rw-r--r--drivers/net/wireless/ath/wcn36xx/pmc.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c8
-rw-r--r--drivers/net/xen-netfront.c1
-rw-r--r--drivers/nvdimm/pmem.c4
-rw-r--r--drivers/nvdimm/region_devs.c13
-rw-r--r--drivers/nvme/host/core.c7
-rw-r--r--drivers/nvme/host/fabrics.c1
-rw-r--r--drivers/nvme/host/nvme.h12
-rw-r--r--drivers/nvme/host/pci.c42
-rw-r--r--drivers/nvme/host/rdma.c14
-rw-r--r--drivers/nvme/target/fcloop.c2
-rw-r--r--drivers/of/of_mdio.c9
-rw-r--r--drivers/parisc/dino.c10
-rw-r--r--drivers/parisc/eisa_eeprom.c2
-rw-r--r--drivers/platform/x86/wmi.c2
-rw-r--r--drivers/s390/block/dasd_3990_erp.c10
-rw-r--r--drivers/s390/char/Makefile2
-rw-r--r--drivers/staging/android/ashmem.c2
-rw-r--r--drivers/usb/gadget/udc/core.c28
-rw-r--r--drivers/usb/misc/usb3503.c2
-rw-r--r--drivers/usb/mon/mon_bin.c8
-rw-r--r--drivers/usb/serial/cp210x.c2
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/usbip/usbip_common.c17
-rw-r--r--drivers/usb/usbip/vudc_rx.c19
-rw-r--r--drivers/usb/usbip/vudc_tx.c11
-rw-r--r--drivers/xen/gntdev.c8
-rw-r--r--drivers/xen/pvcalls-front.c2
-rw-r--r--fs/afs/dir.c37
-rw-r--r--fs/afs/inode.c4
-rw-r--r--fs/afs/rxrpc.c2
-rw-r--r--fs/afs/write.c8
-rw-r--r--fs/btrfs/delayed-inode.c45
-rw-r--r--fs/btrfs/volumes.c1
-rw-r--r--fs/exec.c9
-rw-r--r--fs/super.c6
-rw-r--r--fs/userfaultfd.c20
-rw-r--r--fs/xfs/xfs_aops.c4
-rw-r--r--fs/xfs/xfs_iomap.c2
-rw-r--r--fs/xfs/xfs_qm.c46
-rw-r--r--include/acpi/acconfig.h4
-rw-r--r--include/acpi/acexcep.h12
-rw-r--r--include/acpi/acpi_bus.h3
-rw-r--r--include/acpi/acpixf.h6
-rw-r--r--include/acpi/actbl1.h159
-rw-r--r--include/acpi/actbl2.h15
-rw-r--r--include/acpi/actypes.h4
-rw-r--r--include/crypto/if_alg.h5
-rw-r--r--include/linux/acpi.h6
-rw-r--r--include/linux/bpf.h36
-rw-r--r--include/linux/completion.h1
-rw-r--r--include/linux/cpu.h7
-rw-r--r--include/linux/crash_core.h2
-rw-r--r--include/linux/efi.h4
-rw-r--r--include/linux/fscache.h2
-rw-r--r--include/linux/irqflags.h4
-rw-r--r--include/linux/libnvdimm.h11
-rw-r--r--include/linux/lockdep.h2
-rw-r--r--include/linux/sh_eth.h1
-rw-r--r--include/net/sctp/structs.h2
-rw-r--r--include/net/vxlan.h2
-rw-r--r--include/uapi/linux/if_ether.h3
-rw-r--r--include/uapi/linux/libc-compat.h61
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_common.h2
-rw-r--r--init/Kconfig8
-rw-r--r--kernel/acct.c2
-rw-r--r--kernel/bpf/arraymap.c47
-rw-r--r--kernel/bpf/core.c19
-rw-r--r--kernel/bpf/inode.c40
-rw-r--r--kernel/bpf/sockmap.c11
-rw-r--r--kernel/bpf/syscall.c2
-rw-r--r--kernel/bpf/verifier.c36
-rw-r--r--kernel/cgroup/cgroup-v1.c6
-rw-r--r--kernel/cgroup/cgroup.c20
-rw-r--r--kernel/crash_core.c2
-rw-r--r--kernel/exit.c1
-rw-r--r--kernel/pid.c8
-rw-r--r--kernel/sched/completion.c5
-rw-r--r--kernel/sched/membarrier.c2
-rw-r--r--lib/mpi/longlong.h18
-rw-r--r--lib/test_bpf.c11
-rw-r--r--mm/debug.c28
-rw-r--r--mm/kmemleak.c2
-rw-r--r--mm/mprotect.c6
-rw-r--r--mm/page_alloc.c2
-rw-r--r--mm/sparse.c2
-rw-r--r--mm/vmscan.c3
-rw-r--r--mm/zsmalloc.c1
-rw-r--r--net/8021q/vlan.c7
-rw-r--r--net/bluetooth/l2cap_core.c20
-rw-r--r--net/caif/caif_dev.c5
-rw-r--r--net/caif/caif_usb.c4
-rw-r--r--net/caif/cfcnfg.c10
-rw-r--r--net/caif/cfctrl.c4
-rw-r--r--net/core/dev.c14
-rw-r--r--net/core/ethtool.c15
-rw-r--r--net/core/filter.c6
-rw-r--r--net/core/rtnetlink.c10
-rw-r--r--net/core/sock_diag.c2
-rw-r--r--net/core/sysctl_net_core.c6
-rw-r--r--net/ipv4/raw.c4
-rw-r--r--net/ipv6/exthdrs.c9
-rw-r--r--net/ipv6/ip6_fib.c73
-rw-r--r--net/ipv6/ip6_output.c5
-rw-r--r--net/ipv6/ip6_tunnel.c15
-rw-r--r--net/mac80211/rx.c2
-rw-r--r--net/netfilter/nf_tables_api.c8
-rw-r--r--net/netfilter/xt_bpf.c14
-rw-r--r--net/rds/rdma.c4
-rw-r--r--net/sched/act_gact.c2
-rw-r--r--net/sched/act_mirred.c2
-rw-r--r--net/sctp/input.c28
-rw-r--r--net/sctp/socket.c28
-rw-r--r--net/sctp/stream.c22
-rw-r--r--net/sctp/transport.c29
-rw-r--r--net/socket.c13
-rw-r--r--net/tipc/group.c22
-rw-r--r--net/wireless/nl80211.c3
-rw-r--r--scripts/genksyms/.gitignore1
-rw-r--r--scripts/kconfig/expr.c5
-rw-r--r--security/Kconfig3
-rw-r--r--security/apparmor/domain.c9
-rw-r--r--security/apparmor/include/perms.h3
-rw-r--r--security/apparmor/ipc.c53
-rw-r--r--security/apparmor/mount.c12
-rw-r--r--security/commoncap.c21
-rw-r--r--sound/core/oss/pcm_oss.c41
-rw-r--r--sound/core/oss/pcm_plugin.c14
-rw-r--r--sound/core/pcm_lib.c4
-rw-r--r--sound/core/pcm_native.c9
-rw-r--r--sound/drivers/aloop.c98
-rw-r--r--tools/objtool/Makefile2
-rw-r--r--tools/objtool/check.c69
-rw-r--r--tools/objtool/check.h2
-rw-r--r--tools/power/acpi/tools/acpidump/apmain.c28
-rw-r--r--tools/testing/nvdimm/test/nfit.c14
-rw-r--r--tools/testing/selftests/bpf/test_align.c22
-rw-r--r--tools/testing/selftests/x86/Makefile2
-rw-r--r--tools/testing/selftests/x86/test_vsyscall.c500
475 files changed, 6107 insertions, 1912 deletions
diff --git a/.mailmap b/.mailmap
index 1469ff0d3f4d..e18cab73e209 100644
--- a/.mailmap
+++ b/.mailmap
@@ -107,6 +107,7 @@ Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
107Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com> 107Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
108Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com> 108Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
109Mark Brown <broonie@sirena.org.uk> 109Mark Brown <broonie@sirena.org.uk>
110Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com>
110Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com> 111Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
111Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com> 112Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
112Matthieu CASTET <castet.matthieu@free.fr> 113Matthieu CASTET <castet.matthieu@free.fr>
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index d6d862db3b5d..bfd29bc8d37a 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -375,3 +375,19 @@ Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
375Description: information about CPUs heterogeneity. 375Description: information about CPUs heterogeneity.
376 376
377 cpu_capacity: capacity of cpu#. 377 cpu_capacity: capacity of cpu#.
378
379What: /sys/devices/system/cpu/vulnerabilities
380 /sys/devices/system/cpu/vulnerabilities/meltdown
381 /sys/devices/system/cpu/vulnerabilities/spectre_v1
382 /sys/devices/system/cpu/vulnerabilities/spectre_v2
383Date: January 2018
384Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
385Description: Information about CPU vulnerabilities
386
387 The files are named after the code names of CPU
388 vulnerabilities. The output of those files reflects the
389 state of the CPUs in the system. Possible output values:
390
391 "Not affected" CPU is not affected by the vulnerability
392 "Vulnerable" CPU is affected and no mitigation in effect
393 "Mitigation: $M" CPU is affected and mitigation $M is in effect
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index af7104aaffd9..02045e0b50a5 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -114,7 +114,6 @@
114 This facility can be used to prevent such uncontrolled 114 This facility can be used to prevent such uncontrolled
115 GPE floodings. 115 GPE floodings.
116 Format: <int> 116 Format: <int>
117 Support masking of GPEs numbered from 0x00 to 0x7f.
118 117
119 acpi_no_auto_serialize [HW,ACPI] 118 acpi_no_auto_serialize [HW,ACPI]
120 Disable auto-serialization of AML methods 119 Disable auto-serialization of AML methods
@@ -713,9 +712,6 @@
713 It will be ignored when crashkernel=X,high is not used 712 It will be ignored when crashkernel=X,high is not used
714 or memory reserved is below 4G. 713 or memory reserved is below 4G.
715 714
716 crossrelease_fullstack
717 [KNL] Allow to record full stack trace in cross-release
718
719 cryptomgr.notests 715 cryptomgr.notests
720 [KNL] Disable crypto self-tests 716 [KNL] Disable crypto self-tests
721 717
@@ -2626,6 +2622,11 @@
2626 nosmt [KNL,S390] Disable symmetric multithreading (SMT). 2622 nosmt [KNL,S390] Disable symmetric multithreading (SMT).
2627 Equivalent to smt=1. 2623 Equivalent to smt=1.
2628 2624
2625 nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2
2626 (indirect branch prediction) vulnerability. System may
2627 allow data leaks with this option, which is equivalent
2628 to spectre_v2=off.
2629
2629 noxsave [BUGS=X86] Disables x86 extended register state save 2630 noxsave [BUGS=X86] Disables x86 extended register state save
2630 and restore using xsave. The kernel will fallback to 2631 and restore using xsave. The kernel will fallback to
2631 enabling legacy floating-point and sse state. 2632 enabling legacy floating-point and sse state.
@@ -2712,8 +2713,6 @@
2712 steal time is computed, but won't influence scheduler 2713 steal time is computed, but won't influence scheduler
2713 behaviour 2714 behaviour
2714 2715
2715 nopti [X86-64] Disable kernel page table isolation
2716
2717 nolapic [X86-32,APIC] Do not enable or use the local APIC. 2716 nolapic [X86-32,APIC] Do not enable or use the local APIC.
2718 2717
2719 nolapic_timer [X86-32,APIC] Do not use the local APIC timer. 2718 nolapic_timer [X86-32,APIC] Do not use the local APIC timer.
@@ -3100,6 +3099,12 @@
3100 pcie_scan_all Scan all possible PCIe devices. Otherwise we 3099 pcie_scan_all Scan all possible PCIe devices. Otherwise we
3101 only look for one device below a PCIe downstream 3100 only look for one device below a PCIe downstream
3102 port. 3101 port.
3102 big_root_window Try to add a big 64bit memory window to the PCIe
3103 root complex on AMD CPUs. Some GFX hardware
3104 can resize a BAR to allow access to all VRAM.
3105 Adding the window is slightly risky (it may
3106 conflict with unreported devices), so this
3107 taints the kernel.
3103 3108
3104 pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power 3109 pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power
3105 Management. 3110 Management.
@@ -3288,11 +3293,20 @@
3288 pt. [PARIDE] 3293 pt. [PARIDE]
3289 See Documentation/blockdev/paride.txt. 3294 See Documentation/blockdev/paride.txt.
3290 3295
3291 pti= [X86_64] 3296 pti= [X86_64] Control Page Table Isolation of user and
3292 Control user/kernel address space isolation: 3297 kernel address spaces. Disabling this feature
3293 on - enable 3298 removes hardening, but improves performance of
3294 off - disable 3299 system calls and interrupts.
3295 auto - default setting 3300
3301 on - unconditionally enable
3302 off - unconditionally disable
3303 auto - kernel detects whether your CPU model is
3304 vulnerable to issues that PTI mitigates
3305
3306 Not specifying this option is equivalent to pti=auto.
3307
3308 nopti [X86_64]
3309 Equivalent to pti=off
3296 3310
3297 pty.legacy_count= 3311 pty.legacy_count=
3298 [KNL] Number of legacy pty's. Overwrites compiled-in 3312 [KNL] Number of legacy pty's. Overwrites compiled-in
@@ -3943,6 +3957,29 @@
3943 sonypi.*= [HW] Sony Programmable I/O Control Device driver 3957 sonypi.*= [HW] Sony Programmable I/O Control Device driver
3944 See Documentation/laptops/sonypi.txt 3958 See Documentation/laptops/sonypi.txt
3945 3959
3960 spectre_v2= [X86] Control mitigation of Spectre variant 2
3961 (indirect branch speculation) vulnerability.
3962
3963 on - unconditionally enable
3964 off - unconditionally disable
3965 auto - kernel detects whether your CPU model is
3966 vulnerable
3967
3968 Selecting 'on' will, and 'auto' may, choose a
3969 mitigation method at run time according to the
3970 CPU, the available microcode, the setting of the
3971 CONFIG_RETPOLINE configuration option, and the
3972 compiler with which the kernel was built.
3973
3974 Specific mitigations can also be selected manually:
3975
3976 retpoline - replace indirect branches
3977 retpoline,generic - google's original retpoline
3978 retpoline,amd - AMD-specific minimal thunk
3979
3980 Not specifying this option is equivalent to
3981 spectre_v2=auto.
3982
3946 spia_io_base= [HW,MTD] 3983 spia_io_base= [HW,MTD]
3947 spia_fio_base= 3984 spia_fio_base=
3948 spia_pedr= 3985 spia_pedr=
diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt
index c0727dc36271..f2f3f8592a6f 100644
--- a/Documentation/filesystems/nilfs2.txt
+++ b/Documentation/filesystems/nilfs2.txt
@@ -25,8 +25,8 @@ available from the following download page. At least "mkfs.nilfs2",
25cleaner or garbage collector) are required. Details on the tools are 25cleaner or garbage collector) are required. Details on the tools are
26described in the man pages included in the package. 26described in the man pages included in the package.
27 27
28Project web page: http://nilfs.sourceforge.net/ 28Project web page: https://nilfs.sourceforge.io/
29Download page: http://nilfs.sourceforge.net/en/download.html 29Download page: https://nilfs.sourceforge.io/en/download.html
30List info: http://vger.kernel.org/vger-lists.html#linux-nilfs 30List info: http://vger.kernel.org/vger-lists.html#linux-nilfs
31 31
32Caveats 32Caveats
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index 2e7ee0313c1c..e94d3ac2bdd0 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -341,10 +341,7 @@ GuC
341GuC-specific firmware loader 341GuC-specific firmware loader
342---------------------------- 342----------------------------
343 343
344.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_loader.c 344.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_fw.c
345 :doc: GuC-specific firmware loader
346
347.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_loader.c
348 :internal: 345 :internal:
349 346
350GuC-based command submission 347GuC-based command submission
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index 262722d8867b..c4a293a03c33 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -200,10 +200,14 @@ module state. Dependency expressions have the following syntax:
200<expr> ::= <symbol> (1) 200<expr> ::= <symbol> (1)
201 <symbol> '=' <symbol> (2) 201 <symbol> '=' <symbol> (2)
202 <symbol> '!=' <symbol> (3) 202 <symbol> '!=' <symbol> (3)
203 '(' <expr> ')' (4) 203 <symbol1> '<' <symbol2> (4)
204 '!' <expr> (5) 204 <symbol1> '>' <symbol2> (4)
205 <expr> '&&' <expr> (6) 205 <symbol1> '<=' <symbol2> (4)
206 <expr> '||' <expr> (7) 206 <symbol1> '>=' <symbol2> (4)
207 '(' <expr> ')' (5)
208 '!' <expr> (6)
209 <expr> '&&' <expr> (7)
210 <expr> '||' <expr> (8)
207 211
208Expressions are listed in decreasing order of precedence. 212Expressions are listed in decreasing order of precedence.
209 213
@@ -214,10 +218,13 @@ Expressions are listed in decreasing order of precedence.
214 otherwise 'n'. 218 otherwise 'n'.
215(3) If the values of both symbols are equal, it returns 'n', 219(3) If the values of both symbols are equal, it returns 'n',
216 otherwise 'y'. 220 otherwise 'y'.
217(4) Returns the value of the expression. Used to override precedence. 221(4) If value of <symbol1> is respectively lower, greater, lower-or-equal,
218(5) Returns the result of (2-/expr/). 222 or greater-or-equal than value of <symbol2>, it returns 'y',
219(6) Returns the result of min(/expr/, /expr/). 223 otherwise 'n'.
220(7) Returns the result of max(/expr/, /expr/). 224(5) Returns the value of the expression. Used to override precedence.
225(6) Returns the result of (2-/expr/).
226(7) Returns the result of min(/expr/, /expr/).
227(8) Returns the result of max(/expr/, /expr/).
221 228
222An expression can have a value of 'n', 'm' or 'y' (or 0, 1, 2 229An expression can have a value of 'n', 'm' or 'y' (or 0, 1, 2
223respectively for calculations). A menu entry becomes visible when its 230respectively for calculations). A menu entry becomes visible when its
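
Since an expression evaluates to 'n', 'm' or 'y' (0, 1 or 2), rules (6)-(8) above are plain arithmetic on those values: '!' is (2 - x), '&&' is min() and '||' is max(). A small illustrative sketch in C (hypothetical helper names, not part of the kconfig sources):

    #include <stdio.h>

    enum tristate { N = 0, M = 1, Y = 2 };   /* 'n', 'm', 'y' as 0, 1, 2 */

    static int tri_not(int a)        { return 2 - a; }          /* (6) !expr        */
    static int tri_and(int a, int b) { return a < b ? a : b; }  /* (7) expr && expr */
    static int tri_or(int a, int b)  { return a > b ? a : b; }  /* (8) expr || expr */

    int main(void)
    {
        /* FOO=m, BAR=y:  FOO && !BAR  ==  min(1, 2 - 2)  ==  0  ('n') */
        printf("%d\n", tri_and(M, tri_not(Y)));
        /* FOO=m, BAR=y:  FOO || !BAR  ==  max(1, 0)      ==  1  ('m') */
        printf("%d\n", tri_or(M, tri_not(Y)));
        return 0;
    }
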
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 66e620866245..7d4b15977d61 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -9,6 +9,7 @@ Contents:
9 batman-adv 9 batman-adv
10 kapi 10 kapi
11 z8530book 11 z8530book
12 msg_zerocopy
12 13
13.. only:: subproject 14.. only:: subproject
14 15
@@ -16,4 +17,3 @@ Contents:
16 ======= 17 =======
17 18
18 * :ref:`genindex` 19 * :ref:`genindex`
19
diff --git a/Documentation/networking/msg_zerocopy.rst b/Documentation/networking/msg_zerocopy.rst
index 77f6d7e25cfd..291a01264967 100644
--- a/Documentation/networking/msg_zerocopy.rst
+++ b/Documentation/networking/msg_zerocopy.rst
@@ -72,6 +72,10 @@ this flag, a process must first signal intent by setting a socket option:
72 if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one))) 72 if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)))
73 error(1, errno, "setsockopt zerocopy"); 73 error(1, errno, "setsockopt zerocopy");
74 74
75Setting the socket option only works when the socket is in its initial
76(TCP_CLOSED) state. Trying to set the option for a socket returned by accept(),
77for example, will lead to an EBUSY error. In this case, the option should be set
78to the listening socket and it will be inherited by the accepted sockets.
75 79
76Transmission 80Transmission
77------------ 81------------
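
The paragraph added above is the key usage constraint: SO_ZEROCOPY has to be set while the socket is still in its initial state, so for a server it goes on the listening socket and is inherited across accept(); setting it on the accepted socket fails with EBUSY. A minimal server-side sketch of that ordering (error handling and the actual MSG_ZEROCOPY send loop omitted, port number arbitrary):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef SO_ZEROCOPY
    #define SO_ZEROCOPY 60    /* asm-generic/socket.h value; needs a recent kernel */
    #endif

    int main(void)
    {
        int one = 1, lfd, cfd;
        struct sockaddr_in addr = {
            .sin_family = AF_INET,
            .sin_port   = htons(8000),    /* arbitrary example port */
        };

        lfd = socket(AF_INET, SOCK_STREAM, 0);

        /* Set the option before accept(), on the listening socket ... */
        setsockopt(lfd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));
        bind(lfd, (struct sockaddr *)&addr, sizeof(addr));
        listen(lfd, 1);

        /* ... accepted sockets inherit it; setting it here would return EBUSY. */
        cfd = accept(lfd, NULL, NULL);

        /* cfd may now transmit with send(cfd, buf, len, MSG_ZEROCOPY). */
        close(cfd);
        close(lfd);
        return 0;
    }
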
diff --git a/Documentation/usb/gadget-testing.txt b/Documentation/usb/gadget-testing.txt
index 441a4b9b666f..5908a21fddb6 100644
--- a/Documentation/usb/gadget-testing.txt
+++ b/Documentation/usb/gadget-testing.txt
@@ -693,7 +693,7 @@ such specification consists of a number of lines with an inverval value
693in each line. The rules stated above are best illustrated with an example: 693in each line. The rules stated above are best illustrated with an example:
694 694
695# mkdir functions/uvc.usb0/control/header/h 695# mkdir functions/uvc.usb0/control/header/h
696# cd functions/uvc.usb0/control/header/h 696# cd functions/uvc.usb0/control/
697# ln -s header/h class/fs 697# ln -s header/h class/fs
698# ln -s header/h class/ss 698# ln -s header/h class/ss
699# mkdir -p functions/uvc.usb0/streaming/uncompressed/u/360p 699# mkdir -p functions/uvc.usb0/streaming/uncompressed/u/360p
diff --git a/Documentation/x86/pti.txt b/Documentation/x86/pti.txt
new file mode 100644
index 000000000000..d11eff61fc9a
--- /dev/null
+++ b/Documentation/x86/pti.txt
@@ -0,0 +1,186 @@
1Overview
2========
3
4Page Table Isolation (pti, previously known as KAISER[1]) is a
5countermeasure against attacks on the shared user/kernel address
6space such as the "Meltdown" approach[2].
7
8To mitigate this class of attacks, we create an independent set of
9page tables for use only when running userspace applications. When
10the kernel is entered via syscalls, interrupts or exceptions, the
11page tables are switched to the full "kernel" copy. When the system
12switches back to user mode, the user copy is used again.
13
14The userspace page tables contain only a minimal amount of kernel
15data: only what is needed to enter/exit the kernel such as the
16entry/exit functions themselves and the interrupt descriptor table
17(IDT). There are a few strictly unnecessary things that get mapped
18such as the first C function when entering an interrupt (see
19comments in pti.c).
20
21This approach helps to ensure that side-channel attacks leveraging
22the paging structures do not function when PTI is enabled. It can be
23enabled by setting CONFIG_PAGE_TABLE_ISOLATION=y at compile time.
24Once enabled at compile-time, it can be disabled at boot with the
25'nopti' or 'pti=' kernel parameters (see kernel-parameters.txt).
26
27Page Table Management
28=====================
29
30When PTI is enabled, the kernel manages two sets of page tables.
31The first set is very similar to the single set which is present in
32kernels without PTI. This includes a complete mapping of userspace
33that the kernel can use for things like copy_to_user().
34
35Although _complete_, the user portion of the kernel page tables is
36crippled by setting the NX bit in the top level. This ensures
37that any missed kernel->user CR3 switch will immediately crash
38userspace upon executing its first instruction.
39
40The userspace page tables map only the kernel data needed to enter
41and exit the kernel. This data is entirely contained in the 'struct
42cpu_entry_area' structure which is placed in the fixmap which gives
43each CPU's copy of the area a compile-time-fixed virtual address.
44
45For new userspace mappings, the kernel makes the entries in its
46page tables like normal. The only difference is when the kernel
47makes entries in the top (PGD) level. In addition to setting the
48entry in the main kernel PGD, a copy of the entry is made in the
49userspace page tables' PGD.
50
51This sharing at the PGD level also inherently shares all the lower
52layers of the page tables. This leaves a single, shared set of
53userspace page tables to manage. One PTE to lock, one set of
54accessed bits, dirty bits, etc...
55
56Overhead
57========
58
59Protection against side-channel attacks is important. But,
60this protection comes at a cost:
61
621. Increased Memory Use
63 a. Each process now needs an order-1 PGD instead of order-0.
64 (Consumes an additional 4k per process).
65 b. The 'cpu_entry_area' structure must be 2MB in size and 2MB
66 aligned so that it can be mapped by setting a single PMD
67 entry. This consumes nearly 2MB of RAM once the kernel
68 is decompressed, but no space in the kernel image itself.
69
702. Runtime Cost
71 a. CR3 manipulation to switch between the page table copies
72 must be done at interrupt, syscall, and exception entry
73 and exit (it can be skipped when the kernel is interrupted,
74 though.) Moves to CR3 are on the order of a hundred
75 cycles, and are required at every entry and exit.
76 b. A "trampoline" must be used for SYSCALL entry. This
77 trampoline depends on a smaller set of resources than the
78 non-PTI SYSCALL entry code, so requires mapping fewer
79 things into the userspace page tables. The downside is
80 that stacks must be switched at entry time.
81 d. Global pages are disabled for all kernel structures not
82 mapped into both kernel and userspace page tables. This
83 feature of the MMU allows different processes to share TLB
84 entries mapping the kernel. Losing the feature means more
85 TLB misses after a context switch. The actual loss of
86 performance is very small, however, never exceeding 1%.
87 d. Process Context IDentifiers (PCID) is a CPU feature that
88 allows us to skip flushing the entire TLB when switching page
89 tables by setting a special bit in CR3 when the page tables
90 are changed. This makes switching the page tables (at context
91 switch, or kernel entry/exit) cheaper. But, on systems with
92 PCID support, the context switch code must flush both the user
93 and kernel entries out of the TLB. The user PCID TLB flush is
94 deferred until the exit to userspace, minimizing the cost.
95 See intel.com/sdm for the gory PCID/INVPCID details.
96 e. The userspace page tables must be populated for each new
97 process. Even without PTI, the shared kernel mappings
98 are created by copying top-level (PGD) entries into each
99 new process. But, with PTI, there are now *two* kernel
100 mappings: one in the kernel page tables that maps everything
101 and one for the entry/exit structures. At fork(), we need to
102 copy both.
103 f. In addition to the fork()-time copying, there must also
104 be an update to the userspace PGD any time a set_pgd() is done
105 on a PGD used to map userspace. This ensures that the kernel
106 and userspace copies always map the same userspace
107 memory.
108 g. On systems without PCID support, each CR3 write flushes
109 the entire TLB. That means that each syscall, interrupt
110 or exception flushes the TLB.
111 h. INVPCID is a TLB-flushing instruction which allows flushing
112 of TLB entries for non-current PCIDs. Some systems support
113 PCIDs, but do not support INVPCID. On these systems, addresses
114 can only be flushed from the TLB for the current PCID. When
115 flushing a kernel address, we need to flush all PCIDs, so a
116 single kernel address flush will require a TLB-flushing CR3
117 write upon the next use of every PCID.
118
119Possible Future Work
120====================
1211. We can be more careful about not actually writing to CR3
122 unless its value is actually changed.
1232. Allow PTI to be enabled/disabled at runtime in addition to the
124 boot-time switching.
125
126Testing
127========
128
129To test stability of PTI, the following test procedure is recommended,
130ideally doing all of these in parallel:
131
1321. Set CONFIG_DEBUG_ENTRY=y
1332. Run several copies of all of the tools/testing/selftests/x86/ tests
134 (excluding MPX and protection_keys) in a loop on multiple CPUs for
135 several minutes. These tests frequently uncover corner cases in the
136 kernel entry code. In general, old kernels might cause these tests
137 themselves to crash, but they should never crash the kernel.
1383. Run the 'perf' tool in a mode (top or record) that generates many
139 frequent performance monitoring non-maskable interrupts (see "NMI"
140 in /proc/interrupts). This exercises the NMI entry/exit code which
141 is known to trigger bugs in code paths that did not expect to be
142 interrupted, including nested NMIs. Using "-c" boosts the rate of
143 NMIs, and using two -c with separate counters encourages nested NMIs
144 and less deterministic behavior.
145
146 while true; do perf record -c 10000 -e instructions,cycles -a sleep 10; done
147
1484. Launch a KVM virtual machine.
1495. Run 32-bit binaries on systems supporting the SYSCALL instruction.
150 This has been a lightly-tested code path and needs extra scrutiny.
151
152Debugging
153=========
154
155Bugs in PTI cause a few different signatures of crashes
156that are worth noting here.
157
158 * Failures of the selftests/x86 code. Usually a bug in one of the
159 more obscure corners of entry_64.S
160 * Crashes in early boot, especially around CPU bringup. Bugs
161 in the trampoline code or mappings cause these.
162 * Crashes at the first interrupt. Caused by bugs in entry_64.S,
163 like screwing up a page table switch. Also caused by
164 incorrectly mapping the IRQ handler entry code.
165 * Crashes at the first NMI. The NMI code is separate from main
166 interrupt handlers and can have bugs that do not affect
167 normal interrupts. Also caused by incorrectly mapping NMI
168 code. NMIs that interrupt the entry code must be very
169 careful and can be the cause of crashes that show up when
170 running perf.
171 * Kernel crashes at the first exit to userspace. entry_64.S
172 bugs, or failing to map some of the exit code.
173 * Crashes at first interrupt that interrupts userspace. The paths
174 in entry_64.S that return to userspace are sometimes separate
175 from the ones that return to the kernel.
176 * Double faults: overflowing the kernel stack because of page
177 faults upon page faults. Caused by touching non-pti-mapped
178 data in the entry code, or forgetting to switch to kernel
179 CR3 before calling into C functions which are not pti-mapped.
180 * Userspace segfaults early in boot, sometimes manifesting
181 as mount(8) failing to mount the rootfs. These have
182 tended to be TLB invalidation issues. Usually invalidating
183 the wrong PCID, or otherwise missing an invalidation.
184
1851. https://gruss.cc/files/kaiser.pdf
1862. https://meltdownattack.com/meltdown.pdf
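
As a practical follow-up to the boot-time switches described above, whether the running kernel actually enabled PTI can be checked from user space. A small sketch, assuming the x86 "pti" feature flag shows up in /proc/cpuinfo when the mitigation is active (the vulnerabilities/meltdown sysfs file documented earlier in this merge is an alternative):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char line[4096];
        FILE *f = fopen("/proc/cpuinfo", "r");

        if (!f)
            return 1;

        while (fgets(line, sizeof(line), f)) {
            if (strncmp(line, "flags", 5))
                continue;    /* only the "flags" lines matter */
            /* Assumes PTI is reported as the "pti" CPU flag. */
            printf("PTI %s\n", strstr(line, " pti") ? "enabled" : "not reported");
            break;
        }
        fclose(f);
        return 0;
    }
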
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index ad41b3813f0a..ea91cb61a602 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -12,8 +12,9 @@ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
12... unused hole ... 12... unused hole ...
13ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB) 13ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB)
14... unused hole ... 14... unused hole ...
15fffffe0000000000 - fffffe7fffffffff (=39 bits) LDT remap for PTI 15 vaddr_end for KASLR
16fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping 16fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping
17fffffe8000000000 - fffffeffffffffff (=39 bits) LDT remap for PTI
17ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks 18ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
18... unused hole ... 19... unused hole ...
19ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space 20ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
@@ -37,13 +38,15 @@ ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB)
37... unused hole ... 38... unused hole ...
38ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB) 39ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB)
39... unused hole ... 40... unused hole ...
40fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping 41 vaddr_end for KASLR
42fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping
43... unused hole ...
41ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks 44ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
42... unused hole ... 45... unused hole ...
43ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space 46ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
44... unused hole ... 47... unused hole ...
45ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0 48ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0
46ffffffffa0000000 - [fixmap start] (~1526 MB) module mapping space 49ffffffffa0000000 - fffffffffeffffff (1520 MB) module mapping space
47[fixmap start] - ffffffffff5fffff kernel-internal fixmap range 50[fixmap start] - ffffffffff5fffff kernel-internal fixmap range
48ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI 51ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI
49ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole 52ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
@@ -67,9 +70,10 @@ memory window (this size is arbitrary, it can be raised later if needed).
67The mappings are not part of any other kernel PGD and are only available 70The mappings are not part of any other kernel PGD and are only available
68during EFI runtime calls. 71during EFI runtime calls.
69 72
70The module mapping space size changes based on the CONFIG requirements for the
71following fixmap section.
72
73Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all 73Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all
74physical memory, vmalloc/ioremap space and virtual memory map are randomized. 74physical memory, vmalloc/ioremap space and virtual memory map are randomized.
75Their order is preserved but their base will be offset early at boot time. 75Their order is preserved but their base will be offset early at boot time.
76
77Be very careful vs. KASLR when changing anything here. The KASLR address
78range must not overlap with anything except the KASAN shadow area, which is
79correct as KASAN disables KASLR.
diff --git a/MAINTAINERS b/MAINTAINERS
index b46c9cea5ae5..4c1cdc399a22 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -321,7 +321,7 @@ F: drivers/acpi/apei/
321 321
322ACPI COMPONENT ARCHITECTURE (ACPICA) 322ACPI COMPONENT ARCHITECTURE (ACPICA)
323M: Robert Moore <robert.moore@intel.com> 323M: Robert Moore <robert.moore@intel.com>
324M: Lv Zheng <lv.zheng@intel.com> 324M: Erik Schmauss <erik.schmauss@intel.com>
325M: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com> 325M: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
326L: linux-acpi@vger.kernel.org 326L: linux-acpi@vger.kernel.org
327L: devel@acpica.org 327L: devel@acpica.org
@@ -5149,15 +5149,15 @@ F: sound/usb/misc/ua101.c
5149EFI TEST DRIVER 5149EFI TEST DRIVER
5150L: linux-efi@vger.kernel.org 5150L: linux-efi@vger.kernel.org
5151M: Ivan Hu <ivan.hu@canonical.com> 5151M: Ivan Hu <ivan.hu@canonical.com>
5152M: Matt Fleming <matt@codeblueprint.co.uk> 5152M: Ard Biesheuvel <ard.biesheuvel@linaro.org>
5153S: Maintained 5153S: Maintained
5154F: drivers/firmware/efi/test/ 5154F: drivers/firmware/efi/test/
5155 5155
5156EFI VARIABLE FILESYSTEM 5156EFI VARIABLE FILESYSTEM
5157M: Matthew Garrett <matthew.garrett@nebula.com> 5157M: Matthew Garrett <matthew.garrett@nebula.com>
5158M: Jeremy Kerr <jk@ozlabs.org> 5158M: Jeremy Kerr <jk@ozlabs.org>
5159M: Matt Fleming <matt@codeblueprint.co.uk> 5159M: Ard Biesheuvel <ard.biesheuvel@linaro.org>
5160T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git 5160T: git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
5161L: linux-efi@vger.kernel.org 5161L: linux-efi@vger.kernel.org
5162S: Maintained 5162S: Maintained
5163F: fs/efivarfs/ 5163F: fs/efivarfs/
@@ -5318,7 +5318,6 @@ S: Supported
5318F: security/integrity/evm/ 5318F: security/integrity/evm/
5319 5319
5320EXTENSIBLE FIRMWARE INTERFACE (EFI) 5320EXTENSIBLE FIRMWARE INTERFACE (EFI)
5321M: Matt Fleming <matt@codeblueprint.co.uk>
5322M: Ard Biesheuvel <ard.biesheuvel@linaro.org> 5321M: Ard Biesheuvel <ard.biesheuvel@linaro.org>
5323L: linux-efi@vger.kernel.org 5322L: linux-efi@vger.kernel.org
5324T: git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git 5323T: git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
@@ -9639,8 +9638,8 @@ F: include/uapi/linux/sunrpc/
9639NILFS2 FILESYSTEM 9638NILFS2 FILESYSTEM
9640M: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp> 9639M: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
9641L: linux-nilfs@vger.kernel.org 9640L: linux-nilfs@vger.kernel.org
9642W: http://nilfs.sourceforge.net/ 9641W: https://nilfs.sourceforge.io/
9643W: http://nilfs.osdn.jp/ 9642W: https://nilfs.osdn.jp/
9644T: git git://github.com/konis/nilfs2.git 9643T: git git://github.com/konis/nilfs2.git
9645S: Supported 9644S: Supported
9646F: Documentation/filesystems/nilfs2.txt 9645F: Documentation/filesystems/nilfs2.txt
@@ -10135,7 +10134,7 @@ F: drivers/irqchip/irq-ompic.c
10135F: drivers/irqchip/irq-or1k-* 10134F: drivers/irqchip/irq-or1k-*
10136 10135
10137OPENVSWITCH 10136OPENVSWITCH
10138M: Pravin Shelar <pshelar@nicira.com> 10137M: Pravin B Shelar <pshelar@ovn.org>
10139L: netdev@vger.kernel.org 10138L: netdev@vger.kernel.org
10140L: dev@openvswitch.org 10139L: dev@openvswitch.org
10141W: http://openvswitch.org 10140W: http://openvswitch.org
diff --git a/Makefile b/Makefile
index eb1f5973813e..bf5b8cbb9469 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
2VERSION = 4 2VERSION = 4
3PATCHLEVEL = 15 3PATCHLEVEL = 15
4SUBLEVEL = 0 4SUBLEVEL = 0
5EXTRAVERSION = -rc6 5EXTRAVERSION = -rc8
6NAME = Fearless Coyote 6NAME = Fearless Coyote
7 7
8# *DOCUMENTATION* 8# *DOCUMENTATION*
@@ -484,26 +484,6 @@ CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
484endif 484endif
485KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) 485KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
486KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) 486KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
487KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
488KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
489KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
490KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
491KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
492# Quiet clang warning: comparison of unsigned expression < 0 is always false
493KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
494# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
495# source of a reference will be _MergedGlobals and not on of the whitelisted names.
496# See modpost pattern 2
497KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
498KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
499KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
500KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
501else
502
503# These warnings generated too much noise in a regular build.
504# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
505KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
506KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
507endif 487endif
508 488
509ifeq ($(config-targets),1) 489ifeq ($(config-targets),1)
@@ -716,6 +696,29 @@ ifdef CONFIG_CC_STACKPROTECTOR
716endif 696endif
717KBUILD_CFLAGS += $(stackp-flag) 697KBUILD_CFLAGS += $(stackp-flag)
718 698
699ifeq ($(cc-name),clang)
700KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
701KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
702KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
703KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
704KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
705# Quiet clang warning: comparison of unsigned expression < 0 is always false
706KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
707# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
708# source of a reference will be _MergedGlobals and not on of the whitelisted names.
709# See modpost pattern 2
710KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
711KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
712KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
713KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
714else
715
716# These warnings generated too much noise in a regular build.
717# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
718KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
719KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
720endif
721
719ifdef CONFIG_FRAME_POINTER 722ifdef CONFIG_FRAME_POINTER
720KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls 723KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
721else 724else
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
index 4e6e9f57e790..dc91c663bcc0 100644
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -35,6 +35,14 @@
35 reg = <0x80 0x10>, <0x100 0x10>; 35 reg = <0x80 0x10>, <0x100 0x10>;
36 #clock-cells = <0>; 36 #clock-cells = <0>;
37 clocks = <&input_clk>; 37 clocks = <&input_clk>;
38
39 /*
40 * Set initial core pll output frequency to 90MHz.
41 * It will be applied at the core pll driver probing
42 * on early boot.
43 */
44 assigned-clocks = <&core_clk>;
45 assigned-clock-rates = <90000000>;
38 }; 46 };
39 47
40 core_intc: archs-intc@cpu { 48 core_intc: archs-intc@cpu {
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi
index 63954a8b0100..69ff4895f2ba 100644
--- a/arch/arc/boot/dts/axc003_idu.dtsi
+++ b/arch/arc/boot/dts/axc003_idu.dtsi
@@ -35,6 +35,14 @@
35 reg = <0x80 0x10>, <0x100 0x10>; 35 reg = <0x80 0x10>, <0x100 0x10>;
36 #clock-cells = <0>; 36 #clock-cells = <0>;
37 clocks = <&input_clk>; 37 clocks = <&input_clk>;
38
39 /*
40 * Set initial core pll output frequency to 100MHz.
41 * It will be applied at the core pll driver probing
42 * on early boot.
43 */
44 assigned-clocks = <&core_clk>;
45 assigned-clock-rates = <100000000>;
38 }; 46 };
39 47
40 core_intc: archs-intc@cpu { 48 core_intc: archs-intc@cpu {
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 8f627c200d60..006aa3de5348 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -114,6 +114,14 @@
114 reg = <0x00 0x10>, <0x14B8 0x4>; 114 reg = <0x00 0x10>, <0x14B8 0x4>;
115 #clock-cells = <0>; 115 #clock-cells = <0>;
116 clocks = <&input_clk>; 116 clocks = <&input_clk>;
117
118 /*
119 * Set initial core pll output frequency to 1GHz.
120 * It will be applied at the core pll driver probing
121 * on early boot.
122 */
123 assigned-clocks = <&core_clk>;
124 assigned-clock-rates = <1000000000>;
117 }; 125 };
118 126
119 serial: serial@5000 { 127 serial: serial@5000 {
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
index 7b8f8faf8a24..ac6b0ed8341e 100644
--- a/arch/arc/configs/hsdk_defconfig
+++ b/arch/arc/configs/hsdk_defconfig
@@ -49,10 +49,11 @@ CONFIG_SERIAL_8250_DW=y
49CONFIG_SERIAL_OF_PLATFORM=y 49CONFIG_SERIAL_OF_PLATFORM=y
50# CONFIG_HW_RANDOM is not set 50# CONFIG_HW_RANDOM is not set
51# CONFIG_HWMON is not set 51# CONFIG_HWMON is not set
52CONFIG_DRM=y
53# CONFIG_DRM_FBDEV_EMULATION is not set
54CONFIG_DRM_UDL=y
52CONFIG_FB=y 55CONFIG_FB=y
53CONFIG_FB_UDL=y
54CONFIG_FRAMEBUFFER_CONSOLE=y 56CONFIG_FRAMEBUFFER_CONSOLE=y
55CONFIG_USB=y
56CONFIG_USB_EHCI_HCD=y 57CONFIG_USB_EHCI_HCD=y
57CONFIG_USB_EHCI_HCD_PLATFORM=y 58CONFIG_USB_EHCI_HCD_PLATFORM=y
58CONFIG_USB_OHCI_HCD=y 59CONFIG_USB_OHCI_HCD=y
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index f35974ee7264..c9173c02081c 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -668,6 +668,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
668 return 0; 668 return 0;
669 669
670 __asm__ __volatile__( 670 __asm__ __volatile__(
671 " mov lp_count, %5 \n"
671 " lp 3f \n" 672 " lp 3f \n"
672 "1: ldb.ab %3, [%2, 1] \n" 673 "1: ldb.ab %3, [%2, 1] \n"
673 " breq.d %3, 0, 3f \n" 674 " breq.d %3, 0, 3f \n"
@@ -684,8 +685,8 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
684 " .word 1b, 4b \n" 685 " .word 1b, 4b \n"
685 " .previous \n" 686 " .previous \n"
686 : "+r"(res), "+r"(dst), "+r"(src), "=r"(val) 687 : "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
687 : "g"(-EFAULT), "l"(count) 688 : "g"(-EFAULT), "r"(count)
688 : "memory"); 689 : "lp_count", "lp_start", "lp_end", "memory");
689 690
690 return res; 691 return res;
691} 692}
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 7ef7d9a8ff89..9d27331fe69a 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -199,7 +199,7 @@ static void read_arc_build_cfg_regs(void)
199 unsigned int exec_ctrl; 199 unsigned int exec_ctrl;
200 200
201 READ_BCR(AUX_EXEC_CTRL, exec_ctrl); 201 READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
202 cpu->extn.dual_enb = exec_ctrl & 1; 202 cpu->extn.dual_enb = !(exec_ctrl & 1);
203 203
204 /* dual issue always present for this core */ 204 /* dual issue always present for this core */
205 cpu->extn.dual = 1; 205 cpu->extn.dual = 1;
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index 74315f302971..bf40e06f3fb8 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -163,7 +163,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
163 */ 163 */
164static int __print_sym(unsigned int address, void *unused) 164static int __print_sym(unsigned int address, void *unused)
165{ 165{
166 __print_symbol(" %s\n", address); 166 printk(" %pS\n", (void *)address);
167 return 0; 167 return 0;
168} 168}
169 169
diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
index bcd7c9fc5d0f..133a4dae41fe 100644
--- a/arch/arc/kernel/traps.c
+++ b/arch/arc/kernel/traps.c
@@ -83,6 +83,7 @@ DO_ERROR_INFO(SIGILL, "Illegal Insn (or Seq)", insterror_is_error, ILL_ILLOPC)
83DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", __weak do_memory_error, BUS_ADRERR) 83DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", __weak do_memory_error, BUS_ADRERR)
84DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT) 84DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT)
85DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN) 85DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN)
86DO_ERROR_INFO(SIGSEGV, "gcc generated __builtin_trap", do_trap5_error, 0)
86 87
87/* 88/*
88 * Entry Point for Misaligned Data access Exception, for emulating in software 89 * Entry Point for Misaligned Data access Exception, for emulating in software
@@ -115,6 +116,8 @@ void do_machine_check_fault(unsigned long address, struct pt_regs *regs)
115 * Thus TRAP_S <n> can be used for specific purpose 116 * Thus TRAP_S <n> can be used for specific purpose
116 * -1 used for software breakpointing (gdb) 117 * -1 used for software breakpointing (gdb)
117 * -2 used by kprobes 118 * -2 used by kprobes
119 * -5 __builtin_trap() generated by gcc (2018.03 onwards) for toggle such as
120 * -fno-isolate-erroneous-paths-dereference
118 */ 121 */
119void do_non_swi_trap(unsigned long address, struct pt_regs *regs) 122void do_non_swi_trap(unsigned long address, struct pt_regs *regs)
120{ 123{
@@ -134,6 +137,9 @@ void do_non_swi_trap(unsigned long address, struct pt_regs *regs)
134 kgdb_trap(regs); 137 kgdb_trap(regs);
135 break; 138 break;
136 139
140 case 5:
141 do_trap5_error(address, regs);
142 break;
137 default: 143 default:
138 break; 144 break;
139 } 145 }
@@ -155,3 +161,11 @@ void do_insterror_or_kprobe(unsigned long address, struct pt_regs *regs)
155 161
156 insterror_is_error(address, regs); 162 insterror_is_error(address, regs);
157} 163}
164
165/*
166 * abort() call generated by older gcc for __builtin_trap()
167 */
168void abort(void)
169{
170 __asm__ __volatile__("trap_s 5\n");
171}
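
Context for the trap-5 plumbing above: within this series, newer gcc lowers __builtin_trap() on ARC to "trap_s 5" (the abort() stub at the end of the hunk covers older compilers that emit a call to abort() instead). A purely illustrative, hypothetical C snippet of the kind of source that produces such a trap when gcc's erroneous-path isolation is active (it is enabled by default at -O2 and above); this is not part of the patch:

/* Illustration only: gcc replaces a provably-NULL dereference with
 * __builtin_trap(), which the ARC backend emits as "trap_s 5". */
int first_element(int *array)
{
	if (!array)
		return array[0];	/* isolated path: compiler plants a trap here */
	return array[0];
}
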
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 7d8c1d6c2f60..6e9a0a9a6a04 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -163,6 +163,9 @@ static void show_ecr_verbose(struct pt_regs *regs)
163 else 163 else
164 pr_cont("Bus Error, check PRM\n"); 164 pr_cont("Bus Error, check PRM\n");
165#endif 165#endif
166 } else if (vec == ECR_V_TRAP) {
167 if (regs->ecr_param == 5)
168 pr_cont("gcc generated __builtin_trap\n");
166 } else { 169 } else {
167 pr_cont("Check Programmer's Manual\n"); 170 pr_cont("Check Programmer's Manual\n");
168 } 171 }
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index f1ac6790da5f..46544e88492d 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -317,25 +317,23 @@ static void __init axs103_early_init(void)
317 * Instead of duplicating defconfig/DT for SMP/QUAD, add a small hack 317 * Instead of duplicating defconfig/DT for SMP/QUAD, add a small hack
318 * of fudging the freq in DT 318 * of fudging the freq in DT
319 */ 319 */
320#define AXS103_QUAD_CORE_CPU_FREQ_HZ 50000000
321
320 unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F; 322 unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F;
321 if (num_cores > 2) { 323 if (num_cores > 2) {
322 u32 freq = 50, orig; 324 u32 freq;
323 /*
324 * TODO: use cpu node "cpu-freq" param instead of platform-specific
325 * "/cpu_card/core_clk" as it works only if we use fixed-clock for cpu.
326 */
327 int off = fdt_path_offset(initial_boot_params, "/cpu_card/core_clk"); 325 int off = fdt_path_offset(initial_boot_params, "/cpu_card/core_clk");
328 const struct fdt_property *prop; 326 const struct fdt_property *prop;
329 327
330 prop = fdt_get_property(initial_boot_params, off, 328 prop = fdt_get_property(initial_boot_params, off,
331 "clock-frequency", NULL); 329 "assigned-clock-rates", NULL);
332 orig = be32_to_cpu(*(u32*)(prop->data)) / 1000000; 330 freq = be32_to_cpu(*(u32 *)(prop->data));
333 331
334 /* Patching .dtb in-place with new core clock value */ 332 /* Patching .dtb in-place with new core clock value */
335 if (freq != orig ) { 333 if (freq != AXS103_QUAD_CORE_CPU_FREQ_HZ) {
336 freq = cpu_to_be32(freq * 1000000); 334 freq = cpu_to_be32(AXS103_QUAD_CORE_CPU_FREQ_HZ);
337 fdt_setprop_inplace(initial_boot_params, off, 335 fdt_setprop_inplace(initial_boot_params, off,
338 "clock-frequency", &freq, sizeof(freq)); 336 "assigned-clock-rates", &freq, sizeof(freq));
339 } 337 }
340 } 338 }
341#endif 339#endif
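
The axs103_early_init() hunk above now reads the "assigned-clock-rates" property out of the flattened DT and patches it in place, so quad-core configurations boot at the lower 50 MHz core clock. A minimal sketch of that in-place libfdt pattern as a standalone helper; the helper name fdt_patch_u32 and its error handling are illustrative, not part of the patch:

#include <linux/libfdt.h>
#include <linux/types.h>

/* Overwrite a 32-bit DT property in a flattened blob, in place.
 * In-place writes keep the blob size unchanged, so no reallocation or
 * re-layout of the FDT is needed this early in boot. */
static int fdt_patch_u32(void *fdt, const char *path,
			 const char *name, u32 new_val)
{
	const struct fdt_property *prop;
	__be32 be_val;
	int off;

	off = fdt_path_offset(fdt, path);
	if (off < 0)
		return off;			/* node not found */

	prop = fdt_get_property(fdt, off, name, NULL);
	if (!prop)
		return -FDT_ERR_NOTFOUND;	/* property not found */

	if (be32_to_cpu(*(const u32 *)prop->data) == new_val)
		return 0;			/* already the desired value */

	be_val = cpu_to_be32(new_val);
	return fdt_setprop_inplace(fdt, off, name, &be_val, sizeof(be_val));
}
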
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c
index fd0ae5e38639..2958aedb649a 100644
--- a/arch/arc/plat-hsdk/platform.c
+++ b/arch/arc/plat-hsdk/platform.c
@@ -38,42 +38,6 @@ static void __init hsdk_init_per_cpu(unsigned int cpu)
38#define CREG_PAE (CREG_BASE + 0x180) 38#define CREG_PAE (CREG_BASE + 0x180)
39#define CREG_PAE_UPDATE (CREG_BASE + 0x194) 39#define CREG_PAE_UPDATE (CREG_BASE + 0x194)
40 40
41#define CREG_CORE_IF_CLK_DIV (CREG_BASE + 0x4B8)
42#define CREG_CORE_IF_CLK_DIV_2 0x1
43#define CGU_BASE ARC_PERIPHERAL_BASE
44#define CGU_PLL_STATUS (ARC_PERIPHERAL_BASE + 0x4)
45#define CGU_PLL_CTRL (ARC_PERIPHERAL_BASE + 0x0)
46#define CGU_PLL_STATUS_LOCK BIT(0)
47#define CGU_PLL_STATUS_ERR BIT(1)
48#define CGU_PLL_CTRL_1GHZ 0x3A10
49#define HSDK_PLL_LOCK_TIMEOUT 500
50
51#define HSDK_PLL_LOCKED() \
52 !!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK)
53
54#define HSDK_PLL_ERR() \
55 !!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR)
56
57static void __init hsdk_set_cpu_freq_1ghz(void)
58{
59 u32 timeout = HSDK_PLL_LOCK_TIMEOUT;
60
61 /*
62 * As we set cpu clock which exceeds 500MHz, the divider for the interface
63 * clock must be programmed to div-by-2.
64 */
65 iowrite32(CREG_CORE_IF_CLK_DIV_2, (void __iomem *) CREG_CORE_IF_CLK_DIV);
66
67 /* Set cpu clock to 1GHz */
68 iowrite32(CGU_PLL_CTRL_1GHZ, (void __iomem *) CGU_PLL_CTRL);
69
70 while (!HSDK_PLL_LOCKED() && timeout--)
71 cpu_relax();
72
73 if (!HSDK_PLL_LOCKED() || HSDK_PLL_ERR())
74 pr_err("Failed to setup CPU frequency to 1GHz!");
75}
76
77#define SDIO_BASE (ARC_PERIPHERAL_BASE + 0xA000) 41#define SDIO_BASE (ARC_PERIPHERAL_BASE + 0xA000)
78#define SDIO_UHS_REG_EXT (SDIO_BASE + 0x108) 42#define SDIO_UHS_REG_EXT (SDIO_BASE + 0x108)
79#define SDIO_UHS_REG_EXT_DIV_2 (2 << 30) 43#define SDIO_UHS_REG_EXT_DIV_2 (2 << 30)
@@ -98,12 +62,6 @@ static void __init hsdk_init_early(void)
98 * minimum possible div-by-2. 62 * minimum possible div-by-2.
99 */ 63 */
100 iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT); 64 iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT);
101
102 /*
103 * Setup CPU frequency to 1GHz.
104 * TODO: remove it after smart hsdk pll driver will be introduced.
105 */
106 hsdk_set_cpu_freq_1ghz();
107} 65}
108 66
109static const char *hsdk_compat[] __initconst = { 67static const char *hsdk_compat[] __initconst = {
diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi
index 45d815a86d42..de08d9045cb8 100644
--- a/arch/arm/boot/dts/aspeed-g4.dtsi
+++ b/arch/arm/boot/dts/aspeed-g4.dtsi
@@ -219,7 +219,7 @@
219 compatible = "aspeed,ast2400-vuart"; 219 compatible = "aspeed,ast2400-vuart";
220 reg = <0x1e787000 0x40>; 220 reg = <0x1e787000 0x40>;
221 reg-shift = <2>; 221 reg-shift = <2>;
222 interrupts = <10>; 222 interrupts = <8>;
223 clocks = <&clk_uart>; 223 clocks = <&clk_uart>;
224 no-loopback-test; 224 no-loopback-test;
225 status = "disabled"; 225 status = "disabled";
diff --git a/arch/arm/boot/dts/at91-tse850-3.dts b/arch/arm/boot/dts/at91-tse850-3.dts
index 5f29010cdbd8..9b82cc8843e1 100644
--- a/arch/arm/boot/dts/at91-tse850-3.dts
+++ b/arch/arm/boot/dts/at91-tse850-3.dts
@@ -221,6 +221,7 @@
221 jc42@18 { 221 jc42@18 {
222 compatible = "nxp,se97b", "jedec,jc-42.4-temp"; 222 compatible = "nxp,se97b", "jedec,jc-42.4-temp";
223 reg = <0x18>; 223 reg = <0x18>;
224 smbus-timeout-disable;
224 }; 225 };
225 226
226 dpot: mcp4651-104@28 { 227 dpot: mcp4651-104@28 {
diff --git a/arch/arm/boot/dts/da850-lego-ev3.dts b/arch/arm/boot/dts/da850-lego-ev3.dts
index 413dbd5d9f64..81942ae83e1f 100644
--- a/arch/arm/boot/dts/da850-lego-ev3.dts
+++ b/arch/arm/boot/dts/da850-lego-ev3.dts
@@ -178,7 +178,7 @@
178 */ 178 */
179 battery { 179 battery {
180 pinctrl-names = "default"; 180 pinctrl-names = "default";
181 pintctrl-0 = <&battery_pins>; 181 pinctrl-0 = <&battery_pins>;
182 compatible = "lego,ev3-battery"; 182 compatible = "lego,ev3-battery";
183 io-channels = <&adc 4>, <&adc 3>; 183 io-channels = <&adc 4>, <&adc 3>;
184 io-channel-names = "voltage", "current"; 184 io-channel-names = "voltage", "current";
@@ -392,7 +392,7 @@
392 batt_volt_en { 392 batt_volt_en {
393 gpio-hog; 393 gpio-hog;
394 gpios = <6 GPIO_ACTIVE_HIGH>; 394 gpios = <6 GPIO_ACTIVE_HIGH>;
395 output-low; 395 output-high;
396 }; 396 };
397}; 397};
398 398
diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts
index b2b95ff205e8..0029ec27819c 100644
--- a/arch/arm/boot/dts/exynos5800-peach-pi.dts
+++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts
@@ -664,6 +664,10 @@
664 status = "okay"; 664 status = "okay";
665}; 665};
666 666
667&mixer {
668 status = "okay";
669};
670
667/* eMMC flash */ 671/* eMMC flash */
668&mmc_0 { 672&mmc_0 {
669 status = "okay"; 673 status = "okay";
diff --git a/arch/arm/boot/dts/ls1021a-qds.dts b/arch/arm/boot/dts/ls1021a-qds.dts
index 940875316d0f..67b4de0e3439 100644
--- a/arch/arm/boot/dts/ls1021a-qds.dts
+++ b/arch/arm/boot/dts/ls1021a-qds.dts
@@ -215,7 +215,7 @@
215 reg = <0x2a>; 215 reg = <0x2a>;
216 VDDA-supply = <&reg_3p3v>; 216 VDDA-supply = <&reg_3p3v>;
217 VDDIO-supply = <&reg_3p3v>; 217 VDDIO-supply = <&reg_3p3v>;
218 clocks = <&sys_mclk 1>; 218 clocks = <&sys_mclk>;
219 }; 219 };
220 }; 220 };
221 }; 221 };
diff --git a/arch/arm/boot/dts/ls1021a-twr.dts b/arch/arm/boot/dts/ls1021a-twr.dts
index a8b148ad1dd2..44715c8ef756 100644
--- a/arch/arm/boot/dts/ls1021a-twr.dts
+++ b/arch/arm/boot/dts/ls1021a-twr.dts
@@ -187,7 +187,7 @@
187 reg = <0x0a>; 187 reg = <0x0a>;
188 VDDA-supply = <&reg_3p3v>; 188 VDDA-supply = <&reg_3p3v>;
189 VDDIO-supply = <&reg_3p3v>; 189 VDDIO-supply = <&reg_3p3v>;
190 clocks = <&sys_mclk 1>; 190 clocks = <&sys_mclk>;
191 }; 191 };
192}; 192};
193 193
diff --git a/arch/arm/boot/dts/rk3066a-marsboard.dts b/arch/arm/boot/dts/rk3066a-marsboard.dts
index c6d92c25df42..d23ee6d911ac 100644
--- a/arch/arm/boot/dts/rk3066a-marsboard.dts
+++ b/arch/arm/boot/dts/rk3066a-marsboard.dts
@@ -83,6 +83,10 @@
83 }; 83 };
84}; 84};
85 85
86&cpu0 {
87 cpu0-supply = <&vdd_arm>;
88};
89
86&i2c1 { 90&i2c1 {
87 status = "okay"; 91 status = "okay";
88 clock-frequency = <400000>; 92 clock-frequency = <400000>;
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index cd24894ee5c6..6102e4e7f35c 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -956,7 +956,7 @@
956 iep_mmu: iommu@ff900800 { 956 iep_mmu: iommu@ff900800 {
957 compatible = "rockchip,iommu"; 957 compatible = "rockchip,iommu";
958 reg = <0x0 0xff900800 0x0 0x40>; 958 reg = <0x0 0xff900800 0x0 0x40>;
959 interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH 0>; 959 interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
960 interrupt-names = "iep_mmu"; 960 interrupt-names = "iep_mmu";
961 #iommu-cells = <0>; 961 #iommu-cells = <0>;
962 status = "disabled"; 962 status = "disabled";
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index b91300d49a31..5840f5c75c3b 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -502,8 +502,8 @@
502 reg = <0x01c16000 0x1000>; 502 reg = <0x01c16000 0x1000>;
503 interrupts = <58>; 503 interrupts = <58>;
504 clocks = <&ccu CLK_AHB_HDMI0>, <&ccu CLK_HDMI>, 504 clocks = <&ccu CLK_AHB_HDMI0>, <&ccu CLK_HDMI>,
505 <&ccu 9>, 505 <&ccu CLK_PLL_VIDEO0_2X>,
506 <&ccu 18>; 506 <&ccu CLK_PLL_VIDEO1_2X>;
507 clock-names = "ahb", "mod", "pll-0", "pll-1"; 507 clock-names = "ahb", "mod", "pll-0", "pll-1";
508 dmas = <&dma SUN4I_DMA_NORMAL 16>, 508 dmas = <&dma SUN4I_DMA_NORMAL 16>,
509 <&dma SUN4I_DMA_NORMAL 16>, 509 <&dma SUN4I_DMA_NORMAL 16>,
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index 6ae4d95e230e..316cb8b2945b 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -82,8 +82,8 @@
82 reg = <0x01c16000 0x1000>; 82 reg = <0x01c16000 0x1000>;
83 interrupts = <58>; 83 interrupts = <58>;
84 clocks = <&ccu CLK_AHB_HDMI>, <&ccu CLK_HDMI>, 84 clocks = <&ccu CLK_AHB_HDMI>, <&ccu CLK_HDMI>,
85 <&ccu 9>, 85 <&ccu CLK_PLL_VIDEO0_2X>,
86 <&ccu 16>; 86 <&ccu CLK_PLL_VIDEO1_2X>;
87 clock-names = "ahb", "mod", "pll-0", "pll-1"; 87 clock-names = "ahb", "mod", "pll-0", "pll-1";
88 dmas = <&dma SUN4I_DMA_NORMAL 16>, 88 dmas = <&dma SUN4I_DMA_NORMAL 16>,
89 <&dma SUN4I_DMA_NORMAL 16>, 89 <&dma SUN4I_DMA_NORMAL 16>,
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 8bfa12b548e0..72d3fe44ecaf 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -429,8 +429,8 @@
429 interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>; 429 interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
430 clocks = <&ccu CLK_AHB1_HDMI>, <&ccu CLK_HDMI>, 430 clocks = <&ccu CLK_AHB1_HDMI>, <&ccu CLK_HDMI>,
431 <&ccu CLK_HDMI_DDC>, 431 <&ccu CLK_HDMI_DDC>,
432 <&ccu 7>, 432 <&ccu CLK_PLL_VIDEO0_2X>,
433 <&ccu 13>; 433 <&ccu CLK_PLL_VIDEO1_2X>;
434 clock-names = "ahb", "mod", "ddc", "pll-0", "pll-1"; 434 clock-names = "ahb", "mod", "ddc", "pll-0", "pll-1";
435 resets = <&ccu RST_AHB1_HDMI>; 435 resets = <&ccu RST_AHB1_HDMI>;
436 reset-names = "ahb"; 436 reset-names = "ahb";
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 68dfa82544fc..59655e42e4b0 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -581,8 +581,8 @@
581 reg = <0x01c16000 0x1000>; 581 reg = <0x01c16000 0x1000>;
582 interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>; 582 interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
583 clocks = <&ccu CLK_AHB_HDMI0>, <&ccu CLK_HDMI>, 583 clocks = <&ccu CLK_AHB_HDMI0>, <&ccu CLK_HDMI>,
584 <&ccu 9>, 584 <&ccu CLK_PLL_VIDEO0_2X>,
585 <&ccu 18>; 585 <&ccu CLK_PLL_VIDEO1_2X>;
586 clock-names = "ahb", "mod", "pll-0", "pll-1"; 586 clock-names = "ahb", "mod", "pll-0", "pll-1";
587 dmas = <&dma SUN4I_DMA_NORMAL 16>, 587 dmas = <&dma SUN4I_DMA_NORMAL 16>,
588 <&dma SUN4I_DMA_NORMAL 16>, 588 <&dma SUN4I_DMA_NORMAL 16>,
diff --git a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
index 98715538932f..a021ee6da396 100644
--- a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
@@ -146,6 +146,7 @@
146 status = "okay"; 146 status = "okay";
147 147
148 axp81x: pmic@3a3 { 148 axp81x: pmic@3a3 {
149 compatible = "x-powers,axp813";
149 reg = <0x3a3>; 150 reg = <0x3a3>;
150 interrupt-parent = <&r_intc>; 151 interrupt-parent = <&r_intc>;
151 interrupts = <0 IRQ_TYPE_LEVEL_LOW>; 152 interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
diff --git a/arch/arm/boot/dts/tango4-common.dtsi b/arch/arm/boot/dts/tango4-common.dtsi
index 0ec1b0a317b4..ff72a8efb73d 100644
--- a/arch/arm/boot/dts/tango4-common.dtsi
+++ b/arch/arm/boot/dts/tango4-common.dtsi
@@ -156,7 +156,6 @@
156 reg = <0x6e000 0x400>; 156 reg = <0x6e000 0x400>;
157 ranges = <0 0x6e000 0x400>; 157 ranges = <0 0x6e000 0x400>;
158 interrupt-parent = <&gic>; 158 interrupt-parent = <&gic>;
159 interrupt-controller;
160 #address-cells = <1>; 159 #address-cells = <1>;
161 #size-cells = <1>; 160 #size-cells = <1>;
162 161
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 5cf04888c581..3e26c6f7a191 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -793,7 +793,6 @@ void abort(void)
793 /* if that doesn't kill us, halt */ 793 /* if that doesn't kill us, halt */
794 panic("Oops failed to kill thread"); 794 panic("Oops failed to kill thread");
795} 795}
796EXPORT_SYMBOL(abort);
797 796
798void __init trap_init(void) 797void __init trap_init(void)
799{ 798{
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 8be04ec95adf..5ace9380626a 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -868,10 +868,10 @@ static const struct dma_slave_map dm365_edma_map[] = {
868 { "spi_davinci.0", "rx", EDMA_FILTER_PARAM(0, 17) }, 868 { "spi_davinci.0", "rx", EDMA_FILTER_PARAM(0, 17) },
869 { "spi_davinci.3", "tx", EDMA_FILTER_PARAM(0, 18) }, 869 { "spi_davinci.3", "tx", EDMA_FILTER_PARAM(0, 18) },
870 { "spi_davinci.3", "rx", EDMA_FILTER_PARAM(0, 19) }, 870 { "spi_davinci.3", "rx", EDMA_FILTER_PARAM(0, 19) },
871 { "dm6441-mmc.0", "rx", EDMA_FILTER_PARAM(0, 26) }, 871 { "da830-mmc.0", "rx", EDMA_FILTER_PARAM(0, 26) },
872 { "dm6441-mmc.0", "tx", EDMA_FILTER_PARAM(0, 27) }, 872 { "da830-mmc.0", "tx", EDMA_FILTER_PARAM(0, 27) },
873 { "dm6441-mmc.1", "rx", EDMA_FILTER_PARAM(0, 30) }, 873 { "da830-mmc.1", "rx", EDMA_FILTER_PARAM(0, 30) },
874 { "dm6441-mmc.1", "tx", EDMA_FILTER_PARAM(0, 31) }, 874 { "da830-mmc.1", "tx", EDMA_FILTER_PARAM(0, 31) },
875}; 875};
876 876
877static struct edma_soc_info dm365_edma_pdata = { 877static struct edma_soc_info dm365_edma_pdata = {
@@ -925,12 +925,14 @@ static struct resource edma_resources[] = {
925 /* not using TC*_ERR */ 925 /* not using TC*_ERR */
926}; 926};
927 927
928static struct platform_device dm365_edma_device = { 928static const struct platform_device_info dm365_edma_device __initconst = {
929 .name = "edma", 929 .name = "edma",
930 .id = 0, 930 .id = 0,
931 .dev.platform_data = &dm365_edma_pdata, 931 .dma_mask = DMA_BIT_MASK(32),
932 .num_resources = ARRAY_SIZE(edma_resources), 932 .res = edma_resources,
933 .resource = edma_resources, 933 .num_res = ARRAY_SIZE(edma_resources),
934 .data = &dm365_edma_pdata,
935 .size_data = sizeof(dm365_edma_pdata),
934}; 936};
935 937
936static struct resource dm365_asp_resources[] = { 938static struct resource dm365_asp_resources[] = {
@@ -1428,13 +1430,18 @@ int __init dm365_init_video(struct vpfe_config *vpfe_cfg,
1428 1430
1429static int __init dm365_init_devices(void) 1431static int __init dm365_init_devices(void)
1430{ 1432{
1433 struct platform_device *edma_pdev;
1431 int ret = 0; 1434 int ret = 0;
1432 1435
1433 if (!cpu_is_davinci_dm365()) 1436 if (!cpu_is_davinci_dm365())
1434 return 0; 1437 return 0;
1435 1438
1436 davinci_cfg_reg(DM365_INT_EDMA_CC); 1439 davinci_cfg_reg(DM365_INT_EDMA_CC);
1437 platform_device_register(&dm365_edma_device); 1440 edma_pdev = platform_device_register_full(&dm365_edma_device);
1441 if (IS_ERR(edma_pdev)) {
1442 pr_warn("%s: Failed to register eDMA\n", __func__);
1443 return PTR_ERR(edma_pdev);
1444 }
1438 1445
1439 platform_device_register(&dm365_mdio_device); 1446 platform_device_register(&dm365_mdio_device);
1440 platform_device_register(&dm365_emac_device); 1447 platform_device_register(&dm365_emac_device);
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
index 45bdbfb96126..4a8d3f83a36e 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
@@ -75,6 +75,7 @@
75 pinctrl-0 = <&rgmii_pins>; 75 pinctrl-0 = <&rgmii_pins>;
76 phy-mode = "rgmii"; 76 phy-mode = "rgmii";
77 phy-handle = <&ext_rgmii_phy>; 77 phy-handle = <&ext_rgmii_phy>;
78 phy-supply = <&reg_dc1sw>;
78 status = "okay"; 79 status = "okay";
79}; 80};
80 81
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
index 806442d3e846..604cdaedac38 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
@@ -77,6 +77,7 @@
77 pinctrl-0 = <&rmii_pins>; 77 pinctrl-0 = <&rmii_pins>;
78 phy-mode = "rmii"; 78 phy-mode = "rmii";
79 phy-handle = <&ext_rmii_phy1>; 79 phy-handle = <&ext_rmii_phy1>;
80 phy-supply = <&reg_dc1sw>;
80 status = "okay"; 81 status = "okay";
81 82
82}; 83};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
index 0eb2acedf8c3..abe179de35d7 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
@@ -82,6 +82,7 @@
82 pinctrl-0 = <&rgmii_pins>; 82 pinctrl-0 = <&rgmii_pins>;
83 phy-mode = "rgmii"; 83 phy-mode = "rgmii";
84 phy-handle = <&ext_rgmii_phy>; 84 phy-handle = <&ext_rgmii_phy>;
85 phy-supply = <&reg_dc1sw>;
85 status = "okay"; 86 status = "okay";
86}; 87};
87 88
@@ -95,7 +96,7 @@
95&mmc2 { 96&mmc2 {
96 pinctrl-names = "default"; 97 pinctrl-names = "default";
97 pinctrl-0 = <&mmc2_pins>; 98 pinctrl-0 = <&mmc2_pins>;
98 vmmc-supply = <&reg_vcc3v3>; 99 vmmc-supply = <&reg_dcdc1>;
99 vqmmc-supply = <&reg_vcc1v8>; 100 vqmmc-supply = <&reg_vcc1v8>;
100 bus-width = <8>; 101 bus-width = <8>;
101 non-removable; 102 non-removable;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
index a5da18a6f286..43418bd881d8 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
@@ -45,19 +45,10 @@
45 45
46#include "sun50i-a64.dtsi" 46#include "sun50i-a64.dtsi"
47 47
48/ {
49 reg_vcc3v3: vcc3v3 {
50 compatible = "regulator-fixed";
51 regulator-name = "vcc3v3";
52 regulator-min-microvolt = <3300000>;
53 regulator-max-microvolt = <3300000>;
54 };
55};
56
57&mmc0 { 48&mmc0 {
58 pinctrl-names = "default"; 49 pinctrl-names = "default";
59 pinctrl-0 = <&mmc0_pins>; 50 pinctrl-0 = <&mmc0_pins>;
60 vmmc-supply = <&reg_vcc3v3>; 51 vmmc-supply = <&reg_dcdc1>;
61 non-removable; 52 non-removable;
62 disable-wp; 53 disable-wp;
63 bus-width = <4>; 54 bus-width = <4>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts
index b6b7a561df8c..a42fd79a62a3 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts
@@ -71,7 +71,7 @@
71 pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>; 71 pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
72 vmmc-supply = <&reg_vcc3v3>; 72 vmmc-supply = <&reg_vcc3v3>;
73 bus-width = <4>; 73 bus-width = <4>;
74 cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; 74 cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>;
75 status = "okay"; 75 status = "okay";
76}; 76};
77 77
diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
index a298df74ca6c..dbe2648649db 100644
--- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
@@ -255,7 +255,6 @@
255&avb { 255&avb {
256 pinctrl-0 = <&avb_pins>; 256 pinctrl-0 = <&avb_pins>;
257 pinctrl-names = "default"; 257 pinctrl-names = "default";
258 renesas,no-ether-link;
259 phy-handle = <&phy0>; 258 phy-handle = <&phy0>;
260 status = "okay"; 259 status = "okay";
261 260
diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi
index 0d85b315ce71..73439cf48659 100644
--- a/arch/arm64/boot/dts/renesas/ulcb.dtsi
+++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi
@@ -145,7 +145,6 @@
145&avb { 145&avb {
146 pinctrl-0 = <&avb_pins>; 146 pinctrl-0 = <&avb_pins>;
147 pinctrl-names = "default"; 147 pinctrl-names = "default";
148 renesas,no-ether-link;
149 phy-handle = <&phy0>; 148 phy-handle = <&phy0>;
150 status = "okay"; 149 status = "okay";
151 150
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
index d4f80786e7c2..3890468678ce 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -132,6 +132,8 @@
132 assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>; 132 assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>;
133 assigned-clock-parents = <&gmac_clkin>, <&gmac_clkin>; 133 assigned-clock-parents = <&gmac_clkin>, <&gmac_clkin>;
134 clock_in_out = "input"; 134 clock_in_out = "input";
135 /* shows instability at 1GBit right now */
136 max-speed = <100>;
135 phy-supply = <&vcc_io>; 137 phy-supply = <&vcc_io>;
136 phy-mode = "rgmii"; 138 phy-mode = "rgmii";
137 pinctrl-names = "default"; 139 pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index 41d61840fb99..2426da631938 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -514,7 +514,7 @@
514 tsadc: tsadc@ff250000 { 514 tsadc: tsadc@ff250000 {
515 compatible = "rockchip,rk3328-tsadc"; 515 compatible = "rockchip,rk3328-tsadc";
516 reg = <0x0 0xff250000 0x0 0x100>; 516 reg = <0x0 0xff250000 0x0 0x100>;
517 interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH 0>; 517 interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
518 assigned-clocks = <&cru SCLK_TSADC>; 518 assigned-clocks = <&cru SCLK_TSADC>;
519 assigned-clock-rates = <50000>; 519 assigned-clock-rates = <50000>;
520 clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>; 520 clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index 910628d18add..1fc5060d7027 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -155,17 +155,6 @@
155 regulator-min-microvolt = <5000000>; 155 regulator-min-microvolt = <5000000>;
156 regulator-max-microvolt = <5000000>; 156 regulator-max-microvolt = <5000000>;
157 }; 157 };
158
159 vdd_log: vdd-log {
160 compatible = "pwm-regulator";
161 pwms = <&pwm2 0 25000 0>;
162 regulator-name = "vdd_log";
163 regulator-min-microvolt = <800000>;
164 regulator-max-microvolt = <1400000>;
165 regulator-always-on;
166 regulator-boot-on;
167 status = "okay";
168 };
169}; 158};
170 159
171&cpu_b0 { 160&cpu_b0 {
diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
index 48e733136db4..0ac2ace82435 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
@@ -198,8 +198,8 @@
198 gpio-controller; 198 gpio-controller;
199 #gpio-cells = <2>; 199 #gpio-cells = <2>;
200 gpio-ranges = <&pinctrl 0 0 0>, 200 gpio-ranges = <&pinctrl 0 0 0>,
201 <&pinctrl 96 0 0>, 201 <&pinctrl 104 0 0>,
202 <&pinctrl 160 0 0>; 202 <&pinctrl 168 0 0>;
203 gpio-ranges-group-names = "gpio_range0", 203 gpio-ranges-group-names = "gpio_range0",
204 "gpio_range1", 204 "gpio_range1",
205 "gpio_range2"; 205 "gpio_range2";
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 1d29b2f8726b..1dacbf5e9e09 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -504,6 +504,11 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
504 if (!(ma->flags & ACPI_SRAT_MEM_ENABLED)) 504 if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
505 return -1; 505 return -1;
506 506
507 if (num_node_memblks >= NR_NODE_MEMBLKS) {
508 pr_err("NUMA: too many memblk ranges\n");
509 return -EINVAL;
510 }
511
507 /* record this node in proximity bitmap */ 512 /* record this node in proximity bitmap */
508 pxm_bit_set(pxm); 513 pxm_bit_set(pxm);
509 514
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index c6ecb97151a2..9025699049ca 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -88,7 +88,7 @@ void vtime_flush(struct task_struct *tsk)
88 } 88 }
89 89
90 if (ti->softirq_time) { 90 if (ti->softirq_time) {
91 delta = cycle_to_nsec(ti->softirq_time)); 91 delta = cycle_to_nsec(ti->softirq_time);
92 account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ); 92 account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ);
93 } 93 }
94 94
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c
index cb79fba79d43..b88a8dd14933 100644
--- a/arch/m32r/kernel/traps.c
+++ b/arch/m32r/kernel/traps.c
@@ -122,7 +122,6 @@ void abort(void)
122 /* if that doesn't kill us, halt */ 122 /* if that doesn't kill us, halt */
123 panic("Oops failed to kill thread"); 123 panic("Oops failed to kill thread");
124} 124}
125EXPORT_SYMBOL(abort);
126 125
127void __init trap_init(void) 126void __init trap_init(void)
128{ 127{
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index c7ed26029cbb..e68e6e04063a 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -235,6 +235,7 @@ LEAF(mips_cps_core_init)
235 has_mt t0, 3f 235 has_mt t0, 3f
236 236
237 .set push 237 .set push
238 .set MIPS_ISA_LEVEL_RAW
238 .set mt 239 .set mt
239 240
240 /* Only allow 1 TC per VPE to execute... */ 241 /* Only allow 1 TC per VPE to execute... */
@@ -388,6 +389,7 @@ LEAF(mips_cps_boot_vpes)
388#elif defined(CONFIG_MIPS_MT) 389#elif defined(CONFIG_MIPS_MT)
389 390
390 .set push 391 .set push
392 .set MIPS_ISA_LEVEL_RAW
391 .set mt 393 .set mt
392 394
393 /* If the core doesn't support MT then return */ 395 /* If the core doesn't support MT then return */
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 45d0b6b037ee..57028d49c202 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -705,6 +705,18 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
705 struct task_struct *t; 705 struct task_struct *t;
706 int max_users; 706 int max_users;
707 707
708 /* If nothing to change, return right away, successfully. */
709 if (value == mips_get_process_fp_mode(task))
710 return 0;
711
712 /* Only accept a mode change if 64-bit FP enabled for o32. */
713 if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
714 return -EOPNOTSUPP;
715
716 /* And only for o32 tasks. */
717 if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
718 return -EOPNOTSUPP;
719
708 /* Check the value is valid */ 720 /* Check the value is valid */
709 if (value & ~known_bits) 721 if (value & ~known_bits)
710 return -EOPNOTSUPP; 722 return -EOPNOTSUPP;
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index efbd8df8b665..0b23b1ad99e6 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -419,63 +419,160 @@ static int gpr64_set(struct task_struct *target,
419 419
420#endif /* CONFIG_64BIT */ 420#endif /* CONFIG_64BIT */
421 421
422/*
423 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
424 * !CONFIG_CPU_HAS_MSA variant. FP context's general register slots
425 * correspond 1:1 to buffer slots. Only general registers are copied.
426 */
427static int fpr_get_fpa(struct task_struct *target,
428 unsigned int *pos, unsigned int *count,
429 void **kbuf, void __user **ubuf)
430{
431 return user_regset_copyout(pos, count, kbuf, ubuf,
432 &target->thread.fpu,
433 0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
434}
435
436/*
437 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
438 * CONFIG_CPU_HAS_MSA variant. Only lower 64 bits of FP context's
439 * general register slots are copied to buffer slots. Only general
440 * registers are copied.
441 */
442static int fpr_get_msa(struct task_struct *target,
443 unsigned int *pos, unsigned int *count,
444 void **kbuf, void __user **ubuf)
445{
446 unsigned int i;
447 u64 fpr_val;
448 int err;
449
450 BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
451 for (i = 0; i < NUM_FPU_REGS; i++) {
452 fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
453 err = user_regset_copyout(pos, count, kbuf, ubuf,
454 &fpr_val, i * sizeof(elf_fpreg_t),
455 (i + 1) * sizeof(elf_fpreg_t));
456 if (err)
457 return err;
458 }
459
460 return 0;
461}
462
463/*
464 * Copy the floating-point context to the supplied NT_PRFPREG buffer.
465 * Choose the appropriate helper for general registers, and then copy
466 * the FCSR register separately.
467 */
422static int fpr_get(struct task_struct *target, 468static int fpr_get(struct task_struct *target,
423 const struct user_regset *regset, 469 const struct user_regset *regset,
424 unsigned int pos, unsigned int count, 470 unsigned int pos, unsigned int count,
425 void *kbuf, void __user *ubuf) 471 void *kbuf, void __user *ubuf)
426{ 472{
427 unsigned i; 473 const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
428 int err; 474 int err;
429 u64 fpr_val;
430 475
431 /* XXX fcr31 */ 476 if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
477 err = fpr_get_fpa(target, &pos, &count, &kbuf, &ubuf);
478 else
479 err = fpr_get_msa(target, &pos, &count, &kbuf, &ubuf);
480 if (err)
481 return err;
432 482
433 if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t)) 483 err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
434 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 484 &target->thread.fpu.fcr31,
435 &target->thread.fpu, 485 fcr31_pos, fcr31_pos + sizeof(u32));
436 0, sizeof(elf_fpregset_t));
437 486
438 for (i = 0; i < NUM_FPU_REGS; i++) { 487 return err;
439 fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0); 488}
440 err = user_regset_copyout(&pos, &count, &kbuf, &ubuf, 489
441 &fpr_val, i * sizeof(elf_fpreg_t), 490/*
442 (i + 1) * sizeof(elf_fpreg_t)); 491 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
492 * !CONFIG_CPU_HAS_MSA variant. Buffer slots correspond 1:1 to FP
493 * context's general register slots. Only general registers are copied.
494 */
495static int fpr_set_fpa(struct task_struct *target,
496 unsigned int *pos, unsigned int *count,
497 const void **kbuf, const void __user **ubuf)
498{
499 return user_regset_copyin(pos, count, kbuf, ubuf,
500 &target->thread.fpu,
501 0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
502}
503
504/*
505 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
506 * CONFIG_CPU_HAS_MSA variant. Buffer slots are copied to lower 64
507 * bits only of FP context's general register slots. Only general
508 * registers are copied.
509 */
510static int fpr_set_msa(struct task_struct *target,
511 unsigned int *pos, unsigned int *count,
512 const void **kbuf, const void __user **ubuf)
513{
514 unsigned int i;
515 u64 fpr_val;
516 int err;
517
518 BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
519 for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
520 err = user_regset_copyin(pos, count, kbuf, ubuf,
521 &fpr_val, i * sizeof(elf_fpreg_t),
522 (i + 1) * sizeof(elf_fpreg_t));
443 if (err) 523 if (err)
444 return err; 524 return err;
525 set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
445 } 526 }
446 527
447 return 0; 528 return 0;
448} 529}
449 530
531/*
532 * Copy the supplied NT_PRFPREG buffer to the floating-point context.
533 * Choose the appropriate helper for general registers, and then copy
534 * the FCSR register separately.
535 *
536 * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
537 * which is supposed to have been guaranteed by the kernel before
538 * calling us, e.g. in `ptrace_regset'. We enforce that requirement,
539 * so that we can safely avoid preinitializing temporaries for
540 * partial register writes.
541 */
450static int fpr_set(struct task_struct *target, 542static int fpr_set(struct task_struct *target,
451 const struct user_regset *regset, 543 const struct user_regset *regset,
452 unsigned int pos, unsigned int count, 544 unsigned int pos, unsigned int count,
453 const void *kbuf, const void __user *ubuf) 545 const void *kbuf, const void __user *ubuf)
454{ 546{
455 unsigned i; 547 const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
548 u32 fcr31;
456 int err; 549 int err;
457 u64 fpr_val;
458 550
459 /* XXX fcr31 */ 551 BUG_ON(count % sizeof(elf_fpreg_t));
552
553 if (pos + count > sizeof(elf_fpregset_t))
554 return -EIO;
460 555
461 init_fp_ctx(target); 556 init_fp_ctx(target);
462 557
463 if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t)) 558 if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
464 return user_regset_copyin(&pos, &count, &kbuf, &ubuf, 559 err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
465 &target->thread.fpu, 560 else
466 0, sizeof(elf_fpregset_t)); 561 err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
562 if (err)
563 return err;
467 564
468 BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t)); 565 if (count > 0) {
469 for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) {
470 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 566 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
471 &fpr_val, i * sizeof(elf_fpreg_t), 567 &fcr31,
472 (i + 1) * sizeof(elf_fpreg_t)); 568 fcr31_pos, fcr31_pos + sizeof(u32));
473 if (err) 569 if (err)
474 return err; 570 return err;
475 set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val); 571
572 ptrace_setfcr31(target, fcr31);
476 } 573 }
477 574
478 return 0; 575 return err;
479} 576}
480 577
481enum mips_regset { 578enum mips_regset {
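
The reworked fpr_get()/fpr_set() above back the NT_PRFPREG regset, which debuggers reach through PTRACE_GETREGSET / PTRACE_SETREGSET. A small userspace sketch of the consumer side, assuming the layout described in the comments above (32 doubleword FP register slots followed by the 32-bit FCSR, padded out to the full elf_fpregset_t size); the helper name and buffer layout are illustrative, not part of the patch:

#include <elf.h>		/* NT_PRFPREG */
#include <stdint.h>
#include <string.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Read the traced task's FP context via the NT_PRFPREG regset. */
static int read_fp_regs(pid_t pid, uint64_t fpr[32], uint32_t *fcsr)
{
	struct {
		uint64_t regs[32];	/* FP general register slots */
		uint32_t fcsr;		/* floating-point control/status */
		uint32_t pad;		/* assumed trailing pad of elf_fpregset_t */
	} buf;
	struct iovec iov = { .iov_base = &buf, .iov_len = sizeof(buf) };

	if (ptrace(PTRACE_GETREGSET, pid, (void *)(long)NT_PRFPREG, &iov) == -1)
		return -1;

	memcpy(fpr, buf.regs, sizeof(buf.regs));
	*fcsr = buf.fcsr;
	return 0;
}
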
diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
index dd5a08aaa4da..3eb4bfc1fb36 100644
--- a/arch/parisc/include/asm/ldcw.h
+++ b/arch/parisc/include/asm/ldcw.h
@@ -12,6 +12,7 @@
12 for the semaphore. */ 12 for the semaphore. */
13 13
14#define __PA_LDCW_ALIGNMENT 16 14#define __PA_LDCW_ALIGNMENT 16
15#define __PA_LDCW_ALIGN_ORDER 4
15#define __ldcw_align(a) ({ \ 16#define __ldcw_align(a) ({ \
16 unsigned long __ret = (unsigned long) &(a)->lock[0]; \ 17 unsigned long __ret = (unsigned long) &(a)->lock[0]; \
17 __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \ 18 __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \
@@ -29,6 +30,7 @@
29 ldcd). */ 30 ldcd). */
30 31
31#define __PA_LDCW_ALIGNMENT 4 32#define __PA_LDCW_ALIGNMENT 4
33#define __PA_LDCW_ALIGN_ORDER 2
32#define __ldcw_align(a) (&(a)->slock) 34#define __ldcw_align(a) (&(a)->slock)
33#define __LDCW "ldcw,co" 35#define __LDCW "ldcw,co"
34 36
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index d8f77358e2ba..29b99b8964aa 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -870,7 +870,7 @@ static void print_parisc_device(struct parisc_device *dev)
870 static int count; 870 static int count;
871 871
872 print_pa_hwpath(dev, hw_path); 872 print_pa_hwpath(dev, hw_path);
873 printk(KERN_INFO "%d. %s at 0x%p [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }", 873 printk(KERN_INFO "%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
874 ++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type, 874 ++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type,
875 dev->id.hversion_rev, dev->id.hversion, dev->id.sversion); 875 dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
876 876
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index f3cecf5117cf..e95207c0565e 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -35,6 +35,7 @@
35#include <asm/pgtable.h> 35#include <asm/pgtable.h>
36#include <asm/signal.h> 36#include <asm/signal.h>
37#include <asm/unistd.h> 37#include <asm/unistd.h>
38#include <asm/ldcw.h>
38#include <asm/thread_info.h> 39#include <asm/thread_info.h>
39 40
40#include <linux/linkage.h> 41#include <linux/linkage.h>
@@ -46,6 +47,14 @@
46#endif 47#endif
47 48
48 .import pa_tlb_lock,data 49 .import pa_tlb_lock,data
50 .macro load_pa_tlb_lock reg
51#if __PA_LDCW_ALIGNMENT > 4
52 load32 PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
53 depi 0,31,__PA_LDCW_ALIGN_ORDER, \reg
54#else
55 load32 PA(pa_tlb_lock), \reg
56#endif
57 .endm
49 58
50 /* space_to_prot macro creates a prot id from a space id */ 59 /* space_to_prot macro creates a prot id from a space id */
51 60
@@ -457,7 +466,7 @@
457 .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault 466 .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault
458#ifdef CONFIG_SMP 467#ifdef CONFIG_SMP
459 cmpib,COND(=),n 0,\spc,2f 468 cmpib,COND(=),n 0,\spc,2f
460 load32 PA(pa_tlb_lock),\tmp 469 load_pa_tlb_lock \tmp
4611: LDCW 0(\tmp),\tmp1 4701: LDCW 0(\tmp),\tmp1
462 cmpib,COND(=) 0,\tmp1,1b 471 cmpib,COND(=) 0,\tmp1,1b
463 nop 472 nop
@@ -480,7 +489,7 @@
480 /* Release pa_tlb_lock lock. */ 489 /* Release pa_tlb_lock lock. */
481 .macro tlb_unlock1 spc,tmp 490 .macro tlb_unlock1 spc,tmp
482#ifdef CONFIG_SMP 491#ifdef CONFIG_SMP
483 load32 PA(pa_tlb_lock),\tmp 492 load_pa_tlb_lock \tmp
484 tlb_unlock0 \spc,\tmp 493 tlb_unlock0 \spc,\tmp
485#endif 494#endif
486 .endm 495 .endm
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index adf7187f8951..2d40c4ff3f69 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -36,6 +36,7 @@
36#include <asm/assembly.h> 36#include <asm/assembly.h>
37#include <asm/pgtable.h> 37#include <asm/pgtable.h>
38#include <asm/cache.h> 38#include <asm/cache.h>
39#include <asm/ldcw.h>
39#include <linux/linkage.h> 40#include <linux/linkage.h>
40 41
41 .text 42 .text
@@ -333,8 +334,12 @@ ENDPROC_CFI(flush_data_cache_local)
333 334
334 .macro tlb_lock la,flags,tmp 335 .macro tlb_lock la,flags,tmp
335#ifdef CONFIG_SMP 336#ifdef CONFIG_SMP
336 ldil L%pa_tlb_lock,%r1 337#if __PA_LDCW_ALIGNMENT > 4
337 ldo R%pa_tlb_lock(%r1),\la 338 load32 pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
339 depi 0,31,__PA_LDCW_ALIGN_ORDER, \la
340#else
341 load32 pa_tlb_lock, \la
342#endif
338 rsm PSW_SM_I,\flags 343 rsm PSW_SM_I,\flags
3391: LDCW 0(\la),\tmp 3441: LDCW 0(\la),\tmp
340 cmpib,<>,n 0,\tmp,3f 345 cmpib,<>,n 0,\tmp,3f
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 30f92391a93e..cad3e8661cd6 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -39,6 +39,7 @@
39#include <linux/kernel.h> 39#include <linux/kernel.h>
40#include <linux/mm.h> 40#include <linux/mm.h>
41#include <linux/fs.h> 41#include <linux/fs.h>
42#include <linux/cpu.h>
42#include <linux/module.h> 43#include <linux/module.h>
43#include <linux/personality.h> 44#include <linux/personality.h>
44#include <linux/ptrace.h> 45#include <linux/ptrace.h>
@@ -184,6 +185,44 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
184} 185}
185 186
186/* 187/*
188 * Idle thread support
189 *
190 * Detect when running on QEMU with SeaBIOS PDC Firmware and let
191 * QEMU idle the host too.
192 */
193
194int running_on_qemu __read_mostly;
195
196void __cpuidle arch_cpu_idle_dead(void)
197{
198 /* nop on real hardware, qemu will offline CPU. */
199 asm volatile("or %%r31,%%r31,%%r31\n":::);
200}
201
202void __cpuidle arch_cpu_idle(void)
203{
204 local_irq_enable();
205
206 /* nop on real hardware, qemu will idle sleep. */
207 asm volatile("or %%r10,%%r10,%%r10\n":::);
208}
209
210static int __init parisc_idle_init(void)
211{
212 const char *marker;
213
214 /* check QEMU/SeaBIOS marker in PAGE0 */
215 marker = (char *) &PAGE0->pad0;
216 running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
217
218 if (!running_on_qemu)
219 cpu_idle_poll_ctrl(1);
220
221 return 0;
222}
223arch_initcall(parisc_idle_init);
224
225/*
187 * Copy architecture-specific thread state 226 * Copy architecture-specific thread state
188 */ 227 */
189int 228int
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 13f7854e0d49..48f41399fc0b 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -631,11 +631,11 @@ void __init mem_init(void)
631 mem_init_print_info(NULL); 631 mem_init_print_info(NULL);
632#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */ 632#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
633 printk("virtual kernel memory layout:\n" 633 printk("virtual kernel memory layout:\n"
634 " vmalloc : 0x%p - 0x%p (%4ld MB)\n" 634 " vmalloc : 0x%px - 0x%px (%4ld MB)\n"
635 " memory : 0x%p - 0x%p (%4ld MB)\n" 635 " memory : 0x%px - 0x%px (%4ld MB)\n"
636 " .init : 0x%p - 0x%p (%4ld kB)\n" 636 " .init : 0x%px - 0x%px (%4ld kB)\n"
637 " .data : 0x%p - 0x%p (%4ld kB)\n" 637 " .data : 0x%px - 0x%px (%4ld kB)\n"
638 " .text : 0x%p - 0x%p (%4ld kB)\n", 638 " .text : 0x%px - 0x%px (%4ld kB)\n",
639 639
640 (void*)VMALLOC_START, (void*)VMALLOC_END, 640 (void*)VMALLOC_START, (void*)VMALLOC_END,
641 (VMALLOC_END - VMALLOC_START) >> 20, 641 (VMALLOC_END - VMALLOC_START) >> 20,
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
index a703452d67b6..555e22d5e07f 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -209,5 +209,11 @@ exc_##label##_book3e:
209 ori r3,r3,vector_offset@l; \ 209 ori r3,r3,vector_offset@l; \
210 mtspr SPRN_IVOR##vector_number,r3; 210 mtspr SPRN_IVOR##vector_number,r3;
211 211
212#define RFI_TO_KERNEL \
213 rfi
214
215#define RFI_TO_USER \
216 rfi
217
212#endif /* _ASM_POWERPC_EXCEPTION_64E_H */ 218#endif /* _ASM_POWERPC_EXCEPTION_64E_H */
213 219
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index b27205297e1d..7197b179c1b1 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -74,6 +74,59 @@
74 */ 74 */
75#define EX_R3 EX_DAR 75#define EX_R3 EX_DAR
76 76
77/*
78 * Macros for annotating the expected destination of (h)rfid
79 *
80 * The nop instructions allow us to insert one or more instructions to flush the
81 * L1-D cache when returning to userspace or a guest.
82 */
83#define RFI_FLUSH_SLOT \
84 RFI_FLUSH_FIXUP_SECTION; \
85 nop; \
86 nop; \
87 nop
88
89#define RFI_TO_KERNEL \
90 rfid
91
92#define RFI_TO_USER \
93 RFI_FLUSH_SLOT; \
94 rfid; \
95 b rfi_flush_fallback
96
97#define RFI_TO_USER_OR_KERNEL \
98 RFI_FLUSH_SLOT; \
99 rfid; \
100 b rfi_flush_fallback
101
102#define RFI_TO_GUEST \
103 RFI_FLUSH_SLOT; \
104 rfid; \
105 b rfi_flush_fallback
106
107#define HRFI_TO_KERNEL \
108 hrfid
109
110#define HRFI_TO_USER \
111 RFI_FLUSH_SLOT; \
112 hrfid; \
113 b hrfi_flush_fallback
114
115#define HRFI_TO_USER_OR_KERNEL \
116 RFI_FLUSH_SLOT; \
117 hrfid; \
118 b hrfi_flush_fallback
119
120#define HRFI_TO_GUEST \
121 RFI_FLUSH_SLOT; \
122 hrfid; \
123 b hrfi_flush_fallback
124
125#define HRFI_TO_UNKNOWN \
126 RFI_FLUSH_SLOT; \
127 hrfid; \
128 b hrfi_flush_fallback
129
77#ifdef CONFIG_RELOCATABLE 130#ifdef CONFIG_RELOCATABLE
78#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \ 131#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
79 mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \ 132 mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
@@ -218,7 +271,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
218 mtspr SPRN_##h##SRR0,r12; \ 271 mtspr SPRN_##h##SRR0,r12; \
219 mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ 272 mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
220 mtspr SPRN_##h##SRR1,r10; \ 273 mtspr SPRN_##h##SRR1,r10; \
221 h##rfid; \ 274 h##RFI_TO_KERNEL; \
222 b . /* prevent speculative execution */ 275 b . /* prevent speculative execution */
223#define EXCEPTION_PROLOG_PSERIES_1(label, h) \ 276#define EXCEPTION_PROLOG_PSERIES_1(label, h) \
224 __EXCEPTION_PROLOG_PSERIES_1(label, h) 277 __EXCEPTION_PROLOG_PSERIES_1(label, h)
@@ -232,7 +285,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
232 mtspr SPRN_##h##SRR0,r12; \ 285 mtspr SPRN_##h##SRR0,r12; \
233 mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ 286 mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
234 mtspr SPRN_##h##SRR1,r10; \ 287 mtspr SPRN_##h##SRR1,r10; \
235 h##rfid; \ 288 h##RFI_TO_KERNEL; \
236 b . /* prevent speculative execution */ 289 b . /* prevent speculative execution */
237 290
238#define EXCEPTION_PROLOG_PSERIES_1_NORI(label, h) \ 291#define EXCEPTION_PROLOG_PSERIES_1_NORI(label, h) \
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index 8f88f771cc55..1e82eb3caabd 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -187,7 +187,20 @@ label##3: \
187 FTR_ENTRY_OFFSET label##1b-label##3b; \ 187 FTR_ENTRY_OFFSET label##1b-label##3b; \
188 .popsection; 188 .popsection;
189 189
190#define RFI_FLUSH_FIXUP_SECTION \
191951: \
192 .pushsection __rfi_flush_fixup,"a"; \
193 .align 2; \
194952: \
195 FTR_ENTRY_OFFSET 951b-952b; \
196 .popsection;
197
198
190#ifndef __ASSEMBLY__ 199#ifndef __ASSEMBLY__
200#include <linux/types.h>
201
202extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
203
191void apply_feature_fixups(void); 204void apply_feature_fixups(void);
192void setup_feature_keys(void); 205void setup_feature_keys(void);
193#endif 206#endif
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index a409177be8bd..f0461618bf7b 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -241,6 +241,7 @@
241#define H_GET_HCA_INFO 0x1B8 241#define H_GET_HCA_INFO 0x1B8
242#define H_GET_PERF_COUNT 0x1BC 242#define H_GET_PERF_COUNT 0x1BC
243#define H_MANAGE_TRACE 0x1C0 243#define H_MANAGE_TRACE 0x1C0
244#define H_GET_CPU_CHARACTERISTICS 0x1C8
244#define H_FREE_LOGICAL_LAN_BUFFER 0x1D4 245#define H_FREE_LOGICAL_LAN_BUFFER 0x1D4
245#define H_QUERY_INT_STATE 0x1E4 246#define H_QUERY_INT_STATE 0x1E4
246#define H_POLL_PENDING 0x1D8 247#define H_POLL_PENDING 0x1D8
@@ -330,6 +331,17 @@
330#define H_SIGNAL_SYS_RESET_ALL_OTHERS -2 331#define H_SIGNAL_SYS_RESET_ALL_OTHERS -2
331/* >= 0 values are CPU number */ 332/* >= 0 values are CPU number */
332 333
334/* H_GET_CPU_CHARACTERISTICS return values */
335#define H_CPU_CHAR_SPEC_BAR_ORI31 (1ull << 63) // IBM bit 0
336#define H_CPU_CHAR_BCCTRL_SERIALISED (1ull << 62) // IBM bit 1
337#define H_CPU_CHAR_L1D_FLUSH_ORI30 (1ull << 61) // IBM bit 2
338#define H_CPU_CHAR_L1D_FLUSH_TRIG2 (1ull << 60) // IBM bit 3
339#define H_CPU_CHAR_L1D_THREAD_PRIV (1ull << 59) // IBM bit 4
340
341#define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0
342#define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1
343#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2
344
333/* Flag values used in H_REGISTER_PROC_TBL hcall */ 345/* Flag values used in H_REGISTER_PROC_TBL hcall */
334#define PROC_TABLE_OP_MASK 0x18 346#define PROC_TABLE_OP_MASK 0x18
335#define PROC_TABLE_DEREG 0x10 347#define PROC_TABLE_DEREG 0x10
@@ -436,6 +448,11 @@ static inline unsigned int get_longbusy_msecs(int longbusy_rc)
436 } 448 }
437} 449}
438 450
451struct h_cpu_char_result {
452 u64 character;
453 u64 behaviour;
454};
455
439#endif /* __ASSEMBLY__ */ 456#endif /* __ASSEMBLY__ */
440#endif /* __KERNEL__ */ 457#endif /* __KERNEL__ */
441#endif /* _ASM_POWERPC_HVCALL_H */ 458#endif /* _ASM_POWERPC_HVCALL_H */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 3892db93b837..23ac7fc0af23 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -232,6 +232,16 @@ struct paca_struct {
232 struct sibling_subcore_state *sibling_subcore_state; 232 struct sibling_subcore_state *sibling_subcore_state;
233#endif 233#endif
234#endif 234#endif
235#ifdef CONFIG_PPC_BOOK3S_64
236 /*
237 * rfi fallback flush must be in its own cacheline to prevent
238 * other paca data leaking into the L1d
239 */
240 u64 exrfi[EX_SIZE] __aligned(0x80);
241 void *rfi_flush_fallback_area;
242 u64 l1d_flush_congruence;
243 u64 l1d_flush_sets;
244#endif
235}; 245};
236 246
237extern void copy_mm_to_paca(struct mm_struct *mm); 247extern void copy_mm_to_paca(struct mm_struct *mm);
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index 7f01b22fa6cb..55eddf50d149 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -326,4 +326,18 @@ static inline long plapr_signal_sys_reset(long cpu)
326 return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu); 326 return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu);
327} 327}
328 328
329static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
330{
331 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
332 long rc;
333
334 rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf);
335 if (rc == H_SUCCESS) {
336 p->character = retbuf[0];
337 p->behaviour = retbuf[1];
338 }
339
340 return rc;
341}
342
329#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */ 343#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index cf00ec26303a..469b7fdc9be4 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -39,6 +39,19 @@ static inline void pseries_big_endian_exceptions(void) {}
39static inline void pseries_little_endian_exceptions(void) {} 39static inline void pseries_little_endian_exceptions(void) {}
40#endif /* CONFIG_PPC_PSERIES */ 40#endif /* CONFIG_PPC_PSERIES */
41 41
42void rfi_flush_enable(bool enable);
43
44/* These are bit flags */
45enum l1d_flush_type {
46 L1D_FLUSH_NONE = 0x1,
47 L1D_FLUSH_FALLBACK = 0x2,
48 L1D_FLUSH_ORI = 0x4,
49 L1D_FLUSH_MTTRIG = 0x8,
50};
51
52void __init setup_rfi_flush(enum l1d_flush_type, bool enable);
53void do_rfi_flush_fixups(enum l1d_flush_type types);
54
42#endif /* !__ASSEMBLY__ */ 55#endif /* !__ASSEMBLY__ */
43 56
44#endif /* _ASM_POWERPC_SETUP_H */ 57#endif /* _ASM_POWERPC_SETUP_H */
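
Taken together, the H_GET_CPU_CHARACTERISTICS wrapper, the H_CPU_CHAR_*/H_CPU_BEHAV_* bits, and the l1d_flush_type flags above give platform code what it needs to choose an RFI flush method at boot. A hedged sketch of how a pseries-style setup path might wire these together; the function name example_setup_rfi_flush and the exact policy are illustrative, since the platform hookup itself is not in the hunks shown here:

#include <linux/types.h>
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#include <asm/setup.h>

static void example_setup_rfi_flush(void)
{
	struct h_cpu_char_result result;
	enum l1d_flush_type types = L1D_FLUSH_FALLBACK;
	bool enable = true;
	long rc;

	rc = plpar_get_cpu_characteristics(&result);
	if (rc == H_SUCCESS) {
		types = L1D_FLUSH_NONE;

		/* Prefer a hardware-assisted flush if firmware advertises one */
		if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
			types |= L1D_FLUSH_MTTRIG;
		if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
			types |= L1D_FLUSH_ORI;

		/* Otherwise fall back to the software displacement flush */
		if (types == L1D_FLUSH_NONE)
			types = L1D_FLUSH_FALLBACK;

		/* Firmware may report that user/kernel returns need no flush */
		if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
			enable = false;
	}

	setup_rfi_flush(types, enable);
}
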
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 6b958414b4e0..f390d57cf2e1 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -237,6 +237,11 @@ int main(void)
237 OFFSET(PACA_NMI_EMERG_SP, paca_struct, nmi_emergency_sp); 237 OFFSET(PACA_NMI_EMERG_SP, paca_struct, nmi_emergency_sp);
238 OFFSET(PACA_IN_MCE, paca_struct, in_mce); 238 OFFSET(PACA_IN_MCE, paca_struct, in_mce);
239 OFFSET(PACA_IN_NMI, paca_struct, in_nmi); 239 OFFSET(PACA_IN_NMI, paca_struct, in_nmi);
240 OFFSET(PACA_RFI_FLUSH_FALLBACK_AREA, paca_struct, rfi_flush_fallback_area);
241 OFFSET(PACA_EXRFI, paca_struct, exrfi);
242 OFFSET(PACA_L1D_FLUSH_CONGRUENCE, paca_struct, l1d_flush_congruence);
243 OFFSET(PACA_L1D_FLUSH_SETS, paca_struct, l1d_flush_sets);
244
240#endif 245#endif
241 OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id); 246 OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id);
242 OFFSET(PACAKEXECSTATE, paca_struct, kexec_state); 247 OFFSET(PACAKEXECSTATE, paca_struct, kexec_state);
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 3320bcac7192..2748584b767d 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -37,6 +37,11 @@
37#include <asm/tm.h> 37#include <asm/tm.h>
38#include <asm/ppc-opcode.h> 38#include <asm/ppc-opcode.h>
39#include <asm/export.h> 39#include <asm/export.h>
40#ifdef CONFIG_PPC_BOOK3S
41#include <asm/exception-64s.h>
42#else
43#include <asm/exception-64e.h>
44#endif
40 45
41/* 46/*
42 * System calls. 47 * System calls.
@@ -262,13 +267,23 @@ BEGIN_FTR_SECTION
262END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 267END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
263 268
264 ld r13,GPR13(r1) /* only restore r13 if returning to usermode */ 269 ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
270 ld r2,GPR2(r1)
271 ld r1,GPR1(r1)
272 mtlr r4
273 mtcr r5
274 mtspr SPRN_SRR0,r7
275 mtspr SPRN_SRR1,r8
276 RFI_TO_USER
277 b . /* prevent speculative execution */
278
279 /* exit to kernel */
2651: ld r2,GPR2(r1) 2801: ld r2,GPR2(r1)
266 ld r1,GPR1(r1) 281 ld r1,GPR1(r1)
267 mtlr r4 282 mtlr r4
268 mtcr r5 283 mtcr r5
269 mtspr SPRN_SRR0,r7 284 mtspr SPRN_SRR0,r7
270 mtspr SPRN_SRR1,r8 285 mtspr SPRN_SRR1,r8
271 RFI 286 RFI_TO_KERNEL
272 b . /* prevent speculative execution */ 287 b . /* prevent speculative execution */
273 288
274.Lsyscall_error: 289.Lsyscall_error:
@@ -397,8 +412,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
397 mtmsrd r10, 1 412 mtmsrd r10, 1
398 mtspr SPRN_SRR0, r11 413 mtspr SPRN_SRR0, r11
399 mtspr SPRN_SRR1, r12 414 mtspr SPRN_SRR1, r12
400 415 RFI_TO_USER
401 rfid
402 b . /* prevent speculative execution */ 416 b . /* prevent speculative execution */
403#endif 417#endif
404_ASM_NOKPROBE_SYMBOL(system_call_common); 418_ASM_NOKPROBE_SYMBOL(system_call_common);
@@ -878,7 +892,7 @@ BEGIN_FTR_SECTION
878END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 892END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
879 ACCOUNT_CPU_USER_EXIT(r13, r2, r4) 893 ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
880 REST_GPR(13, r1) 894 REST_GPR(13, r1)
8811: 895
882 mtspr SPRN_SRR1,r3 896 mtspr SPRN_SRR1,r3
883 897
884 ld r2,_CCR(r1) 898 ld r2,_CCR(r1)
@@ -891,8 +905,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
891 ld r3,GPR3(r1) 905 ld r3,GPR3(r1)
892 ld r4,GPR4(r1) 906 ld r4,GPR4(r1)
893 ld r1,GPR1(r1) 907 ld r1,GPR1(r1)
908 RFI_TO_USER
909 b . /* prevent speculative execution */
894 910
895 rfid 9111: mtspr SPRN_SRR1,r3
912
913 ld r2,_CCR(r1)
914 mtcrf 0xFF,r2
915 ld r2,_NIP(r1)
916 mtspr SPRN_SRR0,r2
917
918 ld r0,GPR0(r1)
919 ld r2,GPR2(r1)
920 ld r3,GPR3(r1)
921 ld r4,GPR4(r1)
922 ld r1,GPR1(r1)
923 RFI_TO_KERNEL
896 b . /* prevent speculative execution */ 924 b . /* prevent speculative execution */
897 925
898#endif /* CONFIG_PPC_BOOK3E */ 926#endif /* CONFIG_PPC_BOOK3E */
@@ -1073,7 +1101,7 @@ __enter_rtas:
1073 1101
1074 mtspr SPRN_SRR0,r5 1102 mtspr SPRN_SRR0,r5
1075 mtspr SPRN_SRR1,r6 1103 mtspr SPRN_SRR1,r6
1076 rfid 1104 RFI_TO_KERNEL
1077 b . /* prevent speculative execution */ 1105 b . /* prevent speculative execution */
1078 1106
1079rtas_return_loc: 1107rtas_return_loc:
@@ -1098,7 +1126,7 @@ rtas_return_loc:
1098 1126
1099 mtspr SPRN_SRR0,r3 1127 mtspr SPRN_SRR0,r3
1100 mtspr SPRN_SRR1,r4 1128 mtspr SPRN_SRR1,r4
1101 rfid 1129 RFI_TO_KERNEL
1102 b . /* prevent speculative execution */ 1130 b . /* prevent speculative execution */
1103_ASM_NOKPROBE_SYMBOL(__enter_rtas) 1131_ASM_NOKPROBE_SYMBOL(__enter_rtas)
1104_ASM_NOKPROBE_SYMBOL(rtas_return_loc) 1132_ASM_NOKPROBE_SYMBOL(rtas_return_loc)
@@ -1171,7 +1199,7 @@ _GLOBAL(enter_prom)
1171 LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE) 1199 LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
1172 andc r11,r11,r12 1200 andc r11,r11,r12
1173 mtsrr1 r11 1201 mtsrr1 r11
1174 rfid 1202 RFI_TO_KERNEL
1175#endif /* CONFIG_PPC_BOOK3E */ 1203#endif /* CONFIG_PPC_BOOK3E */
1176 1204
11771: /* Return from OF */ 12051: /* Return from OF */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e441b469dc8f..2dc10bf646b8 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -256,7 +256,7 @@ BEGIN_FTR_SECTION
256 LOAD_HANDLER(r12, machine_check_handle_early) 256 LOAD_HANDLER(r12, machine_check_handle_early)
2571: mtspr SPRN_SRR0,r12 2571: mtspr SPRN_SRR0,r12
258 mtspr SPRN_SRR1,r11 258 mtspr SPRN_SRR1,r11
259 rfid 259 RFI_TO_KERNEL
260 b . /* prevent speculative execution */ 260 b . /* prevent speculative execution */
2612: 2612:
262 /* Stack overflow. Stay on emergency stack and panic. 262 /* Stack overflow. Stay on emergency stack and panic.
@@ -445,7 +445,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
445 li r3,MSR_ME 445 li r3,MSR_ME
446 andc r10,r10,r3 /* Turn off MSR_ME */ 446 andc r10,r10,r3 /* Turn off MSR_ME */
447 mtspr SPRN_SRR1,r10 447 mtspr SPRN_SRR1,r10
448 rfid 448 RFI_TO_KERNEL
449 b . 449 b .
4502: 4502:
451 /* 451 /*
@@ -463,7 +463,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
463 */ 463 */
464 bl machine_check_queue_event 464 bl machine_check_queue_event
465 MACHINE_CHECK_HANDLER_WINDUP 465 MACHINE_CHECK_HANDLER_WINDUP
466 rfid 466 RFI_TO_USER_OR_KERNEL
4679: 4679:
468 /* Deliver the machine check to host kernel in V mode. */ 468 /* Deliver the machine check to host kernel in V mode. */
469 MACHINE_CHECK_HANDLER_WINDUP 469 MACHINE_CHECK_HANDLER_WINDUP
@@ -598,6 +598,9 @@ EXC_COMMON_BEGIN(slb_miss_common)
598 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ 598 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
599 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ 599 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
600 600
601 andi. r9,r11,MSR_PR // Check for exception from userspace
602 cmpdi cr4,r9,MSR_PR // And save the result in CR4 for later
603
601 /* 604 /*
602 * Test MSR_RI before calling slb_allocate_realmode, because the 605 * Test MSR_RI before calling slb_allocate_realmode, because the
603 * MSR in r11 gets clobbered. However we still want to allocate 606 * MSR in r11 gets clobbered. However we still want to allocate
@@ -624,9 +627,12 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
624 627
625 /* All done -- return from exception. */ 628 /* All done -- return from exception. */
626 629
630 bne cr4,1f /* returning to kernel */
631
627.machine push 632.machine push
628.machine "power4" 633.machine "power4"
629 mtcrf 0x80,r9 634 mtcrf 0x80,r9
635 mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */
630 mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */ 636 mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */
631 mtcrf 0x02,r9 /* I/D indication is in cr6 */ 637 mtcrf 0x02,r9 /* I/D indication is in cr6 */
632 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ 638 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
@@ -640,9 +646,30 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
640 ld r11,PACA_EXSLB+EX_R11(r13) 646 ld r11,PACA_EXSLB+EX_R11(r13)
641 ld r12,PACA_EXSLB+EX_R12(r13) 647 ld r12,PACA_EXSLB+EX_R12(r13)
642 ld r13,PACA_EXSLB+EX_R13(r13) 648 ld r13,PACA_EXSLB+EX_R13(r13)
643 rfid 649 RFI_TO_USER
650 b . /* prevent speculative execution */
6511:
652.machine push
653.machine "power4"
654 mtcrf 0x80,r9
655 mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */
656 mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */
657 mtcrf 0x02,r9 /* I/D indication is in cr6 */
658 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
659.machine pop
660
661 RESTORE_CTR(r9, PACA_EXSLB)
662 RESTORE_PPR_PACA(PACA_EXSLB, r9)
663 mr r3,r12
664 ld r9,PACA_EXSLB+EX_R9(r13)
665 ld r10,PACA_EXSLB+EX_R10(r13)
666 ld r11,PACA_EXSLB+EX_R11(r13)
667 ld r12,PACA_EXSLB+EX_R12(r13)
668 ld r13,PACA_EXSLB+EX_R13(r13)
669 RFI_TO_KERNEL
644 b . /* prevent speculative execution */ 670 b . /* prevent speculative execution */
645 671
672
6462: std r3,PACA_EXSLB+EX_DAR(r13) 6732: std r3,PACA_EXSLB+EX_DAR(r13)
647 mr r3,r12 674 mr r3,r12
648 mfspr r11,SPRN_SRR0 675 mfspr r11,SPRN_SRR0
@@ -651,7 +678,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
651 mtspr SPRN_SRR0,r10 678 mtspr SPRN_SRR0,r10
652 ld r10,PACAKMSR(r13) 679 ld r10,PACAKMSR(r13)
653 mtspr SPRN_SRR1,r10 680 mtspr SPRN_SRR1,r10
654 rfid 681 RFI_TO_KERNEL
655 b . 682 b .
656 683
6578: std r3,PACA_EXSLB+EX_DAR(r13) 6848: std r3,PACA_EXSLB+EX_DAR(r13)
@@ -662,7 +689,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
662 mtspr SPRN_SRR0,r10 689 mtspr SPRN_SRR0,r10
663 ld r10,PACAKMSR(r13) 690 ld r10,PACAKMSR(r13)
664 mtspr SPRN_SRR1,r10 691 mtspr SPRN_SRR1,r10
665 rfid 692 RFI_TO_KERNEL
666 b . 693 b .
667 694
668EXC_COMMON_BEGIN(unrecov_slb) 695EXC_COMMON_BEGIN(unrecov_slb)
@@ -901,7 +928,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
901 mtspr SPRN_SRR0,r10 ; \ 928 mtspr SPRN_SRR0,r10 ; \
902 ld r10,PACAKMSR(r13) ; \ 929 ld r10,PACAKMSR(r13) ; \
903 mtspr SPRN_SRR1,r10 ; \ 930 mtspr SPRN_SRR1,r10 ; \
904 rfid ; \ 931 RFI_TO_KERNEL ; \
905 b . ; /* prevent speculative execution */ 932 b . ; /* prevent speculative execution */
906 933
907#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH 934#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
@@ -917,7 +944,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
917 xori r12,r12,MSR_LE ; \ 944 xori r12,r12,MSR_LE ; \
918 mtspr SPRN_SRR1,r12 ; \ 945 mtspr SPRN_SRR1,r12 ; \
919 mr r13,r9 ; \ 946 mr r13,r9 ; \
920 rfid ; /* return to userspace */ \ 947 RFI_TO_USER ; /* return to userspace */ \
921 b . ; /* prevent speculative execution */ 948 b . ; /* prevent speculative execution */
922#else 949#else
923#define SYSCALL_FASTENDIAN_TEST 950#define SYSCALL_FASTENDIAN_TEST
@@ -1063,7 +1090,7 @@ TRAMP_REAL_BEGIN(hmi_exception_early)
1063 mtcr r11 1090 mtcr r11
1064 REST_GPR(11, r1) 1091 REST_GPR(11, r1)
1065 ld r1,GPR1(r1) 1092 ld r1,GPR1(r1)
1066 hrfid 1093 HRFI_TO_USER_OR_KERNEL
1067 1094
10681: mtcr r11 10951: mtcr r11
1069 REST_GPR(11, r1) 1096 REST_GPR(11, r1)
@@ -1314,7 +1341,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1314 ld r11,PACA_EXGEN+EX_R11(r13) 1341 ld r11,PACA_EXGEN+EX_R11(r13)
1315 ld r12,PACA_EXGEN+EX_R12(r13) 1342 ld r12,PACA_EXGEN+EX_R12(r13)
1316 ld r13,PACA_EXGEN+EX_R13(r13) 1343 ld r13,PACA_EXGEN+EX_R13(r13)
1317 HRFID 1344 HRFI_TO_UNKNOWN
1318 b . 1345 b .
1319#endif 1346#endif
1320 1347
@@ -1418,10 +1445,94 @@ masked_##_H##interrupt: \
1418 ld r10,PACA_EXGEN+EX_R10(r13); \ 1445 ld r10,PACA_EXGEN+EX_R10(r13); \
1419 ld r11,PACA_EXGEN+EX_R11(r13); \ 1446 ld r11,PACA_EXGEN+EX_R11(r13); \
1420 /* returns to kernel where r13 must be set up, so don't restore it */ \ 1447 /* returns to kernel where r13 must be set up, so don't restore it */ \
1421 ##_H##rfid; \ 1448 ##_H##RFI_TO_KERNEL; \
1422 b .; \ 1449 b .; \
1423 MASKED_DEC_HANDLER(_H) 1450 MASKED_DEC_HANDLER(_H)
1424 1451
1452TRAMP_REAL_BEGIN(rfi_flush_fallback)
1453 SET_SCRATCH0(r13);
1454 GET_PACA(r13);
1455 std r9,PACA_EXRFI+EX_R9(r13)
1456 std r10,PACA_EXRFI+EX_R10(r13)
1457 std r11,PACA_EXRFI+EX_R11(r13)
1458 std r12,PACA_EXRFI+EX_R12(r13)
1459 std r8,PACA_EXRFI+EX_R13(r13)
1460 mfctr r9
1461 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
1462 ld r11,PACA_L1D_FLUSH_SETS(r13)
1463 ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
1464 /*
1465	 * The load addresses are at staggered offsets within cachelines,
1466 * which suits some pipelines better (on others it should not
1467 * hurt).
1468 */
1469 addi r12,r12,8
1470 mtctr r11
1471 DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
1472
1473 /* order ld/st prior to dcbt stop all streams with flushing */
1474 sync
14751: li r8,0
1476 .rept 8 /* 8-way set associative */
1477 ldx r11,r10,r8
1478 add r8,r8,r12
1479 xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not
1480 add r8,r8,r11 // Add 0, this creates a dependency on the ldx
1481 .endr
1482 addi r10,r10,128 /* 128 byte cache line */
1483 bdnz 1b
1484
1485 mtctr r9
1486 ld r9,PACA_EXRFI+EX_R9(r13)
1487 ld r10,PACA_EXRFI+EX_R10(r13)
1488 ld r11,PACA_EXRFI+EX_R11(r13)
1489 ld r12,PACA_EXRFI+EX_R12(r13)
1490 ld r8,PACA_EXRFI+EX_R13(r13)
1491 GET_SCRATCH0(r13);
1492 rfid
1493
1494TRAMP_REAL_BEGIN(hrfi_flush_fallback)
1495 SET_SCRATCH0(r13);
1496 GET_PACA(r13);
1497 std r9,PACA_EXRFI+EX_R9(r13)
1498 std r10,PACA_EXRFI+EX_R10(r13)
1499 std r11,PACA_EXRFI+EX_R11(r13)
1500 std r12,PACA_EXRFI+EX_R12(r13)
1501 std r8,PACA_EXRFI+EX_R13(r13)
1502 mfctr r9
1503 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
1504 ld r11,PACA_L1D_FLUSH_SETS(r13)
1505 ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
1506 /*
1507	 * The load addresses are at staggered offsets within cachelines,
1508 * which suits some pipelines better (on others it should not
1509 * hurt).
1510 */
1511 addi r12,r12,8
1512 mtctr r11
1513 DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
1514
1515 /* order ld/st prior to dcbt stop all streams with flushing */
1516 sync
15171: li r8,0
1518 .rept 8 /* 8-way set associative */
1519 ldx r11,r10,r8
1520 add r8,r8,r12
1521 xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not
1522 add r8,r8,r11 // Add 0, this creates a dependency on the ldx
1523 .endr
1524 addi r10,r10,128 /* 128 byte cache line */
1525 bdnz 1b
1526
1527 mtctr r9
1528 ld r9,PACA_EXRFI+EX_R9(r13)
1529 ld r10,PACA_EXRFI+EX_R10(r13)
1530 ld r11,PACA_EXRFI+EX_R11(r13)
1531 ld r12,PACA_EXRFI+EX_R12(r13)
1532 ld r8,PACA_EXRFI+EX_R13(r13)
1533 GET_SCRATCH0(r13);
1534 hrfid
1535
1425/* 1536/*
1426 * Real mode exceptions actually use this too, but alternate 1537 * Real mode exceptions actually use this too, but alternate
1427 * instruction code patches (which end up in the common .text area) 1538 * instruction code patches (which end up in the common .text area)
@@ -1441,7 +1552,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
1441 addi r13, r13, 4 1552 addi r13, r13, 4
1442 mtspr SPRN_SRR0, r13 1553 mtspr SPRN_SRR0, r13
1443 GET_SCRATCH0(r13) 1554 GET_SCRATCH0(r13)
1444 rfid 1555 RFI_TO_KERNEL
1445 b . 1556 b .
1446 1557
1447TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt) 1558TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
@@ -1453,7 +1564,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
1453 addi r13, r13, 4 1564 addi r13, r13, 4
1454 mtspr SPRN_HSRR0, r13 1565 mtspr SPRN_HSRR0, r13
1455 GET_SCRATCH0(r13) 1566 GET_SCRATCH0(r13)
1456 hrfid 1567 HRFI_TO_KERNEL
1457 b . 1568 b .
1458#endif 1569#endif
1459 1570
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 8956a9856604..491be4179ddd 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -801,3 +801,104 @@ static int __init disable_hardlockup_detector(void)
801 return 0; 801 return 0;
802} 802}
803early_initcall(disable_hardlockup_detector); 803early_initcall(disable_hardlockup_detector);
804
805#ifdef CONFIG_PPC_BOOK3S_64
806static enum l1d_flush_type enabled_flush_types;
807static void *l1d_flush_fallback_area;
808static bool no_rfi_flush;
809bool rfi_flush;
810
811static int __init handle_no_rfi_flush(char *p)
812{
813	 pr_info("rfi-flush: disabled on command line.\n");
814 no_rfi_flush = true;
815 return 0;
816}
817early_param("no_rfi_flush", handle_no_rfi_flush);
818
819/*
820	 * The RFI flush is not KPTI, but because users will see documentation that
821	 * says to use nopti, we hijack that option here to also disable the RFI flush.
822 */
823static int __init handle_no_pti(char *p)
824{
825 pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
826 handle_no_rfi_flush(NULL);
827 return 0;
828}
829early_param("nopti", handle_no_pti);
830
831static void do_nothing(void *unused)
832{
833 /*
834	 * We don't need to do the flush explicitly; just entering and exiting the
835	 * kernel is sufficient, and the RFI exit handlers will do the right thing.
836 */
837}
838
839void rfi_flush_enable(bool enable)
840{
841 if (rfi_flush == enable)
842 return;
843
844 if (enable) {
845 do_rfi_flush_fixups(enabled_flush_types);
846 on_each_cpu(do_nothing, NULL, 1);
847 } else
848 do_rfi_flush_fixups(L1D_FLUSH_NONE);
849
850 rfi_flush = enable;
851}
852
853static void init_fallback_flush(void)
854{
855 u64 l1d_size, limit;
856 int cpu;
857
858 l1d_size = ppc64_caches.l1d.size;
859 limit = min(safe_stack_limit(), ppc64_rma_size);
860
861 /*
862 * Align to L1d size, and size it at 2x L1d size, to catch possible
863 * hardware prefetch runoff. We don't have a recipe for load patterns to
864 * reliably avoid the prefetcher.
865 */
866 l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit));
867 memset(l1d_flush_fallback_area, 0, l1d_size * 2);
868
869 for_each_possible_cpu(cpu) {
870 /*
871 * The fallback flush is currently coded for 8-way
872 * associativity. Different associativity is possible, but it
873 * will be treated as 8-way and may not evict the lines as
874 * effectively.
875 *
876 * 128 byte lines are mandatory.
877 */
878 u64 c = l1d_size / 8;
879
880 paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area;
881 paca[cpu].l1d_flush_congruence = c;
882 paca[cpu].l1d_flush_sets = c / 128;
883 }
884}
885
886void __init setup_rfi_flush(enum l1d_flush_type types, bool enable)
887{
888 if (types & L1D_FLUSH_FALLBACK) {
889 pr_info("rfi-flush: Using fallback displacement flush\n");
890 init_fallback_flush();
891 }
892
893 if (types & L1D_FLUSH_ORI)
894 pr_info("rfi-flush: Using ori type flush\n");
895
896 if (types & L1D_FLUSH_MTTRIG)
897 pr_info("rfi-flush: Using mttrig type flush\n");
898
899 enabled_flush_types = types;
900
901 if (!no_rfi_flush)
902 rfi_flush_enable(enable);
903}
904#endif /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 0494e1566ee2..307843d23682 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -132,6 +132,15 @@ SECTIONS
132 /* Read-only data */ 132 /* Read-only data */
133 RO_DATA(PAGE_SIZE) 133 RO_DATA(PAGE_SIZE)
134 134
135#ifdef CONFIG_PPC64
136 . = ALIGN(8);
137 __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
138 __start___rfi_flush_fixup = .;
139 *(__rfi_flush_fixup)
140 __stop___rfi_flush_fixup = .;
141 }
142#endif
143
135 EXCEPTION_TABLE(0) 144 EXCEPTION_TABLE(0)
136 145
137 NOTES :kernel :notes 146 NOTES :kernel :notes
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 29ebe2fd5867..a93d719edc90 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -235,6 +235,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
235 gpte->may_read = true; 235 gpte->may_read = true;
236 gpte->may_write = true; 236 gpte->may_write = true;
237 gpte->page_size = MMU_PAGE_4K; 237 gpte->page_size = MMU_PAGE_4K;
238 gpte->wimg = HPTE_R_M;
238 239
239 return 0; 240 return 0;
240 } 241 }
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 966097232d21..b73dbc9e797d 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -65,11 +65,17 @@ struct kvm_resize_hpt {
65 u32 order; 65 u32 order;
66 66
67 /* These fields protected by kvm->lock */ 67 /* These fields protected by kvm->lock */
68
69 /* Possible values and their usage:
70 * <0 an error occurred during allocation,
71	 * -EBUSY allocation is in progress,
72	 * 0 allocation made successfully.
73 */
68 int error; 74 int error;
69 bool prepare_done;
70 75
71 /* Private to the work thread, until prepare_done is true, 76 /* Private to the work thread, until error != -EBUSY,
72 * then protected by kvm->resize_hpt_sem */ 77 * then protected by kvm->lock.
78 */
73 struct kvm_hpt_info hpt; 79 struct kvm_hpt_info hpt;
74}; 80};
75 81
@@ -159,8 +165,6 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
159 * Reset all the reverse-mapping chains for all memslots 165 * Reset all the reverse-mapping chains for all memslots
160 */ 166 */
161 kvmppc_rmap_reset(kvm); 167 kvmppc_rmap_reset(kvm);
162 /* Ensure that each vcpu will flush its TLB on next entry. */
163 cpumask_setall(&kvm->arch.need_tlb_flush);
164 err = 0; 168 err = 0;
165 goto out; 169 goto out;
166 } 170 }
@@ -176,6 +180,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
176 kvmppc_set_hpt(kvm, &info); 180 kvmppc_set_hpt(kvm, &info);
177 181
178out: 182out:
183 if (err == 0)
184 /* Ensure that each vcpu will flush its TLB on next entry. */
185 cpumask_setall(&kvm->arch.need_tlb_flush);
186
179 mutex_unlock(&kvm->lock); 187 mutex_unlock(&kvm->lock);
180 return err; 188 return err;
181} 189}
@@ -1413,16 +1421,20 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize)
1413 1421
1414static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize) 1422static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize)
1415{ 1423{
1416 BUG_ON(kvm->arch.resize_hpt != resize); 1424 if (WARN_ON(!mutex_is_locked(&kvm->lock)))
1425 return;
1417 1426
1418 if (!resize) 1427 if (!resize)
1419 return; 1428 return;
1420 1429
1421 if (resize->hpt.virt) 1430 if (resize->error != -EBUSY) {
1422 kvmppc_free_hpt(&resize->hpt); 1431 if (resize->hpt.virt)
1432 kvmppc_free_hpt(&resize->hpt);
1433 kfree(resize);
1434 }
1423 1435
1424 kvm->arch.resize_hpt = NULL; 1436 if (kvm->arch.resize_hpt == resize)
1425 kfree(resize); 1437 kvm->arch.resize_hpt = NULL;
1426} 1438}
1427 1439
1428static void resize_hpt_prepare_work(struct work_struct *work) 1440static void resize_hpt_prepare_work(struct work_struct *work)
@@ -1431,17 +1443,41 @@ static void resize_hpt_prepare_work(struct work_struct *work)
1431 struct kvm_resize_hpt, 1443 struct kvm_resize_hpt,
1432 work); 1444 work);
1433 struct kvm *kvm = resize->kvm; 1445 struct kvm *kvm = resize->kvm;
1434 int err; 1446 int err = 0;
1435 1447
1436 resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n", 1448 if (WARN_ON(resize->error != -EBUSY))
1437 resize->order); 1449 return;
1438
1439 err = resize_hpt_allocate(resize);
1440 1450
1441 mutex_lock(&kvm->lock); 1451 mutex_lock(&kvm->lock);
1442 1452
1453 /* Request is still current? */
1454 if (kvm->arch.resize_hpt == resize) {
1455 /* We may request large allocations here:
1456	 * so avoid sleeping for a long time while holding kvm->lock.
1457 */
1458 mutex_unlock(&kvm->lock);
1459
1460 resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
1461 resize->order);
1462
1463 err = resize_hpt_allocate(resize);
1464
1465	 /* We have a strict assumption about -EBUSY
1466 * when preparing for HPT resize.
1467 */
1468 if (WARN_ON(err == -EBUSY))
1469 err = -EINPROGRESS;
1470
1471 mutex_lock(&kvm->lock);
1472 /* It is possible that kvm->arch.resize_hpt != resize
1473 * after we grab kvm->lock again.
1474 */
1475 }
1476
1443 resize->error = err; 1477 resize->error = err;
1444 resize->prepare_done = true; 1478
1479 if (kvm->arch.resize_hpt != resize)
1480 resize_hpt_release(kvm, resize);
1445 1481
1446 mutex_unlock(&kvm->lock); 1482 mutex_unlock(&kvm->lock);
1447} 1483}
@@ -1466,14 +1502,12 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
1466 1502
1467 if (resize) { 1503 if (resize) {
1468 if (resize->order == shift) { 1504 if (resize->order == shift) {
1469 /* Suitable resize in progress */ 1505 /* Suitable resize in progress? */
1470 if (resize->prepare_done) { 1506 ret = resize->error;
1471 ret = resize->error; 1507 if (ret == -EBUSY)
1472 if (ret != 0)
1473 resize_hpt_release(kvm, resize);
1474 } else {
1475 ret = 100; /* estimated time in ms */ 1508 ret = 100; /* estimated time in ms */
1476 } 1509 else if (ret)
1510 resize_hpt_release(kvm, resize);
1477 1511
1478 goto out; 1512 goto out;
1479 } 1513 }
@@ -1493,6 +1527,8 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
1493 ret = -ENOMEM; 1527 ret = -ENOMEM;
1494 goto out; 1528 goto out;
1495 } 1529 }
1530
1531 resize->error = -EBUSY;
1496 resize->order = shift; 1532 resize->order = shift;
1497 resize->kvm = kvm; 1533 resize->kvm = kvm;
1498 INIT_WORK(&resize->work, resize_hpt_prepare_work); 1534 INIT_WORK(&resize->work, resize_hpt_prepare_work);
@@ -1547,16 +1583,12 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
1547 if (!resize || (resize->order != shift)) 1583 if (!resize || (resize->order != shift))
1548 goto out; 1584 goto out;
1549 1585
1550 ret = -EBUSY;
1551 if (!resize->prepare_done)
1552 goto out;
1553
1554 ret = resize->error; 1586 ret = resize->error;
1555 if (ret != 0) 1587 if (ret)
1556 goto out; 1588 goto out;
1557 1589
1558 ret = resize_hpt_rehash(resize); 1590 ret = resize_hpt_rehash(resize);
1559 if (ret != 0) 1591 if (ret)
1560 goto out; 1592 goto out;
1561 1593
1562 resize_hpt_pivot(resize); 1594 resize_hpt_pivot(resize);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 2659844784b8..9c61f736c75b 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -79,7 +79,7 @@ _GLOBAL_TOC(kvmppc_hv_entry_trampoline)
79 mtmsrd r0,1 /* clear RI in MSR */ 79 mtmsrd r0,1 /* clear RI in MSR */
80 mtsrr0 r5 80 mtsrr0 r5
81 mtsrr1 r6 81 mtsrr1 r6
82 RFI 82 RFI_TO_KERNEL
83 83
84kvmppc_call_hv_entry: 84kvmppc_call_hv_entry:
85BEGIN_FTR_SECTION 85BEGIN_FTR_SECTION
@@ -199,7 +199,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
199 mtmsrd r6, 1 /* Clear RI in MSR */ 199 mtmsrd r6, 1 /* Clear RI in MSR */
200 mtsrr0 r8 200 mtsrr0 r8
201 mtsrr1 r7 201 mtsrr1 r7
202 RFI 202 RFI_TO_KERNEL
203 203
204 /* Virtual-mode return */ 204 /* Virtual-mode return */
205.Lvirt_return: 205.Lvirt_return:
@@ -1167,8 +1167,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1167 1167
1168 ld r0, VCPU_GPR(R0)(r4) 1168 ld r0, VCPU_GPR(R0)(r4)
1169 ld r4, VCPU_GPR(R4)(r4) 1169 ld r4, VCPU_GPR(R4)(r4)
1170 1170 HRFI_TO_GUEST
1171 hrfid
1172 b . 1171 b .
1173 1172
1174secondary_too_late: 1173secondary_too_late:
@@ -3320,7 +3319,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
3320 ld r4, PACAKMSR(r13) 3319 ld r4, PACAKMSR(r13)
3321 mtspr SPRN_SRR0, r3 3320 mtspr SPRN_SRR0, r3
3322 mtspr SPRN_SRR1, r4 3321 mtspr SPRN_SRR1, r4
3323 rfid 3322 RFI_TO_KERNEL
33249: addi r3, r1, STACK_FRAME_OVERHEAD 33239: addi r3, r1, STACK_FRAME_OVERHEAD
3325 bl kvmppc_bad_interrupt 3324 bl kvmppc_bad_interrupt
3326 b 9b 3325 b 9b
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index d0dc8624198f..7deaeeb14b93 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -60,6 +60,7 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
60#define MSR_USER32 MSR_USER 60#define MSR_USER32 MSR_USER
61#define MSR_USER64 MSR_USER 61#define MSR_USER64 MSR_USER
62#define HW_PAGE_SIZE PAGE_SIZE 62#define HW_PAGE_SIZE PAGE_SIZE
63#define HPTE_R_M _PAGE_COHERENT
63#endif 64#endif
64 65
65static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu) 66static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
@@ -557,6 +558,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
557 pte.eaddr = eaddr; 558 pte.eaddr = eaddr;
558 pte.vpage = eaddr >> 12; 559 pte.vpage = eaddr >> 12;
559 pte.page_size = MMU_PAGE_64K; 560 pte.page_size = MMU_PAGE_64K;
561 pte.wimg = HPTE_R_M;
560 } 562 }
561 563
562 switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) { 564 switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 42a4b237df5f..34a5adeff084 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -46,6 +46,9 @@
46 46
47#define FUNC(name) name 47#define FUNC(name) name
48 48
49#define RFI_TO_KERNEL RFI
50#define RFI_TO_GUEST RFI
51
49.macro INTERRUPT_TRAMPOLINE intno 52.macro INTERRUPT_TRAMPOLINE intno
50 53
51.global kvmppc_trampoline_\intno 54.global kvmppc_trampoline_\intno
@@ -141,7 +144,7 @@ kvmppc_handler_skip_ins:
141 GET_SCRATCH0(r13) 144 GET_SCRATCH0(r13)
142 145
143 /* And get back into the code */ 146 /* And get back into the code */
144 RFI 147 RFI_TO_KERNEL
145#endif 148#endif
146 149
147/* 150/*
@@ -164,6 +167,6 @@ _GLOBAL_TOC(kvmppc_entry_trampoline)
164 ori r5, r5, MSR_EE 167 ori r5, r5, MSR_EE
165 mtsrr0 r7 168 mtsrr0 r7
166 mtsrr1 r6 169 mtsrr1 r6
167 RFI 170 RFI_TO_KERNEL
168 171
169#include "book3s_segment.S" 172#include "book3s_segment.S"
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 2a2b96d53999..93a180ceefad 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -156,7 +156,7 @@ no_dcbz32_on:
156 PPC_LL r9, SVCPU_R9(r3) 156 PPC_LL r9, SVCPU_R9(r3)
157 PPC_LL r3, (SVCPU_R3)(r3) 157 PPC_LL r3, (SVCPU_R3)(r3)
158 158
159 RFI 159 RFI_TO_GUEST
160kvmppc_handler_trampoline_enter_end: 160kvmppc_handler_trampoline_enter_end:
161 161
162 162
@@ -407,5 +407,5 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
407 cmpwi r12, BOOK3S_INTERRUPT_DOORBELL 407 cmpwi r12, BOOK3S_INTERRUPT_DOORBELL
408 beqa BOOK3S_INTERRUPT_DOORBELL 408 beqa BOOK3S_INTERRUPT_DOORBELL
409 409
410 RFI 410 RFI_TO_KERNEL
411kvmppc_handler_trampoline_exit_end: 411kvmppc_handler_trampoline_exit_end:
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 41cf5ae273cf..a95ea007d654 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -116,6 +116,47 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
116 } 116 }
117} 117}
118 118
119#ifdef CONFIG_PPC_BOOK3S_64
120void do_rfi_flush_fixups(enum l1d_flush_type types)
121{
122 unsigned int instrs[3], *dest;
123 long *start, *end;
124 int i;
125
126 start = PTRRELOC(&__start___rfi_flush_fixup),
127 end = PTRRELOC(&__stop___rfi_flush_fixup);
128
129 instrs[0] = 0x60000000; /* nop */
130 instrs[1] = 0x60000000; /* nop */
131 instrs[2] = 0x60000000; /* nop */
132
133 if (types & L1D_FLUSH_FALLBACK)
134 /* b .+16 to fallback flush */
135 instrs[0] = 0x48000010;
136
137 i = 0;
138 if (types & L1D_FLUSH_ORI) {
139 instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
140	 instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush */
141 }
142
143 if (types & L1D_FLUSH_MTTRIG)
144 instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
145
146 for (i = 0; start < end; start++, i++) {
147 dest = (void *)start + *start;
148
149 pr_devel("patching dest %lx\n", (unsigned long)dest);
150
151 patch_instruction(dest, instrs[0]);
152 patch_instruction(dest + 1, instrs[1]);
153 patch_instruction(dest + 2, instrs[2]);
154 }
155
156 printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i);
157}
158#endif /* CONFIG_PPC_BOOK3S_64 */
159
119void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) 160void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
120{ 161{
121 long *start, *end; 162 long *start, *end;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 4797d08581ce..6e1e39035380 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -145,6 +145,11 @@ static noinline int bad_area(struct pt_regs *regs, unsigned long address)
145 return __bad_area(regs, address, SEGV_MAPERR); 145 return __bad_area(regs, address, SEGV_MAPERR);
146} 146}
147 147
148static noinline int bad_access(struct pt_regs *regs, unsigned long address)
149{
150 return __bad_area(regs, address, SEGV_ACCERR);
151}
152
148static int do_sigbus(struct pt_regs *regs, unsigned long address, 153static int do_sigbus(struct pt_regs *regs, unsigned long address,
149 unsigned int fault) 154 unsigned int fault)
150{ 155{
@@ -490,7 +495,7 @@ retry:
490 495
491good_area: 496good_area:
492 if (unlikely(access_error(is_write, is_exec, vma))) 497 if (unlikely(access_error(is_write, is_exec, vma)))
493 return bad_area(regs, address); 498 return bad_access(regs, address);
494 499
495 /* 500 /*
496 * If for any reason at all we couldn't handle the fault, 501 * If for any reason at all we couldn't handle the fault,
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 1edfbc1e40f4..4fb21e17504a 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -37,13 +37,62 @@
37#include <asm/kexec.h> 37#include <asm/kexec.h>
38#include <asm/smp.h> 38#include <asm/smp.h>
39#include <asm/tm.h> 39#include <asm/tm.h>
40#include <asm/setup.h>
40 41
41#include "powernv.h" 42#include "powernv.h"
42 43
44static void pnv_setup_rfi_flush(void)
45{
46 struct device_node *np, *fw_features;
47 enum l1d_flush_type type;
48 int enable;
49
50 /* Default to fallback in case fw-features are not available */
51 type = L1D_FLUSH_FALLBACK;
52 enable = 1;
53
54 np = of_find_node_by_name(NULL, "ibm,opal");
55 fw_features = of_get_child_by_name(np, "fw-features");
56 of_node_put(np);
57
58 if (fw_features) {
59 np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2");
60 if (np && of_property_read_bool(np, "enabled"))
61 type = L1D_FLUSH_MTTRIG;
62
63 of_node_put(np);
64
65 np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0");
66 if (np && of_property_read_bool(np, "enabled"))
67 type = L1D_FLUSH_ORI;
68
69 of_node_put(np);
70
71 /* Enable unless firmware says NOT to */
72 enable = 2;
73 np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0");
74 if (np && of_property_read_bool(np, "disabled"))
75 enable--;
76
77 of_node_put(np);
78
79 np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1");
80 if (np && of_property_read_bool(np, "disabled"))
81 enable--;
82
83 of_node_put(np);
84 of_node_put(fw_features);
85 }
86
87 setup_rfi_flush(type, enable > 0);
88}
89
43static void __init pnv_setup_arch(void) 90static void __init pnv_setup_arch(void)
44{ 91{
45 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); 92 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
46 93
94 pnv_setup_rfi_flush();
95
47 /* Initialize SMP */ 96 /* Initialize SMP */
48 pnv_smp_init(); 97 pnv_smp_init();
49 98
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 6e35780c5962..a0b20c03f078 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -574,11 +574,26 @@ static ssize_t dlpar_show(struct class *class, struct class_attribute *attr,
574 574
575static CLASS_ATTR_RW(dlpar); 575static CLASS_ATTR_RW(dlpar);
576 576
577static int __init pseries_dlpar_init(void) 577int __init dlpar_workqueue_init(void)
578{ 578{
579 if (pseries_hp_wq)
580 return 0;
581
579 pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue", 582 pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue",
580 WQ_UNBOUND, 1); 583 WQ_UNBOUND, 1);
584
585 return pseries_hp_wq ? 0 : -ENOMEM;
586}
587
588static int __init dlpar_sysfs_init(void)
589{
590 int rc;
591
592 rc = dlpar_workqueue_init();
593 if (rc)
594 return rc;
595
581 return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr); 596 return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
582} 597}
583machine_device_initcall(pseries, pseries_dlpar_init); 598machine_device_initcall(pseries, dlpar_sysfs_init);
584 599
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 4470a3194311..1ae1d9f4dbe9 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -98,4 +98,6 @@ static inline unsigned long cmo_get_page_size(void)
98 return CMO_PageSize; 98 return CMO_PageSize;
99} 99}
100 100
101int dlpar_workqueue_init(void);
102
101#endif /* _PSERIES_PSERIES_H */ 103#endif /* _PSERIES_PSERIES_H */
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 4923ffe230cf..81d8614e7379 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -69,7 +69,8 @@ static int __init init_ras_IRQ(void)
69 /* Hotplug Events */ 69 /* Hotplug Events */
70 np = of_find_node_by_path("/event-sources/hot-plug-events"); 70 np = of_find_node_by_path("/event-sources/hot-plug-events");
71 if (np != NULL) { 71 if (np != NULL) {
72 request_event_sources_irqs(np, ras_hotplug_interrupt, 72 if (dlpar_workqueue_init() == 0)
73 request_event_sources_irqs(np, ras_hotplug_interrupt,
73 "RAS_HOTPLUG"); 74 "RAS_HOTPLUG");
74 of_node_put(np); 75 of_node_put(np);
75 } 76 }
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index a8531e012658..ae4f596273b5 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -459,6 +459,39 @@ static void __init find_and_init_phbs(void)
459 of_pci_check_probe_only(); 459 of_pci_check_probe_only();
460} 460}
461 461
462static void pseries_setup_rfi_flush(void)
463{
464 struct h_cpu_char_result result;
465 enum l1d_flush_type types;
466 bool enable;
467 long rc;
468
469 /* Enable by default */
470 enable = true;
471
472 rc = plpar_get_cpu_characteristics(&result);
473 if (rc == H_SUCCESS) {
474 types = L1D_FLUSH_NONE;
475
476 if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
477 types |= L1D_FLUSH_MTTRIG;
478 if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
479 types |= L1D_FLUSH_ORI;
480
481 /* Use fallback if nothing set in hcall */
482 if (types == L1D_FLUSH_NONE)
483 types = L1D_FLUSH_FALLBACK;
484
485 if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
486 enable = false;
487 } else {
488	 /* Default to fallback in case the hcall is not available */
489 types = L1D_FLUSH_FALLBACK;
490 }
491
492 setup_rfi_flush(types, enable);
493}
494
462static void __init pSeries_setup_arch(void) 495static void __init pSeries_setup_arch(void)
463{ 496{
464 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); 497 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
@@ -476,6 +509,8 @@ static void __init pSeries_setup_arch(void)
476 509
477 fwnmi_init(); 510 fwnmi_init();
478 511
512 pseries_setup_rfi_flush();
513
479 /* By default, only probe PCI (can be overridden by rtas_pci) */ 514 /* By default, only probe PCI (can be overridden by rtas_pci) */
480 pci_add_flags(PCI_PROBE_ONLY); 515 pci_add_flags(PCI_PROBE_ONLY);
481 516
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index e69de29bb2d1..47dacf06c679 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -0,0 +1,75 @@
1CONFIG_SMP=y
2CONFIG_PCI=y
3CONFIG_PCIE_XILINX=y
4CONFIG_SYSVIPC=y
5CONFIG_POSIX_MQUEUE=y
6CONFIG_IKCONFIG=y
7CONFIG_IKCONFIG_PROC=y
8CONFIG_CGROUPS=y
9CONFIG_CGROUP_SCHED=y
10CONFIG_CFS_BANDWIDTH=y
11CONFIG_CGROUP_BPF=y
12CONFIG_NAMESPACES=y
13CONFIG_USER_NS=y
14CONFIG_BLK_DEV_INITRD=y
15CONFIG_EXPERT=y
16CONFIG_CHECKPOINT_RESTORE=y
17CONFIG_BPF_SYSCALL=y
18CONFIG_NET=y
19CONFIG_PACKET=y
20CONFIG_UNIX=y
21CONFIG_INET=y
22CONFIG_IP_MULTICAST=y
23CONFIG_IP_ADVANCED_ROUTER=y
24CONFIG_IP_PNP=y
25CONFIG_IP_PNP_DHCP=y
26CONFIG_IP_PNP_BOOTP=y
27CONFIG_IP_PNP_RARP=y
28CONFIG_NETLINK_DIAG=y
29CONFIG_DEVTMPFS=y
30CONFIG_BLK_DEV_LOOP=y
31CONFIG_VIRTIO_BLK=y
32CONFIG_BLK_DEV_SD=y
33CONFIG_BLK_DEV_SR=y
34CONFIG_ATA=y
35CONFIG_SATA_AHCI=y
36CONFIG_SATA_AHCI_PLATFORM=y
37CONFIG_NETDEVICES=y
38CONFIG_VIRTIO_NET=y
39CONFIG_MACB=y
40CONFIG_E1000E=y
41CONFIG_R8169=y
42CONFIG_MICROSEMI_PHY=y
43CONFIG_INPUT_MOUSEDEV=y
44CONFIG_SERIAL_8250=y
45CONFIG_SERIAL_8250_CONSOLE=y
46CONFIG_SERIAL_OF_PLATFORM=y
47# CONFIG_PTP_1588_CLOCK is not set
48CONFIG_DRM=y
49CONFIG_DRM_RADEON=y
50CONFIG_FRAMEBUFFER_CONSOLE=y
51CONFIG_USB=y
52CONFIG_USB_XHCI_HCD=y
53CONFIG_USB_XHCI_PLATFORM=y
54CONFIG_USB_EHCI_HCD=y
55CONFIG_USB_EHCI_HCD_PLATFORM=y
56CONFIG_USB_OHCI_HCD=y
57CONFIG_USB_OHCI_HCD_PLATFORM=y
58CONFIG_USB_STORAGE=y
59CONFIG_USB_UAS=y
60CONFIG_VIRTIO_MMIO=y
61CONFIG_RAS=y
62CONFIG_EXT4_FS=y
63CONFIG_EXT4_FS_POSIX_ACL=y
64CONFIG_AUTOFS4_FS=y
65CONFIG_MSDOS_FS=y
66CONFIG_VFAT_FS=y
67CONFIG_TMPFS=y
68CONFIG_TMPFS_POSIX_ACL=y
69CONFIG_NFS_FS=y
70CONFIG_NFS_V4=y
71CONFIG_NFS_V4_1=y
72CONFIG_NFS_V4_2=y
73CONFIG_ROOT_NFS=y
74# CONFIG_RCU_TRACE is not set
75CONFIG_CRYPTO_USER_API_HASH=y
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 0d64bc9f4f91..3c7a2c97e377 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -17,10 +17,10 @@
17#include <linux/const.h> 17#include <linux/const.h>
18 18
19/* Status register flags */ 19/* Status register flags */
20#define SR_IE _AC(0x00000002, UL) /* Interrupt Enable */ 20#define SR_SIE _AC(0x00000002, UL) /* Supervisor Interrupt Enable */
21#define SR_PIE _AC(0x00000020, UL) /* Previous IE */ 21#define SR_SPIE _AC(0x00000020, UL) /* Previous Supervisor IE */
22#define SR_PS _AC(0x00000100, UL) /* Previously Supervisor */ 22#define SR_SPP _AC(0x00000100, UL) /* Previously Supervisor */
23#define SR_SUM _AC(0x00040000, UL) /* Supervisor may access User Memory */ 23#define SR_SUM _AC(0x00040000, UL) /* Supervisor may access User Memory */
24 24
25#define SR_FS _AC(0x00006000, UL) /* Floating-point Status */ 25#define SR_FS _AC(0x00006000, UL) /* Floating-point Status */
26#define SR_FS_OFF _AC(0x00000000, UL) 26#define SR_FS_OFF _AC(0x00000000, UL)
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index a82ce599b639..b269451e7e85 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -21,8 +21,6 @@
21 21
22#include <linux/types.h> 22#include <linux/types.h>
23 23
24#ifdef CONFIG_MMU
25
26extern void __iomem *ioremap(phys_addr_t offset, unsigned long size); 24extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
27 25
28/* 26/*
@@ -36,8 +34,6 @@ extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
36 34
37extern void iounmap(volatile void __iomem *addr); 35extern void iounmap(volatile void __iomem *addr);
38 36
39#endif /* CONFIG_MMU */
40
41/* Generic IO read/write. These perform native-endian accesses. */ 37/* Generic IO read/write. These perform native-endian accesses. */
42#define __raw_writeb __raw_writeb 38#define __raw_writeb __raw_writeb
43static inline void __raw_writeb(u8 val, volatile void __iomem *addr) 39static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
diff --git a/arch/riscv/include/asm/irqflags.h b/arch/riscv/include/asm/irqflags.h
index 6fdc860d7f84..07a3c6d5706f 100644
--- a/arch/riscv/include/asm/irqflags.h
+++ b/arch/riscv/include/asm/irqflags.h
@@ -27,25 +27,25 @@ static inline unsigned long arch_local_save_flags(void)
27/* unconditionally enable interrupts */ 27/* unconditionally enable interrupts */
28static inline void arch_local_irq_enable(void) 28static inline void arch_local_irq_enable(void)
29{ 29{
30 csr_set(sstatus, SR_IE); 30 csr_set(sstatus, SR_SIE);
31} 31}
32 32
33/* unconditionally disable interrupts */ 33/* unconditionally disable interrupts */
34static inline void arch_local_irq_disable(void) 34static inline void arch_local_irq_disable(void)
35{ 35{
36 csr_clear(sstatus, SR_IE); 36 csr_clear(sstatus, SR_SIE);
37} 37}
38 38
39/* get status and disable interrupts */ 39/* get status and disable interrupts */
40static inline unsigned long arch_local_irq_save(void) 40static inline unsigned long arch_local_irq_save(void)
41{ 41{
42 return csr_read_clear(sstatus, SR_IE); 42 return csr_read_clear(sstatus, SR_SIE);
43} 43}
44 44
45/* test flags */ 45/* test flags */
46static inline int arch_irqs_disabled_flags(unsigned long flags) 46static inline int arch_irqs_disabled_flags(unsigned long flags)
47{ 47{
48 return !(flags & SR_IE); 48 return !(flags & SR_SIE);
49} 49}
50 50
51/* test hardware interrupt enable bit */ 51/* test hardware interrupt enable bit */
@@ -57,7 +57,7 @@ static inline int arch_irqs_disabled(void)
57/* set interrupt enabled status */ 57/* set interrupt enabled status */
58static inline void arch_local_irq_restore(unsigned long flags) 58static inline void arch_local_irq_restore(unsigned long flags)
59{ 59{
60 csr_set(sstatus, flags & SR_IE); 60 csr_set(sstatus, flags & SR_SIE);
61} 61}
62 62
63#endif /* _ASM_RISCV_IRQFLAGS_H */ 63#endif /* _ASM_RISCV_IRQFLAGS_H */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 2cbd92ed1629..16301966d65b 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -20,8 +20,6 @@
20 20
21#ifndef __ASSEMBLY__ 21#ifndef __ASSEMBLY__
22 22
23#ifdef CONFIG_MMU
24
25/* Page Upper Directory not used in RISC-V */ 23/* Page Upper Directory not used in RISC-V */
26#include <asm-generic/pgtable-nopud.h> 24#include <asm-generic/pgtable-nopud.h>
27#include <asm/page.h> 25#include <asm/page.h>
@@ -413,8 +411,6 @@ static inline void pgtable_cache_init(void)
413 /* No page table caches to initialize */ 411 /* No page table caches to initialize */
414} 412}
415 413
416#endif /* CONFIG_MMU */
417
418#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1) 414#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
419#define VMALLOC_END (PAGE_OFFSET - 1) 415#define VMALLOC_END (PAGE_OFFSET - 1)
420#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE) 416#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h
index 93b8956e25e4..2c5df945d43c 100644
--- a/arch/riscv/include/asm/ptrace.h
+++ b/arch/riscv/include/asm/ptrace.h
@@ -66,7 +66,7 @@ struct pt_regs {
66#define REG_FMT "%08lx" 66#define REG_FMT "%08lx"
67#endif 67#endif
68 68
69#define user_mode(regs) (((regs)->sstatus & SR_PS) == 0) 69#define user_mode(regs) (((regs)->sstatus & SR_SPP) == 0)
70 70
71 71
72/* Helpers for working with the instruction pointer */ 72/* Helpers for working with the instruction pointer */
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 715b0f10af58..7b9c24ebdf52 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -15,8 +15,6 @@
15#ifndef _ASM_RISCV_TLBFLUSH_H 15#ifndef _ASM_RISCV_TLBFLUSH_H
16#define _ASM_RISCV_TLBFLUSH_H 16#define _ASM_RISCV_TLBFLUSH_H
17 17
18#ifdef CONFIG_MMU
19
20#include <linux/mm_types.h> 18#include <linux/mm_types.h>
21 19
22/* 20/*
@@ -64,6 +62,4 @@ static inline void flush_tlb_kernel_range(unsigned long start,
64 flush_tlb_all(); 62 flush_tlb_all();
65} 63}
66 64
67#endif /* CONFIG_MMU */
68
69#endif /* _ASM_RISCV_TLBFLUSH_H */ 65#endif /* _ASM_RISCV_TLBFLUSH_H */
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index 27b90d64814b..14b0b22fb578 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -127,7 +127,6 @@ extern int fixup_exception(struct pt_regs *state);
127 * call. 127 * call.
128 */ 128 */
129 129
130#ifdef CONFIG_MMU
131#define __get_user_asm(insn, x, ptr, err) \ 130#define __get_user_asm(insn, x, ptr, err) \
132do { \ 131do { \
133 uintptr_t __tmp; \ 132 uintptr_t __tmp; \
@@ -153,13 +152,11 @@ do { \
153 __disable_user_access(); \ 152 __disable_user_access(); \
154 (x) = __x; \ 153 (x) = __x; \
155} while (0) 154} while (0)
156#endif /* CONFIG_MMU */
157 155
158#ifdef CONFIG_64BIT 156#ifdef CONFIG_64BIT
159#define __get_user_8(x, ptr, err) \ 157#define __get_user_8(x, ptr, err) \
160 __get_user_asm("ld", x, ptr, err) 158 __get_user_asm("ld", x, ptr, err)
161#else /* !CONFIG_64BIT */ 159#else /* !CONFIG_64BIT */
162#ifdef CONFIG_MMU
163#define __get_user_8(x, ptr, err) \ 160#define __get_user_8(x, ptr, err) \
164do { \ 161do { \
165 u32 __user *__ptr = (u32 __user *)(ptr); \ 162 u32 __user *__ptr = (u32 __user *)(ptr); \
@@ -193,7 +190,6 @@ do { \
193 (x) = (__typeof__(x))((__typeof__((x)-(x)))( \ 190 (x) = (__typeof__(x))((__typeof__((x)-(x)))( \
194 (((u64)__hi << 32) | __lo))); \ 191 (((u64)__hi << 32) | __lo))); \
195} while (0) 192} while (0)
196#endif /* CONFIG_MMU */
197#endif /* CONFIG_64BIT */ 193#endif /* CONFIG_64BIT */
198 194
199 195
@@ -267,8 +263,6 @@ do { \
267 ((x) = 0, -EFAULT); \ 263 ((x) = 0, -EFAULT); \
268}) 264})
269 265
270
271#ifdef CONFIG_MMU
272#define __put_user_asm(insn, x, ptr, err) \ 266#define __put_user_asm(insn, x, ptr, err) \
273do { \ 267do { \
274 uintptr_t __tmp; \ 268 uintptr_t __tmp; \
@@ -292,14 +286,11 @@ do { \
292 : "rJ" (__x), "i" (-EFAULT)); \ 286 : "rJ" (__x), "i" (-EFAULT)); \
293 __disable_user_access(); \ 287 __disable_user_access(); \
294} while (0) 288} while (0)
295#endif /* CONFIG_MMU */
296
297 289
298#ifdef CONFIG_64BIT 290#ifdef CONFIG_64BIT
299#define __put_user_8(x, ptr, err) \ 291#define __put_user_8(x, ptr, err) \
300 __put_user_asm("sd", x, ptr, err) 292 __put_user_asm("sd", x, ptr, err)
301#else /* !CONFIG_64BIT */ 293#else /* !CONFIG_64BIT */
302#ifdef CONFIG_MMU
303#define __put_user_8(x, ptr, err) \ 294#define __put_user_8(x, ptr, err) \
304do { \ 295do { \
305 u32 __user *__ptr = (u32 __user *)(ptr); \ 296 u32 __user *__ptr = (u32 __user *)(ptr); \
@@ -329,7 +320,6 @@ do { \
329 : "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT)); \ 320 : "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT)); \
330 __disable_user_access(); \ 321 __disable_user_access(); \
331} while (0) 322} while (0)
332#endif /* CONFIG_MMU */
333#endif /* CONFIG_64BIT */ 323#endif /* CONFIG_64BIT */
334 324
335 325
@@ -438,7 +428,6 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
438 * will set "err" to -EFAULT, while successful accesses return the previous 428 * will set "err" to -EFAULT, while successful accesses return the previous
439 * value. 429 * value.
440 */ 430 */
441#ifdef CONFIG_MMU
442#define __cmpxchg_user(ptr, old, new, err, size, lrb, scb) \ 431#define __cmpxchg_user(ptr, old, new, err, size, lrb, scb) \
443({ \ 432({ \
444 __typeof__(ptr) __ptr = (ptr); \ 433 __typeof__(ptr) __ptr = (ptr); \
@@ -508,6 +497,5 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
508 (err) = __err; \ 497 (err) = __err; \
509 __ret; \ 498 __ret; \
510}) 499})
511#endif /* CONFIG_MMU */
512 500
513#endif /* _ASM_RISCV_UACCESS_H */ 501#endif /* _ASM_RISCV_UACCESS_H */
diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h
index 9f250ed007cd..2f704a5c4196 100644
--- a/arch/riscv/include/asm/unistd.h
+++ b/arch/riscv/include/asm/unistd.h
@@ -14,3 +14,4 @@
14#define __ARCH_HAVE_MMU 14#define __ARCH_HAVE_MMU
15#define __ARCH_WANT_SYS_CLONE 15#define __ARCH_WANT_SYS_CLONE
16#include <uapi/asm/unistd.h> 16#include <uapi/asm/unistd.h>
17#include <uapi/asm/syscalls.h>
diff --git a/arch/riscv/include/asm/vdso-syscalls.h b/arch/riscv/include/asm/vdso-syscalls.h
deleted file mode 100644
index a2ccf1894929..000000000000
--- a/arch/riscv/include/asm/vdso-syscalls.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * Copyright (C) 2017 SiFive
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef _ASM_RISCV_VDSO_SYSCALLS_H
18#define _ASM_RISCV_VDSO_SYSCALLS_H
19
20#ifdef CONFIG_SMP
21
22/* These syscalls are only used by the vDSO and are not in the uapi. */
23#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
24__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
25
26#endif
27
28#endif /* _ASM_RISCV_VDSO_H */
diff --git a/arch/riscv/include/uapi/asm/syscalls.h b/arch/riscv/include/uapi/asm/syscalls.h
new file mode 100644
index 000000000000..818655b0d535
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/syscalls.h
@@ -0,0 +1,26 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (C) 2017 SiFive
4 */
5
6#ifndef _ASM__UAPI__SYSCALLS_H
7#define _ASM__UAPI__SYSCALLS_H
8
9/*
10 * Allows the instruction cache to be flushed from userspace. Despite RISC-V
11 * having a direct 'fence.i' instruction available to userspace (which we
12 * can't trap!), that's not actually viable when running on Linux because the
13 * kernel might schedule a process on another hart. There is no way for
14 * userspace to handle this without invoking the kernel (as it doesn't know the
15 * thread->hart mappings), so we've defined a RISC-V specific system call to
16 * flush the instruction cache.
17 *
18 * __NR_riscv_flush_icache is defined to flush the instruction cache over an
19 * address range, with the flush applying to either all threads or just the
20 * caller. We don't currently do anything with the address range, that's just
21 * in there for forwards compatibility.
22 */
23#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
24__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
25
26#endif
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 20ee86f782a9..7404ec222406 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -196,7 +196,7 @@ handle_syscall:
196 addi s2, s2, 0x4 196 addi s2, s2, 0x4
197 REG_S s2, PT_SEPC(sp) 197 REG_S s2, PT_SEPC(sp)
198 /* System calls run with interrupts enabled */ 198 /* System calls run with interrupts enabled */
199 csrs sstatus, SR_IE 199 csrs sstatus, SR_SIE
200 /* Trace syscalls, but only if requested by the user. */ 200 /* Trace syscalls, but only if requested by the user. */
201 REG_L t0, TASK_TI_FLAGS(tp) 201 REG_L t0, TASK_TI_FLAGS(tp)
202 andi t0, t0, _TIF_SYSCALL_TRACE 202 andi t0, t0, _TIF_SYSCALL_TRACE
@@ -224,8 +224,8 @@ ret_from_syscall:
224 224
225ret_from_exception: 225ret_from_exception:
226 REG_L s0, PT_SSTATUS(sp) 226 REG_L s0, PT_SSTATUS(sp)
227 csrc sstatus, SR_IE 227 csrc sstatus, SR_SIE
228 andi s0, s0, SR_PS 228 andi s0, s0, SR_SPP
229 bnez s0, restore_all 229 bnez s0, restore_all
230 230
231resume_userspace: 231resume_userspace:
@@ -255,7 +255,7 @@ work_pending:
255 bnez s1, work_resched 255 bnez s1, work_resched
256work_notifysig: 256work_notifysig:
257 /* Handle pending signals and notify-resume requests */ 257 /* Handle pending signals and notify-resume requests */
258 csrs sstatus, SR_IE /* Enable interrupts for do_notify_resume() */ 258 csrs sstatus, SR_SIE /* Enable interrupts for do_notify_resume() */
259 move a0, sp /* pt_regs */ 259 move a0, sp /* pt_regs */
260 move a1, s0 /* current_thread_info->flags */ 260 move a1, s0 /* current_thread_info->flags */
261 tail do_notify_resume 261 tail do_notify_resume
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 0d90dcc1fbd3..d74d4adf2d54 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -76,7 +76,7 @@ void show_regs(struct pt_regs *regs)
76void start_thread(struct pt_regs *regs, unsigned long pc, 76void start_thread(struct pt_regs *regs, unsigned long pc,
77 unsigned long sp) 77 unsigned long sp)
78{ 78{
79 regs->sstatus = SR_PIE /* User mode, irqs on */ | SR_FS_INITIAL; 79 regs->sstatus = SR_SPIE /* User mode, irqs on */ | SR_FS_INITIAL;
80 regs->sepc = pc; 80 regs->sepc = pc;
81 regs->sp = sp; 81 regs->sp = sp;
82 set_fs(USER_DS); 82 set_fs(USER_DS);
@@ -110,7 +110,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
110 const register unsigned long gp __asm__ ("gp"); 110 const register unsigned long gp __asm__ ("gp");
111 memset(childregs, 0, sizeof(struct pt_regs)); 111 memset(childregs, 0, sizeof(struct pt_regs));
112 childregs->gp = gp; 112 childregs->gp = gp;
113 childregs->sstatus = SR_PS | SR_PIE; /* Supervisor, irqs on */ 113 childregs->sstatus = SR_SPP | SR_SPIE; /* Supervisor, irqs on */
114 114
115 p->thread.ra = (unsigned long)ret_from_kernel_thread; 115 p->thread.ra = (unsigned long)ret_from_kernel_thread;
116 p->thread.s[0] = usp; /* fn */ 116 p->thread.s[0] = usp; /* fn */
diff --git a/arch/riscv/kernel/syscall_table.c b/arch/riscv/kernel/syscall_table.c
index a5bd6401f95e..ade52b903a43 100644
--- a/arch/riscv/kernel/syscall_table.c
+++ b/arch/riscv/kernel/syscall_table.c
@@ -23,5 +23,4 @@
23void *sys_call_table[__NR_syscalls] = { 23void *sys_call_table[__NR_syscalls] = {
24 [0 ... __NR_syscalls - 1] = sys_ni_syscall, 24 [0 ... __NR_syscalls - 1] = sys_ni_syscall,
25#include <asm/unistd.h> 25#include <asm/unistd.h>
26#include <asm/vdso-syscalls.h>
27}; 26};
diff --git a/arch/riscv/kernel/vdso/flush_icache.S b/arch/riscv/kernel/vdso/flush_icache.S
index b0fbad74e873..023e4d4aef58 100644
--- a/arch/riscv/kernel/vdso/flush_icache.S
+++ b/arch/riscv/kernel/vdso/flush_icache.S
@@ -13,7 +13,6 @@
13 13
14#include <linux/linkage.h> 14#include <linux/linkage.h>
15#include <asm/unistd.h> 15#include <asm/unistd.h>
16#include <asm/vdso-syscalls.h>
17 16
18 .text 17 .text
19/* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */ 18/* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index df2ca3c65048..0713f3c67ab4 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -63,7 +63,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
63 goto vmalloc_fault; 63 goto vmalloc_fault;
64 64
65 /* Enable interrupts if they were enabled in the parent context. */ 65 /* Enable interrupts if they were enabled in the parent context. */
66 if (likely(regs->sstatus & SR_PIE)) 66 if (likely(regs->sstatus & SR_SPIE))
67 local_irq_enable(); 67 local_irq_enable();
68 68
69 /* 69 /*
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ec8b68e97d3c..2c93cbbcd15e 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -792,11 +792,12 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
792 792
793 if (kvm->arch.use_cmma) { 793 if (kvm->arch.use_cmma) {
794 /* 794 /*
795 * Get the last slot. They should be sorted by base_gfn, so the 795 * Get the first slot. They are reverse sorted by base_gfn, so
796 * last slot is also the one at the end of the address space. 796 * the first slot is also the one at the end of the address
797 * We have verified above that at least one slot is present. 797 * space. We have verified above that at least one slot is
798 * present.
798 */ 799 */
799 ms = slots->memslots + slots->used_slots - 1; 800 ms = slots->memslots;
800 /* round up so we only use full longs */ 801 /* round up so we only use full longs */
801 ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG); 802 ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
802 /* allocate enough bytes to store all the bits */ 803 /* allocate enough bytes to store all the bits */
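The rewritten comment above is the key point: memslots are kept sorted by base_gfn in descending order, so the slot covering the top of guest memory is slots->memslots[0], not the last array entry. A sketch of the sizing computation under that assumption; the helper name is illustrative, the real code open-codes this in kvm_s390_vm_start_migration():

    #include <linux/kernel.h>      /* roundup() */
    #include <linux/kvm_host.h>    /* struct kvm_memslots */

    /* Sketch: size the CMMA migration bitmap (in bits) from the highest memslot. */
    static unsigned long cmma_bitmap_bits(struct kvm_memslots *slots)
    {
            /* Reverse sorted by base_gfn: the first slot ends the address space. */
            struct kvm_memory_slot *ms = slots->memslots;

            /* Round up so the bitmap only ever uses full longs. */
            return roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
    }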
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 572496c688cc..0714bfa56da0 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -1006,7 +1006,7 @@ static inline int do_essa(struct kvm_vcpu *vcpu, const int orc)
1006 cbrlo[entries] = gfn << PAGE_SHIFT; 1006 cbrlo[entries] = gfn << PAGE_SHIFT;
1007 } 1007 }
1008 1008
1009 if (orc) { 1009 if (orc && gfn < ms->bitmap_size) {
1010 /* increment only if we are really flipping the bit to 1 */ 1010 /* increment only if we are really flipping the bit to 1 */
1011 if (!test_and_set_bit(gfn, ms->pgste_bitmap)) 1011 if (!test_and_set_bit(gfn, ms->pgste_bitmap))
1012 atomic64_inc(&ms->dirty_pages); 1012 atomic64_inc(&ms->dirty_pages);
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index cae5a1e16cbd..c4f8039a35e8 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -89,11 +89,11 @@ EXPORT_SYMBOL(enable_sacf_uaccess);
89 89
90void disable_sacf_uaccess(mm_segment_t old_fs) 90void disable_sacf_uaccess(mm_segment_t old_fs)
91{ 91{
92 current->thread.mm_segment = old_fs;
92 if (old_fs == USER_DS && test_facility(27)) { 93 if (old_fs == USER_DS && test_facility(27)) {
93 __ctl_load(S390_lowcore.user_asce, 1, 1); 94 __ctl_load(S390_lowcore.user_asce, 1, 1);
94 clear_cpu_flag(CIF_ASCE_PRIMARY); 95 clear_cpu_flag(CIF_ASCE_PRIMARY);
95 } 96 }
96 current->thread.mm_segment = old_fs;
97} 97}
98EXPORT_SYMBOL(disable_sacf_uaccess); 98EXPORT_SYMBOL(disable_sacf_uaccess);
99 99
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index f7aa5a77827e..2d15d84c20ed 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -181,6 +181,9 @@ out_unlock:
181static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr, 181static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
182 size_t size, int flags) 182 size_t size, int flags)
183{ 183{
184 unsigned long irqflags;
185 int ret;
186
184 /* 187 /*
185 * With zdev->tlb_refresh == 0, rpcit is not required to establish new 188 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
186 * translations when previously invalid translation-table entries are 189 * translations when previously invalid translation-table entries are
@@ -196,8 +199,22 @@ static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
196 return 0; 199 return 0;
197 } 200 }
198 201
199 return zpci_refresh_trans((u64) zdev->fh << 32, dma_addr, 202 ret = zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
200 PAGE_ALIGN(size)); 203 PAGE_ALIGN(size));
204 if (ret == -ENOMEM && !s390_iommu_strict) {
205 /* enable the hypervisor to free some resources */
206 if (zpci_refresh_global(zdev))
207 goto out;
208
209 spin_lock_irqsave(&zdev->iommu_bitmap_lock, irqflags);
210 bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
211 zdev->lazy_bitmap, zdev->iommu_pages);
212 bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
213 spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, irqflags);
214 ret = 0;
215 }
216out:
217 return ret;
201} 218}
202 219
203static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa, 220static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
index 19bcb3b45a70..f069929e8211 100644
--- a/arch/s390/pci/pci_insn.c
+++ b/arch/s390/pci/pci_insn.c
@@ -89,6 +89,9 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
89 if (cc) 89 if (cc)
90 zpci_err_insn(cc, status, addr, range); 90 zpci_err_insn(cc, status, addr, range);
91 91
92 if (cc == 1 && (status == 4 || status == 16))
93 return -ENOMEM;
94
92 return (cc) ? -EIO : 0; 95 return (cc) ? -EIO : 0;
93} 96}
94 97
diff --git a/arch/sh/boards/mach-se/770x/setup.c b/arch/sh/boards/mach-se/770x/setup.c
index 77c35350ee77..412326d59e6f 100644
--- a/arch/sh/boards/mach-se/770x/setup.c
+++ b/arch/sh/boards/mach-se/770x/setup.c
@@ -9,6 +9,7 @@
9 */ 9 */
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/platform_device.h> 11#include <linux/platform_device.h>
12#include <linux/sh_eth.h>
12#include <mach-se/mach/se.h> 13#include <mach-se/mach/se.h>
13#include <mach-se/mach/mrshpc.h> 14#include <mach-se/mach/mrshpc.h>
14#include <asm/machvec.h> 15#include <asm/machvec.h>
@@ -115,13 +116,23 @@ static struct platform_device heartbeat_device = {
115#if defined(CONFIG_CPU_SUBTYPE_SH7710) ||\ 116#if defined(CONFIG_CPU_SUBTYPE_SH7710) ||\
116 defined(CONFIG_CPU_SUBTYPE_SH7712) 117 defined(CONFIG_CPU_SUBTYPE_SH7712)
117/* SH771X Ethernet driver */ 118/* SH771X Ethernet driver */
119static struct sh_eth_plat_data sh_eth_plat = {
120 .phy = PHY_ID,
121 .phy_interface = PHY_INTERFACE_MODE_MII,
122};
123
118static struct resource sh_eth0_resources[] = { 124static struct resource sh_eth0_resources[] = {
119 [0] = { 125 [0] = {
120 .start = SH_ETH0_BASE, 126 .start = SH_ETH0_BASE,
121 .end = SH_ETH0_BASE + 0x1B8, 127 .end = SH_ETH0_BASE + 0x1B8 - 1,
122 .flags = IORESOURCE_MEM, 128 .flags = IORESOURCE_MEM,
123 }, 129 },
124 [1] = { 130 [1] = {
131 .start = SH_TSU_BASE,
132 .end = SH_TSU_BASE + 0x200 - 1,
133 .flags = IORESOURCE_MEM,
134 },
135 [2] = {
125 .start = SH_ETH0_IRQ, 136 .start = SH_ETH0_IRQ,
126 .end = SH_ETH0_IRQ, 137 .end = SH_ETH0_IRQ,
127 .flags = IORESOURCE_IRQ, 138 .flags = IORESOURCE_IRQ,
@@ -132,7 +143,7 @@ static struct platform_device sh_eth0_device = {
132 .name = "sh771x-ether", 143 .name = "sh771x-ether",
133 .id = 0, 144 .id = 0,
134 .dev = { 145 .dev = {
135 .platform_data = PHY_ID, 146 .platform_data = &sh_eth_plat,
136 }, 147 },
137 .num_resources = ARRAY_SIZE(sh_eth0_resources), 148 .num_resources = ARRAY_SIZE(sh_eth0_resources),
138 .resource = sh_eth0_resources, 149 .resource = sh_eth0_resources,
@@ -141,10 +152,15 @@ static struct platform_device sh_eth0_device = {
141static struct resource sh_eth1_resources[] = { 152static struct resource sh_eth1_resources[] = {
142 [0] = { 153 [0] = {
143 .start = SH_ETH1_BASE, 154 .start = SH_ETH1_BASE,
144 .end = SH_ETH1_BASE + 0x1B8, 155 .end = SH_ETH1_BASE + 0x1B8 - 1,
145 .flags = IORESOURCE_MEM, 156 .flags = IORESOURCE_MEM,
146 }, 157 },
147 [1] = { 158 [1] = {
159 .start = SH_TSU_BASE,
160 .end = SH_TSU_BASE + 0x200 - 1,
161 .flags = IORESOURCE_MEM,
162 },
163 [2] = {
148 .start = SH_ETH1_IRQ, 164 .start = SH_ETH1_IRQ,
149 .end = SH_ETH1_IRQ, 165 .end = SH_ETH1_IRQ,
150 .flags = IORESOURCE_IRQ, 166 .flags = IORESOURCE_IRQ,
@@ -155,7 +171,7 @@ static struct platform_device sh_eth1_device = {
155 .name = "sh771x-ether", 171 .name = "sh771x-ether",
156 .id = 1, 172 .id = 1,
157 .dev = { 173 .dev = {
158 .platform_data = PHY_ID, 174 .platform_data = &sh_eth_plat,
159 }, 175 },
160 .num_resources = ARRAY_SIZE(sh_eth1_resources), 176 .num_resources = ARRAY_SIZE(sh_eth1_resources),
161 .resource = sh_eth1_resources, 177 .resource = sh_eth1_resources,
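Taken together, the SolutionEngine hunks above stop abusing the platform_data pointer to carry a bare PHY_ID, pass a real struct sh_eth_plat_data instead, fix the off-by-one in the .end of each register window, and add the shared TSU block as a second memory resource. A condensed sketch of the resulting per-port pattern, using the DEFINE_RES_* helpers from <linux/ioport.h> as shorthand (the patch itself spells the resources out longhand; PHY_ID and the base addresses come from mach-se/mach/se.h):

    #include <linux/ioport.h>
    #include <linux/platform_device.h>
    #include <linux/sh_eth.h>

    static struct sh_eth_plat_data sh_eth_plat = {
            .phy            = PHY_ID,
            .phy_interface  = PHY_INTERFACE_MODE_MII,
    };

    static struct resource sh_eth0_resources[] = {
            DEFINE_RES_MEM(SH_ETH0_BASE, 0x1B8),   /* E-DMAC/E-MAC registers */
            DEFINE_RES_MEM(SH_TSU_BASE, 0x200),    /* TSU block shared by both ports */
            DEFINE_RES_IRQ(SH_ETH0_IRQ),
    };

    static struct platform_device sh_eth0_device = {
            .name           = "sh771x-ether",
            .id             = 0,
            .dev            = { .platform_data = &sh_eth_plat },
            .num_resources  = ARRAY_SIZE(sh_eth0_resources),
            .resource       = sh_eth0_resources,
    };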
diff --git a/arch/sh/include/mach-se/mach/se.h b/arch/sh/include/mach-se/mach/se.h
index 4246ef9b07a3..aa83fe1ff0b1 100644
--- a/arch/sh/include/mach-se/mach/se.h
+++ b/arch/sh/include/mach-se/mach/se.h
@@ -100,6 +100,7 @@
100/* Base address */ 100/* Base address */
101#define SH_ETH0_BASE 0xA7000000 101#define SH_ETH0_BASE 0xA7000000
102#define SH_ETH1_BASE 0xA7000400 102#define SH_ETH1_BASE 0xA7000400
103#define SH_TSU_BASE 0xA7000800
103/* PHY ID */ 104/* PHY ID */
104#if defined(CONFIG_CPU_SUBTYPE_SH7710) 105#if defined(CONFIG_CPU_SUBTYPE_SH7710)
105# define PHY_ID 0x00 106# define PHY_ID 0x00
diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c
index 5f25b39f04d4..c4ac6043ebb0 100644
--- a/arch/unicore32/kernel/traps.c
+++ b/arch/unicore32/kernel/traps.c
@@ -298,7 +298,6 @@ void abort(void)
298 /* if that doesn't kill us, halt */ 298 /* if that doesn't kill us, halt */
299 panic("Oops failed to kill thread"); 299 panic("Oops failed to kill thread");
300} 300}
301EXPORT_SYMBOL(abort);
302 301
303void __init trap_init(void) 302void __init trap_init(void)
304{ 303{
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d4fc98c50378..20da391b5f32 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -55,7 +55,6 @@ config X86
55 select ARCH_HAS_GCOV_PROFILE_ALL 55 select ARCH_HAS_GCOV_PROFILE_ALL
56 select ARCH_HAS_KCOV if X86_64 56 select ARCH_HAS_KCOV if X86_64
57 select ARCH_HAS_PMEM_API if X86_64 57 select ARCH_HAS_PMEM_API if X86_64
58 # Causing hangs/crashes, see the commit that added this change for details.
59 select ARCH_HAS_REFCOUNT 58 select ARCH_HAS_REFCOUNT
60 select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64 59 select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
61 select ARCH_HAS_SET_MEMORY 60 select ARCH_HAS_SET_MEMORY
@@ -89,6 +88,7 @@ config X86
89 select GENERIC_CLOCKEVENTS_MIN_ADJUST 88 select GENERIC_CLOCKEVENTS_MIN_ADJUST
90 select GENERIC_CMOS_UPDATE 89 select GENERIC_CMOS_UPDATE
91 select GENERIC_CPU_AUTOPROBE 90 select GENERIC_CPU_AUTOPROBE
91 select GENERIC_CPU_VULNERABILITIES
92 select GENERIC_EARLY_IOREMAP 92 select GENERIC_EARLY_IOREMAP
93 select GENERIC_FIND_FIRST_BIT 93 select GENERIC_FIND_FIRST_BIT
94 select GENERIC_IOMAP 94 select GENERIC_IOMAP
@@ -429,6 +429,19 @@ config GOLDFISH
429 def_bool y 429 def_bool y
430 depends on X86_GOLDFISH 430 depends on X86_GOLDFISH
431 431
432config RETPOLINE
433 bool "Avoid speculative indirect branches in kernel"
434 default y
435 help
436 Compile kernel with the retpoline compiler options to guard against
437 kernel-to-user data leaks by avoiding speculative indirect
438 branches. Requires a compiler with -mindirect-branch=thunk-extern
439 support for full protection. The kernel may run slower.
440
441 Without compiler support, at least indirect branches in assembler
442 code are eliminated. Since this includes the syscall entry path,
443 it is not entirely pointless.
444
432config INTEL_RDT 445config INTEL_RDT
433 bool "Intel Resource Director Technology support" 446 bool "Intel Resource Director Technology support"
434 default n 447 default n
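With -mindirect-branch=thunk-extern the compiler stops emitting indirect branches such as "call *%rax" and instead calls an out-of-line __x86_indirect_thunk_<reg> symbol, which the kernel supplies (see the asm-prototypes.h and nospec-branch.h hunks further down); the Makefile hunk below only adds the flag when cc-option reports the compiler supports it. A sketch of ordinary C code that gets rewritten this way; the structure and function names are illustrative only:

    /* Sketch: any C-level indirect call is affected when CONFIG_RETPOLINE=y. */
    struct demo_ops {
            int (*probe)(int unit);
    };

    static int demo_run_probe(const struct demo_ops *ops, int unit)
    {
            /*
             * Normally this compiles to "call *%rax" (or similar); with
             * -mindirect-branch=thunk-extern it becomes
             * "call __x86_indirect_thunk_rax", and -mindirect-branch-register
             * keeps the target in a register the whole time.
             */
            return ops->probe(unit);
    }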
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 3e73bc255e4e..fad55160dcb9 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -230,6 +230,14 @@ KBUILD_CFLAGS += -Wno-sign-compare
230# 230#
231KBUILD_CFLAGS += -fno-asynchronous-unwind-tables 231KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
232 232
233# Avoid indirect branches in kernel to deal with Spectre
234ifdef CONFIG_RETPOLINE
235 RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
236 ifneq ($(RETPOLINE_CFLAGS),)
237 KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
238 endif
239endif
240
233archscripts: scripts_basic 241archscripts: scripts_basic
234 $(Q)$(MAKE) $(build)=arch/x86/tools relocs 242 $(Q)$(MAKE) $(build)=arch/x86/tools relocs
235 243
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 16627fec80b2..3d09e3aca18d 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -32,6 +32,7 @@
32#include <linux/linkage.h> 32#include <linux/linkage.h>
33#include <asm/inst.h> 33#include <asm/inst.h>
34#include <asm/frame.h> 34#include <asm/frame.h>
35#include <asm/nospec-branch.h>
35 36
36/* 37/*
37 * The following macros are used to move an (un)aligned 16 byte value to/from 38 * The following macros are used to move an (un)aligned 16 byte value to/from
@@ -2884,7 +2885,7 @@ ENTRY(aesni_xts_crypt8)
2884 pxor INC, STATE4 2885 pxor INC, STATE4
2885 movdqu IV, 0x30(OUTP) 2886 movdqu IV, 0x30(OUTP)
2886 2887
2887 call *%r11 2888 CALL_NOSPEC %r11
2888 2889
2889 movdqu 0x00(OUTP), INC 2890 movdqu 0x00(OUTP), INC
2890 pxor INC, STATE1 2891 pxor INC, STATE1
@@ -2929,7 +2930,7 @@ ENTRY(aesni_xts_crypt8)
2929 _aesni_gf128mul_x_ble() 2930 _aesni_gf128mul_x_ble()
2930 movups IV, (IVP) 2931 movups IV, (IVP)
2931 2932
2932 call *%r11 2933 CALL_NOSPEC %r11
2933 2934
2934 movdqu 0x40(OUTP), INC 2935 movdqu 0x40(OUTP), INC
2935 pxor INC, STATE1 2936 pxor INC, STATE1
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
index f7c495e2863c..a14af6eb09cb 100644
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -17,6 +17,7 @@
17 17
18#include <linux/linkage.h> 18#include <linux/linkage.h>
19#include <asm/frame.h> 19#include <asm/frame.h>
20#include <asm/nospec-branch.h>
20 21
21#define CAMELLIA_TABLE_BYTE_LEN 272 22#define CAMELLIA_TABLE_BYTE_LEN 272
22 23
@@ -1227,7 +1228,7 @@ camellia_xts_crypt_16way:
1227 vpxor 14 * 16(%rax), %xmm15, %xmm14; 1228 vpxor 14 * 16(%rax), %xmm15, %xmm14;
1228 vpxor 15 * 16(%rax), %xmm15, %xmm15; 1229 vpxor 15 * 16(%rax), %xmm15, %xmm15;
1229 1230
1230 call *%r9; 1231 CALL_NOSPEC %r9;
1231 1232
1232 addq $(16 * 16), %rsp; 1233 addq $(16 * 16), %rsp;
1233 1234
diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
index eee5b3982cfd..b66bbfa62f50 100644
--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
@@ -12,6 +12,7 @@
12 12
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <asm/frame.h> 14#include <asm/frame.h>
15#include <asm/nospec-branch.h>
15 16
16#define CAMELLIA_TABLE_BYTE_LEN 272 17#define CAMELLIA_TABLE_BYTE_LEN 272
17 18
@@ -1343,7 +1344,7 @@ camellia_xts_crypt_32way:
1343 vpxor 14 * 32(%rax), %ymm15, %ymm14; 1344 vpxor 14 * 32(%rax), %ymm15, %ymm14;
1344 vpxor 15 * 32(%rax), %ymm15, %ymm15; 1345 vpxor 15 * 32(%rax), %ymm15, %ymm15;
1345 1346
1346 call *%r9; 1347 CALL_NOSPEC %r9;
1347 1348
1348 addq $(16 * 32), %rsp; 1349 addq $(16 * 32), %rsp;
1349 1350
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index 7a7de27c6f41..d9b734d0c8cc 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -45,6 +45,7 @@
45 45
46#include <asm/inst.h> 46#include <asm/inst.h>
47#include <linux/linkage.h> 47#include <linux/linkage.h>
48#include <asm/nospec-branch.h>
48 49
49## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction 50## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
50 51
@@ -172,7 +173,7 @@ continue_block:
172 movzxw (bufp, %rax, 2), len 173 movzxw (bufp, %rax, 2), len
173 lea crc_array(%rip), bufp 174 lea crc_array(%rip), bufp
174 lea (bufp, len, 1), bufp 175 lea (bufp, len, 1), bufp
175 jmp *bufp 176 JMP_NOSPEC bufp
176 177
177 ################################################################ 178 ################################################################
178 ## 2a) PROCESS FULL BLOCKS: 179 ## 2a) PROCESS FULL BLOCKS:
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 45a63e00a6af..3f48f695d5e6 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -198,8 +198,11 @@ For 32-bit we have the following conventions - kernel is built with
198 * PAGE_TABLE_ISOLATION PGDs are 8k. Flip bit 12 to switch between the two 198 * PAGE_TABLE_ISOLATION PGDs are 8k. Flip bit 12 to switch between the two
199 * halves: 199 * halves:
200 */ 200 */
201#define PTI_SWITCH_PGTABLES_MASK (1<<PAGE_SHIFT) 201#define PTI_USER_PGTABLE_BIT PAGE_SHIFT
202#define PTI_SWITCH_MASK (PTI_SWITCH_PGTABLES_MASK|(1<<X86_CR3_PTI_SWITCH_BIT)) 202#define PTI_USER_PGTABLE_MASK (1 << PTI_USER_PGTABLE_BIT)
203#define PTI_USER_PCID_BIT X86_CR3_PTI_PCID_USER_BIT
204#define PTI_USER_PCID_MASK (1 << PTI_USER_PCID_BIT)
205#define PTI_USER_PGTABLE_AND_PCID_MASK (PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK)
203 206
204.macro SET_NOFLUSH_BIT reg:req 207.macro SET_NOFLUSH_BIT reg:req
205 bts $X86_CR3_PCID_NOFLUSH_BIT, \reg 208 bts $X86_CR3_PCID_NOFLUSH_BIT, \reg
@@ -208,7 +211,7 @@ For 32-bit we have the following conventions - kernel is built with
208.macro ADJUST_KERNEL_CR3 reg:req 211.macro ADJUST_KERNEL_CR3 reg:req
209 ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID 212 ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
210 /* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */ 213 /* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
211 andq $(~PTI_SWITCH_MASK), \reg 214 andq $(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg
212.endm 215.endm
213 216
214.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req 217.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
@@ -239,15 +242,19 @@ For 32-bit we have the following conventions - kernel is built with
239 /* Flush needed, clear the bit */ 242 /* Flush needed, clear the bit */
240 btr \scratch_reg, THIS_CPU_user_pcid_flush_mask 243 btr \scratch_reg, THIS_CPU_user_pcid_flush_mask
241 movq \scratch_reg2, \scratch_reg 244 movq \scratch_reg2, \scratch_reg
242 jmp .Lwrcr3_\@ 245 jmp .Lwrcr3_pcid_\@
243 246
244.Lnoflush_\@: 247.Lnoflush_\@:
245 movq \scratch_reg2, \scratch_reg 248 movq \scratch_reg2, \scratch_reg
246 SET_NOFLUSH_BIT \scratch_reg 249 SET_NOFLUSH_BIT \scratch_reg
247 250
251.Lwrcr3_pcid_\@:
252 /* Flip the ASID to the user version */
253 orq $(PTI_USER_PCID_MASK), \scratch_reg
254
248.Lwrcr3_\@: 255.Lwrcr3_\@:
249 /* Flip the PGD and ASID to the user version */ 256 /* Flip the PGD to the user version */
250 orq $(PTI_SWITCH_MASK), \scratch_reg 257 orq $(PTI_USER_PGTABLE_MASK), \scratch_reg
251 mov \scratch_reg, %cr3 258 mov \scratch_reg, %cr3
252.Lend_\@: 259.Lend_\@:
253.endm 260.endm
@@ -263,17 +270,12 @@ For 32-bit we have the following conventions - kernel is built with
263 movq %cr3, \scratch_reg 270 movq %cr3, \scratch_reg
264 movq \scratch_reg, \save_reg 271 movq \scratch_reg, \save_reg
265 /* 272 /*
266 * Is the "switch mask" all zero? That means that both of 273 * Test the user pagetable bit. If set, then the user page tables
267 * these are zero: 274 * are active. If clear, CR3 already has the kernel page table
268 * 275 * active.
269 * 1. The user/kernel PCID bit, and
270 * 2. The user/kernel "bit" that points CR3 to the
271 * bottom half of the 8k PGD
272 *
273 * That indicates a kernel CR3 value, not a user CR3.
274 */ 276 */
275 testq $(PTI_SWITCH_MASK), \scratch_reg 277 bt $PTI_USER_PGTABLE_BIT, \scratch_reg
276 jz .Ldone_\@ 278 jnc .Ldone_\@
277 279
278 ADJUST_KERNEL_CR3 \scratch_reg 280 ADJUST_KERNEL_CR3 \scratch_reg
279 movq \scratch_reg, %cr3 281 movq \scratch_reg, %cr3
@@ -290,7 +292,7 @@ For 32-bit we have the following conventions - kernel is built with
290 * KERNEL pages can always resume with NOFLUSH as we do 292 * KERNEL pages can always resume with NOFLUSH as we do
291 * explicit flushes. 293 * explicit flushes.
292 */ 294 */
293 bt $X86_CR3_PTI_SWITCH_BIT, \save_reg 295 bt $PTI_USER_PGTABLE_BIT, \save_reg
294 jnc .Lnoflush_\@ 296 jnc .Lnoflush_\@
295 297
296 /* 298 /*
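The rework above splits the old PTI_SWITCH_MASK into its two ingredients: bit PAGE_SHIFT selects the user half of the 8k PGD, and bit X86_CR3_PTI_PCID_USER_BIT selects the user ASID, which is only ORed in on the PCID path. Detecting a user CR3 now only needs the page-table bit. A C-level sketch of the same bit arithmetic, assuming PAGE_SHIFT == 12 and the PCID user bit == 11 as defined in processor-flags.h (the DEMO_* names are illustrative):

    /* Sketch of the CR3 bit layout the entry macros manipulate. */
    #define DEMO_PTI_USER_PGTABLE_BIT  12   /* PAGE_SHIFT */
    #define DEMO_PTI_USER_PCID_BIT     11   /* X86_CR3_PTI_PCID_USER_BIT */

    static unsigned long demo_user_cr3(unsigned long kernel_cr3, int use_pcid)
    {
            unsigned long cr3 = kernel_cr3 | (1UL << DEMO_PTI_USER_PGTABLE_BIT);

            if (use_pcid)
                    cr3 |= 1UL << DEMO_PTI_USER_PCID_BIT;   /* flip to the user ASID */
            return cr3;
    }

    static int demo_cr3_is_user(unsigned long cr3)
    {
            /* What SAVE_AND_SWITCH_TO_KERNEL_CR3 now tests with "bt". */
            return (cr3 >> DEMO_PTI_USER_PGTABLE_BIT) & 1;
    }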
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index ace8f321a5a1..a1f28a54f23a 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -44,6 +44,7 @@
44#include <asm/asm.h> 44#include <asm/asm.h>
45#include <asm/smap.h> 45#include <asm/smap.h>
46#include <asm/frame.h> 46#include <asm/frame.h>
47#include <asm/nospec-branch.h>
47 48
48 .section .entry.text, "ax" 49 .section .entry.text, "ax"
49 50
@@ -290,7 +291,7 @@ ENTRY(ret_from_fork)
290 291
291 /* kernel thread */ 292 /* kernel thread */
2921: movl %edi, %eax 2931: movl %edi, %eax
293 call *%ebx 294 CALL_NOSPEC %ebx
294 /* 295 /*
295 * A kernel thread is allowed to return here after successfully 296 * A kernel thread is allowed to return here after successfully
296 * calling do_execve(). Exit to userspace to complete the execve() 297 * calling do_execve(). Exit to userspace to complete the execve()
@@ -919,7 +920,7 @@ common_exception:
919 movl %ecx, %es 920 movl %ecx, %es
920 TRACE_IRQS_OFF 921 TRACE_IRQS_OFF
921 movl %esp, %eax # pt_regs pointer 922 movl %esp, %eax # pt_regs pointer
922 call *%edi 923 CALL_NOSPEC %edi
923 jmp ret_from_exception 924 jmp ret_from_exception
924END(common_exception) 925END(common_exception)
925 926
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index f048e384ff54..4f8e1d35a97c 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -37,6 +37,7 @@
37#include <asm/pgtable_types.h> 37#include <asm/pgtable_types.h>
38#include <asm/export.h> 38#include <asm/export.h>
39#include <asm/frame.h> 39#include <asm/frame.h>
40#include <asm/nospec-branch.h>
40#include <linux/err.h> 41#include <linux/err.h>
41 42
42#include "calling.h" 43#include "calling.h"
@@ -191,7 +192,7 @@ ENTRY(entry_SYSCALL_64_trampoline)
191 */ 192 */
192 pushq %rdi 193 pushq %rdi
193 movq $entry_SYSCALL_64_stage2, %rdi 194 movq $entry_SYSCALL_64_stage2, %rdi
194 jmp *%rdi 195 JMP_NOSPEC %rdi
195END(entry_SYSCALL_64_trampoline) 196END(entry_SYSCALL_64_trampoline)
196 197
197 .popsection 198 .popsection
@@ -270,7 +271,12 @@ entry_SYSCALL_64_fastpath:
270 * It might end up jumping to the slow path. If it jumps, RAX 271 * It might end up jumping to the slow path. If it jumps, RAX
271 * and all argument registers are clobbered. 272 * and all argument registers are clobbered.
272 */ 273 */
274#ifdef CONFIG_RETPOLINE
275 movq sys_call_table(, %rax, 8), %rax
276 call __x86_indirect_thunk_rax
277#else
273 call *sys_call_table(, %rax, 8) 278 call *sys_call_table(, %rax, 8)
279#endif
274.Lentry_SYSCALL_64_after_fastpath_call: 280.Lentry_SYSCALL_64_after_fastpath_call:
275 281
276 movq %rax, RAX(%rsp) 282 movq %rax, RAX(%rsp)
@@ -442,7 +448,7 @@ ENTRY(stub_ptregs_64)
442 jmp entry_SYSCALL64_slow_path 448 jmp entry_SYSCALL64_slow_path
443 449
4441: 4501:
445 jmp *%rax /* Called from C */ 451 JMP_NOSPEC %rax /* Called from C */
446END(stub_ptregs_64) 452END(stub_ptregs_64)
447 453
448.macro ptregs_stub func 454.macro ptregs_stub func
@@ -521,7 +527,7 @@ ENTRY(ret_from_fork)
5211: 5271:
522 /* kernel thread */ 528 /* kernel thread */
523 movq %r12, %rdi 529 movq %r12, %rdi
524 call *%rbx 530 CALL_NOSPEC %rbx
525 /* 531 /*
526 * A kernel thread is allowed to return here after successfully 532 * A kernel thread is allowed to return here after successfully
527 * calling do_execve(). Exit to userspace to complete the execve() 533 * calling do_execve(). Exit to userspace to complete the execve()
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 40f17009ec20..98d5358e4041 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -190,8 +190,13 @@ ENTRY(entry_SYSCALL_compat)
190 /* Interrupts are off on entry. */ 190 /* Interrupts are off on entry. */
191 swapgs 191 swapgs
192 192
193 /* Stash user ESP and switch to the kernel stack. */ 193 /* Stash user ESP */
194 movl %esp, %r8d 194 movl %esp, %r8d
195
196 /* Use %rsp as scratch reg. User ESP is stashed in r8 */
197 SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
198
199 /* Switch to the kernel stack */
195 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp 200 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
196 201
197 /* Construct struct pt_regs on stack */ 202 /* Construct struct pt_regs on stack */
@@ -220,12 +225,6 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
220 pushq $0 /* pt_regs->r15 = 0 */ 225 pushq $0 /* pt_regs->r15 = 0 */
221 226
222 /* 227 /*
223 * We just saved %rdi so it is safe to clobber. It is not
224 * preserved during the C calls inside TRACE_IRQS_OFF anyway.
225 */
226 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
227
228 /*
229 * User mode is traced as though IRQs are on, and SYSENTER 228 * User mode is traced as though IRQs are on, and SYSENTER
230 * turned them off. 229 * turned them off.
231 */ 230 */
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 141e07b06216..24ffa1e88cf9 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -582,6 +582,24 @@ static __init int bts_init(void)
582 if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts) 582 if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
583 return -ENODEV; 583 return -ENODEV;
584 584
585 if (boot_cpu_has(X86_FEATURE_PTI)) {
586 /*
587 * BTS hardware writes through a virtual memory map; we must
588 * either use the kernel physical map, or the user mapping of
589 * the AUX buffer.
590 *
591 * However, since this driver supports per-CPU and per-task inherit,
592 * we cannot use the user mapping since it will not be available
593 * if we're not running the owning process.
594 *
595 * With PTI we can't use the kernel map either, because it's not
596 * there when we run userspace.
597 *
598 * For now, disable this driver when using PTI.
599 */
600 return -ENODEV;
601 }
602
585 bts_pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE | 603 bts_pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE |
586 PERF_PMU_CAP_EXCLUSIVE; 604 PERF_PMU_CAP_EXCLUSIVE;
587 bts_pmu.task_ctx_nr = perf_sw_context; 605 bts_pmu.task_ctx_nr = perf_sw_context;
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 8f0aace08b87..8156e47da7ba 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -5,6 +5,7 @@
5 5
6#include <asm/cpu_entry_area.h> 6#include <asm/cpu_entry_area.h>
7#include <asm/perf_event.h> 7#include <asm/perf_event.h>
8#include <asm/tlbflush.h>
8#include <asm/insn.h> 9#include <asm/insn.h>
9 10
10#include "../perf_event.h" 11#include "../perf_event.h"
@@ -283,20 +284,35 @@ static DEFINE_PER_CPU(void *, insn_buffer);
283 284
284static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot) 285static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
285{ 286{
287 unsigned long start = (unsigned long)cea;
286 phys_addr_t pa; 288 phys_addr_t pa;
287 size_t msz = 0; 289 size_t msz = 0;
288 290
289 pa = virt_to_phys(addr); 291 pa = virt_to_phys(addr);
292
293 preempt_disable();
290 for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE) 294 for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
291 cea_set_pte(cea, pa, prot); 295 cea_set_pte(cea, pa, prot);
296
297 /*
298 * This is a cross-CPU update of the cpu_entry_area; we must shoot down
299 * all TLB entries for it.
300 */
301 flush_tlb_kernel_range(start, start + size);
302 preempt_enable();
292} 303}
293 304
294static void ds_clear_cea(void *cea, size_t size) 305static void ds_clear_cea(void *cea, size_t size)
295{ 306{
307 unsigned long start = (unsigned long)cea;
296 size_t msz = 0; 308 size_t msz = 0;
297 309
310 preempt_disable();
298 for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE) 311 for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
299 cea_set_pte(cea, 0, PAGE_NONE); 312 cea_set_pte(cea, 0, PAGE_NONE);
313
314 flush_tlb_kernel_range(start, start + size);
315 preempt_enable();
300} 316}
301 317
302static void *dsalloc_pages(size_t size, gfp_t flags, int cpu) 318static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 8d0ec9df1cbe..44f5d79d5105 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -49,7 +49,7 @@ extern int acpi_fix_pin2_polarity;
49extern int acpi_disable_cmcff; 49extern int acpi_disable_cmcff;
50 50
51extern u8 acpi_sci_flags; 51extern u8 acpi_sci_flags;
52extern int acpi_sci_override_gsi; 52extern u32 acpi_sci_override_gsi;
53void acpi_pic_sci_set_trigger(unsigned int, u16); 53void acpi_pic_sci_set_trigger(unsigned int, u16);
54 54
55struct device; 55struct device;
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index dbfd0854651f..cf5961ca8677 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -140,7 +140,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
140 ".popsection\n" \ 140 ".popsection\n" \
141 ".pushsection .altinstr_replacement, \"ax\"\n" \ 141 ".pushsection .altinstr_replacement, \"ax\"\n" \
142 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \ 142 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
143 ".popsection" 143 ".popsection\n"
144 144
145#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\ 145#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
146 OLDINSTR_2(oldinstr, 1, 2) \ 146 OLDINSTR_2(oldinstr, 1, 2) \
@@ -151,7 +151,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
151 ".pushsection .altinstr_replacement, \"ax\"\n" \ 151 ".pushsection .altinstr_replacement, \"ax\"\n" \
152 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \ 152 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
153 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ 153 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
154 ".popsection" 154 ".popsection\n"
155 155
156/* 156/*
157 * Alternative instructions for different CPU types or capabilities. 157 * Alternative instructions for different CPU types or capabilities.
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
index ff700d81e91e..0927cdc4f946 100644
--- a/arch/x86/include/asm/asm-prototypes.h
+++ b/arch/x86/include/asm/asm-prototypes.h
@@ -11,7 +11,32 @@
11#include <asm/pgtable.h> 11#include <asm/pgtable.h>
12#include <asm/special_insns.h> 12#include <asm/special_insns.h>
13#include <asm/preempt.h> 13#include <asm/preempt.h>
14#include <asm/asm.h>
14 15
15#ifndef CONFIG_X86_CMPXCHG64 16#ifndef CONFIG_X86_CMPXCHG64
16extern void cmpxchg8b_emu(void); 17extern void cmpxchg8b_emu(void);
17#endif 18#endif
19
20#ifdef CONFIG_RETPOLINE
21#ifdef CONFIG_X86_32
22#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_e ## reg(void);
23#else
24#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_r ## reg(void);
25INDIRECT_THUNK(8)
26INDIRECT_THUNK(9)
27INDIRECT_THUNK(10)
28INDIRECT_THUNK(11)
29INDIRECT_THUNK(12)
30INDIRECT_THUNK(13)
31INDIRECT_THUNK(14)
32INDIRECT_THUNK(15)
33#endif
34INDIRECT_THUNK(ax)
35INDIRECT_THUNK(bx)
36INDIRECT_THUNK(cx)
37INDIRECT_THUNK(dx)
38INDIRECT_THUNK(si)
39INDIRECT_THUNK(di)
40INDIRECT_THUNK(bp)
41INDIRECT_THUNK(sp)
42#endif /* CONFIG_RETPOLINE */
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 07cdd1715705..f275447862f4 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -203,6 +203,8 @@
203#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ 203#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
204#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ 204#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
205#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */ 205#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
206#define X86_FEATURE_RETPOLINE ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */
207#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */
206#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ 208#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
207#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ 209#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
208#define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */ 210#define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */
@@ -341,6 +343,8 @@
341#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ 343#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
342#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ 344#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
343#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ 345#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
344#define X86_BUG_CPU_INSECURE X86_BUG(14) /* CPU is insecure and needs kernel page table isolation */ 346#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
347#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
348#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
345 349
346#endif /* _ASM_X86_CPUFEATURES_H */ 350#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 5400add2885b..8bf450b13d9f 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -7,6 +7,7 @@
7#include <linux/nmi.h> 7#include <linux/nmi.h>
8#include <asm/io.h> 8#include <asm/io.h>
9#include <asm/hyperv.h> 9#include <asm/hyperv.h>
10#include <asm/nospec-branch.h>
10 11
11/* 12/*
12 * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent 13 * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
@@ -186,10 +187,11 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
186 return U64_MAX; 187 return U64_MAX;
187 188
188 __asm__ __volatile__("mov %4, %%r8\n" 189 __asm__ __volatile__("mov %4, %%r8\n"
189 "call *%5" 190 CALL_NOSPEC
190 : "=a" (hv_status), ASM_CALL_CONSTRAINT, 191 : "=a" (hv_status), ASM_CALL_CONSTRAINT,
191 "+c" (control), "+d" (input_address) 192 "+c" (control), "+d" (input_address)
192 : "r" (output_address), "m" (hv_hypercall_pg) 193 : "r" (output_address),
194 THUNK_TARGET(hv_hypercall_pg)
193 : "cc", "memory", "r8", "r9", "r10", "r11"); 195 : "cc", "memory", "r8", "r9", "r10", "r11");
194#else 196#else
195 u32 input_address_hi = upper_32_bits(input_address); 197 u32 input_address_hi = upper_32_bits(input_address);
@@ -200,13 +202,13 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
200 if (!hv_hypercall_pg) 202 if (!hv_hypercall_pg)
201 return U64_MAX; 203 return U64_MAX;
202 204
203 __asm__ __volatile__("call *%7" 205 __asm__ __volatile__(CALL_NOSPEC
204 : "=A" (hv_status), 206 : "=A" (hv_status),
205 "+c" (input_address_lo), ASM_CALL_CONSTRAINT 207 "+c" (input_address_lo), ASM_CALL_CONSTRAINT
206 : "A" (control), 208 : "A" (control),
207 "b" (input_address_hi), 209 "b" (input_address_hi),
208 "D"(output_address_hi), "S"(output_address_lo), 210 "D"(output_address_hi), "S"(output_address_lo),
209 "m" (hv_hypercall_pg) 211 THUNK_TARGET(hv_hypercall_pg)
210 : "cc", "memory"); 212 : "cc", "memory");
211#endif /* !x86_64 */ 213#endif /* !x86_64 */
212 return hv_status; 214 return hv_status;
@@ -227,10 +229,10 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
227 229
228#ifdef CONFIG_X86_64 230#ifdef CONFIG_X86_64
229 { 231 {
230 __asm__ __volatile__("call *%4" 232 __asm__ __volatile__(CALL_NOSPEC
231 : "=a" (hv_status), ASM_CALL_CONSTRAINT, 233 : "=a" (hv_status), ASM_CALL_CONSTRAINT,
232 "+c" (control), "+d" (input1) 234 "+c" (control), "+d" (input1)
233 : "m" (hv_hypercall_pg) 235 : THUNK_TARGET(hv_hypercall_pg)
234 : "cc", "r8", "r9", "r10", "r11"); 236 : "cc", "r8", "r9", "r10", "r11");
235 } 237 }
236#else 238#else
@@ -238,13 +240,13 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
238 u32 input1_hi = upper_32_bits(input1); 240 u32 input1_hi = upper_32_bits(input1);
239 u32 input1_lo = lower_32_bits(input1); 241 u32 input1_lo = lower_32_bits(input1);
240 242
241 __asm__ __volatile__ ("call *%5" 243 __asm__ __volatile__ (CALL_NOSPEC
242 : "=A"(hv_status), 244 : "=A"(hv_status),
243 "+c"(input1_lo), 245 "+c"(input1_lo),
244 ASM_CALL_CONSTRAINT 246 ASM_CALL_CONSTRAINT
245 : "A" (control), 247 : "A" (control),
246 "b" (input1_hi), 248 "b" (input1_hi),
247 "m" (hv_hypercall_pg) 249 THUNK_TARGET(hv_hypercall_pg)
248 : "cc", "edi", "esi"); 250 : "cc", "edi", "esi");
249 } 251 }
250#endif 252#endif
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 34c4922bbc3f..e7b983a35506 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -355,6 +355,9 @@
355#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL 355#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
356#define FAM10H_MMIO_CONF_BASE_SHIFT 20 356#define FAM10H_MMIO_CONF_BASE_SHIFT 20
357#define MSR_FAM10H_NODE_ID 0xc001100c 357#define MSR_FAM10H_NODE_ID 0xc001100c
358#define MSR_F10H_DECFG 0xc0011029
359#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1
360#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
358 361
359/* K8 MSRs */ 362/* K8 MSRs */
360#define MSR_K8_TOP_MEM1 0xc001001a 363#define MSR_K8_TOP_MEM1 0xc001001a
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
new file mode 100644
index 000000000000..402a11c803c3
--- /dev/null
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -0,0 +1,214 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#ifndef __NOSPEC_BRANCH_H__
4#define __NOSPEC_BRANCH_H__
5
6#include <asm/alternative.h>
7#include <asm/alternative-asm.h>
8#include <asm/cpufeatures.h>
9
10/*
11 * Fill the CPU return stack buffer.
12 *
13 * Each entry in the RSB, if used for a speculative 'ret', contains an
14 * infinite 'pause; jmp' loop to capture speculative execution.
15 *
16 * This is required in various cases for retpoline and IBRS-based
17 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
18 * eliminate potentially bogus entries from the RSB, and sometimes
19 * purely to ensure that it doesn't get empty, which on some CPUs would
20 * allow predictions from other (unwanted!) sources to be used.
21 *
22 * We define a CPP macro such that it can be used from both .S files and
23 * inline assembly. It's possible to do a .macro and then include that
24 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
25 */
26
27#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
28#define RSB_FILL_LOOPS 16 /* To avoid underflow */
29
30/*
31 * Google experimented with loop-unrolling and this turned out to be
32 * the optimal version — two calls, each with their own speculation
33 * trap should their return address end up getting used, in a loop.
34 */
35#define __FILL_RETURN_BUFFER(reg, nr, sp) \
36 mov $(nr/2), reg; \
37771: \
38 call 772f; \
39773: /* speculation trap */ \
40 pause; \
41 jmp 773b; \
42772: \
43 call 774f; \
44775: /* speculation trap */ \
45 pause; \
46 jmp 775b; \
47774: \
48 dec reg; \
49 jnz 771b; \
50 add $(BITS_PER_LONG/8) * nr, sp;
51
52#ifdef __ASSEMBLY__
53
54/*
55 * This should be used immediately before a retpoline alternative. It tells
56 * objtool where the retpolines are so that it can make sense of the control
57 * flow by just reading the original instruction(s) and ignoring the
58 * alternatives.
59 */
60.macro ANNOTATE_NOSPEC_ALTERNATIVE
61 .Lannotate_\@:
62 .pushsection .discard.nospec
63 .long .Lannotate_\@ - .
64 .popsection
65.endm
66
67/*
68 * These are the bare retpoline primitives for indirect jmp and call.
69 * Do not use these directly; they only exist to make the ALTERNATIVE
70 * invocation below less ugly.
71 */
72.macro RETPOLINE_JMP reg:req
73 call .Ldo_rop_\@
74.Lspec_trap_\@:
75 pause
76 jmp .Lspec_trap_\@
77.Ldo_rop_\@:
78 mov \reg, (%_ASM_SP)
79 ret
80.endm
81
82/*
83 * This is a wrapper around RETPOLINE_JMP so the called function in reg
84 * returns to the instruction after the macro.
85 */
86.macro RETPOLINE_CALL reg:req
87 jmp .Ldo_call_\@
88.Ldo_retpoline_jmp_\@:
89 RETPOLINE_JMP \reg
90.Ldo_call_\@:
91 call .Ldo_retpoline_jmp_\@
92.endm
93
94/*
95 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
96 * indirect jmp/call which may be susceptible to the Spectre variant 2
97 * attack.
98 */
99.macro JMP_NOSPEC reg:req
100#ifdef CONFIG_RETPOLINE
101 ANNOTATE_NOSPEC_ALTERNATIVE
102 ALTERNATIVE_2 __stringify(jmp *\reg), \
103 __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \
104 __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
105#else
106 jmp *\reg
107#endif
108.endm
109
110.macro CALL_NOSPEC reg:req
111#ifdef CONFIG_RETPOLINE
112 ANNOTATE_NOSPEC_ALTERNATIVE
113 ALTERNATIVE_2 __stringify(call *\reg), \
114 __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
115 __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD
116#else
117 call *\reg
118#endif
119.endm
120
121 /*
122 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
123 * monstrosity above, manually.
124 */
125.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
126#ifdef CONFIG_RETPOLINE
127 ANNOTATE_NOSPEC_ALTERNATIVE
128 ALTERNATIVE "jmp .Lskip_rsb_\@", \
129 __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \
130 \ftr
131.Lskip_rsb_\@:
132#endif
133.endm
134
135#else /* __ASSEMBLY__ */
136
137#define ANNOTATE_NOSPEC_ALTERNATIVE \
138 "999:\n\t" \
139 ".pushsection .discard.nospec\n\t" \
140 ".long 999b - .\n\t" \
141 ".popsection\n\t"
142
143#if defined(CONFIG_X86_64) && defined(RETPOLINE)
144
145/*
146 * Since the inline asm uses the %V modifier which is only in newer GCC,
147 * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE.
148 */
149# define CALL_NOSPEC \
150 ANNOTATE_NOSPEC_ALTERNATIVE \
151 ALTERNATIVE( \
152 "call *%[thunk_target]\n", \
153 "call __x86_indirect_thunk_%V[thunk_target]\n", \
154 X86_FEATURE_RETPOLINE)
155# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
156
157#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
158/*
159 * For i386 we use the original ret-equivalent retpoline, because
160 * otherwise we'll run out of registers. We don't care about CET
161 * here, anyway.
162 */
163# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n", \
164 " jmp 904f;\n" \
165 " .align 16\n" \
166 "901: call 903f;\n" \
167 "902: pause;\n" \
168 " jmp 902b;\n" \
169 " .align 16\n" \
170 "903: addl $4, %%esp;\n" \
171 " pushl %[thunk_target];\n" \
172 " ret;\n" \
173 " .align 16\n" \
174 "904: call 901b;\n", \
175 X86_FEATURE_RETPOLINE)
176
177# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
178#else /* No retpoline for C / inline asm */
179# define CALL_NOSPEC "call *%[thunk_target]\n"
180# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
181#endif
182
183/* The Spectre V2 mitigation variants */
184enum spectre_v2_mitigation {
185 SPECTRE_V2_NONE,
186 SPECTRE_V2_RETPOLINE_MINIMAL,
187 SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
188 SPECTRE_V2_RETPOLINE_GENERIC,
189 SPECTRE_V2_RETPOLINE_AMD,
190 SPECTRE_V2_IBRS,
191};
192
193/*
194 * On VMEXIT we must ensure that no RSB predictions learned in the guest
195 * can be followed in the host, by overwriting the RSB completely. Both
196 * retpoline and IBRS mitigations for Spectre v2 need this; only on future
197 * CPUs with IBRS_ATT *might* it be avoided.
198 */
199static inline void vmexit_fill_RSB(void)
200{
201#ifdef CONFIG_RETPOLINE
202 unsigned long loops = RSB_CLEAR_LOOPS / 2;
203
204 asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
205 ALTERNATIVE("jmp 910f",
206 __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
207 X86_FEATURE_RETPOLINE)
208 "910:"
209 : "=&r" (loops), ASM_CALL_CONSTRAINT
210 : "r" (loops) : "memory" );
211#endif
212}
213#endif /* __ASSEMBLY__ */
214#endif /* __NOSPEC_BRANCH_H__ */
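For C callers, the header boils down to the CALL_NOSPEC/THUNK_TARGET pair, which is exactly what the mshyperv.h and Xen hypercall hunks elsewhere in this series switch to. A minimal usage sketch for a 64-bit build; the pointer and wrapper are illustrative, only CALL_NOSPEC, THUNK_TARGET and ASM_CALL_CONSTRAINT come from the kernel headers:

    #include <asm/asm.h>            /* ASM_CALL_CONSTRAINT */
    #include <asm/nospec-branch.h>  /* CALL_NOSPEC, THUNK_TARGET */

    static void *demo_target;       /* e.g. a hypercall page, illustrative only */

    static unsigned long demo_indirect_call(unsigned long arg)
    {
            unsigned long ret;

            /*
             * Expands to "call *%[thunk_target]" or, on a retpoline build,
             * to "call __x86_indirect_thunk_<reg>".  The first argument is
             * passed in %rdi and the result returned in %rax (SysV ABI).
             */
            asm volatile(CALL_NOSPEC
                         : "=a" (ret), ASM_CALL_CONSTRAINT, "+D" (arg)
                         : THUNK_TARGET(demo_target)
                         : "memory");
            return ret;
    }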
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 7a5d6695abd3..eb66fa9cd0fc 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -38,6 +38,7 @@ do { \
38#define PCI_NOASSIGN_ROMS 0x80000 38#define PCI_NOASSIGN_ROMS 0x80000
39#define PCI_ROOT_NO_CRS 0x100000 39#define PCI_ROOT_NO_CRS 0x100000
40#define PCI_NOASSIGN_BARS 0x200000 40#define PCI_NOASSIGN_BARS 0x200000
41#define PCI_BIG_ROOT_WINDOW 0x400000
41 42
42extern unsigned int pci_probe; 43extern unsigned int pci_probe;
43extern unsigned long pirq_table_addr; 44extern unsigned long pirq_table_addr;
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index b97a539bcdee..6b8f73dcbc2c 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -75,7 +75,13 @@ typedef struct { pteval_t pte; } pte_t;
75#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) 75#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
76#define PGDIR_MASK (~(PGDIR_SIZE - 1)) 76#define PGDIR_MASK (~(PGDIR_SIZE - 1))
77 77
78/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */ 78/*
79 * See Documentation/x86/x86_64/mm.txt for a description of the memory map.
80 *
81 * Be very careful vs. KASLR when changing anything here. The KASLR address
82 * range must not overlap with anything except the KASAN shadow area, which
83 * is correct as KASAN disables KASLR.
84 */
79#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) 85#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
80 86
81#ifdef CONFIG_X86_5LEVEL 87#ifdef CONFIG_X86_5LEVEL
@@ -88,7 +94,7 @@ typedef struct { pteval_t pte; } pte_t;
88# define VMALLOC_SIZE_TB _AC(32, UL) 94# define VMALLOC_SIZE_TB _AC(32, UL)
89# define __VMALLOC_BASE _AC(0xffffc90000000000, UL) 95# define __VMALLOC_BASE _AC(0xffffc90000000000, UL)
90# define __VMEMMAP_BASE _AC(0xffffea0000000000, UL) 96# define __VMEMMAP_BASE _AC(0xffffea0000000000, UL)
91# define LDT_PGD_ENTRY _AC(-4, UL) 97# define LDT_PGD_ENTRY _AC(-3, UL)
92# define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) 98# define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
93#endif 99#endif
94 100
@@ -104,13 +110,13 @@ typedef struct { pteval_t pte; } pte_t;
104 110
105#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) 111#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
106/* The module sections ends with the start of the fixmap */ 112/* The module sections ends with the start of the fixmap */
107#define MODULES_END __fix_to_virt(__end_of_fixed_addresses + 1) 113#define MODULES_END _AC(0xffffffffff000000, UL)
108#define MODULES_LEN (MODULES_END - MODULES_VADDR) 114#define MODULES_LEN (MODULES_END - MODULES_VADDR)
109 115
110#define ESPFIX_PGD_ENTRY _AC(-2, UL) 116#define ESPFIX_PGD_ENTRY _AC(-2, UL)
111#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT) 117#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT)
112 118
113#define CPU_ENTRY_AREA_PGD _AC(-3, UL) 119#define CPU_ENTRY_AREA_PGD _AC(-4, UL)
114#define CPU_ENTRY_AREA_BASE (CPU_ENTRY_AREA_PGD << P4D_SHIFT) 120#define CPU_ENTRY_AREA_BASE (CPU_ENTRY_AREA_PGD << P4D_SHIFT)
115 121
116#define EFI_VA_START ( -4 * (_AC(1, UL) << 30)) 122#define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index 6a60fea90b9d..625a52a5594f 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -40,7 +40,7 @@
40#define CR3_NOFLUSH BIT_ULL(63) 40#define CR3_NOFLUSH BIT_ULL(63)
41 41
42#ifdef CONFIG_PAGE_TABLE_ISOLATION 42#ifdef CONFIG_PAGE_TABLE_ISOLATION
43# define X86_CR3_PTI_SWITCH_BIT 11 43# define X86_CR3_PTI_PCID_USER_BIT 11
44#endif 44#endif
45 45
46#else 46#else
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 4a08dd2ab32a..d33e4a26dc7e 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -81,13 +81,13 @@ static inline u16 kern_pcid(u16 asid)
81 * Make sure that the dynamic ASID space does not conflict with the 81 * Make sure that the dynamic ASID space does not conflict with the
82 * bit we are using to switch between user and kernel ASIDs. 82 * bit we are using to switch between user and kernel ASIDs.
83 */ 83 */
84 BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_SWITCH_BIT)); 84 BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));
85 85
86 /* 86 /*
87 * The ASID being passed in here should have respected the 87 * The ASID being passed in here should have respected the
88 * MAX_ASID_AVAILABLE and thus never have the switch bit set. 88 * MAX_ASID_AVAILABLE and thus never have the switch bit set.
89 */ 89 */
90 VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_SWITCH_BIT)); 90 VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
91#endif 91#endif
92 /* 92 /*
93 * The dynamically-assigned ASIDs that get passed in are small 93 * The dynamically-assigned ASIDs that get passed in are small
@@ -112,7 +112,7 @@ static inline u16 user_pcid(u16 asid)
112{ 112{
113 u16 ret = kern_pcid(asid); 113 u16 ret = kern_pcid(asid);
114#ifdef CONFIG_PAGE_TABLE_ISOLATION 114#ifdef CONFIG_PAGE_TABLE_ISOLATION
115 ret |= 1 << X86_CR3_PTI_SWITCH_BIT; 115 ret |= 1 << X86_CR3_PTI_PCID_USER_BIT;
116#endif 116#endif
117 return ret; 117 return ret;
118} 118}
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
index c1688c2d0a12..1f86e1b0a5cd 100644
--- a/arch/x86/include/asm/unwind.h
+++ b/arch/x86/include/asm/unwind.h
@@ -56,18 +56,27 @@ void unwind_start(struct unwind_state *state, struct task_struct *task,
56 56
57#if defined(CONFIG_UNWINDER_ORC) || defined(CONFIG_UNWINDER_FRAME_POINTER) 57#if defined(CONFIG_UNWINDER_ORC) || defined(CONFIG_UNWINDER_FRAME_POINTER)
58/* 58/*
59 * WARNING: The entire pt_regs may not be safe to dereference. In some cases, 59 * If 'partial' returns true, only the iret frame registers are valid.
60 * only the iret frame registers are accessible. Use with caution!
61 */ 60 */
62static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state) 61static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state,
62 bool *partial)
63{ 63{
64 if (unwind_done(state)) 64 if (unwind_done(state))
65 return NULL; 65 return NULL;
66 66
67 if (partial) {
68#ifdef CONFIG_UNWINDER_ORC
69 *partial = !state->full_regs;
70#else
71 *partial = false;
72#endif
73 }
74
67 return state->regs; 75 return state->regs;
68} 76}
69#else 77#else
70static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state) 78static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state,
79 bool *partial)
71{ 80{
72 return NULL; 81 return NULL;
73} 82}
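Callers now pass a bool so they can tell whether they are looking at a full pt_regs or just the iret frame. A sketch of how a stack dumper might consume the new signature; the wrapper name is illustrative, while unwind_start/unwind_next_frame/unwind_done are the existing unwinder API:

    #include <linux/sched.h>
    #include <asm/unwind.h>

    static void demo_walk_entry_regs(struct task_struct *task)
    {
            struct unwind_state state;
            struct pt_regs *regs;
            bool partial;

            for (unwind_start(&state, task, NULL, NULL);
                 !unwind_done(&state); unwind_next_frame(&state)) {
                    regs = unwind_get_entry_regs(&state, &partial);
                    if (!regs)
                            continue;
                    /*
                     * With partial == true only the iret frame (ip, cs,
                     * flags, sp, ss) is valid, so do not print the
                     * general-purpose registers from this pt_regs.
                     */
            }
    }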
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 7cb282e9e587..bfd882617613 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -44,6 +44,7 @@
44#include <asm/page.h> 44#include <asm/page.h>
45#include <asm/pgtable.h> 45#include <asm/pgtable.h>
46#include <asm/smap.h> 46#include <asm/smap.h>
47#include <asm/nospec-branch.h>
47 48
48#include <xen/interface/xen.h> 49#include <xen/interface/xen.h>
49#include <xen/interface/sched.h> 50#include <xen/interface/sched.h>
@@ -217,9 +218,9 @@ privcmd_call(unsigned call,
217 __HYPERCALL_5ARG(a1, a2, a3, a4, a5); 218 __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
218 219
219 stac(); 220 stac();
220 asm volatile("call *%[call]" 221 asm volatile(CALL_NOSPEC
221 : __HYPERCALL_5PARAM 222 : __HYPERCALL_5PARAM
222 : [call] "a" (&hypercall_page[call]) 223 : [thunk_target] "a" (&hypercall_page[call])
223 : __HYPERCALL_CLOBBER5); 224 : __HYPERCALL_CLOBBER5);
224 clac(); 225 clac();
225 226
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index f4c463df8b08..ec3a286163c3 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -68,8 +68,9 @@ int acpi_ioapic;
68int acpi_strict; 68int acpi_strict;
69int acpi_disable_cmcff; 69int acpi_disable_cmcff;
70 70
71/* ACPI SCI override configuration */
71u8 acpi_sci_flags __initdata; 72u8 acpi_sci_flags __initdata;
72int acpi_sci_override_gsi __initdata; 73u32 acpi_sci_override_gsi __initdata = INVALID_ACPI_IRQ;
73int acpi_skip_timer_override __initdata; 74int acpi_skip_timer_override __initdata;
74int acpi_use_timer_override __initdata; 75int acpi_use_timer_override __initdata;
75int acpi_fix_pin2_polarity __initdata; 76int acpi_fix_pin2_polarity __initdata;
@@ -112,8 +113,6 @@ static u32 isa_irq_to_gsi[NR_IRQS_LEGACY] __read_mostly = {
112 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 113 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
113}; 114};
114 115
115#define ACPI_INVALID_GSI INT_MIN
116
117/* 116/*
118 * This is just a simple wrapper around early_memremap(), 117 * This is just a simple wrapper around early_memremap(),
119 * with sanity checks for phys == 0 and size == 0. 118 * with sanity checks for phys == 0 and size == 0.
@@ -372,7 +371,7 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
372 * and acpi_isa_irq_to_gsi() may give wrong result. 371 * and acpi_isa_irq_to_gsi() may give wrong result.
373 */ 372 */
374 if (gsi < nr_legacy_irqs() && isa_irq_to_gsi[gsi] == gsi) 373 if (gsi < nr_legacy_irqs() && isa_irq_to_gsi[gsi] == gsi)
375 isa_irq_to_gsi[gsi] = ACPI_INVALID_GSI; 374 isa_irq_to_gsi[gsi] = INVALID_ACPI_IRQ;
376 isa_irq_to_gsi[bus_irq] = gsi; 375 isa_irq_to_gsi[bus_irq] = gsi;
377} 376}
378 377
@@ -620,24 +619,24 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
620 } 619 }
621 620
622 rc = acpi_get_override_irq(gsi, &trigger, &polarity); 621 rc = acpi_get_override_irq(gsi, &trigger, &polarity);
623 if (rc == 0) { 622 if (rc)
624 trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE; 623 return rc;
625 polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
626 irq = acpi_register_gsi(NULL, gsi, trigger, polarity);
627 if (irq >= 0) {
628 *irqp = irq;
629 return 0;
630 }
631 }
632 624
633 return -1; 625 trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
626 polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
627 irq = acpi_register_gsi(NULL, gsi, trigger, polarity);
628 if (irq < 0)
629 return irq;
630
631 *irqp = irq;
632 return 0;
634} 633}
635EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); 634EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
636 635
637int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi) 636int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
638{ 637{
639 if (isa_irq < nr_legacy_irqs() && 638 if (isa_irq < nr_legacy_irqs() &&
640 isa_irq_to_gsi[isa_irq] != ACPI_INVALID_GSI) { 639 isa_irq_to_gsi[isa_irq] != INVALID_ACPI_IRQ) {
641 *gsi = isa_irq_to_gsi[isa_irq]; 640 *gsi = isa_irq_to_gsi[isa_irq];
642 return 0; 641 return 0;
643 } 642 }
@@ -676,8 +675,7 @@ static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi,
676 mutex_lock(&acpi_ioapic_lock); 675 mutex_lock(&acpi_ioapic_lock);
677 irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info); 676 irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info);
678 /* Don't set up the ACPI SCI because it's already set up */ 677 /* Don't set up the ACPI SCI because it's already set up */
679 if (irq >= 0 && enable_update_mptable && 678 if (irq >= 0 && enable_update_mptable && gsi != acpi_gbl_FADT.sci_interrupt)
680 acpi_gbl_FADT.sci_interrupt != gsi)
681 mp_config_acpi_gsi(dev, gsi, trigger, polarity); 679 mp_config_acpi_gsi(dev, gsi, trigger, polarity);
682 mutex_unlock(&acpi_ioapic_lock); 680 mutex_unlock(&acpi_ioapic_lock);
683#endif 681#endif
@@ -1211,8 +1209,9 @@ static int __init acpi_parse_madt_ioapic_entries(void)
1211 /* 1209 /*
1212 * If BIOS did not supply an INT_SRC_OVR for the SCI 1210 * If BIOS did not supply an INT_SRC_OVR for the SCI
1213 * pretend we got one so we can set the SCI flags. 1211 * pretend we got one so we can set the SCI flags.
1212 * But ignore setting up SCI on hardware reduced platforms.
1214 */ 1213 */
1215 if (!acpi_sci_override_gsi) 1214 if (acpi_sci_override_gsi == INVALID_ACPI_IRQ && !acpi_gbl_reduced_hardware)
1216 acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0, 1215 acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0,
1217 acpi_gbl_FADT.sci_interrupt); 1216 acpi_gbl_FADT.sci_interrupt);
1218 1217
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index dbaf14d69ebd..4817d743c263 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -344,9 +344,12 @@ done:
344static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr) 344static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
345{ 345{
346 unsigned long flags; 346 unsigned long flags;
347 int i;
347 348
348 if (instr[0] != 0x90) 349 for (i = 0; i < a->padlen; i++) {
349 return; 350 if (instr[i] != 0x90)
351 return;
352 }
350 353
351 local_irq_save(flags); 354 local_irq_save(flags);
352 add_nops(instr + (a->instrlen - a->padlen), a->padlen); 355 add_nops(instr + (a->instrlen - a->padlen), a->padlen);
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index bcb75dc97d44..ea831c858195 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -829,8 +829,32 @@ static void init_amd(struct cpuinfo_x86 *c)
829 set_cpu_cap(c, X86_FEATURE_K8); 829 set_cpu_cap(c, X86_FEATURE_K8);
830 830
831 if (cpu_has(c, X86_FEATURE_XMM2)) { 831 if (cpu_has(c, X86_FEATURE_XMM2)) {
832 /* MFENCE stops RDTSC speculation */ 832 unsigned long long val;
833 set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); 833 int ret;
834
835 /*
836 * A serializing LFENCE has less overhead than MFENCE, so
837 * use it for execution serialization. On families which
838 * don't have that MSR, LFENCE is already serializing.
839 * msr_set_bit() uses the safe accessors, too, even if the MSR
840 * is not present.
841 */
842 msr_set_bit(MSR_F10H_DECFG,
843 MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
844
845 /*
846 * Verify that the MSR write was successful (could be running
847 * under a hypervisor) and only then assume that LFENCE is
848 * serializing.
849 */
850 ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
851 if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
852 /* A serializing LFENCE stops RDTSC speculation */
853 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
854 } else {
855 /* MFENCE stops RDTSC speculation */
856 set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
857 }
834 } 858 }
835 859
836 /* 860 /*
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index ba0b2424c9b0..e4dc26185aa7 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -10,6 +10,10 @@
10 */ 10 */
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/utsname.h> 12#include <linux/utsname.h>
13#include <linux/cpu.h>
14
15#include <asm/nospec-branch.h>
16#include <asm/cmdline.h>
13#include <asm/bugs.h> 17#include <asm/bugs.h>
14#include <asm/processor.h> 18#include <asm/processor.h>
15#include <asm/processor-flags.h> 19#include <asm/processor-flags.h>
@@ -20,6 +24,8 @@
20#include <asm/pgtable.h> 24#include <asm/pgtable.h>
21#include <asm/set_memory.h> 25#include <asm/set_memory.h>
22 26
27static void __init spectre_v2_select_mitigation(void);
28
23void __init check_bugs(void) 29void __init check_bugs(void)
24{ 30{
25 identify_boot_cpu(); 31 identify_boot_cpu();
@@ -29,6 +35,9 @@ void __init check_bugs(void)
29 print_cpu_info(&boot_cpu_data); 35 print_cpu_info(&boot_cpu_data);
30 } 36 }
31 37
38 /* Select the proper spectre mitigation before patching alternatives */
39 spectre_v2_select_mitigation();
40
32#ifdef CONFIG_X86_32 41#ifdef CONFIG_X86_32
33 /* 42 /*
34 * Check whether we are able to run this kernel safely on SMP. 43 * Check whether we are able to run this kernel safely on SMP.
@@ -60,3 +69,179 @@ void __init check_bugs(void)
60 set_memory_4k((unsigned long)__va(0), 1); 69 set_memory_4k((unsigned long)__va(0), 1);
61#endif 70#endif
62} 71}
72
73/* The kernel command line selection */
74enum spectre_v2_mitigation_cmd {
75 SPECTRE_V2_CMD_NONE,
76 SPECTRE_V2_CMD_AUTO,
77 SPECTRE_V2_CMD_FORCE,
78 SPECTRE_V2_CMD_RETPOLINE,
79 SPECTRE_V2_CMD_RETPOLINE_GENERIC,
80 SPECTRE_V2_CMD_RETPOLINE_AMD,
81};
82
83static const char *spectre_v2_strings[] = {
84 [SPECTRE_V2_NONE] = "Vulnerable",
85 [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline",
86 [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline",
87 [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
88 [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
89};
90
91#undef pr_fmt
92#define pr_fmt(fmt) "Spectre V2 mitigation: " fmt
93
94static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
95
96static void __init spec2_print_if_insecure(const char *reason)
97{
98 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
99 pr_info("%s\n", reason);
100}
101
102static void __init spec2_print_if_secure(const char *reason)
103{
104 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
105 pr_info("%s\n", reason);
106}
107
108static inline bool retp_compiler(void)
109{
110 return __is_defined(RETPOLINE);
111}
112
113static inline bool match_option(const char *arg, int arglen, const char *opt)
114{
115 int len = strlen(opt);
116
117 return len == arglen && !strncmp(arg, opt, len);
118}
119
120static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
121{
122 char arg[20];
123 int ret;
124
125 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
126 sizeof(arg));
127 if (ret > 0) {
128 if (match_option(arg, ret, "off")) {
129 goto disable;
130 } else if (match_option(arg, ret, "on")) {
131 spec2_print_if_secure("force enabled on command line.");
132 return SPECTRE_V2_CMD_FORCE;
133 } else if (match_option(arg, ret, "retpoline")) {
134 spec2_print_if_insecure("retpoline selected on command line.");
135 return SPECTRE_V2_CMD_RETPOLINE;
136 } else if (match_option(arg, ret, "retpoline,amd")) {
137 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
138 pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
139 return SPECTRE_V2_CMD_AUTO;
140 }
141 spec2_print_if_insecure("AMD retpoline selected on command line.");
142 return SPECTRE_V2_CMD_RETPOLINE_AMD;
143 } else if (match_option(arg, ret, "retpoline,generic")) {
144 spec2_print_if_insecure("generic retpoline selected on command line.");
145 return SPECTRE_V2_CMD_RETPOLINE_GENERIC;
146 } else if (match_option(arg, ret, "auto")) {
147 return SPECTRE_V2_CMD_AUTO;
148 }
149 }
150
151 if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
152 return SPECTRE_V2_CMD_AUTO;
153disable:
154 spec2_print_if_insecure("disabled on command line.");
155 return SPECTRE_V2_CMD_NONE;
156}
157
158static void __init spectre_v2_select_mitigation(void)
159{
160 enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
161 enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
162
163 /*
164 * If the CPU is not affected and the command line mode is NONE or AUTO
165 * then nothing to do.
166 */
167 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
168 (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
169 return;
170
171 switch (cmd) {
172 case SPECTRE_V2_CMD_NONE:
173 return;
174
175 case SPECTRE_V2_CMD_FORCE:
 176 /* FALLTHRU */
177 case SPECTRE_V2_CMD_AUTO:
178 goto retpoline_auto;
179
180 case SPECTRE_V2_CMD_RETPOLINE_AMD:
181 if (IS_ENABLED(CONFIG_RETPOLINE))
182 goto retpoline_amd;
183 break;
184 case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
185 if (IS_ENABLED(CONFIG_RETPOLINE))
186 goto retpoline_generic;
187 break;
188 case SPECTRE_V2_CMD_RETPOLINE:
189 if (IS_ENABLED(CONFIG_RETPOLINE))
190 goto retpoline_auto;
191 break;
192 }
193 pr_err("kernel not compiled with retpoline; no mitigation available!");
194 return;
195
196retpoline_auto:
197 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
198 retpoline_amd:
199 if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
200 pr_err("LFENCE not serializing. Switching to generic retpoline\n");
201 goto retpoline_generic;
202 }
203 mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
204 SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
205 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
206 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
207 } else {
208 retpoline_generic:
209 mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
210 SPECTRE_V2_RETPOLINE_MINIMAL;
211 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
212 }
213
214 spectre_v2_enabled = mode;
215 pr_info("%s\n", spectre_v2_strings[mode]);
216}
217
218#undef pr_fmt
219
220#ifdef CONFIG_SYSFS
221ssize_t cpu_show_meltdown(struct device *dev,
222 struct device_attribute *attr, char *buf)
223{
224 if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
225 return sprintf(buf, "Not affected\n");
226 if (boot_cpu_has(X86_FEATURE_PTI))
227 return sprintf(buf, "Mitigation: PTI\n");
228 return sprintf(buf, "Vulnerable\n");
229}
230
231ssize_t cpu_show_spectre_v1(struct device *dev,
232 struct device_attribute *attr, char *buf)
233{
234 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
235 return sprintf(buf, "Not affected\n");
236 return sprintf(buf, "Vulnerable\n");
237}
238
239ssize_t cpu_show_spectre_v2(struct device *dev,
240 struct device_attribute *attr, char *buf)
241{
242 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
243 return sprintf(buf, "Not affected\n");
244
245 return sprintf(buf, "%s\n", spectre_v2_strings[spectre_v2_enabled]);
246}
247#endif
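A condensed sketch of the auto-selection path above (assumption: simplified, with command-line handling and the no-retpoline fallback omitted); on AMD the lighter AMD retpoline is used only when LFENCE is known to serialize, and a retpoline-aware compiler upgrades the "minimal" ASM-only variants to the full ones:

    if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
        boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
            mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD
                                   : SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
    else
            mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC
                                   : SPECTRE_V2_RETPOLINE_MINIMAL;

The selected mode is what cpu_show_spectre_v2() later reports through /sys/devices/system/cpu/vulnerabilities/spectre_v2.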
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c47de4ebf63a..ef29ad001991 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -923,8 +923,11 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
923 923
924 setup_force_cpu_cap(X86_FEATURE_ALWAYS); 924 setup_force_cpu_cap(X86_FEATURE_ALWAYS);
925 925
926 /* Assume for now that ALL x86 CPUs are insecure */ 926 if (c->x86_vendor != X86_VENDOR_AMD)
927 setup_force_cpu_bug(X86_BUG_CPU_INSECURE); 927 setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
928
929 setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
930 setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
928 931
929 fpu__init_system(c); 932 fpu__init_system(c);
930 933
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 8ccdca6d3f9e..d9e460fc7a3b 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -910,8 +910,17 @@ static bool is_blacklisted(unsigned int cpu)
910{ 910{
911 struct cpuinfo_x86 *c = &cpu_data(cpu); 911 struct cpuinfo_x86 *c = &cpu_data(cpu);
912 912
913 if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X) { 913 /*
914 pr_err_once("late loading on model 79 is disabled.\n"); 914 * Late loading on model 79 with microcode revision less than 0x0b000021
915 * may result in a system hang. This behavior is documented in item
916 * BDF90, #334165 (Intel Xeon Processor E7-8800/4800 v4 Product Family).
917 */
918 if (c->x86 == 6 &&
919 c->x86_model == INTEL_FAM6_BROADWELL_X &&
920 c->x86_mask == 0x01 &&
921 c->microcode < 0x0b000021) {
922 pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
923 pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
915 return true; 924 return true;
916 } 925 }
917 926
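The erratum check above, restated as a predicate (assumption: the helper form is illustrative only); late loading is refused only for Broadwell-X model 79, stepping 1, running microcode older than revision 0x0b000021:

    static bool bdf90_affected(const struct cpuinfo_x86 *c)
    {
            return c->x86 == 6 &&
                   c->x86_model == INTEL_FAM6_BROADWELL_X &&
                   c->x86_mask == 0x01 &&
                   c->microcode < 0x0b000021;
    }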
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 5fa110699ed2..afbecff161d1 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -76,12 +76,23 @@ void show_iret_regs(struct pt_regs *regs)
76 regs->sp, regs->flags); 76 regs->sp, regs->flags);
77} 77}
78 78
79static void show_regs_safe(struct stack_info *info, struct pt_regs *regs) 79static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
80 bool partial)
80{ 81{
81 if (on_stack(info, regs, sizeof(*regs))) 82 /*
83 * These on_stack() checks aren't strictly necessary: the unwind code
84 * has already validated the 'regs' pointer. The checks are done for
85 * ordering reasons: if the registers are on the next stack, we don't
86 * want to print them out yet. Otherwise they'll be shown as part of
87 * the wrong stack. Later, when show_trace_log_lvl() switches to the
88 * next stack, this function will be called again with the same regs so
89 * they can be printed in the right context.
90 */
91 if (!partial && on_stack(info, regs, sizeof(*regs))) {
82 __show_regs(regs, 0); 92 __show_regs(regs, 0);
83 else if (on_stack(info, (void *)regs + IRET_FRAME_OFFSET, 93
84 IRET_FRAME_SIZE)) { 94 } else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
95 IRET_FRAME_SIZE)) {
85 /* 96 /*
86 * When an interrupt or exception occurs in entry code, the 97 * When an interrupt or exception occurs in entry code, the
87 * full pt_regs might not have been saved yet. In that case 98 * full pt_regs might not have been saved yet. In that case
@@ -98,11 +109,13 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
98 struct stack_info stack_info = {0}; 109 struct stack_info stack_info = {0};
99 unsigned long visit_mask = 0; 110 unsigned long visit_mask = 0;
100 int graph_idx = 0; 111 int graph_idx = 0;
112 bool partial;
101 113
102 printk("%sCall Trace:\n", log_lvl); 114 printk("%sCall Trace:\n", log_lvl);
103 115
104 unwind_start(&state, task, regs, stack); 116 unwind_start(&state, task, regs, stack);
105 stack = stack ? : get_stack_pointer(task, regs); 117 stack = stack ? : get_stack_pointer(task, regs);
118 regs = unwind_get_entry_regs(&state, &partial);
106 119
107 /* 120 /*
108 * Iterate through the stacks, starting with the current stack pointer. 121 * Iterate through the stacks, starting with the current stack pointer.
@@ -120,7 +133,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
120 * - hardirq stack 133 * - hardirq stack
121 * - entry stack 134 * - entry stack
122 */ 135 */
123 for (regs = NULL; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) { 136 for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
124 const char *stack_name; 137 const char *stack_name;
125 138
126 if (get_stack_info(stack, task, &stack_info, &visit_mask)) { 139 if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
@@ -140,7 +153,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
140 printk("%s <%s>\n", log_lvl, stack_name); 153 printk("%s <%s>\n", log_lvl, stack_name);
141 154
142 if (regs) 155 if (regs)
143 show_regs_safe(&stack_info, regs); 156 show_regs_if_on_stack(&stack_info, regs, partial);
144 157
145 /* 158 /*
146 * Scan the stack, printing any text addresses we find. At the 159 * Scan the stack, printing any text addresses we find. At the
@@ -164,7 +177,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
164 177
165 /* 178 /*
166 * Don't print regs->ip again if it was already printed 179 * Don't print regs->ip again if it was already printed
167 * by show_regs_safe() below. 180 * by show_regs_if_on_stack().
168 */ 181 */
169 if (regs && stack == &regs->ip) 182 if (regs && stack == &regs->ip)
170 goto next; 183 goto next;
@@ -199,9 +212,9 @@ next:
199 unwind_next_frame(&state); 212 unwind_next_frame(&state);
200 213
201 /* if the frame has entry regs, print them */ 214 /* if the frame has entry regs, print them */
202 regs = unwind_get_entry_regs(&state); 215 regs = unwind_get_entry_regs(&state, &partial);
203 if (regs) 216 if (regs)
204 show_regs_safe(&stack_info, regs); 217 show_regs_if_on_stack(&stack_info, regs, partial);
205 } 218 }
206 219
207 if (stack_name) 220 if (stack_name)
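A condensed sketch of the resulting decision in show_regs_if_on_stack() (assumption: simplified; the partial branch is shown here as printing just the IRET frame via show_iret_regs()): a frame is dumped in full only when the unwinder reports it as complete, otherwise only the IRET portion is shown.

    if (!partial && on_stack(info, regs, sizeof(*regs)))
            __show_regs(regs, 0);
    else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
                                 IRET_FRAME_SIZE))
            show_iret_regs(regs);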
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index b6c6468e10bc..4c8440de3355 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -8,6 +8,7 @@
8#include <asm/segment.h> 8#include <asm/segment.h>
9#include <asm/export.h> 9#include <asm/export.h>
10#include <asm/ftrace.h> 10#include <asm/ftrace.h>
11#include <asm/nospec-branch.h>
11 12
12#ifdef CC_USING_FENTRY 13#ifdef CC_USING_FENTRY
13# define function_hook __fentry__ 14# define function_hook __fentry__
@@ -197,7 +198,8 @@ ftrace_stub:
197 movl 0x4(%ebp), %edx 198 movl 0x4(%ebp), %edx
198 subl $MCOUNT_INSN_SIZE, %eax 199 subl $MCOUNT_INSN_SIZE, %eax
199 200
200 call *ftrace_trace_function 201 movl ftrace_trace_function, %ecx
202 CALL_NOSPEC %ecx
201 203
202 popl %edx 204 popl %edx
203 popl %ecx 205 popl %ecx
@@ -241,5 +243,5 @@ return_to_handler:
241 movl %eax, %ecx 243 movl %eax, %ecx
242 popl %edx 244 popl %edx
243 popl %eax 245 popl %eax
244 jmp *%ecx 246 JMP_NOSPEC %ecx
245#endif 247#endif
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index c832291d948a..7cb8ba08beb9 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -7,7 +7,7 @@
7#include <asm/ptrace.h> 7#include <asm/ptrace.h>
8#include <asm/ftrace.h> 8#include <asm/ftrace.h>
9#include <asm/export.h> 9#include <asm/export.h>
10 10#include <asm/nospec-branch.h>
11 11
12 .code64 12 .code64
13 .section .entry.text, "ax" 13 .section .entry.text, "ax"
@@ -286,8 +286,8 @@ trace:
286 * ip and parent ip are used and the list function is called when 286 * ip and parent ip are used and the list function is called when
287 * function tracing is enabled. 287 * function tracing is enabled.
288 */ 288 */
289 call *ftrace_trace_function 289 movq ftrace_trace_function, %r8
290 290 CALL_NOSPEC %r8
291 restore_mcount_regs 291 restore_mcount_regs
292 292
293 jmp fgraph_trace 293 jmp fgraph_trace
@@ -329,5 +329,5 @@ GLOBAL(return_to_handler)
329 movq 8(%rsp), %rdx 329 movq 8(%rsp), %rdx
330 movq (%rsp), %rax 330 movq (%rsp), %rax
331 addq $24, %rsp 331 addq $24, %rsp
332 jmp *%rdi 332 JMP_NOSPEC %rdi
333#endif 333#endif
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index a83b3346a0e1..c1bdbd3d3232 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -20,6 +20,7 @@
20#include <linux/mm.h> 20#include <linux/mm.h>
21 21
22#include <asm/apic.h> 22#include <asm/apic.h>
23#include <asm/nospec-branch.h>
23 24
24#ifdef CONFIG_DEBUG_STACKOVERFLOW 25#ifdef CONFIG_DEBUG_STACKOVERFLOW
25 26
@@ -55,11 +56,11 @@ DEFINE_PER_CPU(struct irq_stack *, softirq_stack);
55static void call_on_stack(void *func, void *stack) 56static void call_on_stack(void *func, void *stack)
56{ 57{
57 asm volatile("xchgl %%ebx,%%esp \n" 58 asm volatile("xchgl %%ebx,%%esp \n"
58 "call *%%edi \n" 59 CALL_NOSPEC
59 "movl %%ebx,%%esp \n" 60 "movl %%ebx,%%esp \n"
60 : "=b" (stack) 61 : "=b" (stack)
61 : "0" (stack), 62 : "0" (stack),
62 "D"(func) 63 [thunk_target] "D"(func)
63 : "memory", "cc", "edx", "ecx", "eax"); 64 : "memory", "cc", "edx", "ecx", "eax");
64} 65}
65 66
@@ -95,11 +96,11 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
95 call_on_stack(print_stack_overflow, isp); 96 call_on_stack(print_stack_overflow, isp);
96 97
97 asm volatile("xchgl %%ebx,%%esp \n" 98 asm volatile("xchgl %%ebx,%%esp \n"
98 "call *%%edi \n" 99 CALL_NOSPEC
99 "movl %%ebx,%%esp \n" 100 "movl %%ebx,%%esp \n"
100 : "=a" (arg1), "=b" (isp) 101 : "=a" (arg1), "=b" (isp)
101 : "0" (desc), "1" (isp), 102 : "0" (desc), "1" (isp),
102 "D" (desc->handle_irq) 103 [thunk_target] "D" (desc->handle_irq)
103 : "memory", "cc", "ecx"); 104 : "memory", "cc", "ecx");
104 return 1; 105 return 1;
105} 106}
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index aed9d94bd46f..832a6acd730f 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -47,7 +47,7 @@
47 * section. Since TSS's are completely CPU-local, we want them 47 * section. Since TSS's are completely CPU-local, we want them
48 * on exact cacheline boundaries, to eliminate cacheline ping-pong. 48 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
49 */ 49 */
50__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss_rw) = { 50__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
51 .x86_tss = { 51 .x86_tss = {
52 /* 52 /*
53 * .sp0 is only used when entering ring 0 from a lower 53 * .sp0 is only used when entering ring 0 from a lower
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 8af2e8d0c0a1..145810b0edf6 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -906,9 +906,6 @@ void __init setup_arch(char **cmdline_p)
906 set_bit(EFI_BOOT, &efi.flags); 906 set_bit(EFI_BOOT, &efi.flags);
907 set_bit(EFI_64BIT, &efi.flags); 907 set_bit(EFI_64BIT, &efi.flags);
908 } 908 }
909
910 if (efi_enabled(EFI_BOOT))
911 efi_memblock_x86_reserve_range();
912#endif 909#endif
913 910
914 x86_init.oem.arch_setup(); 911 x86_init.oem.arch_setup();
@@ -962,6 +959,8 @@ void __init setup_arch(char **cmdline_p)
962 959
963 parse_early_param(); 960 parse_early_param();
964 961
962 if (efi_enabled(EFI_BOOT))
963 efi_memblock_x86_reserve_range();
965#ifdef CONFIG_MEMORY_HOTPLUG 964#ifdef CONFIG_MEMORY_HOTPLUG
966 /* 965 /*
967 * Memory used by the kernel cannot be hot-removed because Linux 966 * Memory used by the kernel cannot be hot-removed because Linux
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 20161ef53537..093f2ea5dd56 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -102,7 +102,7 @@ __save_stack_trace_reliable(struct stack_trace *trace,
102 for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state); 102 for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state);
103 unwind_next_frame(&state)) { 103 unwind_next_frame(&state)) {
104 104
105 regs = unwind_get_entry_regs(&state); 105 regs = unwind_get_entry_regs(&state, NULL);
106 if (regs) { 106 if (regs) {
107 /* 107 /*
108 * Kernel mode registers on the stack indicate an 108 * Kernel mode registers on the stack indicate an
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index a4eb27918ceb..a2486f444073 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -138,6 +138,17 @@ static int map_tboot_page(unsigned long vaddr, unsigned long pfn,
138 return -1; 138 return -1;
139 set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot)); 139 set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot));
140 pte_unmap(pte); 140 pte_unmap(pte);
141
142 /*
143 * PTI poisons low addresses in the kernel page tables in the
144 * name of making them unusable for userspace. To execute
145 * code at such a low address, the poison must be cleared.
146 *
147 * Note: 'pgd' actually gets set in p4d_alloc() _or_
148 * pud_alloc() depending on 4/5-level paging.
149 */
150 pgd->pgd &= ~_PAGE_NX;
151
141 return 0; 152 return 0;
142} 153}
143 154
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c4deb1f34faa..2b8eb4da4d08 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3781,7 +3781,8 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
3781bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu) 3781bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
3782{ 3782{
3783 if (unlikely(!lapic_in_kernel(vcpu) || 3783 if (unlikely(!lapic_in_kernel(vcpu) ||
3784 kvm_event_needs_reinjection(vcpu))) 3784 kvm_event_needs_reinjection(vcpu) ||
3785 vcpu->arch.exception.pending))
3785 return false; 3786 return false;
3786 3787
3787 if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu)) 3788 if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu))
@@ -5465,30 +5466,34 @@ static void mmu_destroy_caches(void)
5465 5466
5466int kvm_mmu_module_init(void) 5467int kvm_mmu_module_init(void)
5467{ 5468{
5469 int ret = -ENOMEM;
5470
5468 kvm_mmu_clear_all_pte_masks(); 5471 kvm_mmu_clear_all_pte_masks();
5469 5472
5470 pte_list_desc_cache = kmem_cache_create("pte_list_desc", 5473 pte_list_desc_cache = kmem_cache_create("pte_list_desc",
5471 sizeof(struct pte_list_desc), 5474 sizeof(struct pte_list_desc),
5472 0, SLAB_ACCOUNT, NULL); 5475 0, SLAB_ACCOUNT, NULL);
5473 if (!pte_list_desc_cache) 5476 if (!pte_list_desc_cache)
5474 goto nomem; 5477 goto out;
5475 5478
5476 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", 5479 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
5477 sizeof(struct kvm_mmu_page), 5480 sizeof(struct kvm_mmu_page),
5478 0, SLAB_ACCOUNT, NULL); 5481 0, SLAB_ACCOUNT, NULL);
5479 if (!mmu_page_header_cache) 5482 if (!mmu_page_header_cache)
5480 goto nomem; 5483 goto out;
5481 5484
5482 if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL)) 5485 if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
5483 goto nomem; 5486 goto out;
5484 5487
5485 register_shrinker(&mmu_shrinker); 5488 ret = register_shrinker(&mmu_shrinker);
5489 if (ret)
5490 goto out;
5486 5491
5487 return 0; 5492 return 0;
5488 5493
5489nomem: 5494out:
5490 mmu_destroy_caches(); 5495 mmu_destroy_caches();
5491 return -ENOMEM; 5496 return ret;
5492} 5497}
5493 5498
5494/* 5499/*
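A generic sketch of the error-unwinding idiom adopted above (assumption: alloc_caches() is a hypothetical stand-in for the two kmem_cache_create() calls); every failure funnels through one label, and the last error code, now including register_shrinker()'s, is what gets returned:

    int ret = -ENOMEM;

    if (!alloc_caches())                    /* alloc_caches(): hypothetical */
            goto out;

    ret = register_shrinker(&mmu_shrinker);
    if (ret)
            goto out;

    return 0;
    out:
            mmu_destroy_caches();
            return ret;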
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index eb714f1cdf7e..f40d0da1f1d3 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -45,6 +45,7 @@
45#include <asm/debugreg.h> 45#include <asm/debugreg.h>
46#include <asm/kvm_para.h> 46#include <asm/kvm_para.h>
47#include <asm/irq_remapping.h> 47#include <asm/irq_remapping.h>
48#include <asm/nospec-branch.h>
48 49
49#include <asm/virtext.h> 50#include <asm/virtext.h>
50#include "trace.h" 51#include "trace.h"
@@ -361,7 +362,6 @@ static void recalc_intercepts(struct vcpu_svm *svm)
361{ 362{
362 struct vmcb_control_area *c, *h; 363 struct vmcb_control_area *c, *h;
363 struct nested_state *g; 364 struct nested_state *g;
364 u32 h_intercept_exceptions;
365 365
366 mark_dirty(svm->vmcb, VMCB_INTERCEPTS); 366 mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
367 367
@@ -372,14 +372,9 @@ static void recalc_intercepts(struct vcpu_svm *svm)
372 h = &svm->nested.hsave->control; 372 h = &svm->nested.hsave->control;
373 g = &svm->nested; 373 g = &svm->nested;
374 374
375 /* No need to intercept #UD if L1 doesn't intercept it */
376 h_intercept_exceptions =
377 h->intercept_exceptions & ~(1U << UD_VECTOR);
378
379 c->intercept_cr = h->intercept_cr | g->intercept_cr; 375 c->intercept_cr = h->intercept_cr | g->intercept_cr;
380 c->intercept_dr = h->intercept_dr | g->intercept_dr; 376 c->intercept_dr = h->intercept_dr | g->intercept_dr;
381 c->intercept_exceptions = 377 c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
382 h_intercept_exceptions | g->intercept_exceptions;
383 c->intercept = h->intercept | g->intercept; 378 c->intercept = h->intercept | g->intercept;
384} 379}
385 380
@@ -2202,7 +2197,6 @@ static int ud_interception(struct vcpu_svm *svm)
2202{ 2197{
2203 int er; 2198 int er;
2204 2199
2205 WARN_ON_ONCE(is_guest_mode(&svm->vcpu));
2206 er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD); 2200 er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
2207 if (er == EMULATE_USER_EXIT) 2201 if (er == EMULATE_USER_EXIT)
2208 return 0; 2202 return 0;
@@ -4986,6 +4980,25 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
4986 "mov %%r14, %c[r14](%[svm]) \n\t" 4980 "mov %%r14, %c[r14](%[svm]) \n\t"
4987 "mov %%r15, %c[r15](%[svm]) \n\t" 4981 "mov %%r15, %c[r15](%[svm]) \n\t"
4988#endif 4982#endif
4983 /*
4984 * Clear host registers marked as clobbered to prevent
4985 * speculative use.
4986 */
4987 "xor %%" _ASM_BX ", %%" _ASM_BX " \n\t"
4988 "xor %%" _ASM_CX ", %%" _ASM_CX " \n\t"
4989 "xor %%" _ASM_DX ", %%" _ASM_DX " \n\t"
4990 "xor %%" _ASM_SI ", %%" _ASM_SI " \n\t"
4991 "xor %%" _ASM_DI ", %%" _ASM_DI " \n\t"
4992#ifdef CONFIG_X86_64
4993 "xor %%r8, %%r8 \n\t"
4994 "xor %%r9, %%r9 \n\t"
4995 "xor %%r10, %%r10 \n\t"
4996 "xor %%r11, %%r11 \n\t"
4997 "xor %%r12, %%r12 \n\t"
4998 "xor %%r13, %%r13 \n\t"
4999 "xor %%r14, %%r14 \n\t"
5000 "xor %%r15, %%r15 \n\t"
5001#endif
4989 "pop %%" _ASM_BP 5002 "pop %%" _ASM_BP
4990 : 5003 :
4991 : [svm]"a"(svm), 5004 : [svm]"a"(svm),
@@ -5015,6 +5028,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
5015#endif 5028#endif
5016 ); 5029 );
5017 5030
5031 /* Eliminate branch target predictions from guest mode */
5032 vmexit_fill_RSB();
5033
5018#ifdef CONFIG_X86_64 5034#ifdef CONFIG_X86_64
5019 wrmsrl(MSR_GS_BASE, svm->host.gs_base); 5035 wrmsrl(MSR_GS_BASE, svm->host.gs_base);
5020#else 5036#else
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 023afa0c8887..c829d89e2e63 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -50,6 +50,7 @@
50#include <asm/apic.h> 50#include <asm/apic.h>
51#include <asm/irq_remapping.h> 51#include <asm/irq_remapping.h>
52#include <asm/mmu_context.h> 52#include <asm/mmu_context.h>
53#include <asm/nospec-branch.h>
53 54
54#include "trace.h" 55#include "trace.h"
55#include "pmu.h" 56#include "pmu.h"
@@ -899,8 +900,16 @@ static inline short vmcs_field_to_offset(unsigned long field)
899{ 900{
900 BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX); 901 BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX);
901 902
902 if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) || 903 if (field >= ARRAY_SIZE(vmcs_field_to_offset_table))
903 vmcs_field_to_offset_table[field] == 0) 904 return -ENOENT;
905
906 /*
907 * FIXME: Mitigation for CVE-2017-5753. To be replaced with a
908 * generic mechanism.
909 */
910 asm("lfence");
911
912 if (vmcs_field_to_offset_table[field] == 0)
904 return -ENOENT; 913 return -ENOENT;
905 914
906 return vmcs_field_to_offset_table[field]; 915 return vmcs_field_to_offset_table[field];
@@ -1887,7 +1896,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
1887{ 1896{
1888 u32 eb; 1897 u32 eb;
1889 1898
1890 eb = (1u << PF_VECTOR) | (1u << MC_VECTOR) | 1899 eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
1891 (1u << DB_VECTOR) | (1u << AC_VECTOR); 1900 (1u << DB_VECTOR) | (1u << AC_VECTOR);
1892 if ((vcpu->guest_debug & 1901 if ((vcpu->guest_debug &
1893 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) == 1902 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
@@ -1905,8 +1914,6 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
1905 */ 1914 */
1906 if (is_guest_mode(vcpu)) 1915 if (is_guest_mode(vcpu))
1907 eb |= get_vmcs12(vcpu)->exception_bitmap; 1916 eb |= get_vmcs12(vcpu)->exception_bitmap;
1908 else
1909 eb |= 1u << UD_VECTOR;
1910 1917
1911 vmcs_write32(EXCEPTION_BITMAP, eb); 1918 vmcs_write32(EXCEPTION_BITMAP, eb);
1912} 1919}
@@ -5917,7 +5924,6 @@ static int handle_exception(struct kvm_vcpu *vcpu)
5917 return 1; /* already handled by vmx_vcpu_run() */ 5924 return 1; /* already handled by vmx_vcpu_run() */
5918 5925
5919 if (is_invalid_opcode(intr_info)) { 5926 if (is_invalid_opcode(intr_info)) {
5920 WARN_ON_ONCE(is_guest_mode(vcpu));
5921 er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); 5927 er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
5922 if (er == EMULATE_USER_EXIT) 5928 if (er == EMULATE_USER_EXIT)
5923 return 0; 5929 return 0;
@@ -9415,6 +9421,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
9415 /* Save guest registers, load host registers, keep flags */ 9421 /* Save guest registers, load host registers, keep flags */
9416 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t" 9422 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
9417 "pop %0 \n\t" 9423 "pop %0 \n\t"
9424 "setbe %c[fail](%0)\n\t"
9418 "mov %%" _ASM_AX ", %c[rax](%0) \n\t" 9425 "mov %%" _ASM_AX ", %c[rax](%0) \n\t"
9419 "mov %%" _ASM_BX ", %c[rbx](%0) \n\t" 9426 "mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
9420 __ASM_SIZE(pop) " %c[rcx](%0) \n\t" 9427 __ASM_SIZE(pop) " %c[rcx](%0) \n\t"
@@ -9431,12 +9438,23 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
9431 "mov %%r13, %c[r13](%0) \n\t" 9438 "mov %%r13, %c[r13](%0) \n\t"
9432 "mov %%r14, %c[r14](%0) \n\t" 9439 "mov %%r14, %c[r14](%0) \n\t"
9433 "mov %%r15, %c[r15](%0) \n\t" 9440 "mov %%r15, %c[r15](%0) \n\t"
9441 "xor %%r8d, %%r8d \n\t"
9442 "xor %%r9d, %%r9d \n\t"
9443 "xor %%r10d, %%r10d \n\t"
9444 "xor %%r11d, %%r11d \n\t"
9445 "xor %%r12d, %%r12d \n\t"
9446 "xor %%r13d, %%r13d \n\t"
9447 "xor %%r14d, %%r14d \n\t"
9448 "xor %%r15d, %%r15d \n\t"
9434#endif 9449#endif
9435 "mov %%cr2, %%" _ASM_AX " \n\t" 9450 "mov %%cr2, %%" _ASM_AX " \n\t"
9436 "mov %%" _ASM_AX ", %c[cr2](%0) \n\t" 9451 "mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
9437 9452
9453 "xor %%eax, %%eax \n\t"
9454 "xor %%ebx, %%ebx \n\t"
9455 "xor %%esi, %%esi \n\t"
9456 "xor %%edi, %%edi \n\t"
9438 "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t" 9457 "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
9439 "setbe %c[fail](%0) \n\t"
9440 ".pushsection .rodata \n\t" 9458 ".pushsection .rodata \n\t"
9441 ".global vmx_return \n\t" 9459 ".global vmx_return \n\t"
9442 "vmx_return: " _ASM_PTR " 2b \n\t" 9460 "vmx_return: " _ASM_PTR " 2b \n\t"
@@ -9473,6 +9491,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
9473#endif 9491#endif
9474 ); 9492 );
9475 9493
9494 /* Eliminate branch target predictions from guest mode */
9495 vmexit_fill_RSB();
9496
9476 /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ 9497 /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
9477 if (debugctlmsr) 9498 if (debugctlmsr)
9478 update_debugctlmsr(debugctlmsr); 9499 update_debugctlmsr(debugctlmsr);
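A generic sketch of the bounds-check-then-fence pattern used in vmcs_field_to_offset() above (assumption: the lookup() helper is illustrative, not a kernel API); the LFENCE keeps speculative execution from racing ahead with an out-of-range index before the comparison has retired:

    static short lookup(unsigned long field, const short *table, size_t size)
    {
            if (field >= size)
                    return -ENOENT;

            asm volatile("lfence" ::: "memory");    /* speculation barrier */

            return table[field];
    }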
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 7b181b61170e..f23934bbaf4e 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -26,6 +26,7 @@ lib-y += memcpy_$(BITS).o
26lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o 26lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
27lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o 27lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
28lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o 28lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
29lib-$(CONFIG_RETPOLINE) += retpoline.o
29 30
30obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o 31obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
31 32
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index 4d34bb548b41..46e71a74e612 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -29,7 +29,8 @@
29#include <asm/errno.h> 29#include <asm/errno.h>
30#include <asm/asm.h> 30#include <asm/asm.h>
31#include <asm/export.h> 31#include <asm/export.h>
32 32#include <asm/nospec-branch.h>
33
33/* 34/*
34 * computes a partial checksum, e.g. for TCP/UDP fragments 35 * computes a partial checksum, e.g. for TCP/UDP fragments
35 */ 36 */
@@ -156,7 +157,7 @@ ENTRY(csum_partial)
156 negl %ebx 157 negl %ebx
157 lea 45f(%ebx,%ebx,2), %ebx 158 lea 45f(%ebx,%ebx,2), %ebx
158 testl %esi, %esi 159 testl %esi, %esi
159 jmp *%ebx 160 JMP_NOSPEC %ebx
160 161
161 # Handle 2-byte-aligned regions 162 # Handle 2-byte-aligned regions
16220: addw (%esi), %ax 16320: addw (%esi), %ax
@@ -439,7 +440,7 @@ ENTRY(csum_partial_copy_generic)
439 andl $-32,%edx 440 andl $-32,%edx
440 lea 3f(%ebx,%ebx), %ebx 441 lea 3f(%ebx,%ebx), %ebx
441 testl %esi, %esi 442 testl %esi, %esi
442 jmp *%ebx 443 JMP_NOSPEC %ebx
4431: addl $64,%esi 4441: addl $64,%esi
444 addl $64,%edi 445 addl $64,%edi
445 SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl) 446 SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl)
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
new file mode 100644
index 000000000000..cb45c6cb465f
--- /dev/null
+++ b/arch/x86/lib/retpoline.S
@@ -0,0 +1,48 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#include <linux/stringify.h>
4#include <linux/linkage.h>
5#include <asm/dwarf2.h>
6#include <asm/cpufeatures.h>
7#include <asm/alternative-asm.h>
8#include <asm/export.h>
9#include <asm/nospec-branch.h>
10
11.macro THUNK reg
12 .section .text.__x86.indirect_thunk.\reg
13
14ENTRY(__x86_indirect_thunk_\reg)
15 CFI_STARTPROC
16 JMP_NOSPEC %\reg
17 CFI_ENDPROC
18ENDPROC(__x86_indirect_thunk_\reg)
19.endm
20
21/*
22 * Despite being an assembler file we can't just use .irp here
23 * because __KSYM_DEPS__ only uses the C preprocessor and would
24 * only see one instance of "__x86_indirect_thunk_\reg" rather
25 * than one per register with the correct names. So we do it
26 * the simple and nasty way...
27 */
28#define EXPORT_THUNK(reg) EXPORT_SYMBOL(__x86_indirect_thunk_ ## reg)
29#define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg)
30
31GENERATE_THUNK(_ASM_AX)
32GENERATE_THUNK(_ASM_BX)
33GENERATE_THUNK(_ASM_CX)
34GENERATE_THUNK(_ASM_DX)
35GENERATE_THUNK(_ASM_SI)
36GENERATE_THUNK(_ASM_DI)
37GENERATE_THUNK(_ASM_BP)
38GENERATE_THUNK(_ASM_SP)
39#ifdef CONFIG_64BIT
40GENERATE_THUNK(r8)
41GENERATE_THUNK(r9)
42GENERATE_THUNK(r10)
43GENERATE_THUNK(r11)
44GENERATE_THUNK(r12)
45GENERATE_THUNK(r13)
46GENERATE_THUNK(r14)
47GENERATE_THUNK(r15)
48#endif
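A hedged illustration of how the thunks above are used (assumption: the callback variable and the register choice are purely illustrative); with CONFIG_RETPOLINE the compiler emits the indirect call below as "call __x86_indirect_thunk_rax" rather than "call *%rax", and the exported symbols let modules link against the same thunks:

    void (*handler)(void) = some_callback;  /* some_callback: hypothetical */

    handler();      /* compiled as a call through an indirect thunk */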
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index f56902c1f04b..2a4849e92831 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -61,10 +61,10 @@ enum address_markers_idx {
61 KASAN_SHADOW_START_NR, 61 KASAN_SHADOW_START_NR,
62 KASAN_SHADOW_END_NR, 62 KASAN_SHADOW_END_NR,
63#endif 63#endif
64 CPU_ENTRY_AREA_NR,
64#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL) 65#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL)
65 LDT_NR, 66 LDT_NR,
66#endif 67#endif
67 CPU_ENTRY_AREA_NR,
68#ifdef CONFIG_X86_ESPFIX64 68#ifdef CONFIG_X86_ESPFIX64
69 ESPFIX_START_NR, 69 ESPFIX_START_NR,
70#endif 70#endif
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 8ca324d07282..82f5252c723a 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -868,7 +868,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
868 .next_asid = 1, 868 .next_asid = 1,
869 .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */ 869 .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
870}; 870};
871EXPORT_SYMBOL_GPL(cpu_tlbstate); 871EXPORT_PER_CPU_SYMBOL(cpu_tlbstate);
872 872
873void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache) 873void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
874{ 874{
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 879ef930e2c2..aedebd2ebf1e 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -34,25 +34,14 @@
34#define TB_SHIFT 40 34#define TB_SHIFT 40
35 35
36/* 36/*
37 * Virtual address start and end range for randomization. The end changes base 37 * Virtual address start and end range for randomization.
38 * on configuration to have the highest amount of space for randomization.
39 * It increases the possible random position for each randomized region.
40 * 38 *
41 * You need to add an if/def entry if you introduce a new memory region 39 * The end address could depend on more configuration options to make the
42 * compatible with KASLR. Your entry must be in logical order with memory 40 * highest amount of space for randomization available, but that's too hard
43 * layout. For example, ESPFIX is before EFI because its virtual address is 41 * to keep straight and caused issues already.
44 * before. You also need to add a BUILD_BUG_ON() in kernel_randomize_memory() to
45 * ensure that this order is correct and won't be changed.
46 */ 42 */
47static const unsigned long vaddr_start = __PAGE_OFFSET_BASE; 43static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
48 44static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;
49#if defined(CONFIG_X86_ESPFIX64)
50static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
51#elif defined(CONFIG_EFI)
52static const unsigned long vaddr_end = EFI_VA_END;
53#else
54static const unsigned long vaddr_end = __START_KERNEL_map;
55#endif
56 45
57/* Default values */ 46/* Default values */
58unsigned long page_offset_base = __PAGE_OFFSET_BASE; 47unsigned long page_offset_base = __PAGE_OFFSET_BASE;
@@ -101,15 +90,12 @@ void __init kernel_randomize_memory(void)
101 unsigned long remain_entropy; 90 unsigned long remain_entropy;
102 91
103 /* 92 /*
104 * All these BUILD_BUG_ON checks ensures the memory layout is 93 * These BUILD_BUG_ON checks ensure the memory layout is consistent
105 * consistent with the vaddr_start/vaddr_end variables. 94 * with the vaddr_start/vaddr_end variables. These checks are very
95 * limited....
106 */ 96 */
107 BUILD_BUG_ON(vaddr_start >= vaddr_end); 97 BUILD_BUG_ON(vaddr_start >= vaddr_end);
108 BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) && 98 BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE);
109 vaddr_end >= EFI_VA_END);
110 BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
111 IS_ENABLED(CONFIG_EFI)) &&
112 vaddr_end >= __START_KERNEL_map);
113 BUILD_BUG_ON(vaddr_end > __START_KERNEL_map); 99 BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);
114 100
115 if (!kaslr_memory_enabled()) 101 if (!kaslr_memory_enabled())
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index bce8aea65606..ce38f165489b 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -56,13 +56,13 @@
56 56
57static void __init pti_print_if_insecure(const char *reason) 57static void __init pti_print_if_insecure(const char *reason)
58{ 58{
59 if (boot_cpu_has_bug(X86_BUG_CPU_INSECURE)) 59 if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
60 pr_info("%s\n", reason); 60 pr_info("%s\n", reason);
61} 61}
62 62
63static void __init pti_print_if_secure(const char *reason) 63static void __init pti_print_if_secure(const char *reason)
64{ 64{
65 if (!boot_cpu_has_bug(X86_BUG_CPU_INSECURE)) 65 if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
66 pr_info("%s\n", reason); 66 pr_info("%s\n", reason);
67} 67}
68 68
@@ -96,7 +96,7 @@ void __init pti_check_boottime_disable(void)
96 } 96 }
97 97
98autosel: 98autosel:
99 if (!boot_cpu_has_bug(X86_BUG_CPU_INSECURE)) 99 if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
100 return; 100 return;
101enable: 101enable:
102 setup_force_cpu_cap(X86_FEATURE_PTI); 102 setup_force_cpu_cap(X86_FEATURE_PTI);
@@ -149,7 +149,7 @@ pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
149 * 149 *
150 * Returns a pointer to a P4D on success, or NULL on failure. 150 * Returns a pointer to a P4D on success, or NULL on failure.
151 */ 151 */
152static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address) 152static __init p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
153{ 153{
154 pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address)); 154 pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
155 gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); 155 gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
@@ -164,12 +164,7 @@ static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
164 if (!new_p4d_page) 164 if (!new_p4d_page)
165 return NULL; 165 return NULL;
166 166
167 if (pgd_none(*pgd)) { 167 set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
168 set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
169 new_p4d_page = 0;
170 }
171 if (new_p4d_page)
172 free_page(new_p4d_page);
173 } 168 }
174 BUILD_BUG_ON(pgd_large(*pgd) != 0); 169 BUILD_BUG_ON(pgd_large(*pgd) != 0);
175 170
@@ -182,7 +177,7 @@ static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
182 * 177 *
183 * Returns a pointer to a PMD on success, or NULL on failure. 178 * Returns a pointer to a PMD on success, or NULL on failure.
184 */ 179 */
185static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) 180static __init pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
186{ 181{
187 gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); 182 gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
188 p4d_t *p4d = pti_user_pagetable_walk_p4d(address); 183 p4d_t *p4d = pti_user_pagetable_walk_p4d(address);
@@ -194,12 +189,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
194 if (!new_pud_page) 189 if (!new_pud_page)
195 return NULL; 190 return NULL;
196 191
197 if (p4d_none(*p4d)) { 192 set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
198 set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
199 new_pud_page = 0;
200 }
201 if (new_pud_page)
202 free_page(new_pud_page);
203 } 193 }
204 194
205 pud = pud_offset(p4d, address); 195 pud = pud_offset(p4d, address);
@@ -213,12 +203,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
213 if (!new_pmd_page) 203 if (!new_pmd_page)
214 return NULL; 204 return NULL;
215 205
216 if (pud_none(*pud)) { 206 set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
217 set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
218 new_pmd_page = 0;
219 }
220 if (new_pmd_page)
221 free_page(new_pmd_page);
222 } 207 }
223 208
224 return pmd_offset(pud, address); 209 return pmd_offset(pud, address);
@@ -251,12 +236,7 @@ static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
251 if (!new_pte_page) 236 if (!new_pte_page)
252 return NULL; 237 return NULL;
253 238
254 if (pmd_none(*pmd)) { 239 set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
255 set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
256 new_pte_page = 0;
257 }
258 if (new_pte_page)
259 free_page(new_pte_page);
260 } 240 }
261 241
262 pte = pte_offset_kernel(pmd, address); 242 pte = pte_offset_kernel(pmd, address);
@@ -367,7 +347,8 @@ static void __init pti_setup_espfix64(void)
367static void __init pti_clone_entry_text(void) 347static void __init pti_clone_entry_text(void)
368{ 348{
369 pti_clone_pmds((unsigned long) __entry_text_start, 349 pti_clone_pmds((unsigned long) __entry_text_start,
370 (unsigned long) __irqentry_text_end, _PAGE_RW); 350 (unsigned long) __irqentry_text_end,
351 _PAGE_RW | _PAGE_GLOBAL);
371} 352}
372 353
373/* 354/*
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 7a5350d08cef..563049c483a1 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -594,6 +594,11 @@ char *__init pcibios_setup(char *str)
594 } else if (!strcmp(str, "nocrs")) { 594 } else if (!strcmp(str, "nocrs")) {
595 pci_probe |= PCI_ROOT_NO_CRS; 595 pci_probe |= PCI_ROOT_NO_CRS;
596 return NULL; 596 return NULL;
597#ifdef CONFIG_PHYS_ADDR_T_64BIT
598 } else if (!strcmp(str, "big_root_window")) {
599 pci_probe |= PCI_BIG_ROOT_WINDOW;
600 return NULL;
601#endif
597 } else if (!strcmp(str, "earlydump")) { 602 } else if (!strcmp(str, "earlydump")) {
598 pci_early_dump_regs = 1; 603 pci_early_dump_regs = 1;
599 return NULL; 604 return NULL;
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index e663d6bf1328..f6a26e3cb476 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -662,10 +662,14 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
662 */ 662 */
663static void pci_amd_enable_64bit_bar(struct pci_dev *dev) 663static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
664{ 664{
665 unsigned i;
666 u32 base, limit, high; 665 u32 base, limit, high;
667 struct resource *res, *conflict;
668 struct pci_dev *other; 666 struct pci_dev *other;
667 struct resource *res;
668 unsigned i;
669 int r;
670
671 if (!(pci_probe & PCI_BIG_ROOT_WINDOW))
672 return;
669 673
670 /* Check that we are the only device of that type */ 674 /* Check that we are the only device of that type */
671 other = pci_get_device(dev->vendor, dev->device, NULL); 675 other = pci_get_device(dev->vendor, dev->device, NULL);
@@ -699,22 +703,25 @@ static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
699 if (!res) 703 if (!res)
700 return; 704 return;
701 705
706 /*
707 * Allocate a 256GB window directly below the 0xfd00000000 hardware
708 * limit (see AMD Family 15h Models 30h-3Fh BKDG, sec 2.4.6).
709 */
702 res->name = "PCI Bus 0000:00"; 710 res->name = "PCI Bus 0000:00";
703 res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM | 711 res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM |
704 IORESOURCE_MEM_64 | IORESOURCE_WINDOW; 712 IORESOURCE_MEM_64 | IORESOURCE_WINDOW;
705 res->start = 0x100000000ull; 713 res->start = 0xbd00000000ull;
706 res->end = 0xfd00000000ull - 1; 714 res->end = 0xfd00000000ull - 1;
707 715
708 /* Just grab the free area behind system memory for this */ 716 r = request_resource(&iomem_resource, res);
709 while ((conflict = request_resource_conflict(&iomem_resource, res))) { 717 if (r) {
710 if (conflict->end >= res->end) { 718 kfree(res);
711 kfree(res); 719 return;
712 return;
713 }
714 res->start = conflict->end + 1;
715 } 720 }
716 721
717 dev_info(&dev->dev, "adding root bus resource %pR\n", res); 722 dev_info(&dev->dev, "adding root bus resource %pR (tainting kernel)\n",
723 res);
724 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
718 725
719 base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) | 726 base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) |
720 AMD_141b_MMIO_BASE_RE_MASK | AMD_141b_MMIO_BASE_WE_MASK; 727 AMD_141b_MMIO_BASE_RE_MASK | AMD_141b_MMIO_BASE_WE_MASK;
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index d87ac96e37ed..2dd15e967c3f 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -135,7 +135,9 @@ pgd_t * __init efi_call_phys_prolog(void)
135 pud[j] = *pud_offset(p4d_k, vaddr); 135 pud[j] = *pud_offset(p4d_k, vaddr);
136 } 136 }
137 } 137 }
138 pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX;
138 } 139 }
140
139out: 141out:
140 __flush_tlb_all(); 142 __flush_tlb_all();
141 143
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 8a99a2e96537..5b513ccffde4 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -592,7 +592,18 @@ static int qrk_capsule_setup_info(struct capsule_info *cap_info, void **pkbuff,
592 /* 592 /*
593 * Update the first page pointer to skip over the CSH header. 593 * Update the first page pointer to skip over the CSH header.
594 */ 594 */
595 cap_info->pages[0] += csh->headersize; 595 cap_info->phys[0] += csh->headersize;
596
597 /*
598 * cap_info->capsule should point at a virtual mapping of the entire
599 * capsule, starting at the capsule header. Our image has the Quark
600 * security header prepended, so we cannot rely on the default vmap()
601 * mapping created by the generic capsule code.
602 * Given that the Quark firmware does not appear to care about the
603 * virtual mapping, let's just point cap_info->capsule at our copy
604 * of the capsule header.
605 */
606 cap_info->capsule = &cap_info->header;
596 607
597 return 1; 608 return 1;
598} 609}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bt.c b/arch/x86/platform/intel-mid/device_libs/platform_bt.c
index dc036e511f48..5a0483e7bf66 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_bt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_bt.c
@@ -60,7 +60,7 @@ static int __init tng_bt_sfi_setup(struct bt_sfi_data *ddata)
60 return 0; 60 return 0;
61} 61}
62 62
63static const struct bt_sfi_data tng_bt_sfi_data __initdata = { 63static struct bt_sfi_data tng_bt_sfi_data __initdata = {
64 .setup = tng_bt_sfi_setup, 64 .setup = tng_bt_sfi_setup,
65}; 65};
66 66
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 4d62c071b166..d85076223a69 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1325,20 +1325,18 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
1325{ 1325{
1326 struct { 1326 struct {
1327 struct mmuext_op op; 1327 struct mmuext_op op;
1328#ifdef CONFIG_SMP
1329 DECLARE_BITMAP(mask, num_processors);
1330#else
1331 DECLARE_BITMAP(mask, NR_CPUS); 1328 DECLARE_BITMAP(mask, NR_CPUS);
1332#endif
1333 } *args; 1329 } *args;
1334 struct multicall_space mcs; 1330 struct multicall_space mcs;
1331 const size_t mc_entry_size = sizeof(args->op) +
1332 sizeof(args->mask[0]) * BITS_TO_LONGS(num_possible_cpus());
1335 1333
1336 trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end); 1334 trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end);
1337 1335
1338 if (cpumask_empty(cpus)) 1336 if (cpumask_empty(cpus))
1339 return; /* nothing to do */ 1337 return; /* nothing to do */
1340 1338
1341 mcs = xen_mc_entry(sizeof(*args)); 1339 mcs = xen_mc_entry(mc_entry_size);
1342 args = mcs.args; 1340 args = mcs.args;
1343 args->op.arg2.vcpumask = to_cpumask(args->mask); 1341 args->op.arg2.vcpumask = to_cpumask(args->mask);
1344 1342
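A worked sizing example for the new mc_entry_size above (assumption: 64-bit longs, the old NR_CPUS=4096 sized mask, and a guest that boots with 8 possible vCPUs):

    old:  sizeof(args->op) + BITS_TO_LONGS(4096) * 8  =  sizeof(op) + 512 bytes
    new:  sizeof(args->op) + BITS_TO_LONGS(8)    * 8  =  sizeof(op) +   8 bytes

so the per-call multicall slot shrinks from half a kilobyte of CPU mask to a single long on small guests.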
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 75011b80660f..3b34745d0a52 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -72,7 +72,7 @@ u64 xen_clocksource_read(void);
72void xen_setup_cpu_clockevents(void); 72void xen_setup_cpu_clockevents(void);
73void xen_save_time_memory_area(void); 73void xen_save_time_memory_area(void);
74void xen_restore_time_memory_area(void); 74void xen_restore_time_memory_area(void);
75void __init xen_init_time_ops(void); 75void __ref xen_init_time_ops(void);
76void __init xen_hvm_init_time_ops(void); 76void __init xen_hvm_init_time_ops(void);
77 77
78irqreturn_t xen_debug_interrupt(int irq, void *dev_id); 78irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
diff --git a/block/blk-core.c b/block/blk-core.c
index b8881750a3ac..3ba4326a63b5 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -562,6 +562,13 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
562 } 562 }
563} 563}
564 564
565void blk_drain_queue(struct request_queue *q)
566{
567 spin_lock_irq(q->queue_lock);
568 __blk_drain_queue(q, true);
569 spin_unlock_irq(q->queue_lock);
570}
571
565/** 572/**
566 * blk_queue_bypass_start - enter queue bypass mode 573 * blk_queue_bypass_start - enter queue bypass mode
567 * @q: queue of interest 574 * @q: queue of interest
@@ -689,8 +696,6 @@ void blk_cleanup_queue(struct request_queue *q)
689 */ 696 */
690 blk_freeze_queue(q); 697 blk_freeze_queue(q);
691 spin_lock_irq(lock); 698 spin_lock_irq(lock);
692 if (!q->mq_ops)
693 __blk_drain_queue(q, true);
694 queue_flag_set(QUEUE_FLAG_DEAD, q); 699 queue_flag_set(QUEUE_FLAG_DEAD, q);
695 spin_unlock_irq(lock); 700 spin_unlock_irq(lock);
696 701
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 11097477eeab..3d3797327491 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -161,6 +161,8 @@ void blk_freeze_queue(struct request_queue *q)
161 * exported to drivers as the only user for unfreeze is blk_mq. 161 * exported to drivers as the only user for unfreeze is blk_mq.
162 */ 162 */
163 blk_freeze_queue_start(q); 163 blk_freeze_queue_start(q);
164 if (!q->mq_ops)
165 blk_drain_queue(q);
164 blk_mq_freeze_queue_wait(q); 166 blk_mq_freeze_queue_wait(q);
165} 167}
166 168
diff --git a/block/blk.h b/block/blk.h
index 3f1446937aec..442098aa9463 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -330,4 +330,6 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
330} 330}
331#endif /* CONFIG_BOUNCE */ 331#endif /* CONFIG_BOUNCE */
332 332
333extern void blk_drain_queue(struct request_queue *q);
334
333#endif /* BLK_INTERNAL_H */ 335#endif /* BLK_INTERNAL_H */
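The resulting freeze sequence for a legacy (non-mq) queue, condensed from the hunks above (assumption: simplified; locking happens inside blk_drain_queue()):

    blk_freeze_queue_start(q);
    if (!q->mq_ops)
            blk_drain_queue(q);     /* takes queue_lock and drains fully */
    blk_mq_freeze_queue_wait(q);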
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 444a387df219..35d4dcea381f 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -664,7 +664,7 @@ void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
664 unsigned int i; 664 unsigned int i;
665 665
666 list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) { 666 list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
667 ctx->rcvused -= rsgl->sg_num_bytes; 667 atomic_sub(rsgl->sg_num_bytes, &ctx->rcvused);
668 af_alg_free_sg(&rsgl->sgl); 668 af_alg_free_sg(&rsgl->sgl);
669 list_del(&rsgl->list); 669 list_del(&rsgl->list);
670 if (rsgl != &areq->first_rsgl) 670 if (rsgl != &areq->first_rsgl)
@@ -1163,7 +1163,7 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
1163 1163
1164 areq->last_rsgl = rsgl; 1164 areq->last_rsgl = rsgl;
1165 len += err; 1165 len += err;
1166 ctx->rcvused += err; 1166 atomic_add(err, &ctx->rcvused);
1167 rsgl->sg_num_bytes = err; 1167 rsgl->sg_num_bytes = err;
1168 iov_iter_advance(&msg->msg_iter, err); 1168 iov_iter_advance(&msg->msg_iter, err);
1169 } 1169 }
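A minimal sketch of the atomic accounting ctx->rcvused now uses (assumption: simplified, surrounding socket locking not shown); plain integer "+="/"-=" can lose updates when recvmsg() and the async completion path race, whereas atomic_t makes each adjustment indivisible:

    atomic_set(&ctx->rcvused, 0);                    /* at context setup  */
    atomic_add(err, &ctx->rcvused);                  /* bytes accounted   */
    atomic_sub(rsgl->sg_num_bytes, &ctx->rcvused);   /* bytes released    */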
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 60d7366ed343..9a636f961572 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -167,6 +167,18 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
167 167
168 spawn->alg = NULL; 168 spawn->alg = NULL;
169 spawns = &inst->alg.cra_users; 169 spawns = &inst->alg.cra_users;
170
171 /*
172 * We may encounter an unregistered instance here, since
173 * an instance's spawns are set up prior to the instance
174 * being registered. An unregistered instance will have
175 * NULL ->cra_users.next, since ->cra_users isn't
176 * properly initialized until registration. But an
177 * unregistered instance cannot have any users, so treat
178 * it the same as ->cra_users being empty.
179 */
180 if (spawns->next == NULL)
181 break;
170 } 182 }
171 } while ((spawns = crypto_more_spawns(alg, &stack, &top, 183 } while ((spawns = crypto_more_spawns(alg, &stack, &top,
172 &secondary_spawns))); 184 &secondary_spawns)));
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index ddcc45f77edd..e9885a35ef6e 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -571,7 +571,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
571 INIT_LIST_HEAD(&ctx->tsgl_list); 571 INIT_LIST_HEAD(&ctx->tsgl_list);
572 ctx->len = len; 572 ctx->len = len;
573 ctx->used = 0; 573 ctx->used = 0;
574 ctx->rcvused = 0; 574 atomic_set(&ctx->rcvused, 0);
575 ctx->more = 0; 575 ctx->more = 0;
576 ctx->merge = 0; 576 ctx->merge = 0;
577 ctx->enc = 0; 577 ctx->enc = 0;
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index baef9bfccdda..c5c47b680152 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -390,7 +390,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
390 INIT_LIST_HEAD(&ctx->tsgl_list); 390 INIT_LIST_HEAD(&ctx->tsgl_list);
391 ctx->len = len; 391 ctx->len = len;
392 ctx->used = 0; 392 ctx->used = 0;
393 ctx->rcvused = 0; 393 atomic_set(&ctx->rcvused, 0);
394 ctx->more = 0; 394 ctx->more = 0;
395 ctx->merge = 0; 395 ctx->merge = 0;
396 ctx->enc = 0; 396 ctx->enc = 0;
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index db1bc3147bc4..600afa99941f 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -610,6 +610,11 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
610 algt->mask)); 610 algt->mask));
611 if (IS_ERR(poly)) 611 if (IS_ERR(poly))
612 return PTR_ERR(poly); 612 return PTR_ERR(poly);
613 poly_hash = __crypto_hash_alg_common(poly);
614
615 err = -EINVAL;
616 if (poly_hash->digestsize != POLY1305_DIGEST_SIZE)
617 goto out_put_poly;
613 618
614 err = -ENOMEM; 619 err = -ENOMEM;
615 inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); 620 inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
@@ -618,7 +623,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
618 623
619 ctx = aead_instance_ctx(inst); 624 ctx = aead_instance_ctx(inst);
620 ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize; 625 ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize;
621 poly_hash = __crypto_hash_alg_common(poly);
622 err = crypto_init_ahash_spawn(&ctx->poly, poly_hash, 626 err = crypto_init_ahash_spawn(&ctx->poly, poly_hash,
623 aead_crypto_instance(inst)); 627 aead_crypto_instance(inst));
624 if (err) 628 if (err)
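
The chacha20poly1305 hunks move the POLY1305_DIGEST_SIZE check ahead of the instance allocation, so a hash with the wrong digest size is rejected before any memory is committed and nothing needs to be unwound. A generic validate-before-allocate sketch under the same principle (all names hypothetical):

#include <stdlib.h>

struct widget {
        int size;
        char payload[];
};

/* Reject bad parameters first; only then pay for the allocation. */
static struct widget *widget_create(int size)
{
        struct widget *w;

        if (size <= 0 || size > 4096)
                return NULL;            /* the -EINVAL case: nothing to free */

        w = calloc(1, sizeof(*w) + size);
        if (!w)
                return NULL;            /* the -ENOMEM case */

        w->size = size;
        return w;
}

int main(void)
{
        free(widget_create(64));
        free(widget_create(-1));        /* rejected before allocation, returns NULL */
        return 0;
}
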
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index ee9cfb99fe25..f8ec3d4ba4a8 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -254,6 +254,14 @@ static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
254 crypto_free_aead(ctx->child); 254 crypto_free_aead(ctx->child);
255} 255}
256 256
257static void pcrypt_free(struct aead_instance *inst)
258{
259 struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);
260
261 crypto_drop_aead(&ctx->spawn);
262 kfree(inst);
263}
264
257static int pcrypt_init_instance(struct crypto_instance *inst, 265static int pcrypt_init_instance(struct crypto_instance *inst,
258 struct crypto_alg *alg) 266 struct crypto_alg *alg)
259{ 267{
@@ -319,6 +327,8 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
319 inst->alg.encrypt = pcrypt_aead_encrypt; 327 inst->alg.encrypt = pcrypt_aead_encrypt;
320 inst->alg.decrypt = pcrypt_aead_decrypt; 328 inst->alg.decrypt = pcrypt_aead_decrypt;
321 329
330 inst->free = pcrypt_free;
331
322 err = aead_register_instance(tmpl, inst); 332 err = aead_register_instance(tmpl, inst);
323 if (err) 333 if (err)
324 goto out_drop_aead; 334 goto out_drop_aead;
@@ -349,14 +359,6 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
349 return -EINVAL; 359 return -EINVAL;
350} 360}
351 361
352static void pcrypt_free(struct crypto_instance *inst)
353{
354 struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);
355
356 crypto_drop_aead(&ctx->spawn);
357 kfree(inst);
358}
359
360static int pcrypt_cpumask_change_notify(struct notifier_block *self, 362static int pcrypt_cpumask_change_notify(struct notifier_block *self,
361 unsigned long val, void *data) 363 unsigned long val, void *data)
362{ 364{
@@ -469,7 +471,6 @@ static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
469static struct crypto_template pcrypt_tmpl = { 471static struct crypto_template pcrypt_tmpl = {
470 .name = "pcrypt", 472 .name = "pcrypt",
471 .create = pcrypt_create, 473 .create = pcrypt_create,
472 .free = pcrypt_free,
473 .module = THIS_MODULE, 474 .module = THIS_MODULE,
474}; 475};
475 476
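
pcrypt now releases its instances through the instance-level callback (inst->free = pcrypt_free) instead of the template-wide crypto_template.free hook, so destruction runs with the aead_instance type the allocation actually used. A toy sketch of attaching a destructor to each object rather than to its factory (names are made up for illustration):

#include <stdlib.h>

struct instance {
        void (*free)(struct instance *);   /* per-instance destructor */
        char *ctx;
};

static void instance_free(struct instance *inst)
{
        free(inst->ctx);
        free(inst);
}

static struct instance *factory_create(void)
{
        struct instance *inst = calloc(1, sizeof(*inst));

        if (!inst)
                return NULL;
        inst->ctx = malloc(32);
        inst->free = instance_free;        /* like inst->free = pcrypt_free */
        return inst;
}

int main(void)
{
        struct instance *inst = factory_create();

        if (inst)
                inst->free(inst);          /* caller never needs to know the factory */
        return 0;
}
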
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 7f2b02cc8ea1..2bcffec8dbf0 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -427,6 +427,142 @@ out:
427 return 0; 427 return 0;
428} 428}
429 429
430struct lpss_device_links {
431 const char *supplier_hid;
432 const char *supplier_uid;
433 const char *consumer_hid;
434 const char *consumer_uid;
435 u32 flags;
436};
437
438/*
439 * The _DEP method is used to identify dependencies but instead of creating
440 * device links for every handle in _DEP, only links in the following list are
441 * created. That is necessary because, in the general case, _DEP can refer to
442 * devices that might not have drivers, or that are on different buses, or where
443 * the supplier is not enumerated until after the consumer is probed.
444 */
445static const struct lpss_device_links lpss_device_links[] = {
446 {"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME},
447};
448
449static bool hid_uid_match(const char *hid1, const char *uid1,
450 const char *hid2, const char *uid2)
451{
452 return !strcmp(hid1, hid2) && uid1 && uid2 && !strcmp(uid1, uid2);
453}
454
455static bool acpi_lpss_is_supplier(struct acpi_device *adev,
456 const struct lpss_device_links *link)
457{
458 return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
459 link->supplier_hid, link->supplier_uid);
460}
461
462static bool acpi_lpss_is_consumer(struct acpi_device *adev,
463 const struct lpss_device_links *link)
464{
465 return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
466 link->consumer_hid, link->consumer_uid);
467}
468
469struct hid_uid {
470 const char *hid;
471 const char *uid;
472};
473
474static int match_hid_uid(struct device *dev, void *data)
475{
476 struct acpi_device *adev = ACPI_COMPANION(dev);
477 struct hid_uid *id = data;
478
479 if (!adev)
480 return 0;
481
482 return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
483 id->hid, id->uid);
484}
485
486static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
487{
488 struct hid_uid data = {
489 .hid = hid,
490 .uid = uid,
491 };
492
493 return bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid);
494}
495
496static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle)
497{
498 struct acpi_handle_list dep_devices;
499 acpi_status status;
500 int i;
501
502 if (!acpi_has_method(adev->handle, "_DEP"))
503 return false;
504
505 status = acpi_evaluate_reference(adev->handle, "_DEP", NULL,
506 &dep_devices);
507 if (ACPI_FAILURE(status)) {
508 dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n");
509 return false;
510 }
511
512 for (i = 0; i < dep_devices.count; i++) {
513 if (dep_devices.handles[i] == handle)
514 return true;
515 }
516
517 return false;
518}
519
520static void acpi_lpss_link_consumer(struct device *dev1,
521 const struct lpss_device_links *link)
522{
523 struct device *dev2;
524
525 dev2 = acpi_lpss_find_device(link->consumer_hid, link->consumer_uid);
526 if (!dev2)
527 return;
528
529 if (acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1)))
530 device_link_add(dev2, dev1, link->flags);
531
532 put_device(dev2);
533}
534
535static void acpi_lpss_link_supplier(struct device *dev1,
536 const struct lpss_device_links *link)
537{
538 struct device *dev2;
539
540 dev2 = acpi_lpss_find_device(link->supplier_hid, link->supplier_uid);
541 if (!dev2)
542 return;
543
544 if (acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2)))
545 device_link_add(dev1, dev2, link->flags);
546
547 put_device(dev2);
548}
549
550static void acpi_lpss_create_device_links(struct acpi_device *adev,
551 struct platform_device *pdev)
552{
553 int i;
554
555 for (i = 0; i < ARRAY_SIZE(lpss_device_links); i++) {
556 const struct lpss_device_links *link = &lpss_device_links[i];
557
558 if (acpi_lpss_is_supplier(adev, link))
559 acpi_lpss_link_consumer(&pdev->dev, link);
560
561 if (acpi_lpss_is_consumer(adev, link))
562 acpi_lpss_link_supplier(&pdev->dev, link);
563 }
564}
565
430static int acpi_lpss_create_device(struct acpi_device *adev, 566static int acpi_lpss_create_device(struct acpi_device *adev,
431 const struct acpi_device_id *id) 567 const struct acpi_device_id *id)
432{ 568{
@@ -465,6 +601,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
465 acpi_dev_free_resource_list(&resource_list); 601 acpi_dev_free_resource_list(&resource_list);
466 602
467 if (!pdata->mmio_base) { 603 if (!pdata->mmio_base) {
604 /* Avoid acpi_bus_attach() instantiating a pdev for this dev. */
605 adev->pnp.type.platform_id = 0;
468 /* Skip the device, but continue the namespace scan. */ 606 /* Skip the device, but continue the namespace scan. */
469 ret = 0; 607 ret = 0;
470 goto err_out; 608 goto err_out;
@@ -500,6 +638,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
500 adev->driver_data = pdata; 638 adev->driver_data = pdata;
501 pdev = acpi_create_platform_device(adev, dev_desc->properties); 639 pdev = acpi_create_platform_device(adev, dev_desc->properties);
502 if (!IS_ERR_OR_NULL(pdev)) { 640 if (!IS_ERR_OR_NULL(pdev)) {
641 acpi_lpss_create_device_links(adev, pdev);
503 return 1; 642 return 1;
504 } 643 }
505 644
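
Rather than creating a device link for every handle returned by _DEP, the new acpi_lpss code links only the exact supplier/consumer HID/UID pairs listed in lpss_device_links, and the match itself is plain string comparison. The predicate can be exercised on its own outside the ACPI device model; a minimal sketch (the second call uses the supplier and consumer IDs from the table above, which do not match each other):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Both HIDs must be equal and both UIDs must be present and equal. */
static bool hid_uid_match(const char *hid1, const char *uid1,
                          const char *hid2, const char *uid2)
{
        return !strcmp(hid1, hid2) && uid1 && uid2 && !strcmp(uid1, uid2);
}

int main(void)
{
        printf("%d\n", hid_uid_match("808622C1", "7", "808622C1", "7"));  /* 1 */
        printf("%d\n", hid_uid_match("808622C1", "7", "80860F14", "3"));  /* 0 */
        return 0;
}
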
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 0972ec0e2eb8..f53ccc680238 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -80,8 +80,8 @@ MODULE_PARM_DESC(report_key_events,
80static bool device_id_scheme = false; 80static bool device_id_scheme = false;
81module_param(device_id_scheme, bool, 0444); 81module_param(device_id_scheme, bool, 0444);
82 82
83static bool only_lcd = false; 83static int only_lcd = -1;
84module_param(only_lcd, bool, 0444); 84module_param(only_lcd, int, 0444);
85 85
86static int register_count; 86static int register_count;
87static DEFINE_MUTEX(register_count_mutex); 87static DEFINE_MUTEX(register_count_mutex);
@@ -2136,6 +2136,16 @@ int acpi_video_register(void)
2136 goto leave; 2136 goto leave;
2137 } 2137 }
2138 2138
2139 /*
2140 * We're seeing a lot of bogus backlight interfaces on newer machines
2141 * without a LCD such as desktops, servers and HDMI sticks. Checking
2142 * the lcd flag fixes this, so enable this on any machines which are
2143 * win8 ready (where we also prefer the native backlight driver, so
2144 * normally the acpi_video code should not register there anyways).
2145 */
2146 if (only_lcd == -1)
2147 only_lcd = acpi_osi_is_win8();
2148
2139 dmi_check_system(video_dmi_table); 2149 dmi_check_system(video_dmi_table);
2140 2150
2141 ret = acpi_bus_register_driver(&acpi_video_bus); 2151 ret = acpi_bus_register_driver(&acpi_video_bus);
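
only_lcd changes from a bool to a tri-state int: -1 means the user did not set the module parameter, and acpi_video_register() resolves it at runtime from the Windows 8 _OSI check. A minimal sketch of that auto-default pattern, with a stand-in for acpi_osi_is_win8():

#include <stdbool.h>
#include <stdio.h>

static int only_lcd = -1;     /* -1 = auto, 0 = off, 1 = on */

static bool platform_is_win8_ready(void)
{
        return true;          /* stand-in for acpi_osi_is_win8() */
}

static void resolve_defaults(void)
{
        /* Only override the value if the user left it at "auto". */
        if (only_lcd == -1)
                only_lcd = platform_is_win8_ready();
}

int main(void)
{
        resolve_defaults();
        printf("only_lcd = %d\n", only_lcd);
        return 0;
}
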
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 7a1a68b5ac5c..2243c8164b34 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -80,6 +80,9 @@
80 prefix, ACPICA_COPYRIGHT, \ 80 prefix, ACPICA_COPYRIGHT, \
81 prefix 81 prefix
82 82
83#define ACPI_COMMON_BUILD_TIME \
84 "Build date/time: %s %s\n", __DATE__, __TIME__
85
83/* Macros for usage messages */ 86/* Macros for usage messages */
84 87
85#define ACPI_USAGE_HEADER(usage) \ 88#define ACPI_USAGE_HEADER(usage) \
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 71743e5252f5..54b8d9df9423 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -223,6 +223,10 @@ void
223acpi_db_execute(char *name, char **args, acpi_object_type *types, u32 flags); 223acpi_db_execute(char *name, char **args, acpi_object_type *types, u32 flags);
224 224
225void 225void
226acpi_db_create_execution_thread(char *method_name_arg,
227 char **arguments, acpi_object_type *types);
228
229void
226acpi_db_create_execution_threads(char *num_threads_arg, 230acpi_db_create_execution_threads(char *num_threads_arg,
227 char *num_loops_arg, char *method_name_arg); 231 char *num_loops_arg, char *method_name_arg);
228 232
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 95eed442703f..45ef3f5dc9ad 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -46,7 +46,7 @@
46 46
47/***************************************************************************** 47/*****************************************************************************
48 * 48 *
49 * Globals related to the ACPI tables 49 * Globals related to the incoming ACPI tables
50 * 50 *
51 ****************************************************************************/ 51 ****************************************************************************/
52 52
@@ -87,7 +87,7 @@ ACPI_GLOBAL(u8, acpi_gbl_integer_nybble_width);
87 87
88/***************************************************************************** 88/*****************************************************************************
89 * 89 *
90 * Mutual exclusion within ACPICA subsystem 90 * Mutual exclusion within the ACPICA subsystem
91 * 91 *
92 ****************************************************************************/ 92 ****************************************************************************/
93 93
@@ -167,7 +167,7 @@ ACPI_GLOBAL(u8, acpi_gbl_next_owner_id_offset);
167 167
168ACPI_INIT_GLOBAL(u8, acpi_gbl_namespace_initialized, FALSE); 168ACPI_INIT_GLOBAL(u8, acpi_gbl_namespace_initialized, FALSE);
169 169
170/* Misc */ 170/* Miscellaneous */
171 171
172ACPI_GLOBAL(u32, acpi_gbl_original_mode); 172ACPI_GLOBAL(u32, acpi_gbl_original_mode);
173ACPI_GLOBAL(u32, acpi_gbl_ns_lookup_count); 173ACPI_GLOBAL(u32, acpi_gbl_ns_lookup_count);
@@ -191,10 +191,9 @@ extern const char acpi_gbl_lower_hex_digits[];
191extern const char acpi_gbl_upper_hex_digits[]; 191extern const char acpi_gbl_upper_hex_digits[];
192extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES]; 192extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
193 193
194#ifdef ACPI_DBG_TRACK_ALLOCATIONS
195
196/* Lists for tracking memory allocations (debug only) */ 194/* Lists for tracking memory allocations (debug only) */
197 195
196#ifdef ACPI_DBG_TRACK_ALLOCATIONS
198ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_global_list); 197ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_global_list);
199ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_ns_node_list); 198ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_ns_node_list);
200ACPI_GLOBAL(u8, acpi_gbl_display_final_mem_stats); 199ACPI_GLOBAL(u8, acpi_gbl_display_final_mem_stats);
@@ -203,7 +202,7 @@ ACPI_GLOBAL(u8, acpi_gbl_disable_mem_tracking);
203 202
204/***************************************************************************** 203/*****************************************************************************
205 * 204 *
206 * Namespace globals 205 * ACPI Namespace
207 * 206 *
208 ****************************************************************************/ 207 ****************************************************************************/
209 208
@@ -234,15 +233,20 @@ ACPI_INIT_GLOBAL(u32, acpi_gbl_nesting_level, 0);
234 233
235/***************************************************************************** 234/*****************************************************************************
236 * 235 *
237 * Interpreter globals 236 * Interpreter/Parser globals
238 * 237 *
239 ****************************************************************************/ 238 ****************************************************************************/
240 239
241ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list);
242
243/* Control method single step flag */ 240/* Control method single step flag */
244 241
245ACPI_GLOBAL(u8, acpi_gbl_cm_single_step); 242ACPI_GLOBAL(u8, acpi_gbl_cm_single_step);
243ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list);
244ACPI_INIT_GLOBAL(union acpi_parse_object, *acpi_gbl_current_scope, NULL);
245
246/* ASL/ASL+ converter */
247
248ACPI_INIT_GLOBAL(u8, acpi_gbl_capture_comments, FALSE);
249ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_last_list_head, NULL);
246 250
247/***************************************************************************** 251/*****************************************************************************
248 * 252 *
@@ -252,7 +256,6 @@ ACPI_GLOBAL(u8, acpi_gbl_cm_single_step);
252 256
253extern struct acpi_bit_register_info 257extern struct acpi_bit_register_info
254 acpi_gbl_bit_register_info[ACPI_NUM_BITREG]; 258 acpi_gbl_bit_register_info[ACPI_NUM_BITREG];
255
256ACPI_GLOBAL(u8, acpi_gbl_sleep_type_a); 259ACPI_GLOBAL(u8, acpi_gbl_sleep_type_a);
257ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b); 260ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b);
258 261
@@ -263,7 +266,6 @@ ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b);
263 ****************************************************************************/ 266 ****************************************************************************/
264 267
265#if (!ACPI_REDUCED_HARDWARE) 268#if (!ACPI_REDUCED_HARDWARE)
266
267ACPI_GLOBAL(u8, acpi_gbl_all_gpes_initialized); 269ACPI_GLOBAL(u8, acpi_gbl_all_gpes_initialized);
268ACPI_GLOBAL(struct acpi_gpe_xrupt_info *, acpi_gbl_gpe_xrupt_list_head); 270ACPI_GLOBAL(struct acpi_gpe_xrupt_info *, acpi_gbl_gpe_xrupt_list_head);
269ACPI_GLOBAL(struct acpi_gpe_block_info *, 271ACPI_GLOBAL(struct acpi_gpe_block_info *,
@@ -272,10 +274,8 @@ ACPI_GLOBAL(acpi_gbl_event_handler, acpi_gbl_global_event_handler);
272ACPI_GLOBAL(void *, acpi_gbl_global_event_handler_context); 274ACPI_GLOBAL(void *, acpi_gbl_global_event_handler_context);
273ACPI_GLOBAL(struct acpi_fixed_event_handler, 275ACPI_GLOBAL(struct acpi_fixed_event_handler,
274 acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS]); 276 acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS]);
275
276extern struct acpi_fixed_event_info 277extern struct acpi_fixed_event_info
277 acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS]; 278 acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS];
278
279#endif /* !ACPI_REDUCED_HARDWARE */ 279#endif /* !ACPI_REDUCED_HARDWARE */
280 280
281/***************************************************************************** 281/*****************************************************************************
@@ -291,14 +291,14 @@ ACPI_GLOBAL(u32, acpi_gpe_count);
291ACPI_GLOBAL(u32, acpi_sci_count); 291ACPI_GLOBAL(u32, acpi_sci_count);
292ACPI_GLOBAL(u32, acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]); 292ACPI_GLOBAL(u32, acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]);
293 293
294/* Support for dynamic control method tracing mechanism */ 294/* Dynamic control method tracing mechanism */
295 295
296ACPI_GLOBAL(u32, acpi_gbl_original_dbg_level); 296ACPI_GLOBAL(u32, acpi_gbl_original_dbg_level);
297ACPI_GLOBAL(u32, acpi_gbl_original_dbg_layer); 297ACPI_GLOBAL(u32, acpi_gbl_original_dbg_layer);
298 298
299/***************************************************************************** 299/*****************************************************************************
300 * 300 *
301 * Debugger and Disassembler globals 301 * Debugger and Disassembler
302 * 302 *
303 ****************************************************************************/ 303 ****************************************************************************/
304 304
@@ -326,7 +326,6 @@ ACPI_GLOBAL(struct acpi_external_file *, acpi_gbl_external_file_list);
326#endif 326#endif
327 327
328#ifdef ACPI_DEBUGGER 328#ifdef ACPI_DEBUGGER
329
330ACPI_INIT_GLOBAL(u8, acpi_gbl_abort_method, FALSE); 329ACPI_INIT_GLOBAL(u8, acpi_gbl_abort_method, FALSE);
331ACPI_INIT_GLOBAL(acpi_thread_id, acpi_gbl_db_thread_id, ACPI_INVALID_THREAD_ID); 330ACPI_INIT_GLOBAL(acpi_thread_id, acpi_gbl_db_thread_id, ACPI_INVALID_THREAD_ID);
332 331
@@ -340,7 +339,6 @@ ACPI_GLOBAL(u32, acpi_gbl_db_console_debug_level);
340ACPI_GLOBAL(struct acpi_namespace_node *, acpi_gbl_db_scope_node); 339ACPI_GLOBAL(struct acpi_namespace_node *, acpi_gbl_db_scope_node);
341ACPI_GLOBAL(u8, acpi_gbl_db_terminate_loop); 340ACPI_GLOBAL(u8, acpi_gbl_db_terminate_loop);
342ACPI_GLOBAL(u8, acpi_gbl_db_threads_terminated); 341ACPI_GLOBAL(u8, acpi_gbl_db_threads_terminated);
343
344ACPI_GLOBAL(char *, acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS]); 342ACPI_GLOBAL(char *, acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS]);
345ACPI_GLOBAL(acpi_object_type, acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS]); 343ACPI_GLOBAL(acpi_object_type, acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS]);
346 344
@@ -350,32 +348,33 @@ ACPI_GLOBAL(char, acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE]);
350ACPI_GLOBAL(char, acpi_gbl_db_scope_buf[ACPI_DB_LINE_BUFFER_SIZE]); 348ACPI_GLOBAL(char, acpi_gbl_db_scope_buf[ACPI_DB_LINE_BUFFER_SIZE]);
351ACPI_GLOBAL(char, acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE]); 349ACPI_GLOBAL(char, acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE]);
352 350
353/* 351/* Statistics globals */
354 * Statistic globals 352
355 */
356ACPI_GLOBAL(u16, acpi_gbl_obj_type_count[ACPI_TOTAL_TYPES]); 353ACPI_GLOBAL(u16, acpi_gbl_obj_type_count[ACPI_TOTAL_TYPES]);
357ACPI_GLOBAL(u16, acpi_gbl_node_type_count[ACPI_TOTAL_TYPES]); 354ACPI_GLOBAL(u16, acpi_gbl_node_type_count[ACPI_TOTAL_TYPES]);
358ACPI_GLOBAL(u16, acpi_gbl_obj_type_count_misc); 355ACPI_GLOBAL(u16, acpi_gbl_obj_type_count_misc);
359ACPI_GLOBAL(u16, acpi_gbl_node_type_count_misc); 356ACPI_GLOBAL(u16, acpi_gbl_node_type_count_misc);
360ACPI_GLOBAL(u32, acpi_gbl_num_nodes); 357ACPI_GLOBAL(u32, acpi_gbl_num_nodes);
361ACPI_GLOBAL(u32, acpi_gbl_num_objects); 358ACPI_GLOBAL(u32, acpi_gbl_num_objects);
362
363#endif /* ACPI_DEBUGGER */ 359#endif /* ACPI_DEBUGGER */
364 360
365#if defined (ACPI_DISASSEMBLER) || defined (ACPI_ASL_COMPILER) 361#if defined (ACPI_DISASSEMBLER) || defined (ACPI_ASL_COMPILER)
366
367ACPI_GLOBAL(const char, *acpi_gbl_pld_panel_list[]); 362ACPI_GLOBAL(const char, *acpi_gbl_pld_panel_list[]);
368ACPI_GLOBAL(const char, *acpi_gbl_pld_vertical_position_list[]); 363ACPI_GLOBAL(const char, *acpi_gbl_pld_vertical_position_list[]);
369ACPI_GLOBAL(const char, *acpi_gbl_pld_horizontal_position_list[]); 364ACPI_GLOBAL(const char, *acpi_gbl_pld_horizontal_position_list[]);
370ACPI_GLOBAL(const char, *acpi_gbl_pld_shape_list[]); 365ACPI_GLOBAL(const char, *acpi_gbl_pld_shape_list[]);
371
372ACPI_INIT_GLOBAL(u8, acpi_gbl_disasm_flag, FALSE); 366ACPI_INIT_GLOBAL(u8, acpi_gbl_disasm_flag, FALSE);
373
374#endif 367#endif
375 368
376/* 369/*****************************************************************************
377 * Meant for the -ca option. 370 *
378 */ 371 * ACPICA application-specific globals
372 *
373 ****************************************************************************/
374
375/* ASL-to-ASL+ conversion utility (implemented within the iASL compiler) */
376
377#ifdef ACPI_ASL_COMPILER
379ACPI_INIT_GLOBAL(char *, acpi_gbl_current_inline_comment, NULL); 378ACPI_INIT_GLOBAL(char *, acpi_gbl_current_inline_comment, NULL);
380ACPI_INIT_GLOBAL(char *, acpi_gbl_current_end_node_comment, NULL); 379ACPI_INIT_GLOBAL(char *, acpi_gbl_current_end_node_comment, NULL);
381ACPI_INIT_GLOBAL(char *, acpi_gbl_current_open_brace_comment, NULL); 380ACPI_INIT_GLOBAL(char *, acpi_gbl_current_open_brace_comment, NULL);
@@ -386,23 +385,18 @@ ACPI_INIT_GLOBAL(char *, acpi_gbl_current_filename, NULL);
386ACPI_INIT_GLOBAL(char *, acpi_gbl_current_parent_filename, NULL); 385ACPI_INIT_GLOBAL(char *, acpi_gbl_current_parent_filename, NULL);
387ACPI_INIT_GLOBAL(char *, acpi_gbl_current_include_filename, NULL); 386ACPI_INIT_GLOBAL(char *, acpi_gbl_current_include_filename, NULL);
388 387
389ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_last_list_head, NULL);
390
391ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_def_blk_comment_list_head, 388ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_def_blk_comment_list_head,
392 NULL); 389 NULL);
393ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_def_blk_comment_list_tail, 390ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_def_blk_comment_list_tail,
394 NULL); 391 NULL);
395
396ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_reg_comment_list_head, 392ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_reg_comment_list_head,
397 NULL); 393 NULL);
398ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_reg_comment_list_tail, 394ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_reg_comment_list_tail,
399 NULL); 395 NULL);
400
401ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_inc_comment_list_head, 396ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_inc_comment_list_head,
402 NULL); 397 NULL);
403ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_inc_comment_list_tail, 398ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_inc_comment_list_tail,
404 NULL); 399 NULL);
405
406ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_head, 400ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_head,
407 NULL); 401 NULL);
408ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_tail, 402ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_tail,
@@ -410,30 +404,18 @@ ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_tail,
410 404
411ACPI_INIT_GLOBAL(struct acpi_comment_addr_node, 405ACPI_INIT_GLOBAL(struct acpi_comment_addr_node,
412 *acpi_gbl_comment_addr_list_head, NULL); 406 *acpi_gbl_comment_addr_list_head, NULL);
413
414ACPI_INIT_GLOBAL(union acpi_parse_object, *acpi_gbl_current_scope, NULL);
415
416ACPI_INIT_GLOBAL(struct acpi_file_node, *acpi_gbl_file_tree_root, NULL); 407ACPI_INIT_GLOBAL(struct acpi_file_node, *acpi_gbl_file_tree_root, NULL);
417 408
418ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_reg_comment_cache); 409ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_reg_comment_cache);
419ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_comment_addr_cache); 410ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_comment_addr_cache);
420ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_file_cache); 411ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_file_cache);
421 412
422ACPI_INIT_GLOBAL(u8, gbl_capture_comments, FALSE);
423
424ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_asl_conversion, FALSE); 413ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_asl_conversion, FALSE);
425ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_conv_debug_file, NULL); 414ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_conv_debug_file, NULL);
426
427ACPI_GLOBAL(char, acpi_gbl_table_sig[4]); 415ACPI_GLOBAL(char, acpi_gbl_table_sig[4]);
428 416#endif
429/*****************************************************************************
430 *
431 * Application globals
432 *
433 ****************************************************************************/
434 417
435#ifdef ACPI_APPLICATION 418#ifdef ACPI_APPLICATION
436
437ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_debug_file, NULL); 419ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_debug_file, NULL);
438ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_output_file, NULL); 420ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_output_file, NULL);
439ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_timeout, FALSE); 421ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_timeout, FALSE);
@@ -442,16 +424,6 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_timeout, FALSE);
442 424
443ACPI_GLOBAL(acpi_spinlock, acpi_gbl_print_lock); /* For print buffer */ 425ACPI_GLOBAL(acpi_spinlock, acpi_gbl_print_lock); /* For print buffer */
444ACPI_GLOBAL(char, acpi_gbl_print_buffer[1024]); 426ACPI_GLOBAL(char, acpi_gbl_print_buffer[1024]);
445
446#endif /* ACPI_APPLICATION */ 427#endif /* ACPI_APPLICATION */
447 428
448/*****************************************************************************
449 *
450 * Info/help support
451 *
452 ****************************************************************************/
453
454extern const struct ah_predefined_name asl_predefined_info[];
455extern const struct ah_device_id asl_device_ids[];
456
457#endif /* __ACGLOBAL_H__ */ 429#endif /* __ACGLOBAL_H__ */
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 0d45b8bb1678..a56675f0661e 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -622,7 +622,7 @@ struct acpi_control_state {
622 union acpi_parse_object *predicate_op; 622 union acpi_parse_object *predicate_op;
623 u8 *aml_predicate_start; /* Start of if/while predicate */ 623 u8 *aml_predicate_start; /* Start of if/while predicate */
624 u8 *package_end; /* End of if/while block */ 624 u8 *package_end; /* End of if/while block */
625 u32 loop_count; /* While() loop counter */ 625 u64 loop_timeout; /* While() loop timeout */
626}; 626};
627 627
628/* 628/*
@@ -1218,16 +1218,17 @@ struct acpi_db_method_info {
1218 acpi_object_type *types; 1218 acpi_object_type *types;
1219 1219
1220 /* 1220 /*
1221 * Arguments to be passed to method for the command 1221 * Arguments to be passed to method for the commands Threads and
1222 * Threads - 1222 * Background. Note, ACPI specifies a maximum of 7 arguments (0 - 6).
1223 * the Number of threads, ID of current thread and 1223 *
1224 * Index of current thread inside all them created. 1224 * For the Threads command, the Number of threads, ID of current
1225 * thread and Index of current thread inside all them created.
1225 */ 1226 */
1226 char init_args; 1227 char init_args;
1227#ifdef ACPI_DEBUGGER 1228#ifdef ACPI_DEBUGGER
1228 acpi_object_type arg_types[4]; 1229 acpi_object_type arg_types[ACPI_METHOD_NUM_ARGS];
1229#endif 1230#endif
1230 char *arguments[4]; 1231 char *arguments[ACPI_METHOD_NUM_ARGS];
1231 char num_threads_str[11]; 1232 char num_threads_str[11];
1232 char id_of_thread_str[11]; 1233 char id_of_thread_str[11];
1233 char index_of_thread_str[11]; 1234 char index_of_thread_str[11];
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index c7f0c96cc00f..128a3d71b598 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -455,7 +455,7 @@
455 * the plist contains a set of parens to allow variable-length lists. 455 * the plist contains a set of parens to allow variable-length lists.
456 * These macros are used for both the debug and non-debug versions of the code. 456 * These macros are used for both the debug and non-debug versions of the code.
457 */ 457 */
458#define ACPI_ERROR_NAMESPACE(s, e) acpi_ut_namespace_error (AE_INFO, s, e); 458#define ACPI_ERROR_NAMESPACE(s, p, e) acpi_ut_prefixed_namespace_error (AE_INFO, s, p, e);
459#define ACPI_ERROR_METHOD(s, n, p, e) acpi_ut_method_error (AE_INFO, s, n, p, e); 459#define ACPI_ERROR_METHOD(s, n, p, e) acpi_ut_method_error (AE_INFO, s, n, p, e);
460#define ACPI_WARN_PREDEFINED(plist) acpi_ut_predefined_warning plist 460#define ACPI_WARN_PREDEFINED(plist) acpi_ut_predefined_warning plist
461#define ACPI_INFO_PREDEFINED(plist) acpi_ut_predefined_info plist 461#define ACPI_INFO_PREDEFINED(plist) acpi_ut_predefined_info plist
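
ACPI_ERROR_NAMESPACE() gains a prefix-scope argument and now routes through acpi_ut_prefixed_namespace_error(), so a failed lookup can be reported with the scope it was attempted in rather than just the relative name. A simplified sketch of prefixing the failing name before printing (hypothetical helper, not the ACPICA function):

#include <stdio.h>

/* Join the scope the lookup ran in with the relative name that failed. */
static void report_namespace_error(const char *scope, const char *name, int status)
{
        char full[128];

        snprintf(full, sizeof(full), "%s.%s", scope, name);
        fprintf(stderr, "ACPI Error: %s lookup failed (status %d)\n", full, status);
}

int main(void)
{
        report_namespace_error("\\_SB.PCI0", "GPE0", -5);
        return 0;
}
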
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 54a0c51b3e37..2fb1bb78d85c 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -289,6 +289,9 @@ acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
289char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node, 289char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
290 u8 no_trailing); 290 u8 no_trailing);
291 291
292char *acpi_ns_build_prefixed_pathname(union acpi_generic_state *prefix_scope,
293 const char *internal_path);
294
292char *acpi_ns_name_of_current_scope(struct acpi_walk_state *walk_state); 295char *acpi_ns_name_of_current_scope(struct acpi_walk_state *walk_state);
293 296
294acpi_status 297acpi_status
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 83b75e9db7ef..b6b29d717824 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -118,9 +118,6 @@ extern const char *acpi_gbl_ptyp_decode[];
118#ifndef ACPI_MSG_ERROR 118#ifndef ACPI_MSG_ERROR
119#define ACPI_MSG_ERROR "ACPI Error: " 119#define ACPI_MSG_ERROR "ACPI Error: "
120#endif 120#endif
121#ifndef ACPI_MSG_EXCEPTION
122#define ACPI_MSG_EXCEPTION "ACPI Exception: "
123#endif
124#ifndef ACPI_MSG_WARNING 121#ifndef ACPI_MSG_WARNING
125#define ACPI_MSG_WARNING "ACPI Warning: " 122#define ACPI_MSG_WARNING "ACPI Warning: "
126#endif 123#endif
@@ -129,10 +126,10 @@ extern const char *acpi_gbl_ptyp_decode[];
129#endif 126#endif
130 127
131#ifndef ACPI_MSG_BIOS_ERROR 128#ifndef ACPI_MSG_BIOS_ERROR
132#define ACPI_MSG_BIOS_ERROR "ACPI BIOS Error (bug): " 129#define ACPI_MSG_BIOS_ERROR "Firmware Error (ACPI): "
133#endif 130#endif
134#ifndef ACPI_MSG_BIOS_WARNING 131#ifndef ACPI_MSG_BIOS_WARNING
135#define ACPI_MSG_BIOS_WARNING "ACPI BIOS Warning (bug): " 132#define ACPI_MSG_BIOS_WARNING "Firmware Warning (ACPI): "
136#endif 133#endif
137 134
138/* 135/*
@@ -233,10 +230,10 @@ u64 acpi_ut_implicit_strtoul64(char *string);
233 */ 230 */
234acpi_status acpi_ut_init_globals(void); 231acpi_status acpi_ut_init_globals(void);
235 232
236#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
237
238const char *acpi_ut_get_mutex_name(u32 mutex_id); 233const char *acpi_ut_get_mutex_name(u32 mutex_id);
239 234
235#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
236
240const char *acpi_ut_get_notify_name(u32 notify_value, acpi_object_type type); 237const char *acpi_ut_get_notify_name(u32 notify_value, acpi_object_type type);
241#endif 238#endif
242 239
@@ -641,9 +638,11 @@ void ut_convert_backslashes(char *pathname);
641 638
642void acpi_ut_repair_name(char *name); 639void acpi_ut_repair_name(char *name);
643 640
644#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION) 641#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION) || defined (ACPI_DEBUG_OUTPUT)
645u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source); 642u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source);
646 643
644void acpi_ut_safe_strncpy(char *dest, char *source, acpi_size dest_size);
645
647u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source); 646u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source);
648 647
649u8 648u8
@@ -737,9 +736,11 @@ acpi_ut_predefined_bios_error(const char *module_name,
737 u8 node_flags, const char *format, ...); 736 u8 node_flags, const char *format, ...);
738 737
739void 738void
740acpi_ut_namespace_error(const char *module_name, 739acpi_ut_prefixed_namespace_error(const char *module_name,
741 u32 line_number, 740 u32 line_number,
742 const char *internal_name, acpi_status lookup_status); 741 union acpi_generic_state *prefix_scope,
742 const char *internal_name,
743 acpi_status lookup_status);
743 744
744void 745void
745acpi_ut_method_error(const char *module_name, 746acpi_ut_method_error(const char *module_name,
diff --git a/drivers/acpi/acpica/dbexec.c b/drivers/acpi/acpica/dbexec.c
index 3b30319752f0..ed088fceb18d 100644
--- a/drivers/acpi/acpica/dbexec.c
+++ b/drivers/acpi/acpica/dbexec.c
@@ -67,6 +67,8 @@ static acpi_status
67acpi_db_execution_walk(acpi_handle obj_handle, 67acpi_db_execution_walk(acpi_handle obj_handle,
68 u32 nesting_level, void *context, void **return_value); 68 u32 nesting_level, void *context, void **return_value);
69 69
70static void ACPI_SYSTEM_XFACE acpi_db_single_execution_thread(void *context);
71
70/******************************************************************************* 72/*******************************************************************************
71 * 73 *
72 * FUNCTION: acpi_db_delete_objects 74 * FUNCTION: acpi_db_delete_objects
@@ -229,7 +231,7 @@ static acpi_status acpi_db_execute_setup(struct acpi_db_method_info *info)
229 231
230 ACPI_FUNCTION_NAME(db_execute_setup); 232 ACPI_FUNCTION_NAME(db_execute_setup);
231 233
232 /* Catenate the current scope to the supplied name */ 234 /* Concatenate the current scope to the supplied name */
233 235
234 info->pathname[0] = 0; 236 info->pathname[0] = 0;
235 if ((info->name[0] != '\\') && (info->name[0] != '/')) { 237 if ((info->name[0] != '\\') && (info->name[0] != '/')) {
@@ -611,6 +613,112 @@ static void ACPI_SYSTEM_XFACE acpi_db_method_thread(void *context)
611 613
612/******************************************************************************* 614/*******************************************************************************
613 * 615 *
616 * FUNCTION: acpi_db_single_execution_thread
617 *
618 * PARAMETERS: context - Method info struct
619 *
620 * RETURN: None
621 *
622 * DESCRIPTION: Create one thread and execute a method
623 *
624 ******************************************************************************/
625
626static void ACPI_SYSTEM_XFACE acpi_db_single_execution_thread(void *context)
627{
628 struct acpi_db_method_info *info = context;
629 acpi_status status;
630 struct acpi_buffer return_obj;
631
632 acpi_os_printf("\n");
633
634 status = acpi_db_execute_method(info, &return_obj);
635 if (ACPI_FAILURE(status)) {
636 acpi_os_printf("%s During evaluation of %s\n",
637 acpi_format_exception(status), info->pathname);
638 return;
639 }
640
641 /* Display a return object, if any */
642
643 if (return_obj.length) {
644 acpi_os_printf("Evaluation of %s returned object %p, "
645 "external buffer length %X\n",
646 acpi_gbl_db_method_info.pathname,
647 return_obj.pointer, (u32)return_obj.length);
648
649 acpi_db_dump_external_object(return_obj.pointer, 1);
650 }
651
652 acpi_os_printf("\nBackground thread completed\n%c ",
653 ACPI_DEBUGGER_COMMAND_PROMPT);
654}
655
656/*******************************************************************************
657 *
658 * FUNCTION: acpi_db_create_execution_thread
659 *
660 * PARAMETERS: method_name_arg - Control method to execute
661 * arguments - Array of arguments to the method
662 * types - Corresponding array of object types
663 *
664 * RETURN: None
665 *
666 * DESCRIPTION: Create a single thread to evaluate a namespace object. Handles
667 * arguments passed on command line for control methods.
668 *
669 ******************************************************************************/
670
671void
672acpi_db_create_execution_thread(char *method_name_arg,
673 char **arguments, acpi_object_type *types)
674{
675 acpi_status status;
676 u32 i;
677
678 memset(&acpi_gbl_db_method_info, 0, sizeof(struct acpi_db_method_info));
679 acpi_gbl_db_method_info.name = method_name_arg;
680 acpi_gbl_db_method_info.init_args = 1;
681 acpi_gbl_db_method_info.args = acpi_gbl_db_method_info.arguments;
682 acpi_gbl_db_method_info.types = acpi_gbl_db_method_info.arg_types;
683
684 /* Setup method arguments, up to 7 (0-6) */
685
686 for (i = 0; (i < ACPI_METHOD_NUM_ARGS) && *arguments; i++) {
687 acpi_gbl_db_method_info.arguments[i] = *arguments;
688 arguments++;
689
690 acpi_gbl_db_method_info.arg_types[i] = *types;
691 types++;
692 }
693
694 status = acpi_db_execute_setup(&acpi_gbl_db_method_info);
695 if (ACPI_FAILURE(status)) {
696 return;
697 }
698
699 /* Get the NS node, determines existence also */
700
701 status = acpi_get_handle(NULL, acpi_gbl_db_method_info.pathname,
702 &acpi_gbl_db_method_info.method);
703 if (ACPI_FAILURE(status)) {
704 acpi_os_printf("%s Could not get handle for %s\n",
705 acpi_format_exception(status),
706 acpi_gbl_db_method_info.pathname);
707 return;
708 }
709
710 status = acpi_os_execute(OSL_DEBUGGER_EXEC_THREAD,
711 acpi_db_single_execution_thread,
712 &acpi_gbl_db_method_info);
713 if (ACPI_FAILURE(status)) {
714 return;
715 }
716
717 acpi_os_printf("\nBackground thread started\n");
718}
719
720/*******************************************************************************
721 *
614 * FUNCTION: acpi_db_create_execution_threads 722 * FUNCTION: acpi_db_create_execution_threads
615 * 723 *
616 * PARAMETERS: num_threads_arg - Number of threads to create 724 * PARAMETERS: num_threads_arg - Number of threads to create
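
The new Background debugger command hands the evaluation to acpi_db_create_execution_thread(), which queues acpi_db_single_execution_thread() through acpi_os_execute() so the debugger prompt stays usable while the method runs. A pthread-based sketch of that hand-off (the pathname and output are placeholders; the join is only there so the demo exits cleanly, whereas the real debugger simply returns to its prompt):

#include <pthread.h>
#include <stdio.h>

struct method_info { const char *pathname; };

static void *single_execution_thread(void *context)
{
        struct method_info *info = context;

        /* Stand-in for acpi_db_execute_method(); just report completion. */
        printf("\nBackground evaluation of %s completed\n", info->pathname);
        return NULL;
}

int main(void)
{
        static struct method_info info = { "\\_SB.PCI0._INI" };
        pthread_t tid;

        /* Hand the work to a worker thread; the caller is free immediately. */
        if (pthread_create(&tid, NULL, single_execution_thread, &info) == 0) {
                printf("Background thread started\n");
                pthread_join(tid, NULL);
        }
        return 0;
}
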
diff --git a/drivers/acpi/acpica/dbfileio.c b/drivers/acpi/acpica/dbfileio.c
index 4d81ea291d93..cf9607945704 100644
--- a/drivers/acpi/acpica/dbfileio.c
+++ b/drivers/acpi/acpica/dbfileio.c
@@ -99,8 +99,8 @@ void acpi_db_open_debug_file(char *name)
99 } 99 }
100 100
101 acpi_os_printf("Debug output file %s opened\n", name); 101 acpi_os_printf("Debug output file %s opened\n", name);
102 strncpy(acpi_gbl_db_debug_filename, name, 102 acpi_ut_safe_strncpy(acpi_gbl_db_debug_filename, name,
103 sizeof(acpi_gbl_db_debug_filename)); 103 sizeof(acpi_gbl_db_debug_filename));
104 acpi_gbl_db_output_to_file = TRUE; 104 acpi_gbl_db_output_to_file = TRUE;
105} 105}
106#endif 106#endif
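
acpi_db_open_debug_file() now copies the filename with acpi_ut_safe_strncpy() (prototyped in the acutils.h hunk above) instead of strncpy(), which leaves the destination unterminated when the source fills it exactly. A plain-C sketch of a copy that always terminates, assuming dest_size is at least 1 (a stand-in, not the ACPICA implementation):

#include <stdio.h>
#include <string.h>

/* Copy at most dest_size-1 bytes and always NUL-terminate dest. */
static void safe_strncpy(char *dest, const char *source, size_t dest_size)
{
        strncpy(dest, source, dest_size - 1);
        dest[dest_size - 1] = '\0';
}

int main(void)
{
        char buf[8];

        safe_strncpy(buf, "longer-than-eight", sizeof(buf));
        printf("%s\n", buf);    /* "longer-" with a guaranteed terminator */
        return 0;
}
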
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index 2626d79db064..954ca3b981a7 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -136,6 +136,7 @@ enum acpi_ex_debugger_commands {
136 CMD_UNLOAD, 136 CMD_UNLOAD,
137 137
138 CMD_TERMINATE, 138 CMD_TERMINATE,
139 CMD_BACKGROUND,
139 CMD_THREADS, 140 CMD_THREADS,
140 141
141 CMD_TEST, 142 CMD_TEST,
@@ -212,6 +213,7 @@ static const struct acpi_db_command_info acpi_gbl_db_commands[] = {
212 {"UNLOAD", 1}, 213 {"UNLOAD", 1},
213 214
214 {"TERMINATE", 0}, 215 {"TERMINATE", 0},
216 {"BACKGROUND", 1},
215 {"THREADS", 3}, 217 {"THREADS", 3},
216 218
217 {"TEST", 1}, 219 {"TEST", 1},
@@ -222,9 +224,56 @@ static const struct acpi_db_command_info acpi_gbl_db_commands[] = {
222/* 224/*
223 * Help for all debugger commands. First argument is the number of lines 225 * Help for all debugger commands. First argument is the number of lines
224 * of help to output for the command. 226 * of help to output for the command.
227 *
228 * Note: Some commands are not supported by the kernel-level version of
229 * the debugger.
225 */ 230 */
226static const struct acpi_db_command_help acpi_gbl_db_command_help[] = { 231static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
227 {0, "\nGeneral-Purpose Commands:", "\n"}, 232 {0, "\nNamespace Access:", "\n"},
233 {1, " Businfo", "Display system bus info\n"},
234 {1, " Disassemble <Method>", "Disassemble a control method\n"},
235 {1, " Find <AcpiName> (? is wildcard)",
236 "Find ACPI name(s) with wildcards\n"},
237 {1, " Integrity", "Validate namespace integrity\n"},
238 {1, " Methods", "Display list of loaded control methods\n"},
239 {1, " Namespace [Object] [Depth]",
240 "Display loaded namespace tree/subtree\n"},
241 {1, " Notify <Object> <Value>", "Send a notification on Object\n"},
242 {1, " Objects [ObjectType]",
243 "Display summary of all objects or just given type\n"},
244 {1, " Owner <OwnerId> [Depth]",
245 "Display loaded namespace by object owner\n"},
246 {1, " Paths", "Display full pathnames of namespace objects\n"},
247 {1, " Predefined", "Check all predefined names\n"},
248 {1, " Prefix [<Namepath>]", "Set or Get current execution prefix\n"},
249 {1, " References <Addr>", "Find all references to object at addr\n"},
250 {1, " Resources [DeviceName]",
251 "Display Device resources (no arg = all devices)\n"},
252 {1, " Set N <NamedObject> <Value>", "Set value for named integer\n"},
253 {1, " Template <Object>", "Format/dump a Buffer/ResourceTemplate\n"},
254 {1, " Type <Object>", "Display object type\n"},
255
256 {0, "\nControl Method Execution:", "\n"},
257 {1, " Evaluate <Namepath> [Arguments]",
258 "Evaluate object or control method\n"},
259 {1, " Execute <Namepath> [Arguments]", "Synonym for Evaluate\n"},
260#ifdef ACPI_APPLICATION
261 {1, " Background <Namepath> [Arguments]",
262 "Evaluate object/method in a separate thread\n"},
263 {1, " Thread <Threads><Loops><NamePath>",
264 "Spawn threads to execute method(s)\n"},
265#endif
266 {1, " Debug <Namepath> [Arguments]", "Single-Step a control method\n"},
267 {7, " [Arguments] formats:", "Control method argument formats\n"},
268 {1, " Hex Integer", "Integer\n"},
269 {1, " \"Ascii String\"", "String\n"},
270 {1, " (Hex Byte List)", "Buffer\n"},
271 {1, " (01 42 7A BF)", "Buffer example (4 bytes)\n"},
272 {1, " [Package Element List]", "Package\n"},
273 {1, " [0x01 0x1234 \"string\"]",
274 "Package example (3 elements)\n"},
275
276 {0, "\nMiscellaneous:", "\n"},
228 {1, " Allocations", "Display list of current memory allocations\n"}, 277 {1, " Allocations", "Display list of current memory allocations\n"},
229 {2, " Dump <Address>|<Namepath>", "\n"}, 278 {2, " Dump <Address>|<Namepath>", "\n"},
230 {0, " [Byte|Word|Dword|Qword]", 279 {0, " [Byte|Word|Dword|Qword]",
@@ -248,46 +297,30 @@ static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
248 {1, " Stack", "Display CPU stack usage\n"}, 297 {1, " Stack", "Display CPU stack usage\n"},
249 {1, " Tables", "Info about current ACPI table(s)\n"}, 298 {1, " Tables", "Info about current ACPI table(s)\n"},
250 {1, " Tables", "Display info about loaded ACPI tables\n"}, 299 {1, " Tables", "Display info about loaded ACPI tables\n"},
300#ifdef ACPI_APPLICATION
301 {1, " Terminate", "Delete namespace and all internal objects\n"},
302#endif
251 {1, " ! <CommandNumber>", "Execute command from history buffer\n"}, 303 {1, " ! <CommandNumber>", "Execute command from history buffer\n"},
252 {1, " !!", "Execute last command again\n"}, 304 {1, " !!", "Execute last command again\n"},
253 305
254 {0, "\nNamespace Access Commands:", "\n"}, 306 {0, "\nMethod and Namespace Debugging:", "\n"},
255 {1, " Businfo", "Display system bus info\n"}, 307 {5, " Trace <State> [<Namepath>] [Once]",
256 {1, " Disassemble <Method>", "Disassemble a control method\n"}, 308 "Trace control method execution\n"},
257 {1, " Find <AcpiName> (? is wildcard)", 309 {1, " Enable", "Enable all messages\n"},
258 "Find ACPI name(s) with wildcards\n"}, 310 {1, " Disable", "Disable tracing\n"},
259 {1, " Integrity", "Validate namespace integrity\n"}, 311 {1, " Method", "Enable method execution messages\n"},
260 {1, " Methods", "Display list of loaded control methods\n"}, 312 {1, " Opcode", "Enable opcode execution messages\n"},
261 {1, " Namespace [Object] [Depth]", 313 {3, " Test <TestName>", "Invoke a debug test\n"},
262 "Display loaded namespace tree/subtree\n"}, 314 {1, " Objects", "Read/write/compare all namespace data objects\n"},
263 {1, " Notify <Object> <Value>", "Send a notification on Object\n"}, 315 {1, " Predefined",
264 {1, " Objects [ObjectType]", 316 "Validate all ACPI predefined names (_STA, etc.)\n"},
265 "Display summary of all objects or just given type\n"}, 317 {1, " Execute predefined",
266 {1, " Owner <OwnerId> [Depth]", 318 "Execute all predefined (public) methods\n"},
267 "Display loaded namespace by object owner\n"},
268 {1, " Paths", "Display full pathnames of namespace objects\n"},
269 {1, " Predefined", "Check all predefined names\n"},
270 {1, " Prefix [<Namepath>]", "Set or Get current execution prefix\n"},
271 {1, " References <Addr>", "Find all references to object at addr\n"},
272 {1, " Resources [DeviceName]",
273 "Display Device resources (no arg = all devices)\n"},
274 {1, " Set N <NamedObject> <Value>", "Set value for named integer\n"},
275 {1, " Template <Object>", "Format/dump a Buffer/ResourceTemplate\n"},
276 {1, " Type <Object>", "Display object type\n"},
277 319
278 {0, "\nControl Method Execution Commands:", "\n"}, 320 {0, "\nControl Method Single-Step Execution:", "\n"},
279 {1, " Arguments (or Args)", "Display method arguments\n"}, 321 {1, " Arguments (or Args)", "Display method arguments\n"},
280 {1, " Breakpoint <AmlOffset>", "Set an AML execution breakpoint\n"}, 322 {1, " Breakpoint <AmlOffset>", "Set an AML execution breakpoint\n"},
281 {1, " Call", "Run to next control method invocation\n"}, 323 {1, " Call", "Run to next control method invocation\n"},
282 {1, " Debug <Namepath> [Arguments]", "Single Step a control method\n"},
283 {6, " Evaluate", "Synonym for Execute\n"},
284 {5, " Execute <Namepath> [Arguments]", "Execute control method\n"},
285 {1, " Hex Integer", "Integer method argument\n"},
286 {1, " \"Ascii String\"", "String method argument\n"},
287 {1, " (Hex Byte List)", "Buffer method argument\n"},
288 {1, " [Package Element List]", "Package method argument\n"},
289 {5, " Execute predefined",
290 "Execute all predefined (public) methods\n"},
291 {1, " Go", "Allow method to run to completion\n"}, 324 {1, " Go", "Allow method to run to completion\n"},
292 {1, " Information", "Display info about the current method\n"}, 325 {1, " Information", "Display info about the current method\n"},
293 {1, " Into", "Step into (not over) a method call\n"}, 326 {1, " Into", "Step into (not over) a method call\n"},
@@ -296,41 +329,24 @@ static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
296 {1, " Results", "Display method result stack\n"}, 329 {1, " Results", "Display method result stack\n"},
297 {1, " Set <A|L> <#> <Value>", "Set method data (Arguments/Locals)\n"}, 330 {1, " Set <A|L> <#> <Value>", "Set method data (Arguments/Locals)\n"},
298 {1, " Stop", "Terminate control method\n"}, 331 {1, " Stop", "Terminate control method\n"},
299 {5, " Trace <State> [<Namepath>] [Once]",
300 "Trace control method execution\n"},
301 {1, " Enable", "Enable all messages\n"},
302 {1, " Disable", "Disable tracing\n"},
303 {1, " Method", "Enable method execution messages\n"},
304 {1, " Opcode", "Enable opcode execution messages\n"},
305 {1, " Tree", "Display control method calling tree\n"}, 332 {1, " Tree", "Display control method calling tree\n"},
306 {1, " <Enter>", "Single step next AML opcode (over calls)\n"}, 333 {1, " <Enter>", "Single step next AML opcode (over calls)\n"},
307 334
308#ifdef ACPI_APPLICATION 335#ifdef ACPI_APPLICATION
309 {0, "\nHardware Simulation Commands:", "\n"}, 336 {0, "\nFile Operations:", "\n"},
310 {1, " EnableAcpi", "Enable ACPI (hardware) mode\n"},
311 {1, " Event <F|G> <Value>", "Generate AcpiEvent (Fixed/GPE)\n"},
312 {1, " Gpe <GpeNum> [GpeBlockDevice]", "Simulate a GPE\n"},
313 {1, " Gpes", "Display info on all GPE devices\n"},
314 {1, " Sci", "Generate an SCI\n"},
315 {1, " Sleep [SleepState]", "Simulate sleep/wake sequence(s) (0-5)\n"},
316
317 {0, "\nFile I/O Commands:", "\n"},
318 {1, " Close", "Close debug output file\n"}, 337 {1, " Close", "Close debug output file\n"},
319 {1, " Load <Input Filename>", "Load ACPI table from a file\n"}, 338 {1, " Load <Input Filename>", "Load ACPI table from a file\n"},
320 {1, " Open <Output Filename>", "Open a file for debug output\n"}, 339 {1, " Open <Output Filename>", "Open a file for debug output\n"},
321 {1, " Unload <Namepath>", 340 {1, " Unload <Namepath>",
322 "Unload an ACPI table via namespace object\n"}, 341 "Unload an ACPI table via namespace object\n"},
323 342
324 {0, "\nUser Space Commands:", "\n"}, 343 {0, "\nHardware Simulation:", "\n"},
325 {1, " Terminate", "Delete namespace and all internal objects\n"}, 344 {1, " EnableAcpi", "Enable ACPI (hardware) mode\n"},
326 {1, " Thread <Threads><Loops><NamePath>", 345 {1, " Event <F|G> <Value>", "Generate AcpiEvent (Fixed/GPE)\n"},
327 "Spawn threads to execute method(s)\n"}, 346 {1, " Gpe <GpeNum> [GpeBlockDevice]", "Simulate a GPE\n"},
328 347 {1, " Gpes", "Display info on all GPE devices\n"},
329 {0, "\nDebug Test Commands:", "\n"}, 348 {1, " Sci", "Generate an SCI\n"},
330 {3, " Test <TestName>", "Invoke a debug test\n"}, 349 {1, " Sleep [SleepState]", "Simulate sleep/wake sequence(s) (0-5)\n"},
331 {1, " Objects", "Read/write/compare all namespace data objects\n"},
332 {1, " Predefined",
333 "Execute all ACPI predefined names (_STA, etc.)\n"},
334#endif 350#endif
335 {0, NULL, NULL} 351 {0, NULL, NULL}
336}; 352};
@@ -442,11 +458,15 @@ static void acpi_db_display_help(char *command)
442 458
443 /* No argument to help, display help for all commands */ 459 /* No argument to help, display help for all commands */
444 460
461 acpi_os_printf("\nSummary of AML Debugger Commands\n\n");
462
445 while (next->invocation) { 463 while (next->invocation) {
446 acpi_os_printf("%-38s%s", next->invocation, 464 acpi_os_printf("%-38s%s", next->invocation,
447 next->description); 465 next->description);
448 next++; 466 next++;
449 } 467 }
468 acpi_os_printf("\n");
469
450 } else { 470 } else {
451 /* Display help for all commands that match the substring */ 471 /* Display help for all commands that match the substring */
452 472
@@ -1087,6 +1107,13 @@ acpi_db_command_dispatch(char *input_buffer,
1087 /* acpi_initialize (NULL); */ 1107 /* acpi_initialize (NULL); */
1088 break; 1108 break;
1089 1109
1110 case CMD_BACKGROUND:
1111
1112 acpi_db_create_execution_thread(acpi_gbl_db_args[1],
1113 &acpi_gbl_db_args[2],
1114 &acpi_gbl_db_arg_types[2]);
1115 break;
1116
1090 case CMD_THREADS: 1117 case CMD_THREADS:
1091 1118
1092 acpi_db_create_execution_threads(acpi_gbl_db_args[1], 1119 acpi_db_create_execution_threads(acpi_gbl_db_args[1],
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index f470e81b0499..4b6ebc2a2851 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -118,6 +118,8 @@ acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
118 control_state->control.package_end = 118 control_state->control.package_end =
119 walk_state->parser_state.pkg_end; 119 walk_state->parser_state.pkg_end;
120 control_state->control.opcode = op->common.aml_opcode; 120 control_state->control.opcode = op->common.aml_opcode;
121 control_state->control.loop_timeout = acpi_os_get_timer() +
122 (u64)(acpi_gbl_max_loop_iterations * ACPI_100NSEC_PER_SEC);
121 123
122 /* Push the control state on this walk's control stack */ 124 /* Push the control state on this walk's control stack */
123 125
@@ -206,15 +208,15 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state *walk_state,
206 /* Predicate was true, the body of the loop was just executed */ 208 /* Predicate was true, the body of the loop was just executed */
207 209
208 /* 210 /*
209 * This loop counter mechanism allows the interpreter to escape 211 * This infinite loop detection mechanism allows the interpreter
210 * possibly infinite loops. This can occur in poorly written AML 212 * to escape possibly infinite loops. This can occur in poorly
211 * when the hardware does not respond within a while loop and the 213 * written AML when the hardware does not respond within a while
212 * loop does not implement a timeout. 214 * loop and the loop does not implement a timeout.
213 */ 215 */
214 control_state->control.loop_count++; 216 if (ACPI_TIME_AFTER(acpi_os_get_timer(),
215 if (control_state->control.loop_count > 217 control_state->control.
216 acpi_gbl_max_loop_iterations) { 218 loop_timeout)) {
217 status = AE_AML_INFINITE_LOOP; 219 status = AE_AML_LOOP_TIMEOUT;
218 break; 220 break;
219 } 221 }
220 222
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 7bcf5f5ea029..0cab34a593d5 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -209,7 +209,8 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
209 ACPI_IMODE_LOAD_PASS1, flags, 209 ACPI_IMODE_LOAD_PASS1, flags,
210 walk_state, &node); 210 walk_state, &node);
211 if (ACPI_FAILURE(status)) { 211 if (ACPI_FAILURE(status)) {
212 ACPI_ERROR_NAMESPACE(arg->common.value.string, status); 212 ACPI_ERROR_NAMESPACE(walk_state->scope_info,
213 arg->common.value.string, status);
213 return_ACPI_STATUS(status); 214 return_ACPI_STATUS(status);
214 } 215 }
215 } 216 }
@@ -383,7 +384,9 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
383 walk_state, 384 walk_state,
384 &info->connection_node); 385 &info->connection_node);
385 if (ACPI_FAILURE(status)) { 386 if (ACPI_FAILURE(status)) {
386 ACPI_ERROR_NAMESPACE(child->common. 387 ACPI_ERROR_NAMESPACE(walk_state->
388 scope_info,
389 child->common.
387 value.name, 390 value.name,
388 status); 391 status);
389 return_ACPI_STATUS(status); 392 return_ACPI_STATUS(status);
@@ -402,7 +405,8 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
402 ACPI_NS_DONT_OPEN_SCOPE, 405 ACPI_NS_DONT_OPEN_SCOPE,
403 walk_state, &info->field_node); 406 walk_state, &info->field_node);
404 if (ACPI_FAILURE(status)) { 407 if (ACPI_FAILURE(status)) {
405 ACPI_ERROR_NAMESPACE((char *)&arg->named.name, 408 ACPI_ERROR_NAMESPACE(walk_state->scope_info,
409 (char *)&arg->named.name,
406 status); 410 status);
407 return_ACPI_STATUS(status); 411 return_ACPI_STATUS(status);
408 } else { 412 } else {
@@ -498,7 +502,8 @@ acpi_ds_create_field(union acpi_parse_object *op,
498 &region_node); 502 &region_node);
499#endif 503#endif
500 if (ACPI_FAILURE(status)) { 504 if (ACPI_FAILURE(status)) {
501 ACPI_ERROR_NAMESPACE(arg->common.value.name, status); 505 ACPI_ERROR_NAMESPACE(walk_state->scope_info,
506 arg->common.value.name, status);
502 return_ACPI_STATUS(status); 507 return_ACPI_STATUS(status);
503 } 508 }
504 } 509 }
@@ -618,7 +623,8 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
618 ACPI_IMODE_LOAD_PASS1, flags, 623 ACPI_IMODE_LOAD_PASS1, flags,
619 walk_state, &node); 624 walk_state, &node);
620 if (ACPI_FAILURE(status)) { 625 if (ACPI_FAILURE(status)) {
621 ACPI_ERROR_NAMESPACE((char *)&arg->named.name, 626 ACPI_ERROR_NAMESPACE(walk_state->scope_info,
627 (char *)&arg->named.name,
622 status); 628 status);
623 if (status != AE_ALREADY_EXISTS) { 629 if (status != AE_ALREADY_EXISTS) {
624 return_ACPI_STATUS(status); 630 return_ACPI_STATUS(status);
@@ -681,7 +687,8 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
681 &region_node); 687 &region_node);
682#endif 688#endif
683 if (ACPI_FAILURE(status)) { 689 if (ACPI_FAILURE(status)) {
684 ACPI_ERROR_NAMESPACE(arg->common.value.name, status); 690 ACPI_ERROR_NAMESPACE(walk_state->scope_info,
691 arg->common.value.name, status);
685 return_ACPI_STATUS(status); 692 return_ACPI_STATUS(status);
686 } 693 }
687 } 694 }
@@ -695,7 +702,8 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
695 ACPI_NS_SEARCH_PARENT, walk_state, 702 ACPI_NS_SEARCH_PARENT, walk_state,
696 &info.register_node); 703 &info.register_node);
697 if (ACPI_FAILURE(status)) { 704 if (ACPI_FAILURE(status)) {
698 ACPI_ERROR_NAMESPACE(arg->common.value.string, status); 705 ACPI_ERROR_NAMESPACE(walk_state->scope_info,
706 arg->common.value.string, status);
699 return_ACPI_STATUS(status); 707 return_ACPI_STATUS(status);
700 } 708 }
701 709
@@ -765,7 +773,8 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
765 ACPI_NS_SEARCH_PARENT, walk_state, 773 ACPI_NS_SEARCH_PARENT, walk_state,
766 &info.register_node); 774 &info.register_node);
767 if (ACPI_FAILURE(status)) { 775 if (ACPI_FAILURE(status)) {
768 ACPI_ERROR_NAMESPACE(arg->common.value.string, status); 776 ACPI_ERROR_NAMESPACE(walk_state->scope_info,
777 arg->common.value.string, status);
769 return_ACPI_STATUS(status); 778 return_ACPI_STATUS(status);
770 } 779 }
771 780
@@ -778,7 +787,8 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
778 ACPI_NS_SEARCH_PARENT, walk_state, 787 ACPI_NS_SEARCH_PARENT, walk_state,
779 &info.data_register_node); 788 &info.data_register_node);
780 if (ACPI_FAILURE(status)) { 789 if (ACPI_FAILURE(status)) {
781 ACPI_ERROR_NAMESPACE(arg->common.value.string, status); 790 ACPI_ERROR_NAMESPACE(walk_state->scope_info,
791 arg->common.value.string, status);
782 return_ACPI_STATUS(status); 792 return_ACPI_STATUS(status);
783 } 793 }
784 794
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 82448551781b..b21fe084ffc8 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -112,7 +112,9 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
112 acpi_namespace_node, 112 acpi_namespace_node,
113 &(op->common.node))); 113 &(op->common.node)));
114 if (ACPI_FAILURE(status)) { 114 if (ACPI_FAILURE(status)) {
115 ACPI_ERROR_NAMESPACE(op->common.value. 115 ACPI_ERROR_NAMESPACE(walk_state->
116 scope_info,
117 op->common.value.
116 string, status); 118 string, status);
117 return_ACPI_STATUS(status); 119 return_ACPI_STATUS(status);
118 } 120 }
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
index 6d487edfe2de..5a602b75084e 100644
--- a/drivers/acpi/acpica/dspkginit.c
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -297,8 +297,10 @@ acpi_ds_init_package_element(u8 object_type,
297{ 297{
298 union acpi_operand_object **element_ptr; 298 union acpi_operand_object **element_ptr;
299 299
300 ACPI_FUNCTION_TRACE(ds_init_package_element);
301
300 if (!source_object) { 302 if (!source_object) {
301 return (AE_OK); 303 return_ACPI_STATUS(AE_OK);
302 } 304 }
303 305
304 /* 306 /*
@@ -329,7 +331,7 @@ acpi_ds_init_package_element(u8 object_type,
329 source_object->package.flags |= AOPOBJ_DATA_VALID; 331 source_object->package.flags |= AOPOBJ_DATA_VALID;
330 } 332 }
331 333
332 return (AE_OK); 334 return_ACPI_STATUS(AE_OK);
333} 335}
334 336
335/******************************************************************************* 337/*******************************************************************************
@@ -352,6 +354,7 @@ acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
352 union acpi_generic_state scope_info; 354 union acpi_generic_state scope_info;
353 union acpi_operand_object *element = *element_ptr; 355 union acpi_operand_object *element = *element_ptr;
354 struct acpi_namespace_node *resolved_node; 356 struct acpi_namespace_node *resolved_node;
357 struct acpi_namespace_node *original_node;
355 char *external_path = NULL; 358 char *external_path = NULL;
356 acpi_object_type type; 359 acpi_object_type type;
357 360
@@ -441,6 +444,7 @@ acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
441 * will remain as named references. This behavior is not described 444 * will remain as named references. This behavior is not described
442 * in the ACPI spec, but it appears to be an oversight. 445 * in the ACPI spec, but it appears to be an oversight.
443 */ 446 */
447 original_node = resolved_node;
444 status = acpi_ex_resolve_node_to_value(&resolved_node, NULL); 448 status = acpi_ex_resolve_node_to_value(&resolved_node, NULL);
445 if (ACPI_FAILURE(status)) { 449 if (ACPI_FAILURE(status)) {
446 return_VOID; 450 return_VOID;
@@ -468,26 +472,27 @@ acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
468 */ 472 */
469 case ACPI_TYPE_DEVICE: 473 case ACPI_TYPE_DEVICE:
470 case ACPI_TYPE_THERMAL: 474 case ACPI_TYPE_THERMAL:
471 475 case ACPI_TYPE_METHOD:
472 /* TBD: This may not be necesssary */
473
474 acpi_ut_add_reference(resolved_node->object);
475 break; 476 break;
476 477
477 case ACPI_TYPE_MUTEX: 478 case ACPI_TYPE_MUTEX:
478 case ACPI_TYPE_METHOD:
479 case ACPI_TYPE_POWER: 479 case ACPI_TYPE_POWER:
480 case ACPI_TYPE_PROCESSOR: 480 case ACPI_TYPE_PROCESSOR:
481 case ACPI_TYPE_EVENT: 481 case ACPI_TYPE_EVENT:
482 case ACPI_TYPE_REGION: 482 case ACPI_TYPE_REGION:
483 483
484 /* acpi_ex_resolve_node_to_value gave these an extra reference */
485
486 acpi_ut_remove_reference(original_node->object);
484 break; 487 break;
485 488
486 default: 489 default:
487 /* 490 /*
488 * For all other types - the node was resolved to an actual 491 * For all other types - the node was resolved to an actual
489 * operand object with a value, return the object 492 * operand object with a value, return the object. Remove
493 * a reference on the existing object.
490 */ 494 */
495 acpi_ut_remove_reference(element);
491 *element_ptr = (union acpi_operand_object *)resolved_node; 496 *element_ptr = (union acpi_operand_object *)resolved_node;
492 break; 497 break;
493 } 498 }
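
The dspkginit hunks above are about keeping reference counts balanced when a package slot is repointed from a named-reference object to the object it resolves to. A minimal user-space sketch of that discipline (illustrative only; struct obj, obj_put and replace_element are stand-ins, not ACPICA interfaces):

#include <stdlib.h>

struct obj {
        int refcount;
};

static void obj_put(struct obj *o)
{
        if (--o->refcount == 0)
                free(o);
}

/* Repoint a package slot to an object that already carries the reference
 * returned by resolution, releasing the named-reference object it replaces,
 * in the spirit of the added acpi_ut_remove_reference() calls above. */
static void replace_element(struct obj **slot, struct obj *resolved_with_ref)
{
        obj_put(*slot);
        *slot = resolved_with_ref;
}

int main(void)
{
        struct obj *named = calloc(1, sizeof(*named));
        struct obj *resolved = calloc(1, sizeof(*resolved));
        struct obj *slot;

        if (!named || !resolved)
                return 1;

        named->refcount = 1;
        resolved->refcount = 1;         /* reference handed over by the resolver */

        slot = named;
        replace_element(&slot, resolved);
        obj_put(slot);                  /* final release */
        return 0;
}
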
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 0dabd9b95684..4c5faf629a83 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -583,7 +583,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
583 } 583 }
584 584
585 if (ACPI_FAILURE(status)) { 585 if (ACPI_FAILURE(status)) {
586 ACPI_ERROR_NAMESPACE(name_string, status); 586 ACPI_ERROR_NAMESPACE(walk_state->scope_info,
587 name_string, status);
587 } 588 }
588 } 589 }
589 590
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index eaa859a89702..5771e4e4a99a 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -207,7 +207,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
207 } 207 }
208#endif 208#endif
209 if (ACPI_FAILURE(status)) { 209 if (ACPI_FAILURE(status)) {
210 ACPI_ERROR_NAMESPACE(path, status); 210 ACPI_ERROR_NAMESPACE(walk_state->scope_info, path,
211 status);
211 return_ACPI_STATUS(status); 212 return_ACPI_STATUS(status);
212 } 213 }
213 214
@@ -375,7 +376,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
375 } 376 }
376 377
377 if (ACPI_FAILURE(status)) { 378 if (ACPI_FAILURE(status)) {
378 ACPI_ERROR_NAMESPACE(path, status); 379 ACPI_ERROR_NAMESPACE(walk_state->scope_info,
380 path, status);
379 return_ACPI_STATUS(status); 381 return_ACPI_STATUS(status);
380 } 382 }
381 } 383 }
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index aad83ef5a4ec..b3d0aaec8203 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -184,11 +184,14 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
184 if (status == AE_NOT_FOUND) { 184 if (status == AE_NOT_FOUND) {
185 status = AE_OK; 185 status = AE_OK;
186 } else { 186 } else {
187 ACPI_ERROR_NAMESPACE(buffer_ptr, 187 ACPI_ERROR_NAMESPACE(walk_state->
188 scope_info,
189 buffer_ptr,
188 status); 190 status);
189 } 191 }
190#else 192#else
191 ACPI_ERROR_NAMESPACE(buffer_ptr, status); 193 ACPI_ERROR_NAMESPACE(walk_state->scope_info,
194 buffer_ptr, status);
192#endif 195#endif
193 return_ACPI_STATUS(status); 196 return_ACPI_STATUS(status);
194 } 197 }
@@ -343,7 +346,8 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
343 } 346 }
344 347
345 if (ACPI_FAILURE(status)) { 348 if (ACPI_FAILURE(status)) {
346 ACPI_ERROR_NAMESPACE(buffer_ptr, status); 349 ACPI_ERROR_NAMESPACE(walk_state->scope_info,
350 buffer_ptr, status);
347 return_ACPI_STATUS(status); 351 return_ACPI_STATUS(status);
348 } 352 }
349 353
@@ -719,7 +723,8 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
719 */ 723 */
720 op->common.node = new_node; 724 op->common.node = new_node;
721 } else { 725 } else {
722 ACPI_ERROR_NAMESPACE(arg->common.value.string, status); 726 ACPI_ERROR_NAMESPACE(walk_state->scope_info,
727 arg->common.value.string, status);
723 } 728 }
724 break; 729 break;
725 730
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 28b447ff92df..bb58419f0d61 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -298,6 +298,16 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
298 ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]", 298 ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]",
299 acpi_ut_get_region_name(region_obj->region. 299 acpi_ut_get_region_name(region_obj->region.
300 space_id))); 300 space_id)));
301
302 /*
303 * Special case for an EC timeout. These are seen so frequently
304 * that an additional error message is helpful
305 */
306 if ((region_obj->region.space_id == ACPI_ADR_SPACE_EC) &&
307 (status == AE_TIME)) {
308 ACPI_ERROR((AE_INFO,
309 "Timeout from EC hardware or EC device driver"));
310 }
301 } 311 }
302 312
303 if (!(handler_desc->address_space.handler_flags & 313 if (!(handler_desc->address_space.handler_flags &
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 83398dc4b7c2..b2ff61bdb9a8 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -617,10 +617,11 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
617 u32 length; 617 u32 length;
618 u32 index; 618 u32 index;
619 619
620 ACPI_FUNCTION_NAME(ex_dump_operand) 620 ACPI_FUNCTION_NAME(ex_dump_operand);
621 621
622 /* Check if debug output enabled */ 622 /* Check if debug output enabled */
623 if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_EXEC, _COMPONENT)) { 623
624 if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_EXEC, _COMPONENT)) {
624 return; 625 return;
625 } 626 }
626 627
@@ -904,7 +905,7 @@ void
904acpi_ex_dump_operands(union acpi_operand_object **operands, 905acpi_ex_dump_operands(union acpi_operand_object **operands,
905 const char *opcode_name, u32 num_operands) 906 const char *opcode_name, u32 num_operands)
906{ 907{
907 ACPI_FUNCTION_NAME(ex_dump_operands); 908 ACPI_FUNCTION_TRACE(ex_dump_operands);
908 909
909 if (!opcode_name) { 910 if (!opcode_name) {
910 opcode_name = "UNKNOWN"; 911 opcode_name = "UNKNOWN";
@@ -928,7 +929,7 @@ acpi_ex_dump_operands(union acpi_operand_object **operands,
928 929
929 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 930 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
930 "**** End operand dump for [%s]\n", opcode_name)); 931 "**** End operand dump for [%s]\n", opcode_name));
931 return; 932 return_VOID;
932} 933}
933 934
934/******************************************************************************* 935/*******************************************************************************
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index a2f4e25d45b1..5b4282902a83 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -150,10 +150,10 @@ ACPI_EXPORT_SYMBOL(acpi_get_timer)
150 * 150 *
151 ******************************************************************************/ 151 ******************************************************************************/
152acpi_status 152acpi_status
153acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed) 153acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 *time_elapsed)
154{ 154{
155 acpi_status status; 155 acpi_status status;
156 u32 delta_ticks; 156 u64 delta_ticks;
157 u64 quotient; 157 u64 quotient;
158 158
159 ACPI_FUNCTION_TRACE(acpi_get_timer_duration); 159 ACPI_FUNCTION_TRACE(acpi_get_timer_duration);
@@ -168,30 +168,29 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
168 return_ACPI_STATUS(AE_SUPPORT); 168 return_ACPI_STATUS(AE_SUPPORT);
169 } 169 }
170 170
171 if (start_ticks == end_ticks) {
172 *time_elapsed = 0;
173 return_ACPI_STATUS(AE_OK);
174 }
175
171 /* 176 /*
172 * Compute Tick Delta: 177 * Compute Tick Delta:
173 * Handle (max one) timer rollovers on 24-bit versus 32-bit timers. 178 * Handle (max one) timer rollovers on 24-bit versus 32-bit timers.
174 */ 179 */
175 if (start_ticks < end_ticks) { 180 delta_ticks = end_ticks;
176 delta_ticks = end_ticks - start_ticks; 181 if (start_ticks > end_ticks) {
177 } else if (start_ticks > end_ticks) {
178 if ((acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) == 0) { 182 if ((acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) == 0) {
179 183
180 /* 24-bit Timer */ 184 /* 24-bit Timer */
181 185
182 delta_ticks = 186 delta_ticks |= (u64)1 << 24;
183 (((0x00FFFFFF - start_ticks) +
184 end_ticks) & 0x00FFFFFF);
185 } else { 187 } else {
186 /* 32-bit Timer */ 188 /* 32-bit Timer */
187 189
188 delta_ticks = (0xFFFFFFFF - start_ticks) + end_ticks; 190 delta_ticks |= (u64)1 << 32;
189 } 191 }
190 } else { /* start_ticks == end_ticks */
191
192 *time_elapsed = 0;
193 return_ACPI_STATUS(AE_OK);
194 } 192 }
193 delta_ticks -= start_ticks;
195 194
196 /* 195 /*
197 * Compute Duration (Requires a 64-bit multiply and divide): 196 * Compute Duration (Requires a 64-bit multiply and divide):
@@ -199,10 +198,10 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
199 * time_elapsed (microseconds) = 198 * time_elapsed (microseconds) =
200 * (delta_ticks * ACPI_USEC_PER_SEC) / ACPI_PM_TIMER_FREQUENCY; 199 * (delta_ticks * ACPI_USEC_PER_SEC) / ACPI_PM_TIMER_FREQUENCY;
201 */ 200 */
202 status = acpi_ut_short_divide(((u64)delta_ticks) * ACPI_USEC_PER_SEC, 201 status = acpi_ut_short_divide(delta_ticks * ACPI_USEC_PER_SEC,
203 ACPI_PM_TIMER_FREQUENCY, &quotient, NULL); 202 ACPI_PM_TIMER_FREQUENCY, &quotient, NULL);
204 203
205 *time_elapsed = (u32) quotient; 204 *time_elapsed = (u32)quotient;
206 return_ACPI_STATUS(status); 205 return_ACPI_STATUS(status);
207} 206}
208 207
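
A standalone sketch of the reworked delta computation (not the kernel code itself): the tick values are widened to 64 bits and, when the counter wrapped, the single rollover bit for the timer width is set before subtracting the start value.

#include <stdint.h>
#include <stdio.h>

static uint64_t pm_timer_delta(uint32_t start, uint32_t end, int timer_is_32bit)
{
        uint64_t delta = end;

        /* At most one rollover: add 2^32 or 2^24 depending on the timer width */
        if (start > end)
                delta |= (uint64_t)1 << (timer_is_32bit ? 32 : 24);

        return delta - start;
}

int main(void)
{
        /* 24-bit timer that wrapped: 0xFFFFFE -> 0x000001 is 3 ticks */
        printf("%llu\n", (unsigned long long)pm_timer_delta(0xFFFFFE, 0x000001, 0));
        return 0;
}
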
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index 3094cec4eab4..d1679035d5f3 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -128,14 +128,14 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
128 acpi_io_address last_address; 128 acpi_io_address last_address;
129 const struct acpi_port_info *port_info; 129 const struct acpi_port_info *port_info;
130 130
131 ACPI_FUNCTION_NAME(hw_validate_io_request); 131 ACPI_FUNCTION_TRACE(hw_validate_io_request);
132 132
133 /* Supported widths are 8/16/32 */ 133 /* Supported widths are 8/16/32 */
134 134
135 if ((bit_width != 8) && (bit_width != 16) && (bit_width != 32)) { 135 if ((bit_width != 8) && (bit_width != 16) && (bit_width != 32)) {
136 ACPI_ERROR((AE_INFO, 136 ACPI_ERROR((AE_INFO,
137 "Bad BitWidth parameter: %8.8X", bit_width)); 137 "Bad BitWidth parameter: %8.8X", bit_width));
138 return (AE_BAD_PARAMETER); 138 return_ACPI_STATUS(AE_BAD_PARAMETER);
139 } 139 }
140 140
141 port_info = acpi_protected_ports; 141 port_info = acpi_protected_ports;
@@ -153,13 +153,13 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
153 ACPI_ERROR((AE_INFO, 153 ACPI_ERROR((AE_INFO,
154 "Illegal I/O port address/length above 64K: %8.8X%8.8X/0x%X", 154 "Illegal I/O port address/length above 64K: %8.8X%8.8X/0x%X",
155 ACPI_FORMAT_UINT64(address), byte_width)); 155 ACPI_FORMAT_UINT64(address), byte_width));
156 return (AE_LIMIT); 156 return_ACPI_STATUS(AE_LIMIT);
157 } 157 }
158 158
159 /* Exit if requested address is not within the protected port table */ 159 /* Exit if requested address is not within the protected port table */
160 160
161 if (address > acpi_protected_ports[ACPI_PORT_INFO_ENTRIES - 1].end) { 161 if (address > acpi_protected_ports[ACPI_PORT_INFO_ENTRIES - 1].end) {
162 return (AE_OK); 162 return_ACPI_STATUS(AE_OK);
163 } 163 }
164 164
165 /* Check request against the list of protected I/O ports */ 165 /* Check request against the list of protected I/O ports */
@@ -180,8 +180,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
180 /* Port illegality may depend on the _OSI calls made by the BIOS */ 180 /* Port illegality may depend on the _OSI calls made by the BIOS */
181 181
182 if (acpi_gbl_osi_data >= port_info->osi_dependency) { 182 if (acpi_gbl_osi_data >= port_info->osi_dependency) {
183 ACPI_DEBUG_PRINT((ACPI_DB_IO, 183 ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
184 "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)", 184 "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)\n",
185 ACPI_FORMAT_UINT64(address), 185 ACPI_FORMAT_UINT64(address),
186 byte_width, port_info->name, 186 byte_width, port_info->name,
187 port_info->start, 187 port_info->start,
@@ -198,7 +198,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
198 } 198 }
199 } 199 }
200 200
201 return (AE_OK); 201 return_ACPI_STATUS(AE_OK);
202} 202}
203 203
204/****************************************************************************** 204/******************************************************************************
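
The hwvalid hunks swap bare return() statements for return_ACPI_STATUS() so the entry trace added by ACPI_FUNCTION_TRACE is always paired with an exit trace. A simplified stand-in (these macros are not the real ACPICA ones) showing why the pairing matters for the nesting level that indents debug output:

#include <stdio.h>

static int nesting;

#define FUNCTION_TRACE(name) \
        do { printf("%*s%s: entry\n", nesting * 2, "", (name)); nesting++; } while (0)

#define RETURN_STATUS(name, s) \
        do { nesting--; printf("%*s%s: exit %d\n", nesting * 2, "", (name), (s)); return (s); } while (0)

static int validate_width(int bit_width)
{
        FUNCTION_TRACE("validate_width");

        /* A bare "return" here would leave the nesting level unbalanced */
        if (bit_width != 8 && bit_width != 16 && bit_width != 32)
                RETURN_STATUS("validate_width", -1);

        RETURN_STATUS("validate_width", 0);
}

int main(void)
{
        validate_width(12);
        validate_width(16);
        return 0;
}
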
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index f2733f51ca8d..33e652a12fca 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -644,17 +644,18 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
644 this_node->object; 644 this_node->object;
645 } 645 }
646 } 646 }
647#ifdef ACPI_ASL_COMPILER
648 if (!acpi_gbl_disasm_flag &&
649 (this_node->flags & ANOBJ_IS_EXTERNAL)) {
650 this_node->flags |= IMPLICIT_EXTERNAL;
651 }
652#endif
653 } 647 }
654 648
655 /* Special handling for the last segment (num_segments == 0) */ 649 /* Special handling for the last segment (num_segments == 0) */
656 650
657 else { 651 else {
652#ifdef ACPI_ASL_COMPILER
653 if (!acpi_gbl_disasm_flag
654 && (this_node->flags & ANOBJ_IS_EXTERNAL)) {
655 this_node->flags &= ~IMPLICIT_EXTERNAL;
656 }
657#endif
658
658 /* 659 /*
659 * Sanity typecheck of the target object: 660 * Sanity typecheck of the target object:
660 * 661 *
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index 539d775bbc92..d55dcc82f434 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -495,7 +495,8 @@ acpi_ns_convert_to_reference(struct acpi_namespace_node *scope,
495 495
496 /* Check if we are resolving a named reference within a package */ 496 /* Check if we are resolving a named reference within a package */
497 497
498 ACPI_ERROR_NAMESPACE(original_object->string.pointer, status); 498 ACPI_ERROR_NAMESPACE(&scope_info,
499 original_object->string.pointer, status);
499 goto error_exit; 500 goto error_exit;
500 } 501 }
501 502
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index a410760a0308..22c92d1a24d8 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -49,6 +49,9 @@
49#define _COMPONENT ACPI_NAMESPACE 49#define _COMPONENT ACPI_NAMESPACE
50ACPI_MODULE_NAME("nsnames") 50ACPI_MODULE_NAME("nsnames")
51 51
52/* Local Prototypes */
53static void acpi_ns_normalize_pathname(char *original_path);
54
52/******************************************************************************* 55/*******************************************************************************
53 * 56 *
54 * FUNCTION: acpi_ns_get_external_pathname 57 * FUNCTION: acpi_ns_get_external_pathname
@@ -63,6 +66,7 @@ ACPI_MODULE_NAME("nsnames")
63 * for error and debug statements. 66 * for error and debug statements.
64 * 67 *
65 ******************************************************************************/ 68 ******************************************************************************/
69
66char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node) 70char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
67{ 71{
68 char *name_buffer; 72 char *name_buffer;
@@ -352,3 +356,148 @@ char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
352 356
353 return_PTR(name_buffer); 357 return_PTR(name_buffer);
354} 358}
359
360/*******************************************************************************
361 *
362 * FUNCTION: acpi_ns_build_prefixed_pathname
363 *
364 * PARAMETERS: prefix_scope - Scope/Path that prefixes the internal path
365 * internal_path - Name or path of the namespace node
366 *
367 * RETURN: None
368 *
369 * DESCRIPTION: Construct a fully qualified pathname from a concatenation of:
370 * 1) Path associated with the prefix_scope namespace node
371 * 2) External path representation of the Internal path
372 *
373 ******************************************************************************/
374
375char *acpi_ns_build_prefixed_pathname(union acpi_generic_state *prefix_scope,
376 const char *internal_path)
377{
378 acpi_status status;
379 char *full_path = NULL;
380 char *external_path = NULL;
381 char *prefix_path = NULL;
382 u32 prefix_path_length = 0;
383
384 /* If there is a prefix, get the pathname to it */
385
386 if (prefix_scope && prefix_scope->scope.node) {
387 prefix_path =
388 acpi_ns_get_normalized_pathname(prefix_scope->scope.node,
389 TRUE);
390 if (prefix_path) {
391 prefix_path_length = strlen(prefix_path);
392 }
393 }
394
395 status = acpi_ns_externalize_name(ACPI_UINT32_MAX, internal_path,
396 NULL, &external_path);
397 if (ACPI_FAILURE(status)) {
398 goto cleanup;
399 }
400
401 /* Merge the prefix path and the path. 2 is for one dot and trailing null */
402
403 full_path =
404 ACPI_ALLOCATE_ZEROED(prefix_path_length + strlen(external_path) +
405 2);
406 if (!full_path) {
407 goto cleanup;
408 }
409
410 /* Don't merge if the External path is already fully qualified */
411
412 if (prefix_path && (*external_path != '\\') && (*external_path != '^')) {
413 strcat(full_path, prefix_path);
414 if (prefix_path[1]) {
415 strcat(full_path, ".");
416 }
417 }
418
419 acpi_ns_normalize_pathname(external_path);
420 strcat(full_path, external_path);
421
422cleanup:
423 if (prefix_path) {
424 ACPI_FREE(prefix_path);
425 }
426 if (external_path) {
427 ACPI_FREE(external_path);
428 }
429
430 return (full_path);
431}
432
433/*******************************************************************************
434 *
435 * FUNCTION: acpi_ns_normalize_pathname
436 *
437 * PARAMETERS: original_path - Path to be normalized, in External format
438 *
439 * RETURN: The original path is processed in-place
440 *
441 * DESCRIPTION: Remove trailing underscores from each element of a path.
442 *
443 * For example: \A___.B___.C___ becomes \A.B.C
444 *
445 ******************************************************************************/
446
447static void acpi_ns_normalize_pathname(char *original_path)
448{
449 char *input_path = original_path;
450 char *new_path_buffer;
451 char *new_path;
452 u32 i;
453
454 /* Allocate a temp buffer in which to construct the new path */
455
456 new_path_buffer = ACPI_ALLOCATE_ZEROED(strlen(input_path) + 1);
457 new_path = new_path_buffer;
458 if (!new_path_buffer) {
459 return;
460 }
461
462 /* Special characters may appear at the beginning of the path */
463
464 if (*input_path == '\\') {
465 *new_path = *input_path;
466 new_path++;
467 input_path++;
468 }
469
470 while (*input_path == '^') {
471 *new_path = *input_path;
472 new_path++;
473 input_path++;
474 }
475
476 /* Remainder of the path */
477
478 while (*input_path) {
479
480 /* Do one nameseg at a time */
481
482 for (i = 0; (i < ACPI_NAME_SIZE) && *input_path; i++) {
483 if ((i == 0) || (*input_path != '_')) { /* First char is allowed to be underscore */
484 *new_path = *input_path;
485 new_path++;
486 }
487
488 input_path++;
489 }
490
491 /* Dot means that there are more namesegs to come */
492
493 if (*input_path == '.') {
494 *new_path = *input_path;
495 new_path++;
496 input_path++;
497 }
498 }
499
500 *new_path = 0;
501 strcpy(original_path, new_path_buffer);
502 ACPI_FREE(new_path_buffer);
503}
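
A user-space sketch of the underscore trimming that acpi_ns_normalize_pathname performs (illustrative only; the real routine works on fixed 4-character namesegs in place and also copies leading '\' and '^' prefix characters, which this sketch sidesteps by seeding the output with the root prefix):

#include <stdio.h>
#include <string.h>

/* Trim the trailing '_' padding from one nameseg ("A___" -> "A", "____" -> "_"). */
static void trim_seg(char *seg)
{
        size_t len = strlen(seg);

        while (len > 1 && seg[len - 1] == '_')
                seg[--len] = '\0';
}

int main(void)
{
        char path[] = "A___.B___.C___";
        char out[64] = "\\";
        char *seg = strtok(path, ".");

        while (seg) {
                trim_seg(seg);
                strcat(out, seg);
                seg = strtok(NULL, ".");
                if (seg)
                        strcat(out, ".");
        }

        printf("%s\n", out);            /* prints \A.B.C */
        return 0;
}
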
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 5de8957f5ef0..e91dbee9235f 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -417,6 +417,7 @@ acpi_ns_search_and_enter(u32 target_name,
417 if (flags & ACPI_NS_EXTERNAL || 417 if (flags & ACPI_NS_EXTERNAL ||
418 (walk_state && walk_state->opcode == AML_SCOPE_OP)) { 418 (walk_state && walk_state->opcode == AML_SCOPE_OP)) {
419 new_node->flags |= ANOBJ_IS_EXTERNAL; 419 new_node->flags |= ANOBJ_IS_EXTERNAL;
420 new_node->flags |= IMPLICIT_EXTERNAL;
420 } 421 }
421#endif 422#endif
422 423
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index 783f4c838aee..9b51f65823b2 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -61,10 +61,10 @@ static void acpi_ns_resolve_references(struct acpi_evaluate_info *info);
61 * 61 *
62 * PARAMETERS: handle - Object handle (optional) 62 * PARAMETERS: handle - Object handle (optional)
63 * pathname - Object pathname (optional) 63 * pathname - Object pathname (optional)
64 * external_params - List of parameters to pass to method, 64 * external_params - List of parameters to pass to a method,
65 * terminated by NULL. May be NULL 65 * terminated by NULL. May be NULL
66 * if no parameters are being passed. 66 * if no parameters are being passed.
67 * return_buffer - Where to put method's return value (if 67 * return_buffer - Where to put the object's return value (if
68 * any). If NULL, no value is returned. 68 * any). If NULL, no value is returned.
69 * return_type - Expected type of return object 69 * return_type - Expected type of return object
70 * 70 *
@@ -100,13 +100,14 @@ acpi_evaluate_object_typed(acpi_handle handle,
100 free_buffer_on_error = TRUE; 100 free_buffer_on_error = TRUE;
101 } 101 }
102 102
103 /* Get a handle here, in order to build an error message if needed */
104
105 target_handle = handle;
103 if (pathname) { 106 if (pathname) {
104 status = acpi_get_handle(handle, pathname, &target_handle); 107 status = acpi_get_handle(handle, pathname, &target_handle);
105 if (ACPI_FAILURE(status)) { 108 if (ACPI_FAILURE(status)) {
106 return_ACPI_STATUS(status); 109 return_ACPI_STATUS(status);
107 } 110 }
108 } else {
109 target_handle = handle;
110 } 111 }
111 112
112 full_pathname = acpi_ns_get_external_pathname(target_handle); 113 full_pathname = acpi_ns_get_external_pathname(target_handle);
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index eb9dfaca555f..171e2faa7c50 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -361,7 +361,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
361 /* Final exception check (may have been changed from code above) */ 361 /* Final exception check (may have been changed from code above) */
362 362
363 if (ACPI_FAILURE(status)) { 363 if (ACPI_FAILURE(status)) {
364 ACPI_ERROR_NAMESPACE(path, status); 364 ACPI_ERROR_NAMESPACE(walk_state->scope_info, path, status);
365 365
366 if ((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) == 366 if ((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) ==
367 ACPI_PARSE_EXECUTE) { 367 ACPI_PARSE_EXECUTE) {
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index 0bef6df71bba..c0b179883ff2 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -372,16 +372,10 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
372 * external declaration opcode. Setting walk_state->Aml to 372 * external declaration opcode. Setting walk_state->Aml to
 373 * walk_state->parser_state.Aml + 2 increments the 373 * walk_state->parser_state.Aml + 2 increments the
374 * walk_state->Aml past the object type and the paramcount of the 374 * walk_state->Aml past the object type and the paramcount of the
375 * external opcode. For the error message, only print the AML 375 * external opcode.
376 * offset. We could attempt to print the name but this may cause
377 * a segmentation fault when printing the namepath because the
378 * AML may be incorrect.
379 */ 376 */
380 acpi_os_printf
381 ("// Invalid external declaration at AML offset 0x%x.\n",
382 walk_state->aml -
383 walk_state->parser_state.aml_start);
384 walk_state->aml = walk_state->parser_state.aml + 2; 377 walk_state->aml = walk_state->parser_state.aml + 2;
378 walk_state->parser_state.aml = walk_state->aml;
385 return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE); 379 return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
386 } 380 }
387#endif 381#endif
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 02642760cb93..cd59dfe6a47d 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -94,9 +94,11 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
94 op->common.descriptor_type = ACPI_DESC_TYPE_PARSER; 94 op->common.descriptor_type = ACPI_DESC_TYPE_PARSER;
95 op->common.aml_opcode = opcode; 95 op->common.aml_opcode = opcode;
96 96
97 ACPI_DISASM_ONLY_MEMBERS(strncpy(op->common.aml_op_name, 97 ACPI_DISASM_ONLY_MEMBERS(acpi_ut_safe_strncpy(op->common.aml_op_name,
98 (acpi_ps_get_opcode_info(opcode))-> 98 (acpi_ps_get_opcode_info
99 name, sizeof(op->common.aml_op_name))); 99 (opcode))->name,
100 sizeof(op->common.
101 aml_op_name)));
100} 102}
101 103
102/******************************************************************************* 104/*******************************************************************************
@@ -158,10 +160,10 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode, u8 *aml)
158 if (opcode == AML_SCOPE_OP) { 160 if (opcode == AML_SCOPE_OP) {
159 acpi_gbl_current_scope = op; 161 acpi_gbl_current_scope = op;
160 } 162 }
161 }
162 163
163 if (gbl_capture_comments) { 164 if (acpi_gbl_capture_comments) {
164 ASL_CV_TRANSFER_COMMENTS(op); 165 ASL_CV_TRANSFER_COMMENTS(op);
166 }
165 } 167 }
166 168
167 return (op); 169 return (op);
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 615a885e2ca3..cff7154b7fee 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -163,6 +163,9 @@ acpi_debug_print(u32 requested_debug_level,
163{ 163{
164 acpi_thread_id thread_id; 164 acpi_thread_id thread_id;
165 va_list args; 165 va_list args;
166#ifdef ACPI_APPLICATION
167 int fill_count;
168#endif
166 169
167 /* Check if debug output enabled */ 170 /* Check if debug output enabled */
168 171
@@ -202,10 +205,21 @@ acpi_debug_print(u32 requested_debug_level,
202 acpi_os_printf("[%u] ", (u32)thread_id); 205 acpi_os_printf("[%u] ", (u32)thread_id);
203 } 206 }
204 207
205 acpi_os_printf("[%02ld] ", acpi_gbl_nesting_level); 208 fill_count = 48 - acpi_gbl_nesting_level -
206#endif 209 strlen(acpi_ut_trim_function_name(function_name));
210 if (fill_count < 0) {
211 fill_count = 0;
212 }
213
214 acpi_os_printf("[%02ld] %*s",
215 acpi_gbl_nesting_level, acpi_gbl_nesting_level + 1, " ");
216 acpi_os_printf("%s%*s: ",
217 acpi_ut_trim_function_name(function_name), fill_count,
218 " ");
207 219
220#else
208 acpi_os_printf("%-22.22s: ", acpi_ut_trim_function_name(function_name)); 221 acpi_os_printf("%-22.22s: ", acpi_ut_trim_function_name(function_name));
222#endif
209 223
210 va_start(args, format); 224 va_start(args, format);
211 acpi_os_vprintf(format, args); 225 acpi_os_vprintf(format, args);
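
A small user-space stand-in for the new alignment logic in acpi_debug_print (not the kernel function): indent by the nesting level, then pad after the trimmed function name so the message text starts at a fixed column, exactly what the added fill_count computes.

#include <stdio.h>
#include <string.h>

static void debug_line(int nesting, const char *func, const char *msg)
{
        int fill = 48 - nesting - (int)strlen(func);

        if (fill < 0)
                fill = 0;

        printf("[%02d] %*s%s%*s: %s\n", nesting, nesting + 1, " ", func, fill, " ", msg);
}

int main(void)
{
        debug_line(1, "ns_lookup", "searching parent scope");
        debug_line(3, "ev_address_space_dispatch", "invoking handler");
        return 0;
}
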
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 02cd2c2d961a..55debbad487d 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -395,11 +395,6 @@ const char *acpi_ut_get_reference_name(union acpi_operand_object *object)
395 return (acpi_gbl_ref_class_names[object->reference.class]); 395 return (acpi_gbl_ref_class_names[object->reference.class]);
396} 396}
397 397
398#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
399/*
400 * Strings and procedures used for debug only
401 */
402
403/******************************************************************************* 398/*******************************************************************************
404 * 399 *
405 * FUNCTION: acpi_ut_get_mutex_name 400 * FUNCTION: acpi_ut_get_mutex_name
@@ -433,6 +428,12 @@ const char *acpi_ut_get_mutex_name(u32 mutex_id)
433 return (acpi_gbl_mutex_names[mutex_id]); 428 return (acpi_gbl_mutex_names[mutex_id]);
434} 429}
435 430
431#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
432
433/*
434 * Strings and procedures used for debug only
435 */
436
436/******************************************************************************* 437/*******************************************************************************
437 * 438 *
438 * FUNCTION: acpi_ut_get_notify_name 439 * FUNCTION: acpi_ut_get_notify_name
diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c
index e3368186e1c1..42388dcb5ccc 100644
--- a/drivers/acpi/acpica/uterror.c
+++ b/drivers/acpi/acpica/uterror.c
@@ -182,6 +182,78 @@ acpi_ut_predefined_bios_error(const char *module_name,
182 182
183/******************************************************************************* 183/*******************************************************************************
184 * 184 *
185 * FUNCTION: acpi_ut_prefixed_namespace_error
186 *
187 * PARAMETERS: module_name - Caller's module name (for error output)
188 * line_number - Caller's line number (for error output)
189 * prefix_scope - Scope/Path that prefixes the internal path
190 * internal_path - Name or path of the namespace node
191 * lookup_status - Exception code from NS lookup
192 *
193 * RETURN: None
194 *
195 * DESCRIPTION: Print error message with the full pathname constructed this way:
196 *
197 * prefix_scope_node_full_path.externalized_internal_path
198 *
199 * NOTE: 10/2017: Treat the major ns_lookup errors as firmware errors
200 *
201 ******************************************************************************/
202
203void
204acpi_ut_prefixed_namespace_error(const char *module_name,
205 u32 line_number,
206 union acpi_generic_state *prefix_scope,
207 const char *internal_path,
208 acpi_status lookup_status)
209{
210 char *full_path;
211 const char *message;
212
213 /*
214 * Main cases:
215 * 1) Object creation, object must not already exist
216 * 2) Object lookup, object must exist
217 */
218 switch (lookup_status) {
219 case AE_ALREADY_EXISTS:
220
221 acpi_os_printf(ACPI_MSG_BIOS_ERROR);
222 message = "Failure creating";
223 break;
224
225 case AE_NOT_FOUND:
226
227 acpi_os_printf(ACPI_MSG_BIOS_ERROR);
228 message = "Failure looking up";
229 break;
230
231 default:
232
233 acpi_os_printf(ACPI_MSG_ERROR);
234 message = "Failure looking up";
235 break;
236 }
237
238 /* Concatenate the prefix path and the internal path */
239
240 full_path =
241 acpi_ns_build_prefixed_pathname(prefix_scope, internal_path);
242
243 acpi_os_printf("%s [%s], %s", message,
244 full_path ? full_path : "Could not get pathname",
245 acpi_format_exception(lookup_status));
246
247 if (full_path) {
248 ACPI_FREE(full_path);
249 }
250
251 ACPI_MSG_SUFFIX;
252}
253
254#ifdef __OBSOLETE_FUNCTION
255/*******************************************************************************
256 *
185 * FUNCTION: acpi_ut_namespace_error 257 * FUNCTION: acpi_ut_namespace_error
186 * 258 *
187 * PARAMETERS: module_name - Caller's module name (for error output) 259 * PARAMETERS: module_name - Caller's module name (for error output)
@@ -240,6 +312,7 @@ acpi_ut_namespace_error(const char *module_name,
240 ACPI_MSG_SUFFIX; 312 ACPI_MSG_SUFFIX;
241 ACPI_MSG_REDIRECT_END; 313 ACPI_MSG_REDIRECT_END;
242} 314}
315#endif
243 316
244/******************************************************************************* 317/*******************************************************************************
245 * 318 *
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 23e766d1691d..45eeb0dcf283 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -206,7 +206,6 @@ acpi_status acpi_ut_init_globals(void)
206 acpi_gbl_next_owner_id_offset = 0; 206 acpi_gbl_next_owner_id_offset = 0;
207 acpi_gbl_debugger_configuration = DEBUGGER_THREADING; 207 acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
208 acpi_gbl_osi_mutex = NULL; 208 acpi_gbl_osi_mutex = NULL;
209 acpi_gbl_max_loop_iterations = ACPI_MAX_LOOP_COUNT;
210 209
211 /* Hardware oriented */ 210 /* Hardware oriented */
212 211
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index 5f9c680076c4..2055a858e5f5 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -134,7 +134,7 @@ acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result)
134 134
135 if ((count & 63) >= 32) { 135 if ((count & 63) >= 32) {
136 operand_ovl.part.hi = operand_ovl.part.lo; 136 operand_ovl.part.hi = operand_ovl.part.lo;
137 operand_ovl.part.lo ^= operand_ovl.part.lo; 137 operand_ovl.part.lo = 0;
138 count = (count & 63) - 32; 138 count = (count & 63) - 32;
139 } 139 }
140 ACPI_SHIFT_LEFT_64_BY_32(operand_ovl.part.hi, 140 ACPI_SHIFT_LEFT_64_BY_32(operand_ovl.part.hi,
@@ -171,7 +171,7 @@ acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result)
171 171
172 if ((count & 63) >= 32) { 172 if ((count & 63) >= 32) {
173 operand_ovl.part.lo = operand_ovl.part.hi; 173 operand_ovl.part.lo = operand_ovl.part.hi;
174 operand_ovl.part.hi ^= operand_ovl.part.hi; 174 operand_ovl.part.hi = 0;
175 count = (count & 63) - 32; 175 count = (count & 63) - 32;
176 } 176 }
177 ACPI_SHIFT_RIGHT_64_BY_32(operand_ovl.part.hi, 177 ACPI_SHIFT_RIGHT_64_BY_32(operand_ovl.part.hi,
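
The utmath hunks only replace the self-XOR idiom with a plain assignment. For context, a sketch of the surrounding technique, a 64-bit left shift built from 32-bit halves for platforms without native 64-bit shifts (illustrative only, not the ACPI_SHIFT_* macros):

#include <stdint.h>
#include <stdio.h>

struct u64_parts {
        uint32_t lo;
        uint32_t hi;
};

/* For counts >= 32 the low half moves into the high half and the low half
 * is simply cleared (the plain "= 0" the patch now uses instead of "^="). */
static void shift_left_64(struct u64_parts *v, unsigned int count)
{
        count &= 63;

        if (count >= 32) {
                v->hi = v->lo;
                v->lo = 0;
                count -= 32;
        }
        if (count) {
                v->hi = (v->hi << count) | (v->lo >> (32 - count));
                v->lo <<= count;
        }
}

int main(void)
{
        struct u64_parts v = { .lo = 1, .hi = 0 };

        shift_left_64(&v, 33);
        printf("hi=0x%x lo=0x%x\n", v.hi, v.lo);        /* hi=0x2 lo=0x0 */
        return 0;
}
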
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 586354788018..524ba931d5e8 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -286,8 +286,9 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
286 acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id; 286 acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id;
287 } else { 287 } else {
288 ACPI_EXCEPTION((AE_INFO, status, 288 ACPI_EXCEPTION((AE_INFO, status,
289 "Thread %u could not acquire Mutex [0x%X]", 289 "Thread %u could not acquire Mutex [%s] (0x%X)",
290 (u32)this_thread_id, mutex_id)); 290 (u32)this_thread_id,
291 acpi_ut_get_mutex_name(mutex_id), mutex_id));
291 } 292 }
292 293
293 return (status); 294 return (status);
@@ -322,8 +323,8 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
322 */ 323 */
323 if (acpi_gbl_mutex_info[mutex_id].thread_id == ACPI_MUTEX_NOT_ACQUIRED) { 324 if (acpi_gbl_mutex_info[mutex_id].thread_id == ACPI_MUTEX_NOT_ACQUIRED) {
324 ACPI_ERROR((AE_INFO, 325 ACPI_ERROR((AE_INFO,
325 "Mutex [0x%X] is not acquired, cannot release", 326 "Mutex [%s] (0x%X) is not acquired, cannot release",
326 mutex_id)); 327 acpi_ut_get_mutex_name(mutex_id), mutex_id));
327 328
328 return (AE_NOT_ACQUIRED); 329 return (AE_NOT_ACQUIRED);
329 } 330 }
diff --git a/drivers/acpi/acpica/utnonansi.c b/drivers/acpi/acpica/utnonansi.c
index 792664982ea3..33a0970646df 100644
--- a/drivers/acpi/acpica/utnonansi.c
+++ b/drivers/acpi/acpica/utnonansi.c
@@ -140,7 +140,7 @@ int acpi_ut_stricmp(char *string1, char *string2)
140 return (c1 - c2); 140 return (c1 - c2);
141} 141}
142 142
143#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION) 143#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION) || defined (ACPI_DEBUG_OUTPUT)
144/******************************************************************************* 144/*******************************************************************************
145 * 145 *
146 * FUNCTION: acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat 146 * FUNCTION: acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
@@ -199,4 +199,13 @@ acpi_ut_safe_strncat(char *dest,
199 strncat(dest, source, max_transfer_length); 199 strncat(dest, source, max_transfer_length);
200 return (FALSE); 200 return (FALSE);
201} 201}
202
203void acpi_ut_safe_strncpy(char *dest, char *source, acpi_size dest_size)
204{
205 /* Always terminate destination string */
206
207 strncpy(dest, source, dest_size);
208 dest[dest_size - 1] = 0;
209}
210
202#endif 211#endif
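
The new acpi_ut_safe_strncpy helper exists because strncpy() alone does not guarantee termination when the source is longer than the destination. A minimal user-space equivalent with a usage example (sketch, not the ACPICA build):

#include <stdio.h>
#include <string.h>

/* Copy at most dest_size bytes and always NUL-terminate, even on truncation. */
static void safe_strncpy(char *dest, const char *source, size_t dest_size)
{
        strncpy(dest, source, dest_size);
        dest[dest_size - 1] = '\0';
}

int main(void)
{
        char buf[8];

        safe_strncpy(buf, "acpi_ns_search_and_enter", sizeof(buf));
        printf("%s\n", buf);            /* "acpi_ns": truncated but terminated */
        return 0;
}
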
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 3175b133c0e4..f6b8dd24b006 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -101,6 +101,8 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
101 {"Windows 2012", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8 and Server 2012 - Added 08/2012 */ 101 {"Windows 2012", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8 and Server 2012 - Added 08/2012 */
102 {"Windows 2013", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8.1 and Server 2012 R2 - Added 01/2014 */ 102 {"Windows 2013", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8.1 and Server 2012 R2 - Added 01/2014 */
103 {"Windows 2015", NULL, 0, ACPI_OSI_WIN_10}, /* Windows 10 - Added 03/2015 */ 103 {"Windows 2015", NULL, 0, ACPI_OSI_WIN_10}, /* Windows 10 - Added 03/2015 */
104 {"Windows 2016", NULL, 0, ACPI_OSI_WIN_10_RS1}, /* Windows 10 version 1607 - Added 12/2017 */
105 {"Windows 2017", NULL, 0, ACPI_OSI_WIN_10_RS2}, /* Windows 10 version 1703 - Added 12/2017 */
104 106
105 /* Feature Group Strings */ 107 /* Feature Group Strings */
106 108
diff --git a/drivers/acpi/acpica/utstrsuppt.c b/drivers/acpi/acpica/utstrsuppt.c
index 965fb5cec94f..97f48d71f9e6 100644
--- a/drivers/acpi/acpica/utstrsuppt.c
+++ b/drivers/acpi/acpica/utstrsuppt.c
@@ -52,10 +52,9 @@ static acpi_status
52acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit); 52acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit);
53 53
54static acpi_status 54static acpi_status
55acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product); 55acpi_ut_strtoul_multiply64(u64 multiplicand, u32 base, u64 *out_product);
56 56
57static acpi_status 57static acpi_status acpi_ut_strtoul_add64(u64 addend1, u32 digit, u64 *out_sum);
58acpi_ut_strtoul_add64(u64 addend1, u64 addend2, u64 *out_sum);
59 58
60/******************************************************************************* 59/*******************************************************************************
61 * 60 *
@@ -357,7 +356,7 @@ acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit)
357 * FUNCTION: acpi_ut_strtoul_multiply64 356 * FUNCTION: acpi_ut_strtoul_multiply64
358 * 357 *
359 * PARAMETERS: multiplicand - Current accumulated converted integer 358 * PARAMETERS: multiplicand - Current accumulated converted integer
360 * multiplier - Base/Radix 359 * base - Base/Radix
361 * out_product - Where the product is returned 360 * out_product - Where the product is returned
362 * 361 *
363 * RETURN: Status and 64-bit product 362 * RETURN: Status and 64-bit product
@@ -369,33 +368,40 @@ acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit)
369 ******************************************************************************/ 368 ******************************************************************************/
370 369
371static acpi_status 370static acpi_status
372acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product) 371acpi_ut_strtoul_multiply64(u64 multiplicand, u32 base, u64 *out_product)
373{ 372{
374 u64 val; 373 u64 product;
374 u64 quotient;
375 375
376 /* Exit if either operand is zero */ 376 /* Exit if either operand is zero */
377 377
378 *out_product = 0; 378 *out_product = 0;
379 if (!multiplicand || !multiplier) { 379 if (!multiplicand || !base) {
380 return (AE_OK); 380 return (AE_OK);
381 } 381 }
382 382
383 /* Check for 64-bit overflow before the actual multiplication */ 383 /*
384 384 * Check for 64-bit overflow before the actual multiplication.
385 acpi_ut_short_divide(ACPI_UINT64_MAX, (u32)multiplier, &val, NULL); 385 *
386 if (multiplicand > val) { 386 * Notes: 64-bit division is often not supported on 32-bit platforms
387 * (it requires a library function), Therefore ACPICA has a local
388 * 64-bit divide function. Also, Multiplier is currently only used
389 * as the radix (8/10/16), to the 64/32 divide will always work.
390 */
391 acpi_ut_short_divide(ACPI_UINT64_MAX, base, &quotient, NULL);
392 if (multiplicand > quotient) {
387 return (AE_NUMERIC_OVERFLOW); 393 return (AE_NUMERIC_OVERFLOW);
388 } 394 }
389 395
390 val = multiplicand * multiplier; 396 product = multiplicand * base;
391 397
392 /* Check for 32-bit overflow if necessary */ 398 /* Check for 32-bit overflow if necessary */
393 399
394 if ((acpi_gbl_integer_bit_width == 32) && (val > ACPI_UINT32_MAX)) { 400 if ((acpi_gbl_integer_bit_width == 32) && (product > ACPI_UINT32_MAX)) {
395 return (AE_NUMERIC_OVERFLOW); 401 return (AE_NUMERIC_OVERFLOW);
396 } 402 }
397 403
398 *out_product = val; 404 *out_product = product;
399 return (AE_OK); 405 return (AE_OK);
400} 406}
401 407
@@ -404,7 +410,7 @@ acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product)
404 * FUNCTION: acpi_ut_strtoul_add64 410 * FUNCTION: acpi_ut_strtoul_add64
405 * 411 *
406 * PARAMETERS: addend1 - Current accumulated converted integer 412 * PARAMETERS: addend1 - Current accumulated converted integer
407 * addend2 - New hex value/char 413 * digit - New hex value/char
408 * out_sum - Where sum is returned (Accumulator) 414 * out_sum - Where sum is returned (Accumulator)
409 * 415 *
410 * RETURN: Status and 64-bit sum 416 * RETURN: Status and 64-bit sum
@@ -415,17 +421,17 @@ acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product)
415 * 421 *
416 ******************************************************************************/ 422 ******************************************************************************/
417 423
418static acpi_status acpi_ut_strtoul_add64(u64 addend1, u64 addend2, u64 *out_sum) 424static acpi_status acpi_ut_strtoul_add64(u64 addend1, u32 digit, u64 *out_sum)
419{ 425{
420 u64 sum; 426 u64 sum;
421 427
422 /* Check for 64-bit overflow before the actual addition */ 428 /* Check for 64-bit overflow before the actual addition */
423 429
424 if ((addend1 > 0) && (addend2 > (ACPI_UINT64_MAX - addend1))) { 430 if ((addend1 > 0) && (digit > (ACPI_UINT64_MAX - addend1))) {
425 return (AE_NUMERIC_OVERFLOW); 431 return (AE_NUMERIC_OVERFLOW);
426 } 432 }
427 433
428 sum = addend1 + addend2; 434 sum = addend1 + digit;
429 435
430 /* Check for 32-bit overflow if necessary */ 436 /* Check for 32-bit overflow if necessary */
431 437
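
The renamed parameters make the intent of these checks clearer: the multiplier is always the radix and the addend is always a single converted digit. A self-contained sketch of the same overflow-checked accumulate step, "value = value * base + digit" (not the ACPICA code):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* base is the radix (8, 10 or 16); digit has already been validated. */
static bool accumulate(uint64_t *value, uint32_t base, uint32_t digit)
{
        if (base == 0 || *value > UINT64_MAX / base)
                return false;                   /* multiply would overflow */
        *value *= base;

        if (digit > UINT64_MAX - *value)
                return false;                   /* add would overflow */
        *value += digit;

        return true;
}

int main(void)
{
        uint64_t value = 0;

        accumulate(&value, 16, 0x1);            /* '1' */
        accumulate(&value, 16, 0xA);            /* 'A' */
        printf("%llu\n", (unsigned long long)value);    /* 26 */
        return 0;
}
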
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 3c8de88ecbd5..633b4e2c669f 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -402,8 +402,8 @@ acpi_ut_track_allocation(struct acpi_debug_mem_block *allocation,
402 allocation->component = component; 402 allocation->component = component;
403 allocation->line = line; 403 allocation->line = line;
404 404
405 strncpy(allocation->module, module, ACPI_MAX_MODULE_NAME); 405 acpi_ut_safe_strncpy(allocation->module, (char *)module,
406 allocation->module[ACPI_MAX_MODULE_NAME - 1] = 0; 406 ACPI_MAX_MODULE_NAME);
407 407
408 if (!element) { 408 if (!element) {
409 409
@@ -717,7 +717,7 @@ exit:
717 if (!num_outstanding) { 717 if (!num_outstanding) {
718 ACPI_INFO(("No outstanding allocations")); 718 ACPI_INFO(("No outstanding allocations"));
719 } else { 719 } else {
720 ACPI_ERROR((AE_INFO, "%u(0x%X) Outstanding allocations", 720 ACPI_ERROR((AE_INFO, "%u (0x%X) Outstanding cache allocations",
721 num_outstanding, num_outstanding)); 721 num_outstanding, num_outstanding));
722 } 722 }
723 723
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 950a1e500bfa..9da4f8ef2e77 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -96,8 +96,8 @@ ACPI_EXPORT_SYMBOL(acpi_error)
96 * 96 *
97 * RETURN: None 97 * RETURN: None
98 * 98 *
99 * DESCRIPTION: Print "ACPI Exception" message with module/line/version info 99 * DESCRIPTION: Print an "ACPI Error" message with module/line/version
100 * and decoded acpi_status. 100 * info as well as decoded acpi_status.
101 * 101 *
102 ******************************************************************************/ 102 ******************************************************************************/
103void ACPI_INTERNAL_VAR_XFACE 103void ACPI_INTERNAL_VAR_XFACE
@@ -111,10 +111,10 @@ acpi_exception(const char *module_name,
111 /* For AE_OK, just print the message */ 111 /* For AE_OK, just print the message */
112 112
113 if (ACPI_SUCCESS(status)) { 113 if (ACPI_SUCCESS(status)) {
114 acpi_os_printf(ACPI_MSG_EXCEPTION); 114 acpi_os_printf(ACPI_MSG_ERROR);
115 115
116 } else { 116 } else {
117 acpi_os_printf(ACPI_MSG_EXCEPTION "%s, ", 117 acpi_os_printf(ACPI_MSG_ERROR "%s, ",
118 acpi_format_exception(status)); 118 acpi_format_exception(status));
119 } 119 }
120 120
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 6402f7fad3bb..16c4a10b7506 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -414,6 +414,51 @@ static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int
414#endif 414#endif
415} 415}
416 416
417/*
418 * PCIe AER errors need to be sent to the AER driver for reporting and
419 * recovery. The GHES severities map to the following AER severities and
420 * require the following handling:
421 *
422 * GHES_SEV_CORRECTABLE -> AER_CORRECTABLE
423 * These need to be reported by the AER driver but no recovery is
424 * necessary.
425 * GHES_SEV_RECOVERABLE -> AER_NONFATAL
426 * GHES_SEV_RECOVERABLE && CPER_SEC_RESET -> AER_FATAL
427 * These both need to be reported and recovered from by the AER driver.
428 * GHES_SEV_PANIC does not make it to this handling since the kernel must
429 * panic.
430 */
431static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
432{
433#ifdef CONFIG_ACPI_APEI_PCIEAER
434 struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);
435
436 if (pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
437 pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
438 unsigned int devfn;
439 int aer_severity;
440
441 devfn = PCI_DEVFN(pcie_err->device_id.device,
442 pcie_err->device_id.function);
443 aer_severity = cper_severity_to_aer(gdata->error_severity);
444
445 /*
446 * If firmware reset the component to contain
447 * the error, we must reinitialize it before
448 * use, so treat it as a fatal AER error.
449 */
450 if (gdata->flags & CPER_SEC_RESET)
451 aer_severity = AER_FATAL;
452
453 aer_recover_queue(pcie_err->device_id.segment,
454 pcie_err->device_id.bus,
455 devfn, aer_severity,
456 (struct aer_capability_regs *)
457 pcie_err->aer_info);
458 }
459#endif
460}
461
417static void ghes_do_proc(struct ghes *ghes, 462static void ghes_do_proc(struct ghes *ghes,
418 const struct acpi_hest_generic_status *estatus) 463 const struct acpi_hest_generic_status *estatus)
419{ 464{
@@ -441,38 +486,9 @@ static void ghes_do_proc(struct ghes *ghes,
441 arch_apei_report_mem_error(sev, mem_err); 486 arch_apei_report_mem_error(sev, mem_err);
442 ghes_handle_memory_failure(gdata, sev); 487 ghes_handle_memory_failure(gdata, sev);
443 } 488 }
444#ifdef CONFIG_ACPI_APEI_PCIEAER
445 else if (guid_equal(sec_type, &CPER_SEC_PCIE)) { 489 else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
446 struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata); 490 ghes_handle_aer(gdata);
447
448 if (sev == GHES_SEV_RECOVERABLE &&
449 sec_sev == GHES_SEV_RECOVERABLE &&
450 pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
451 pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
452 unsigned int devfn;
453 int aer_severity;
454
455 devfn = PCI_DEVFN(pcie_err->device_id.device,
456 pcie_err->device_id.function);
457 aer_severity = cper_severity_to_aer(gdata->error_severity);
458
459 /*
460 * If firmware reset the component to contain
461 * the error, we must reinitialize it before
462 * use, so treat it as a fatal AER error.
463 */
464 if (gdata->flags & CPER_SEC_RESET)
465 aer_severity = AER_FATAL;
466
467 aer_recover_queue(pcie_err->device_id.segment,
468 pcie_err->device_id.bus,
469 devfn, aer_severity,
470 (struct aer_capability_regs *)
471 pcie_err->aer_info);
472 }
473
474 } 491 }
475#endif
476 else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) { 492 else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
477 struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata); 493 struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
478 494
@@ -870,7 +886,6 @@ static void ghes_print_queued_estatus(void)
870 struct ghes_estatus_node *estatus_node; 886 struct ghes_estatus_node *estatus_node;
871 struct acpi_hest_generic *generic; 887 struct acpi_hest_generic *generic;
872 struct acpi_hest_generic_status *estatus; 888 struct acpi_hest_generic_status *estatus;
873 u32 len, node_len;
874 889
875 llnode = llist_del_all(&ghes_estatus_llist); 890 llnode = llist_del_all(&ghes_estatus_llist);
876 /* 891 /*
@@ -882,8 +897,6 @@ static void ghes_print_queued_estatus(void)
882 estatus_node = llist_entry(llnode, struct ghes_estatus_node, 897 estatus_node = llist_entry(llnode, struct ghes_estatus_node,
883 llnode); 898 llnode);
884 estatus = GHES_ESTATUS_FROM_NODE(estatus_node); 899 estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
885 len = cper_estatus_len(estatus);
886 node_len = GHES_ESTATUS_NODE_LEN(len);
887 generic = estatus_node->generic; 900 generic = estatus_node->generic;
888 ghes_print_estatus(NULL, generic, estatus); 901 ghes_print_estatus(NULL, generic, estatus);
889 llnode = llnode->next; 902 llnode = llnode->next;
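
A self-contained sketch of the severity mapping the new ghes_handle_aer() comment describes; the enum names below are stand-ins, not the kernel's definitions, and the real code derives the CPER_SEC_RESET case from gdata->flags.

#include <stdbool.h>

enum ghes_sev { GHES_CORRECTABLE, GHES_RECOVERABLE, GHES_PANIC };
enum aer_sev  { AER_CORRECTABLE, AER_NONFATAL, AER_FATAL };

static enum aer_sev map_severity(enum ghes_sev sev, bool firmware_reset_section)
{
        if (sev == GHES_CORRECTABLE)
                return AER_CORRECTABLE;         /* report only, nothing to recover */
        if (firmware_reset_section)
                return AER_FATAL;               /* device was reset, must be reinitialized */
        return AER_NONFATAL;                    /* recoverable through the AER driver */
}

int main(void)
{
        return map_severity(GHES_RECOVERABLE, true) == AER_FATAL ? 0 : 1;
}
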
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 13e7b56e33ae..19bc440820e6 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -70,6 +70,7 @@ static async_cookie_t async_cookie;
 static bool battery_driver_registered;
 static int battery_bix_broken_package;
 static int battery_notification_delay_ms;
+static int battery_full_discharging;
 static unsigned int cache_time = 1000;
 module_param(cache_time, uint, 0644);
 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -214,9 +215,12 @@ static int acpi_battery_get_property(struct power_supply *psy,
 		return -ENODEV;
 	switch (psp) {
 	case POWER_SUPPLY_PROP_STATUS:
-		if (battery->state & ACPI_BATTERY_STATE_DISCHARGING)
-			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
-		else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
+		if (battery->state & ACPI_BATTERY_STATE_DISCHARGING) {
+			if (battery_full_discharging && battery->rate_now == 0)
+				val->intval = POWER_SUPPLY_STATUS_FULL;
+			else
+				val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+		} else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
 			val->intval = POWER_SUPPLY_STATUS_CHARGING;
 		else if (acpi_battery_is_charged(battery))
 			val->intval = POWER_SUPPLY_STATUS_FULL;
@@ -1166,6 +1170,12 @@ battery_notification_delay_quirk(const struct dmi_system_id *d)
 	return 0;
 }
 
+static int __init battery_full_discharging_quirk(const struct dmi_system_id *d)
+{
+	battery_full_discharging = 1;
+	return 0;
+}
+
 static const struct dmi_system_id bat_dmi_table[] __initconst = {
 	{
 		.callback = battery_bix_broken_package_quirk,
@@ -1183,6 +1193,22 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"),
 		},
 	},
+	{
+		.callback = battery_full_discharging_quirk,
+		.ident = "ASUS GL502VSK",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "GL502VSK"),
+		},
+	},
+	{
+		.callback = battery_full_discharging_quirk,
+		.ident = "ASUS UX305LA",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "UX305LA"),
+		},
+	},
 	{},
 };
 
@@ -1237,13 +1263,11 @@ static int acpi_battery_add(struct acpi_device *device)
 
 #ifdef CONFIG_ACPI_PROCFS_POWER
 	result = acpi_battery_add_fs(device);
-#endif
 	if (result) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
 		acpi_battery_remove_fs(device);
-#endif
 		goto fail;
 	}
+#endif
 
 	printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
 		ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
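The battery change above only reports STATUS_FULL when a module-level flag has been set by a matching DMI quirk. As a minimal sketch (not taken from this patch; the init function name is illustrative), such a quirk table is applied once at driver init, which runs every .callback whose .matches entries all match the host's DMI data:

/* Sketch only: dmi_check_system() and DMI quirk tables are real kernel APIs;
 * example_battery_init() is a hypothetical caller for illustration.
 */
static int __init example_battery_init(void)
{
	dmi_check_system(bat_dmi_table);	/* sets battery_full_discharging on quirky machines */
	return 0;
}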
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index bf8e4d371fa7..e1eee7a60fad 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -30,6 +30,7 @@
 #include <linux/input.h>
 #include <linux/slab.h>
 #include <linux/acpi.h>
+#include <linux/dmi.h>
 #include <acpi/button.h>
 
 #define PREFIX "ACPI: "
@@ -76,6 +77,22 @@ static const struct acpi_device_id button_device_ids[] = {
 };
 MODULE_DEVICE_TABLE(acpi, button_device_ids);
 
+/*
+ * Some devices which don't even have a lid in anyway have a broken _LID
+ * method (e.g. pointing to a floating gpio pin) causing spurious LID events.
+ */
+static const struct dmi_system_id lid_blacklst[] = {
+	{
+		/* GP-electronic T701 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "T701"),
+			DMI_MATCH(DMI_BIOS_VERSION, "BYT70A.YNCHENG.WIN.007"),
+		},
+	},
+	{}
+};
+
 static int acpi_button_add(struct acpi_device *device);
 static int acpi_button_remove(struct acpi_device *device);
 static void acpi_button_notify(struct acpi_device *device, u32 event);
@@ -210,6 +227,8 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
 	}
 	/* Send the platform triggered reliable event */
 	if (do_update) {
+		acpi_handle_debug(device->handle, "ACPI LID %s\n",
+				  state ? "open" : "closed");
 		input_report_switch(button->input, SW_LID, !state);
 		input_sync(button->input);
 		button->last_state = !!state;
@@ -473,6 +492,9 @@ static int acpi_button_add(struct acpi_device *device)
 	char *name, *class;
 	int error;
 
+	if (!strcmp(hid, ACPI_BUTTON_HID_LID) && dmi_check_system(lid_blacklst))
+		return -ENODEV;
+
 	button = kzalloc(sizeof(struct acpi_button), GFP_KERNEL);
 	if (!button)
 		return -ENOMEM;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 0252c9b9af3d..d9f38c645e4a 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1516,7 +1516,7 @@ static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events)
 	}
 
 	acpi_handle_info(ec->handle,
-			 "GPE=0x%lx, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
+			 "GPE=0x%x, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
 			 ec->gpe, ec->command_addr, ec->data_addr);
 	return ret;
 }
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index 6c7dd7af789e..dd70d6c2bca0 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -128,7 +128,7 @@ static int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count)
 		return -ENOMEM;
 	}
 
-	if (!debugfs_create_x32("gpe", 0444, dev_dir, (u32 *)&first_ec->gpe))
+	if (!debugfs_create_x32("gpe", 0444, dev_dir, &first_ec->gpe))
 		goto error;
 	if (!debugfs_create_bool("use_global_lock", 0444, dev_dir,
 				 &first_ec->global_lock))
diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
index 46f060356a22..f13ba2c07667 100644
--- a/drivers/acpi/evged.c
+++ b/drivers/acpi/evged.c
@@ -49,6 +49,11 @@
 
 #define MODULE_NAME "acpi-ged"
 
+struct acpi_ged_device {
+	struct device *dev;
+	struct list_head event_list;
+};
+
 struct acpi_ged_event {
 	struct list_head node;
 	struct device *dev;
@@ -76,7 +81,8 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
 	unsigned int irq;
 	unsigned int gsi;
 	unsigned int irqflags = IRQF_ONESHOT;
-	struct device *dev = context;
+	struct acpi_ged_device *geddev = context;
+	struct device *dev = geddev->dev;
 	acpi_handle handle = ACPI_HANDLE(dev);
 	acpi_handle evt_handle;
 	struct resource r;
@@ -102,8 +108,6 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
 		return AE_ERROR;
 	}
 
-	dev_info(dev, "GED listening GSI %u @ IRQ %u\n", gsi, irq);
-
 	event = devm_kzalloc(dev, sizeof(*event), GFP_KERNEL);
 	if (!event)
 		return AE_ERROR;
@@ -116,29 +120,58 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
 	if (r.flags & IORESOURCE_IRQ_SHAREABLE)
 		irqflags |= IRQF_SHARED;
 
-	if (devm_request_threaded_irq(dev, irq, NULL, acpi_ged_irq_handler,
-				      irqflags, "ACPI:Ged", event)) {
+	if (request_threaded_irq(irq, NULL, acpi_ged_irq_handler,
+				 irqflags, "ACPI:Ged", event)) {
 		dev_err(dev, "failed to setup event handler for irq %u\n", irq);
 		return AE_ERROR;
 	}
 
+	dev_dbg(dev, "GED listening GSI %u @ IRQ %u\n", gsi, irq);
+	list_add_tail(&event->node, &geddev->event_list);
 	return AE_OK;
 }
 
 static int ged_probe(struct platform_device *pdev)
 {
+	struct acpi_ged_device *geddev;
 	acpi_status acpi_ret;
 
+	geddev = devm_kzalloc(&pdev->dev, sizeof(*geddev), GFP_KERNEL);
+	if (!geddev)
+		return -ENOMEM;
+
+	geddev->dev = &pdev->dev;
+	INIT_LIST_HEAD(&geddev->event_list);
 	acpi_ret = acpi_walk_resources(ACPI_HANDLE(&pdev->dev), "_CRS",
-				       acpi_ged_request_interrupt, &pdev->dev);
+				       acpi_ged_request_interrupt, geddev);
 	if (ACPI_FAILURE(acpi_ret)) {
 		dev_err(&pdev->dev, "unable to parse the _CRS record\n");
 		return -EINVAL;
 	}
+	platform_set_drvdata(pdev, geddev);
 
 	return 0;
 }
 
+static void ged_shutdown(struct platform_device *pdev)
+{
+	struct acpi_ged_device *geddev = platform_get_drvdata(pdev);
+	struct acpi_ged_event *event, *next;
+
+	list_for_each_entry_safe(event, next, &geddev->event_list, node) {
+		free_irq(event->irq, event);
+		list_del(&event->node);
+		dev_dbg(geddev->dev, "GED releasing GSI %u @ IRQ %u\n",
+			 event->gsi, event->irq);
+	}
+}
+
+static int ged_remove(struct platform_device *pdev)
+{
+	ged_shutdown(pdev);
+	return 0;
+}
+
 static const struct acpi_device_id ged_acpi_ids[] = {
 	{"ACPI0013"},
 	{},
@@ -146,6 +179,8 @@ static const struct acpi_device_id ged_acpi_ids[] = {
 
 static struct platform_driver ged_driver = {
 	.probe = ged_probe,
+	.remove = ged_remove,
+	.shutdown = ged_shutdown,
 	.driver = {
 		.name = MODULE_NAME,
 		.acpi_match_table = ACPI_PTR(ged_acpi_ids),
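By switching from devm_request_threaded_irq() to request_threaded_irq(), the GED driver now owns the IRQ lifetime itself, so each registered event is kept on geddev->event_list and released explicitly in ged_shutdown(). A minimal sketch of that bookkeeping pattern, independent of this driver (struct and function names are illustrative; request_threaded_irq(), free_irq() and list_for_each_entry_safe() are the real kernel APIs):

struct tracked_irq {
	struct list_head node;
	unsigned int irq;
};

static void release_all_irqs(struct list_head *irqs)
{
	struct tracked_irq *t, *next;

	list_for_each_entry_safe(t, next, irqs, node) {
		free_irq(t->irq, t);	/* dev_id must match what was passed at request time */
		list_del(&t->node);
	}
}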
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 7f43423de43c..1d0a501bc7f0 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -159,7 +159,7 @@ static inline void acpi_early_processor_osc(void) {}
    -------------------------------------------------------------------------- */
 struct acpi_ec {
 	acpi_handle handle;
-	unsigned long gpe;
+	u32 gpe;
 	unsigned long command_addr;
 	unsigned long data_addr;
 	bool global_lock;
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index b28ce440a06f..bbe48ad20886 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -838,6 +838,18 @@ static bool add_flush(struct acpi_nfit_desc *acpi_desc,
 	return true;
 }
 
+static bool add_platform_cap(struct acpi_nfit_desc *acpi_desc,
+		struct acpi_nfit_capabilities *pcap)
+{
+	struct device *dev = acpi_desc->dev;
+	u32 mask;
+
+	mask = (1 << (pcap->highest_capability + 1)) - 1;
+	acpi_desc->platform_cap = pcap->capabilities & mask;
+	dev_dbg(dev, "%s: cap: %#x\n", __func__, acpi_desc->platform_cap);
+	return true;
+}
+
 static void *add_table(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_table_prev *prev, void *table, const void *end)
 {
@@ -883,6 +895,10 @@ static void *add_table(struct acpi_nfit_desc *acpi_desc,
 	case ACPI_NFIT_TYPE_SMBIOS:
 		dev_dbg(dev, "%s: smbios\n", __func__);
 		break;
+	case ACPI_NFIT_TYPE_CAPABILITIES:
+		if (!add_platform_cap(acpi_desc, table))
+			return err;
+		break;
 	default:
 		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
 		break;
@@ -2659,6 +2675,12 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
 	else
 		ndr_desc->numa_node = NUMA_NO_NODE;
 
+	if(acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
+		set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags);
+
+	if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
+		set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);
+
 	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
 		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
 		struct nd_mapping_desc *mapping;
@@ -3467,6 +3489,7 @@ static __init int nfit_init(void)
 	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
 	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
 	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
+	BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16);
 
 	guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]);
 	guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]);
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index f0cf18b2da8b..50d36e166d70 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -202,6 +202,7 @@ struct acpi_nfit_desc {
 	unsigned long dimm_cmd_force_en;
 	unsigned long bus_cmd_force_en;
 	unsigned long bus_nfit_cmd_force_en;
+	unsigned int platform_cap;
 	int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
 			void *iobuf, u64 len, int rw);
 };
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 917f1cc0fda4..8ccaae3550d2 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -460,8 +460,7 @@ int __init acpi_numa_init(void)
 					srat_proc, ARRAY_SIZE(srat_proc), 0);
 
 		cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
-					    acpi_parse_memory_affinity,
-					    NR_NODE_MEMBLKS);
+					    acpi_parse_memory_affinity, 0);
 	}
 
 	/* SLIT: System Locality Information Table */
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index bc3d914dfc3e..85ad679390e3 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -612,7 +612,7 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
 			acpi_isa_irq_penalty[link->irq.active] +=
 				PIRQ_PENALTY_PCI_USING;
 
-		printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
+		pr_info("%s [%s] enabled at IRQ %d\n",
 			acpi_device_name(link->device),
 			acpi_device_bid(link->device), link->irq.active);
 	}
diff --git a/drivers/acpi/pmic/intel_pmic_bxtwc.c b/drivers/acpi/pmic/intel_pmic_bxtwc.c
index 90011aad4d20..886ac8b93cd0 100644
--- a/drivers/acpi/pmic/intel_pmic_bxtwc.c
+++ b/drivers/acpi/pmic/intel_pmic_bxtwc.c
@@ -400,7 +400,7 @@ static int intel_bxtwc_pmic_opregion_probe(struct platform_device *pdev)
 			&intel_bxtwc_pmic_opregion_data);
 }
 
-static struct platform_device_id bxt_wc_opregion_id_table[] = {
+static const struct platform_device_id bxt_wc_opregion_id_table[] = {
 	{ .name = "bxt_wcove_region" },
 	{},
 };
@@ -412,9 +412,4 @@ static struct platform_driver intel_bxtwc_pmic_opregion_driver = {
 	},
 	.id_table = bxt_wc_opregion_id_table,
 };
-
-static int __init intel_bxtwc_pmic_opregion_driver_init(void)
-{
-	return platform_driver_register(&intel_bxtwc_pmic_opregion_driver);
-}
-device_initcall(intel_bxtwc_pmic_opregion_driver_init);
+builtin_platform_driver(intel_bxtwc_pmic_opregion_driver);
diff --git a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
index 109c1e9c9c7a..f6d73a243d80 100644
--- a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
+++ b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
@@ -131,7 +131,4 @@ static struct platform_driver chtdc_ti_pmic_opregion_driver = {
 	},
 	.id_table = chtdc_ti_pmic_opregion_id_table,
 };
-module_platform_driver(chtdc_ti_pmic_opregion_driver);
-
-MODULE_DESCRIPTION("Dollar Cove TI PMIC opregion driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(chtdc_ti_pmic_opregion_driver);
diff --git a/drivers/acpi/pmic/intel_pmic_chtwc.c b/drivers/acpi/pmic/intel_pmic_chtwc.c
index 85636d7a9d39..9912422c8185 100644
--- a/drivers/acpi/pmic/intel_pmic_chtwc.c
+++ b/drivers/acpi/pmic/intel_pmic_chtwc.c
@@ -260,11 +260,10 @@ static int intel_cht_wc_pmic_opregion_probe(struct platform_device *pdev)
 				&intel_cht_wc_pmic_opregion_data);
 }
 
-static struct platform_device_id cht_wc_opregion_id_table[] = {
+static const struct platform_device_id cht_wc_opregion_id_table[] = {
 	{ .name = "cht_wcove_region" },
 	{},
 };
-MODULE_DEVICE_TABLE(platform, cht_wc_opregion_id_table);
 
 static struct platform_driver intel_cht_wc_pmic_opregion_driver = {
 	.probe = intel_cht_wc_pmic_opregion_probe,
@@ -273,8 +272,4 @@ static struct platform_driver intel_cht_wc_pmic_opregion_driver = {
 	},
 	.id_table = cht_wc_opregion_id_table,
 };
-module_platform_driver(intel_cht_wc_pmic_opregion_driver);
-
-MODULE_DESCRIPTION("Intel CHT Whiskey Cove PMIC operation region driver");
-MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(intel_cht_wc_pmic_opregion_driver);
diff --git a/drivers/acpi/pmic/intel_pmic_crc.c b/drivers/acpi/pmic/intel_pmic_crc.c
index d7f1761ab1bc..7ffa74048107 100644
--- a/drivers/acpi/pmic/intel_pmic_crc.c
+++ b/drivers/acpi/pmic/intel_pmic_crc.c
@@ -201,9 +201,4 @@ static struct platform_driver intel_crc_pmic_opregion_driver = {
 		.name = "crystal_cove_pmic",
 	},
 };
-
-static int __init intel_crc_pmic_opregion_driver_init(void)
-{
-	return platform_driver_register(&intel_crc_pmic_opregion_driver);
-}
-device_initcall(intel_crc_pmic_opregion_driver_init);
+builtin_platform_driver(intel_crc_pmic_opregion_driver);
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 6c99d3f81095..316e55174aa9 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -278,9 +278,4 @@ static struct platform_driver intel_xpower_pmic_opregion_driver = {
 		.name = "axp288_pmic_acpi",
 	},
 };
-
-static int __init intel_xpower_pmic_opregion_driver_init(void)
-{
-	return platform_driver_register(&intel_xpower_pmic_opregion_driver);
-}
-device_initcall(intel_xpower_pmic_opregion_driver_init);
+builtin_platform_driver(intel_xpower_pmic_opregion_driver);
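The four PMIC opregion drivers above all replace an open-coded initcall (or module_platform_driver()) with builtin_platform_driver(); since these drivers are built-in only, the dropped MODULE_* metadata was unused. The macro generates essentially the registration boilerplate the patch deletes. For one of the drivers in this series that expansion is, roughly:

/* What builtin_platform_driver() boils down to here; this is essentially the
 * hand-written code removed from intel_pmic_crc.c above.
 */
static int __init intel_crc_pmic_opregion_driver_init(void)
{
	return platform_driver_register(&intel_crc_pmic_opregion_driver);
}
device_initcall(intel_crc_pmic_opregion_driver_init);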
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 06a150bb35bf..4fc59c3bc673 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -816,14 +816,8 @@ end:
  * interface:
  * echo unmask > /sys/firmware/acpi/interrupts/gpe00
  */
-
-/*
- * Currently, the GPE flooding prevention only supports to mask the GPEs
- * numbered from 00 to 7f.
- */
-#define ACPI_MASKABLE_GPE_MAX 0x80
-
-static u64 __initdata acpi_masked_gpes;
+#define ACPI_MASKABLE_GPE_MAX 0xFF
+static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;
 
 static int __init acpi_gpe_set_masked_gpes(char *val)
 {
@@ -831,7 +825,7 @@ static int __init acpi_gpe_set_masked_gpes(char *val)
 
 	if (kstrtou8(val, 0, &gpe) || gpe > ACPI_MASKABLE_GPE_MAX)
 		return -EINVAL;
-	acpi_masked_gpes |= ((u64)1<<gpe);
+	set_bit(gpe, acpi_masked_gpes_map);
 
 	return 1;
 }
@@ -843,15 +837,11 @@ void __init acpi_gpe_apply_masked_gpes(void)
 	acpi_status status;
 	u8 gpe;
 
-	for (gpe = 0;
-	     gpe < min_t(u8, ACPI_MASKABLE_GPE_MAX, acpi_current_gpe_count);
-	     gpe++) {
-		if (acpi_masked_gpes & ((u64)1<<gpe)) {
-			status = acpi_get_gpe_device(gpe, &handle);
-			if (ACPI_SUCCESS(status)) {
-				pr_info("Masking GPE 0x%x.\n", gpe);
-				(void)acpi_mask_gpe(handle, gpe, TRUE);
-			}
+	for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) {
+		status = acpi_get_gpe_device(gpe, &handle);
+		if (ACPI_SUCCESS(status)) {
+			pr_info("Masking GPE 0x%x.\n", gpe);
+			(void)acpi_mask_gpe(handle, gpe, TRUE);
 		}
 	}
 }
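The sysfs change replaces a 64-bit mask, which capped maskable GPEs at 0x7f, with a bitmap sized for 0xFF that is addressed with set_bit() and walked with for_each_set_bit(). A small standalone sketch of the same bitmap idiom (the MAX_GPE constant, bitmap name and function are illustrative; DECLARE_BITMAP, set_bit and for_each_set_bit are the real kernel helpers):

#define MAX_GPE 0xFF
static DECLARE_BITMAP(gpe_map, MAX_GPE);

static void mask_selected_gpes(void)
{
	unsigned int gpe;

	set_bit(0x6f, gpe_map);			/* mark one GPE for masking */
	for_each_set_bit(gpe, gpe_map, MAX_GPE)	/* visit only the marked bits */
		pr_info("would mask GPE 0x%x\n", gpe);
}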
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 9d49a1acebe3..78db97687f26 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -737,16 +737,17 @@ bool acpi_dev_found(const char *hid)
 }
 EXPORT_SYMBOL(acpi_dev_found);
 
-struct acpi_dev_present_info {
+struct acpi_dev_match_info {
+	const char *dev_name;
 	struct acpi_device_id hid[2];
 	const char *uid;
 	s64 hrv;
 };
 
-static int acpi_dev_present_cb(struct device *dev, void *data)
+static int acpi_dev_match_cb(struct device *dev, void *data)
 {
 	struct acpi_device *adev = to_acpi_device(dev);
-	struct acpi_dev_present_info *match = data;
+	struct acpi_dev_match_info *match = data;
 	unsigned long long hrv;
 	acpi_status status;
 
@@ -757,6 +758,8 @@ static int acpi_dev_present_cb(struct device *dev, void *data)
 		    strcmp(adev->pnp.unique_id, match->uid)))
 		return 0;
 
+	match->dev_name = acpi_dev_name(adev);
+
 	if (match->hrv == -1)
 		return 1;
 
@@ -789,20 +792,44 @@ static int acpi_dev_present_cb(struct device *dev, void *data)
  */
 bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
 {
-	struct acpi_dev_present_info match = {};
+	struct acpi_dev_match_info match = {};
 	struct device *dev;
 
 	strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id));
 	match.uid = uid;
 	match.hrv = hrv;
 
-	dev = bus_find_device(&acpi_bus_type, NULL, &match,
-			      acpi_dev_present_cb);
-
+	dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
 	return !!dev;
 }
 EXPORT_SYMBOL(acpi_dev_present);
 
+/**
+ * acpi_dev_get_first_match_name - Return name of first match of ACPI device
+ * @hid: Hardware ID of the device.
+ * @uid: Unique ID of the device, pass NULL to not check _UID
+ * @hrv: Hardware Revision of the device, pass -1 to not check _HRV
+ *
+ * Return device name if a matching device was present
+ * at the moment of invocation, or NULL otherwise.
+ *
+ * See additional information in acpi_dev_present() as well.
+ */
+const char *
+acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv)
+{
+	struct acpi_dev_match_info match = {};
+	struct device *dev;
+
+	strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id));
+	match.uid = uid;
+	match.hrv = hrv;
+
+	dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
+	return dev ? match.dev_name : NULL;
+}
+EXPORT_SYMBOL(acpi_dev_get_first_match_name);
+
 /*
  * acpi_backlight= handling, this is done here rather then in video_detect.c
  * because __setup cannot be used in modules.
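The new acpi_dev_get_first_match_name() helper is consumed later in this same diff by gpio-merrifield: a caller passes a _HID plus optional _UID/_HRV constraints and gets back the matching device's ACPI name, or NULL if nothing matches. A sketch of a caller mirroring that hunk:

/* Ask for the first ACPI device with _HID "INTC1002", any _UID, any _HRV;
 * fall back to a fixed name when the platform has no such device.
 */
const char *name = acpi_dev_get_first_match_name("INTC1002", NULL, -1);

if (!name)
	name = "pinctrl-merrifield";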
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index bdc87907d6a1..2415ad9f6dd4 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -236,6 +236,9 @@ config GENERIC_CPU_DEVICES
 config GENERIC_CPU_AUTOPROBE
 	bool
 
+config GENERIC_CPU_VULNERABILITIES
+	bool
+
 config SOC_BUS
 	bool
 	select GLOB
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 58a9b608d821..d99038487a0d 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -511,10 +511,58 @@ static void __init cpu_dev_register_generic(void)
 #endif
 }
 
+#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
+
+ssize_t __weak cpu_show_meltdown(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_spectre_v1(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_spectre_v2(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
+static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+
+static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+	&dev_attr_meltdown.attr,
+	&dev_attr_spectre_v1.attr,
+	&dev_attr_spectre_v2.attr,
+	NULL
+};
+
+static const struct attribute_group cpu_root_vulnerabilities_group = {
+	.name  = "vulnerabilities",
+	.attrs = cpu_root_vulnerabilities_attrs,
+};
+
+static void __init cpu_register_vulnerabilities(void)
+{
+	if (sysfs_create_group(&cpu_subsys.dev_root->kobj,
+			       &cpu_root_vulnerabilities_group))
+		pr_err("Unable to register CPU vulnerabilities\n");
+}
+
+#else
+static inline void cpu_register_vulnerabilities(void) { }
+#endif
+
 void __init cpu_dev_init(void)
 {
 	if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
 		panic("Failed to register CPU subsystem");
 
 	cpu_dev_register_generic();
+	cpu_register_vulnerabilities();
 }
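The sysfs files under /sys/devices/system/cpu/vulnerabilities fall back to the __weak "Not affected" handlers unless the architecture provides its own definitions; a non-weak symbol with the same prototype replaces the fallback at link time. An illustrative arch-side override, not taken from this patch (the x86 helpers shown are assumptions about the arch code):

ssize_t cpu_show_meltdown(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	/* Illustrative: report vulnerability based on an arch-specific bug flag. */
	if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		return sprintf(buf, "Vulnerable\n");
	return sprintf(buf, "Not affected\n");
}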
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index bc8e61506968..d5fe720cf149 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1581,9 +1581,8 @@ out:
 	return err;
 }
 
-static void lo_release(struct gendisk *disk, fmode_t mode)
+static void __lo_release(struct loop_device *lo)
 {
-	struct loop_device *lo = disk->private_data;
 	int err;
 
 	if (atomic_dec_return(&lo->lo_refcnt))
@@ -1610,6 +1609,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
 	mutex_unlock(&lo->lo_ctl_mutex);
 }
 
+static void lo_release(struct gendisk *disk, fmode_t mode)
+{
+	mutex_lock(&loop_index_mutex);
+	__lo_release(disk->private_data);
+	mutex_unlock(&loop_index_mutex);
+}
+
 static const struct block_device_operations lo_fops = {
 	.owner =	THIS_MODULE,
 	.open =		lo_open,
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 38fc5f397fde..cc93522a6d41 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3047,13 +3047,21 @@ static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
 	mutex_unlock(&rbd_dev->watch_mutex);
 }
 
+static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
+{
+	struct rbd_client_id cid = rbd_get_cid(rbd_dev);
+
+	strcpy(rbd_dev->lock_cookie, cookie);
+	rbd_set_owner_cid(rbd_dev, &cid);
+	queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
+}
+
 /*
  * lock_rwsem must be held for write
  */
 static int rbd_lock(struct rbd_device *rbd_dev)
 {
 	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
-	struct rbd_client_id cid = rbd_get_cid(rbd_dev);
 	char cookie[32];
 	int ret;
 
@@ -3068,9 +3076,7 @@ static int rbd_lock(struct rbd_device *rbd_dev)
 		return ret;
 
 	rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
-	strcpy(rbd_dev->lock_cookie, cookie);
-	rbd_set_owner_cid(rbd_dev, &cid);
-	queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
+	__rbd_lock(rbd_dev, cookie);
 	return 0;
 }
 
@@ -3856,7 +3862,7 @@ static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
 		queue_delayed_work(rbd_dev->task_wq,
 				   &rbd_dev->lock_dwork, 0);
 	} else {
-		strcpy(rbd_dev->lock_cookie, cookie);
+		__rbd_lock(rbd_dev, cookie);
 	}
 }
 
@@ -4381,7 +4387,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 	segment_size = rbd_obj_bytes(&rbd_dev->header);
 	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
 	q->limits.max_sectors = queue_max_hw_sectors(q);
-	blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
+	blk_queue_max_segments(q, USHRT_MAX);
 	blk_queue_max_segment_size(q, segment_size);
 	blk_queue_io_min(q, segment_size);
 	blk_queue_io_opt(q, segment_size);
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 328ca93781cf..1b76d9585902 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -178,6 +178,7 @@ static struct bus_type sunxi_rsb_bus = {
 	.match		= sunxi_rsb_device_match,
 	.probe		= sunxi_rsb_device_probe,
 	.remove		= sunxi_rsb_device_remove,
+	.uevent		= of_device_uevent_modalias,
 };
 
 static void sunxi_rsb_dev_release(struct device *dev)
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 3e104f5aa0c2..b56b3f711d94 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -5,6 +5,7 @@ config CRYPTO_DEV_CHELSIO
 	select CRYPTO_SHA256
 	select CRYPTO_SHA512
 	select CRYPTO_AUTHENC
+	select CRYPTO_GF128MUL
 	---help---
 	  The Chelsio Crypto Co-processor driver for T6 adapters.
 
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 89ba9e85c0f3..4bcef78a08aa 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -607,6 +607,7 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
 		ndesc = ctx->handle_result(priv, ring, sreq->req,
 					   &should_complete, &ret);
 		if (ndesc < 0) {
+			kfree(sreq);
 			dev_err(priv->dev, "failed to handle result (%d)", ndesc);
 			return;
 		}
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 5438552bc6d7..fcc0a606d748 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -14,6 +14,7 @@
 
 #include <crypto/aes.h>
 #include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
 
 #include "safexcel.h"
 
@@ -33,6 +34,10 @@ struct safexcel_cipher_ctx {
 	unsigned int key_len;
 };
 
+struct safexcel_cipher_req {
+	bool needs_inv;
+};
+
 static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
 				  struct crypto_async_request *async,
 				  struct safexcel_command_desc *cdesc,
@@ -126,9 +131,9 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
 	return 0;
 }
 
-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
-				  struct crypto_async_request *async,
-				  bool *should_complete, int *ret)
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+				      struct crypto_async_request *async,
+				      bool *should_complete, int *ret)
 {
 	struct skcipher_request *req = skcipher_request_cast(async);
 	struct safexcel_result_desc *rdesc;
@@ -265,7 +270,6 @@ static int safexcel_aes_send(struct crypto_async_request *async,
 	spin_unlock_bh(&priv->ring[ring].egress_lock);
 
 	request->req = &req->base;
-	ctx->base.handle_result = safexcel_handle_result;
 
 	*commands = n_cdesc;
 	*results = n_rdesc;
@@ -341,8 +345,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 
 	ring = safexcel_select_ring(priv);
 	ctx->base.ring = ring;
-	ctx->base.needs_inv = false;
-	ctx->base.send = safexcel_aes_send;
 
 	spin_lock_bh(&priv->ring[ring].queue_lock);
 	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -359,6 +361,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 	return ndesc;
 }
 
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+				  struct crypto_async_request *async,
+				  bool *should_complete, int *ret)
+{
+	struct skcipher_request *req = skcipher_request_cast(async);
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+	int err;
+
+	if (sreq->needs_inv) {
+		sreq->needs_inv = false;
+		err = safexcel_handle_inv_result(priv, ring, async,
+						 should_complete, ret);
+	} else {
+		err = safexcel_handle_req_result(priv, ring, async,
+						 should_complete, ret);
+	}
+
+	return err;
+}
+
 static int safexcel_cipher_send_inv(struct crypto_async_request *async,
 				    int ring, struct safexcel_request *request,
 				    int *commands, int *results)
@@ -368,8 +390,6 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	int ret;
 
-	ctx->base.handle_result = safexcel_handle_inv_result;
-
 	ret = safexcel_invalidate_cache(async, &ctx->base, priv,
 					ctx->base.ctxr_dma, ring, request);
 	if (unlikely(ret))
@@ -381,28 +401,46 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
 	return 0;
 }
 
+static int safexcel_send(struct crypto_async_request *async,
+			 int ring, struct safexcel_request *request,
+			 int *commands, int *results)
+{
+	struct skcipher_request *req = skcipher_request_cast(async);
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+	int ret;
+
+	if (sreq->needs_inv)
+		ret = safexcel_cipher_send_inv(async, ring, request,
+					       commands, results);
+	else
+		ret = safexcel_aes_send(async, ring, request,
+					commands, results);
+	return ret;
+}
+
 static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
 {
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
-	struct skcipher_request req;
+	SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
 	struct safexcel_inv_result result = {};
 	int ring = ctx->base.ring;
 
-	memset(&req, 0, sizeof(struct skcipher_request));
+	memset(req, 0, sizeof(struct skcipher_request));
 
 	/* create invalidation request */
 	init_completion(&result.completion);
-	skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 				      safexcel_inv_complete, &result);
 
-	skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm));
-	ctx = crypto_tfm_ctx(req.base.tfm);
+	skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
+	ctx = crypto_tfm_ctx(req->base.tfm);
 	ctx->base.exit_inv = true;
-	ctx->base.send = safexcel_cipher_send_inv;
+	sreq->needs_inv = true;
 
 	spin_lock_bh(&priv->ring[ring].queue_lock);
-	crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
 	spin_unlock_bh(&priv->ring[ring].queue_lock);
 
 	if (!priv->ring[ring].need_dequeue)
@@ -424,19 +462,21 @@ static int safexcel_aes(struct skcipher_request *req,
 			enum safexcel_cipher_direction dir, u32 mode)
 {
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	int ret, ring;
 
+	sreq->needs_inv = false;
 	ctx->direction = dir;
 	ctx->mode = mode;
 
 	if (ctx->base.ctxr) {
-		if (ctx->base.needs_inv)
-			ctx->base.send = safexcel_cipher_send_inv;
+		if (ctx->base.needs_inv) {
+			sreq->needs_inv = true;
+			ctx->base.needs_inv = false;
+		}
 	} else {
 		ctx->base.ring = safexcel_select_ring(priv);
-		ctx->base.send = safexcel_aes_send;
-
 		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
 						 EIP197_GFP_FLAGS(req->base),
 						 &ctx->base.ctxr_dma);
@@ -476,6 +516,11 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
 				     alg.skcipher.base);
 
 	ctx->priv = tmpl->priv;
+	ctx->base.send = safexcel_send;
+	ctx->base.handle_result = safexcel_handle_result;
+
+	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+				    sizeof(struct safexcel_cipher_req));
 
 	return 0;
 }
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 74feb6227101..0c5a5820b06e 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -32,9 +32,10 @@ struct safexcel_ahash_req {
 	bool last_req;
 	bool finish;
 	bool hmac;
+	bool needs_inv;
 
 	u8 state_sz;    /* expected sate size, only set once */
-	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
+	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
 
 	u64 len;
 	u64 processed;
@@ -119,15 +120,15 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
 	}
 }
 
-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
-				  struct crypto_async_request *async,
-				  bool *should_complete, int *ret)
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+				      struct crypto_async_request *async,
+				      bool *should_complete, int *ret)
 {
 	struct safexcel_result_desc *rdesc;
 	struct ahash_request *areq = ahash_request_cast(async);
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
 	struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
-	int cache_len, result_sz = sreq->state_sz;
+	int cache_len;
 
 	*ret = 0;
 
@@ -148,8 +149,8 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
 	spin_unlock_bh(&priv->ring[ring].egress_lock);
 
 	if (sreq->finish)
-		result_sz = crypto_ahash_digestsize(ahash);
-	memcpy(sreq->state, areq->result, result_sz);
+		memcpy(areq->result, sreq->state,
+		       crypto_ahash_digestsize(ahash));
 
 	dma_unmap_sg(priv->dev, areq->src,
 		     sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
@@ -165,9 +166,9 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
 	return 1;
 }
 
-static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
-			       struct safexcel_request *request, int *commands,
-			       int *results)
+static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
+				   struct safexcel_request *request,
+				   int *commands, int *results)
 {
 	struct ahash_request *areq = ahash_request_cast(async);
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
@@ -273,7 +274,7 @@ send_command:
 	/* Add the token */
 	safexcel_hash_token(first_cdesc, len, req->state_sz);
 
-	ctx->base.result_dma = dma_map_single(priv->dev, areq->result,
+	ctx->base.result_dma = dma_map_single(priv->dev, req->state,
 					      req->state_sz, DMA_FROM_DEVICE);
 	if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
 		ret = -EINVAL;
@@ -292,7 +293,6 @@ send_command:
 
 	req->processed += len;
 	request->req = &areq->base;
-	ctx->base.handle_result = safexcel_handle_result;
 
 	*commands = n_cdesc;
 	*results = 1;
@@ -374,8 +374,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 
 	ring = safexcel_select_ring(priv);
 	ctx->base.ring = ring;
-	ctx->base.needs_inv = false;
-	ctx->base.send = safexcel_ahash_send;
 
 	spin_lock_bh(&priv->ring[ring].queue_lock);
 	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -392,6 +390,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 	return 1;
 }
 
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+				  struct crypto_async_request *async,
+				  bool *should_complete, int *ret)
+{
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	int err;
+
+	if (req->needs_inv) {
+		req->needs_inv = false;
+		err = safexcel_handle_inv_result(priv, ring, async,
+						 should_complete, ret);
+	} else {
+		err = safexcel_handle_req_result(priv, ring, async,
+						 should_complete, ret);
+	}
+
+	return err;
+}
+
 static int safexcel_ahash_send_inv(struct crypto_async_request *async,
 				   int ring, struct safexcel_request *request,
 				   int *commands, int *results)
@@ -400,7 +418,6 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
 	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
 	int ret;
 
-	ctx->base.handle_result = safexcel_handle_inv_result;
 	ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv,
 					ctx->base.ctxr_dma, ring, request);
 	if (unlikely(ret))
@@ -412,28 +429,46 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
 	return 0;
 }
 
+static int safexcel_ahash_send(struct crypto_async_request *async,
+			       int ring, struct safexcel_request *request,
+			       int *commands, int *results)
+{
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	int ret;
+
+	if (req->needs_inv)
+		ret = safexcel_ahash_send_inv(async, ring, request,
+					      commands, results);
+	else
+		ret = safexcel_ahash_send_req(async, ring, request,
+					      commands, results);
+	return ret;
+}
+
 static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
 {
 	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
-	struct ahash_request req;
+	AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
+	struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
 	struct safexcel_inv_result result = {};
 	int ring = ctx->base.ring;
 
-	memset(&req, 0, sizeof(struct ahash_request));
+	memset(req, 0, sizeof(struct ahash_request));
 
 	/* create invalidation request */
 	init_completion(&result.completion);
-	ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 				   safexcel_inv_complete, &result);
 
-	ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm));
-	ctx = crypto_tfm_ctx(req.base.tfm);
+	ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
+	ctx = crypto_tfm_ctx(req->base.tfm);
 	ctx->base.exit_inv = true;
-	ctx->base.send = safexcel_ahash_send_inv;
+	rctx->needs_inv = true;
 
 	spin_lock_bh(&priv->ring[ring].queue_lock);
-	crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
 	spin_unlock_bh(&priv->ring[ring].queue_lock);
 
 	if (!priv->ring[ring].need_dequeue)
@@ -481,14 +516,16 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	int ret, ring;
 
-	ctx->base.send = safexcel_ahash_send;
+	req->needs_inv = false;
 
 	if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
 		ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
 
 	if (ctx->base.ctxr) {
-		if (ctx->base.needs_inv)
-			ctx->base.send = safexcel_ahash_send_inv;
+		if (ctx->base.needs_inv) {
+			ctx->base.needs_inv = false;
+			req->needs_inv = true;
+		}
 	} else {
 		ctx->base.ring = safexcel_select_ring(priv);
 		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
@@ -622,6 +659,8 @@ static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
 			     struct safexcel_alg_template, alg.ahash);
 
 	ctx->priv = tmpl->priv;
+	ctx->base.send = safexcel_ahash_send;
+	ctx->base.handle_result = safexcel_handle_result;
 
 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 				 sizeof(struct safexcel_ahash_req));
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 48de52cf2ecc..662e709812cc 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1625,6 +1625,7 @@ static int queue_cache_init(void)
 					  CWQ_ENTRY_SIZE, 0, NULL);
 	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
 		kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+		queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
 		return -ENOMEM;
 	}
 	return 0;
@@ -1634,6 +1635,8 @@ static void queue_cache_destroy(void)
 {
 	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
 	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
+	queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
+	queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
 }
 
 static long spu_queue_register_workfn(void *arg)
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index ec8ac5c4dd84..055e2e8f985a 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -20,10 +20,6 @@
 
 #define NO_FURTHER_WRITE_ACTION -1
 
-#ifndef phys_to_page
-#define phys_to_page(x)		pfn_to_page((x) >> PAGE_SHIFT)
-#endif
-
 /**
  * efi_free_all_buff_pages - free all previous allocated buffer pages
  * @cap_info: pointer to current instance of capsule_info structure
@@ -35,7 +31,7 @@
 static void efi_free_all_buff_pages(struct capsule_info *cap_info)
 {
 	while (cap_info->index > 0)
-		__free_page(phys_to_page(cap_info->pages[--cap_info->index]));
+		__free_page(cap_info->pages[--cap_info->index]);
 
 	cap_info->index = NO_FURTHER_WRITE_ACTION;
 }
@@ -71,6 +67,14 @@ int __efi_capsule_setup_info(struct capsule_info *cap_info)
 
 	cap_info->pages = temp_page;
 
+	temp_page = krealloc(cap_info->phys,
+			     pages_needed * sizeof(phys_addr_t *),
+			     GFP_KERNEL | __GFP_ZERO);
+	if (!temp_page)
+		return -ENOMEM;
+
+	cap_info->phys = temp_page;
+
 	return 0;
 }
 
@@ -105,9 +109,24 @@
  **/
 static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info)
 {
+	bool do_vunmap = false;
 	int ret;
 
-	ret = efi_capsule_update(&cap_info->header, cap_info->pages);
+	/*
+	 * cap_info->capsule may have been assigned already by a quirk
+	 * handler, so only overwrite it if it is NULL
+	 */
+	if (!cap_info->capsule) {
+		cap_info->capsule = vmap(cap_info->pages, cap_info->index,
+					 VM_MAP, PAGE_KERNEL);
+		if (!cap_info->capsule)
+			return -ENOMEM;
+		do_vunmap = true;
+	}
+
+	ret = efi_capsule_update(cap_info->capsule, cap_info->phys);
+	if (do_vunmap)
+		vunmap(cap_info->capsule);
 	if (ret) {
 		pr_err("capsule update failed\n");
 		return ret;
@@ -165,10 +184,12 @@ static ssize_t efi_capsule_write(struct file *file, const char __user *buff,
 			goto failed;
 		}
 
-		cap_info->pages[cap_info->index++] = page_to_phys(page);
+		cap_info->pages[cap_info->index] = page;
+		cap_info->phys[cap_info->index] = page_to_phys(page);
 		cap_info->page_bytes_remain = PAGE_SIZE;
+		cap_info->index++;
 	} else {
-		page = phys_to_page(cap_info->pages[cap_info->index - 1]);
+		page = cap_info->pages[cap_info->index - 1];
 	}
 
 	kbuff = kmap(page);
@@ -252,6 +273,7 @@ static int efi_capsule_release(struct inode *inode, struct file *file)
 	struct capsule_info *cap_info = file->private_data;
 
 	kfree(cap_info->pages);
+	kfree(cap_info->phys);
 	kfree(file->private_data);
 	file->private_data = NULL;
 	return 0;
@@ -281,6 +303,13 @@ static int efi_capsule_open(struct inode *inode, struct file *file)
 		return -ENOMEM;
 	}
 
+	cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL);
+	if (!cap_info->phys) {
+		kfree(cap_info->pages);
+		kfree(cap_info);
+		return -ENOMEM;
+	}
+
 	file->private_data = cap_info;
 
 	return 0;
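The capsule loader now keeps both struct page pointers (for vmap()) and physical addresses (for efi_capsule_update()); vmap() stitches the individually allocated pages into one virtually contiguous buffer so the capsule header can be read in place, and the mapping is torn down with vunmap() once the update has been submitted. A tiny standalone sketch of that pattern ('pages' and 'count' are illustrative locals; vmap()/vunmap() are the real kernel APIs):

/* Map N separately allocated pages as one contiguous virtual buffer. */
void *buf = vmap(pages, count, VM_MAP, PAGE_KERNEL);

if (buf) {
	/* ... parse or copy data through the linear mapping ... */
	vunmap(buf);
}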
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index dd67a31ac337..c38624ea0251 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -9,6 +9,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/acpi.h>
 #include <linux/bitops.h>
 #include <linux/gpio/driver.h>
 #include <linux/init.h>
@@ -380,9 +381,16 @@ static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
 	}
 }
 
+static const char *mrfld_gpio_get_pinctrl_dev_name(void)
+{
+	const char *dev_name = acpi_dev_get_first_match_name("INTC1002", NULL, -1);
+	return dev_name ? dev_name : "pinctrl-merrifield";
+}
+
 static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	const struct mrfld_gpio_pinrange *range;
+	const char *pinctrl_dev_name;
 	struct mrfld_gpio *priv;
 	u32 gpio_base, irq_base;
 	void __iomem *base;
@@ -439,10 +447,11 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
 		return retval;
 	}
 
+	pinctrl_dev_name = mrfld_gpio_get_pinctrl_dev_name();
 	for (i = 0; i < ARRAY_SIZE(mrfld_gpio_ranges); i++) {
 		range = &mrfld_gpio_ranges[i];
 		retval = gpiochip_add_pin_range(&priv->chip,
-						"pinctrl-merrifield",
+						pinctrl_dev_name,
 						range->gpio_base,
 						range->pin_base,
 						range->npins);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 44332b793718..14532d9576e4 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2893,6 +2893,27 @@ void gpiod_set_raw_value(struct gpio_desc *desc, int value)
2893EXPORT_SYMBOL_GPL(gpiod_set_raw_value); 2893EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
2894 2894
2895/** 2895/**
2896 * gpiod_set_value_nocheck() - set a GPIO line value without checking
2897 * @desc: the descriptor to set the value on
2898 * @value: value to set
2899 *
2900 * This sets the value of a GPIO line backing a descriptor, applying
2901 * different semantic quirks like active low and open drain/source
2902 * handling.
2903 */
2904static void gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
2905{
2906 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
2907 value = !value;
2908 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
2909 gpio_set_open_drain_value_commit(desc, value);
2910 else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
2911 gpio_set_open_source_value_commit(desc, value);
2912 else
2913 gpiod_set_raw_value_commit(desc, value);
2914}
2915
2916/**
2896 * gpiod_set_value() - assign a gpio's value 2917 * gpiod_set_value() - assign a gpio's value
2897 * @desc: gpio whose value will be assigned 2918 * @desc: gpio whose value will be assigned
2898 * @value: value to assign 2919 * @value: value to assign
@@ -2906,16 +2927,8 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
2906void gpiod_set_value(struct gpio_desc *desc, int value) 2927void gpiod_set_value(struct gpio_desc *desc, int value)
2907{ 2928{
2908 VALIDATE_DESC_VOID(desc); 2929 VALIDATE_DESC_VOID(desc);
2909 /* Should be using gpiod_set_value_cansleep() */
2910 WARN_ON(desc->gdev->chip->can_sleep); 2930 WARN_ON(desc->gdev->chip->can_sleep);
2911 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) 2931 gpiod_set_value_nocheck(desc, value);
2912 value = !value;
2913 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
2914 gpio_set_open_drain_value_commit(desc, value);
2915 else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
2916 gpio_set_open_source_value_commit(desc, value);
2917 else
2918 gpiod_set_raw_value_commit(desc, value);
2919} 2932}
2920EXPORT_SYMBOL_GPL(gpiod_set_value); 2933EXPORT_SYMBOL_GPL(gpiod_set_value);
2921 2934
@@ -3243,9 +3256,7 @@ void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
3243{ 3256{
3244 might_sleep_if(extra_checks); 3257 might_sleep_if(extra_checks);
3245 VALIDATE_DESC_VOID(desc); 3258 VALIDATE_DESC_VOID(desc);
3246 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) 3259 gpiod_set_value_nocheck(desc, value);
3247 value = !value;
3248 gpiod_set_raw_value_commit(desc, value);
3249} 3260}
3250EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep); 3261EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep);
3251 3262
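
The gpiolib hunks above factor the active-low, open-drain and open-source handling into gpiod_set_value_nocheck(), so gpiod_set_value() and gpiod_set_value_cansleep() now apply the same semantics. A hypothetical consumer sketch, assuming a connection named "led" on some device (both made up for illustration):

	struct gpio_desc *led;

	led = gpiod_get(dev, "led", GPIOD_OUT_LOW);
	if (IS_ERR(led))
		return PTR_ERR(led);

	gpiod_set_value(led, 1);	/* asserted; inversion/open-drain handled by gpiolib */
	gpiod_set_value(led, 0);	/* de-asserted */
	gpiod_put(led);
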
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index a9782b1aba47..34daf895f848 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -1360,7 +1360,7 @@ void dpp1_cm_set_output_csc_adjustment(
1360 1360
1361void dpp1_cm_set_output_csc_default( 1361void dpp1_cm_set_output_csc_default(
1362 struct dpp *dpp_base, 1362 struct dpp *dpp_base,
1363 const struct default_adjustment *default_adjust); 1363 enum dc_color_space colorspace);
1364 1364
1365void dpp1_cm_set_gamut_remap( 1365void dpp1_cm_set_gamut_remap(
1366 struct dpp *dpp, 1366 struct dpp *dpp,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index 40627c244bf5..ed1216b53465 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -225,14 +225,13 @@ void dpp1_cm_set_gamut_remap(
225 225
226void dpp1_cm_set_output_csc_default( 226void dpp1_cm_set_output_csc_default(
227 struct dpp *dpp_base, 227 struct dpp *dpp_base,
228 const struct default_adjustment *default_adjust) 228 enum dc_color_space colorspace)
229{ 229{
230 230
231 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 231 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
232 uint32_t ocsc_mode = 0; 232 uint32_t ocsc_mode = 0;
233 233
234 if (default_adjust != NULL) { 234 switch (colorspace) {
235 switch (default_adjust->out_color_space) {
236 case COLOR_SPACE_SRGB: 235 case COLOR_SPACE_SRGB:
237 case COLOR_SPACE_2020_RGB_FULLRANGE: 236 case COLOR_SPACE_2020_RGB_FULLRANGE:
238 ocsc_mode = 0; 237 ocsc_mode = 0;
@@ -253,7 +252,6 @@ void dpp1_cm_set_output_csc_default(
253 case COLOR_SPACE_UNKNOWN: 252 case COLOR_SPACE_UNKNOWN:
254 default: 253 default:
255 break; 254 break;
256 }
257 } 255 }
258 256
259 REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode); 257 REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 961ad5c3b454..05dc01e54531 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2097,6 +2097,8 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
2097 tbl_entry.color_space = color_space; 2097 tbl_entry.color_space = color_space;
2098 //tbl_entry.regval = matrix; 2098 //tbl_entry.regval = matrix;
2099 pipe_ctx->plane_res.dpp->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.dpp, &tbl_entry); 2099 pipe_ctx->plane_res.dpp->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.dpp, &tbl_entry);
2100 } else {
2101 pipe_ctx->plane_res.dpp->funcs->opp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
2100 } 2102 }
2101} 2103}
2102static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx) 2104static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 83a68460edcd..9420dfb94d39 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -64,7 +64,7 @@ struct dpp_funcs {
64 64
65 void (*opp_set_csc_default)( 65 void (*opp_set_csc_default)(
66 struct dpp *dpp, 66 struct dpp *dpp,
67 const struct default_adjustment *default_adjust); 67 enum dc_color_space colorspace);
68 68
69 void (*opp_set_csc_adjustment)( 69 void (*opp_set_csc_adjustment)(
70 struct dpp *dpp, 70 struct dpp *dpp,
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 2e065facdce7..a0f4d2a2a481 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -168,16 +168,23 @@ static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
168void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb, 168void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
169 int x, int y) 169 int x, int y)
170{ 170{
171 const struct drm_format_info *format = fb->format;
172 unsigned int num_planes = format->num_planes;
171 u32 addr = drm_fb_obj(fb)->dev_addr; 173 u32 addr = drm_fb_obj(fb)->dev_addr;
172 int num_planes = fb->format->num_planes;
173 int i; 174 int i;
174 175
175 if (num_planes > 3) 176 if (num_planes > 3)
176 num_planes = 3; 177 num_planes = 3;
177 178
178 for (i = 0; i < num_planes; i++) 179 addrs[0] = addr + fb->offsets[0] + y * fb->pitches[0] +
180 x * format->cpp[0];
181
182 y /= format->vsub;
183 x /= format->hsub;
184
185 for (i = 1; i < num_planes; i++)
179 addrs[i] = addr + fb->offsets[i] + y * fb->pitches[i] + 186 addrs[i] = addr + fb->offsets[i] + y * fb->pitches[i] +
180 x * fb->format->cpp[i]; 187 x * format->cpp[i];
181 for (; i < 3; i++) 188 for (; i < 3; i++)
182 addrs[i] = 0; 189 addrs[i] = 0;
183} 190}
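
A worked example of the address math above, assuming a two-plane NV12 framebuffer (hsub = vsub = 2, cpp[0] = 1, cpp[1] = 2); base, offsets[] and pitches[] stand in for the framebuffer fields:

	u32 y_addr  = base + offsets[0] + y * pitches[0] + x * 1;
	u32 uv_addr = base + offsets[1] + (y / 2) * pitches[1] + (x / 2) * 2;

Plane 0 keeps the full x/y, while the chroma plane is addressed with the subsampled coordinates, which is what the division by hsub/vsub achieves.
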
@@ -744,15 +751,14 @@ void armada_drm_crtc_plane_disable(struct armada_crtc *dcrtc,
744 if (plane->fb) 751 if (plane->fb)
745 drm_framebuffer_put(plane->fb); 752 drm_framebuffer_put(plane->fb);
746 753
747 /* Power down the Y/U/V FIFOs */
748 sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
749
750 /* Power down most RAMs and FIFOs if this is the primary plane */ 754 /* Power down most RAMs and FIFOs if this is the primary plane */
751 if (plane->type == DRM_PLANE_TYPE_PRIMARY) { 755 if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
752 sram_para1 |= CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 | 756 sram_para1 = CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
753 CFG_PDWN32x32 | CFG_PDWN64x66; 757 CFG_PDWN32x32 | CFG_PDWN64x66;
754 dma_ctrl0_mask = CFG_GRA_ENA; 758 dma_ctrl0_mask = CFG_GRA_ENA;
755 } else { 759 } else {
760 /* Power down the Y/U/V FIFOs */
761 sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
756 dma_ctrl0_mask = CFG_DMA_ENA; 762 dma_ctrl0_mask = CFG_DMA_ENA;
757 } 763 }
758 764
@@ -1225,17 +1231,13 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
1225 1231
1226 ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc", 1232 ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
1227 dcrtc); 1233 dcrtc);
1228 if (ret < 0) { 1234 if (ret < 0)
1229 kfree(dcrtc); 1235 goto err_crtc;
1230 return ret;
1231 }
1232 1236
1233 if (dcrtc->variant->init) { 1237 if (dcrtc->variant->init) {
1234 ret = dcrtc->variant->init(dcrtc, dev); 1238 ret = dcrtc->variant->init(dcrtc, dev);
1235 if (ret) { 1239 if (ret)
1236 kfree(dcrtc); 1240 goto err_crtc;
1237 return ret;
1238 }
1239 } 1241 }
1240 1242
1241 /* Ensure AXI pipeline is enabled */ 1243 /* Ensure AXI pipeline is enabled */
@@ -1246,13 +1248,15 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
1246 dcrtc->crtc.port = port; 1248 dcrtc->crtc.port = port;
1247 1249
1248 primary = kzalloc(sizeof(*primary), GFP_KERNEL); 1250 primary = kzalloc(sizeof(*primary), GFP_KERNEL);
1249 if (!primary) 1251 if (!primary) {
1250 return -ENOMEM; 1252 ret = -ENOMEM;
1253 goto err_crtc;
1254 }
1251 1255
1252 ret = armada_drm_plane_init(primary); 1256 ret = armada_drm_plane_init(primary);
1253 if (ret) { 1257 if (ret) {
1254 kfree(primary); 1258 kfree(primary);
1255 return ret; 1259 goto err_crtc;
1256 } 1260 }
1257 1261
1258 ret = drm_universal_plane_init(drm, &primary->base, 0, 1262 ret = drm_universal_plane_init(drm, &primary->base, 0,
@@ -1263,7 +1267,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
1263 DRM_PLANE_TYPE_PRIMARY, NULL); 1267 DRM_PLANE_TYPE_PRIMARY, NULL);
1264 if (ret) { 1268 if (ret) {
1265 kfree(primary); 1269 kfree(primary);
1266 return ret; 1270 goto err_crtc;
1267 } 1271 }
1268 1272
1269 ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL, 1273 ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL,
@@ -1282,6 +1286,9 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
1282 1286
1283err_crtc_init: 1287err_crtc_init:
1284 primary->base.funcs->destroy(&primary->base); 1288 primary->base.funcs->destroy(&primary->base);
1289err_crtc:
1290 kfree(dcrtc);
1291
1285 return ret; 1292 return ret;
1286} 1293}
1287 1294
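
The error-path rework above funnels every failure after dcrtc is allocated through a single err_crtc label, so newly added exit points cannot leak the allocation. A generic sketch of that shape, with demo_* names standing in for the real helpers:

	static int demo_probe(struct device *dev)
	{
		struct demo_crtc *d;
		int ret;

		d = kzalloc(sizeof(*d), GFP_KERNEL);
		if (!d)
			return -ENOMEM;

		ret = demo_request_irq(dev, d);
		if (ret < 0)
			goto err_free;

		ret = demo_init_planes(d);
		if (ret)
			goto err_free;

		return 0;

	err_free:
		kfree(d);
		return ret;
	}
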
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
index bab11f483575..bfd3514fbe9b 100644
--- a/drivers/gpu/drm/armada/armada_crtc.h
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -42,6 +42,8 @@ struct armada_plane_work {
42}; 42};
43 43
44struct armada_plane_state { 44struct armada_plane_state {
45 u16 src_x;
46 u16 src_y;
45 u32 src_hw; 47 u32 src_hw;
46 u32 dst_hw; 48 u32 dst_hw;
47 u32 dst_yx; 49 u32 dst_yx;
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index b411b608821a..aba947696178 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -99,6 +99,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
99{ 99{
100 struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane); 100 struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
101 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); 101 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
102 const struct drm_format_info *format;
102 struct drm_rect src = { 103 struct drm_rect src = {
103 .x1 = src_x, 104 .x1 = src_x,
104 .y1 = src_y, 105 .y1 = src_y,
@@ -117,7 +118,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
117 }; 118 };
118 uint32_t val, ctrl0; 119 uint32_t val, ctrl0;
119 unsigned idx = 0; 120 unsigned idx = 0;
120 bool visible; 121 bool visible, fb_changed;
121 int ret; 122 int ret;
122 123
123 trace_armada_ovl_plane_update(plane, crtc, fb, 124 trace_armada_ovl_plane_update(plane, crtc, fb,
@@ -138,6 +139,18 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
138 if (!visible) 139 if (!visible)
139 ctrl0 &= ~CFG_DMA_ENA; 140 ctrl0 &= ~CFG_DMA_ENA;
140 141
142 /*
143 * Shifting a YUV packed format image by one pixel causes the U/V
144 * planes to swap. Compensate for it by also toggling the UV swap.
145 */
146 format = fb->format;
147 if (format->num_planes == 1 && src.x1 >> 16 & (format->hsub - 1))
148 ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
149
150 fb_changed = plane->fb != fb ||
151 dplane->base.state.src_x != src.x1 >> 16 ||
152 dplane->base.state.src_y != src.y1 >> 16;
153
141 if (!dcrtc->plane) { 154 if (!dcrtc->plane) {
142 dcrtc->plane = plane; 155 dcrtc->plane = plane;
143 armada_ovl_update_attr(&dplane->prop, dcrtc); 156 armada_ovl_update_attr(&dplane->prop, dcrtc);
@@ -145,7 +158,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
145 158
146 /* FIXME: overlay on an interlaced display */ 159 /* FIXME: overlay on an interlaced display */
147 /* Just updating the position/size? */ 160 /* Just updating the position/size? */
148 if (plane->fb == fb && dplane->base.state.ctrl0 == ctrl0) { 161 if (!fb_changed && dplane->base.state.ctrl0 == ctrl0) {
149 val = (drm_rect_height(&src) & 0xffff0000) | 162 val = (drm_rect_height(&src) & 0xffff0000) |
150 drm_rect_width(&src) >> 16; 163 drm_rect_width(&src) >> 16;
151 dplane->base.state.src_hw = val; 164 dplane->base.state.src_hw = val;
@@ -169,9 +182,8 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
169 if (armada_drm_plane_work_wait(&dplane->base, HZ / 25) == 0) 182 if (armada_drm_plane_work_wait(&dplane->base, HZ / 25) == 0)
170 armada_drm_plane_work_cancel(dcrtc, &dplane->base); 183 armada_drm_plane_work_cancel(dcrtc, &dplane->base);
171 184
172 if (plane->fb != fb) { 185 if (fb_changed) {
173 u32 addrs[3], pixel_format; 186 u32 addrs[3];
174 int num_planes, hsub;
175 187
176 /* 188 /*
177 * Take a reference on the new framebuffer - we want to 189 * Take a reference on the new framebuffer - we want to
@@ -182,23 +194,11 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
182 if (plane->fb) 194 if (plane->fb)
183 armada_ovl_retire_fb(dplane, plane->fb); 195 armada_ovl_retire_fb(dplane, plane->fb);
184 196
185 src_y = src.y1 >> 16; 197 dplane->base.state.src_y = src_y = src.y1 >> 16;
186 src_x = src.x1 >> 16; 198 dplane->base.state.src_x = src_x = src.x1 >> 16;
187 199
188 armada_drm_plane_calc_addrs(addrs, fb, src_x, src_y); 200 armada_drm_plane_calc_addrs(addrs, fb, src_x, src_y);
189 201
190 pixel_format = fb->format->format;
191 hsub = drm_format_horz_chroma_subsampling(pixel_format);
192 num_planes = fb->format->num_planes;
193
194 /*
195 * Annoyingly, shifting a YUYV-format image by one pixel
196 * causes the U/V planes to toggle. Toggle the UV swap.
197 * (Unfortunately, this causes momentary colour flickering.)
198 */
199 if (src_x & (hsub - 1) && num_planes == 1)
200 ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
201
202 armada_reg_queue_set(dplane->vbl.regs, idx, addrs[0], 202 armada_reg_queue_set(dplane->vbl.regs, idx, addrs[0],
203 LCD_SPU_DMA_START_ADDR_Y0); 203 LCD_SPU_DMA_START_ADDR_Y0);
204 armada_reg_queue_set(dplane->vbl.regs, idx, addrs[1], 204 armada_reg_queue_set(dplane->vbl.regs, idx, addrs[1],
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 85d4c57870fb..49af94627c8a 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2777,12 +2777,12 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2777} 2777}
2778 2778
2779static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt, 2779static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
2780 unsigned int opcode, int rings) 2780 unsigned int opcode, unsigned long rings)
2781{ 2781{
2782 struct cmd_info *info = NULL; 2782 struct cmd_info *info = NULL;
2783 unsigned int ring; 2783 unsigned int ring;
2784 2784
2785 for_each_set_bit(ring, (unsigned long *)&rings, I915_NUM_ENGINES) { 2785 for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
2786 info = find_cmd_entry(gvt, opcode, ring); 2786 info = find_cmd_entry(gvt, opcode, ring);
2787 if (info) 2787 if (info)
2788 break; 2788 break;
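
The cmd_parser hunk above makes the ring mask a real unsigned long before handing its address to for_each_set_bit(); casting the address of a narrower int, as the old code did, reads past the variable on 64-bit. A minimal sketch of the safe idiom (ring_mask and the pr_debug() message are illustrative):

	unsigned long mask = ring_mask;		/* widen first, never cast &int */
	unsigned int ring;

	for_each_set_bit(ring, &mask, I915_NUM_ENGINES)
		pr_debug("ring %u selected\n", ring);
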
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 8e331142badb..64d67ff9bf08 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1359,12 +1359,15 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
1359 return ret; 1359 return ret;
1360 } else { 1360 } else {
1361 if (!test_bit(index, spt->post_shadow_bitmap)) { 1361 if (!test_bit(index, spt->post_shadow_bitmap)) {
1362 int type = spt->shadow_page.type;
1363
1362 ppgtt_get_shadow_entry(spt, &se, index); 1364 ppgtt_get_shadow_entry(spt, &se, index);
1363 ret = ppgtt_handle_guest_entry_removal(gpt, &se, index); 1365 ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
1364 if (ret) 1366 if (ret)
1365 return ret; 1367 return ret;
1368 ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
1369 ppgtt_set_shadow_entry(spt, &se, index);
1366 } 1370 }
1367
1368 ppgtt_set_post_shadow(spt, index); 1371 ppgtt_set_post_shadow(spt, index);
1369 } 1372 }
1370 1373
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 54b5d4c582b6..e143004e66d5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2368,6 +2368,9 @@ struct drm_i915_private {
2368 */ 2368 */
2369 struct workqueue_struct *wq; 2369 struct workqueue_struct *wq;
2370 2370
2371 /* ordered wq for modesets */
2372 struct workqueue_struct *modeset_wq;
2373
2371 /* Display functions */ 2374 /* Display functions */
2372 struct drm_i915_display_funcs display; 2375 struct drm_i915_display_funcs display;
2373 2376
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 18de6569d04a..5cfba89ed586 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -467,7 +467,7 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
467 struct drm_i915_gem_request *rq; 467 struct drm_i915_gem_request *rq;
468 struct intel_engine_cs *engine; 468 struct intel_engine_cs *engine;
469 469
470 if (!dma_fence_is_i915(fence)) 470 if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
471 return; 471 return;
472 472
473 rq = to_request(fence); 473 rq = to_request(fence);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3866c49bc390..7923dfd9963c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6977,6 +6977,7 @@ enum {
6977#define RESET_PCH_HANDSHAKE_ENABLE (1<<4) 6977#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
6978 6978
6979#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430) 6979#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
6980#define SKL_SELECT_ALTERNATE_DC_EXIT (1<<30)
6980#define MASK_WAKEMEM (1<<13) 6981#define MASK_WAKEMEM (1<<13)
6981 6982
6982#define SKL_DFSM _MMIO(0x51000) 6983#define SKL_DFSM _MMIO(0x51000)
@@ -7026,6 +7027,8 @@ enum {
7026#define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308) 7027#define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308)
7027#define DISABLE_PIXEL_MASK_CAMMING (1<<14) 7028#define DISABLE_PIXEL_MASK_CAMMING (1<<14)
7028 7029
7030#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c)
7031
7029#define GEN7_L3SQCREG1 _MMIO(0xB010) 7032#define GEN7_L3SQCREG1 _MMIO(0xB010)
7030#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 7033#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
7031 7034
@@ -8522,6 +8525,7 @@ enum skl_power_gate {
8522#define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22) 8525#define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22)
8523#define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22) 8526#define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22)
8524#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20) 8527#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20)
8528#define CDCLK_DIVMUX_CD_OVERRIDE (1<<19)
8525#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3) 8529#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
8526#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16) 8530#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16)
8527#define CDCLK_FREQ_DECIMAL_MASK (0x7ff) 8531#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index b2a6d62b71c0..60cf4e58389a 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -860,16 +860,10 @@ static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv,
860 860
861static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco) 861static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
862{ 862{
863 int min_cdclk = skl_calc_cdclk(0, vco);
864 u32 val; 863 u32 val;
865 864
866 WARN_ON(vco != 8100000 && vco != 8640000); 865 WARN_ON(vco != 8100000 && vco != 8640000);
867 866
868 /* select the minimum CDCLK before enabling DPLL 0 */
869 val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
870 I915_WRITE(CDCLK_CTL, val);
871 POSTING_READ(CDCLK_CTL);
872
873 /* 867 /*
874 * We always enable DPLL0 with the lowest link rate possible, but still 868 * We always enable DPLL0 with the lowest link rate possible, but still
875 * taking into account the VCO required to operate the eDP panel at the 869 * taking into account the VCO required to operate the eDP panel at the
@@ -923,7 +917,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
923{ 917{
924 int cdclk = cdclk_state->cdclk; 918 int cdclk = cdclk_state->cdclk;
925 int vco = cdclk_state->vco; 919 int vco = cdclk_state->vco;
926 u32 freq_select, pcu_ack; 920 u32 freq_select, pcu_ack, cdclk_ctl;
927 int ret; 921 int ret;
928 922
929 WARN_ON((cdclk == 24000) != (vco == 0)); 923 WARN_ON((cdclk == 24000) != (vco == 0));
@@ -940,7 +934,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
940 return; 934 return;
941 } 935 }
942 936
943 /* set CDCLK_CTL */ 937 /* Choose frequency for this cdclk */
944 switch (cdclk) { 938 switch (cdclk) {
945 case 450000: 939 case 450000:
946 case 432000: 940 case 432000:
@@ -968,10 +962,33 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
968 dev_priv->cdclk.hw.vco != vco) 962 dev_priv->cdclk.hw.vco != vco)
969 skl_dpll0_disable(dev_priv); 963 skl_dpll0_disable(dev_priv);
970 964
965 cdclk_ctl = I915_READ(CDCLK_CTL);
966
967 if (dev_priv->cdclk.hw.vco != vco) {
968 /* Wa Display #1183: skl,kbl,cfl */
969 cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
970 cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
971 I915_WRITE(CDCLK_CTL, cdclk_ctl);
972 }
973
974 /* Wa Display #1183: skl,kbl,cfl */
975 cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE;
976 I915_WRITE(CDCLK_CTL, cdclk_ctl);
977 POSTING_READ(CDCLK_CTL);
978
971 if (dev_priv->cdclk.hw.vco != vco) 979 if (dev_priv->cdclk.hw.vco != vco)
972 skl_dpll0_enable(dev_priv, vco); 980 skl_dpll0_enable(dev_priv, vco);
973 981
974 I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk)); 982 /* Wa Display #1183: skl,kbl,cfl */
983 cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
984 I915_WRITE(CDCLK_CTL, cdclk_ctl);
985
986 cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
987 I915_WRITE(CDCLK_CTL, cdclk_ctl);
988
989 /* Wa Display #1183: skl,kbl,cfl */
990 cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE;
991 I915_WRITE(CDCLK_CTL, cdclk_ctl);
975 POSTING_READ(CDCLK_CTL); 992 POSTING_READ(CDCLK_CTL);
976 993
977 /* inform PCU of the change */ 994 /* inform PCU of the change */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 30cf273d57aa..123585eeb87d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12544,11 +12544,15 @@ static int intel_atomic_commit(struct drm_device *dev,
12544 INIT_WORK(&state->commit_work, intel_atomic_commit_work); 12544 INIT_WORK(&state->commit_work, intel_atomic_commit_work);
12545 12545
12546 i915_sw_fence_commit(&intel_state->commit_ready); 12546 i915_sw_fence_commit(&intel_state->commit_ready);
12547 if (nonblock) 12547 if (nonblock && intel_state->modeset) {
12548 queue_work(dev_priv->modeset_wq, &state->commit_work);
12549 } else if (nonblock) {
12548 queue_work(system_unbound_wq, &state->commit_work); 12550 queue_work(system_unbound_wq, &state->commit_work);
12549 else 12551 } else {
12552 if (intel_state->modeset)
12553 flush_workqueue(dev_priv->modeset_wq);
12550 intel_atomic_commit_tail(state); 12554 intel_atomic_commit_tail(state);
12551 12555 }
12552 12556
12553 return 0; 12557 return 0;
12554} 12558}
@@ -14462,6 +14466,8 @@ int intel_modeset_init(struct drm_device *dev)
14462 enum pipe pipe; 14466 enum pipe pipe;
14463 struct intel_crtc *crtc; 14467 struct intel_crtc *crtc;
14464 14468
14469 dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
14470
14465 drm_mode_config_init(dev); 14471 drm_mode_config_init(dev);
14466 14472
14467 dev->mode_config.min_width = 0; 14473 dev->mode_config.min_width = 0;
@@ -15270,6 +15276,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
15270 intel_cleanup_gt_powersave(dev_priv); 15276 intel_cleanup_gt_powersave(dev_priv);
15271 15277
15272 intel_teardown_gmbus(dev_priv); 15278 intel_teardown_gmbus(dev_priv);
15279
15280 destroy_workqueue(dev_priv->modeset_wq);
15273} 15281}
15274 15282
15275void intel_connector_attach_encoder(struct intel_connector *connector, 15283void intel_connector_attach_encoder(struct intel_connector *connector,
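
The intel_display.c hunks above add a dedicated ordered workqueue so nonblocking modeset commits are serialized, and blocking commits flush it before running inline. A sketch of that arrangement; the demo_* names and do_commit_tail() are stand-ins, not the driver's functions:

	static struct workqueue_struct *modeset_wq;

	static int demo_init(void)
	{
		modeset_wq = alloc_ordered_workqueue("demo_modeset", 0);
		return modeset_wq ? 0 : -ENOMEM;
	}

	static void demo_commit(struct work_struct *commit_work,
				bool nonblock, bool is_modeset)
	{
		if (nonblock && is_modeset)
			queue_work(modeset_wq, commit_work);	/* one at a time, in order */
		else if (nonblock)
			queue_work(system_unbound_wq, commit_work);
		else {
			if (is_modeset)
				flush_workqueue(modeset_wq);	/* don't overtake queued modesets */
			do_commit_tail(commit_work);
		}
	}

	static void demo_fini(void)
	{
		destroy_workqueue(modeset_wq);
	}
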
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index ab5bf4e2e28e..6074e04dc99f 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1390,6 +1390,11 @@ static int glk_init_workarounds(struct intel_engine_cs *engine)
1390 if (ret) 1390 if (ret)
1391 return ret; 1391 return ret;
1392 1392
1393 /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
1394 ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1);
1395 if (ret)
1396 return ret;
1397
1393 /* WaToEnableHwFixForPushConstHWBug:glk */ 1398 /* WaToEnableHwFixForPushConstHWBug:glk */
1394 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, 1399 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1395 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); 1400 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d36e25607435..e71a8cd50498 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -974,6 +974,9 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
974 974
975 GEM_BUG_ON(prio == I915_PRIORITY_INVALID); 975 GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
976 976
977 if (i915_gem_request_completed(request))
978 return;
979
977 if (prio <= READ_ONCE(request->priotree.priority)) 980 if (prio <= READ_ONCE(request->priotree.priority))
978 return; 981 return;
979 982
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 6e3b430fccdc..55ea5eb3b7df 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -590,7 +590,7 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
590 struct drm_i915_private *dev_priv = to_i915(dev); 590 struct drm_i915_private *dev_priv = to_i915(dev);
591 591
592 if (dev_priv->psr.active) { 592 if (dev_priv->psr.active) {
593 i915_reg_t psr_ctl; 593 i915_reg_t psr_status;
594 u32 psr_status_mask; 594 u32 psr_status_mask;
595 595
596 if (dev_priv->psr.aux_frame_sync) 596 if (dev_priv->psr.aux_frame_sync)
@@ -599,24 +599,24 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
599 0); 599 0);
600 600
601 if (dev_priv->psr.psr2_support) { 601 if (dev_priv->psr.psr2_support) {
602 psr_ctl = EDP_PSR2_CTL; 602 psr_status = EDP_PSR2_STATUS_CTL;
603 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK; 603 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
604 604
605 I915_WRITE(psr_ctl, 605 I915_WRITE(EDP_PSR2_CTL,
606 I915_READ(psr_ctl) & 606 I915_READ(EDP_PSR2_CTL) &
607 ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE)); 607 ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
608 608
609 } else { 609 } else {
610 psr_ctl = EDP_PSR_STATUS_CTL; 610 psr_status = EDP_PSR_STATUS_CTL;
611 psr_status_mask = EDP_PSR_STATUS_STATE_MASK; 611 psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
612 612
613 I915_WRITE(psr_ctl, 613 I915_WRITE(EDP_PSR_CTL,
614 I915_READ(psr_ctl) & ~EDP_PSR_ENABLE); 614 I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
615 } 615 }
616 616
617 /* Wait till PSR is idle */ 617 /* Wait till PSR is idle */
618 if (intel_wait_for_register(dev_priv, 618 if (intel_wait_for_register(dev_priv,
619 psr_ctl, psr_status_mask, 0, 619 psr_status, psr_status_mask, 0,
620 2000)) 620 2000))
621 DRM_ERROR("Timed out waiting for PSR Idle State\n"); 621 DRM_ERROR("Timed out waiting for PSR Idle State\n");
622 622
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 8af286c63d3b..7e115f3927f6 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -598,6 +598,11 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv)
598 598
599 DRM_DEBUG_KMS("Enabling DC5\n"); 599 DRM_DEBUG_KMS("Enabling DC5\n");
600 600
601 /* Wa Display #1183: skl,kbl,cfl */
602 if (IS_GEN9_BC(dev_priv))
603 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
604 SKL_SELECT_ALTERNATE_DC_EXIT);
605
601 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); 606 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
602} 607}
603 608
@@ -625,6 +630,11 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv)
625{ 630{
626 DRM_DEBUG_KMS("Disabling DC6\n"); 631 DRM_DEBUG_KMS("Disabling DC6\n");
627 632
633 /* Wa Display #1183: skl,kbl,cfl */
634 if (IS_GEN9_BC(dev_priv))
635 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
636 SKL_SELECT_ALTERNATE_DC_EXIT);
637
628 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 638 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
629} 639}
630 640
@@ -1786,6 +1796,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
1786 GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 1796 GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
1787 BIT_ULL(POWER_DOMAIN_MODESET) | \ 1797 BIT_ULL(POWER_DOMAIN_MODESET) | \
1788 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 1798 BIT_ULL(POWER_DOMAIN_AUX_A) | \
1799 BIT_ULL(POWER_DOMAIN_GMBUS) | \
1789 BIT_ULL(POWER_DOMAIN_INIT)) 1800 BIT_ULL(POWER_DOMAIN_INIT))
1790 1801
1791#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 1802#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index a2978a37b4f3..700fc754f28a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -174,6 +174,7 @@ gf119_sor = {
174 .links = gf119_sor_dp_links, 174 .links = gf119_sor_dp_links,
175 .power = g94_sor_dp_power, 175 .power = g94_sor_dp_power,
176 .pattern = gf119_sor_dp_pattern, 176 .pattern = gf119_sor_dp_pattern,
177 .drive = gf119_sor_dp_drive,
177 .vcpi = gf119_sor_dp_vcpi, 178 .vcpi = gf119_sor_dp_vcpi,
178 .audio = gf119_sor_dp_audio, 179 .audio = gf119_sor_dp_audio,
179 .audio_sym = gf119_sor_dp_audio_sym, 180 .audio_sym = gf119_sor_dp_audio_sym,
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
index e626eddf24d5..23db74ae1826 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
@@ -78,6 +78,8 @@ static void hdmi_cec_received_msg(struct hdmi_core_data *core)
78 78
79 /* then read the message */ 79 /* then read the message */
80 msg.len = cnt & 0xf; 80 msg.len = cnt & 0xf;
81 if (msg.len > CEC_MAX_MSG_SIZE - 2)
82 msg.len = CEC_MAX_MSG_SIZE - 2;
81 msg.msg[0] = hdmi_read_reg(core->base, 83 msg.msg[0] = hdmi_read_reg(core->base,
82 HDMI_CEC_RX_CMD_HEADER); 84 HDMI_CEC_RX_CMD_HEADER);
83 msg.msg[1] = hdmi_read_reg(core->base, 85 msg.msg[1] = hdmi_read_reg(core->base,
@@ -104,26 +106,6 @@ static void hdmi_cec_received_msg(struct hdmi_core_data *core)
104 } 106 }
105} 107}
106 108
107static void hdmi_cec_transmit_fifo_empty(struct hdmi_core_data *core, u32 stat1)
108{
109 if (stat1 & 2) {
110 u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
111
112 cec_transmit_done(core->adap,
113 CEC_TX_STATUS_NACK |
114 CEC_TX_STATUS_MAX_RETRIES,
115 0, (dbg3 >> 4) & 7, 0, 0);
116 } else if (stat1 & 1) {
117 cec_transmit_done(core->adap,
118 CEC_TX_STATUS_ARB_LOST |
119 CEC_TX_STATUS_MAX_RETRIES,
120 0, 0, 0, 0);
121 } else if (stat1 == 0) {
122 cec_transmit_done(core->adap, CEC_TX_STATUS_OK,
123 0, 0, 0, 0);
124 }
125}
126
127void hdmi4_cec_irq(struct hdmi_core_data *core) 109void hdmi4_cec_irq(struct hdmi_core_data *core)
128{ 110{
129 u32 stat0 = hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_0); 111 u32 stat0 = hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_0);
@@ -132,27 +114,21 @@ void hdmi4_cec_irq(struct hdmi_core_data *core)
132 hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_0, stat0); 114 hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_0, stat0);
133 hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, stat1); 115 hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, stat1);
134 116
135 if (stat0 & 0x40) 117 if (stat0 & 0x20) {
118 cec_transmit_done(core->adap, CEC_TX_STATUS_OK,
119 0, 0, 0, 0);
136 REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7); 120 REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
137 else if (stat0 & 0x24) 121 } else if (stat1 & 0x02) {
138 hdmi_cec_transmit_fifo_empty(core, stat1);
139 if (stat1 & 2) {
140 u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3); 122 u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
141 123
142 cec_transmit_done(core->adap, 124 cec_transmit_done(core->adap,
143 CEC_TX_STATUS_NACK | 125 CEC_TX_STATUS_NACK |
144 CEC_TX_STATUS_MAX_RETRIES, 126 CEC_TX_STATUS_MAX_RETRIES,
145 0, (dbg3 >> 4) & 7, 0, 0); 127 0, (dbg3 >> 4) & 7, 0, 0);
146 } else if (stat1 & 1) { 128 REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
147 cec_transmit_done(core->adap,
148 CEC_TX_STATUS_ARB_LOST |
149 CEC_TX_STATUS_MAX_RETRIES,
150 0, 0, 0, 0);
151 } 129 }
152 if (stat0 & 0x02) 130 if (stat0 & 0x02)
153 hdmi_cec_received_msg(core); 131 hdmi_cec_received_msg(core);
154 if (stat1 & 0x3)
155 REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
156} 132}
157 133
158static bool hdmi_cec_clear_tx_fifo(struct cec_adapter *adap) 134static bool hdmi_cec_clear_tx_fifo(struct cec_adapter *adap)
@@ -231,18 +207,14 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
231 /* 207 /*
232 * Enable CEC interrupts: 208 * Enable CEC interrupts:
233 * Transmit Buffer Full/Empty Change event 209 * Transmit Buffer Full/Empty Change event
234 * Transmitter FIFO Empty event
235 * Receiver FIFO Not Empty event 210 * Receiver FIFO Not Empty event
236 */ 211 */
237 hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0x26); 212 hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0x22);
238 /* 213 /*
239 * Enable CEC interrupts: 214 * Enable CEC interrupts:
240 * RX FIFO Overrun Error event
241 * Short Pulse Detected event
242 * Frame Retransmit Count Exceeded event 215 * Frame Retransmit Count Exceeded event
243 * Start Bit Irregularity event
244 */ 216 */
245 hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0x0f); 217 hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0x02);
246 218
247 /* cec calibration enable (self clearing) */ 219 /* cec calibration enable (self clearing) */
248 hdmi_write_reg(core->base, HDMI_CEC_SETUP, 0x03); 220 hdmi_write_reg(core->base, HDMI_CEC_SETUP, 0x03);
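
The hdmi4_cec hunk above caps the hardware-reported payload count before the message is copied: msg.msg[] is CEC_MAX_MSG_SIZE (16) bytes and the surrounding code already stores the header and opcode in msg.msg[0] and msg.msg[1], so the operand count must not exceed CEC_MAX_MSG_SIZE - 2. A minimal sketch (read_fifo_count() is a hypothetical register read):

	struct cec_msg msg = {};
	u8 cnt = read_fifo_count();

	msg.len = cnt & 0xf;
	if (msg.len > CEC_MAX_MSG_SIZE - 2)
		msg.len = CEC_MAX_MSG_SIZE - 2;
	/* ...then copy header, opcode and at most msg.len operand bytes into msg.msg[] */
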
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index b0a1dedac802..476079f1255f 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2656,6 +2656,9 @@ static int tegra_sor_probe(struct platform_device *pdev)
2656 name, err); 2656 name, err);
2657 goto remove; 2657 goto remove;
2658 } 2658 }
2659 } else {
2660 /* fall back to the module clock on SOR0 (eDP/LVDS only) */
2661 sor->clk_out = sor->clk;
2659 } 2662 }
2660 2663
2661 sor->clk_parent = devm_clk_get(&pdev->dev, "parent"); 2664 sor->clk_parent = devm_clk_get(&pdev->dev, "parent");
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index b5ba6441489f..5d252fb27a82 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -1007,6 +1007,8 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
1007 pr_info("Initializing pool allocator\n"); 1007 pr_info("Initializing pool allocator\n");
1008 1008
1009 _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); 1009 _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
1010 if (!_manager)
1011 return -ENOMEM;
1010 1012
1011 ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0); 1013 ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);
1012 1014
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 26eddbb62893..3dd62d75f531 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -209,9 +209,6 @@ vc4_irq_postinstall(struct drm_device *dev)
209{ 209{
210 struct vc4_dev *vc4 = to_vc4_dev(dev); 210 struct vc4_dev *vc4 = to_vc4_dev(dev);
211 211
212 /* Undo the effects of a previous vc4_irq_uninstall. */
213 enable_irq(dev->irq);
214
215 /* Enable both the render done and out of memory interrupts. */ 212 /* Enable both the render done and out of memory interrupts. */
216 V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS); 213 V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
217 214
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 622cd43840b8..493f392b3a0a 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -327,6 +327,9 @@ static int vc4_v3d_runtime_resume(struct device *dev)
327 return ret; 327 return ret;
328 328
329 vc4_v3d_init_hw(vc4->dev); 329 vc4_v3d_init_hw(vc4->dev);
330
331 /* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */
332 enable_irq(vc4->dev->irq);
330 vc4_irq_postinstall(vc4->dev); 333 vc4_irq_postinstall(vc4->dev);
331 334
332 return 0; 335 return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 21c62a34e558..87e8af5776a3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2731,6 +2731,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2731 } 2731 }
2732 2732
2733 view_type = vmw_view_cmd_to_type(header->id); 2733 view_type = vmw_view_cmd_to_type(header->id);
2734 if (view_type == vmw_view_max)
2735 return -EINVAL;
2734 cmd = container_of(header, typeof(*cmd), header); 2736 cmd = container_of(header, typeof(*cmd), header);
2735 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2737 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2736 user_surface_converter, 2738 user_surface_converter,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 0545740b3724..641294aef165 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -697,7 +697,6 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
697 vps->pinned = 0; 697 vps->pinned = 0;
698 698
699 /* Mapping is managed by prepare_fb/cleanup_fb */ 699 /* Mapping is managed by prepare_fb/cleanup_fb */
700 memset(&vps->guest_map, 0, sizeof(vps->guest_map));
701 memset(&vps->host_map, 0, sizeof(vps->host_map)); 700 memset(&vps->host_map, 0, sizeof(vps->host_map));
702 vps->cpp = 0; 701 vps->cpp = 0;
703 702
@@ -760,11 +759,6 @@ vmw_du_plane_destroy_state(struct drm_plane *plane,
760 759
761 760
762 /* Should have been freed by cleanup_fb */ 761 /* Should have been freed by cleanup_fb */
763 if (vps->guest_map.virtual) {
764 DRM_ERROR("Guest mapping not freed\n");
765 ttm_bo_kunmap(&vps->guest_map);
766 }
767
768 if (vps->host_map.virtual) { 762 if (vps->host_map.virtual) {
769 DRM_ERROR("Host mapping not freed\n"); 763 DRM_ERROR("Host mapping not freed\n");
770 ttm_bo_kunmap(&vps->host_map); 764 ttm_bo_kunmap(&vps->host_map);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index ff9c8389ff21..cd9da2dd79af 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -175,7 +175,7 @@ struct vmw_plane_state {
175 int pinned; 175 int pinned;
176 176
177 /* For CPU Blit */ 177 /* For CPU Blit */
178 struct ttm_bo_kmap_obj host_map, guest_map; 178 struct ttm_bo_kmap_obj host_map;
179 unsigned int cpp; 179 unsigned int cpp;
180}; 180};
181 181
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 90b5437fd787..b68d74888ab1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -114,7 +114,7 @@ struct vmw_screen_target_display_unit {
114 bool defined; 114 bool defined;
115 115
116 /* For CPU Blit */ 116 /* For CPU Blit */
117 struct ttm_bo_kmap_obj host_map, guest_map; 117 struct ttm_bo_kmap_obj host_map;
118 unsigned int cpp; 118 unsigned int cpp;
119}; 119};
120 120
@@ -695,7 +695,8 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
695 s32 src_pitch, dst_pitch; 695 s32 src_pitch, dst_pitch;
696 u8 *src, *dst; 696 u8 *src, *dst;
697 bool not_used; 697 bool not_used;
698 698 struct ttm_bo_kmap_obj guest_map;
699 int ret;
699 700
700 if (!dirty->num_hits) 701 if (!dirty->num_hits)
701 return; 702 return;
@@ -706,6 +707,13 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
706 if (width == 0 || height == 0) 707 if (width == 0 || height == 0)
707 return; 708 return;
708 709
710 ret = ttm_bo_kmap(&ddirty->buf->base, 0, ddirty->buf->base.num_pages,
711 &guest_map);
712 if (ret) {
713 DRM_ERROR("Failed mapping framebuffer for blit: %d\n",
714 ret);
715 goto out_cleanup;
716 }
709 717
710 /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */ 718 /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */
711 src_pitch = stdu->display_srf->base_size.width * stdu->cpp; 719 src_pitch = stdu->display_srf->base_size.width * stdu->cpp;
@@ -713,7 +721,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
713 src += ddirty->top * src_pitch + ddirty->left * stdu->cpp; 721 src += ddirty->top * src_pitch + ddirty->left * stdu->cpp;
714 722
715 dst_pitch = ddirty->pitch; 723 dst_pitch = ddirty->pitch;
716 dst = ttm_kmap_obj_virtual(&stdu->guest_map, &not_used); 724 dst = ttm_kmap_obj_virtual(&guest_map, &not_used);
717 dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp; 725 dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
718 726
719 727
@@ -772,6 +780,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
772 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 780 vmw_fifo_commit(dev_priv, sizeof(*cmd));
773 } 781 }
774 782
783 ttm_bo_kunmap(&guest_map);
775out_cleanup: 784out_cleanup:
776 ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX; 785 ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX;
777 ddirty->right = ddirty->bottom = S32_MIN; 786 ddirty->right = ddirty->bottom = S32_MIN;
@@ -1109,9 +1118,6 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
1109{ 1118{
1110 struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); 1119 struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
1111 1120
1112 if (vps->guest_map.virtual)
1113 ttm_bo_kunmap(&vps->guest_map);
1114
1115 if (vps->host_map.virtual) 1121 if (vps->host_map.virtual)
1116 ttm_bo_kunmap(&vps->host_map); 1122 ttm_bo_kunmap(&vps->host_map);
1117 1123
@@ -1277,33 +1283,11 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
1277 */ 1283 */
1278 if (vps->content_fb_type == SEPARATE_DMA && 1284 if (vps->content_fb_type == SEPARATE_DMA &&
1279 !(dev_priv->capabilities & SVGA_CAP_3D)) { 1285 !(dev_priv->capabilities & SVGA_CAP_3D)) {
1280
1281 struct vmw_framebuffer_dmabuf *new_vfbd;
1282
1283 new_vfbd = vmw_framebuffer_to_vfbd(new_fb);
1284
1285 ret = ttm_bo_reserve(&new_vfbd->buffer->base, false, false,
1286 NULL);
1287 if (ret)
1288 goto out_srf_unpin;
1289
1290 ret = ttm_bo_kmap(&new_vfbd->buffer->base, 0,
1291 new_vfbd->buffer->base.num_pages,
1292 &vps->guest_map);
1293
1294 ttm_bo_unreserve(&new_vfbd->buffer->base);
1295
1296 if (ret) {
1297 DRM_ERROR("Failed to map content buffer to CPU\n");
1298 goto out_srf_unpin;
1299 }
1300
1301 ret = ttm_bo_kmap(&vps->surf->res.backup->base, 0, 1286 ret = ttm_bo_kmap(&vps->surf->res.backup->base, 0,
1302 vps->surf->res.backup->base.num_pages, 1287 vps->surf->res.backup->base.num_pages,
1303 &vps->host_map); 1288 &vps->host_map);
1304 if (ret) { 1289 if (ret) {
1305 DRM_ERROR("Failed to map display buffer to CPU\n"); 1290 DRM_ERROR("Failed to map display buffer to CPU\n");
1306 ttm_bo_kunmap(&vps->guest_map);
1307 goto out_srf_unpin; 1291 goto out_srf_unpin;
1308 } 1292 }
1309 1293
@@ -1350,7 +1334,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
1350 stdu->display_srf = vps->surf; 1334 stdu->display_srf = vps->surf;
1351 stdu->content_fb_type = vps->content_fb_type; 1335 stdu->content_fb_type = vps->content_fb_type;
1352 stdu->cpp = vps->cpp; 1336 stdu->cpp = vps->cpp;
1353 memcpy(&stdu->guest_map, &vps->guest_map, sizeof(vps->guest_map));
1354 memcpy(&stdu->host_map, &vps->host_map, sizeof(vps->host_map)); 1337 memcpy(&stdu->host_map, &vps->host_map, sizeof(vps->host_map));
1355 1338
1356 if (!stdu->defined) 1339 if (!stdu->defined)
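
The vmwgfx hunks above drop the long-lived guest_map from the plane state and instead kmap the guest buffer only for the duration of the CPU blit, unmapping it on every exit path. A sketch of that scoped-mapping pattern; "bo" stands in for the dma-buf backing object and the blit itself is elided:

	struct ttm_bo_kmap_obj guest_map;
	bool is_iomem;
	u8 *dst;
	int ret;

	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &guest_map);
	if (ret)
		return ret;

	dst = ttm_kmap_obj_virtual(&guest_map, &is_iomem);
	/* ... copy the dirty rectangle into dst ... */

	ttm_bo_kunmap(&guest_map);
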
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index a1d687a664f8..66f0268f37a6 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -314,7 +314,7 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
314} 314}
315#endif 315#endif
316 316
317struct ib_device *__ib_device_get_by_index(u32 ifindex); 317struct ib_device *ib_device_get_by_index(u32 ifindex);
318/* RDMA device netlink */ 318/* RDMA device netlink */
319void nldev_init(void); 319void nldev_init(void);
320void nldev_exit(void); 320void nldev_exit(void);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 30914f3baa5f..465520627e4b 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -134,7 +134,7 @@ static int ib_device_check_mandatory(struct ib_device *device)
134 return 0; 134 return 0;
135} 135}
136 136
137struct ib_device *__ib_device_get_by_index(u32 index) 137static struct ib_device *__ib_device_get_by_index(u32 index)
138{ 138{
139 struct ib_device *device; 139 struct ib_device *device;
140 140
@@ -145,6 +145,22 @@ struct ib_device *__ib_device_get_by_index(u32 index)
145 return NULL; 145 return NULL;
146} 146}
147 147
148/*
 149 * Caller is responsible for dropping the reference by calling put_device()
150 */
151struct ib_device *ib_device_get_by_index(u32 index)
152{
153 struct ib_device *device;
154
155 down_read(&lists_rwsem);
156 device = __ib_device_get_by_index(index);
157 if (device)
158 get_device(&device->dev);
159
160 up_read(&lists_rwsem);
161 return device;
162}
163
148static struct ib_device *__ib_device_get_by_name(const char *name) 164static struct ib_device *__ib_device_get_by_name(const char *name)
149{ 165{
150 struct ib_device *device; 166 struct ib_device *device;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 9a05245a1acf..0dcd1aa6f683 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -142,27 +142,34 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
142 142
143 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 143 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
144 144
145 device = __ib_device_get_by_index(index); 145 device = ib_device_get_by_index(index);
146 if (!device) 146 if (!device)
147 return -EINVAL; 147 return -EINVAL;
148 148
149 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 149 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
150 if (!msg) 150 if (!msg) {
151 return -ENOMEM; 151 err = -ENOMEM;
152 goto err;
153 }
152 154
153 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 155 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
154 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 156 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
155 0, 0); 157 0, 0);
156 158
157 err = fill_dev_info(msg, device); 159 err = fill_dev_info(msg, device);
158 if (err) { 160 if (err)
159 nlmsg_free(msg); 161 goto err_free;
160 return err;
161 }
162 162
163 nlmsg_end(msg, nlh); 163 nlmsg_end(msg, nlh);
164 164
165 put_device(&device->dev);
165 return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); 166 return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
167
168err_free:
169 nlmsg_free(msg);
170err:
171 put_device(&device->dev);
172 return err;
166} 173}
167 174
168static int _nldev_get_dumpit(struct ib_device *device, 175static int _nldev_get_dumpit(struct ib_device *device,
@@ -220,31 +227,40 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
220 return -EINVAL; 227 return -EINVAL;
221 228
222 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 229 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
223 device = __ib_device_get_by_index(index); 230 device = ib_device_get_by_index(index);
224 if (!device) 231 if (!device)
225 return -EINVAL; 232 return -EINVAL;
226 233
227 port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 234 port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
228 if (!rdma_is_port_valid(device, port)) 235 if (!rdma_is_port_valid(device, port)) {
229 return -EINVAL; 236 err = -EINVAL;
237 goto err;
238 }
230 239
231 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 240 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
232 if (!msg) 241 if (!msg) {
233 return -ENOMEM; 242 err = -ENOMEM;
243 goto err;
244 }
234 245
235 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 246 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
236 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 247 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
237 0, 0); 248 0, 0);
238 249
239 err = fill_port_info(msg, device, port); 250 err = fill_port_info(msg, device, port);
240 if (err) { 251 if (err)
241 nlmsg_free(msg); 252 goto err_free;
242 return err;
243 }
244 253
245 nlmsg_end(msg, nlh); 254 nlmsg_end(msg, nlh);
255 put_device(&device->dev);
246 256
247 return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); 257 return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
258
259err_free:
260 nlmsg_free(msg);
261err:
262 put_device(&device->dev);
263 return err;
248} 264}
249 265
250static int nldev_port_get_dumpit(struct sk_buff *skb, 266static int nldev_port_get_dumpit(struct sk_buff *skb,
@@ -265,7 +281,7 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
265 return -EINVAL; 281 return -EINVAL;
266 282
267 ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 283 ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
268 device = __ib_device_get_by_index(ifindex); 284 device = ib_device_get_by_index(ifindex);
269 if (!device) 285 if (!device)
270 return -EINVAL; 286 return -EINVAL;
271 287
@@ -299,7 +315,9 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
299 nlmsg_end(skb, nlh); 315 nlmsg_end(skb, nlh);
300 } 316 }
301 317
302out: cb->args[0] = idx; 318out:
319 put_device(&device->dev);
320 cb->args[0] = idx;
303 return skb->len; 321 return skb->len;
304} 322}
305 323
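
The core/nldev hunks above replace the raw lookup with a reference-counted one: ib_device_get_by_index() takes a reference under lists_rwsem, so every caller path, including the error paths, must drop it with put_device(). A minimal caller sketch (fill_reply() is a stand-in for building the netlink message):

	struct ib_device *device;
	int err;

	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	err = fill_reply(device);
	put_device(&device->dev);
	return err;
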
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 313bfb9ccb71..4975f3e6596e 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -642,7 +642,6 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
642 goto err_free_mr; 642 goto err_free_mr;
643 643
644 mr->max_pages = max_num_sg; 644 mr->max_pages = max_num_sg;
645
646 err = mlx4_mr_enable(dev->dev, &mr->mmr); 645 err = mlx4_mr_enable(dev->dev, &mr->mmr);
647 if (err) 646 if (err)
648 goto err_free_pl; 647 goto err_free_pl;
@@ -653,6 +652,7 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
653 return &mr->ibmr; 652 return &mr->ibmr;
654 653
655err_free_pl: 654err_free_pl:
655 mr->ibmr.device = pd->device;
656 mlx4_free_priv_pages(mr); 656 mlx4_free_priv_pages(mr);
657err_free_mr: 657err_free_mr:
658 (void) mlx4_mr_free(dev->dev, &mr->mmr); 658 (void) mlx4_mr_free(dev->dev, &mr->mmr);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 12b7f911f0e5..8880351df179 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -902,8 +902,8 @@ static int path_rec_start(struct net_device *dev,
902 return 0; 902 return 0;
903} 903}
904 904
905static void neigh_add_path(struct sk_buff *skb, u8 *daddr, 905static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
906 struct net_device *dev) 906 struct net_device *dev)
907{ 907{
908 struct ipoib_dev_priv *priv = ipoib_priv(dev); 908 struct ipoib_dev_priv *priv = ipoib_priv(dev);
909 struct rdma_netdev *rn = netdev_priv(dev); 909 struct rdma_netdev *rn = netdev_priv(dev);
@@ -917,7 +917,15 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
917 spin_unlock_irqrestore(&priv->lock, flags); 917 spin_unlock_irqrestore(&priv->lock, flags);
918 ++dev->stats.tx_dropped; 918 ++dev->stats.tx_dropped;
919 dev_kfree_skb_any(skb); 919 dev_kfree_skb_any(skb);
920 return; 920 return NULL;
921 }
922
 923 /* To avoid a race condition, make sure that the
924 * neigh will be added only once.
925 */
926 if (unlikely(!list_empty(&neigh->list))) {
927 spin_unlock_irqrestore(&priv->lock, flags);
928 return neigh;
921 } 929 }
922 930
923 path = __path_find(dev, daddr + 4); 931 path = __path_find(dev, daddr + 4);
@@ -956,7 +964,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
956 path->ah->last_send = rn->send(dev, skb, path->ah->ah, 964 path->ah->last_send = rn->send(dev, skb, path->ah->ah,
957 IPOIB_QPN(daddr)); 965 IPOIB_QPN(daddr));
958 ipoib_neigh_put(neigh); 966 ipoib_neigh_put(neigh);
959 return; 967 return NULL;
960 } 968 }
961 } else { 969 } else {
962 neigh->ah = NULL; 970 neigh->ah = NULL;
@@ -973,7 +981,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
973 981
974 spin_unlock_irqrestore(&priv->lock, flags); 982 spin_unlock_irqrestore(&priv->lock, flags);
975 ipoib_neigh_put(neigh); 983 ipoib_neigh_put(neigh);
976 return; 984 return NULL;
977 985
978err_path: 986err_path:
979 ipoib_neigh_free(neigh); 987 ipoib_neigh_free(neigh);
@@ -983,6 +991,8 @@ err_drop:
983 991
984 spin_unlock_irqrestore(&priv->lock, flags); 992 spin_unlock_irqrestore(&priv->lock, flags);
985 ipoib_neigh_put(neigh); 993 ipoib_neigh_put(neigh);
994
995 return NULL;
986} 996}
987 997
988static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, 998static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
@@ -1091,8 +1101,9 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
1091 case htons(ETH_P_TIPC): 1101 case htons(ETH_P_TIPC):
1092 neigh = ipoib_neigh_get(dev, phdr->hwaddr); 1102 neigh = ipoib_neigh_get(dev, phdr->hwaddr);
1093 if (unlikely(!neigh)) { 1103 if (unlikely(!neigh)) {
1094 neigh_add_path(skb, phdr->hwaddr, dev); 1104 neigh = neigh_add_path(skb, phdr->hwaddr, dev);
1095 return NETDEV_TX_OK; 1105 if (likely(!neigh))
1106 return NETDEV_TX_OK;
1096 } 1107 }
1097 break; 1108 break;
1098 case htons(ETH_P_ARP): 1109 case htons(ETH_P_ARP):
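The neigh_add_path() hunk above closes a race by returning the neighbour and bailing out early when the entry is already on a list, so two senders racing through ipoib_start_xmit() cannot queue the same neigh twice. A rough standalone sketch of that guard follows; it is not the IPoIB code, and every name in it (struct node, add_tail_once, ...) is invented. As in the kernel's list_head convention, a node whose links point at itself counts as "not listed"; in the driver the check and the insertion happen under priv->lock, which the sketch leaves out.

/* Minimal sketch of the "insert only once" guard; plain C, not kernel code. */
#include <stdio.h>
#include <stdbool.h>

struct node {
	struct node *prev, *next;   /* self-pointing while unlisted, like an initialised list_head */
	int id;
};

static void node_init(struct node *n, int id)
{
	n->prev = n->next = n;      /* "empty" state: not on any list */
	n->id = id;
}

static bool node_on_list(const struct node *n)
{
	return n->next != n;        /* analogue of !list_empty() on the node itself */
}

/* Add at the tail of a circular list headed by 'head'; refuse double insertion. */
static bool add_tail_once(struct node *head, struct node *n)
{
	if (node_on_list(n))
		return false;       /* already queued by a concurrent path: do nothing */
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
	return true;
}

int main(void)
{
	struct node head, a;

	node_init(&head, -1);
	node_init(&a, 1);

	printf("first add:  %s\n", add_tail_once(&head, &a) ? "inserted" : "skipped");
	printf("second add: %s\n", add_tail_once(&head, &a) ? "inserted" : "skipped");
	return 0;
}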
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 93e149efc1f5..9b3f47ae2016 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -816,7 +816,10 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
816 spin_lock_irqsave(&priv->lock, flags); 816 spin_lock_irqsave(&priv->lock, flags);
817 if (!neigh) { 817 if (!neigh) {
818 neigh = ipoib_neigh_alloc(daddr, dev); 818 neigh = ipoib_neigh_alloc(daddr, dev);
819 if (neigh) { 819 /* Make sure that the neigh will be added only
820 * once to mcast list.
821 */
822 if (neigh && list_empty(&neigh->list)) {
820 kref_get(&mcast->ah->ref); 823 kref_get(&mcast->ah->ref);
821 neigh->ah = mcast->ah; 824 neigh->ah = mcast->ah;
822 list_add_tail(&neigh->list, &mcast->neigh_list); 825 list_add_tail(&neigh->list, &mcast->neigh_list);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 8a1bd354b1cc..bfa576aa9f03 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1013,8 +1013,7 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
1013 return -ENOMEM; 1013 return -ENOMEM;
1014 1014
1015 attr->qp_state = IB_QPS_INIT; 1015 attr->qp_state = IB_QPS_INIT;
1016 attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ | 1016 attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
1017 IB_ACCESS_REMOTE_WRITE;
1018 attr->port_num = ch->sport->port; 1017 attr->port_num = ch->sport->port;
1019 attr->pkey_index = 0; 1018 attr->pkey_index = 0;
1020 1019
@@ -2078,7 +2077,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2078 goto destroy_ib; 2077 goto destroy_ib;
2079 } 2078 }
2080 2079
2081 guid = (__be16 *)&param->primary_path->sgid.global.interface_id; 2080 guid = (__be16 *)&param->primary_path->dgid.global.interface_id;
2082 snprintf(ch->ini_guid, sizeof(ch->ini_guid), "%04x:%04x:%04x:%04x", 2081 snprintf(ch->ini_guid, sizeof(ch->ini_guid), "%04x:%04x:%04x:%04x",
2083 be16_to_cpu(guid[0]), be16_to_cpu(guid[1]), 2082 be16_to_cpu(guid[0]), be16_to_cpu(guid[1]),
2084 be16_to_cpu(guid[2]), be16_to_cpu(guid[3])); 2083 be16_to_cpu(guid[2]), be16_to_cpu(guid[3]));
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 3d8ff09eba57..c868a878c84f 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -163,7 +163,7 @@ static unsigned int get_time_pit(void)
163#define GET_TIME(x) do { x = (unsigned int)rdtsc(); } while (0) 163#define GET_TIME(x) do { x = (unsigned int)rdtsc(); } while (0)
164#define DELTA(x,y) ((y)-(x)) 164#define DELTA(x,y) ((y)-(x))
165#define TIME_NAME "TSC" 165#define TIME_NAME "TSC"
166#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_TILE) 166#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_RISCV) || defined(CONFIG_TILE)
167#define GET_TIME(x) do { x = get_cycles(); } while (0) 167#define GET_TIME(x) do { x = get_cycles(); } while (0)
168#define DELTA(x,y) ((y)-(x)) 168#define DELTA(x,y) ((y)-(x))
169#define TIME_NAME "get_cycles" 169#define TIME_NAME "get_cycles"
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index ae473123583b..3d51175c4d72 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1651,7 +1651,7 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
1651 return union_desc; 1651 return union_desc;
1652 1652
1653 dev_err(&intf->dev, 1653 dev_err(&intf->dev,
1654 "Union descriptor to short (%d vs %zd\n)", 1654 "Union descriptor too short (%d vs %zd)\n",
1655 union_desc->bLength, sizeof(*union_desc)); 1655 union_desc->bLength, sizeof(*union_desc));
1656 return NULL; 1656 return NULL;
1657 } 1657 }
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 6bf56bb5f8d9..d91f3b1c5375 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -326,8 +326,6 @@ static int xenkbd_probe(struct xenbus_device *dev,
326 0, width, 0, 0); 326 0, width, 0, 0);
327 input_set_abs_params(mtouch, ABS_MT_POSITION_Y, 327 input_set_abs_params(mtouch, ABS_MT_POSITION_Y,
328 0, height, 0, 0); 328 0, height, 0, 0);
329 input_set_abs_params(mtouch, ABS_MT_PRESSURE,
330 0, 255, 0, 0);
331 329
332 ret = input_mt_init_slots(mtouch, num_cont, INPUT_MT_DIRECT); 330 ret = input_mt_init_slots(mtouch, num_cont, INPUT_MT_DIRECT);
333 if (ret) { 331 if (ret) {
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index b84cd978fce2..a4aaa748e987 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1613,7 +1613,7 @@ static int elantech_set_properties(struct elantech_data *etd)
1613 case 5: 1613 case 5:
1614 etd->hw_version = 3; 1614 etd->hw_version = 3;
1615 break; 1615 break;
1616 case 6 ... 14: 1616 case 6 ... 15:
1617 etd->hw_version = 4; 1617 etd->hw_version = 4;
1618 break; 1618 break;
1619 default: 1619 default:
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index e102d7764bc2..a458e5ec9e41 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -27,6 +27,7 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/input.h> 28#include <linux/input.h>
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
30#include <linux/irq.h>
30#include <linux/platform_device.h> 31#include <linux/platform_device.h>
31#include <linux/async.h> 32#include <linux/async.h>
32#include <linux/i2c.h> 33#include <linux/i2c.h>
@@ -1261,10 +1262,13 @@ static int elants_i2c_probe(struct i2c_client *client,
1261 } 1262 }
1262 1263
1263 /* 1264 /*
1264 * Systems using device tree should set up interrupt via DTS, 1265 * Platform code (ACPI, DTS) should normally set up interrupt
1265 * the rest will use the default falling edge interrupts. 1266 * for us, but in case it did not let's fall back to using falling
1267 * edge to be compatible with older Chromebooks.
1266 */ 1268 */
1267 irqflags = client->dev.of_node ? 0 : IRQF_TRIGGER_FALLING; 1269 irqflags = irq_get_trigger_type(client->irq);
1270 if (!irqflags)
1271 irqflags = IRQF_TRIGGER_FALLING;
1268 1272
1269 error = devm_request_threaded_irq(&client->dev, client->irq, 1273 error = devm_request_threaded_irq(&client->dev, client->irq,
1270 NULL, elants_i2c_irq, 1274 NULL, elants_i2c_irq,
diff --git a/drivers/input/touchscreen/hideep.c b/drivers/input/touchscreen/hideep.c
index fc080a7c2e1f..f1cd4dd9a4a3 100644
--- a/drivers/input/touchscreen/hideep.c
+++ b/drivers/input/touchscreen/hideep.c
@@ -10,8 +10,7 @@
10#include <linux/of.h> 10#include <linux/of.h>
11#include <linux/firmware.h> 11#include <linux/firmware.h>
12#include <linux/delay.h> 12#include <linux/delay.h>
13#include <linux/gpio.h> 13#include <linux/gpio/consumer.h>
14#include <linux/gpio/machine.h>
15#include <linux/i2c.h> 14#include <linux/i2c.h>
16#include <linux/acpi.h> 15#include <linux/acpi.h>
17#include <linux/interrupt.h> 16#include <linux/interrupt.h>
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index f122071688fd..744592d330ca 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1698,13 +1698,15 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
1698 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; 1698 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
1699 domain->geometry.aperture_end = (1UL << ias) - 1; 1699 domain->geometry.aperture_end = (1UL << ias) - 1;
1700 domain->geometry.force_aperture = true; 1700 domain->geometry.force_aperture = true;
1701 smmu_domain->pgtbl_ops = pgtbl_ops;
1702 1701
1703 ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg); 1702 ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
1704 if (ret < 0) 1703 if (ret < 0) {
1705 free_io_pgtable_ops(pgtbl_ops); 1704 free_io_pgtable_ops(pgtbl_ops);
1705 return ret;
1706 }
1706 1707
1707 return ret; 1708 smmu_domain->pgtbl_ops = pgtbl_ops;
1709 return 0;
1708} 1710}
1709 1711
1710static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) 1712static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
@@ -1731,7 +1733,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
1731 1733
1732static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec) 1734static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
1733{ 1735{
1734 int i; 1736 int i, j;
1735 struct arm_smmu_master_data *master = fwspec->iommu_priv; 1737 struct arm_smmu_master_data *master = fwspec->iommu_priv;
1736 struct arm_smmu_device *smmu = master->smmu; 1738 struct arm_smmu_device *smmu = master->smmu;
1737 1739
@@ -1739,6 +1741,13 @@ static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
1739 u32 sid = fwspec->ids[i]; 1741 u32 sid = fwspec->ids[i];
1740 __le64 *step = arm_smmu_get_step_for_sid(smmu, sid); 1742 __le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
1741 1743
1744 /* Bridged PCI devices may end up with duplicated IDs */
1745 for (j = 0; j < i; j++)
1746 if (fwspec->ids[j] == sid)
1747 break;
1748 if (j < i)
1749 continue;
1750
1742 arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste); 1751 arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
1743 } 1752 }
1744} 1753}
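The SMMU hunk skips stream IDs that already occurred earlier in fwspec->ids, so a bridged PCI device that aliases several IDs to the same value does not get its stream-table entry written twice. The quadratic scan over the already-processed prefix is a common small-array dedup; a standalone sketch with made-up names, not the driver code:

/* Sketch of the duplicate-skip loop used above; ordinary C, hypothetical names. */
#include <stdio.h>

/* Call 'install' once per distinct id, preserving first-seen order. */
static void install_unique(const unsigned int *ids, int count,
			   void (*install)(unsigned int id))
{
	for (int i = 0; i < count; i++) {
		int j;

		/* Earlier duplicate found: this id was already installed, skip it. */
		for (j = 0; j < i; j++)
			if (ids[j] == ids[i])
				break;
		if (j < i)
			continue;

		install(ids[i]);
	}
}

static void print_id(unsigned int id)
{
	printf("installing id %u\n", id);
}

int main(void)
{
	unsigned int ids[] = { 7, 3, 7, 7, 9, 3 };

	install_unique(ids, 6, print_id);   /* prints 7, 3 and 9 once each */
	return 0;
}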
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index f3654fd2eaf3..ede4fa0ac2cc 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -186,8 +186,9 @@ void led_blink_set(struct led_classdev *led_cdev,
186 unsigned long *delay_on, 186 unsigned long *delay_on,
187 unsigned long *delay_off) 187 unsigned long *delay_off)
188{ 188{
189 led_stop_software_blink(led_cdev); 189 del_timer_sync(&led_cdev->blink_timer);
190 190
191 clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
191 clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags); 192 clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags);
192 clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags); 193 clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags);
193 194
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index 590fb9aad77d..c3ed885c155c 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -1543,6 +1543,9 @@ static void rtsx_pci_shutdown(struct pci_dev *pcidev)
1543 rtsx_pci_power_off(pcr, HOST_ENTER_S1); 1543 rtsx_pci_power_off(pcr, HOST_ENTER_S1);
1544 1544
1545 pci_disable_device(pcidev); 1545 pci_disable_device(pcidev);
1546 free_irq(pcr->irq, (void *)pcr);
1547 if (pcr->msi_en)
1548 pci_disable_msi(pcr->pci);
1546} 1549}
1547 1550
1548#else /* CONFIG_PM */ 1551#else /* CONFIG_PM */
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index fcf7235d5742..157e1d9e7725 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -24,6 +24,7 @@
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/clk.h> 25#include <linux/clk.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/module.h>
27#include <linux/of_device.h> 28#include <linux/of_device.h>
28#include <linux/platform_device.h> 29#include <linux/platform_device.h>
29#include <linux/mmc/host.h> 30#include <linux/mmc/host.h>
@@ -667,3 +668,5 @@ int renesas_sdhi_remove(struct platform_device *pdev)
667 return 0; 668 return 0;
668} 669}
669EXPORT_SYMBOL_GPL(renesas_sdhi_remove); 670EXPORT_SYMBOL_GPL(renesas_sdhi_remove);
671
672MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index f7f157a62a4a..555c7f133eb8 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1424,7 +1424,9 @@ static const struct file_operations s3cmci_fops_state = {
1424struct s3cmci_reg { 1424struct s3cmci_reg {
1425 unsigned short addr; 1425 unsigned short addr;
1426 unsigned char *name; 1426 unsigned char *name;
1427} debug_regs[] = { 1427};
1428
1429static const struct s3cmci_reg debug_regs[] = {
1428 DBG_REG(CON), 1430 DBG_REG(CON),
1429 DBG_REG(PRE), 1431 DBG_REG(PRE),
1430 DBG_REG(CMDARG), 1432 DBG_REG(CMDARG),
@@ -1446,7 +1448,7 @@ struct s3cmci_reg {
1446static int s3cmci_regs_show(struct seq_file *seq, void *v) 1448static int s3cmci_regs_show(struct seq_file *seq, void *v)
1447{ 1449{
1448 struct s3cmci_host *host = seq->private; 1450 struct s3cmci_host *host = seq->private;
1449 struct s3cmci_reg *rptr = debug_regs; 1451 const struct s3cmci_reg *rptr = debug_regs;
1450 1452
1451 for (; rptr->name; rptr++) 1453 for (; rptr->name; rptr++)
1452 seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name, 1454 seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name,
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 90b9a9ccbe60..9285f60e5783 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -963,6 +963,7 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
963 963
964 switch (command) { 964 switch (command) {
965 case NAND_CMD_READ0: 965 case NAND_CMD_READ0:
966 case NAND_CMD_READOOB:
966 case NAND_CMD_PAGEPROG: 967 case NAND_CMD_PAGEPROG:
967 info->use_ecc = 1; 968 info->use_ecc = 1;
968 break; 969 break;
diff --git a/drivers/mux/core.c b/drivers/mux/core.c
index 2260063b0ea8..6e5cf9d9cd99 100644
--- a/drivers/mux/core.c
+++ b/drivers/mux/core.c
@@ -413,6 +413,7 @@ static int of_dev_node_match(struct device *dev, const void *data)
413 return dev->of_node == data; 413 return dev->of_node == data;
414} 414}
415 415
416/* Note this function returns a reference to the mux_chip dev. */
416static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np) 417static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np)
417{ 418{
418 struct device *dev; 419 struct device *dev;
@@ -466,6 +467,7 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
466 (!args.args_count && (mux_chip->controllers > 1))) { 467 (!args.args_count && (mux_chip->controllers > 1))) {
467 dev_err(dev, "%pOF: wrong #mux-control-cells for %pOF\n", 468 dev_err(dev, "%pOF: wrong #mux-control-cells for %pOF\n",
468 np, args.np); 469 np, args.np);
470 put_device(&mux_chip->dev);
469 return ERR_PTR(-EINVAL); 471 return ERR_PTR(-EINVAL);
470 } 472 }
471 473
@@ -476,10 +478,10 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
476 if (controller >= mux_chip->controllers) { 478 if (controller >= mux_chip->controllers) {
477 dev_err(dev, "%pOF: bad mux controller %u specified in %pOF\n", 479 dev_err(dev, "%pOF: bad mux controller %u specified in %pOF\n",
478 np, controller, args.np); 480 np, controller, args.np);
481 put_device(&mux_chip->dev);
479 return ERR_PTR(-EINVAL); 482 return ERR_PTR(-EINVAL);
480 } 483 }
481 484
482 get_device(&mux_chip->dev);
483 return &mux_chip->mux[controller]; 485 return &mux_chip->mux[controller];
484} 486}
485EXPORT_SYMBOL_GPL(mux_control_get); 487EXPORT_SYMBOL_GPL(mux_control_get);
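The mux change pairs the reference taken by of_find_mux_chip_by_node() with a put_device() on every error exit of mux_control_get(), instead of taking the reference only at the very end. The underlying discipline is: acquire up front, release on each failure path, hand ownership to the caller on success. A plain-C sketch of that shape (the chip, refcount and control_get names are invented; nothing here is the mux API):

/* Illustrative "get on lookup, put on every error path" pattern; not the mux code. */
#include <stdio.h>

struct chip {
	int refcount;
	int controllers;
};

static void chip_get(struct chip *c) { c->refcount++; }
static void chip_put(struct chip *c) { c->refcount--; }

/* Pretend lookup: returns the chip with one reference already taken. */
static struct chip *find_chip(struct chip *c)
{
	chip_get(c);
	return c;
}

/* Returns 0 and keeps the lookup reference for the caller on success,
 * or -1 after dropping the reference it took. */
static int control_get(struct chip *c, int controller)
{
	struct chip *found = find_chip(c);

	if (controller >= found->controllers) {
		chip_put(found);    /* error path: give back the lookup reference */
		return -1;
	}
	return 0;                   /* success: caller now owns the reference */
}

int main(void)
{
	struct chip c = { .refcount = 0, .controllers = 2 };

	control_get(&c, 5);         /* fails, reference released */
	printf("after failed get: refcount=%d\n", c.refcount);   /* 0 */

	control_get(&c, 1);         /* succeeds, caller keeps the reference */
	printf("after good get:   refcount=%d\n", c.refcount);   /* 1 */
	chip_put(&c);               /* caller's matching put */
	return 0;
}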
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 0626dcfd1f3d..760d2c07e3a2 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -526,7 +526,7 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
526 data = be32_to_cpup((__be32 *)&cf->data[0]); 526 data = be32_to_cpup((__be32 *)&cf->data[0]);
527 flexcan_write(data, &priv->tx_mb->data[0]); 527 flexcan_write(data, &priv->tx_mb->data[0]);
528 } 528 }
529 if (cf->can_dlc > 3) { 529 if (cf->can_dlc > 4) {
530 data = be32_to_cpup((__be32 *)&cf->data[4]); 530 data = be32_to_cpup((__be32 *)&cf->data[4]);
531 flexcan_write(data, &priv->tx_mb->data[1]); 531 flexcan_write(data, &priv->tx_mb->data[1]);
532 } 532 }
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index b00358297424..12ff0020ecd6 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -395,6 +395,7 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
395 395
396 if (dev->can.state == CAN_STATE_ERROR_WARNING || 396 if (dev->can.state == CAN_STATE_ERROR_WARNING ||
397 dev->can.state == CAN_STATE_ERROR_PASSIVE) { 397 dev->can.state == CAN_STATE_ERROR_PASSIVE) {
398 cf->can_id |= CAN_ERR_CRTL;
398 cf->data[1] = (txerr > rxerr) ? 399 cf->data[1] = (txerr > rxerr) ?
399 CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE; 400 CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE;
400 } 401 }
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 68ac3e88a8ce..8bf80ad9dc44 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -449,7 +449,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
449 dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)", 449 dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
450 rc); 450 rc);
451 451
452 return rc; 452 return (rc > 0) ? 0 : rc;
453} 453}
454 454
455static void gs_usb_xmit_callback(struct urb *urb) 455static void gs_usb_xmit_callback(struct urb *urb)
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 8404e8852a0f..b4c4a2c76437 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -194,7 +194,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
194 tbp = peer_tb; 194 tbp = peer_tb;
195 } 195 }
196 196
197 if (tbp[IFLA_IFNAME]) { 197 if (ifmp && tbp[IFLA_IFNAME]) {
198 nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); 198 nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
199 name_assign_type = NET_NAME_USER; 199 name_assign_type = NET_NAME_USER;
200 } else { 200 } else {
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index f5a8dd96fd75..4498ab897d94 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1500,10 +1500,13 @@ static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds,
1500{ 1500{
1501 struct b53_device *dev = ds->priv; 1501 struct b53_device *dev = ds->priv;
1502 1502
1503 /* Older models support a different tag format that we do not 1503 /* Older models (5325, 5365) support a different tag format that we do
1504 * support in net/dsa/tag_brcm.c yet. 1504 * not support in net/dsa/tag_brcm.c yet. 539x and 531x5 require managed
1505 * mode to be turned on which means we need to specifically manage ARL
1506 * misses on multicast addresses (TBD).
1505 */ 1507 */
1506 if (is5325(dev) || is5365(dev) || !b53_can_enable_brcm_tags(ds, port)) 1508 if (is5325(dev) || is5365(dev) || is539x(dev) || is531x5(dev) ||
1509 !b53_can_enable_brcm_tags(ds, port))
1507 return DSA_TAG_PROTO_NONE; 1510 return DSA_TAG_PROTO_NONE;
1508 1511
1509 /* Broadcom BCM58xx chips have a flow accelerator on Port 8 1512 /* Broadcom BCM58xx chips have a flow accelerator on Port 8
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index f4e13a7014bd..36c8950dbd2d 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -602,7 +602,7 @@ struct vortex_private {
602 struct sk_buff* rx_skbuff[RX_RING_SIZE]; 602 struct sk_buff* rx_skbuff[RX_RING_SIZE];
603 struct sk_buff* tx_skbuff[TX_RING_SIZE]; 603 struct sk_buff* tx_skbuff[TX_RING_SIZE];
604 unsigned int cur_rx, cur_tx; /* The next free ring entry */ 604 unsigned int cur_rx, cur_tx; /* The next free ring entry */
605 unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ 605 unsigned int dirty_tx; /* The ring entries to be free()ed. */
606 struct vortex_extra_stats xstats; /* NIC-specific extra stats */ 606 struct vortex_extra_stats xstats; /* NIC-specific extra stats */
607 struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */ 607 struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
608 dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */ 608 dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */
@@ -618,7 +618,6 @@ struct vortex_private {
618 618
619 /* The remainder are related to chip state, mostly media selection. */ 619 /* The remainder are related to chip state, mostly media selection. */
620 struct timer_list timer; /* Media selection timer. */ 620 struct timer_list timer; /* Media selection timer. */
621 struct timer_list rx_oom_timer; /* Rx skb allocation retry timer */
622 int options; /* User-settable misc. driver options. */ 621 int options; /* User-settable misc. driver options. */
623 unsigned int media_override:4, /* Passed-in media type. */ 622 unsigned int media_override:4, /* Passed-in media type. */
624 default_media:4, /* Read from the EEPROM/Wn3_Config. */ 623 default_media:4, /* Read from the EEPROM/Wn3_Config. */
@@ -760,7 +759,6 @@ static void mdio_sync(struct vortex_private *vp, int bits);
760static int mdio_read(struct net_device *dev, int phy_id, int location); 759static int mdio_read(struct net_device *dev, int phy_id, int location);
761static void mdio_write(struct net_device *vp, int phy_id, int location, int value); 760static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
762static void vortex_timer(struct timer_list *t); 761static void vortex_timer(struct timer_list *t);
763static void rx_oom_timer(struct timer_list *t);
764static netdev_tx_t vortex_start_xmit(struct sk_buff *skb, 762static netdev_tx_t vortex_start_xmit(struct sk_buff *skb,
765 struct net_device *dev); 763 struct net_device *dev);
766static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb, 764static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
@@ -1601,7 +1599,6 @@ vortex_up(struct net_device *dev)
1601 1599
1602 timer_setup(&vp->timer, vortex_timer, 0); 1600 timer_setup(&vp->timer, vortex_timer, 0);
1603 mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait)); 1601 mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait));
1604 timer_setup(&vp->rx_oom_timer, rx_oom_timer, 0);
1605 1602
1606 if (vortex_debug > 1) 1603 if (vortex_debug > 1)
1607 pr_debug("%s: Initial media type %s.\n", 1604 pr_debug("%s: Initial media type %s.\n",
@@ -1676,7 +1673,7 @@ vortex_up(struct net_device *dev)
1676 window_write16(vp, 0x0040, 4, Wn4_NetDiag); 1673 window_write16(vp, 0x0040, 4, Wn4_NetDiag);
1677 1674
1678 if (vp->full_bus_master_rx) { /* Boomerang bus master. */ 1675 if (vp->full_bus_master_rx) { /* Boomerang bus master. */
1679 vp->cur_rx = vp->dirty_rx = 0; 1676 vp->cur_rx = 0;
1680 /* Initialize the RxEarly register as recommended. */ 1677 /* Initialize the RxEarly register as recommended. */
1681 iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD); 1678 iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
1682 iowrite32(0x0020, ioaddr + PktStatus); 1679 iowrite32(0x0020, ioaddr + PktStatus);
@@ -1729,6 +1726,7 @@ vortex_open(struct net_device *dev)
1729 struct vortex_private *vp = netdev_priv(dev); 1726 struct vortex_private *vp = netdev_priv(dev);
1730 int i; 1727 int i;
1731 int retval; 1728 int retval;
1729 dma_addr_t dma;
1732 1730
1733 /* Use the now-standard shared IRQ implementation. */ 1731 /* Use the now-standard shared IRQ implementation. */
1734 if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ? 1732 if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
@@ -1753,7 +1751,11 @@ vortex_open(struct net_device *dev)
1753 break; /* Bad news! */ 1751 break; /* Bad news! */
1754 1752
1755 skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */ 1753 skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
1756 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 1754 dma = pci_map_single(VORTEX_PCI(vp), skb->data,
1755 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
1756 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
1757 break;
1758 vp->rx_ring[i].addr = cpu_to_le32(dma);
1757 } 1759 }
1758 if (i != RX_RING_SIZE) { 1760 if (i != RX_RING_SIZE) {
1759 pr_emerg("%s: no memory for rx ring\n", dev->name); 1761 pr_emerg("%s: no memory for rx ring\n", dev->name);
@@ -2067,6 +2069,12 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
2067 int len = (skb->len + 3) & ~3; 2069 int len = (skb->len + 3) & ~3;
2068 vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, 2070 vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
2069 PCI_DMA_TODEVICE); 2071 PCI_DMA_TODEVICE);
2072 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
2073 dev_kfree_skb_any(skb);
2074 dev->stats.tx_dropped++;
2075 return NETDEV_TX_OK;
2076 }
2077
2070 spin_lock_irq(&vp->window_lock); 2078 spin_lock_irq(&vp->window_lock);
2071 window_set(vp, 7); 2079 window_set(vp, 7);
2072 iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr); 2080 iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
@@ -2593,7 +2601,7 @@ boomerang_rx(struct net_device *dev)
2593 int entry = vp->cur_rx % RX_RING_SIZE; 2601 int entry = vp->cur_rx % RX_RING_SIZE;
2594 void __iomem *ioaddr = vp->ioaddr; 2602 void __iomem *ioaddr = vp->ioaddr;
2595 int rx_status; 2603 int rx_status;
2596 int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx; 2604 int rx_work_limit = RX_RING_SIZE;
2597 2605
2598 if (vortex_debug > 5) 2606 if (vortex_debug > 5)
2599 pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS)); 2607 pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));
@@ -2614,7 +2622,8 @@ boomerang_rx(struct net_device *dev)
2614 } else { 2622 } else {
2615 /* The packet length: up to 4.5K!. */ 2623 /* The packet length: up to 4.5K!. */
2616 int pkt_len = rx_status & 0x1fff; 2624 int pkt_len = rx_status & 0x1fff;
2617 struct sk_buff *skb; 2625 struct sk_buff *skb, *newskb;
2626 dma_addr_t newdma;
2618 dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr); 2627 dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
2619 2628
2620 if (vortex_debug > 4) 2629 if (vortex_debug > 4)
@@ -2633,9 +2642,27 @@ boomerang_rx(struct net_device *dev)
2633 pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 2642 pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2634 vp->rx_copy++; 2643 vp->rx_copy++;
2635 } else { 2644 } else {
2645 /* Pre-allocate the replacement skb. If it or its
2646 * mapping fails then recycle the buffer thats already
2647 * in place
2648 */
2649 newskb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
2650 if (!newskb) {
2651 dev->stats.rx_dropped++;
2652 goto clear_complete;
2653 }
2654 newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
2655 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2656 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
2657 dev->stats.rx_dropped++;
2658 consume_skb(newskb);
2659 goto clear_complete;
2660 }
2661
2636 /* Pass up the skbuff already on the Rx ring. */ 2662 /* Pass up the skbuff already on the Rx ring. */
2637 skb = vp->rx_skbuff[entry]; 2663 skb = vp->rx_skbuff[entry];
2638 vp->rx_skbuff[entry] = NULL; 2664 vp->rx_skbuff[entry] = newskb;
2665 vp->rx_ring[entry].addr = cpu_to_le32(newdma);
2639 skb_put(skb, pkt_len); 2666 skb_put(skb, pkt_len);
2640 pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 2667 pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2641 vp->rx_nocopy++; 2668 vp->rx_nocopy++;
@@ -2653,55 +2680,15 @@ boomerang_rx(struct net_device *dev)
2653 netif_rx(skb); 2680 netif_rx(skb);
2654 dev->stats.rx_packets++; 2681 dev->stats.rx_packets++;
2655 } 2682 }
2656 entry = (++vp->cur_rx) % RX_RING_SIZE;
2657 }
2658 /* Refill the Rx ring buffers. */
2659 for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
2660 struct sk_buff *skb;
2661 entry = vp->dirty_rx % RX_RING_SIZE;
2662 if (vp->rx_skbuff[entry] == NULL) {
2663 skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
2664 if (skb == NULL) {
2665 static unsigned long last_jif;
2666 if (time_after(jiffies, last_jif + 10 * HZ)) {
2667 pr_warn("%s: memory shortage\n",
2668 dev->name);
2669 last_jif = jiffies;
2670 }
2671 if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
2672 mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
2673 break; /* Bad news! */
2674 }
2675 2683
2676 vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 2684clear_complete:
2677 vp->rx_skbuff[entry] = skb;
2678 }
2679 vp->rx_ring[entry].status = 0; /* Clear complete bit. */ 2685 vp->rx_ring[entry].status = 0; /* Clear complete bit. */
2680 iowrite16(UpUnstall, ioaddr + EL3_CMD); 2686 iowrite16(UpUnstall, ioaddr + EL3_CMD);
2687 entry = (++vp->cur_rx) % RX_RING_SIZE;
2681 } 2688 }
2682 return 0; 2689 return 0;
2683} 2690}
2684 2691
2685/*
2686 * If we've hit a total OOM refilling the Rx ring we poll once a second
2687 * for some memory. Otherwise there is no way to restart the rx process.
2688 */
2689static void
2690rx_oom_timer(struct timer_list *t)
2691{
2692 struct vortex_private *vp = from_timer(vp, t, rx_oom_timer);
2693 struct net_device *dev = vp->mii.dev;
2694
2695 spin_lock_irq(&vp->lock);
2696 if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */
2697 boomerang_rx(dev);
2698 if (vortex_debug > 1) {
2699 pr_debug("%s: rx_oom_timer %s\n", dev->name,
2700 ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
2701 }
2702 spin_unlock_irq(&vp->lock);
2703}
2704
2705static void 2692static void
2706vortex_down(struct net_device *dev, int final_down) 2693vortex_down(struct net_device *dev, int final_down)
2707{ 2694{
@@ -2711,7 +2698,6 @@ vortex_down(struct net_device *dev, int final_down)
2711 netdev_reset_queue(dev); 2698 netdev_reset_queue(dev);
2712 netif_stop_queue(dev); 2699 netif_stop_queue(dev);
2713 2700
2714 del_timer_sync(&vp->rx_oom_timer);
2715 del_timer_sync(&vp->timer); 2701 del_timer_sync(&vp->timer);
2716 2702
2717 /* Turn off statistics ASAP. We update dev->stats below. */ 2703 /* Turn off statistics ASAP. We update dev->stats below. */
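The boomerang_rx() rework above removes the deferred refill loop and the rx_oom_timer: a replacement buffer is now allocated and DMA-mapped before the received buffer is passed up, and if either step fails the old buffer is simply recycled, so a ring slot is never left empty. A userspace sketch of that ordering, with malloc standing in for skb allocation and mapping and all names invented:

/* Sketch of "allocate the replacement before giving away the current buffer". */
#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 4
#define BUF_SZ    64

static char *ring[RING_SIZE];   /* one buffer per ring slot, never NULL once filled */

/* Deliver one packet from 'slot'. Returns 1 if delivered, 0 if dropped/recycled. */
static int rx_one(int slot, void (*deliver)(char *buf))
{
	char *newbuf = malloc(BUF_SZ);

	if (!newbuf) {
		/* Allocation failed: keep the old buffer in place and drop the
		 * packet, instead of leaving an empty slot to refill later. */
		return 0;
	}

	deliver(ring[slot]);        /* hand the filled buffer up the stack */
	ring[slot] = newbuf;        /* slot immediately owns a fresh buffer */
	return 1;
}

static void consume(char *buf)
{
	printf("delivered packet: %s\n", buf);
	free(buf);
}

int main(void)
{
	for (int i = 0; i < RING_SIZE; i++) {
		ring[i] = malloc(BUF_SZ);
		snprintf(ring[i], BUF_SZ, "frame %d", i);
	}

	rx_one(0, consume);
	rx_one(1, consume);

	for (int i = 0; i < RING_SIZE; i++)
		free(ring[i]);
	return 0;
}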
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 97c5a89a9cf7..fbe21a817bd8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -75,6 +75,9 @@ static struct workqueue_struct *ena_wq;
75MODULE_DEVICE_TABLE(pci, ena_pci_tbl); 75MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
76 76
77static int ena_rss_init_default(struct ena_adapter *adapter); 77static int ena_rss_init_default(struct ena_adapter *adapter);
78static void check_for_admin_com_state(struct ena_adapter *adapter);
79static void ena_destroy_device(struct ena_adapter *adapter);
80static int ena_restore_device(struct ena_adapter *adapter);
78 81
79static void ena_tx_timeout(struct net_device *dev) 82static void ena_tx_timeout(struct net_device *dev)
80{ 83{
@@ -1565,7 +1568,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
1565 1568
1566static int ena_up_complete(struct ena_adapter *adapter) 1569static int ena_up_complete(struct ena_adapter *adapter)
1567{ 1570{
1568 int rc, i; 1571 int rc;
1569 1572
1570 rc = ena_rss_configure(adapter); 1573 rc = ena_rss_configure(adapter);
1571 if (rc) 1574 if (rc)
@@ -1584,17 +1587,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
1584 1587
1585 ena_napi_enable_all(adapter); 1588 ena_napi_enable_all(adapter);
1586 1589
1587 /* Enable completion queues interrupt */
1588 for (i = 0; i < adapter->num_queues; i++)
1589 ena_unmask_interrupt(&adapter->tx_ring[i],
1590 &adapter->rx_ring[i]);
1591
1592 /* schedule napi in case we had pending packets
1593 * from the last time we disable napi
1594 */
1595 for (i = 0; i < adapter->num_queues; i++)
1596 napi_schedule(&adapter->ena_napi[i].napi);
1597
1598 return 0; 1590 return 0;
1599} 1591}
1600 1592
@@ -1731,7 +1723,7 @@ create_err:
1731 1723
1732static int ena_up(struct ena_adapter *adapter) 1724static int ena_up(struct ena_adapter *adapter)
1733{ 1725{
1734 int rc; 1726 int rc, i;
1735 1727
1736 netdev_dbg(adapter->netdev, "%s\n", __func__); 1728 netdev_dbg(adapter->netdev, "%s\n", __func__);
1737 1729
@@ -1774,6 +1766,17 @@ static int ena_up(struct ena_adapter *adapter)
1774 1766
1775 set_bit(ENA_FLAG_DEV_UP, &adapter->flags); 1767 set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
1776 1768
1769 /* Enable completion queues interrupt */
1770 for (i = 0; i < adapter->num_queues; i++)
1771 ena_unmask_interrupt(&adapter->tx_ring[i],
1772 &adapter->rx_ring[i]);
1773
1774 /* schedule napi in case we had pending packets
1775 * from the last time we disable napi
1776 */
1777 for (i = 0; i < adapter->num_queues; i++)
1778 napi_schedule(&adapter->ena_napi[i].napi);
1779
1777 return rc; 1780 return rc;
1778 1781
1779err_up: 1782err_up:
@@ -1884,6 +1887,17 @@ static int ena_close(struct net_device *netdev)
1884 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 1887 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
1885 ena_down(adapter); 1888 ena_down(adapter);
1886 1889
1890 /* Check for device status and issue reset if needed*/
1891 check_for_admin_com_state(adapter);
1892 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
1893 netif_err(adapter, ifdown, adapter->netdev,
1894 "Destroy failure, restarting device\n");
1895 ena_dump_stats_to_dmesg(adapter);
1896 /* rtnl lock already obtained in dev_ioctl() layer */
1897 ena_destroy_device(adapter);
1898 ena_restore_device(adapter);
1899 }
1900
1887 return 0; 1901 return 0;
1888} 1902}
1889 1903
@@ -2544,11 +2558,12 @@ static void ena_destroy_device(struct ena_adapter *adapter)
2544 2558
2545 ena_com_set_admin_running_state(ena_dev, false); 2559 ena_com_set_admin_running_state(ena_dev, false);
2546 2560
2547 ena_close(netdev); 2561 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2562 ena_down(adapter);
2548 2563
2549 /* Before releasing the ENA resources, a device reset is required. 2564 /* Before releasing the ENA resources, a device reset is required.
2550 * (to prevent the device from accessing them). 2565 * (to prevent the device from accessing them).
2551 * In case the reset flag is set and the device is up, ena_close 2566 * In case the reset flag is set and the device is up, ena_down()
2552 * already perform the reset, so it can be skipped. 2567 * already perform the reset, so it can be skipped.
2553 */ 2568 */
2554 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up)) 2569 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 5ee18660bc33..c9617675f934 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -70,7 +70,7 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
70 netdev_err(bp->dev, "vf ndo called though sriov is disabled\n"); 70 netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
71 return -EINVAL; 71 return -EINVAL;
72 } 72 }
73 if (vf_id >= bp->pf.max_vfs) { 73 if (vf_id >= bp->pf.active_vfs) {
74 netdev_err(bp->dev, "Invalid VF id %d\n", vf_id); 74 netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
75 return -EINVAL; 75 return -EINVAL;
76 } 76 }
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 3d201d7324bd..d8fee26cd45e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -421,7 +421,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
421 } 421 }
422 422
423 /* If all IP and L4 fields are wildcarded then this is an L2 flow */ 423 /* If all IP and L4 fields are wildcarded then this is an L2 flow */
424 if (is_wildcard(&l3_mask, sizeof(l3_mask)) && 424 if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
425 is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) { 425 is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
426 flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2; 426 flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
427 } else { 427 } else {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 6f9fa6e3c42a..d8424ed16c33 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -344,7 +344,6 @@ struct adapter_params {
344 344
345 unsigned int sf_size; /* serial flash size in bytes */ 345 unsigned int sf_size; /* serial flash size in bytes */
346 unsigned int sf_nsec; /* # of flash sectors */ 346 unsigned int sf_nsec; /* # of flash sectors */
347 unsigned int sf_fw_start; /* start of FW image in flash */
348 347
349 unsigned int fw_vers; /* firmware version */ 348 unsigned int fw_vers; /* firmware version */
350 unsigned int bs_vers; /* bootstrap version */ 349 unsigned int bs_vers; /* bootstrap version */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index f63210f15579..375ef86a84da 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2844,8 +2844,6 @@ enum {
2844 SF_RD_DATA_FAST = 0xb, /* read flash */ 2844 SF_RD_DATA_FAST = 0xb, /* read flash */
2845 SF_RD_ID = 0x9f, /* read ID */ 2845 SF_RD_ID = 0x9f, /* read ID */
2846 SF_ERASE_SECTOR = 0xd8, /* erase sector */ 2846 SF_ERASE_SECTOR = 0xd8, /* erase sector */
2847
2848 FW_MAX_SIZE = 16 * SF_SEC_SIZE,
2849}; 2847};
2850 2848
2851/** 2849/**
@@ -3558,8 +3556,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3558 const __be32 *p = (const __be32 *)fw_data; 3556 const __be32 *p = (const __be32 *)fw_data;
3559 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data; 3557 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
3560 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; 3558 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3561 unsigned int fw_img_start = adap->params.sf_fw_start; 3559 unsigned int fw_start_sec = FLASH_FW_START_SEC;
3562 unsigned int fw_start_sec = fw_img_start / sf_sec_size; 3560 unsigned int fw_size = FLASH_FW_MAX_SIZE;
3561 unsigned int fw_start = FLASH_FW_START;
3563 3562
3564 if (!size) { 3563 if (!size) {
3565 dev_err(adap->pdev_dev, "FW image has no data\n"); 3564 dev_err(adap->pdev_dev, "FW image has no data\n");
@@ -3575,9 +3574,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3575 "FW image size differs from size in FW header\n"); 3574 "FW image size differs from size in FW header\n");
3576 return -EINVAL; 3575 return -EINVAL;
3577 } 3576 }
3578 if (size > FW_MAX_SIZE) { 3577 if (size > fw_size) {
3579 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n", 3578 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
3580 FW_MAX_SIZE); 3579 fw_size);
3581 return -EFBIG; 3580 return -EFBIG;
3582 } 3581 }
3583 if (!t4_fw_matches_chip(adap, hdr)) 3582 if (!t4_fw_matches_chip(adap, hdr))
@@ -3604,11 +3603,11 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3604 */ 3603 */
3605 memcpy(first_page, fw_data, SF_PAGE_SIZE); 3604 memcpy(first_page, fw_data, SF_PAGE_SIZE);
3606 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff); 3605 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
3607 ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page); 3606 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
3608 if (ret) 3607 if (ret)
3609 goto out; 3608 goto out;
3610 3609
3611 addr = fw_img_start; 3610 addr = fw_start;
3612 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { 3611 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3613 addr += SF_PAGE_SIZE; 3612 addr += SF_PAGE_SIZE;
3614 fw_data += SF_PAGE_SIZE; 3613 fw_data += SF_PAGE_SIZE;
@@ -3618,7 +3617,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3618 } 3617 }
3619 3618
3620 ret = t4_write_flash(adap, 3619 ret = t4_write_flash(adap,
3621 fw_img_start + offsetof(struct fw_hdr, fw_ver), 3620 fw_start + offsetof(struct fw_hdr, fw_ver),
3622 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver); 3621 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
3623out: 3622out:
3624 if (ret) 3623 if (ret)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 8184d2fca9be..a74300a4459c 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3469,6 +3469,10 @@ fec_probe(struct platform_device *pdev)
3469 goto failed_regulator; 3469 goto failed_regulator;
3470 } 3470 }
3471 } else { 3471 } else {
3472 if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
3473 ret = -EPROBE_DEFER;
3474 goto failed_regulator;
3475 }
3472 fep->reg_phy = NULL; 3476 fep->reg_phy = NULL;
3473 } 3477 }
3474 3478
@@ -3552,8 +3556,9 @@ failed_clk_ipg:
3552failed_clk: 3556failed_clk:
3553 if (of_phy_is_fixed_link(np)) 3557 if (of_phy_is_fixed_link(np))
3554 of_phy_deregister_fixed_link(np); 3558 of_phy_deregister_fixed_link(np);
3555failed_phy:
3556 of_node_put(phy_node); 3559 of_node_put(phy_node);
3560failed_phy:
3561 dev_id--;
3557failed_ioremap: 3562failed_ioremap:
3558 free_netdev(ndev); 3563 free_netdev(ndev);
3559 3564
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 544114281ea7..9f8d4f8e57e3 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -319,11 +319,10 @@ static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
319 now = tmr_cnt_read(etsects); 319 now = tmr_cnt_read(etsects);
320 now += delta; 320 now += delta;
321 tmr_cnt_write(etsects, now); 321 tmr_cnt_write(etsects, now);
322 set_fipers(etsects);
322 323
323 spin_unlock_irqrestore(&etsects->lock, flags); 324 spin_unlock_irqrestore(&etsects->lock, flags);
324 325
325 set_fipers(etsects);
326
327 return 0; 326 return 0;
328} 327}
329 328
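The gianfar_ptp hunk moves set_fipers() inside the locked region of the adjtime callback, so the counter write and the FIPER re-arm derived from it become visible as one unit. The general shape, recomputing derived state under the same lock as the base update, sketched with a pthread mutex standing in for the spinlock (all names hypothetical):

/* Sketch: update a value and the quantity derived from it under one lock. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define PERIOD 1000

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t counter;        /* the "timer counter" */
static uint64_t next_pulse;     /* derived: next counter value at which to fire */

static void rearm_pulse(void)
{
	/* Derived state recomputed from the counter; must see the fresh value. */
	next_pulse = counter - (counter % PERIOD) + PERIOD;
}

static void adjust_time(int64_t delta)
{
	pthread_mutex_lock(&lock);
	counter += delta;
	rearm_pulse();              /* done before unlocking, like set_fipers() above */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	counter = 12345;            /* single-threaded setup, no lock needed yet */
	rearm_pulse();
	adjust_time(700);
	printf("counter=%llu next_pulse=%llu\n",
	       (unsigned long long)counter, (unsigned long long)next_pulse);
	return 0;
}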
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index d7bdea79e9fa..8fd2458060a0 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -331,7 +331,8 @@ struct e1000_adapter {
331enum e1000_state_t { 331enum e1000_state_t {
332 __E1000_TESTING, 332 __E1000_TESTING,
333 __E1000_RESETTING, 333 __E1000_RESETTING,
334 __E1000_DOWN 334 __E1000_DOWN,
335 __E1000_DISABLED
335}; 336};
336 337
337#undef pr_fmt 338#undef pr_fmt
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 1982f7917a8d..3dd4aeb2706d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -945,7 +945,7 @@ static int e1000_init_hw_struct(struct e1000_adapter *adapter,
945static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 945static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
946{ 946{
947 struct net_device *netdev; 947 struct net_device *netdev;
948 struct e1000_adapter *adapter; 948 struct e1000_adapter *adapter = NULL;
949 struct e1000_hw *hw; 949 struct e1000_hw *hw;
950 950
951 static int cards_found; 951 static int cards_found;
@@ -955,6 +955,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
955 u16 tmp = 0; 955 u16 tmp = 0;
956 u16 eeprom_apme_mask = E1000_EEPROM_APME; 956 u16 eeprom_apme_mask = E1000_EEPROM_APME;
957 int bars, need_ioport; 957 int bars, need_ioport;
958 bool disable_dev = false;
958 959
959 /* do not allocate ioport bars when not needed */ 960 /* do not allocate ioport bars when not needed */
960 need_ioport = e1000_is_need_ioport(pdev); 961 need_ioport = e1000_is_need_ioport(pdev);
@@ -1259,11 +1260,13 @@ err_mdio_ioremap:
1259 iounmap(hw->ce4100_gbe_mdio_base_virt); 1260 iounmap(hw->ce4100_gbe_mdio_base_virt);
1260 iounmap(hw->hw_addr); 1261 iounmap(hw->hw_addr);
1261err_ioremap: 1262err_ioremap:
1263 disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1262 free_netdev(netdev); 1264 free_netdev(netdev);
1263err_alloc_etherdev: 1265err_alloc_etherdev:
1264 pci_release_selected_regions(pdev, bars); 1266 pci_release_selected_regions(pdev, bars);
1265err_pci_reg: 1267err_pci_reg:
1266 pci_disable_device(pdev); 1268 if (!adapter || disable_dev)
1269 pci_disable_device(pdev);
1267 return err; 1270 return err;
1268} 1271}
1269 1272
@@ -1281,6 +1284,7 @@ static void e1000_remove(struct pci_dev *pdev)
1281 struct net_device *netdev = pci_get_drvdata(pdev); 1284 struct net_device *netdev = pci_get_drvdata(pdev);
1282 struct e1000_adapter *adapter = netdev_priv(netdev); 1285 struct e1000_adapter *adapter = netdev_priv(netdev);
1283 struct e1000_hw *hw = &adapter->hw; 1286 struct e1000_hw *hw = &adapter->hw;
1287 bool disable_dev;
1284 1288
1285 e1000_down_and_stop(adapter); 1289 e1000_down_and_stop(adapter);
1286 e1000_release_manageability(adapter); 1290 e1000_release_manageability(adapter);
@@ -1299,9 +1303,11 @@ static void e1000_remove(struct pci_dev *pdev)
1299 iounmap(hw->flash_address); 1303 iounmap(hw->flash_address);
1300 pci_release_selected_regions(pdev, adapter->bars); 1304 pci_release_selected_regions(pdev, adapter->bars);
1301 1305
1306 disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1302 free_netdev(netdev); 1307 free_netdev(netdev);
1303 1308
1304 pci_disable_device(pdev); 1309 if (disable_dev)
1310 pci_disable_device(pdev);
1305} 1311}
1306 1312
1307/** 1313/**
@@ -5156,7 +5162,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5156 if (netif_running(netdev)) 5162 if (netif_running(netdev))
5157 e1000_free_irq(adapter); 5163 e1000_free_irq(adapter);
5158 5164
5159 pci_disable_device(pdev); 5165 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5166 pci_disable_device(pdev);
5160 5167
5161 return 0; 5168 return 0;
5162} 5169}
@@ -5200,6 +5207,10 @@ static int e1000_resume(struct pci_dev *pdev)
5200 pr_err("Cannot enable PCI device from suspend\n"); 5207 pr_err("Cannot enable PCI device from suspend\n");
5201 return err; 5208 return err;
5202 } 5209 }
5210
5211 /* flush memory to make sure state is correct */
5212 smp_mb__before_atomic();
5213 clear_bit(__E1000_DISABLED, &adapter->flags);
5203 pci_set_master(pdev); 5214 pci_set_master(pdev);
5204 5215
5205 pci_enable_wake(pdev, PCI_D3hot, 0); 5216 pci_enable_wake(pdev, PCI_D3hot, 0);
@@ -5274,7 +5285,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5274 5285
5275 if (netif_running(netdev)) 5286 if (netif_running(netdev))
5276 e1000_down(adapter); 5287 e1000_down(adapter);
5277 pci_disable_device(pdev); 5288
5289 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5290 pci_disable_device(pdev);
5278 5291
5279 /* Request a slot slot reset. */ 5292 /* Request a slot slot reset. */
5280 return PCI_ERS_RESULT_NEED_RESET; 5293 return PCI_ERS_RESULT_NEED_RESET;
@@ -5302,6 +5315,10 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5302 pr_err("Cannot re-enable PCI device after reset.\n"); 5315 pr_err("Cannot re-enable PCI device after reset.\n");
5303 return PCI_ERS_RESULT_DISCONNECT; 5316 return PCI_ERS_RESULT_DISCONNECT;
5304 } 5317 }
5318
5319 /* flush memory to make sure state is correct */
5320 smp_mb__before_atomic();
5321 clear_bit(__E1000_DISABLED, &adapter->flags);
5305 pci_set_master(pdev); 5322 pci_set_master(pdev);
5306 5323
5307 pci_enable_wake(pdev, PCI_D3hot, 0); 5324 pci_enable_wake(pdev, PCI_D3hot, 0);
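The e1000 changes add an __E1000_DISABLED bit and guard every pci_disable_device() call with test_and_set_bit(), so whichever of the remove, shutdown or error-handling paths runs first actually disables the device while later callers skip it, and resume clears the bit again. The same run-at-most-once idiom in C11 atomics, as an illustration only (none of these names belong to the driver):

/* Sketch of a test-and-set guard that makes a teardown run at most once. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag disabled = ATOMIC_FLAG_INIT;

static void disable_device(void)
{
	printf("device disabled\n");
}

/* Safe to call from any teardown path; only the first caller does the work. */
static void disable_device_once(void)
{
	if (!atomic_flag_test_and_set(&disabled))
		disable_device();
	else
		printf("already disabled, skipping\n");
}

/* On resume the flag is cleared so a later teardown can disable again. */
static void mark_enabled(void)
{
	atomic_flag_clear(&disabled);
}

int main(void)
{
	disable_device_once();   /* e.g. shutdown path */
	disable_device_once();   /* e.g. remove path: skipped */
	mark_enabled();          /* e.g. resume re-enabled the device */
	disable_device_once();   /* disables again */
	return 0;
}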
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index d6d4ed7acf03..31277d3bb7dc 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1367,6 +1367,9 @@ out:
1367 * Checks to see of the link status of the hardware has changed. If a 1367 * Checks to see of the link status of the hardware has changed. If a
1368 * change in link status has been detected, then we read the PHY registers 1368 * change in link status has been detected, then we read the PHY registers
1369 * to get the current speed/duplex if link exists. 1369 * to get the current speed/duplex if link exists.
1370 *
1371 * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
1372 * up).
1370 **/ 1373 **/
1371static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) 1374static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1372{ 1375{
@@ -1382,7 +1385,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1382 * Change or Rx Sequence Error interrupt. 1385 * Change or Rx Sequence Error interrupt.
1383 */ 1386 */
1384 if (!mac->get_link_status) 1387 if (!mac->get_link_status)
1385 return 0; 1388 return 1;
1386 1389
1387 /* First we want to see if the MII Status Register reports 1390 /* First we want to see if the MII Status Register reports
1388 * link. If so, then we want to get the current speed/duplex 1391 * link. If so, then we want to get the current speed/duplex
@@ -1613,10 +1616,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1613 * different link partner. 1616 * different link partner.
1614 */ 1617 */
1615 ret_val = e1000e_config_fc_after_link_up(hw); 1618 ret_val = e1000e_config_fc_after_link_up(hw);
1616 if (ret_val) 1619 if (ret_val) {
1617 e_dbg("Error configuring flow control\n"); 1620 e_dbg("Error configuring flow control\n");
1621 return ret_val;
1622 }
1618 1623
1619 return ret_val; 1624 return 1;
1620} 1625}
1621 1626
1622static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) 1627static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 321d8be80871..42dcaefc4c19 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1573,11 +1573,18 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
1573 else 1573 else
1574 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); 1574 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1575 1575
1576 /* Copy the address first, so that we avoid a possible race with
1577 * .set_rx_mode(). If we copy after changing the address in the filter
1578 * list, we might open ourselves to a narrow race window where
1579 * .set_rx_mode could delete our dev_addr filter and prevent traffic
1580 * from passing.
1581 */
1582 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1583
1576 spin_lock_bh(&vsi->mac_filter_hash_lock); 1584 spin_lock_bh(&vsi->mac_filter_hash_lock);
1577 i40e_del_mac_filter(vsi, netdev->dev_addr); 1585 i40e_del_mac_filter(vsi, netdev->dev_addr);
1578 i40e_add_mac_filter(vsi, addr->sa_data); 1586 i40e_add_mac_filter(vsi, addr->sa_data);
1579 spin_unlock_bh(&vsi->mac_filter_hash_lock); 1587 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1580 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1581 if (vsi->type == I40E_VSI_MAIN) { 1588 if (vsi->type == I40E_VSI_MAIN) {
1582 i40e_status ret; 1589 i40e_status ret;
1583 1590
@@ -1923,6 +1930,14 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
1923 struct i40e_netdev_priv *np = netdev_priv(netdev); 1930 struct i40e_netdev_priv *np = netdev_priv(netdev);
1924 struct i40e_vsi *vsi = np->vsi; 1931 struct i40e_vsi *vsi = np->vsi;
1925 1932
1933 /* Under some circumstances, we might receive a request to delete
1934 * our own device address from our uc list. Because we store the
1935 * device address in the VSI's MAC/VLAN filter list, we need to ignore
1936 * such requests and not delete our device address from this list.
1937 */
1938 if (ether_addr_equal(addr, netdev->dev_addr))
1939 return 0;
1940
1926 i40e_del_mac_filter(vsi, addr); 1941 i40e_del_mac_filter(vsi, addr);
1927 1942
1928 return 0; 1943 return 0;
@@ -6038,8 +6053,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6038 /* Set Bit 7 to be valid */ 6053 /* Set Bit 7 to be valid */
6039 mode = I40E_AQ_SET_SWITCH_BIT7_VALID; 6054 mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6040 6055
6041 /* Set L4type to both TCP and UDP support */ 6056 /* Set L4type for TCP support */
6042 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_BOTH; 6057 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6043 6058
6044 /* Set cloud filter mode */ 6059 /* Set cloud filter mode */
6045 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL; 6060 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
@@ -6969,18 +6984,18 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
6969 is_valid_ether_addr(filter->src_mac)) || 6984 is_valid_ether_addr(filter->src_mac)) ||
6970 (is_multicast_ether_addr(filter->dst_mac) && 6985 (is_multicast_ether_addr(filter->dst_mac) &&
6971 is_multicast_ether_addr(filter->src_mac))) 6986 is_multicast_ether_addr(filter->src_mac)))
6972 return -EINVAL; 6987 return -EOPNOTSUPP;
6973 6988
6974 /* Make sure port is specified, otherwise bail out, for channel 6989 /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
6975 * specific cloud filter needs 'L4 port' to be non-zero 6990 * ports are not supported via big buffer now.
6976 */ 6991 */
6977 if (!filter->dst_port) 6992 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
6978 return -EINVAL; 6993 return -EOPNOTSUPP;
6979 6994
6980 /* adding filter using src_port/src_ip is not supported at this stage */ 6995 /* adding filter using src_port/src_ip is not supported at this stage */
6981 if (filter->src_port || filter->src_ipv4 || 6996 if (filter->src_port || filter->src_ipv4 ||
6982 !ipv6_addr_any(&filter->ip.v6.src_ip6)) 6997 !ipv6_addr_any(&filter->ip.v6.src_ip6))
6983 return -EINVAL; 6998 return -EOPNOTSUPP;
6984 6999
6985 /* copy element needed to add cloud filter from filter */ 7000 /* copy element needed to add cloud filter from filter */
6986 i40e_set_cld_element(filter, &cld_filter.element); 7001 i40e_set_cld_element(filter, &cld_filter.element);
@@ -6991,7 +7006,7 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
6991 is_multicast_ether_addr(filter->src_mac)) { 7006 is_multicast_ether_addr(filter->src_mac)) {
6992 /* MAC + IP : unsupported mode */ 7007 /* MAC + IP : unsupported mode */
6993 if (filter->dst_ipv4) 7008 if (filter->dst_ipv4)
6994 return -EINVAL; 7009 return -EOPNOTSUPP;
6995 7010
6996 /* since we validated that L4 port must be valid before 7011 /* since we validated that L4 port must be valid before
6997 * we get here, start with respective "flags" value 7012 * we get here, start with respective "flags" value
@@ -7356,7 +7371,7 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
7356 7371
7357 if (tc < 0) { 7372 if (tc < 0) {
7358 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n"); 7373 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
7359 return -EINVAL; 7374 return -EOPNOTSUPP;
7360 } 7375 }
7361 7376
7362 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || 7377 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 4566d66ffc7c..5bc2748ac468 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3047,10 +3047,30 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
3047 /* Walk through fragments adding latest fragment, testing it, and 3047 /* Walk through fragments adding latest fragment, testing it, and
3048 * then removing stale fragments from the sum. 3048 * then removing stale fragments from the sum.
3049 */ 3049 */
3050 stale = &skb_shinfo(skb)->frags[0]; 3050 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
3051 for (;;) { 3051 int stale_size = skb_frag_size(stale);
3052
3052 sum += skb_frag_size(frag++); 3053 sum += skb_frag_size(frag++);
3053 3054
3055 /* The stale fragment may present us with a smaller
3056 * descriptor than the actual fragment size. To account
3057 * for that we need to remove all the data on the front and
3058 * figure out what the remainder would be in the last
3059 * descriptor associated with the fragment.
3060 */
3061 if (stale_size > I40E_MAX_DATA_PER_TXD) {
3062 int align_pad = -(stale->page_offset) &
3063 (I40E_MAX_READ_REQ_SIZE - 1);
3064
3065 sum -= align_pad;
3066 stale_size -= align_pad;
3067
3068 do {
3069 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3070 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3071 } while (stale_size > I40E_MAX_DATA_PER_TXD);
3072 }
3073
3054 /* if sum is negative we failed to make sufficient progress */ 3074 /* if sum is negative we failed to make sufficient progress */
3055 if (sum < 0) 3075 if (sum < 0)
3056 return true; 3076 return true;
@@ -3058,7 +3078,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
3058 if (!nr_frags--) 3078 if (!nr_frags--)
3059 break; 3079 break;
3060 3080
3061 sum -= skb_frag_size(stale++); 3081 sum -= stale_size;
3062 } 3082 }
3063 3083
3064 return false; 3084 return false;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 50864f99446d..1ba29bb85b67 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -2012,10 +2012,30 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
2012 /* Walk through fragments adding latest fragment, testing it, and 2012 /* Walk through fragments adding latest fragment, testing it, and
2013 * then removing stale fragments from the sum. 2013 * then removing stale fragments from the sum.
2014 */ 2014 */
2015 stale = &skb_shinfo(skb)->frags[0]; 2015 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2016 for (;;) { 2016 int stale_size = skb_frag_size(stale);
2017
2017 sum += skb_frag_size(frag++); 2018 sum += skb_frag_size(frag++);
2018 2019
2020 /* The stale fragment may present us with a smaller
2021 * descriptor than the actual fragment size. To account
2022 * for that we need to remove all the data on the front and
2023 * figure out what the remainder would be in the last
2024 * descriptor associated with the fragment.
2025 */
2026 if (stale_size > I40E_MAX_DATA_PER_TXD) {
2027 int align_pad = -(stale->page_offset) &
2028 (I40E_MAX_READ_REQ_SIZE - 1);
2029
2030 sum -= align_pad;
2031 stale_size -= align_pad;
2032
2033 do {
2034 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
2035 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
2036 } while (stale_size > I40E_MAX_DATA_PER_TXD);
2037 }
2038
2019 /* if sum is negative we failed to make sufficient progress */ 2039 /* if sum is negative we failed to make sufficient progress */
2020 if (sum < 0) 2040 if (sum < 0)
2021 return true; 2041 return true;
@@ -2023,7 +2043,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
2023 if (!nr_frags--) 2043 if (!nr_frags--)
2024 break; 2044 break;
2025 2045
2026 sum -= skb_frag_size(stale++); 2046 sum -= stale_size;
2027 } 2047 }
2028 2048
2029 return false; 2049 return false;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 23f7d828cf67..6ef20e5cc77d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1643,7 +1643,12 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
1643 return 0; 1643 return 0;
1644 } 1644 }
1645 1645
1646 wmb(); /* reset needs to be written before we read control register */ 1646 /* Reset needs to be written before we read control register, and
1647 * we must wait for the HW to become responsive once again
1648 */
1649 wmb();
1650 msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
1651
1647 end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); 1652 end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
1648 do { 1653 do {
1649 u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); 1654 u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index a6441208e9d9..fb082ad21b00 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -59,6 +59,7 @@
59#define MLXSW_PCI_SW_RESET 0xF0010 59#define MLXSW_PCI_SW_RESET 0xF0010
60#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) 60#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
61#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000 61#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000
62#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
62#define MLXSW_PCI_FW_READY 0xA1844 63#define MLXSW_PCI_FW_READY 0xA1844
63#define MLXSW_PCI_FW_READY_MASK 0xFFFF 64#define MLXSW_PCI_FW_READY_MASK 0xFFFF
64#define MLXSW_PCI_FW_READY_MAGIC 0x5E 65#define MLXSW_PCI_FW_READY_MAGIC 0x5E
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 9bd8d28de152..c3837ca7a705 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -4376,7 +4376,10 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
4376 } 4376 }
4377 if (!info->linking) 4377 if (!info->linking)
4378 break; 4378 break;
4379 if (netdev_has_any_upper_dev(upper_dev)) { 4379 if (netdev_has_any_upper_dev(upper_dev) &&
4380 (!netif_is_bridge_master(upper_dev) ||
4381 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
4382 upper_dev))) {
4380 NL_SET_ERR_MSG(extack, 4383 NL_SET_ERR_MSG(extack,
4381 "spectrum: Enslaving a port to a device that already has an upper device is not supported"); 4384 "spectrum: Enslaving a port to a device that already has an upper device is not supported");
4382 return -EINVAL; 4385 return -EINVAL;
@@ -4504,6 +4507,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
4504 u16 vid) 4507 u16 vid)
4505{ 4508{
4506 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 4509 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
4510 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4507 struct netdev_notifier_changeupper_info *info = ptr; 4511 struct netdev_notifier_changeupper_info *info = ptr;
4508 struct netlink_ext_ack *extack; 4512 struct netlink_ext_ack *extack;
4509 struct net_device *upper_dev; 4513 struct net_device *upper_dev;
@@ -4520,7 +4524,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
4520 } 4524 }
4521 if (!info->linking) 4525 if (!info->linking)
4522 break; 4526 break;
4523 if (netdev_has_any_upper_dev(upper_dev)) { 4527 if (netdev_has_any_upper_dev(upper_dev) &&
4528 (!netif_is_bridge_master(upper_dev) ||
4529 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
4530 upper_dev))) {
4524 NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported"); 4531 NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");
4525 return -EINVAL; 4532 return -EINVAL;
4526 } 4533 }
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 432ab9b12b7f..05ce1befd9b3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -365,6 +365,8 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
365void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, 365void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
366 struct net_device *brport_dev, 366 struct net_device *brport_dev,
367 struct net_device *br_dev); 367 struct net_device *br_dev);
368bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
369 const struct net_device *br_dev);
368 370
369/* spectrum.c */ 371/* spectrum.c */
370int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 372int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index c33beac5def0..b5397da94d7f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -46,7 +46,8 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
46 int tclass_num, u32 min, u32 max, 46 int tclass_num, u32 min, u32 max,
47 u32 probability, bool is_ecn) 47 u32 probability, bool is_ecn)
48{ 48{
49 char cwtp_cmd[max_t(u8, MLXSW_REG_CWTP_LEN, MLXSW_REG_CWTPM_LEN)]; 49 char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
50 char cwtp_cmd[MLXSW_REG_CWTP_LEN];
50 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 51 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
51 int err; 52 int err;
52 53
@@ -60,10 +61,10 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
60 if (err) 61 if (err)
61 return err; 62 return err;
62 63
63 mlxsw_reg_cwtpm_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num, 64 mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
64 MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn); 65 MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);
65 66
66 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtp_cmd); 67 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
67} 68}
68 69
69static int 70static int
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index be657b8533f0..434b3922b34f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -3228,7 +3228,7 @@ static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3228{ 3228{
3229 if (!removing) 3229 if (!removing)
3230 nh->should_offload = 1; 3230 nh->should_offload = 1;
3231 else if (nh->offloaded) 3231 else
3232 nh->should_offload = 0; 3232 nh->should_offload = 0;
3233 nh->update = 1; 3233 nh->update = 1;
3234} 3234}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 7b8548e25ae7..593ad31be749 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -152,6 +152,12 @@ mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
152 return NULL; 152 return NULL;
153} 153}
154 154
155bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
156 const struct net_device *br_dev)
157{
158 return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
159}
160
155static struct mlxsw_sp_bridge_device * 161static struct mlxsw_sp_bridge_device *
156mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, 162mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
157 struct net_device *br_dev) 163 struct net_device *br_dev)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 1a603fdd9e80..99b0487b6d82 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -568,6 +568,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
568 return err; 568 return err;
569 } 569 }
570 nn_writeb(nn, ctrl_offset, entry->entry); 570 nn_writeb(nn, ctrl_offset, entry->entry);
571 nfp_net_irq_unmask(nn, entry->entry);
571 572
572 return 0; 573 return 0;
573} 574}
@@ -582,6 +583,7 @@ static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
582 unsigned int vector_idx) 583 unsigned int vector_idx)
583{ 584{
584 nn_writeb(nn, ctrl_offset, 0xff); 585 nn_writeb(nn, ctrl_offset, 0xff);
586 nn_pci_flush(nn);
585 free_irq(nn->irq_entries[vector_idx].vector, nn); 587 free_irq(nn->irq_entries[vector_idx].vector, nn);
586} 588}
587 589
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 75323000c364..b9e2846589f8 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -147,7 +147,7 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
147 [FWNLCR0] = 0x0090, 147 [FWNLCR0] = 0x0090,
148 [FWALCR0] = 0x0094, 148 [FWALCR0] = 0x0094,
149 [TXNLCR1] = 0x00a0, 149 [TXNLCR1] = 0x00a0,
150 [TXALCR1] = 0x00a0, 150 [TXALCR1] = 0x00a4,
151 [RXNLCR1] = 0x00a8, 151 [RXNLCR1] = 0x00a8,
152 [RXALCR1] = 0x00ac, 152 [RXALCR1] = 0x00ac,
153 [FWNLCR1] = 0x00b0, 153 [FWNLCR1] = 0x00b0,
@@ -399,7 +399,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
399 [FWNLCR0] = 0x0090, 399 [FWNLCR0] = 0x0090,
400 [FWALCR0] = 0x0094, 400 [FWALCR0] = 0x0094,
401 [TXNLCR1] = 0x00a0, 401 [TXNLCR1] = 0x00a0,
402 [TXALCR1] = 0x00a0, 402 [TXALCR1] = 0x00a4,
403 [RXNLCR1] = 0x00a8, 403 [RXNLCR1] = 0x00a8,
404 [RXALCR1] = 0x00ac, 404 [RXALCR1] = 0x00ac,
405 [FWNLCR1] = 0x00b0, 405 [FWNLCR1] = 0x00b0,
@@ -3225,18 +3225,37 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
3225 /* ioremap the TSU registers */ 3225 /* ioremap the TSU registers */
3226 if (mdp->cd->tsu) { 3226 if (mdp->cd->tsu) {
3227 struct resource *rtsu; 3227 struct resource *rtsu;
3228
3228 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); 3229 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
3229 mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu); 3230 if (!rtsu) {
3230 if (IS_ERR(mdp->tsu_addr)) { 3231 dev_err(&pdev->dev, "no TSU resource\n");
3231 ret = PTR_ERR(mdp->tsu_addr); 3232 ret = -ENODEV;
3233 goto out_release;
3234 }
3235 /* We can only request the TSU region for the first port
3236 * of the two sharing this TSU for the probe to succeed...
3237 */
3238 if (devno % 2 == 0 &&
3239 !devm_request_mem_region(&pdev->dev, rtsu->start,
3240 resource_size(rtsu),
3241 dev_name(&pdev->dev))) {
3242 dev_err(&pdev->dev, "can't request TSU resource.\n");
3243 ret = -EBUSY;
3244 goto out_release;
3245 }
3246 mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
3247 resource_size(rtsu));
3248 if (!mdp->tsu_addr) {
3249 dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
3250 ret = -ENOMEM;
3232 goto out_release; 3251 goto out_release;
3233 } 3252 }
3234 mdp->port = devno % 2; 3253 mdp->port = devno % 2;
3235 ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER; 3254 ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
3236 } 3255 }
3237 3256
3238 /* initialize first or needed device */ 3257 /* Need to init only the first port of the two sharing a TSU */
3239 if (!devno || pd->needs_init) { 3258 if (devno % 2 == 0) {
3240 if (mdp->cd->chip_reset) 3259 if (mdp->cd->chip_reset)
3241 mdp->cd->chip_reset(ndev); 3260 mdp->cd->chip_reset(ndev);
3242 3261
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 337d53d12e94..c0af0bc4e714 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -364,9 +364,15 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
364bool stmmac_eee_init(struct stmmac_priv *priv) 364bool stmmac_eee_init(struct stmmac_priv *priv)
365{ 365{
366 struct net_device *ndev = priv->dev; 366 struct net_device *ndev = priv->dev;
367 int interface = priv->plat->interface;
367 unsigned long flags; 368 unsigned long flags;
368 bool ret = false; 369 bool ret = false;
369 370
371 if ((interface != PHY_INTERFACE_MODE_MII) &&
372 (interface != PHY_INTERFACE_MODE_GMII) &&
373 !phy_interface_mode_is_rgmii(interface))
374 goto out;
375
370 /* Using PCS we cannot dial with the phy registers at this stage 376 /* Using PCS we cannot dial with the phy registers at this stage
371 * so we do not support extra feature like EEE. 377 * so we do not support extra feature like EEE.
372 */ 378 */
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index b718a02a6bb6..0a48b3073d3d 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -825,6 +825,13 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
825 if (IS_ERR(rt)) 825 if (IS_ERR(rt))
826 return PTR_ERR(rt); 826 return PTR_ERR(rt);
827 827
828 if (skb_dst(skb)) {
829 int mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr) -
830 GENEVE_BASE_HLEN - info->options_len - 14;
831
832 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
833 }
834
828 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); 835 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
829 if (geneve->collect_md) { 836 if (geneve->collect_md) {
830 tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); 837 tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
@@ -864,6 +871,13 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
864 if (IS_ERR(dst)) 871 if (IS_ERR(dst))
865 return PTR_ERR(dst); 872 return PTR_ERR(dst);
866 873
874 if (skb_dst(skb)) {
875 int mtu = dst_mtu(dst) - sizeof(struct ipv6hdr) -
876 GENEVE_BASE_HLEN - info->options_len - 14;
877
878 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
879 }
880
867 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); 881 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
868 if (geneve->collect_md) { 882 if (geneve->collect_md) {
869 prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); 883 prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index a178c5efd33e..a0f2be81d52e 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1444,9 +1444,14 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
1444 return 0; 1444 return 0;
1445 1445
1446unregister_netdev: 1446unregister_netdev:
1447 /* macvlan_uninit would free the macvlan port */
1447 unregister_netdevice(dev); 1448 unregister_netdevice(dev);
1449 return err;
1448destroy_macvlan_port: 1450destroy_macvlan_port:
1449 if (create) 1451 /* the macvlan port may be freed by macvlan_uninit when fail to register.
1452 * so we destroy the macvlan port only when it's valid.
1453 */
1454 if (create && macvlan_port_get_rtnl(dev))
1450 macvlan_port_destroy(port->dev); 1455 macvlan_port_destroy(port->dev);
1451 return err; 1456 return err;
1452} 1457}
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 135296508a7e..6425ce04d3f9 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -118,8 +118,10 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
118 118
119 data->regulator = devm_regulator_get(&pdev->dev, "phy"); 119 data->regulator = devm_regulator_get(&pdev->dev, "phy");
120 if (IS_ERR(data->regulator)) { 120 if (IS_ERR(data->regulator)) {
121 if (PTR_ERR(data->regulator) == -EPROBE_DEFER) 121 if (PTR_ERR(data->regulator) == -EPROBE_DEFER) {
122 return -EPROBE_DEFER; 122 ret = -EPROBE_DEFER;
123 goto err_out_free_mdiobus;
124 }
123 125
124 dev_info(&pdev->dev, "no regulator found\n"); 126 dev_info(&pdev->dev, "no regulator found\n");
125 data->regulator = NULL; 127 data->regulator = NULL;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 827f3f92560e..249ce5cbea22 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1296,6 +1296,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
1296 switch (cmd) { 1296 switch (cmd) {
1297 case SIOCGMIIPHY: 1297 case SIOCGMIIPHY:
1298 mii->phy_id = pl->phydev->mdio.addr; 1298 mii->phy_id = pl->phydev->mdio.addr;
1299 /* fall through */
1299 1300
1300 case SIOCGMIIREG: 1301 case SIOCGMIIREG:
1301 ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num); 1302 ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num);
@@ -1318,6 +1319,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
1318 switch (cmd) { 1319 switch (cmd) {
1319 case SIOCGMIIPHY: 1320 case SIOCGMIIPHY:
1320 mii->phy_id = 0; 1321 mii->phy_id = 0;
1322 /* fall through */
1321 1323
1322 case SIOCGMIIREG: 1324 case SIOCGMIIREG:
1323 ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num); 1325 ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num);
@@ -1429,9 +1431,8 @@ static void phylink_sfp_link_down(void *upstream)
1429 WARN_ON(!lockdep_rtnl_is_held()); 1431 WARN_ON(!lockdep_rtnl_is_held());
1430 1432
1431 set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); 1433 set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
1434 queue_work(system_power_efficient_wq, &pl->resolve);
1432 flush_work(&pl->resolve); 1435 flush_work(&pl->resolve);
1433
1434 netif_carrier_off(pl->netdev);
1435} 1436}
1436 1437
1437static void phylink_sfp_link_up(void *upstream) 1438static void phylink_sfp_link_up(void *upstream)
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 8a1b1f4c1b7c..ab64a142b832 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -356,7 +356,8 @@ EXPORT_SYMBOL_GPL(sfp_register_upstream);
356void sfp_unregister_upstream(struct sfp_bus *bus) 356void sfp_unregister_upstream(struct sfp_bus *bus)
357{ 357{
358 rtnl_lock(); 358 rtnl_lock();
359 sfp_unregister_bus(bus); 359 if (bus->sfp)
360 sfp_unregister_bus(bus);
360 bus->upstream = NULL; 361 bus->upstream = NULL;
361 bus->netdev = NULL; 362 bus->netdev = NULL;
362 rtnl_unlock(); 363 rtnl_unlock();
@@ -459,7 +460,8 @@ EXPORT_SYMBOL_GPL(sfp_register_socket);
459void sfp_unregister_socket(struct sfp_bus *bus) 460void sfp_unregister_socket(struct sfp_bus *bus)
460{ 461{
461 rtnl_lock(); 462 rtnl_lock();
462 sfp_unregister_bus(bus); 463 if (bus->netdev)
464 sfp_unregister_bus(bus);
463 bus->sfp_dev = NULL; 465 bus->sfp_dev = NULL;
464 bus->sfp = NULL; 466 bus->sfp = NULL;
465 bus->socket_ops = NULL; 467 bus->socket_ops = NULL;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 3000ddd1c7e2..728819feab44 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1100,6 +1100,7 @@ static const struct usb_device_id products[] = {
1100 {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, 1100 {QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
1101 {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, 1101 {QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
1102 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, 1102 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
1103 {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */
1103 {QMI_FIXED_INTF(0x0846, 0x68a2, 8)}, 1104 {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
1104 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ 1105 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
1105 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ 1106 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index f7d228b5ba93..987f1252a3cf 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -384,6 +384,18 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
384 } 384 }
385 } 385 }
386 386
387 if (changed & IEEE80211_CONF_CHANGE_PS) {
388 list_for_each_entry(tmp, &wcn->vif_list, list) {
389 vif = wcn36xx_priv_to_vif(tmp);
390 if (hw->conf.flags & IEEE80211_CONF_PS) {
391 if (vif->bss_conf.ps) /* ps allowed ? */
392 wcn36xx_pmc_enter_bmps_state(wcn, vif);
393 } else {
394 wcn36xx_pmc_exit_bmps_state(wcn, vif);
395 }
396 }
397 }
398
387 mutex_unlock(&wcn->conf_mutex); 399 mutex_unlock(&wcn->conf_mutex);
388 400
389 return 0; 401 return 0;
@@ -747,17 +759,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
747 vif_priv->dtim_period = bss_conf->dtim_period; 759 vif_priv->dtim_period = bss_conf->dtim_period;
748 } 760 }
749 761
750 if (changed & BSS_CHANGED_PS) {
751 wcn36xx_dbg(WCN36XX_DBG_MAC,
752 "mac bss PS set %d\n",
753 bss_conf->ps);
754 if (bss_conf->ps) {
755 wcn36xx_pmc_enter_bmps_state(wcn, vif);
756 } else {
757 wcn36xx_pmc_exit_bmps_state(wcn, vif);
758 }
759 }
760
761 if (changed & BSS_CHANGED_BSSID) { 762 if (changed & BSS_CHANGED_BSSID) {
762 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n", 763 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n",
763 bss_conf->bssid); 764 bss_conf->bssid);
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c
index 589fe5f70971..1976b80c235f 100644
--- a/drivers/net/wireless/ath/wcn36xx/pmc.c
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.c
@@ -45,8 +45,10 @@ int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
45 struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); 45 struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
46 46
47 if (WCN36XX_BMPS != vif_priv->pw_state) { 47 if (WCN36XX_BMPS != vif_priv->pw_state) {
48 wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n"); 48 /* Unbalanced call or last BMPS enter failed */
49 return -EINVAL; 49 wcn36xx_dbg(WCN36XX_DBG_PMC,
50 "Not in BMPS mode, no need to exit\n");
51 return -EALREADY;
50 } 52 }
51 wcn36xx_smd_exit_bmps(wcn, vif); 53 wcn36xx_smd_exit_bmps(wcn, vif);
52 vif_priv->pw_state = WCN36XX_FULL_POWER; 54 vif_priv->pw_state = WCN36XX_FULL_POWER;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index d749abeca3ae..403e65c309d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -670,11 +670,15 @@ static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index)
670 return index & (q->n_window - 1); 670 return index & (q->n_window - 1);
671} 671}
672 672
673static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie, 673static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
674 struct iwl_txq *txq, int idx) 674 struct iwl_txq *txq, int idx)
675{ 675{
676 return txq->tfds + trans_pcie->tfd_size * iwl_pcie_get_cmd_index(txq, 676 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
677 idx); 677
678 if (trans->cfg->use_tfh)
679 idx = iwl_pcie_get_cmd_index(txq, idx);
680
681 return txq->tfds + trans_pcie->tfd_size * idx;
678} 682}
679 683
680static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) 684static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 16b345f54ff0..6d0a907d5ba5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -171,8 +171,6 @@ static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
171 171
172static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) 172static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
173{ 173{
174 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
175
176 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and 174 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
177 * idx is bounded by n_window 175 * idx is bounded by n_window
178 */ 176 */
@@ -181,7 +179,7 @@ static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
181 lockdep_assert_held(&txq->lock); 179 lockdep_assert_held(&txq->lock);
182 180
183 iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta, 181 iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
184 iwl_pcie_get_tfd(trans_pcie, txq, idx)); 182 iwl_pcie_get_tfd(trans, txq, idx));
185 183
186 /* free SKB */ 184 /* free SKB */
187 if (txq->entries) { 185 if (txq->entries) {
@@ -364,11 +362,9 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
364 struct sk_buff *skb, 362 struct sk_buff *skb,
365 struct iwl_cmd_meta *out_meta) 363 struct iwl_cmd_meta *out_meta)
366{ 364{
367 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
368 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 365 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
369 int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); 366 int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
370 struct iwl_tfh_tfd *tfd = 367 struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
371 iwl_pcie_get_tfd(trans_pcie, txq, idx);
372 dma_addr_t tb_phys; 368 dma_addr_t tb_phys;
373 bool amsdu; 369 bool amsdu;
374 int i, len, tb1_len, tb2_len, hdr_len; 370 int i, len, tb1_len, tb2_len, hdr_len;
@@ -565,8 +561,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
565 u8 group_id = iwl_cmd_groupid(cmd->id); 561 u8 group_id = iwl_cmd_groupid(cmd->id);
566 const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; 562 const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
567 u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; 563 u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
568 struct iwl_tfh_tfd *tfd = 564 struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
569 iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
570 565
571 memset(tfd, 0, sizeof(*tfd)); 566 memset(tfd, 0, sizeof(*tfd));
572 567
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index fed6d842a5e1..3f85713c41dc 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -373,7 +373,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
373{ 373{
374 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 374 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
375 int i, num_tbs; 375 int i, num_tbs;
376 void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index); 376 void *tfd = iwl_pcie_get_tfd(trans, txq, index);
377 377
378 /* Sanity check on number of chunks */ 378 /* Sanity check on number of chunks */
379 num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); 379 num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
@@ -2018,7 +2018,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
2018 } 2018 }
2019 2019
2020 trace_iwlwifi_dev_tx(trans->dev, skb, 2020 trace_iwlwifi_dev_tx(trans->dev, skb,
2021 iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr), 2021 iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
2022 trans_pcie->tfd_size, 2022 trans_pcie->tfd_size,
2023 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 2023 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
2024 hdr_len); 2024 hdr_len);
@@ -2092,7 +2092,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
2092 IEEE80211_CCMP_HDR_LEN : 0; 2092 IEEE80211_CCMP_HDR_LEN : 0;
2093 2093
2094 trace_iwlwifi_dev_tx(trans->dev, skb, 2094 trace_iwlwifi_dev_tx(trans->dev, skb,
2095 iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr), 2095 iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
2096 trans_pcie->tfd_size, 2096 trans_pcie->tfd_size,
2097 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0); 2097 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
2098 2098
@@ -2425,7 +2425,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
2425 memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr, 2425 memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
2426 IWL_FIRST_TB_SIZE); 2426 IWL_FIRST_TB_SIZE);
2427 2427
2428 tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr); 2428 tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
2429 /* Set up entry for this TFD in Tx byte-count array */ 2429 /* Set up entry for this TFD in Tx byte-count array */
2430 iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), 2430 iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
2431 iwl_pcie_tfd_get_num_tbs(trans, tfd)); 2431 iwl_pcie_tfd_get_num_tbs(trans, tfd));
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index c5a34671abda..9bd7ddeeb6a5 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1326,6 +1326,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1326 1326
1327 netif_carrier_off(netdev); 1327 netif_carrier_off(netdev);
1328 1328
1329 xenbus_switch_state(dev, XenbusStateInitialising);
1329 return netdev; 1330 return netdev;
1330 1331
1331 exit: 1332 exit:
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index cf074b1ce219..10041ac4032c 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -35,6 +35,7 @@
35#include "pmem.h" 35#include "pmem.h"
36#include "pfn.h" 36#include "pfn.h"
37#include "nd.h" 37#include "nd.h"
38#include "nd-core.h"
38 39
39static struct device *to_dev(struct pmem_device *pmem) 40static struct device *to_dev(struct pmem_device *pmem)
40{ 41{
@@ -334,7 +335,8 @@ static int pmem_attach_disk(struct device *dev,
334 dev_warn(dev, "unable to guarantee persistence of writes\n"); 335 dev_warn(dev, "unable to guarantee persistence of writes\n");
335 fua = 0; 336 fua = 0;
336 } 337 }
337 wbc = nvdimm_has_cache(nd_region); 338 wbc = nvdimm_has_cache(nd_region) &&
339 !test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
338 340
339 if (!devm_request_mem_region(dev, res->start, resource_size(res), 341 if (!devm_request_mem_region(dev, res->start, resource_size(res),
340 dev_name(&ndns->dev))) { 342 dev_name(&ndns->dev))) {
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index abaf38c61220..e6d01911e092 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -528,6 +528,18 @@ static ssize_t resource_show(struct device *dev,
528} 528}
529static DEVICE_ATTR_RO(resource); 529static DEVICE_ATTR_RO(resource);
530 530
531static ssize_t persistence_domain_show(struct device *dev,
532 struct device_attribute *attr, char *buf)
533{
534 struct nd_region *nd_region = to_nd_region(dev);
535 unsigned long flags = nd_region->flags;
536
537 return sprintf(buf, "%s%s\n",
538 flags & BIT(ND_REGION_PERSIST_CACHE) ? "cpu_cache " : "",
539 flags & BIT(ND_REGION_PERSIST_MEMCTRL) ? "memory_controller " : "");
540}
541static DEVICE_ATTR_RO(persistence_domain);
542
531static struct attribute *nd_region_attributes[] = { 543static struct attribute *nd_region_attributes[] = {
532 &dev_attr_size.attr, 544 &dev_attr_size.attr,
533 &dev_attr_nstype.attr, 545 &dev_attr_nstype.attr,
@@ -543,6 +555,7 @@ static struct attribute *nd_region_attributes[] = {
543 &dev_attr_init_namespaces.attr, 555 &dev_attr_init_namespaces.attr,
544 &dev_attr_badblocks.attr, 556 &dev_attr_badblocks.attr,
545 &dev_attr_resource.attr, 557 &dev_attr_resource.attr,
558 &dev_attr_persistence_domain.attr,
546 NULL, 559 NULL,
547}; 560};
548 561
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 1e46e60b8f10..839650e0926a 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1335,6 +1335,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
1335 struct nvme_ns *ns, struct nvme_id_ns *id) 1335 struct nvme_ns *ns, struct nvme_id_ns *id)
1336{ 1336{
1337 sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9); 1337 sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9);
1338 unsigned short bs = 1 << ns->lba_shift;
1338 unsigned stream_alignment = 0; 1339 unsigned stream_alignment = 0;
1339 1340
1340 if (ns->ctrl->nr_streams && ns->sws && ns->sgs) 1341 if (ns->ctrl->nr_streams && ns->sws && ns->sgs)
@@ -1343,7 +1344,10 @@ static void nvme_update_disk_info(struct gendisk *disk,
1343 blk_mq_freeze_queue(disk->queue); 1344 blk_mq_freeze_queue(disk->queue);
1344 blk_integrity_unregister(disk); 1345 blk_integrity_unregister(disk);
1345 1346
1346 blk_queue_logical_block_size(disk->queue, 1 << ns->lba_shift); 1347 blk_queue_logical_block_size(disk->queue, bs);
1348 blk_queue_physical_block_size(disk->queue, bs);
1349 blk_queue_io_min(disk->queue, bs);
1350
1347 if (ns->ms && !ns->ext && 1351 if (ns->ms && !ns->ext &&
1348 (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) 1352 (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
1349 nvme_init_integrity(disk, ns->ms, ns->pi_type); 1353 nvme_init_integrity(disk, ns->ms, ns->pi_type);
@@ -2987,6 +2991,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
2987 mutex_unlock(&ns->ctrl->namespaces_mutex); 2991 mutex_unlock(&ns->ctrl->namespaces_mutex);
2988 2992
2989 synchronize_srcu(&ns->head->srcu); 2993 synchronize_srcu(&ns->head->srcu);
2994 nvme_mpath_check_last_path(ns);
2990 nvme_put_ns(ns); 2995 nvme_put_ns(ns);
2991} 2996}
2992 2997
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 76b4fe6816a0..894c2ccb3891 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -74,6 +74,7 @@ static struct nvmf_host *nvmf_host_default(void)
74 return NULL; 74 return NULL;
75 75
76 kref_init(&host->ref); 76 kref_init(&host->ref);
77 uuid_gen(&host->id);
77 snprintf(host->nqn, NVMF_NQN_SIZE, 78 snprintf(host->nqn, NVMF_NQN_SIZE,
78 "nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id); 79 "nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);
79 80
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index ea1aa5283e8e..a00eabd06427 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -417,6 +417,15 @@ static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
417 rcu_assign_pointer(head->current_path, NULL); 417 rcu_assign_pointer(head->current_path, NULL);
418} 418}
419struct nvme_ns *nvme_find_path(struct nvme_ns_head *head); 419struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
420
421static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
422{
423 struct nvme_ns_head *head = ns->head;
424
425 if (head->disk && list_empty(&head->list))
426 kblockd_schedule_work(&head->requeue_work);
427}
428
420#else 429#else
421static inline void nvme_failover_req(struct request *req) 430static inline void nvme_failover_req(struct request *req)
422{ 431{
@@ -448,6 +457,9 @@ static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
448static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns) 457static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
449{ 458{
450} 459}
460static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
461{
462}
451#endif /* CONFIG_NVME_MULTIPATH */ 463#endif /* CONFIG_NVME_MULTIPATH */
452 464
453#ifdef CONFIG_NVM 465#ifdef CONFIG_NVM
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index f5800c3c9082..d53550e612bc 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -448,12 +448,31 @@ static void **nvme_pci_iod_list(struct request *req)
448 return (void **)(iod->sg + blk_rq_nr_phys_segments(req)); 448 return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
449} 449}
450 450
451static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
452{
453 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
454 unsigned int avg_seg_size;
455
456 avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req),
457 blk_rq_nr_phys_segments(req));
458
459 if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
460 return false;
461 if (!iod->nvmeq->qid)
462 return false;
463 if (!sgl_threshold || avg_seg_size < sgl_threshold)
464 return false;
465 return true;
466}
467
451static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev) 468static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
452{ 469{
453 struct nvme_iod *iod = blk_mq_rq_to_pdu(rq); 470 struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
454 int nseg = blk_rq_nr_phys_segments(rq); 471 int nseg = blk_rq_nr_phys_segments(rq);
455 unsigned int size = blk_rq_payload_bytes(rq); 472 unsigned int size = blk_rq_payload_bytes(rq);
456 473
474 iod->use_sgl = nvme_pci_use_sgls(dev, rq);
475
457 if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) { 476 if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
458 size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg, 477 size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg,
459 iod->use_sgl); 478 iod->use_sgl);
@@ -604,8 +623,6 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
604 dma_addr_t prp_dma; 623 dma_addr_t prp_dma;
605 int nprps, i; 624 int nprps, i;
606 625
607 iod->use_sgl = false;
608
609 length -= (page_size - offset); 626 length -= (page_size - offset);
610 if (length <= 0) { 627 if (length <= 0) {
611 iod->first_dma = 0; 628 iod->first_dma = 0;
@@ -715,8 +732,6 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
715 int entries = iod->nents, i = 0; 732 int entries = iod->nents, i = 0;
716 dma_addr_t sgl_dma; 733 dma_addr_t sgl_dma;
717 734
718 iod->use_sgl = true;
719
720 /* setting the transfer type as SGL */ 735 /* setting the transfer type as SGL */
721 cmd->flags = NVME_CMD_SGL_METABUF; 736 cmd->flags = NVME_CMD_SGL_METABUF;
722 737
@@ -770,23 +785,6 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
770 return BLK_STS_OK; 785 return BLK_STS_OK;
771} 786}
772 787
773static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
774{
775 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
776 unsigned int avg_seg_size;
777
778 avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req),
779 blk_rq_nr_phys_segments(req));
780
781 if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
782 return false;
783 if (!iod->nvmeq->qid)
784 return false;
785 if (!sgl_threshold || avg_seg_size < sgl_threshold)
786 return false;
787 return true;
788}
789
790static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, 788static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
791 struct nvme_command *cmnd) 789 struct nvme_command *cmnd)
792{ 790{
@@ -806,7 +804,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
806 DMA_ATTR_NO_WARN)) 804 DMA_ATTR_NO_WARN))
807 goto out; 805 goto out;
808 806
809 if (nvme_pci_use_sgls(dev, req)) 807 if (iod->use_sgl)
810 ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw); 808 ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
811 else 809 else
812 ret = nvme_pci_setup_prps(dev, req, &cmnd->rw); 810 ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 37af56596be6..2a0bba7f50cf 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -974,12 +974,18 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
974 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); 974 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
975 nvme_start_queues(&ctrl->ctrl); 975 nvme_start_queues(&ctrl->ctrl);
976 976
977 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
978 /* state change failure should never happen */
979 WARN_ON_ONCE(1);
980 return;
981 }
982
977 nvme_rdma_reconnect_or_remove(ctrl); 983 nvme_rdma_reconnect_or_remove(ctrl);
978} 984}
979 985
980static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl) 986static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
981{ 987{
982 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) 988 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
983 return; 989 return;
984 990
985 queue_work(nvme_wq, &ctrl->err_work); 991 queue_work(nvme_wq, &ctrl->err_work);
@@ -1753,6 +1759,12 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
1753 nvme_stop_ctrl(&ctrl->ctrl); 1759 nvme_stop_ctrl(&ctrl->ctrl);
1754 nvme_rdma_shutdown_ctrl(ctrl, false); 1760 nvme_rdma_shutdown_ctrl(ctrl, false);
1755 1761
1762 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
1763 /* state change failure should never happen */
1764 WARN_ON_ONCE(1);
1765 return;
1766 }
1767
1756 ret = nvme_rdma_configure_admin_queue(ctrl, false); 1768 ret = nvme_rdma_configure_admin_queue(ctrl, false);
1757 if (ret) 1769 if (ret)
1758 goto out_fail; 1770 goto out_fail;
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 7b75d9de55ab..6a018a0bd6ce 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -1085,7 +1085,7 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
1085 const char *buf, size_t count) 1085 const char *buf, size_t count)
1086{ 1086{
1087 struct fcloop_nport *nport = NULL, *tmpport; 1087 struct fcloop_nport *nport = NULL, *tmpport;
1088 struct fcloop_tport *tport; 1088 struct fcloop_tport *tport = NULL;
1089 u64 nodename, portname; 1089 u64 nodename, portname;
1090 unsigned long flags; 1090 unsigned long flags;
1091 int ret; 1091 int ret;
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 3481e69738b5..a327be1d264b 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -231,7 +231,12 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
231 rc = of_mdiobus_register_phy(mdio, child, addr); 231 rc = of_mdiobus_register_phy(mdio, child, addr);
232 else 232 else
233 rc = of_mdiobus_register_device(mdio, child, addr); 233 rc = of_mdiobus_register_device(mdio, child, addr);
234 if (rc) 234
235 if (rc == -ENODEV)
236 dev_err(&mdio->dev,
237 "MDIO device at address %d is missing.\n",
238 addr);
239 else if (rc)
235 goto unregister; 240 goto unregister;
236 } 241 }
237 242
@@ -255,7 +260,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
255 260
256 if (of_mdiobus_child_is_phy(child)) { 261 if (of_mdiobus_child_is_phy(child)) {
257 rc = of_mdiobus_register_phy(mdio, child, addr); 262 rc = of_mdiobus_register_phy(mdio, child, addr);
258 if (rc) 263 if (rc && rc != -ENODEV)
259 goto unregister; 264 goto unregister;
260 } 265 }
261 } 266 }
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 0b3fb99d9b89..7390fb8ca9d1 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -303,7 +303,7 @@ static void dino_mask_irq(struct irq_data *d)
303 struct dino_device *dino_dev = irq_data_get_irq_chip_data(d); 303 struct dino_device *dino_dev = irq_data_get_irq_chip_data(d);
304 int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS); 304 int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
305 305
306 DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq); 306 DBG(KERN_WARNING "%s(0x%px, %d)\n", __func__, dino_dev, d->irq);
307 307
308 /* Clear the matching bit in the IMR register */ 308 /* Clear the matching bit in the IMR register */
309 dino_dev->imr &= ~(DINO_MASK_IRQ(local_irq)); 309 dino_dev->imr &= ~(DINO_MASK_IRQ(local_irq));
@@ -316,7 +316,7 @@ static void dino_unmask_irq(struct irq_data *d)
316 int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS); 316 int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
317 u32 tmp; 317 u32 tmp;
318 318
319 DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq); 319 DBG(KERN_WARNING "%s(0x%px, %d)\n", __func__, dino_dev, d->irq);
320 320
321 /* 321 /*
322 ** clear pending IRQ bits 322 ** clear pending IRQ bits
@@ -396,7 +396,7 @@ ilr_again:
396 if (mask) { 396 if (mask) {
397 if (--ilr_loop > 0) 397 if (--ilr_loop > 0)
398 goto ilr_again; 398 goto ilr_again;
399 printk(KERN_ERR "Dino 0x%p: stuck interrupt %d\n", 399 printk(KERN_ERR "Dino 0x%px: stuck interrupt %d\n",
400 dino_dev->hba.base_addr, mask); 400 dino_dev->hba.base_addr, mask);
401 return IRQ_NONE; 401 return IRQ_NONE;
402 } 402 }
@@ -553,7 +553,7 @@ dino_fixup_bus(struct pci_bus *bus)
553 struct pci_dev *dev; 553 struct pci_dev *dev;
554 struct dino_device *dino_dev = DINO_DEV(parisc_walk_tree(bus->bridge)); 554 struct dino_device *dino_dev = DINO_DEV(parisc_walk_tree(bus->bridge));
555 555
556 DBG(KERN_WARNING "%s(0x%p) bus %d platform_data 0x%p\n", 556 DBG(KERN_WARNING "%s(0x%px) bus %d platform_data 0x%px\n",
557 __func__, bus, bus->busn_res.start, 557 __func__, bus, bus->busn_res.start,
558 bus->bridge->platform_data); 558 bus->bridge->platform_data);
559 559
@@ -854,7 +854,7 @@ static int __init dino_common_init(struct parisc_device *dev,
854 res->flags = IORESOURCE_IO; /* do not mark it busy ! */ 854 res->flags = IORESOURCE_IO; /* do not mark it busy ! */
855 if (request_resource(&ioport_resource, res) < 0) { 855 if (request_resource(&ioport_resource, res) < 0) {
856 printk(KERN_ERR "%s: request I/O Port region failed " 856 printk(KERN_ERR "%s: request I/O Port region failed "
857 "0x%lx/%lx (hpa 0x%p)\n", 857 "0x%lx/%lx (hpa 0x%px)\n",
858 name, (unsigned long)res->start, (unsigned long)res->end, 858 name, (unsigned long)res->start, (unsigned long)res->end,
859 dino_dev->hba.base_addr); 859 dino_dev->hba.base_addr);
860 return 1; 860 return 1;
diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c
index 4dd9b1308128..99a80da6fd2e 100644
--- a/drivers/parisc/eisa_eeprom.c
+++ b/drivers/parisc/eisa_eeprom.c
@@ -106,7 +106,7 @@ static int __init eisa_eeprom_init(void)
106 return retval; 106 return retval;
107 } 107 }
108 108
109 printk(KERN_INFO "EISA EEPROM at 0x%p\n", eisa_eeprom_addr); 109 printk(KERN_INFO "EISA EEPROM at 0x%px\n", eisa_eeprom_addr);
110 return 0; 110 return 0;
111} 111}
112 112
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 791449a2370f..daa68acbc900 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -1458,5 +1458,5 @@ static void __exit acpi_wmi_exit(void)
1458 class_unregister(&wmi_bus_class); 1458 class_unregister(&wmi_bus_class);
1459} 1459}
1460 1460
1461subsys_initcall(acpi_wmi_init); 1461subsys_initcall_sync(acpi_wmi_init);
1462module_exit(acpi_wmi_exit); 1462module_exit(acpi_wmi_exit);
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index c94b606e0df8..ee14d8e45c97 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -2803,6 +2803,16 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
2803 erp = dasd_3990_erp_handle_match_erp(cqr, erp); 2803 erp = dasd_3990_erp_handle_match_erp(cqr, erp);
2804 } 2804 }
2805 2805
2806
2807 /*
2808 * For path verification work we need to stick with the path that was
2809 * originally chosen so that the per path configuration data is
2810 * assigned correctly.
2811 */
2812 if (test_bit(DASD_CQR_VERIFY_PATH, &erp->flags) && cqr->lpm) {
2813 erp->lpm = cqr->lpm;
2814 }
2815
2806 if (device->features & DASD_FEATURE_ERPLOG) { 2816 if (device->features & DASD_FEATURE_ERPLOG) {
2807 /* print current erp_chain */ 2817 /* print current erp_chain */
2808 dev_err(&device->cdev->dev, 2818 dev_err(&device->cdev->dev,
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 05ac6ba15a53..614b44e70a28 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -17,6 +17,8 @@ CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH)
17CFLAGS_sclp_early_core.o += -march=z900 17CFLAGS_sclp_early_core.o += -march=z900
18endif 18endif
19 19
20CFLAGS_sclp_early_core.o += -D__NO_FORTIFY
21
20obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ 22obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
21 sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \ 23 sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
22 sclp_early.o sclp_early_core.o 24 sclp_early.o sclp_early_core.o
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 0f695df14c9d..372ce9913e6d 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -765,10 +765,12 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
765 break; 765 break;
766 case ASHMEM_SET_SIZE: 766 case ASHMEM_SET_SIZE:
767 ret = -EINVAL; 767 ret = -EINVAL;
768 mutex_lock(&ashmem_mutex);
768 if (!asma->file) { 769 if (!asma->file) {
769 ret = 0; 770 ret = 0;
770 asma->size = (size_t)arg; 771 asma->size = (size_t)arg;
771 } 772 }
773 mutex_unlock(&ashmem_mutex);
772 break; 774 break;
773 case ASHMEM_GET_SIZE: 775 case ASHMEM_GET_SIZE:
774 ret = asma->size; 776 ret = asma->size;
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 93eff7dec2f5..1b3efb14aec7 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1147,11 +1147,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
1147 1147
1148 udc = kzalloc(sizeof(*udc), GFP_KERNEL); 1148 udc = kzalloc(sizeof(*udc), GFP_KERNEL);
1149 if (!udc) 1149 if (!udc)
1150 goto err1; 1150 goto err_put_gadget;
1151
1152 ret = device_add(&gadget->dev);
1153 if (ret)
1154 goto err2;
1155 1151
1156 device_initialize(&udc->dev); 1152 device_initialize(&udc->dev);
1157 udc->dev.release = usb_udc_release; 1153 udc->dev.release = usb_udc_release;
@@ -1160,7 +1156,11 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
1160 udc->dev.parent = parent; 1156 udc->dev.parent = parent;
1161 ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj)); 1157 ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj));
1162 if (ret) 1158 if (ret)
1163 goto err3; 1159 goto err_put_udc;
1160
1161 ret = device_add(&gadget->dev);
1162 if (ret)
1163 goto err_put_udc;
1164 1164
1165 udc->gadget = gadget; 1165 udc->gadget = gadget;
1166 gadget->udc = udc; 1166 gadget->udc = udc;
@@ -1170,7 +1170,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
1170 1170
1171 ret = device_add(&udc->dev); 1171 ret = device_add(&udc->dev);
1172 if (ret) 1172 if (ret)
1173 goto err4; 1173 goto err_unlist_udc;
1174 1174
1175 usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED); 1175 usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED);
1176 udc->vbus = true; 1176 udc->vbus = true;
@@ -1178,27 +1178,25 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
1178 /* pick up one of pending gadget drivers */ 1178 /* pick up one of pending gadget drivers */
1179 ret = check_pending_gadget_drivers(udc); 1179 ret = check_pending_gadget_drivers(udc);
1180 if (ret) 1180 if (ret)
1181 goto err5; 1181 goto err_del_udc;
1182 1182
1183 mutex_unlock(&udc_lock); 1183 mutex_unlock(&udc_lock);
1184 1184
1185 return 0; 1185 return 0;
1186 1186
1187err5: 1187 err_del_udc:
1188 device_del(&udc->dev); 1188 device_del(&udc->dev);
1189 1189
1190err4: 1190 err_unlist_udc:
1191 list_del(&udc->list); 1191 list_del(&udc->list);
1192 mutex_unlock(&udc_lock); 1192 mutex_unlock(&udc_lock);
1193 1193
1194err3:
1195 put_device(&udc->dev);
1196 device_del(&gadget->dev); 1194 device_del(&gadget->dev);
1197 1195
1198err2: 1196 err_put_udc:
1199 kfree(udc); 1197 put_device(&udc->dev);
1200 1198
1201err1: 1199 err_put_gadget:
1202 put_device(&gadget->dev); 1200 put_device(&gadget->dev);
1203 return ret; 1201 return ret;
1204} 1202}
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index 465dbf68b463..f723f7b8c9ac 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -279,6 +279,8 @@ static int usb3503_probe(struct usb3503 *hub)
279 if (gpio_is_valid(hub->gpio_reset)) { 279 if (gpio_is_valid(hub->gpio_reset)) {
280 err = devm_gpio_request_one(dev, hub->gpio_reset, 280 err = devm_gpio_request_one(dev, hub->gpio_reset,
281 GPIOF_OUT_INIT_LOW, "usb3503 reset"); 281 GPIOF_OUT_INIT_LOW, "usb3503 reset");
282 /* Datasheet defines a hardware reset to be at least 100us */
283 usleep_range(100, 10000);
282 if (err) { 284 if (err) {
283 dev_err(dev, 285 dev_err(dev,
284 "unable to request GPIO %d as reset pin (%d)\n", 286 "unable to request GPIO %d as reset pin (%d)\n",
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index f6ae753ab99b..f932f40302df 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1004,7 +1004,9 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg
1004 break; 1004 break;
1005 1005
1006 case MON_IOCQ_RING_SIZE: 1006 case MON_IOCQ_RING_SIZE:
1007 mutex_lock(&rp->fetch_lock);
1007 ret = rp->b_size; 1008 ret = rp->b_size;
1009 mutex_unlock(&rp->fetch_lock);
1008 break; 1010 break;
1009 1011
1010 case MON_IOCT_RING_SIZE: 1012 case MON_IOCT_RING_SIZE:
@@ -1231,12 +1233,16 @@ static int mon_bin_vma_fault(struct vm_fault *vmf)
1231 unsigned long offset, chunk_idx; 1233 unsigned long offset, chunk_idx;
1232 struct page *pageptr; 1234 struct page *pageptr;
1233 1235
1236 mutex_lock(&rp->fetch_lock);
1234 offset = vmf->pgoff << PAGE_SHIFT; 1237 offset = vmf->pgoff << PAGE_SHIFT;
1235 if (offset >= rp->b_size) 1238 if (offset >= rp->b_size) {
1239 mutex_unlock(&rp->fetch_lock);
1236 return VM_FAULT_SIGBUS; 1240 return VM_FAULT_SIGBUS;
1241 }
1237 chunk_idx = offset / CHUNK_SIZE; 1242 chunk_idx = offset / CHUNK_SIZE;
1238 pageptr = rp->b_vec[chunk_idx].pg; 1243 pageptr = rp->b_vec[chunk_idx].pg;
1239 get_page(pageptr); 1244 get_page(pageptr);
1245 mutex_unlock(&rp->fetch_lock);
1240 vmf->page = pageptr; 1246 vmf->page = pageptr;
1241 return 0; 1247 return 0;
1242} 1248}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 7c6273bf5beb..06d502b3e913 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = {
124 { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */ 124 { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
125 { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ 125 { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
126 { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */ 126 { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
127 { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */
127 { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ 128 { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
128 { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ 129 { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
129 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ 130 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -174,6 +175,7 @@ static const struct usb_device_id id_table[] = {
174 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ 175 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
175 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ 176 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
176 { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ 177 { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
178 { USB_DEVICE(0x18EF, 0xE030) }, /* ELV ALC 8xxx Battery Charger */
177 { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */ 179 { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */
178 { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ 180 { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
179 { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ 181 { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index e6127fb21c12..a7d08ae0adad 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -143,6 +143,13 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
143 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 143 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
144 US_FL_NO_ATA_1X), 144 US_FL_NO_ATA_1X),
145 145
146/* Reported-by: Icenowy Zheng <icenowy@aosc.io> */
147UNUSUAL_DEV(0x2537, 0x1068, 0x0000, 0x9999,
148 "Norelsys",
149 "NS1068X",
150 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
151 US_FL_IGNORE_UAS),
152
146/* Reported-by: Takeo Nakayama <javhera@gmx.com> */ 153/* Reported-by: Takeo Nakayama <javhera@gmx.com> */
147UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999, 154UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999,
148 "JMicron", 155 "JMicron",
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index 7b219d9109b4..ee2bbce24584 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -91,7 +91,7 @@ static void usbip_dump_usb_device(struct usb_device *udev)
91 dev_dbg(dev, " devnum(%d) devpath(%s) usb speed(%s)", 91 dev_dbg(dev, " devnum(%d) devpath(%s) usb speed(%s)",
92 udev->devnum, udev->devpath, usb_speed_string(udev->speed)); 92 udev->devnum, udev->devpath, usb_speed_string(udev->speed));
93 93
94 pr_debug("tt %p, ttport %d\n", udev->tt, udev->ttport); 94 pr_debug("tt hub ttport %d\n", udev->ttport);
95 95
96 dev_dbg(dev, " "); 96 dev_dbg(dev, " ");
97 for (i = 0; i < 16; i++) 97 for (i = 0; i < 16; i++)
@@ -124,12 +124,8 @@ static void usbip_dump_usb_device(struct usb_device *udev)
124 } 124 }
125 pr_debug("\n"); 125 pr_debug("\n");
126 126
127 dev_dbg(dev, "parent %p, bus %p\n", udev->parent, udev->bus); 127 dev_dbg(dev, "parent %s, bus %s\n", dev_name(&udev->parent->dev),
128 128 udev->bus->bus_name);
129 dev_dbg(dev,
130 "descriptor %p, config %p, actconfig %p, rawdescriptors %p\n",
131 &udev->descriptor, udev->config,
132 udev->actconfig, udev->rawdescriptors);
133 129
134 dev_dbg(dev, "have_langid %d, string_langid %d\n", 130 dev_dbg(dev, "have_langid %d, string_langid %d\n",
135 udev->have_langid, udev->string_langid); 131 udev->have_langid, udev->string_langid);
@@ -237,9 +233,6 @@ void usbip_dump_urb(struct urb *urb)
237 233
238 dev = &urb->dev->dev; 234 dev = &urb->dev->dev;
239 235
240 dev_dbg(dev, " urb :%p\n", urb);
241 dev_dbg(dev, " dev :%p\n", urb->dev);
242
243 usbip_dump_usb_device(urb->dev); 236 usbip_dump_usb_device(urb->dev);
244 237
245 dev_dbg(dev, " pipe :%08x ", urb->pipe); 238 dev_dbg(dev, " pipe :%08x ", urb->pipe);
@@ -248,11 +241,9 @@ void usbip_dump_urb(struct urb *urb)
248 241
249 dev_dbg(dev, " status :%d\n", urb->status); 242 dev_dbg(dev, " status :%d\n", urb->status);
250 dev_dbg(dev, " transfer_flags :%08X\n", urb->transfer_flags); 243 dev_dbg(dev, " transfer_flags :%08X\n", urb->transfer_flags);
251 dev_dbg(dev, " transfer_buffer :%p\n", urb->transfer_buffer);
252 dev_dbg(dev, " transfer_buffer_length:%d\n", 244 dev_dbg(dev, " transfer_buffer_length:%d\n",
253 urb->transfer_buffer_length); 245 urb->transfer_buffer_length);
254 dev_dbg(dev, " actual_length :%d\n", urb->actual_length); 246 dev_dbg(dev, " actual_length :%d\n", urb->actual_length);
255 dev_dbg(dev, " setup_packet :%p\n", urb->setup_packet);
256 247
257 if (urb->setup_packet && usb_pipetype(urb->pipe) == PIPE_CONTROL) 248 if (urb->setup_packet && usb_pipetype(urb->pipe) == PIPE_CONTROL)
258 usbip_dump_usb_ctrlrequest( 249 usbip_dump_usb_ctrlrequest(
@@ -262,8 +253,6 @@ void usbip_dump_urb(struct urb *urb)
262 dev_dbg(dev, " number_of_packets :%d\n", urb->number_of_packets); 253 dev_dbg(dev, " number_of_packets :%d\n", urb->number_of_packets);
263 dev_dbg(dev, " interval :%d\n", urb->interval); 254 dev_dbg(dev, " interval :%d\n", urb->interval);
264 dev_dbg(dev, " error_count :%d\n", urb->error_count); 255 dev_dbg(dev, " error_count :%d\n", urb->error_count);
265 dev_dbg(dev, " context :%p\n", urb->context);
266 dev_dbg(dev, " complete :%p\n", urb->complete);
267} 256}
268EXPORT_SYMBOL_GPL(usbip_dump_urb); 257EXPORT_SYMBOL_GPL(usbip_dump_urb);
269 258
diff --git a/drivers/usb/usbip/vudc_rx.c b/drivers/usb/usbip/vudc_rx.c
index df1e30989148..1e8a23d92cb4 100644
--- a/drivers/usb/usbip/vudc_rx.c
+++ b/drivers/usb/usbip/vudc_rx.c
@@ -120,6 +120,25 @@ static int v_recv_cmd_submit(struct vudc *udc,
120 urb_p->new = 1; 120 urb_p->new = 1;
121 urb_p->seqnum = pdu->base.seqnum; 121 urb_p->seqnum = pdu->base.seqnum;
122 122
123 if (urb_p->ep->type == USB_ENDPOINT_XFER_ISOC) {
124 /* validate packet size and number of packets */
125 unsigned int maxp, packets, bytes;
126
127 maxp = usb_endpoint_maxp(urb_p->ep->desc);
128 maxp *= usb_endpoint_maxp_mult(urb_p->ep->desc);
129 bytes = pdu->u.cmd_submit.transfer_buffer_length;
130 packets = DIV_ROUND_UP(bytes, maxp);
131
132 if (pdu->u.cmd_submit.number_of_packets < 0 ||
133 pdu->u.cmd_submit.number_of_packets > packets) {
134 dev_err(&udc->gadget.dev,
135 "CMD_SUBMIT: isoc invalid num packets %d\n",
136 pdu->u.cmd_submit.number_of_packets);
137 ret = -EMSGSIZE;
138 goto free_urbp;
139 }
140 }
141
123 ret = alloc_urb_from_cmd(&urb_p->urb, pdu, urb_p->ep->type); 142 ret = alloc_urb_from_cmd(&urb_p->urb, pdu, urb_p->ep->type);
124 if (ret) { 143 if (ret) {
125 usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC); 144 usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
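
The v_recv_cmd_submit() hunk above bounds the host-supplied number_of_packets for isochronous transfers by how many packets of size maxp * mult the transfer length can actually occupy. A small userspace sketch of the same check, using made-up endpoint values, is below.

#include <stdbool.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Reject a request whose packet count cannot fit the buffer length. */
static bool isoc_packets_valid(int number_of_packets,
			       unsigned int transfer_len,
			       unsigned int maxp, unsigned int mult)
{
	unsigned int packets = DIV_ROUND_UP(transfer_len, maxp * mult);

	return number_of_packets >= 0 &&
	       (unsigned int)number_of_packets <= packets;
}

int main(void)
{
	/* 1024-byte packets, mult 1, 3000 bytes -> at most 3 packets */
	printf("%d\n", isoc_packets_valid(3, 3000, 1024, 1));	/* 1: ok  */
	printf("%d\n", isoc_packets_valid(100, 3000, 1024, 1));	/* 0: bad */
	return 0;
}
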
diff --git a/drivers/usb/usbip/vudc_tx.c b/drivers/usb/usbip/vudc_tx.c
index 1440ae0919ec..3ccb17c3e840 100644
--- a/drivers/usb/usbip/vudc_tx.c
+++ b/drivers/usb/usbip/vudc_tx.c
@@ -85,6 +85,13 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
85 memset(&pdu_header, 0, sizeof(pdu_header)); 85 memset(&pdu_header, 0, sizeof(pdu_header));
86 memset(&msg, 0, sizeof(msg)); 86 memset(&msg, 0, sizeof(msg));
87 87
88 if (urb->actual_length > 0 && !urb->transfer_buffer) {
89 dev_err(&udc->gadget.dev,
90 "urb: actual_length %d transfer_buffer null\n",
91 urb->actual_length);
92 return -1;
93 }
94
88 if (urb_p->type == USB_ENDPOINT_XFER_ISOC) 95 if (urb_p->type == USB_ENDPOINT_XFER_ISOC)
89 iovnum = 2 + urb->number_of_packets; 96 iovnum = 2 + urb->number_of_packets;
90 else 97 else
@@ -100,8 +107,8 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
100 107
101 /* 1. setup usbip_header */ 108 /* 1. setup usbip_header */
102 setup_ret_submit_pdu(&pdu_header, urb_p); 109 setup_ret_submit_pdu(&pdu_header, urb_p);
103 usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n", 110 usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
104 pdu_header.base.seqnum, urb); 111 pdu_header.base.seqnum);
105 usbip_header_correct_endian(&pdu_header, 1); 112 usbip_header_correct_endian(&pdu_header, 1);
106 113
107 iov[iovnum].iov_base = &pdu_header; 114 iov[iovnum].iov_base = &pdu_header;
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 57efbd3b053b..bd56653b9bbc 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -380,10 +380,8 @@ static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
380 } 380 }
381 range = 0; 381 range = 0;
382 while (range < pages) { 382 while (range < pages) {
383 if (map->unmap_ops[offset+range].handle == -1) { 383 if (map->unmap_ops[offset+range].handle == -1)
384 range--;
385 break; 384 break;
386 }
387 range++; 385 range++;
388 } 386 }
389 err = __unmap_grant_pages(map, offset, range); 387 err = __unmap_grant_pages(map, offset, range);
@@ -1073,8 +1071,10 @@ unlock_out:
1073out_unlock_put: 1071out_unlock_put:
1074 mutex_unlock(&priv->lock); 1072 mutex_unlock(&priv->lock);
1075out_put_map: 1073out_put_map:
1076 if (use_ptemod) 1074 if (use_ptemod) {
1077 map->vma = NULL; 1075 map->vma = NULL;
1076 unmap_grant_pages(map, 0, map->count);
1077 }
1078 gntdev_put_map(priv, map); 1078 gntdev_put_map(priv, map);
1079 return err; 1079 return err;
1080} 1080}
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index d1e1d8d2b9d5..4c789e61554b 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -805,7 +805,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
805 pvcalls_exit(); 805 pvcalls_exit();
806 return ret; 806 return ret;
807 } 807 }
808 map2 = kzalloc(sizeof(*map2), GFP_KERNEL); 808 map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
809 if (map2 == NULL) { 809 if (map2 == NULL) {
810 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, 810 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
811 (void *)&map->passive.flags); 811 (void *)&map->passive.flags);
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index ff8d5bf4354f..23c7f395d718 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -895,20 +895,38 @@ error:
895 * However, if we didn't have a callback promise outstanding, or it was 895 * However, if we didn't have a callback promise outstanding, or it was
896 * outstanding on a different server, then it won't break it either... 896 * outstanding on a different server, then it won't break it either...
897 */ 897 */
898static int afs_dir_remove_link(struct dentry *dentry, struct key *key) 898static int afs_dir_remove_link(struct dentry *dentry, struct key *key,
899 unsigned long d_version_before,
900 unsigned long d_version_after)
899{ 901{
902 bool dir_valid;
900 int ret = 0; 903 int ret = 0;
901 904
905 /* There were no intervening changes on the server if the version
906 * number we got back was incremented by exactly 1.
907 */
908 dir_valid = (d_version_after == d_version_before + 1);
909
902 if (d_really_is_positive(dentry)) { 910 if (d_really_is_positive(dentry)) {
903 struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry)); 911 struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
904 912
905 if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) 913 if (dir_valid) {
906 kdebug("AFS_VNODE_DELETED"); 914 drop_nlink(&vnode->vfs_inode);
907 clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); 915 if (vnode->vfs_inode.i_nlink == 0) {
908 916 set_bit(AFS_VNODE_DELETED, &vnode->flags);
909 ret = afs_validate(vnode, key); 917 clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
910 if (ret == -ESTALE) 918 }
911 ret = 0; 919 ret = 0;
920 } else {
921 clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
922
923 if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
924 kdebug("AFS_VNODE_DELETED");
925
926 ret = afs_validate(vnode, key);
927 if (ret == -ESTALE)
928 ret = 0;
929 }
912 _debug("nlink %d [val %d]", vnode->vfs_inode.i_nlink, ret); 930 _debug("nlink %d [val %d]", vnode->vfs_inode.i_nlink, ret);
913 } 931 }
914 932
@@ -923,6 +941,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
923 struct afs_fs_cursor fc; 941 struct afs_fs_cursor fc;
924 struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode; 942 struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode;
925 struct key *key; 943 struct key *key;
944 unsigned long d_version = (unsigned long)dentry->d_fsdata;
926 int ret; 945 int ret;
927 946
928 _enter("{%x:%u},{%pd}", 947 _enter("{%x:%u},{%pd}",
@@ -955,7 +974,9 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
955 afs_vnode_commit_status(&fc, dvnode, fc.cb_break); 974 afs_vnode_commit_status(&fc, dvnode, fc.cb_break);
956 ret = afs_end_vnode_operation(&fc); 975 ret = afs_end_vnode_operation(&fc);
957 if (ret == 0) 976 if (ret == 0)
958 ret = afs_dir_remove_link(dentry, key); 977 ret = afs_dir_remove_link(
978 dentry, key, d_version,
979 (unsigned long)dvnode->status.data_version);
959 } 980 }
960 981
961error_key: 982error_key:
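
afs_dir_remove_link() now trusts its cached directory only when the data version returned by the server is exactly one higher than the version seen before the unlink, i.e. the only change was its own; anything else forces revalidation. A tiny sketch of that comparison, with illustrative version numbers:

#include <stdbool.h>
#include <stdio.h>

/* Valid only if the sole change on the server was our own unlink. */
static bool dir_still_valid(unsigned long d_version_before,
			    unsigned long d_version_after)
{
	return d_version_after == d_version_before + 1;
}

int main(void)
{
	printf("%d\n", dir_still_valid(41, 42));	/* 1: just our change   */
	printf("%d\n", dir_still_valid(41, 45));	/* 0: other changes too */
	return 0;
}
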
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 3415eb7484f6..1e81864ef0b2 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -377,6 +377,10 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
377 } 377 }
378 378
379 read_sequnlock_excl(&vnode->cb_lock); 379 read_sequnlock_excl(&vnode->cb_lock);
380
381 if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
382 clear_nlink(&vnode->vfs_inode);
383
380 if (valid) 384 if (valid)
381 goto valid; 385 goto valid;
382 386
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index ea1460b9b71a..e1126659f043 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -885,7 +885,7 @@ int afs_extract_data(struct afs_call *call, void *buf, size_t count,
885{ 885{
886 struct afs_net *net = call->net; 886 struct afs_net *net = call->net;
887 enum afs_call_state state; 887 enum afs_call_state state;
888 u32 remote_abort; 888 u32 remote_abort = 0;
889 int ret; 889 int ret;
890 890
891 _enter("{%s,%zu},,%zu,%d", 891 _enter("{%s,%zu},,%zu,%d",
diff --git a/fs/afs/write.c b/fs/afs/write.c
index cb5f8a3df577..9370e2feb999 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -198,7 +198,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
198 ret = afs_fill_page(vnode, key, pos + copied, 198 ret = afs_fill_page(vnode, key, pos + copied,
199 len - copied, page); 199 len - copied, page);
200 if (ret < 0) 200 if (ret < 0)
201 return ret; 201 goto out;
202 } 202 }
203 SetPageUptodate(page); 203 SetPageUptodate(page);
204 } 204 }
@@ -206,10 +206,12 @@ int afs_write_end(struct file *file, struct address_space *mapping,
206 set_page_dirty(page); 206 set_page_dirty(page);
207 if (PageDirty(page)) 207 if (PageDirty(page))
208 _debug("dirtied"); 208 _debug("dirtied");
209 ret = copied;
210
211out:
209 unlock_page(page); 212 unlock_page(page);
210 put_page(page); 213 put_page(page);
211 214 return ret;
212 return copied;
213} 215}
214 216
215/* 217/*
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 5d73f79ded8b..056276101c63 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -87,6 +87,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
87 87
88 spin_lock(&root->inode_lock); 88 spin_lock(&root->inode_lock);
89 node = radix_tree_lookup(&root->delayed_nodes_tree, ino); 89 node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
90
90 if (node) { 91 if (node) {
91 if (btrfs_inode->delayed_node) { 92 if (btrfs_inode->delayed_node) {
92 refcount_inc(&node->refs); /* can be accessed */ 93 refcount_inc(&node->refs); /* can be accessed */
@@ -94,9 +95,30 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
94 spin_unlock(&root->inode_lock); 95 spin_unlock(&root->inode_lock);
95 return node; 96 return node;
96 } 97 }
97 btrfs_inode->delayed_node = node; 98
98 /* can be accessed and cached in the inode */ 99 /*
99 refcount_add(2, &node->refs); 100 * It's possible that we're racing into the middle of removing
101 * this node from the radix tree. In this case, the refcount
102 * was zero and it should never go back to one. Just return
103 * NULL like it was never in the radix at all; our release
104 * function is in the process of removing it.
105 *
106 * Some implementations of refcount_inc refuse to bump the
107 * refcount once it has hit zero. If we don't do this dance
108 * here, refcount_inc() may decide to just WARN_ONCE() instead
109 * of actually bumping the refcount.
110 *
111 * If this node is properly in the radix, we want to bump the
112 * refcount twice, once for the inode and once for this get
113 * operation.
114 */
115 if (refcount_inc_not_zero(&node->refs)) {
116 refcount_inc(&node->refs);
117 btrfs_inode->delayed_node = node;
118 } else {
119 node = NULL;
120 }
121
100 spin_unlock(&root->inode_lock); 122 spin_unlock(&root->inode_lock);
101 return node; 123 return node;
102 } 124 }
@@ -254,17 +276,18 @@ static void __btrfs_release_delayed_node(
254 mutex_unlock(&delayed_node->mutex); 276 mutex_unlock(&delayed_node->mutex);
255 277
256 if (refcount_dec_and_test(&delayed_node->refs)) { 278 if (refcount_dec_and_test(&delayed_node->refs)) {
257 bool free = false;
258 struct btrfs_root *root = delayed_node->root; 279 struct btrfs_root *root = delayed_node->root;
280
259 spin_lock(&root->inode_lock); 281 spin_lock(&root->inode_lock);
260 if (refcount_read(&delayed_node->refs) == 0) { 282 /*
261 radix_tree_delete(&root->delayed_nodes_tree, 283 * Once our refcount goes to zero, nobody is allowed to bump it
262 delayed_node->inode_id); 284 * back up. We can delete it now.
263 free = true; 285 */
264 } 286 ASSERT(refcount_read(&delayed_node->refs) == 0);
287 radix_tree_delete(&root->delayed_nodes_tree,
288 delayed_node->inode_id);
265 spin_unlock(&root->inode_lock); 289 spin_unlock(&root->inode_lock);
266 if (free) 290 kmem_cache_free(delayed_node_cache, delayed_node);
267 kmem_cache_free(delayed_node_cache, delayed_node);
268 } 291 }
269} 292}
270 293
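
The btrfs_get_delayed_node() change above relies on refcount_inc_not_zero(): a lookup may only take a reference while the count is still nonzero, so a node racing with __btrfs_release_delayed_node() is treated as absent rather than resurrected. A minimal userspace sketch of that pattern (not kernel code; struct node and cache_lookup are illustrative names) follows.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	atomic_int refs;		/* analogous to delayed_node->refs */
};

/* Take a reference only if the count has not already dropped to zero. */
static bool refcount_inc_not_zero(atomic_int *refs)
{
	int old = atomic_load(refs);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refs, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;			/* object is being released */
}

static struct node *cache_lookup(struct node *candidate)
{
	/*
	 * In the kernel this runs under root->inode_lock, which keeps the
	 * radix tree stable but cannot stop a release that already saw the
	 * count reach zero - hence the _not_zero check.
	 */
	if (candidate && refcount_inc_not_zero(&candidate->refs))
		return candidate;
	return NULL;
}

int main(void)
{
	struct node live = { .refs = 2 };
	struct node dying = { .refs = 0 };

	printf("live:  %s\n", cache_lookup(&live) ? "found" : "miss");
	printf("dying: %s\n", cache_lookup(&dying) ? "found" : "miss");
	return 0;
}
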
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 49810b70afd3..a25684287501 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -237,7 +237,6 @@ static struct btrfs_device *__alloc_device(void)
237 kfree(dev); 237 kfree(dev);
238 return ERR_PTR(-ENOMEM); 238 return ERR_PTR(-ENOMEM);
239 } 239 }
240 bio_get(dev->flush_bio);
241 240
242 INIT_LIST_HEAD(&dev->dev_list); 241 INIT_LIST_HEAD(&dev->dev_list);
243 INIT_LIST_HEAD(&dev->dev_alloc_list); 242 INIT_LIST_HEAD(&dev->dev_alloc_list);
diff --git a/fs/exec.c b/fs/exec.c
index 5688b5e1b937..7eb8d21bcab9 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1349,9 +1349,14 @@ void setup_new_exec(struct linux_binprm * bprm)
1349 1349
1350 current->sas_ss_sp = current->sas_ss_size = 0; 1350 current->sas_ss_sp = current->sas_ss_size = 0;
1351 1351
1352 /* Figure out dumpability. */ 1352 /*
1353 * Figure out dumpability. Note that this checking only of current
1354 * is wrong, but userspace depends on it. This should be testing
1355 * bprm->secureexec instead.
1356 */
1353 if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP || 1357 if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
1354 bprm->secureexec) 1358 !(uid_eq(current_euid(), current_uid()) &&
1359 gid_eq(current_egid(), current_gid())))
1355 set_dumpable(current->mm, suid_dumpable); 1360 set_dumpable(current->mm, suid_dumpable);
1356 else 1361 else
1357 set_dumpable(current->mm, SUID_DUMP_USER); 1362 set_dumpable(current->mm, SUID_DUMP_USER);
diff --git a/fs/super.c b/fs/super.c
index 7ff1349609e4..06bd25d90ba5 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -517,7 +517,11 @@ retry:
517 hlist_add_head(&s->s_instances, &type->fs_supers); 517 hlist_add_head(&s->s_instances, &type->fs_supers);
518 spin_unlock(&sb_lock); 518 spin_unlock(&sb_lock);
519 get_filesystem(type); 519 get_filesystem(type);
520 register_shrinker(&s->s_shrink); 520 err = register_shrinker(&s->s_shrink);
521 if (err) {
522 deactivate_locked_super(s);
523 s = ERR_PTR(err);
524 }
521 return s; 525 return s;
522} 526}
523 527
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index ac9a4e65ca49..41a75f9f23fd 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -570,11 +570,14 @@ out:
570static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, 570static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
571 struct userfaultfd_wait_queue *ewq) 571 struct userfaultfd_wait_queue *ewq)
572{ 572{
573 struct userfaultfd_ctx *release_new_ctx;
574
573 if (WARN_ON_ONCE(current->flags & PF_EXITING)) 575 if (WARN_ON_ONCE(current->flags & PF_EXITING))
574 goto out; 576 goto out;
575 577
576 ewq->ctx = ctx; 578 ewq->ctx = ctx;
577 init_waitqueue_entry(&ewq->wq, current); 579 init_waitqueue_entry(&ewq->wq, current);
580 release_new_ctx = NULL;
578 581
579 spin_lock(&ctx->event_wqh.lock); 582 spin_lock(&ctx->event_wqh.lock);
580 /* 583 /*
@@ -601,8 +604,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
601 new = (struct userfaultfd_ctx *) 604 new = (struct userfaultfd_ctx *)
602 (unsigned long) 605 (unsigned long)
603 ewq->msg.arg.reserved.reserved1; 606 ewq->msg.arg.reserved.reserved1;
604 607 release_new_ctx = new;
605 userfaultfd_ctx_put(new);
606 } 608 }
607 break; 609 break;
608 } 610 }
@@ -617,6 +619,20 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
617 __set_current_state(TASK_RUNNING); 619 __set_current_state(TASK_RUNNING);
618 spin_unlock(&ctx->event_wqh.lock); 620 spin_unlock(&ctx->event_wqh.lock);
619 621
622 if (release_new_ctx) {
623 struct vm_area_struct *vma;
624 struct mm_struct *mm = release_new_ctx->mm;
625
626 /* the various vma->vm_userfaultfd_ctx still points to it */
627 down_write(&mm->mmap_sem);
628 for (vma = mm->mmap; vma; vma = vma->vm_next)
629 if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx)
630 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
631 up_write(&mm->mmap_sem);
632
633 userfaultfd_ctx_put(release_new_ctx);
634 }
635
620 /* 636 /*
621 * ctx may go away after this if the userfault pseudo fd is 637 * ctx may go away after this if the userfault pseudo fd is
622 * already released. 638 * already released.
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 21e2d70884e1..4fc526a27a94 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -399,7 +399,7 @@ xfs_map_blocks(
399 (ip->i_df.if_flags & XFS_IFEXTENTS)); 399 (ip->i_df.if_flags & XFS_IFEXTENTS));
400 ASSERT(offset <= mp->m_super->s_maxbytes); 400 ASSERT(offset <= mp->m_super->s_maxbytes);
401 401
402 if ((xfs_ufsize_t)offset + count > mp->m_super->s_maxbytes) 402 if (offset > mp->m_super->s_maxbytes - count)
403 count = mp->m_super->s_maxbytes - offset; 403 count = mp->m_super->s_maxbytes - offset;
404 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); 404 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
405 offset_fsb = XFS_B_TO_FSBT(mp, offset); 405 offset_fsb = XFS_B_TO_FSBT(mp, offset);
@@ -1312,7 +1312,7 @@ xfs_get_blocks(
1312 lockmode = xfs_ilock_data_map_shared(ip); 1312 lockmode = xfs_ilock_data_map_shared(ip);
1313 1313
1314 ASSERT(offset <= mp->m_super->s_maxbytes); 1314 ASSERT(offset <= mp->m_super->s_maxbytes);
1315 if ((xfs_ufsize_t)offset + size > mp->m_super->s_maxbytes) 1315 if (offset > mp->m_super->s_maxbytes - size)
1316 size = mp->m_super->s_maxbytes - offset; 1316 size = mp->m_super->s_maxbytes - offset;
1317 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size); 1317 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
1318 offset_fsb = XFS_B_TO_FSBT(mp, offset); 1318 offset_fsb = XFS_B_TO_FSBT(mp, offset);
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 7ab52a8bc0a9..66e1edbfb2b2 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1006,7 +1006,7 @@ xfs_file_iomap_begin(
1006 } 1006 }
1007 1007
1008 ASSERT(offset <= mp->m_super->s_maxbytes); 1008 ASSERT(offset <= mp->m_super->s_maxbytes);
1009 if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes) 1009 if (offset > mp->m_super->s_maxbytes - length)
1010 length = mp->m_super->s_maxbytes - offset; 1010 length = mp->m_super->s_maxbytes - offset;
1011 offset_fsb = XFS_B_TO_FSBT(mp, offset); 1011 offset_fsb = XFS_B_TO_FSBT(mp, offset);
1012 end_fsb = XFS_B_TO_FSB(mp, offset + length); 1012 end_fsb = XFS_B_TO_FSB(mp, offset + length);
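
The three XFS hunks above replace "offset + count > s_maxbytes" with "offset > s_maxbytes - count": given offset <= s_maxbytes, the subtraction form cannot wrap, while the addition can overflow and let an out-of-range request through unclamped. A userspace sketch with illustrative values:

#include <stdint.h>
#include <stdio.h>

/* Clamp a request of 'count' bytes at 'offset' to the 'max' byte limit. */
static uint64_t clamp_len(uint64_t offset, uint64_t count, uint64_t max)
{
	/* Callers guarantee offset <= max, as the ASSERTs above do. */
	if (offset > max - count)
		count = max - offset;
	return count;
}

int main(void)
{
	uint64_t max = UINT64_MAX - 4096;	/* near the top of the range */

	/*
	 * "offset + count" would wrap here and compare as small; the
	 * subtraction form still sees that the request must be clamped.
	 */
	printf("%llu\n",
	       (unsigned long long)clamp_len(max - 100, 1 << 20, max));
	return 0;
}
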
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index ec952dfad359..b897b11afb2c 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -48,7 +48,7 @@
48STATIC int xfs_qm_init_quotainos(xfs_mount_t *); 48STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
49STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); 49STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
50 50
51 51STATIC void xfs_qm_destroy_quotainos(xfs_quotainfo_t *qi);
52STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp); 52STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
53/* 53/*
54 * We use the batch lookup interface to iterate over the dquots as it 54 * We use the batch lookup interface to iterate over the dquots as it
@@ -695,9 +695,17 @@ xfs_qm_init_quotainfo(
695 qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan; 695 qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
696 qinf->qi_shrinker.seeks = DEFAULT_SEEKS; 696 qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
697 qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE; 697 qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
698 register_shrinker(&qinf->qi_shrinker); 698
699 error = register_shrinker(&qinf->qi_shrinker);
700 if (error)
701 goto out_free_inos;
702
699 return 0; 703 return 0;
700 704
705out_free_inos:
706 mutex_destroy(&qinf->qi_quotaofflock);
707 mutex_destroy(&qinf->qi_tree_lock);
708 xfs_qm_destroy_quotainos(qinf);
701out_free_lru: 709out_free_lru:
702 list_lru_destroy(&qinf->qi_lru); 710 list_lru_destroy(&qinf->qi_lru);
703out_free_qinf: 711out_free_qinf:
@@ -706,7 +714,6 @@ out_free_qinf:
706 return error; 714 return error;
707} 715}
708 716
709
710/* 717/*
711 * Gets called when unmounting a filesystem or when all quotas get 718 * Gets called when unmounting a filesystem or when all quotas get
712 * turned off. 719 * turned off.
@@ -723,19 +730,8 @@ xfs_qm_destroy_quotainfo(
723 730
724 unregister_shrinker(&qi->qi_shrinker); 731 unregister_shrinker(&qi->qi_shrinker);
725 list_lru_destroy(&qi->qi_lru); 732 list_lru_destroy(&qi->qi_lru);
726 733 xfs_qm_destroy_quotainos(qi);
727 if (qi->qi_uquotaip) { 734 mutex_destroy(&qi->qi_tree_lock);
728 IRELE(qi->qi_uquotaip);
729 qi->qi_uquotaip = NULL; /* paranoia */
730 }
731 if (qi->qi_gquotaip) {
732 IRELE(qi->qi_gquotaip);
733 qi->qi_gquotaip = NULL;
734 }
735 if (qi->qi_pquotaip) {
736 IRELE(qi->qi_pquotaip);
737 qi->qi_pquotaip = NULL;
738 }
739 mutex_destroy(&qi->qi_quotaofflock); 735 mutex_destroy(&qi->qi_quotaofflock);
740 kmem_free(qi); 736 kmem_free(qi);
741 mp->m_quotainfo = NULL; 737 mp->m_quotainfo = NULL;
@@ -1600,6 +1596,24 @@ error_rele:
1600} 1596}
1601 1597
1602STATIC void 1598STATIC void
1599xfs_qm_destroy_quotainos(
1600 xfs_quotainfo_t *qi)
1601{
1602 if (qi->qi_uquotaip) {
1603 IRELE(qi->qi_uquotaip);
1604 qi->qi_uquotaip = NULL; /* paranoia */
1605 }
1606 if (qi->qi_gquotaip) {
1607 IRELE(qi->qi_gquotaip);
1608 qi->qi_gquotaip = NULL;
1609 }
1610 if (qi->qi_pquotaip) {
1611 IRELE(qi->qi_pquotaip);
1612 qi->qi_pquotaip = NULL;
1613 }
1614}
1615
1616STATIC void
1603xfs_qm_dqfree_one( 1617xfs_qm_dqfree_one(
1604 struct xfs_dquot *dqp) 1618 struct xfs_dquot *dqp)
1605{ 1619{
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 6db3b4668b1a..ffe364fa4040 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -145,9 +145,9 @@
145 145
146#define ACPI_ADDRESS_RANGE_MAX 2 146#define ACPI_ADDRESS_RANGE_MAX 2
147 147
148/* Maximum number of While() loops before abort */ 148/* Maximum time (default 30s) of While() loops before abort */
149 149
150#define ACPI_MAX_LOOP_COUNT 0x000FFFFF 150#define ACPI_MAX_LOOP_TIMEOUT 30
151 151
152/****************************************************************************** 152/******************************************************************************
153 * 153 *
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 17d61b1f2511..3c46f0ef5f7a 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -130,8 +130,9 @@ struct acpi_exception_info {
130#define AE_HEX_OVERFLOW EXCEP_ENV (0x0020) 130#define AE_HEX_OVERFLOW EXCEP_ENV (0x0020)
131#define AE_DECIMAL_OVERFLOW EXCEP_ENV (0x0021) 131#define AE_DECIMAL_OVERFLOW EXCEP_ENV (0x0021)
132#define AE_OCTAL_OVERFLOW EXCEP_ENV (0x0022) 132#define AE_OCTAL_OVERFLOW EXCEP_ENV (0x0022)
133#define AE_END_OF_TABLE EXCEP_ENV (0x0023)
133 134
134#define AE_CODE_ENV_MAX 0x0022 135#define AE_CODE_ENV_MAX 0x0023
135 136
136/* 137/*
137 * Programmer exceptions 138 * Programmer exceptions
@@ -195,7 +196,7 @@ struct acpi_exception_info {
195#define AE_AML_CIRCULAR_REFERENCE EXCEP_AML (0x001E) 196#define AE_AML_CIRCULAR_REFERENCE EXCEP_AML (0x001E)
196#define AE_AML_BAD_RESOURCE_LENGTH EXCEP_AML (0x001F) 197#define AE_AML_BAD_RESOURCE_LENGTH EXCEP_AML (0x001F)
197#define AE_AML_ILLEGAL_ADDRESS EXCEP_AML (0x0020) 198#define AE_AML_ILLEGAL_ADDRESS EXCEP_AML (0x0020)
198#define AE_AML_INFINITE_LOOP EXCEP_AML (0x0021) 199#define AE_AML_LOOP_TIMEOUT EXCEP_AML (0x0021)
199#define AE_AML_UNINITIALIZED_NODE EXCEP_AML (0x0022) 200#define AE_AML_UNINITIALIZED_NODE EXCEP_AML (0x0022)
200#define AE_AML_TARGET_TYPE EXCEP_AML (0x0023) 201#define AE_AML_TARGET_TYPE EXCEP_AML (0x0023)
201 202
@@ -275,7 +276,8 @@ static const struct acpi_exception_info acpi_gbl_exception_names_env[] = {
275 EXCEP_TXT("AE_DECIMAL_OVERFLOW", 276 EXCEP_TXT("AE_DECIMAL_OVERFLOW",
276 "Overflow during ASCII decimal-to-binary conversion"), 277 "Overflow during ASCII decimal-to-binary conversion"),
277 EXCEP_TXT("AE_OCTAL_OVERFLOW", 278 EXCEP_TXT("AE_OCTAL_OVERFLOW",
278 "Overflow during ASCII octal-to-binary conversion") 279 "Overflow during ASCII octal-to-binary conversion"),
280 EXCEP_TXT("AE_END_OF_TABLE", "Reached the end of table")
279}; 281};
280 282
281static const struct acpi_exception_info acpi_gbl_exception_names_pgm[] = { 283static const struct acpi_exception_info acpi_gbl_exception_names_pgm[] = {
@@ -368,8 +370,8 @@ static const struct acpi_exception_info acpi_gbl_exception_names_aml[] = {
368 "The length of a Resource Descriptor in the AML is incorrect"), 370 "The length of a Resource Descriptor in the AML is incorrect"),
369 EXCEP_TXT("AE_AML_ILLEGAL_ADDRESS", 371 EXCEP_TXT("AE_AML_ILLEGAL_ADDRESS",
370 "A memory, I/O, or PCI configuration address is invalid"), 372 "A memory, I/O, or PCI configuration address is invalid"),
371 EXCEP_TXT("AE_AML_INFINITE_LOOP", 373 EXCEP_TXT("AE_AML_LOOP_TIMEOUT",
372 "An apparent infinite AML While loop, method was aborted"), 374 "An AML While loop exceeded the maximum execution time"),
373 EXCEP_TXT("AE_AML_UNINITIALIZED_NODE", 375 EXCEP_TXT("AE_AML_UNINITIALIZED_NODE",
374 "A namespace node is uninitialized or unresolved"), 376 "A namespace node is uninitialized or unresolved"),
375 EXCEP_TXT("AE_AML_TARGET_TYPE", 377 EXCEP_TXT("AE_AML_TARGET_TYPE",
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 79287629c888..c9608b0b80c6 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -91,6 +91,9 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
91bool acpi_dev_found(const char *hid); 91bool acpi_dev_found(const char *hid);
92bool acpi_dev_present(const char *hid, const char *uid, s64 hrv); 92bool acpi_dev_present(const char *hid, const char *uid, s64 hrv);
93 93
94const char *
95acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv);
96
94#ifdef CONFIG_ACPI 97#ifdef CONFIG_ACPI
95 98
96#include <linux/proc_fs.h> 99#include <linux/proc_fs.h>
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index e1dd1a8d42b6..c589c3e12d90 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
46 46
47/* Current ACPICA subsystem version in YYYYMMDD format */ 47/* Current ACPICA subsystem version in YYYYMMDD format */
48 48
49#define ACPI_CA_VERSION 0x20170831 49#define ACPI_CA_VERSION 0x20171215
50 50
51#include <acpi/acconfig.h> 51#include <acpi/acconfig.h>
52#include <acpi/actypes.h> 52#include <acpi/actypes.h>
@@ -260,11 +260,11 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_osi_data, 0);
260ACPI_INIT_GLOBAL(u8, acpi_gbl_reduced_hardware, FALSE); 260ACPI_INIT_GLOBAL(u8, acpi_gbl_reduced_hardware, FALSE);
261 261
262/* 262/*
263 * Maximum number of While() loop iterations before forced method abort. 263 * Maximum timeout for While() loop iterations before forced method abort.
264 * This mechanism is intended to prevent infinite loops during interpreter 264 * This mechanism is intended to prevent infinite loops during interpreter
265 * execution within a host kernel. 265 * execution within a host kernel.
266 */ 266 */
267ACPI_INIT_GLOBAL(u32, acpi_gbl_max_loop_iterations, ACPI_MAX_LOOP_COUNT); 267ACPI_INIT_GLOBAL(u32, acpi_gbl_max_loop_iterations, ACPI_MAX_LOOP_TIMEOUT);
268 268
269/* 269/*
270 * This mechanism is used to trace a specified AML method. The method is 270 * This mechanism is used to trace a specified AML method. The method is
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 7a89e6de94da..4c304bf4d591 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -69,9 +69,10 @@
69#define ACPI_SIG_HEST "HEST" /* Hardware Error Source Table */ 69#define ACPI_SIG_HEST "HEST" /* Hardware Error Source Table */
70#define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */ 70#define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */
71#define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */ 71#define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */
72#define ACPI_SIG_PDTT "PDTT" /* Processor Debug Trigger Table */ 72#define ACPI_SIG_PDTT "PDTT" /* Platform Debug Trigger Table */
73#define ACPI_SIG_PPTT "PPTT" /* Processor Properties Topology Table */ 73#define ACPI_SIG_PPTT "PPTT" /* Processor Properties Topology Table */
74#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */ 74#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */
75#define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */
75#define ACPI_SIG_SLIT "SLIT" /* System Locality Distance Information Table */ 76#define ACPI_SIG_SLIT "SLIT" /* System Locality Distance Information Table */
76#define ACPI_SIG_SRAT "SRAT" /* System Resource Affinity Table */ 77#define ACPI_SIG_SRAT "SRAT" /* System Resource Affinity Table */
77#define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */ 78#define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */
@@ -1149,7 +1150,8 @@ enum acpi_nfit_type {
1149 ACPI_NFIT_TYPE_CONTROL_REGION = 4, 1150 ACPI_NFIT_TYPE_CONTROL_REGION = 4,
1150 ACPI_NFIT_TYPE_DATA_REGION = 5, 1151 ACPI_NFIT_TYPE_DATA_REGION = 5,
1151 ACPI_NFIT_TYPE_FLUSH_ADDRESS = 6, 1152 ACPI_NFIT_TYPE_FLUSH_ADDRESS = 6,
1152 ACPI_NFIT_TYPE_RESERVED = 7 /* 7 and greater are reserved */ 1153 ACPI_NFIT_TYPE_CAPABILITIES = 7,
1154 ACPI_NFIT_TYPE_RESERVED = 8 /* 8 and greater are reserved */
1153}; 1155};
1154 1156
1155/* 1157/*
@@ -1162,7 +1164,7 @@ struct acpi_nfit_system_address {
1162 struct acpi_nfit_header header; 1164 struct acpi_nfit_header header;
1163 u16 range_index; 1165 u16 range_index;
1164 u16 flags; 1166 u16 flags;
1165 u32 reserved; /* Reseved, must be zero */ 1167 u32 reserved; /* Reserved, must be zero */
1166 u32 proximity_domain; 1168 u32 proximity_domain;
1167 u8 range_guid[16]; 1169 u8 range_guid[16];
1168 u64 address; 1170 u64 address;
@@ -1281,9 +1283,72 @@ struct acpi_nfit_flush_address {
1281 u64 hint_address[1]; /* Variable length */ 1283 u64 hint_address[1]; /* Variable length */
1282}; 1284};
1283 1285
1286/* 7: Platform Capabilities Structure */
1287
1288struct acpi_nfit_capabilities {
1289 struct acpi_nfit_header header;
1290 u8 highest_capability;
1291 u8 reserved[3]; /* Reserved, must be zero */
1292 u32 capabilities;
1293 u32 reserved2;
1294};
1295
1296/* Capabilities Flags */
1297
1298#define ACPI_NFIT_CAPABILITY_CACHE_FLUSH (1) /* 00: Cache Flush to NVDIMM capable */
1299#define ACPI_NFIT_CAPABILITY_MEM_FLUSH (1<<1) /* 01: Memory Flush to NVDIMM capable */
1300#define ACPI_NFIT_CAPABILITY_MEM_MIRRORING (1<<2) /* 02: Memory Mirroring capable */
1301
1302/*
 1303 * NFIT/NVDIMM device handle support - used as the _ADR for each NVDIMM

1304 */
1305struct nfit_device_handle {
1306 u32 handle;
1307};
1308
1309/* Device handle construction and extraction macros */
1310
1311#define ACPI_NFIT_DIMM_NUMBER_MASK 0x0000000F
1312#define ACPI_NFIT_CHANNEL_NUMBER_MASK 0x000000F0
1313#define ACPI_NFIT_MEMORY_ID_MASK 0x00000F00
1314#define ACPI_NFIT_SOCKET_ID_MASK 0x0000F000
1315#define ACPI_NFIT_NODE_ID_MASK 0x0FFF0000
1316
1317#define ACPI_NFIT_DIMM_NUMBER_OFFSET 0
1318#define ACPI_NFIT_CHANNEL_NUMBER_OFFSET 4
1319#define ACPI_NFIT_MEMORY_ID_OFFSET 8
1320#define ACPI_NFIT_SOCKET_ID_OFFSET 12
1321#define ACPI_NFIT_NODE_ID_OFFSET 16
1322
1323/* Macro to construct a NFIT/NVDIMM device handle */
1324
1325#define ACPI_NFIT_BUILD_DEVICE_HANDLE(dimm, channel, memory, socket, node) \
1326 ((dimm) | \
1327 ((channel) << ACPI_NFIT_CHANNEL_NUMBER_OFFSET) | \
1328 ((memory) << ACPI_NFIT_MEMORY_ID_OFFSET) | \
1329 ((socket) << ACPI_NFIT_SOCKET_ID_OFFSET) | \
1330 ((node) << ACPI_NFIT_NODE_ID_OFFSET))
1331
1332/* Macros to extract individual fields from a NFIT/NVDIMM device handle */
1333
1334#define ACPI_NFIT_GET_DIMM_NUMBER(handle) \
1335 ((handle) & ACPI_NFIT_DIMM_NUMBER_MASK)
1336
1337#define ACPI_NFIT_GET_CHANNEL_NUMBER(handle) \
1338 (((handle) & ACPI_NFIT_CHANNEL_NUMBER_MASK) >> ACPI_NFIT_CHANNEL_NUMBER_OFFSET)
1339
1340#define ACPI_NFIT_GET_MEMORY_ID(handle) \
1341 (((handle) & ACPI_NFIT_MEMORY_ID_MASK) >> ACPI_NFIT_MEMORY_ID_OFFSET)
1342
1343#define ACPI_NFIT_GET_SOCKET_ID(handle) \
1344 (((handle) & ACPI_NFIT_SOCKET_ID_MASK) >> ACPI_NFIT_SOCKET_ID_OFFSET)
1345
1346#define ACPI_NFIT_GET_NODE_ID(handle) \
1347 (((handle) & ACPI_NFIT_NODE_ID_MASK) >> ACPI_NFIT_NODE_ID_OFFSET)
1348
1284/******************************************************************************* 1349/*******************************************************************************
1285 * 1350 *
1286 * PDTT - Processor Debug Trigger Table (ACPI 6.2) 1351 * PDTT - Platform Debug Trigger Table (ACPI 6.2)
1287 * Version 0 1352 * Version 0
1288 * 1353 *
1289 ******************************************************************************/ 1354 ******************************************************************************/
@@ -1301,14 +1366,14 @@ struct acpi_table_pdtt {
1301 * starting at array_offset. 1366 * starting at array_offset.
1302 */ 1367 */
1303struct acpi_pdtt_channel { 1368struct acpi_pdtt_channel {
1304 u16 sub_channel_id; 1369 u8 subchannel_id;
1370 u8 flags;
1305}; 1371};
1306 1372
1307/* Mask and Flags for above */ 1373/* Flags for above */
1308 1374
1309#define ACPI_PDTT_SUBCHANNEL_ID_MASK 0x00FF 1375#define ACPI_PDTT_RUNTIME_TRIGGER (1)
1310#define ACPI_PDTT_RUNTIME_TRIGGER (1<<8) 1376#define ACPI_PDTT_WAIT_COMPLETION (1<<1)
1311#define ACPI_PPTT_WAIT_COMPLETION (1<<9)
1312 1377
1313/******************************************************************************* 1378/*******************************************************************************
1314 * 1379 *
@@ -1376,6 +1441,20 @@ struct acpi_pptt_cache {
1376#define ACPI_PPTT_MASK_CACHE_TYPE (0x0C) /* Cache type */ 1441#define ACPI_PPTT_MASK_CACHE_TYPE (0x0C) /* Cache type */
1377#define ACPI_PPTT_MASK_WRITE_POLICY (0x10) /* Write policy */ 1442#define ACPI_PPTT_MASK_WRITE_POLICY (0x10) /* Write policy */
1378 1443
1444/* Attributes describing cache */
1445#define ACPI_PPTT_CACHE_READ_ALLOCATE (0x0) /* Cache line is allocated on read */
1446#define ACPI_PPTT_CACHE_WRITE_ALLOCATE (0x01) /* Cache line is allocated on write */
1447#define ACPI_PPTT_CACHE_RW_ALLOCATE (0x02) /* Cache line is allocated on read and write */
1448#define ACPI_PPTT_CACHE_RW_ALLOCATE_ALT (0x03) /* Alternate representation of above */
1449
1450#define ACPI_PPTT_CACHE_TYPE_DATA (0x0) /* Data cache */
1451#define ACPI_PPTT_CACHE_TYPE_INSTR (1<<2) /* Instruction cache */
1452#define ACPI_PPTT_CACHE_TYPE_UNIFIED (2<<2) /* Unified I & D cache */
1453#define ACPI_PPTT_CACHE_TYPE_UNIFIED_ALT (3<<2) /* Alternate representation of above */
1454
1455#define ACPI_PPTT_CACHE_POLICY_WB (0x0) /* Cache is write back */
1456#define ACPI_PPTT_CACHE_POLICY_WT (1<<4) /* Cache is write through */
1457
1379/* 2: ID Structure */ 1458/* 2: ID Structure */
1380 1459
1381struct acpi_pptt_id { 1460struct acpi_pptt_id {
@@ -1405,6 +1484,68 @@ struct acpi_table_sbst {
1405 1484
1406/******************************************************************************* 1485/*******************************************************************************
1407 * 1486 *
1487 * SDEV - Secure Devices Table (ACPI 6.2)
1488 * Version 1
1489 *
1490 ******************************************************************************/
1491
1492struct acpi_table_sdev {
1493 struct acpi_table_header header; /* Common ACPI table header */
1494};
1495
1496struct acpi_sdev_header {
1497 u8 type;
1498 u8 flags;
1499 u16 length;
1500};
1501
1502/* Values for subtable type above */
1503
1504enum acpi_sdev_type {
1505 ACPI_SDEV_TYPE_NAMESPACE_DEVICE = 0,
1506 ACPI_SDEV_TYPE_PCIE_ENDPOINT_DEVICE = 1,
1507 ACPI_SDEV_TYPE_RESERVED = 2 /* 2 and greater are reserved */
1508};
1509
1510/* Values for flags above */
1511
1512#define ACPI_SDEV_HANDOFF_TO_UNSECURE_OS (1)
1513
1514/*
1515 * SDEV subtables
1516 */
1517
1518/* 0: Namespace Device Based Secure Device Structure */
1519
1520struct acpi_sdev_namespace {
1521 struct acpi_sdev_header header;
1522 u16 device_id_offset;
1523 u16 device_id_length;
1524 u16 vendor_data_offset;
1525 u16 vendor_data_length;
1526};
1527
1528/* 1: PCIe Endpoint Device Based Device Structure */
1529
1530struct acpi_sdev_pcie {
1531 struct acpi_sdev_header header;
1532 u16 segment;
1533 u16 start_bus;
1534 u16 path_offset;
1535 u16 path_length;
1536 u16 vendor_data_offset;
1537 u16 vendor_data_length;
1538};
1539
1540/* 1a: PCIe Endpoint path entry */
1541
1542struct acpi_sdev_pcie_path {
1543 u8 device;
1544 u8 function;
1545};
1546
1547/*******************************************************************************
1548 *
1408 * SLIT - System Locality Distance Information Table 1549 * SLIT - System Locality Distance Information Table
1409 * Version 1 1550 * Version 1
1410 * 1551 *
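
The actbl1.h additions above define a packed NFIT/NVDIMM device handle plus build/extract macros. A quick standalone illustration of how they compose and decompose a handle (field values are arbitrary examples, and only two of the GET macros are reproduced here):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ACPI_NFIT_CHANNEL_NUMBER_OFFSET		4
#define ACPI_NFIT_MEMORY_ID_OFFSET		8
#define ACPI_NFIT_SOCKET_ID_OFFSET		12
#define ACPI_NFIT_NODE_ID_OFFSET		16

#define ACPI_NFIT_BUILD_DEVICE_HANDLE(dimm, channel, memory, socket, node) \
	((dimm) |							    \
	 ((channel) << ACPI_NFIT_CHANNEL_NUMBER_OFFSET) |		    \
	 ((memory) << ACPI_NFIT_MEMORY_ID_OFFSET) |			    \
	 ((socket) << ACPI_NFIT_SOCKET_ID_OFFSET) |			    \
	 ((node) << ACPI_NFIT_NODE_ID_OFFSET))

#define ACPI_NFIT_GET_DIMM_NUMBER(handle)	((handle) & 0x0000000F)
#define ACPI_NFIT_GET_NODE_ID(handle) \
	(((handle) & 0x0FFF0000) >> ACPI_NFIT_NODE_ID_OFFSET)

int main(void)
{
	/* dimm 2, channel 1, memory controller 0, socket 3, node 7 */
	uint32_t handle = ACPI_NFIT_BUILD_DEVICE_HANDLE(2, 1, 0, 3, 7);

	printf("handle 0x%08x: dimm %u node %u\n", (unsigned int)handle,
	       (unsigned int)ACPI_NFIT_GET_DIMM_NUMBER(handle),
	       (unsigned int)ACPI_NFIT_GET_NODE_ID(handle));
	assert(ACPI_NFIT_GET_DIMM_NUMBER(handle) == 2);
	assert(ACPI_NFIT_GET_NODE_ID(handle) == 7);
	return 0;
}
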
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index 686b6f8c09dc..0d60d5df14f8 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -810,6 +810,7 @@ struct acpi_iort_smmu_v3 {
810 u8 pxm; 810 u8 pxm;
811 u8 reserved1; 811 u8 reserved1;
812 u16 reserved2; 812 u16 reserved2;
813 u32 id_mapping_index;
813}; 814};
814 815
815/* Values for Model field above */ 816/* Values for Model field above */
@@ -1246,6 +1247,8 @@ enum acpi_spmi_interface_types {
1246 * TCPA - Trusted Computing Platform Alliance table 1247 * TCPA - Trusted Computing Platform Alliance table
1247 * Version 2 1248 * Version 2
1248 * 1249 *
1250 * TCG Hardware Interface Table for TPM 1.2 Clients and Servers
1251 *
1249 * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0", 1252 * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0",
1250 * Version 1.2, Revision 8 1253 * Version 1.2, Revision 8
1251 * February 27, 2017 1254 * February 27, 2017
@@ -1310,6 +1313,8 @@ struct acpi_table_tcpa_server {
1310 * TPM2 - Trusted Platform Module (TPM) 2.0 Hardware Interface Table 1313 * TPM2 - Trusted Platform Module (TPM) 2.0 Hardware Interface Table
1311 * Version 4 1314 * Version 4
1312 * 1315 *
1316 * TCG Hardware Interface Table for TPM 2.0 Clients and Servers
1317 *
1313 * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0", 1318 * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0",
1314 * Version 1.2, Revision 8 1319 * Version 1.2, Revision 8
1315 * February 27, 2017 1320 * February 27, 2017
@@ -1329,15 +1334,23 @@ struct acpi_table_tpm2 {
1329/* Values for start_method above */ 1334/* Values for start_method above */
1330 1335
1331#define ACPI_TPM2_NOT_ALLOWED 0 1336#define ACPI_TPM2_NOT_ALLOWED 0
1337#define ACPI_TPM2_RESERVED1 1
1332#define ACPI_TPM2_START_METHOD 2 1338#define ACPI_TPM2_START_METHOD 2
1339#define ACPI_TPM2_RESERVED3 3
1340#define ACPI_TPM2_RESERVED4 4
1341#define ACPI_TPM2_RESERVED5 5
1333#define ACPI_TPM2_MEMORY_MAPPED 6 1342#define ACPI_TPM2_MEMORY_MAPPED 6
1334#define ACPI_TPM2_COMMAND_BUFFER 7 1343#define ACPI_TPM2_COMMAND_BUFFER 7
1335#define ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD 8 1344#define ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD 8
1345#define ACPI_TPM2_RESERVED9 9
1346#define ACPI_TPM2_RESERVED10 10
1336#define ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC 11 /* V1.2 Rev 8 */ 1347#define ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC 11 /* V1.2 Rev 8 */
1348#define ACPI_TPM2_RESERVED 12
1337 1349
1338/* Trailer appears after any start_method subtables */ 1350/* Optional trailer appears after any start_method subtables */
1339 1351
1340struct acpi_tpm2_trailer { 1352struct acpi_tpm2_trailer {
1353 u8 method_parameters[12];
1341 u32 minimum_log_length; /* Minimum length for the event log area */ 1354 u32 minimum_log_length; /* Minimum length for the event log area */
1342 u64 log_address; /* Address of the event log area */ 1355 u64 log_address; /* Address of the event log area */
1343}; 1356};
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 4f077edb9b81..31f1be74dd16 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -468,6 +468,8 @@ typedef void *acpi_handle; /* Actually a ptr to a NS Node */
468#define ACPI_NSEC_PER_MSEC 1000000L 468#define ACPI_NSEC_PER_MSEC 1000000L
469#define ACPI_NSEC_PER_SEC 1000000000L 469#define ACPI_NSEC_PER_SEC 1000000000L
470 470
471#define ACPI_TIME_AFTER(a, b) ((s64)((b) - (a)) < 0)
472
471/* Owner IDs are used to track namespace nodes for selective deletion */ 473/* Owner IDs are used to track namespace nodes for selective deletion */
472 474
473typedef u8 acpi_owner_id; 475typedef u8 acpi_owner_id;
@@ -1299,6 +1301,8 @@ typedef enum {
1299#define ACPI_OSI_WIN_7 0x0B 1301#define ACPI_OSI_WIN_7 0x0B
1300#define ACPI_OSI_WIN_8 0x0C 1302#define ACPI_OSI_WIN_8 0x0C
1301#define ACPI_OSI_WIN_10 0x0D 1303#define ACPI_OSI_WIN_10 0x0D
1304#define ACPI_OSI_WIN_10_RS1 0x0E
1305#define ACPI_OSI_WIN_10_RS2 0x0F
1302 1306
1303/* Definitions of getopt */ 1307/* Definitions of getopt */
1304 1308
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 38d9c5861ed8..f38227a78eae 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -18,6 +18,7 @@
18#include <linux/if_alg.h> 18#include <linux/if_alg.h>
19#include <linux/scatterlist.h> 19#include <linux/scatterlist.h>
20#include <linux/types.h> 20#include <linux/types.h>
21#include <linux/atomic.h>
21#include <net/sock.h> 22#include <net/sock.h>
22 23
23#include <crypto/aead.h> 24#include <crypto/aead.h>
@@ -150,7 +151,7 @@ struct af_alg_ctx {
150 struct crypto_wait wait; 151 struct crypto_wait wait;
151 152
152 size_t used; 153 size_t used;
153 size_t rcvused; 154 atomic_t rcvused;
154 155
155 bool more; 156 bool more;
156 bool merge; 157 bool merge;
@@ -215,7 +216,7 @@ static inline int af_alg_rcvbuf(struct sock *sk)
215 struct af_alg_ctx *ctx = ask->private; 216 struct af_alg_ctx *ctx = ask->private;
216 217
217 return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) - 218 return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) -
218 ctx->rcvused, 0); 219 atomic_read(&ctx->rcvused), 0);
219} 220}
220 221
221/** 222/**
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index dc1ebfeeb5ec..d918f1ea84e6 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -640,6 +640,12 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
640 return false; 640 return false;
641} 641}
642 642
643static inline const char *
644acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv)
645{
646 return NULL;
647}
648
643static inline bool is_acpi_node(struct fwnode_handle *fwnode) 649static inline bool is_acpi_node(struct fwnode_handle *fwnode)
644{ 650{
645 return false; 651 return false;
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e55e4255a210..0b25cf87b6d6 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -43,7 +43,14 @@ struct bpf_map_ops {
43}; 43};
44 44
45struct bpf_map { 45struct bpf_map {
46 atomic_t refcnt; 46 /* 1st cacheline with read-mostly members of which some
47 * are also accessed in fast-path (e.g. ops, max_entries).
48 */
49 const struct bpf_map_ops *ops ____cacheline_aligned;
50 struct bpf_map *inner_map_meta;
51#ifdef CONFIG_SECURITY
52 void *security;
53#endif
47 enum bpf_map_type map_type; 54 enum bpf_map_type map_type;
48 u32 key_size; 55 u32 key_size;
49 u32 value_size; 56 u32 value_size;
@@ -52,15 +59,17 @@ struct bpf_map {
52 u32 pages; 59 u32 pages;
53 u32 id; 60 u32 id;
54 int numa_node; 61 int numa_node;
55 struct user_struct *user; 62 bool unpriv_array;
56 const struct bpf_map_ops *ops; 63 /* 7 bytes hole */
57 struct work_struct work; 64
65 /* 2nd cacheline with misc members to avoid false sharing
66 * particularly with refcounting.
67 */
68 struct user_struct *user ____cacheline_aligned;
69 atomic_t refcnt;
58 atomic_t usercnt; 70 atomic_t usercnt;
59 struct bpf_map *inner_map_meta; 71 struct work_struct work;
60 char name[BPF_OBJ_NAME_LEN]; 72 char name[BPF_OBJ_NAME_LEN];
61#ifdef CONFIG_SECURITY
62 void *security;
63#endif
64}; 73};
65 74
66/* function argument constraints */ 75/* function argument constraints */
@@ -221,6 +230,7 @@ struct bpf_prog_aux {
221struct bpf_array { 230struct bpf_array {
222 struct bpf_map map; 231 struct bpf_map map;
223 u32 elem_size; 232 u32 elem_size;
233 u32 index_mask;
224 /* 'ownership' of prog_array is claimed by the first program that 234 /* 'ownership' of prog_array is claimed by the first program that
225 * is going to use this map or by the first program which FD is stored 235 * is going to use this map or by the first program which FD is stored
226 * in the map to make sure that all callers and callees have the same 236 * in the map to make sure that all callers and callees have the same
@@ -419,6 +429,8 @@ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
419 attr->numa_node : NUMA_NO_NODE; 429 attr->numa_node : NUMA_NO_NODE;
420} 430}
421 431
432struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
433
422#else /* !CONFIG_BPF_SYSCALL */ 434#else /* !CONFIG_BPF_SYSCALL */
423static inline struct bpf_prog *bpf_prog_get(u32 ufd) 435static inline struct bpf_prog *bpf_prog_get(u32 ufd)
424{ 436{
@@ -506,6 +518,12 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
506{ 518{
507 return 0; 519 return 0;
508} 520}
521
522static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
523 enum bpf_prog_type type)
524{
525 return ERR_PTR(-EOPNOTSUPP);
526}
509#endif /* CONFIG_BPF_SYSCALL */ 527#endif /* CONFIG_BPF_SYSCALL */
510 528
511static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, 529static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
@@ -514,6 +532,8 @@ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
514 return bpf_prog_get_type_dev(ufd, type, false); 532 return bpf_prog_get_type_dev(ufd, type, false);
515} 533}
516 534
535bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
536
517int bpf_prog_offload_compile(struct bpf_prog *prog); 537int bpf_prog_offload_compile(struct bpf_prog *prog);
518void bpf_prog_offload_destroy(struct bpf_prog *prog); 538void bpf_prog_offload_destroy(struct bpf_prog *prog);
519 539
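
The bpf_map relayout above is a false-sharing fix: read-mostly members (ops, sizes) are grouped on one cache line and the frequently-written refcounts start a second, ____cacheline_aligned, line. A userspace sketch of the same idea using C11 alignas; the struct, field names, and the 64-byte line size are illustrative assumptions.

#include <stdalign.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct map {
	/* read-mostly fields, consulted on every lookup */
	alignas(64) const void *ops;
	unsigned int key_size;
	unsigned int value_size;

	/* frequently written fields start their own cache line */
	alignas(64) atomic_int refcnt;
	atomic_int usercnt;
};

int main(void)
{
	printf("ops at %zu, refcnt at %zu (struct size %zu)\n",
	       offsetof(struct map, ops), offsetof(struct map, refcnt),
	       sizeof(struct map));
	return 0;
}
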
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 94a59ba7d422..519e94915d18 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -32,7 +32,6 @@ struct completion {
32#define init_completion(x) __init_completion(x) 32#define init_completion(x) __init_completion(x)
33static inline void complete_acquire(struct completion *x) {} 33static inline void complete_acquire(struct completion *x) {}
34static inline void complete_release(struct completion *x) {} 34static inline void complete_release(struct completion *x) {}
35static inline void complete_release_commit(struct completion *x) {}
36 35
37#define COMPLETION_INITIALIZER(work) \ 36#define COMPLETION_INITIALIZER(work) \
38 { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } 37 { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index a04ef7c15c6a..7b01bc11c692 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -47,6 +47,13 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr);
47extern int cpu_add_dev_attr_group(struct attribute_group *attrs); 47extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
48extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); 48extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
49 49
50extern ssize_t cpu_show_meltdown(struct device *dev,
51 struct device_attribute *attr, char *buf);
52extern ssize_t cpu_show_spectre_v1(struct device *dev,
53 struct device_attribute *attr, char *buf);
54extern ssize_t cpu_show_spectre_v2(struct device *dev,
55 struct device_attribute *attr, char *buf);
56
50extern __printf(4, 5) 57extern __printf(4, 5)
51struct device *cpu_device_create(struct device *parent, void *drvdata, 58struct device *cpu_device_create(struct device *parent, void *drvdata,
52 const struct attribute_group **groups, 59 const struct attribute_group **groups,
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index 06097ef30449..b511f6d24b42 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -42,6 +42,8 @@ phys_addr_t paddr_vmcoreinfo_note(void);
42 vmcoreinfo_append_str("PAGESIZE=%ld\n", value) 42 vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
43#define VMCOREINFO_SYMBOL(name) \ 43#define VMCOREINFO_SYMBOL(name) \
44 vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name) 44 vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
45#define VMCOREINFO_SYMBOL_ARRAY(name) \
46 vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)name)
45#define VMCOREINFO_SIZE(name) \ 47#define VMCOREINFO_SIZE(name) \
46 vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \ 48 vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
47 (unsigned long)sizeof(name)) 49 (unsigned long)sizeof(name))
diff --git a/include/linux/efi.h b/include/linux/efi.h
index d813f7b04da7..29fdf8029cf6 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -140,11 +140,13 @@ struct efi_boot_memmap {
140 140
141struct capsule_info { 141struct capsule_info {
142 efi_capsule_header_t header; 142 efi_capsule_header_t header;
143 efi_capsule_header_t *capsule;
143 int reset_type; 144 int reset_type;
144 long index; 145 long index;
145 size_t count; 146 size_t count;
146 size_t total_size; 147 size_t total_size;
147 phys_addr_t *pages; 148 struct page **pages;
149 phys_addr_t *phys;
148 size_t page_bytes_remain; 150 size_t page_bytes_remain;
149}; 151};
150 152
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index f4ff47d4a893..fe0c349684fa 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -755,7 +755,7 @@ bool fscache_maybe_release_page(struct fscache_cookie *cookie,
755{ 755{
756 if (fscache_cookie_valid(cookie) && PageFsCache(page)) 756 if (fscache_cookie_valid(cookie) && PageFsCache(page))
757 return __fscache_maybe_release_page(cookie, page, gfp); 757 return __fscache_maybe_release_page(cookie, page, gfp);
758 return false; 758 return true;
759} 759}
760 760
761/** 761/**
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 46cb57d5eb13..1b3996ff3f16 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -27,22 +27,18 @@
27# define trace_hardirq_enter() \ 27# define trace_hardirq_enter() \
28do { \ 28do { \
29 current->hardirq_context++; \ 29 current->hardirq_context++; \
30 crossrelease_hist_start(XHLOCK_HARD); \
31} while (0) 30} while (0)
32# define trace_hardirq_exit() \ 31# define trace_hardirq_exit() \
33do { \ 32do { \
34 current->hardirq_context--; \ 33 current->hardirq_context--; \
35 crossrelease_hist_end(XHLOCK_HARD); \
36} while (0) 34} while (0)
37# define lockdep_softirq_enter() \ 35# define lockdep_softirq_enter() \
38do { \ 36do { \
39 current->softirq_context++; \ 37 current->softirq_context++; \
40 crossrelease_hist_start(XHLOCK_SOFT); \
41} while (0) 38} while (0)
42# define lockdep_softirq_exit() \ 39# define lockdep_softirq_exit() \
43do { \ 40do { \
44 current->softirq_context--; \ 41 current->softirq_context--; \
45 crossrelease_hist_end(XHLOCK_SOFT); \
46} while (0) 42} while (0)
47# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, 43# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
48#else 44#else
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index f8109ddb5ef1..ff855ed965fb 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -47,6 +47,17 @@ enum {
47 47
48 /* region flag indicating to direct-map persistent memory by default */ 48 /* region flag indicating to direct-map persistent memory by default */
49 ND_REGION_PAGEMAP = 0, 49 ND_REGION_PAGEMAP = 0,
50 /*
51 * Platform ensures entire CPU store data path is flushed to pmem on
52 * system power loss.
53 */
54 ND_REGION_PERSIST_CACHE = 1,
55 /*
56 * Platform provides mechanisms to automatically flush outstanding
57 * write data from memory controller to pmem on system power loss.
58 * (ADR)
59 */
60 ND_REGION_PERSIST_MEMCTRL = 2,
50 61
51 /* mark newly adjusted resources as requiring a label update */ 62 /* mark newly adjusted resources as requiring a label update */
52 DPA_RESOURCE_ADJUSTED = 1 << 0, 63 DPA_RESOURCE_ADJUSTED = 1 << 0,
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 2e75dc34bff5..3251d9c0d313 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -475,8 +475,6 @@ enum xhlock_context_t {
475#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \ 475#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
476 { .name = (_name), .key = (void *)(_key), } 476 { .name = (_name), .key = (void *)(_key), }
477 477
478static inline void crossrelease_hist_start(enum xhlock_context_t c) {}
479static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
480static inline void lockdep_invariant_state(bool force) {} 478static inline void lockdep_invariant_state(bool force) {}
481static inline void lockdep_init_task(struct task_struct *task) {} 479static inline void lockdep_init_task(struct task_struct *task) {}
482static inline void lockdep_free_task(struct task_struct *task) {} 480static inline void lockdep_free_task(struct task_struct *task) {}
diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h
index ff3642d267f7..94081e9a5010 100644
--- a/include/linux/sh_eth.h
+++ b/include/linux/sh_eth.h
@@ -17,7 +17,6 @@ struct sh_eth_plat_data {
17 unsigned char mac_addr[ETH_ALEN]; 17 unsigned char mac_addr[ETH_ALEN];
18 unsigned no_ether_link:1; 18 unsigned no_ether_link:1;
19 unsigned ether_link_active_low:1; 19 unsigned ether_link_active_low:1;
20 unsigned needs_init:1;
21}; 20};
22 21
23#endif 22#endif
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 2f8f93da5dc2..9a5ccf03a59b 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -966,7 +966,7 @@ void sctp_transport_burst_limited(struct sctp_transport *);
966void sctp_transport_burst_reset(struct sctp_transport *); 966void sctp_transport_burst_reset(struct sctp_transport *);
967unsigned long sctp_transport_timeout(struct sctp_transport *); 967unsigned long sctp_transport_timeout(struct sctp_transport *);
968void sctp_transport_reset(struct sctp_transport *t); 968void sctp_transport_reset(struct sctp_transport *t);
969void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu); 969bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
970void sctp_transport_immediate_rtx(struct sctp_transport *); 970void sctp_transport_immediate_rtx(struct sctp_transport *);
971void sctp_transport_dst_release(struct sctp_transport *t); 971void sctp_transport_dst_release(struct sctp_transport *t);
972void sctp_transport_dst_confirm(struct sctp_transport *t); 972void sctp_transport_dst_confirm(struct sctp_transport *t);
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 13223396dc64..f96391e84a8a 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -146,7 +146,7 @@ struct vxlanhdr_gpe {
146 np_applied:1, 146 np_applied:1,
147 instance_applied:1, 147 instance_applied:1,
148 version:2, 148 version:2,
149reserved_flags2:2; 149 reserved_flags2:2;
150#elif defined(__BIG_ENDIAN_BITFIELD) 150#elif defined(__BIG_ENDIAN_BITFIELD)
151 u8 reserved_flags2:2, 151 u8 reserved_flags2:2,
152 version:2, 152 version:2,
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 3ee3bf7c8526..144de4d2f385 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -23,6 +23,7 @@
23#define _UAPI_LINUX_IF_ETHER_H 23#define _UAPI_LINUX_IF_ETHER_H
24 24
25#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/libc-compat.h>
26 27
27/* 28/*
28 * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble 29 * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble
@@ -149,11 +150,13 @@
149 * This is an Ethernet frame header. 150 * This is an Ethernet frame header.
150 */ 151 */
151 152
153#if __UAPI_DEF_ETHHDR
152struct ethhdr { 154struct ethhdr {
153 unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ 155 unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
154 unsigned char h_source[ETH_ALEN]; /* source ether addr */ 156 unsigned char h_source[ETH_ALEN]; /* source ether addr */
155 __be16 h_proto; /* packet type ID field */ 157 __be16 h_proto; /* packet type ID field */
156} __attribute__((packed)); 158} __attribute__((packed));
159#endif
157 160
158 161
159#endif /* _UAPI_LINUX_IF_ETHER_H */ 162#endif /* _UAPI_LINUX_IF_ETHER_H */
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
index 282875cf8056..fc29efaa918c 100644
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -168,47 +168,106 @@
168 168
169/* If we did not see any headers from any supported C libraries, 169/* If we did not see any headers from any supported C libraries,
170 * or we are being included in the kernel, then define everything 170 * or we are being included in the kernel, then define everything
171 * that we need. */ 171 * that we need. Check for previous __UAPI_* definitions to give
172 * unsupported C libraries a way to opt out of any kernel definition. */
172#else /* !defined(__GLIBC__) */ 173#else /* !defined(__GLIBC__) */
173 174
174/* Definitions for if.h */ 175/* Definitions for if.h */
176#ifndef __UAPI_DEF_IF_IFCONF
175#define __UAPI_DEF_IF_IFCONF 1 177#define __UAPI_DEF_IF_IFCONF 1
178#endif
179#ifndef __UAPI_DEF_IF_IFMAP
176#define __UAPI_DEF_IF_IFMAP 1 180#define __UAPI_DEF_IF_IFMAP 1
181#endif
182#ifndef __UAPI_DEF_IF_IFNAMSIZ
177#define __UAPI_DEF_IF_IFNAMSIZ 1 183#define __UAPI_DEF_IF_IFNAMSIZ 1
184#endif
185#ifndef __UAPI_DEF_IF_IFREQ
178#define __UAPI_DEF_IF_IFREQ 1 186#define __UAPI_DEF_IF_IFREQ 1
187#endif
179/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */ 188/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
189#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS
180#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1 190#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
191#endif
181/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */ 192/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
193#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
182#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1 194#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
195#endif
183 196
184/* Definitions for in.h */ 197/* Definitions for in.h */
198#ifndef __UAPI_DEF_IN_ADDR
185#define __UAPI_DEF_IN_ADDR 1 199#define __UAPI_DEF_IN_ADDR 1
200#endif
201#ifndef __UAPI_DEF_IN_IPPROTO
186#define __UAPI_DEF_IN_IPPROTO 1 202#define __UAPI_DEF_IN_IPPROTO 1
203#endif
204#ifndef __UAPI_DEF_IN_PKTINFO
187#define __UAPI_DEF_IN_PKTINFO 1 205#define __UAPI_DEF_IN_PKTINFO 1
206#endif
207#ifndef __UAPI_DEF_IP_MREQ
188#define __UAPI_DEF_IP_MREQ 1 208#define __UAPI_DEF_IP_MREQ 1
209#endif
210#ifndef __UAPI_DEF_SOCKADDR_IN
189#define __UAPI_DEF_SOCKADDR_IN 1 211#define __UAPI_DEF_SOCKADDR_IN 1
212#endif
213#ifndef __UAPI_DEF_IN_CLASS
190#define __UAPI_DEF_IN_CLASS 1 214#define __UAPI_DEF_IN_CLASS 1
215#endif
191 216
192/* Definitions for in6.h */ 217/* Definitions for in6.h */
218#ifndef __UAPI_DEF_IN6_ADDR
193#define __UAPI_DEF_IN6_ADDR 1 219#define __UAPI_DEF_IN6_ADDR 1
220#endif
221#ifndef __UAPI_DEF_IN6_ADDR_ALT
194#define __UAPI_DEF_IN6_ADDR_ALT 1 222#define __UAPI_DEF_IN6_ADDR_ALT 1
223#endif
224#ifndef __UAPI_DEF_SOCKADDR_IN6
195#define __UAPI_DEF_SOCKADDR_IN6 1 225#define __UAPI_DEF_SOCKADDR_IN6 1
226#endif
227#ifndef __UAPI_DEF_IPV6_MREQ
196#define __UAPI_DEF_IPV6_MREQ 1 228#define __UAPI_DEF_IPV6_MREQ 1
229#endif
230#ifndef __UAPI_DEF_IPPROTO_V6
197#define __UAPI_DEF_IPPROTO_V6 1 231#define __UAPI_DEF_IPPROTO_V6 1
232#endif
233#ifndef __UAPI_DEF_IPV6_OPTIONS
198#define __UAPI_DEF_IPV6_OPTIONS 1 234#define __UAPI_DEF_IPV6_OPTIONS 1
235#endif
236#ifndef __UAPI_DEF_IN6_PKTINFO
199#define __UAPI_DEF_IN6_PKTINFO 1 237#define __UAPI_DEF_IN6_PKTINFO 1
238#endif
239#ifndef __UAPI_DEF_IP6_MTUINFO
200#define __UAPI_DEF_IP6_MTUINFO 1 240#define __UAPI_DEF_IP6_MTUINFO 1
241#endif
201 242
202/* Definitions for ipx.h */ 243/* Definitions for ipx.h */
244#ifndef __UAPI_DEF_SOCKADDR_IPX
203#define __UAPI_DEF_SOCKADDR_IPX 1 245#define __UAPI_DEF_SOCKADDR_IPX 1
246#endif
247#ifndef __UAPI_DEF_IPX_ROUTE_DEFINITION
204#define __UAPI_DEF_IPX_ROUTE_DEFINITION 1 248#define __UAPI_DEF_IPX_ROUTE_DEFINITION 1
249#endif
250#ifndef __UAPI_DEF_IPX_INTERFACE_DEFINITION
205#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 1 251#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 1
252#endif
253#ifndef __UAPI_DEF_IPX_CONFIG_DATA
206#define __UAPI_DEF_IPX_CONFIG_DATA 1 254#define __UAPI_DEF_IPX_CONFIG_DATA 1
255#endif
256#ifndef __UAPI_DEF_IPX_ROUTE_DEF
207#define __UAPI_DEF_IPX_ROUTE_DEF 1 257#define __UAPI_DEF_IPX_ROUTE_DEF 1
258#endif
208 259
209/* Definitions for xattr.h */ 260/* Definitions for xattr.h */
261#ifndef __UAPI_DEF_XATTR
210#define __UAPI_DEF_XATTR 1 262#define __UAPI_DEF_XATTR 1
263#endif
211 264
212#endif /* __GLIBC__ */ 265#endif /* __GLIBC__ */
213 266
267/* Definitions for if_ether.h */
268/* allow libcs like musl to deactivate this, glibc does not implement this. */
269#ifndef __UAPI_DEF_ETHHDR
270#define __UAPI_DEF_ETHHDR 1
271#endif
272
214#endif /* _UAPI_LIBC_COMPAT_H */ 273#endif /* _UAPI_LIBC_COMPAT_H */
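
The pattern above is consistent: every default is now wrapped in #ifndef, so a C library that already provides the corresponding definition can pre-define the matching __UAPI_DEF_* macro to 0 before including the kernel UAPI header, while glibc and the kernel keep the old behaviour because the default stays 1. A hedged sketch of how a hypothetical libc header might use the new __UAPI_DEF_ETHHDR knob (the header name and struct layout here are invented for illustration):

    /* hypothetical libc wrapper around the kernel's if_ether.h */
    #ifndef _LIBC_IF_ETHER_H
    #define _LIBC_IF_ETHER_H

    #define __UAPI_DEF_ETHHDR 0     /* suppress the kernel's struct ethhdr */
    #include <linux/if_ether.h>     /* still provides ETH_ALEN, ETH_P_*    */

    struct ethhdr {                 /* the libc's own definition           */
            unsigned char  h_dest[ETH_ALEN];
            unsigned char  h_source[ETH_ALEN];
            unsigned short h_proto;
    };

    #endif
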
diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h
index 3fea7709a441..57ccfb32e87f 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_common.h
@@ -36,7 +36,7 @@ enum ip_conntrack_info {
36 36
37#define NF_CT_STATE_INVALID_BIT (1 << 0) 37#define NF_CT_STATE_INVALID_BIT (1 << 0)
38#define NF_CT_STATE_BIT(ctinfo) (1 << ((ctinfo) % IP_CT_IS_REPLY + 1)) 38#define NF_CT_STATE_BIT(ctinfo) (1 << ((ctinfo) % IP_CT_IS_REPLY + 1))
39#define NF_CT_STATE_UNTRACKED_BIT (1 << (IP_CT_UNTRACKED + 1)) 39#define NF_CT_STATE_UNTRACKED_BIT (1 << 6)
40 40
41/* Bitset representing status of connection. */ 41/* Bitset representing status of connection. */
42enum ip_conntrack_status { 42enum ip_conntrack_status {
diff --git a/init/Kconfig b/init/Kconfig
index 690a381adee0..a9a2e2c86671 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -461,6 +461,7 @@ endmenu # "CPU/Task time and stats accounting"
461 461
462config CPU_ISOLATION 462config CPU_ISOLATION
463 bool "CPU isolation" 463 bool "CPU isolation"
464 depends on SMP || COMPILE_TEST
464 default y 465 default y
465 help 466 help
466 Make sure that CPUs running critical tasks are not disturbed by 467 Make sure that CPUs running critical tasks are not disturbed by
@@ -1396,6 +1397,13 @@ config BPF_SYSCALL
1396 Enable the bpf() system call that allows to manipulate eBPF 1397 Enable the bpf() system call that allows to manipulate eBPF
1397 programs and maps via file descriptors. 1398 programs and maps via file descriptors.
1398 1399
1400config BPF_JIT_ALWAYS_ON
1401 bool "Permanently enable BPF JIT and remove BPF interpreter"
1402 depends on BPF_SYSCALL && HAVE_EBPF_JIT && BPF_JIT
1403 help
1404 Enables BPF JIT and removes BPF interpreter to avoid
1405 speculative execution of BPF instructions by the interpreter
1406
1399config USERFAULTFD 1407config USERFAULTFD
1400 bool "Enable userfaultfd() system call" 1408 bool "Enable userfaultfd() system call"
1401 select ANON_INODES 1409 select ANON_INODES
diff --git a/kernel/acct.c b/kernel/acct.c
index d15c0ee4d955..addf7732fb56 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -102,7 +102,7 @@ static int check_free_space(struct bsd_acct_struct *acct)
102{ 102{
103 struct kstatfs sbuf; 103 struct kstatfs sbuf;
104 104
105 if (time_is_before_jiffies(acct->needcheck)) 105 if (time_is_after_jiffies(acct->needcheck))
106 goto out; 106 goto out;
107 107
108 /* May block */ 108 /* May block */
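
The corrected test skips the potentially blocking statfs while acct->needcheck still lies in the future; the old condition was inverted. time_is_after_jiffies(a) is the wrap-safe "a is later than now" comparison, built on the same signed-difference idiom as time_after(). A small userspace sketch of that idiom (assuming 32-bit counters; illustrative only):

    #include <stdio.h>

    /* Wrap-safe "a is after b", the idiom behind time_after()/time_is_after_jiffies(). */
    static int after(unsigned int a, unsigned int b)
    {
        return (int)(b - a) < 0;
    }

    int main(void)
    {
        unsigned int jiffies   = 0xfffffff0u;     /* counter just before wrapping  */
        unsigned int needcheck = jiffies + 100;   /* deadline lands after the wrap */

        /* Still before the deadline, even though needcheck is numerically smaller. */
        printf("skip check: %d\n", after(needcheck, jiffies));   /* prints 1 */
        return 0;
    }
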
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 7c25426d3cf5..aaa319848e7d 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -53,9 +53,10 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
53{ 53{
54 bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; 54 bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
55 int numa_node = bpf_map_attr_numa_node(attr); 55 int numa_node = bpf_map_attr_numa_node(attr);
56 u32 elem_size, index_mask, max_entries;
57 bool unpriv = !capable(CAP_SYS_ADMIN);
56 struct bpf_array *array; 58 struct bpf_array *array;
57 u64 array_size; 59 u64 array_size;
58 u32 elem_size;
59 60
60 /* check sanity of attributes */ 61 /* check sanity of attributes */
61 if (attr->max_entries == 0 || attr->key_size != 4 || 62 if (attr->max_entries == 0 || attr->key_size != 4 ||
@@ -72,11 +73,20 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
72 73
73 elem_size = round_up(attr->value_size, 8); 74 elem_size = round_up(attr->value_size, 8);
74 75
76 max_entries = attr->max_entries;
77 index_mask = roundup_pow_of_two(max_entries) - 1;
78
79 if (unpriv)
80 /* round up array size to nearest power of 2,
81 * since cpu will speculate within index_mask limits
82 */
83 max_entries = index_mask + 1;
84
75 array_size = sizeof(*array); 85 array_size = sizeof(*array);
76 if (percpu) 86 if (percpu)
77 array_size += (u64) attr->max_entries * sizeof(void *); 87 array_size += (u64) max_entries * sizeof(void *);
78 else 88 else
79 array_size += (u64) attr->max_entries * elem_size; 89 array_size += (u64) max_entries * elem_size;
80 90
81 /* make sure there is no u32 overflow later in round_up() */ 91 /* make sure there is no u32 overflow later in round_up() */
82 if (array_size >= U32_MAX - PAGE_SIZE) 92 if (array_size >= U32_MAX - PAGE_SIZE)
@@ -86,6 +96,8 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
86 array = bpf_map_area_alloc(array_size, numa_node); 96 array = bpf_map_area_alloc(array_size, numa_node);
87 if (!array) 97 if (!array)
88 return ERR_PTR(-ENOMEM); 98 return ERR_PTR(-ENOMEM);
99 array->index_mask = index_mask;
100 array->map.unpriv_array = unpriv;
89 101
90 /* copy mandatory map attributes */ 102 /* copy mandatory map attributes */
91 array->map.map_type = attr->map_type; 103 array->map.map_type = attr->map_type;
@@ -121,12 +133,13 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key)
121 if (unlikely(index >= array->map.max_entries)) 133 if (unlikely(index >= array->map.max_entries))
122 return NULL; 134 return NULL;
123 135
124 return array->value + array->elem_size * index; 136 return array->value + array->elem_size * (index & array->index_mask);
125} 137}
126 138
127/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */ 139/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
128static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) 140static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
129{ 141{
142 struct bpf_array *array = container_of(map, struct bpf_array, map);
130 struct bpf_insn *insn = insn_buf; 143 struct bpf_insn *insn = insn_buf;
131 u32 elem_size = round_up(map->value_size, 8); 144 u32 elem_size = round_up(map->value_size, 8);
132 const int ret = BPF_REG_0; 145 const int ret = BPF_REG_0;
@@ -135,7 +148,12 @@ static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
135 148
136 *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value)); 149 *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
137 *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); 150 *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
138 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3); 151 if (map->unpriv_array) {
152 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
153 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
154 } else {
155 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
156 }
139 157
140 if (is_power_of_2(elem_size)) { 158 if (is_power_of_2(elem_size)) {
141 *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size)); 159 *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
@@ -157,7 +175,7 @@ static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
157 if (unlikely(index >= array->map.max_entries)) 175 if (unlikely(index >= array->map.max_entries))
158 return NULL; 176 return NULL;
159 177
160 return this_cpu_ptr(array->pptrs[index]); 178 return this_cpu_ptr(array->pptrs[index & array->index_mask]);
161} 179}
162 180
163int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value) 181int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
@@ -177,7 +195,7 @@ int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
177 */ 195 */
178 size = round_up(map->value_size, 8); 196 size = round_up(map->value_size, 8);
179 rcu_read_lock(); 197 rcu_read_lock();
180 pptr = array->pptrs[index]; 198 pptr = array->pptrs[index & array->index_mask];
181 for_each_possible_cpu(cpu) { 199 for_each_possible_cpu(cpu) {
182 bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size); 200 bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
183 off += size; 201 off += size;
@@ -225,10 +243,11 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
225 return -EEXIST; 243 return -EEXIST;
226 244
227 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) 245 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
228 memcpy(this_cpu_ptr(array->pptrs[index]), 246 memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
229 value, map->value_size); 247 value, map->value_size);
230 else 248 else
231 memcpy(array->value + array->elem_size * index, 249 memcpy(array->value +
250 array->elem_size * (index & array->index_mask),
232 value, map->value_size); 251 value, map->value_size);
233 return 0; 252 return 0;
234} 253}
@@ -262,7 +281,7 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
262 */ 281 */
263 size = round_up(map->value_size, 8); 282 size = round_up(map->value_size, 8);
264 rcu_read_lock(); 283 rcu_read_lock();
265 pptr = array->pptrs[index]; 284 pptr = array->pptrs[index & array->index_mask];
266 for_each_possible_cpu(cpu) { 285 for_each_possible_cpu(cpu) {
267 bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size); 286 bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
268 off += size; 287 off += size;
@@ -613,6 +632,7 @@ static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
613static u32 array_of_map_gen_lookup(struct bpf_map *map, 632static u32 array_of_map_gen_lookup(struct bpf_map *map,
614 struct bpf_insn *insn_buf) 633 struct bpf_insn *insn_buf)
615{ 634{
635 struct bpf_array *array = container_of(map, struct bpf_array, map);
616 u32 elem_size = round_up(map->value_size, 8); 636 u32 elem_size = round_up(map->value_size, 8);
617 struct bpf_insn *insn = insn_buf; 637 struct bpf_insn *insn = insn_buf;
618 const int ret = BPF_REG_0; 638 const int ret = BPF_REG_0;
@@ -621,7 +641,12 @@ static u32 array_of_map_gen_lookup(struct bpf_map *map,
621 641
622 *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value)); 642 *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
623 *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); 643 *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
624 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5); 644 if (map->unpriv_array) {
645 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
646 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
647 } else {
648 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
649 }
625 if (is_power_of_2(elem_size)) 650 if (is_power_of_2(elem_size))
626 *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size)); 651 *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
627 else 652 else
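
The mitigation in this file follows one pattern: for maps created without CAP_SYS_ADMIN, size the array to the next power of two and AND every index with index_mask on the access itself, so that even if the bounds check is speculated past, the load cannot leave the allocation. A compact userspace sketch of the same masking (constants and names are made up; this is not the kernel code):

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_ENTRIES 100u
    #define INDEX_MASK  127u    /* roundup_pow_of_two(MAX_ENTRIES) - 1 */

    static uint64_t table[INDEX_MASK + 1];   /* sized to the power-of-two bound */

    /* Lookup in the style of array_map_lookup_elem(): the bounds check may be
     * mispredicted, so the index is additionally masked on the access itself. */
    static uint64_t *lookup(uint32_t index)
    {
        if (index >= MAX_ENTRIES)
            return NULL;
        return &table[index & INDEX_MASK];
    }

    int main(void)
    {
        printf("%p\n", (void *)lookup(5));
        printf("%p\n", (void *)lookup(200));   /* NULL architecturally; masked if speculated */
        return 0;
    }
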
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 86b50aa26ee8..51ec2dda7f08 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -767,6 +767,7 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
767} 767}
768EXPORT_SYMBOL_GPL(__bpf_call_base); 768EXPORT_SYMBOL_GPL(__bpf_call_base);
769 769
770#ifndef CONFIG_BPF_JIT_ALWAYS_ON
770/** 771/**
771 * __bpf_prog_run - run eBPF program on a given context 772 * __bpf_prog_run - run eBPF program on a given context
772 * @ctx: is the data we are operating on 773 * @ctx: is the data we are operating on
@@ -1317,6 +1318,14 @@ EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1317EVAL4(PROG_NAME_LIST, 416, 448, 480, 512) 1318EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1318}; 1319};
1319 1320
1321#else
1322static unsigned int __bpf_prog_ret0(const void *ctx,
1323 const struct bpf_insn *insn)
1324{
1325 return 0;
1326}
1327#endif
1328
1320bool bpf_prog_array_compatible(struct bpf_array *array, 1329bool bpf_prog_array_compatible(struct bpf_array *array,
1321 const struct bpf_prog *fp) 1330 const struct bpf_prog *fp)
1322{ 1331{
@@ -1364,9 +1373,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
1364 */ 1373 */
1365struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) 1374struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1366{ 1375{
1376#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1367 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1); 1377 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1368 1378
1369 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1]; 1379 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1380#else
1381 fp->bpf_func = __bpf_prog_ret0;
1382#endif
1370 1383
1371 /* eBPF JITs can rewrite the program in case constant 1384 /* eBPF JITs can rewrite the program in case constant
1372 * blinding is active. However, in case of error during 1385 * blinding is active. However, in case of error during
@@ -1376,6 +1389,12 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1376 */ 1389 */
1377 if (!bpf_prog_is_dev_bound(fp->aux)) { 1390 if (!bpf_prog_is_dev_bound(fp->aux)) {
1378 fp = bpf_int_jit_compile(fp); 1391 fp = bpf_int_jit_compile(fp);
1392#ifdef CONFIG_BPF_JIT_ALWAYS_ON
1393 if (!fp->jited) {
1394 *err = -ENOTSUPP;
1395 return fp;
1396 }
1397#endif
1379 } else { 1398 } else {
1380 *err = bpf_prog_offload_compile(fp); 1399 *err = bpf_prog_offload_compile(fp);
1381 if (*err) 1400 if (*err)
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 01aaef1a77c5..5bb5e49ef4c3 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -368,7 +368,45 @@ out:
368 putname(pname); 368 putname(pname);
369 return ret; 369 return ret;
370} 370}
371EXPORT_SYMBOL_GPL(bpf_obj_get_user); 371
372static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
373{
374 struct bpf_prog *prog;
375 int ret = inode_permission(inode, MAY_READ | MAY_WRITE);
376 if (ret)
377 return ERR_PTR(ret);
378
379 if (inode->i_op == &bpf_map_iops)
380 return ERR_PTR(-EINVAL);
381 if (inode->i_op != &bpf_prog_iops)
382 return ERR_PTR(-EACCES);
383
384 prog = inode->i_private;
385
386 ret = security_bpf_prog(prog);
387 if (ret < 0)
388 return ERR_PTR(ret);
389
390 if (!bpf_prog_get_ok(prog, &type, false))
391 return ERR_PTR(-EINVAL);
392
393 return bpf_prog_inc(prog);
394}
395
396struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
397{
398 struct bpf_prog *prog;
399 struct path path;
400 int ret = kern_path(name, LOOKUP_FOLLOW, &path);
401 if (ret)
402 return ERR_PTR(ret);
403 prog = __get_prog_inode(d_backing_inode(path.dentry), type);
404 if (!IS_ERR(prog))
405 touch_atime(&path);
406 path_put(&path);
407 return prog;
408}
409EXPORT_SYMBOL(bpf_prog_get_type_path);
372 410
373static void bpf_evict_inode(struct inode *inode) 411static void bpf_evict_inode(struct inode *inode)
374{ 412{
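
bpf_prog_get_type_path() gives in-kernel users a way to take a reference on a program pinned in bpffs by path, with the same type check as an fd-based lookup and atime updated on success. A hypothetical caller might look like the following sketch (the pin path and program type are invented for illustration):

    struct bpf_prog *prog;

    prog = bpf_prog_get_type_path("/sys/fs/bpf/my_filter",
                                  BPF_PROG_TYPE_SOCKET_FILTER);
    if (IS_ERR(prog))
        return PTR_ERR(prog);

    /* ... use prog ... */

    bpf_prog_put(prog);   /* drop the reference taken by the lookup */
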
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 5ee2e41893d9..1712d319c2d8 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -591,8 +591,15 @@ static void sock_map_free(struct bpf_map *map)
591 591
592 write_lock_bh(&sock->sk_callback_lock); 592 write_lock_bh(&sock->sk_callback_lock);
593 psock = smap_psock_sk(sock); 593 psock = smap_psock_sk(sock);
594 smap_list_remove(psock, &stab->sock_map[i]); 594 /* This check handles a racing sock event that can get the
595 smap_release_sock(psock, sock); 595 * sk_callback_lock before this case but after xchg happens
596 * causing the refcnt to hit zero and sock user data (psock)
597 * to be null and queued for garbage collection.
598 */
599 if (likely(psock)) {
600 smap_list_remove(psock, &stab->sock_map[i]);
601 smap_release_sock(psock, sock);
602 }
596 write_unlock_bh(&sock->sk_callback_lock); 603 write_unlock_bh(&sock->sk_callback_lock);
597 } 604 }
598 rcu_read_unlock(); 605 rcu_read_unlock();
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 2c4cfeaa8d5e..5cb783fc8224 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1057,7 +1057,7 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
1057} 1057}
1058EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); 1058EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
1059 1059
1060static bool bpf_prog_get_ok(struct bpf_prog *prog, 1060bool bpf_prog_get_ok(struct bpf_prog *prog,
1061 enum bpf_prog_type *attach_type, bool attach_drv) 1061 enum bpf_prog_type *attach_type, bool attach_drv)
1062{ 1062{
1063 /* not an attachment, just a refcount inc, always allow */ 1063 /* not an attachment, just a refcount inc, always allow */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 04b24876cd23..b414d6b2d470 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1729,6 +1729,13 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
1729 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); 1729 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
1730 if (err) 1730 if (err)
1731 return err; 1731 return err;
1732 if (func_id == BPF_FUNC_tail_call) {
1733 if (meta.map_ptr == NULL) {
1734 verbose(env, "verifier bug\n");
1735 return -EINVAL;
1736 }
1737 env->insn_aux_data[insn_idx].map_ptr = meta.map_ptr;
1738 }
1732 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); 1739 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
1733 if (err) 1740 if (err)
1734 return err; 1741 return err;
@@ -4456,6 +4463,35 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
4456 */ 4463 */
4457 insn->imm = 0; 4464 insn->imm = 0;
4458 insn->code = BPF_JMP | BPF_TAIL_CALL; 4465 insn->code = BPF_JMP | BPF_TAIL_CALL;
4466
4467 /* instead of changing every JIT dealing with tail_call
4468 * emit two extra insns:
4469 * if (index >= max_entries) goto out;
4470 * index &= array->index_mask;
4471 * to avoid out-of-bounds cpu speculation
4472 */
4473 map_ptr = env->insn_aux_data[i + delta].map_ptr;
4474 if (map_ptr == BPF_MAP_PTR_POISON) {
4475 verbose(env, "tail_call abusing map_ptr\n");
4476 return -EINVAL;
4477 }
4478 if (!map_ptr->unpriv_array)
4479 continue;
4480 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
4481 map_ptr->max_entries, 2);
4482 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
4483 container_of(map_ptr,
4484 struct bpf_array,
4485 map)->index_mask);
4486 insn_buf[2] = *insn;
4487 cnt = 3;
4488 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
4489 if (!new_prog)
4490 return -ENOMEM;
4491
4492 delta += cnt - 1;
4493 env->prog = prog = new_prog;
4494 insn = new_prog->insnsi + i + delta;
4459 continue; 4495 continue;
4460 } 4496 }
4461 4497
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 024085daab1a..a2c05d2476ac 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -123,7 +123,11 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
123 */ 123 */
124 do { 124 do {
125 css_task_iter_start(&from->self, 0, &it); 125 css_task_iter_start(&from->self, 0, &it);
126 task = css_task_iter_next(&it); 126
127 do {
128 task = css_task_iter_next(&it);
129 } while (task && (task->flags & PF_EXITING));
130
127 if (task) 131 if (task)
128 get_task_struct(task); 132 get_task_struct(task);
129 css_task_iter_end(&it); 133 css_task_iter_end(&it);
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 0b1ffe147f24..2cf06c274e4c 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1397,7 +1397,7 @@ static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
1397 cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name, 1397 cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
1398 cft->name); 1398 cft->name);
1399 else 1399 else
1400 strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX); 1400 strlcpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
1401 return buf; 1401 return buf;
1402} 1402}
1403 1403
@@ -1864,9 +1864,9 @@ void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts)
1864 1864
1865 root->flags = opts->flags; 1865 root->flags = opts->flags;
1866 if (opts->release_agent) 1866 if (opts->release_agent)
1867 strcpy(root->release_agent_path, opts->release_agent); 1867 strlcpy(root->release_agent_path, opts->release_agent, PATH_MAX);
1868 if (opts->name) 1868 if (opts->name)
1869 strcpy(root->name, opts->name); 1869 strlcpy(root->name, opts->name, MAX_CGROUP_ROOT_NAMELEN);
1870 if (opts->cpuset_clone_children) 1870 if (opts->cpuset_clone_children)
1871 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags); 1871 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
1872} 1872}
@@ -4125,26 +4125,24 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
4125 4125
4126static void css_task_iter_advance(struct css_task_iter *it) 4126static void css_task_iter_advance(struct css_task_iter *it)
4127{ 4127{
4128 struct list_head *l = it->task_pos; 4128 struct list_head *next;
4129 4129
4130 lockdep_assert_held(&css_set_lock); 4130 lockdep_assert_held(&css_set_lock);
4131 WARN_ON_ONCE(!l);
4132
4133repeat: 4131repeat:
4134 /* 4132 /*
4135 * Advance iterator to find next entry. cset->tasks is consumed 4133 * Advance iterator to find next entry. cset->tasks is consumed
4136 * first and then ->mg_tasks. After ->mg_tasks, we move onto the 4134 * first and then ->mg_tasks. After ->mg_tasks, we move onto the
4137 * next cset. 4135 * next cset.
4138 */ 4136 */
4139 l = l->next; 4137 next = it->task_pos->next;
4140 4138
4141 if (l == it->tasks_head) 4139 if (next == it->tasks_head)
4142 l = it->mg_tasks_head->next; 4140 next = it->mg_tasks_head->next;
4143 4141
4144 if (l == it->mg_tasks_head) 4142 if (next == it->mg_tasks_head)
4145 css_task_iter_advance_css_set(it); 4143 css_task_iter_advance_css_set(it);
4146 else 4144 else
4147 it->task_pos = l; 4145 it->task_pos = next;
4148 4146
4149 /* if PROCS, skip over tasks which aren't group leaders */ 4147 /* if PROCS, skip over tasks which aren't group leaders */
4150 if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos && 4148 if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos &&
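
The strncpy() to strlcpy() conversions above (and the similar ones in net/caif below) address the same hazard: strncpy() does not NUL-terminate when the source fills the buffer, while strlcpy() always terminates and truncates. A small userspace illustration (strlcpy is not in glibc, so the sketch carries its own minimal version):

    #include <stdio.h>
    #include <string.h>

    /* Minimal strlcpy-style helper for this sketch; always NUL-terminates. */
    static size_t my_strlcpy(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);

        if (size) {
            size_t copy = len < size - 1 ? len : size - 1;

            memcpy(dst, src, copy);
            dst[copy] = '\0';
        }
        return len;
    }

    int main(void)
    {
        char a[8], b[8];

        strncpy(a, "0123456789", sizeof(a));      /* fills all 8 bytes, no terminator */
        my_strlcpy(b, "0123456789", sizeof(b));   /* copies "0123456", terminated     */

        printf("strlcpy result: %s\n", b);
        /* printing a with %s here would run past the end of the buffer */
        return 0;
    }
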
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index b3663896278e..4f63597c824d 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -410,7 +410,7 @@ static int __init crash_save_vmcoreinfo_init(void)
410 VMCOREINFO_SYMBOL(contig_page_data); 410 VMCOREINFO_SYMBOL(contig_page_data);
411#endif 411#endif
412#ifdef CONFIG_SPARSEMEM 412#ifdef CONFIG_SPARSEMEM
413 VMCOREINFO_SYMBOL(mem_section); 413 VMCOREINFO_SYMBOL_ARRAY(mem_section);
414 VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS); 414 VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
415 VMCOREINFO_STRUCT_SIZE(mem_section); 415 VMCOREINFO_STRUCT_SIZE(mem_section);
416 VMCOREINFO_OFFSET(mem_section, section_mem_map); 416 VMCOREINFO_OFFSET(mem_section, section_mem_map);
diff --git a/kernel/exit.c b/kernel/exit.c
index df0c91d5606c..995453d9fb55 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1763,3 +1763,4 @@ __weak void abort(void)
1763 /* if that doesn't kill us, halt */ 1763 /* if that doesn't kill us, halt */
1764 panic("Oops failed to kill thread"); 1764 panic("Oops failed to kill thread");
1765} 1765}
1766EXPORT_SYMBOL(abort);
diff --git a/kernel/pid.c b/kernel/pid.c
index b13b624e2c49..1e8bb6550ec4 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -193,10 +193,8 @@ struct pid *alloc_pid(struct pid_namespace *ns)
193 } 193 }
194 194
195 if (unlikely(is_child_reaper(pid))) { 195 if (unlikely(is_child_reaper(pid))) {
196 if (pid_ns_prepare_proc(ns)) { 196 if (pid_ns_prepare_proc(ns))
197 disable_pid_allocation(ns);
198 goto out_free; 197 goto out_free;
199 }
200 } 198 }
201 199
202 get_pid_ns(ns); 200 get_pid_ns(ns);
@@ -226,6 +224,10 @@ out_free:
226 while (++i <= ns->level) 224 while (++i <= ns->level)
227 idr_remove(&ns->idr, (pid->numbers + i)->nr); 225 idr_remove(&ns->idr, (pid->numbers + i)->nr);
228 226
227 /* On failure to allocate the first pid, reset the state */
228 if (ns->pid_allocated == PIDNS_ADDING)
229 idr_set_cursor(&ns->idr, 0);
230
229 spin_unlock_irq(&pidmap_lock); 231 spin_unlock_irq(&pidmap_lock);
230 232
231 kmem_cache_free(ns->pid_cachep, pid); 233 kmem_cache_free(ns->pid_cachep, pid);
diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
index 2ddaec40956f..0926aef10dad 100644
--- a/kernel/sched/completion.c
+++ b/kernel/sched/completion.c
@@ -34,11 +34,6 @@ void complete(struct completion *x)
34 34
35 spin_lock_irqsave(&x->wait.lock, flags); 35 spin_lock_irqsave(&x->wait.lock, flags);
36 36
37 /*
38 * Perform commit of crossrelease here.
39 */
40 complete_release_commit(x);
41
42 if (x->done != UINT_MAX) 37 if (x->done != UINT_MAX)
43 x->done++; 38 x->done++;
44 __wake_up_locked(&x->wait, TASK_NORMAL, 1); 39 __wake_up_locked(&x->wait, TASK_NORMAL, 1);
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index dd7908743dab..9bcbacba82a8 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -89,7 +89,9 @@ static int membarrier_private_expedited(void)
89 rcu_read_unlock(); 89 rcu_read_unlock();
90 } 90 }
91 if (!fallback) { 91 if (!fallback) {
92 preempt_disable();
92 smp_call_function_many(tmpmask, ipi_mb, NULL, 1); 93 smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
94 preempt_enable();
93 free_cpumask_var(tmpmask); 95 free_cpumask_var(tmpmask);
94 } 96 }
95 cpus_read_unlock(); 97 cpus_read_unlock();
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index 57fd45ab7af1..08c60d10747f 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -671,7 +671,23 @@ do { \
671 ************** MIPS/64 ************** 671 ************** MIPS/64 **************
672 ***************************************/ 672 ***************************************/
673#if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64 673#if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
674#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4) 674#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
675/*
676 * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C
677 * code below, so we special case MIPS64r6 until the compiler can do better.
678 */
679#define umul_ppmm(w1, w0, u, v) \
680do { \
681 __asm__ ("dmulu %0,%1,%2" \
682 : "=d" ((UDItype)(w0)) \
683 : "d" ((UDItype)(u)), \
684 "d" ((UDItype)(v))); \
685 __asm__ ("dmuhu %0,%1,%2" \
686 : "=d" ((UDItype)(w1)) \
687 : "d" ((UDItype)(u)), \
688 "d" ((UDItype)(v))); \
689} while (0)
690#elif (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
675#define umul_ppmm(w1, w0, u, v) \ 691#define umul_ppmm(w1, w0, u, v) \
676do { \ 692do { \
677 typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \ 693 typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \
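
umul_ppmm(w1, w0, u, v) produces the full 128-bit product of two 64-bit operands, with the high half in w1 and the low half in w0. On MIPS64r6 the generic TImode C version currently makes GCC emit a __multi3 call, so the header special-cases it with the dmulu/dmuhu instructions. A portable C illustration of what the macro computes (assuming a compiler that supports unsigned __int128):

    #include <stdio.h>
    #include <stdint.h>

    /* What umul_ppmm() computes, written with a 128-bit temporary. */
    static void umul_ppmm_c(uint64_t *w1, uint64_t *w0, uint64_t u, uint64_t v)
    {
        unsigned __int128 p = (unsigned __int128)u * v;

        *w1 = (uint64_t)(p >> 64);   /* high 64 bits */
        *w0 = (uint64_t)p;           /* low 64 bits  */
    }

    int main(void)
    {
        uint64_t hi, lo;

        umul_ppmm_c(&hi, &lo, 0xffffffffffffffffULL, 2);
        printf("hi=%llu lo=0x%llx\n", (unsigned long long)hi, (unsigned long long)lo);
        /* prints hi=1 lo=0xfffffffffffffffe */
        return 0;
    }
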
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 9e9748089270..f369889e521d 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -6250,9 +6250,8 @@ static struct bpf_prog *generate_filter(int which, int *err)
6250 return NULL; 6250 return NULL;
6251 } 6251 }
6252 } 6252 }
6253 /* We don't expect to fail. */
6254 if (*err) { 6253 if (*err) {
6255 pr_cont("FAIL to attach err=%d len=%d\n", 6254 pr_cont("FAIL to prog_create err=%d len=%d\n",
6256 *err, fprog.len); 6255 *err, fprog.len);
6257 return NULL; 6256 return NULL;
6258 } 6257 }
@@ -6276,6 +6275,10 @@ static struct bpf_prog *generate_filter(int which, int *err)
6276 * checks. 6275 * checks.
6277 */ 6276 */
6278 fp = bpf_prog_select_runtime(fp, err); 6277 fp = bpf_prog_select_runtime(fp, err);
6278 if (*err) {
6279 pr_cont("FAIL to select_runtime err=%d\n", *err);
6280 return NULL;
6281 }
6279 break; 6282 break;
6280 } 6283 }
6281 6284
@@ -6461,8 +6464,8 @@ static __init int test_bpf(void)
6461 pass_cnt++; 6464 pass_cnt++;
6462 continue; 6465 continue;
6463 } 6466 }
6464 6467 err_cnt++;
6465 return err; 6468 continue;
6466 } 6469 }
6467 6470
6468 pr_cont("jited:%u ", fp->jited); 6471 pr_cont("jited:%u ", fp->jited);
diff --git a/mm/debug.c b/mm/debug.c
index d947f3e03b0d..56e2d9125ea5 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -50,7 +50,7 @@ void __dump_page(struct page *page, const char *reason)
50 */ 50 */
51 int mapcount = PageSlab(page) ? 0 : page_mapcount(page); 51 int mapcount = PageSlab(page) ? 0 : page_mapcount(page);
52 52
53 pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx", 53 pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx",
54 page, page_ref_count(page), mapcount, 54 page, page_ref_count(page), mapcount,
55 page->mapping, page_to_pgoff(page)); 55 page->mapping, page_to_pgoff(page));
56 if (PageCompound(page)) 56 if (PageCompound(page))
@@ -69,7 +69,7 @@ void __dump_page(struct page *page, const char *reason)
69 69
70#ifdef CONFIG_MEMCG 70#ifdef CONFIG_MEMCG
71 if (page->mem_cgroup) 71 if (page->mem_cgroup)
72 pr_alert("page->mem_cgroup:%p\n", page->mem_cgroup); 72 pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup);
73#endif 73#endif
74} 74}
75 75
@@ -84,10 +84,10 @@ EXPORT_SYMBOL(dump_page);
84 84
85void dump_vma(const struct vm_area_struct *vma) 85void dump_vma(const struct vm_area_struct *vma)
86{ 86{
87 pr_emerg("vma %p start %p end %p\n" 87 pr_emerg("vma %px start %px end %px\n"
88 "next %p prev %p mm %p\n" 88 "next %px prev %px mm %px\n"
89 "prot %lx anon_vma %p vm_ops %p\n" 89 "prot %lx anon_vma %px vm_ops %px\n"
90 "pgoff %lx file %p private_data %p\n" 90 "pgoff %lx file %px private_data %px\n"
91 "flags: %#lx(%pGv)\n", 91 "flags: %#lx(%pGv)\n",
92 vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next, 92 vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
93 vma->vm_prev, vma->vm_mm, 93 vma->vm_prev, vma->vm_mm,
@@ -100,27 +100,27 @@ EXPORT_SYMBOL(dump_vma);
100 100
101void dump_mm(const struct mm_struct *mm) 101void dump_mm(const struct mm_struct *mm)
102{ 102{
103 pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n" 103 pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n"
104#ifdef CONFIG_MMU 104#ifdef CONFIG_MMU
105 "get_unmapped_area %p\n" 105 "get_unmapped_area %px\n"
106#endif 106#endif
107 "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n" 107 "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
108 "pgd %p mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n" 108 "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
109 "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n" 109 "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
110 "pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n" 110 "pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
111 "start_code %lx end_code %lx start_data %lx end_data %lx\n" 111 "start_code %lx end_code %lx start_data %lx end_data %lx\n"
112 "start_brk %lx brk %lx start_stack %lx\n" 112 "start_brk %lx brk %lx start_stack %lx\n"
113 "arg_start %lx arg_end %lx env_start %lx env_end %lx\n" 113 "arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
114 "binfmt %p flags %lx core_state %p\n" 114 "binfmt %px flags %lx core_state %px\n"
115#ifdef CONFIG_AIO 115#ifdef CONFIG_AIO
116 "ioctx_table %p\n" 116 "ioctx_table %px\n"
117#endif 117#endif
118#ifdef CONFIG_MEMCG 118#ifdef CONFIG_MEMCG
119 "owner %p " 119 "owner %px "
120#endif 120#endif
121 "exe_file %p\n" 121 "exe_file %px\n"
122#ifdef CONFIG_MMU_NOTIFIER 122#ifdef CONFIG_MMU_NOTIFIER
123 "mmu_notifier_mm %p\n" 123 "mmu_notifier_mm %px\n"
124#endif 124#endif
125#ifdef CONFIG_NUMA_BALANCING 125#ifdef CONFIG_NUMA_BALANCING
126 "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n" 126 "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index d73c14294f3a..f656ca27f6c2 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -127,7 +127,7 @@
127/* GFP bitmask for kmemleak internal allocations */ 127/* GFP bitmask for kmemleak internal allocations */
128#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \ 128#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
129 __GFP_NORETRY | __GFP_NOMEMALLOC | \ 129 __GFP_NORETRY | __GFP_NOMEMALLOC | \
130 __GFP_NOWARN) 130 __GFP_NOWARN | __GFP_NOFAIL)
131 131
132/* scanning area inside a memory block */ 132/* scanning area inside a memory block */
133struct kmemleak_scan_area { 133struct kmemleak_scan_area {
diff --git a/mm/mprotect.c b/mm/mprotect.c
index ec39f730a0bf..58b629bb70de 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -166,7 +166,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
166 next = pmd_addr_end(addr, end); 166 next = pmd_addr_end(addr, end);
167 if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd) 167 if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
168 && pmd_none_or_clear_bad(pmd)) 168 && pmd_none_or_clear_bad(pmd))
169 continue; 169 goto next;
170 170
171 /* invoke the mmu notifier if the pmd is populated */ 171 /* invoke the mmu notifier if the pmd is populated */
172 if (!mni_start) { 172 if (!mni_start) {
@@ -188,7 +188,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
188 } 188 }
189 189
190 /* huge pmd was handled */ 190 /* huge pmd was handled */
191 continue; 191 goto next;
192 } 192 }
193 } 193 }
194 /* fall through, the trans huge pmd just split */ 194 /* fall through, the trans huge pmd just split */
@@ -196,6 +196,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
196 this_pages = change_pte_range(vma, pmd, addr, next, newprot, 196 this_pages = change_pte_range(vma, pmd, addr, next, newprot,
197 dirty_accountable, prot_numa); 197 dirty_accountable, prot_numa);
198 pages += this_pages; 198 pages += this_pages;
199next:
200 cond_resched();
199 } while (pmd++, addr = next, addr != end); 201 } while (pmd++, addr = next, addr != end);
200 202
201 if (mni_start) 203 if (mni_start)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1748dd4a4b1b..2bb7f163baca 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6260,6 +6260,8 @@ void __paginginit zero_resv_unavail(void)
6260 pgcnt = 0; 6260 pgcnt = 0;
6261 for_each_resv_unavail_range(i, &start, &end) { 6261 for_each_resv_unavail_range(i, &start, &end) {
6262 for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) { 6262 for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) {
6263 if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages)))
6264 continue;
6263 mm_zero_struct_page(pfn_to_page(pfn)); 6265 mm_zero_struct_page(pfn_to_page(pfn));
6264 pgcnt++; 6266 pgcnt++;
6265 } 6267 }
diff --git a/mm/sparse.c b/mm/sparse.c
index 06130c13dc99..2583174b1d62 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -211,7 +211,7 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
211 if (unlikely(!mem_section)) { 211 if (unlikely(!mem_section)) {
212 unsigned long size, align; 212 unsigned long size, align;
213 213
214 size = sizeof(struct mem_section) * NR_SECTION_ROOTS; 214 size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
215 align = 1 << (INTERNODE_CACHE_SHIFT); 215 align = 1 << (INTERNODE_CACHE_SHIFT);
216 mem_section = memblock_virt_alloc(size, align); 216 mem_section = memblock_virt_alloc(size, align);
217 } 217 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c02c850ea349..47d5ced51f2d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -297,10 +297,13 @@ EXPORT_SYMBOL(register_shrinker);
297 */ 297 */
298void unregister_shrinker(struct shrinker *shrinker) 298void unregister_shrinker(struct shrinker *shrinker)
299{ 299{
300 if (!shrinker->nr_deferred)
301 return;
300 down_write(&shrinker_rwsem); 302 down_write(&shrinker_rwsem);
301 list_del(&shrinker->list); 303 list_del(&shrinker->list);
302 up_write(&shrinker_rwsem); 304 up_write(&shrinker_rwsem);
303 kfree(shrinker->nr_deferred); 305 kfree(shrinker->nr_deferred);
306 shrinker->nr_deferred = NULL;
304} 307}
305EXPORT_SYMBOL(unregister_shrinker); 308EXPORT_SYMBOL(unregister_shrinker);
306 309
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 685049a9048d..683c0651098c 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -53,6 +53,7 @@
53#include <linux/mount.h> 53#include <linux/mount.h>
54#include <linux/migrate.h> 54#include <linux/migrate.h>
55#include <linux/pagemap.h> 55#include <linux/pagemap.h>
56#include <linux/fs.h>
56 57
57#define ZSPAGE_MAGIC 0x58 58#define ZSPAGE_MAGIC 0x58
58 59
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 8dfdd94e430f..bad01b14a4ad 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -111,12 +111,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
111 vlan_gvrp_uninit_applicant(real_dev); 111 vlan_gvrp_uninit_applicant(real_dev);
112 } 112 }
113 113
114 /* Take it out of our own structures, but be sure to interlock with 114 vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
115 * HW accelerating devices or SW vlan input packet processing if
116 * VLAN is not 0 (leave it there for 802.1p).
117 */
118 if (vlan_id)
119 vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
120 115
121 /* Get rid of the vlan's reference to real_dev */ 116 /* Get rid of the vlan's reference to real_dev */
122 dev_put(real_dev); 117 dev_put(real_dev);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 43ba91c440bc..fc6615d59165 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -3363,9 +3363,10 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
3363 break; 3363 break;
3364 3364
3365 case L2CAP_CONF_EFS: 3365 case L2CAP_CONF_EFS:
3366 remote_efs = 1; 3366 if (olen == sizeof(efs)) {
3367 if (olen == sizeof(efs)) 3367 remote_efs = 1;
3368 memcpy(&efs, (void *) val, olen); 3368 memcpy(&efs, (void *) val, olen);
3369 }
3369 break; 3370 break;
3370 3371
3371 case L2CAP_CONF_EWS: 3372 case L2CAP_CONF_EWS:
@@ -3584,16 +3585,17 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3584 break; 3585 break;
3585 3586
3586 case L2CAP_CONF_EFS: 3587 case L2CAP_CONF_EFS:
3587 if (olen == sizeof(efs)) 3588 if (olen == sizeof(efs)) {
3588 memcpy(&efs, (void *)val, olen); 3589 memcpy(&efs, (void *)val, olen);
3589 3590
3590 if (chan->local_stype != L2CAP_SERV_NOTRAFIC && 3591 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3591 efs.stype != L2CAP_SERV_NOTRAFIC && 3592 efs.stype != L2CAP_SERV_NOTRAFIC &&
3592 efs.stype != chan->local_stype) 3593 efs.stype != chan->local_stype)
3593 return -ECONNREFUSED; 3594 return -ECONNREFUSED;
3594 3595
3595 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs), 3596 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3596 (unsigned long) &efs, endptr - ptr); 3597 (unsigned long) &efs, endptr - ptr);
3598 }
3597 break; 3599 break;
3598 3600
3599 case L2CAP_CONF_FCS: 3601 case L2CAP_CONF_FCS:
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 2d38b6e34203..e0adcd123f48 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -334,9 +334,8 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
334 mutex_lock(&caifdevs->lock); 334 mutex_lock(&caifdevs->lock);
335 list_add_rcu(&caifd->list, &caifdevs->list); 335 list_add_rcu(&caifd->list, &caifdevs->list);
336 336
337 strncpy(caifd->layer.name, dev->name, 337 strlcpy(caifd->layer.name, dev->name,
338 sizeof(caifd->layer.name) - 1); 338 sizeof(caifd->layer.name));
339 caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
340 caifd->layer.transmit = transmit; 339 caifd->layer.transmit = transmit;
341 cfcnfg_add_phy_layer(cfg, 340 cfcnfg_add_phy_layer(cfg,
342 dev, 341 dev,
diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
index 5cd44f001f64..1a082a946045 100644
--- a/net/caif/caif_usb.c
+++ b/net/caif/caif_usb.c
@@ -176,9 +176,7 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
176 dev_add_pack(&caif_usb_type); 176 dev_add_pack(&caif_usb_type);
177 pack_added = true; 177 pack_added = true;
178 178
179 strncpy(layer->name, dev->name, 179 strlcpy(layer->name, dev->name, sizeof(layer->name));
180 sizeof(layer->name) - 1);
181 layer->name[sizeof(layer->name) - 1] = 0;
182 180
183 return 0; 181 return 0;
184} 182}
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 273cb07f57d8..8f00bea093b9 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -268,17 +268,15 @@ static int caif_connect_req_to_link_param(struct cfcnfg *cnfg,
268 case CAIFPROTO_RFM: 268 case CAIFPROTO_RFM:
269 l->linktype = CFCTRL_SRV_RFM; 269 l->linktype = CFCTRL_SRV_RFM;
270 l->u.datagram.connid = s->sockaddr.u.rfm.connection_id; 270 l->u.datagram.connid = s->sockaddr.u.rfm.connection_id;
271 strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume, 271 strlcpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
272 sizeof(l->u.rfm.volume)-1); 272 sizeof(l->u.rfm.volume));
273 l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0;
274 break; 273 break;
275 case CAIFPROTO_UTIL: 274 case CAIFPROTO_UTIL:
276 l->linktype = CFCTRL_SRV_UTIL; 275 l->linktype = CFCTRL_SRV_UTIL;
277 l->endpoint = 0x00; 276 l->endpoint = 0x00;
278 l->chtype = 0x00; 277 l->chtype = 0x00;
279 strncpy(l->u.utility.name, s->sockaddr.u.util.service, 278 strlcpy(l->u.utility.name, s->sockaddr.u.util.service,
280 sizeof(l->u.utility.name)-1); 279 sizeof(l->u.utility.name));
281 l->u.utility.name[sizeof(l->u.utility.name)-1] = 0;
282 caif_assert(sizeof(l->u.utility.name) > 10); 280 caif_assert(sizeof(l->u.utility.name) > 10);
283 l->u.utility.paramlen = s->param.size; 281 l->u.utility.paramlen = s->param.size;
284 if (l->u.utility.paramlen > sizeof(l->u.utility.params)) 282 if (l->u.utility.paramlen > sizeof(l->u.utility.params))
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index f5afda1abc76..655ed7032150 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -258,8 +258,8 @@ int cfctrl_linkup_request(struct cflayer *layer,
258 tmp16 = cpu_to_le16(param->u.utility.fifosize_bufs); 258 tmp16 = cpu_to_le16(param->u.utility.fifosize_bufs);
259 cfpkt_add_body(pkt, &tmp16, 2); 259 cfpkt_add_body(pkt, &tmp16, 2);
260 memset(utility_name, 0, sizeof(utility_name)); 260 memset(utility_name, 0, sizeof(utility_name));
261 strncpy(utility_name, param->u.utility.name, 261 strlcpy(utility_name, param->u.utility.name,
262 UTILITY_NAME_LENGTH - 1); 262 UTILITY_NAME_LENGTH);
263 cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH); 263 cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH);
264 tmp8 = param->u.utility.paramlen; 264 tmp8 = param->u.utility.paramlen;
265 cfpkt_add_body(pkt, &tmp8, 1); 265 cfpkt_add_body(pkt, &tmp8, 1);
diff --git a/net/core/dev.c b/net/core/dev.c
index 01ee854454a8..0e0ba36eeac9 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1146,7 +1146,19 @@ EXPORT_SYMBOL(dev_alloc_name);
1146int dev_get_valid_name(struct net *net, struct net_device *dev, 1146int dev_get_valid_name(struct net *net, struct net_device *dev,
1147 const char *name) 1147 const char *name)
1148{ 1148{
1149 return dev_alloc_name_ns(net, dev, name); 1149 BUG_ON(!net);
1150
1151 if (!dev_valid_name(name))
1152 return -EINVAL;
1153
1154 if (strchr(name, '%'))
1155 return dev_alloc_name_ns(net, dev, name);
1156 else if (__dev_get_by_name(net, name))
1157 return -EEXIST;
1158 else if (dev->name != name)
1159 strlcpy(dev->name, name, IFNAMSIZ);
1160
1161 return 0;
1150} 1162}
1151EXPORT_SYMBOL(dev_get_valid_name); 1163EXPORT_SYMBOL(dev_get_valid_name);
1152 1164
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index f8fcf450a36e..8225416911ae 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -770,15 +770,6 @@ static int ethtool_set_link_ksettings(struct net_device *dev,
770 return dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings); 770 return dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings);
771} 771}
772 772
773static void
774warn_incomplete_ethtool_legacy_settings_conversion(const char *details)
775{
776 char name[sizeof(current->comm)];
777
778 pr_info_once("warning: `%s' uses legacy ethtool link settings API, %s\n",
779 get_task_comm(name, current), details);
780}
781
782/* Query device for its ethtool_cmd settings. 773/* Query device for its ethtool_cmd settings.
783 * 774 *
784 * Backward compatibility note: for compatibility with legacy ethtool, 775 * Backward compatibility note: for compatibility with legacy ethtool,
@@ -805,10 +796,8 @@ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
805 &link_ksettings); 796 &link_ksettings);
806 if (err < 0) 797 if (err < 0)
807 return err; 798 return err;
808 if (!convert_link_ksettings_to_legacy_settings(&cmd, 799 convert_link_ksettings_to_legacy_settings(&cmd,
809 &link_ksettings)) 800 &link_ksettings);
810 warn_incomplete_ethtool_legacy_settings_conversion(
811 "link modes are only partially reported");
812 801
813 /* send a sensible cmd tag back to user */ 802 /* send a sensible cmd tag back to user */
814 cmd.cmd = ETHTOOL_GSET; 803 cmd.cmd = ETHTOOL_GSET;
diff --git a/net/core/filter.c b/net/core/filter.c
index 6a85e67fafce..d339ef170df6 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1054,11 +1054,9 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
1054 */ 1054 */
1055 goto out_err_free; 1055 goto out_err_free;
1056 1056
1057 /* We are guaranteed to never error here with cBPF to eBPF
1058 * transitions, since there's no issue with type compatibility
1059 * checks on program arrays.
1060 */
1061 fp = bpf_prog_select_runtime(fp, &err); 1057 fp = bpf_prog_select_runtime(fp, &err);
1058 if (err)
1059 goto out_err_free;
1062 1060
1063 kfree(old_prog); 1061 kfree(old_prog);
1064 return fp; 1062 return fp;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index dabba2a91fc8..778d7f03404a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1681,18 +1681,18 @@ static bool link_dump_filtered(struct net_device *dev,
1681 return false; 1681 return false;
1682} 1682}
1683 1683
1684static struct net *get_target_net(struct sk_buff *skb, int netnsid) 1684static struct net *get_target_net(struct sock *sk, int netnsid)
1685{ 1685{
1686 struct net *net; 1686 struct net *net;
1687 1687
1688 net = get_net_ns_by_id(sock_net(skb->sk), netnsid); 1688 net = get_net_ns_by_id(sock_net(sk), netnsid);
1689 if (!net) 1689 if (!net)
1690 return ERR_PTR(-EINVAL); 1690 return ERR_PTR(-EINVAL);
1691 1691
1692 /* For now, the caller is required to have CAP_NET_ADMIN in 1692 /* For now, the caller is required to have CAP_NET_ADMIN in
1693 * the user namespace owning the target net ns. 1693 * the user namespace owning the target net ns.
1694 */ 1694 */
1695 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) { 1695 if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
1696 put_net(net); 1696 put_net(net);
1697 return ERR_PTR(-EACCES); 1697 return ERR_PTR(-EACCES);
1698 } 1698 }
@@ -1733,7 +1733,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1733 ifla_policy, NULL) >= 0) { 1733 ifla_policy, NULL) >= 0) {
1734 if (tb[IFLA_IF_NETNSID]) { 1734 if (tb[IFLA_IF_NETNSID]) {
1735 netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]); 1735 netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
1736 tgt_net = get_target_net(skb, netnsid); 1736 tgt_net = get_target_net(skb->sk, netnsid);
1737 if (IS_ERR(tgt_net)) { 1737 if (IS_ERR(tgt_net)) {
1738 tgt_net = net; 1738 tgt_net = net;
1739 netnsid = -1; 1739 netnsid = -1;
@@ -2883,7 +2883,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
2883 2883
2884 if (tb[IFLA_IF_NETNSID]) { 2884 if (tb[IFLA_IF_NETNSID]) {
2885 netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]); 2885 netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
2886 tgt_net = get_target_net(skb, netnsid); 2886 tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid);
2887 if (IS_ERR(tgt_net)) 2887 if (IS_ERR(tgt_net))
2888 return PTR_ERR(tgt_net); 2888 return PTR_ERR(tgt_net);
2889 } 2889 }
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 217f4e3b82f6..146b50e30659 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -288,7 +288,7 @@ static int sock_diag_bind(struct net *net, int group)
288 case SKNLGRP_INET6_UDP_DESTROY: 288 case SKNLGRP_INET6_UDP_DESTROY:
289 if (!sock_diag_handlers[AF_INET6]) 289 if (!sock_diag_handlers[AF_INET6])
290 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, 290 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
291 NETLINK_SOCK_DIAG, AF_INET); 291 NETLINK_SOCK_DIAG, AF_INET6);
292 break; 292 break;
293 } 293 }
294 return 0; 294 return 0;
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index cbc3dde4cfcc..a47ad6cd41c0 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -325,7 +325,13 @@ static struct ctl_table net_core_table[] = {
325 .data = &bpf_jit_enable, 325 .data = &bpf_jit_enable,
326 .maxlen = sizeof(int), 326 .maxlen = sizeof(int),
327 .mode = 0644, 327 .mode = 0644,
328#ifndef CONFIG_BPF_JIT_ALWAYS_ON
328 .proc_handler = proc_dointvec 329 .proc_handler = proc_dointvec
330#else
331 .proc_handler = proc_dointvec_minmax,
332 .extra1 = &one,
333 .extra2 = &one,
334#endif
329 }, 335 },
330# ifdef CONFIG_HAVE_EBPF_JIT 336# ifdef CONFIG_HAVE_EBPF_JIT
331 { 337 {
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 125c1eab3eaa..5e570aa9e43b 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -520,9 +520,11 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
520 goto out; 520 goto out;
521 521
522 /* hdrincl should be READ_ONCE(inet->hdrincl) 522 /* hdrincl should be READ_ONCE(inet->hdrincl)
523 * but READ_ONCE() doesn't work with bit fields 523 * but READ_ONCE() doesn't work with bit fields.
524 * Doing this indirectly yields the same result.
524 */ 525 */
525 hdrincl = inet->hdrincl; 526 hdrincl = inet->hdrincl;
527 hdrincl = READ_ONCE(hdrincl);
526 /* 528 /*
527 * Check the flags. 529 * Check the flags.
528 */ 530 */
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 83bd75713535..bc68eb661970 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -925,6 +925,15 @@ static void ipv6_push_rthdr4(struct sk_buff *skb, u8 *proto,
925 sr_phdr->segments[0] = **addr_p; 925 sr_phdr->segments[0] = **addr_p;
926 *addr_p = &sr_ihdr->segments[sr_ihdr->segments_left]; 926 *addr_p = &sr_ihdr->segments[sr_ihdr->segments_left];
927 927
928 if (sr_ihdr->hdrlen > hops * 2) {
929 int tlvs_offset, tlvs_length;
930
931 tlvs_offset = (1 + hops * 2) << 3;
932 tlvs_length = (sr_ihdr->hdrlen - hops * 2) << 3;
933 memcpy((char *)sr_phdr + tlvs_offset,
934 (char *)sr_ihdr + tlvs_offset, tlvs_length);
935 }
936
928#ifdef CONFIG_IPV6_SEG6_HMAC 937#ifdef CONFIG_IPV6_SEG6_HMAC
929 if (sr_has_hmac(sr_phdr)) { 938 if (sr_has_hmac(sr_phdr)) {
930 struct net *net = NULL; 939 struct net *net = NULL;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index f5285f4e1d08..9dcc3924a975 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -640,6 +640,11 @@ static struct fib6_node *fib6_add_1(struct net *net,
640 if (!(fn->fn_flags & RTN_RTINFO)) { 640 if (!(fn->fn_flags & RTN_RTINFO)) {
641 RCU_INIT_POINTER(fn->leaf, NULL); 641 RCU_INIT_POINTER(fn->leaf, NULL);
642 rt6_release(leaf); 642 rt6_release(leaf);
643 /* remove null_entry in the root node */
644 } else if (fn->fn_flags & RTN_TL_ROOT &&
645 rcu_access_pointer(fn->leaf) ==
646 net->ipv6.ip6_null_entry) {
647 RCU_INIT_POINTER(fn->leaf, NULL);
643 } 648 }
644 649
645 return fn; 650 return fn;
@@ -1241,23 +1246,28 @@ out:
1241 * If fib6_add_1 has cleared the old leaf pointer in the 1246 * If fib6_add_1 has cleared the old leaf pointer in the
1242 * super-tree leaf node we have to find a new one for it. 1247 * super-tree leaf node we have to find a new one for it.
1243 */ 1248 */
1244 struct rt6_info *pn_leaf = rcu_dereference_protected(pn->leaf, 1249 if (pn != fn) {
1245 lockdep_is_held(&table->tb6_lock)); 1250 struct rt6_info *pn_leaf =
1246 if (pn != fn && pn_leaf == rt) { 1251 rcu_dereference_protected(pn->leaf,
1247 pn_leaf = NULL; 1252 lockdep_is_held(&table->tb6_lock));
1248 RCU_INIT_POINTER(pn->leaf, NULL); 1253 if (pn_leaf == rt) {
1249 atomic_dec(&rt->rt6i_ref); 1254 pn_leaf = NULL;
1250 } 1255 RCU_INIT_POINTER(pn->leaf, NULL);
1251 if (pn != fn && !pn_leaf && !(pn->fn_flags & RTN_RTINFO)) { 1256 atomic_dec(&rt->rt6i_ref);
1252 pn_leaf = fib6_find_prefix(info->nl_net, table, pn);
1253#if RT6_DEBUG >= 2
1254 if (!pn_leaf) {
1255 WARN_ON(!pn_leaf);
1256 pn_leaf = info->nl_net->ipv6.ip6_null_entry;
1257 } 1257 }
1258 if (!pn_leaf && !(pn->fn_flags & RTN_RTINFO)) {
1259 pn_leaf = fib6_find_prefix(info->nl_net, table,
1260 pn);
1261#if RT6_DEBUG >= 2
1262 if (!pn_leaf) {
1263 WARN_ON(!pn_leaf);
1264 pn_leaf =
1265 info->nl_net->ipv6.ip6_null_entry;
1266 }
1258#endif 1267#endif
1259 atomic_inc(&pn_leaf->rt6i_ref); 1268 atomic_inc(&pn_leaf->rt6i_ref);
1260 rcu_assign_pointer(pn->leaf, pn_leaf); 1269 rcu_assign_pointer(pn->leaf, pn_leaf);
1270 }
1261 } 1271 }
1262#endif 1272#endif
1263 goto failure; 1273 goto failure;
@@ -1265,13 +1275,17 @@ out:
1265 return err; 1275 return err;
1266 1276
1267failure: 1277failure:
1268 /* fn->leaf could be NULL if fn is an intermediate node and we 1278 /* fn->leaf could be NULL and fib6_repair_tree() needs to be called if:
1269 * failed to add the new route to it in both subtree creation 1279 * 1. fn is an intermediate node and we failed to add the new
1270 * failure and fib6_add_rt2node() failure case. 1280 * route to it in both subtree creation failure and fib6_add_rt2node()
1271 * In both cases, fib6_repair_tree() should be called to fix 1281 * failure case.
1272 * fn->leaf. 1282 * 2. fn is the root node in the table and we fail to add the first
1283 * default route to it.
1273 */ 1284 */
1274 if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) 1285 if (fn &&
1286 (!(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)) ||
1287 (fn->fn_flags & RTN_TL_ROOT &&
1288 !rcu_access_pointer(fn->leaf))))
1275 fib6_repair_tree(info->nl_net, table, fn); 1289 fib6_repair_tree(info->nl_net, table, fn);
1276 /* Always release dst as dst->__refcnt is guaranteed 1290 /* Always release dst as dst->__refcnt is guaranteed
1277 * to be taken before entering this function 1291 * to be taken before entering this function
@@ -1526,6 +1540,12 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
1526 struct fib6_walker *w; 1540 struct fib6_walker *w;
1527 int iter = 0; 1541 int iter = 0;
1528 1542
1543 /* Set fn->leaf to null_entry for root node. */
1544 if (fn->fn_flags & RTN_TL_ROOT) {
1545 rcu_assign_pointer(fn->leaf, net->ipv6.ip6_null_entry);
1546 return fn;
1547 }
1548
1529 for (;;) { 1549 for (;;) {
1530 struct fib6_node *fn_r = rcu_dereference_protected(fn->right, 1550 struct fib6_node *fn_r = rcu_dereference_protected(fn->right,
1531 lockdep_is_held(&table->tb6_lock)); 1551 lockdep_is_held(&table->tb6_lock));
@@ -1680,10 +1700,15 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
1680 } 1700 }
1681 read_unlock(&net->ipv6.fib6_walker_lock); 1701 read_unlock(&net->ipv6.fib6_walker_lock);
1682 1702
1683 /* If it was last route, expunge its radix tree node */ 1703 /* If it was last route, call fib6_repair_tree() to:
1704 * 1. For root node, put back null_entry as how the table was created.
1705 * 2. For other nodes, expunge its radix tree node.
1706 */
1684 if (!rcu_access_pointer(fn->leaf)) { 1707 if (!rcu_access_pointer(fn->leaf)) {
1685 fn->fn_flags &= ~RTN_RTINFO; 1708 if (!(fn->fn_flags & RTN_TL_ROOT)) {
1686 net->ipv6.rt6_stats->fib_route_nodes--; 1709 fn->fn_flags &= ~RTN_RTINFO;
1710 net->ipv6.rt6_stats->fib_route_nodes--;
1711 }
1687 fn = fib6_repair_tree(net, table, fn); 1712 fn = fib6_repair_tree(net, table, fn);
1688 } 1713 }
1689 1714
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index f7dd51c42314..688ba5f7516b 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1735,9 +1735,10 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
1735 cork.base.opt = NULL; 1735 cork.base.opt = NULL;
1736 v6_cork.opt = NULL; 1736 v6_cork.opt = NULL;
1737 err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6); 1737 err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6);
1738 if (err) 1738 if (err) {
1739 ip6_cork_release(&cork, &v6_cork);
1739 return ERR_PTR(err); 1740 return ERR_PTR(err);
1740 1741 }
1741 if (ipc6->dontfrag < 0) 1742 if (ipc6->dontfrag < 0)
1742 ipc6->dontfrag = inet6_sk(sk)->dontfrag; 1743 ipc6->dontfrag = inet6_sk(sk)->dontfrag;
1743 1744
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 931c38f6ff4a..9a7cf355bc8c 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1074,10 +1074,11 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1074 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); 1074 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
1075 neigh_release(neigh); 1075 neigh_release(neigh);
1076 } 1076 }
1077 } else if (!(t->parms.flags & 1077 } else if (t->parms.proto != 0 && !(t->parms.flags &
1078 (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) { 1078 (IP6_TNL_F_USE_ORIG_TCLASS |
1079 /* enable the cache only only if the routing decision does 1079 IP6_TNL_F_USE_ORIG_FWMARK))) {
1080 * not depend on the current inner header value 1080 /* enable the cache only if neither the outer protocol nor the
1081 * routing decision depends on the current inner header value
1081 */ 1082 */
1082 use_cache = true; 1083 use_cache = true;
1083 } 1084 }
@@ -1676,11 +1677,11 @@ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1676{ 1677{
1677 struct ip6_tnl *tnl = netdev_priv(dev); 1678 struct ip6_tnl *tnl = netdev_priv(dev);
1678 1679
1679 if (tnl->parms.proto == IPPROTO_IPIP) { 1680 if (tnl->parms.proto == IPPROTO_IPV6) {
1680 if (new_mtu < ETH_MIN_MTU) 1681 if (new_mtu < IPV6_MIN_MTU)
1681 return -EINVAL; 1682 return -EINVAL;
1682 } else { 1683 } else {
1683 if (new_mtu < IPV6_MIN_MTU) 1684 if (new_mtu < ETH_MIN_MTU)
1684 return -EINVAL; 1685 return -EINVAL;
1685 } 1686 }
1686 if (new_mtu > 0xFFF8 - dev->hard_header_len) 1687 if (new_mtu > 0xFFF8 - dev->hard_header_len)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 70e9d2ca8bbe..4daafb07602f 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -3632,6 +3632,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
3632 } 3632 }
3633 return true; 3633 return true;
3634 case NL80211_IFTYPE_MESH_POINT: 3634 case NL80211_IFTYPE_MESH_POINT:
3635 if (ether_addr_equal(sdata->vif.addr, hdr->addr2))
3636 return false;
3635 if (multicast) 3637 if (multicast)
3636 return true; 3638 return true;
3637 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 3639 return ether_addr_equal(sdata->vif.addr, hdr->addr1);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 10798b357481..07bd4138c84e 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2072,7 +2072,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
2072 continue; 2072 continue;
2073 2073
2074 list_for_each_entry_rcu(chain, &table->chains, list) { 2074 list_for_each_entry_rcu(chain, &table->chains, list) {
2075 if (ctx && ctx->chain[0] && 2075 if (ctx && ctx->chain &&
2076 strcmp(ctx->chain, chain->name) != 0) 2076 strcmp(ctx->chain, chain->name) != 0)
2077 continue; 2077 continue;
2078 2078
@@ -4665,8 +4665,10 @@ static int nf_tables_dump_obj_done(struct netlink_callback *cb)
4665{ 4665{
4666 struct nft_obj_filter *filter = cb->data; 4666 struct nft_obj_filter *filter = cb->data;
4667 4667
4668 kfree(filter->table); 4668 if (filter) {
4669 kfree(filter); 4669 kfree(filter->table);
4670 kfree(filter);
4671 }
4670 4672
4671 return 0; 4673 return 0;
4672} 4674}
diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c
index 1f7fbd3c7e5a..06b090d8e901 100644
--- a/net/netfilter/xt_bpf.c
+++ b/net/netfilter/xt_bpf.c
@@ -55,21 +55,11 @@ static int __bpf_mt_check_fd(int fd, struct bpf_prog **ret)
55 55
56static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret) 56static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret)
57{ 57{
58 mm_segment_t oldfs = get_fs();
59 int retval, fd;
60
61 if (strnlen(path, XT_BPF_PATH_MAX) == XT_BPF_PATH_MAX) 58 if (strnlen(path, XT_BPF_PATH_MAX) == XT_BPF_PATH_MAX)
62 return -EINVAL; 59 return -EINVAL;
63 60
64 set_fs(KERNEL_DS); 61 *ret = bpf_prog_get_type_path(path, BPF_PROG_TYPE_SOCKET_FILTER);
65 fd = bpf_obj_get_user(path, 0); 62 return PTR_ERR_OR_ZERO(*ret);
66 set_fs(oldfs);
67 if (fd < 0)
68 return fd;
69
70 retval = __bpf_mt_check_fd(fd, ret);
71 sys_close(fd);
72 return retval;
73} 63}
74 64
75static int bpf_mt_check(const struct xt_mtchk_param *par) 65static int bpf_mt_check(const struct xt_mtchk_param *par)
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index bc2f1e0977d6..634cfcb7bba6 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -525,6 +525,9 @@ int rds_rdma_extra_size(struct rds_rdma_args *args)
525 525
526 local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr; 526 local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
527 527
528 if (args->nr_local == 0)
529 return -EINVAL;
530
528 /* figure out the number of pages in the vector */ 531 /* figure out the number of pages in the vector */
529 for (i = 0; i < args->nr_local; i++) { 532 for (i = 0; i < args->nr_local; i++) {
530 if (copy_from_user(&vec, &local_vec[i], 533 if (copy_from_user(&vec, &local_vec[i],
@@ -874,6 +877,7 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
874err: 877err:
875 if (page) 878 if (page)
876 put_page(page); 879 put_page(page);
880 rm->atomic.op_active = 0;
877 kfree(rm->atomic.op_notifier); 881 kfree(rm->atomic.op_notifier);
878 882
879 return ret; 883 return ret;
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index e29a48ef7fc3..a0ac42b3ed06 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -159,7 +159,7 @@ static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets,
159 if (action == TC_ACT_SHOT) 159 if (action == TC_ACT_SHOT)
160 this_cpu_ptr(gact->common.cpu_qstats)->drops += packets; 160 this_cpu_ptr(gact->common.cpu_qstats)->drops += packets;
161 161
162 tm->lastuse = lastuse; 162 tm->lastuse = max_t(u64, tm->lastuse, lastuse);
163} 163}
164 164
165static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, 165static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a,
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 8b3e59388480..08b61849c2a2 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -239,7 +239,7 @@ static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
239 struct tcf_t *tm = &m->tcf_tm; 239 struct tcf_t *tm = &m->tcf_tm;
240 240
241 _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets); 241 _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
242 tm->lastuse = lastuse; 242 tm->lastuse = max_t(u64, tm->lastuse, lastuse);
243} 243}
244 244
245static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, 245static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 621b5ca3fd1c..141c9c466ec1 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -399,20 +399,24 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
399 return; 399 return;
400 } 400 }
401 401
402 if (t->param_flags & SPP_PMTUD_ENABLE) { 402 if (!(t->param_flags & SPP_PMTUD_ENABLE))
403 /* Update transports view of the MTU */ 403 /* We can't allow retransmitting in such case, as the
404 sctp_transport_update_pmtu(t, pmtu); 404 * retransmission would be sized just as before, and thus we
405 405 * would get another icmp, and retransmit again.
406 /* Update association pmtu. */ 406 */
407 sctp_assoc_sync_pmtu(asoc); 407 return;
408 }
409 408
410 /* Retransmit with the new pmtu setting. 409 /* Update transports view of the MTU. Return if no update was needed.
411 * Normally, if PMTU discovery is disabled, an ICMP Fragmentation 410 * If an update wasn't needed/possible, it also doesn't make sense to
412 * Needed will never be sent, but if a message was sent before 411 * try to retransmit now.
413 * PMTU discovery was disabled that was larger than the PMTU, it
414 * would not be fragmented, so it must be re-transmitted fragmented.
415 */ 412 */
413 if (!sctp_transport_update_pmtu(t, pmtu))
414 return;
415
416 /* Update association pmtu. */
417 sctp_assoc_sync_pmtu(asoc);
418
419 /* Retransmit with the new pmtu setting. */
416 sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD); 420 sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
417} 421}
418 422
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index b4fb6e4886d2..9b01e994f661 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2277,7 +2277,7 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
2277 2277
2278 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) { 2278 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
2279 event = sctp_ulpevent_make_sender_dry_event(asoc, 2279 event = sctp_ulpevent_make_sender_dry_event(asoc,
2280 GFP_ATOMIC); 2280 GFP_USER | __GFP_NOWARN);
2281 if (!event) 2281 if (!event)
2282 return -ENOMEM; 2282 return -ENOMEM;
2283 2283
@@ -3498,6 +3498,8 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
3498 3498
3499 if (optlen < sizeof(struct sctp_hmacalgo)) 3499 if (optlen < sizeof(struct sctp_hmacalgo))
3500 return -EINVAL; 3500 return -EINVAL;
3501 optlen = min_t(unsigned int, optlen, sizeof(struct sctp_hmacalgo) +
3502 SCTP_AUTH_NUM_HMACS * sizeof(u16));
3501 3503
3502 hmacs = memdup_user(optval, optlen); 3504 hmacs = memdup_user(optval, optlen);
3503 if (IS_ERR(hmacs)) 3505 if (IS_ERR(hmacs))
@@ -3536,6 +3538,11 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
3536 3538
3537 if (optlen <= sizeof(struct sctp_authkey)) 3539 if (optlen <= sizeof(struct sctp_authkey))
3538 return -EINVAL; 3540 return -EINVAL;
3541 /* authkey->sca_keylength is u16, so optlen can't be bigger than
3542 * this.
3543 */
3544 optlen = min_t(unsigned int, optlen, USHRT_MAX +
3545 sizeof(struct sctp_authkey));
3539 3546
3540 authkey = memdup_user(optval, optlen); 3547 authkey = memdup_user(optval, optlen);
3541 if (IS_ERR(authkey)) 3548 if (IS_ERR(authkey))
@@ -3893,6 +3900,9 @@ static int sctp_setsockopt_reset_streams(struct sock *sk,
3893 3900
3894 if (optlen < sizeof(*params)) 3901 if (optlen < sizeof(*params))
3895 return -EINVAL; 3902 return -EINVAL;
3903 /* srs_number_streams is u16, so optlen can't be bigger than this. */
3904 optlen = min_t(unsigned int, optlen, USHRT_MAX +
3905 sizeof(__u16) * sizeof(*params));
3896 3906
3897 params = memdup_user(optval, optlen); 3907 params = memdup_user(optval, optlen);
3898 if (IS_ERR(params)) 3908 if (IS_ERR(params))
@@ -5015,7 +5025,7 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
5015 len = sizeof(int); 5025 len = sizeof(int);
5016 if (put_user(len, optlen)) 5026 if (put_user(len, optlen))
5017 return -EFAULT; 5027 return -EFAULT;
5018 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) 5028 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, len))
5019 return -EFAULT; 5029 return -EFAULT;
5020 return 0; 5030 return 0;
5021} 5031}
@@ -5645,6 +5655,9 @@ copy_getaddrs:
5645 err = -EFAULT; 5655 err = -EFAULT;
5646 goto out; 5656 goto out;
5647 } 5657 }
5658 /* XXX: We should have accounted for sizeof(struct sctp_getaddrs) too,
5659 * but we can't change it anymore.
5660 */
5648 if (put_user(bytes_copied, optlen)) 5661 if (put_user(bytes_copied, optlen))
5649 err = -EFAULT; 5662 err = -EFAULT;
5650out: 5663out:
@@ -6081,7 +6094,7 @@ static int sctp_getsockopt_maxseg(struct sock *sk, int len,
6081 params.assoc_id = 0; 6094 params.assoc_id = 0;
6082 } else if (len >= sizeof(struct sctp_assoc_value)) { 6095 } else if (len >= sizeof(struct sctp_assoc_value)) {
6083 len = sizeof(struct sctp_assoc_value); 6096 len = sizeof(struct sctp_assoc_value);
6084 if (copy_from_user(&params, optval, sizeof(params))) 6097 if (copy_from_user(&params, optval, len))
6085 return -EFAULT; 6098 return -EFAULT;
6086 } else 6099 } else
6087 return -EINVAL; 6100 return -EINVAL;
@@ -6251,7 +6264,9 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
6251 6264
6252 if (len < sizeof(struct sctp_authkeyid)) 6265 if (len < sizeof(struct sctp_authkeyid))
6253 return -EINVAL; 6266 return -EINVAL;
6254 if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid))) 6267
6268 len = sizeof(struct sctp_authkeyid);
6269 if (copy_from_user(&val, optval, len))
6255 return -EFAULT; 6270 return -EFAULT;
6256 6271
6257 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 6272 asoc = sctp_id2assoc(sk, val.scact_assoc_id);
@@ -6263,7 +6278,6 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
6263 else 6278 else
6264 val.scact_keynumber = ep->active_key_id; 6279 val.scact_keynumber = ep->active_key_id;
6265 6280
6266 len = sizeof(struct sctp_authkeyid);
6267 if (put_user(len, optlen)) 6281 if (put_user(len, optlen))
6268 return -EFAULT; 6282 return -EFAULT;
6269 if (copy_to_user(optval, &val, len)) 6283 if (copy_to_user(optval, &val, len))
@@ -6289,7 +6303,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
6289 if (len < sizeof(struct sctp_authchunks)) 6303 if (len < sizeof(struct sctp_authchunks))
6290 return -EINVAL; 6304 return -EINVAL;
6291 6305
6292 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 6306 if (copy_from_user(&val, optval, sizeof(val)))
6293 return -EFAULT; 6307 return -EFAULT;
6294 6308
6295 to = p->gauth_chunks; 6309 to = p->gauth_chunks;
@@ -6334,7 +6348,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
6334 if (len < sizeof(struct sctp_authchunks)) 6348 if (len < sizeof(struct sctp_authchunks))
6335 return -EINVAL; 6349 return -EINVAL;
6336 6350
6337 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 6351 if (copy_from_user(&val, optval, sizeof(val)))
6338 return -EFAULT; 6352 return -EFAULT;
6339 6353
6340 to = p->gauth_chunks; 6354 to = p->gauth_chunks;
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 76ea66be0bbe..524dfeb94c41 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -156,9 +156,9 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
156 sctp_stream_outq_migrate(stream, NULL, outcnt); 156 sctp_stream_outq_migrate(stream, NULL, outcnt);
157 sched->sched_all(stream); 157 sched->sched_all(stream);
158 158
159 i = sctp_stream_alloc_out(stream, outcnt, gfp); 159 ret = sctp_stream_alloc_out(stream, outcnt, gfp);
160 if (i) 160 if (ret)
161 return i; 161 goto out;
162 162
163 stream->outcnt = outcnt; 163 stream->outcnt = outcnt;
164 for (i = 0; i < stream->outcnt; i++) 164 for (i = 0; i < stream->outcnt; i++)
@@ -170,19 +170,17 @@ in:
170 if (!incnt) 170 if (!incnt)
171 goto out; 171 goto out;
172 172
173 i = sctp_stream_alloc_in(stream, incnt, gfp); 173 ret = sctp_stream_alloc_in(stream, incnt, gfp);
174 if (i) { 174 if (ret) {
175 ret = -ENOMEM; 175 sched->free(stream);
176 goto free; 176 kfree(stream->out);
177 stream->out = NULL;
178 stream->outcnt = 0;
179 goto out;
177 } 180 }
178 181
179 stream->incnt = incnt; 182 stream->incnt = incnt;
180 goto out;
181 183
182free:
183 sched->free(stream);
184 kfree(stream->out);
185 stream->out = NULL;
186out: 184out:
187 return ret; 185 return ret;
188} 186}
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 1e5a22430cf5..47f82bd794d9 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -248,28 +248,37 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
248 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; 248 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
249} 249}
250 250
251void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) 251bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
252{ 252{
253 struct dst_entry *dst = sctp_transport_dst_check(t); 253 struct dst_entry *dst = sctp_transport_dst_check(t);
254 bool change = true;
254 255
255 if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { 256 if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
256 pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n", 257 pr_warn_ratelimited("%s: Reported pmtu %d too low, using default minimum of %d\n",
257 __func__, pmtu, SCTP_DEFAULT_MINSEGMENT); 258 __func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
258 /* Use default minimum segment size and disable 259 /* Use default minimum segment instead */
259 * pmtu discovery on this transport. 260 pmtu = SCTP_DEFAULT_MINSEGMENT;
260 */
261 t->pathmtu = SCTP_DEFAULT_MINSEGMENT;
262 } else {
263 t->pathmtu = pmtu;
264 } 261 }
262 pmtu = SCTP_TRUNC4(pmtu);
265 263
266 if (dst) { 264 if (dst) {
267 dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu); 265 dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu);
268 dst = sctp_transport_dst_check(t); 266 dst = sctp_transport_dst_check(t);
269 } 267 }
270 268
271 if (!dst) 269 if (!dst) {
272 t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk); 270 t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk);
271 dst = t->dst;
272 }
273
274 if (dst) {
275 /* Re-fetch, as under layers may have a higher minimum size */
276 pmtu = SCTP_TRUNC4(dst_mtu(dst));
277 change = t->pathmtu != pmtu;
278 }
279 t->pathmtu = pmtu;
280
281 return change;
273} 282}
274 283
275/* Caches the dst entry and source address for a transport's destination 284/* Caches the dst entry and source address for a transport's destination
diff --git a/net/socket.c b/net/socket.c
index 05f361faec45..6f05d5c4bf30 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -436,8 +436,10 @@ static int sock_map_fd(struct socket *sock, int flags)
436{ 436{
437 struct file *newfile; 437 struct file *newfile;
438 int fd = get_unused_fd_flags(flags); 438 int fd = get_unused_fd_flags(flags);
439 if (unlikely(fd < 0)) 439 if (unlikely(fd < 0)) {
440 sock_release(sock);
440 return fd; 441 return fd;
442 }
441 443
442 newfile = sock_alloc_file(sock, flags, NULL); 444 newfile = sock_alloc_file(sock, flags, NULL);
443 if (likely(!IS_ERR(newfile))) { 445 if (likely(!IS_ERR(newfile))) {
@@ -2619,6 +2621,15 @@ out_fs:
2619 2621
2620core_initcall(sock_init); /* early initcall */ 2622core_initcall(sock_init); /* early initcall */
2621 2623
2624static int __init jit_init(void)
2625{
2626#ifdef CONFIG_BPF_JIT_ALWAYS_ON
2627 bpf_jit_enable = 1;
2628#endif
2629 return 0;
2630}
2631pure_initcall(jit_init);
2632
2622#ifdef CONFIG_PROC_FS 2633#ifdef CONFIG_PROC_FS
2623void socket_seq_show(struct seq_file *seq) 2634void socket_seq_show(struct seq_file *seq)
2624{ 2635{
diff --git a/net/tipc/group.c b/net/tipc/group.c
index 8e12ab55346b..5f4ffae807ee 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -109,7 +109,8 @@ static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
109static void tipc_group_decr_active(struct tipc_group *grp, 109static void tipc_group_decr_active(struct tipc_group *grp,
110 struct tipc_member *m) 110 struct tipc_member *m)
111{ 111{
112 if (m->state == MBR_ACTIVE || m->state == MBR_RECLAIMING) 112 if (m->state == MBR_ACTIVE || m->state == MBR_RECLAIMING ||
113 m->state == MBR_REMITTED)
113 grp->active_cnt--; 114 grp->active_cnt--;
114} 115}
115 116
@@ -562,7 +563,7 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
562 int max_active = grp->max_active; 563 int max_active = grp->max_active;
563 int reclaim_limit = max_active * 3 / 4; 564 int reclaim_limit = max_active * 3 / 4;
564 int active_cnt = grp->active_cnt; 565 int active_cnt = grp->active_cnt;
565 struct tipc_member *m, *rm; 566 struct tipc_member *m, *rm, *pm;
566 567
567 m = tipc_group_find_member(grp, node, port); 568 m = tipc_group_find_member(grp, node, port);
568 if (!m) 569 if (!m)
@@ -605,6 +606,17 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
605 pr_warn_ratelimited("Rcv unexpected msg after REMIT\n"); 606 pr_warn_ratelimited("Rcv unexpected msg after REMIT\n");
606 tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq); 607 tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
607 } 608 }
609 grp->active_cnt--;
610 list_del_init(&m->list);
611 if (list_empty(&grp->pending))
612 return;
613
614 /* Set oldest pending member to active and advertise */
615 pm = list_first_entry(&grp->pending, struct tipc_member, list);
616 pm->state = MBR_ACTIVE;
617 list_move_tail(&pm->list, &grp->active);
618 grp->active_cnt++;
619 tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
608 break; 620 break;
609 case MBR_RECLAIMING: 621 case MBR_RECLAIMING:
610 case MBR_DISCOVERED: 622 case MBR_DISCOVERED:
@@ -742,14 +754,14 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
742 if (!m || m->state != MBR_RECLAIMING) 754 if (!m || m->state != MBR_RECLAIMING)
743 return; 755 return;
744 756
745 list_del_init(&m->list);
746 grp->active_cnt--;
747 remitted = msg_grp_remitted(hdr); 757 remitted = msg_grp_remitted(hdr);
748 758
749 /* Messages preceding the REMIT still in receive queue */ 759 /* Messages preceding the REMIT still in receive queue */
750 if (m->advertised > remitted) { 760 if (m->advertised > remitted) {
751 m->state = MBR_REMITTED; 761 m->state = MBR_REMITTED;
752 in_flight = m->advertised - remitted; 762 in_flight = m->advertised - remitted;
763 m->advertised = ADV_IDLE + in_flight;
764 return;
753 } 765 }
754 /* All messages preceding the REMIT have been read */ 766 /* All messages preceding the REMIT have been read */
755 if (m->advertised <= remitted) { 767 if (m->advertised <= remitted) {
@@ -761,6 +773,8 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
761 tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq); 773 tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
762 774
763 m->advertised = ADV_IDLE + in_flight; 775 m->advertised = ADV_IDLE + in_flight;
776 grp->active_cnt--;
777 list_del_init(&m->list);
764 778
765 /* Set oldest pending member to active and advertise */ 779 /* Set oldest pending member to active and advertise */
766 if (list_empty(&grp->pending)) 780 if (list_empty(&grp->pending))
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 213d0c498c97..2b3dbcd40e46 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -11361,7 +11361,8 @@ static int nl80211_nan_add_func(struct sk_buff *skb,
11361 break; 11361 break;
11362 case NL80211_NAN_FUNC_FOLLOW_UP: 11362 case NL80211_NAN_FUNC_FOLLOW_UP:
11363 if (!tb[NL80211_NAN_FUNC_FOLLOW_UP_ID] || 11363 if (!tb[NL80211_NAN_FUNC_FOLLOW_UP_ID] ||
11364 !tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID]) { 11364 !tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID] ||
11365 !tb[NL80211_NAN_FUNC_FOLLOW_UP_DEST]) {
11365 err = -EINVAL; 11366 err = -EINVAL;
11366 goto out; 11367 goto out;
11367 } 11368 }
diff --git a/scripts/genksyms/.gitignore b/scripts/genksyms/.gitignore
index 86dc07a01b43..e7836b47f060 100644
--- a/scripts/genksyms/.gitignore
+++ b/scripts/genksyms/.gitignore
@@ -1,4 +1,3 @@
1*.hash.c
2*.lex.c 1*.lex.c
3*.tab.c 2*.tab.c
4*.tab.h 3*.tab.h
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index cbf4996dd9c1..8cee597d33a5 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -893,7 +893,10 @@ static enum string_value_kind expr_parse_string(const char *str,
893 switch (type) { 893 switch (type) {
894 case S_BOOLEAN: 894 case S_BOOLEAN:
895 case S_TRISTATE: 895 case S_TRISTATE:
896 return k_string; 896 val->s = !strcmp(str, "n") ? 0 :
897 !strcmp(str, "m") ? 1 :
898 !strcmp(str, "y") ? 2 : -1;
899 return k_signed;
897 case S_INT: 900 case S_INT:
898 val->s = strtoll(str, &tail, 10); 901 val->s = strtoll(str, &tail, 10);
899 kind = k_signed; 902 kind = k_signed;
diff --git a/security/Kconfig b/security/Kconfig
index a623d13bf288..b0cb9a5f9448 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -56,13 +56,14 @@ config SECURITY_NETWORK
56 56
57config PAGE_TABLE_ISOLATION 57config PAGE_TABLE_ISOLATION
58 bool "Remove the kernel mapping in user mode" 58 bool "Remove the kernel mapping in user mode"
59 default y
59 depends on X86_64 && !UML 60 depends on X86_64 && !UML
60 help 61 help
61 This feature reduces the number of hardware side channels by 62 This feature reduces the number of hardware side channels by
62 ensuring that the majority of kernel addresses are not mapped 63 ensuring that the majority of kernel addresses are not mapped
63 into userspace. 64 into userspace.
64 65
65 See Documentation/x86/pagetable-isolation.txt for more details. 66 See Documentation/x86/pti.txt for more details.
66 67
67config SECURITY_INFINIBAND 68config SECURITY_INFINIBAND
68 bool "Infiniband Security Hooks" 69 bool "Infiniband Security Hooks"
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 04ba9d0718ea..6a54d2ffa840 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -330,10 +330,7 @@ static struct aa_profile *__attach_match(const char *name,
330 continue; 330 continue;
331 331
332 if (profile->xmatch) { 332 if (profile->xmatch) {
333 if (profile->xmatch_len == len) { 333 if (profile->xmatch_len >= len) {
334 conflict = true;
335 continue;
336 } else if (profile->xmatch_len > len) {
337 unsigned int state; 334 unsigned int state;
338 u32 perm; 335 u32 perm;
339 336
@@ -342,6 +339,10 @@ static struct aa_profile *__attach_match(const char *name,
342 perm = dfa_user_allow(profile->xmatch, state); 339 perm = dfa_user_allow(profile->xmatch, state);
343 /* any accepting state means a valid match. */ 340 /* any accepting state means a valid match. */
344 if (perm & MAY_EXEC) { 341 if (perm & MAY_EXEC) {
342 if (profile->xmatch_len == len) {
343 conflict = true;
344 continue;
345 }
345 candidate = profile; 346 candidate = profile;
346 len = profile->xmatch_len; 347 len = profile->xmatch_len;
347 conflict = false; 348 conflict = false;
diff --git a/security/apparmor/include/perms.h b/security/apparmor/include/perms.h
index 2b27bb79aec4..d7b7e7115160 100644
--- a/security/apparmor/include/perms.h
+++ b/security/apparmor/include/perms.h
@@ -133,6 +133,9 @@ extern struct aa_perms allperms;
133#define xcheck_labels_profiles(L1, L2, FN, args...) \ 133#define xcheck_labels_profiles(L1, L2, FN, args...) \
134 xcheck_ns_labels((L1), (L2), xcheck_ns_profile_label, (FN), args) 134 xcheck_ns_labels((L1), (L2), xcheck_ns_profile_label, (FN), args)
135 135
136#define xcheck_labels(L1, L2, P, FN1, FN2) \
137 xcheck(fn_for_each((L1), (P), (FN1)), fn_for_each((L2), (P), (FN2)))
138
136 139
137void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask); 140void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask);
138void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask); 141void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask);
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
index 7ca0032e7ba9..b40678f3c1d5 100644
--- a/security/apparmor/ipc.c
+++ b/security/apparmor/ipc.c
@@ -64,40 +64,48 @@ static void audit_ptrace_cb(struct audit_buffer *ab, void *va)
64 FLAGS_NONE, GFP_ATOMIC); 64 FLAGS_NONE, GFP_ATOMIC);
65} 65}
66 66
67/* assumes check for PROFILE_MEDIATES is already done */
67/* TODO: conditionals */ 68/* TODO: conditionals */
68static int profile_ptrace_perm(struct aa_profile *profile, 69static int profile_ptrace_perm(struct aa_profile *profile,
69 struct aa_profile *peer, u32 request, 70 struct aa_label *peer, u32 request,
70 struct common_audit_data *sa) 71 struct common_audit_data *sa)
71{ 72{
72 struct aa_perms perms = { }; 73 struct aa_perms perms = { };
73 74
74 /* need because of peer in cross check */ 75 aad(sa)->peer = peer;
75 if (profile_unconfined(profile) || 76 aa_profile_match_label(profile, peer, AA_CLASS_PTRACE, request,
76 !PROFILE_MEDIATES(profile, AA_CLASS_PTRACE))
77 return 0;
78
79 aad(sa)->peer = &peer->label;
80 aa_profile_match_label(profile, &peer->label, AA_CLASS_PTRACE, request,
81 &perms); 77 &perms);
82 aa_apply_modes_to_perms(profile, &perms); 78 aa_apply_modes_to_perms(profile, &perms);
83 return aa_check_perms(profile, &perms, request, sa, audit_ptrace_cb); 79 return aa_check_perms(profile, &perms, request, sa, audit_ptrace_cb);
84} 80}
85 81
86static int cross_ptrace_perm(struct aa_profile *tracer, 82static int profile_tracee_perm(struct aa_profile *tracee,
87 struct aa_profile *tracee, u32 request, 83 struct aa_label *tracer, u32 request,
88 struct common_audit_data *sa) 84 struct common_audit_data *sa)
89{ 85{
86 if (profile_unconfined(tracee) || unconfined(tracer) ||
87 !PROFILE_MEDIATES(tracee, AA_CLASS_PTRACE))
88 return 0;
89
90 return profile_ptrace_perm(tracee, tracer, request, sa);
91}
92
93static int profile_tracer_perm(struct aa_profile *tracer,
94 struct aa_label *tracee, u32 request,
95 struct common_audit_data *sa)
96{
97 if (profile_unconfined(tracer))
98 return 0;
99
90 if (PROFILE_MEDIATES(tracer, AA_CLASS_PTRACE)) 100 if (PROFILE_MEDIATES(tracer, AA_CLASS_PTRACE))
91 return xcheck(profile_ptrace_perm(tracer, tracee, request, sa), 101 return profile_ptrace_perm(tracer, tracee, request, sa);
92 profile_ptrace_perm(tracee, tracer, 102
93 request << PTRACE_PERM_SHIFT, 103 /* profile uses the old style capability check for ptrace */
94 sa)); 104 if (&tracer->label == tracee)
95 /* policy uses the old style capability check for ptrace */
96 if (profile_unconfined(tracer) || tracer == tracee)
97 return 0; 105 return 0;
98 106
99 aad(sa)->label = &tracer->label; 107 aad(sa)->label = &tracer->label;
100 aad(sa)->peer = &tracee->label; 108 aad(sa)->peer = tracee;
101 aad(sa)->request = 0; 109 aad(sa)->request = 0;
102 aad(sa)->error = aa_capable(&tracer->label, CAP_SYS_PTRACE, 1); 110 aad(sa)->error = aa_capable(&tracer->label, CAP_SYS_PTRACE, 1);
103 111
@@ -115,10 +123,13 @@ static int cross_ptrace_perm(struct aa_profile *tracer,
115int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee, 123int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
116 u32 request) 124 u32 request)
117{ 125{
126 struct aa_profile *profile;
127 u32 xrequest = request << PTRACE_PERM_SHIFT;
118 DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, OP_PTRACE); 128 DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, OP_PTRACE);
119 129
120 return xcheck_labels_profiles(tracer, tracee, cross_ptrace_perm, 130 return xcheck_labels(tracer, tracee, profile,
121 request, &sa); 131 profile_tracer_perm(profile, tracee, request, &sa),
132 profile_tracee_perm(profile, tracer, xrequest, &sa));
122} 133}
123 134
124 135
diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c
index ed9b4d0f9f7e..8c558cbce930 100644
--- a/security/apparmor/mount.c
+++ b/security/apparmor/mount.c
@@ -329,6 +329,9 @@ static int match_mnt_path_str(struct aa_profile *profile,
329 AA_BUG(!mntpath); 329 AA_BUG(!mntpath);
330 AA_BUG(!buffer); 330 AA_BUG(!buffer);
331 331
332 if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
333 return 0;
334
332 error = aa_path_name(mntpath, path_flags(profile, mntpath), buffer, 335 error = aa_path_name(mntpath, path_flags(profile, mntpath), buffer,
333 &mntpnt, &info, profile->disconnected); 336 &mntpnt, &info, profile->disconnected);
334 if (error) 337 if (error)
@@ -380,6 +383,9 @@ static int match_mnt(struct aa_profile *profile, const struct path *path,
380 AA_BUG(!profile); 383 AA_BUG(!profile);
381 AA_BUG(devpath && !devbuffer); 384 AA_BUG(devpath && !devbuffer);
382 385
386 if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
387 return 0;
388
383 if (devpath) { 389 if (devpath) {
384 error = aa_path_name(devpath, path_flags(profile, devpath), 390 error = aa_path_name(devpath, path_flags(profile, devpath),
385 devbuffer, &devname, &info, 391 devbuffer, &devname, &info,
@@ -558,6 +564,9 @@ static int profile_umount(struct aa_profile *profile, struct path *path,
558 AA_BUG(!profile); 564 AA_BUG(!profile);
559 AA_BUG(!path); 565 AA_BUG(!path);
560 566
567 if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
568 return 0;
569
561 error = aa_path_name(path, path_flags(profile, path), buffer, &name, 570 error = aa_path_name(path, path_flags(profile, path), buffer, &name,
562 &info, profile->disconnected); 571 &info, profile->disconnected);
563 if (error) 572 if (error)
@@ -613,7 +622,8 @@ static struct aa_label *build_pivotroot(struct aa_profile *profile,
613 AA_BUG(!new_path); 622 AA_BUG(!new_path);
614 AA_BUG(!old_path); 623 AA_BUG(!old_path);
615 624
616 if (profile_unconfined(profile)) 625 if (profile_unconfined(profile) ||
626 !PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
617 return aa_get_newest_label(&profile->label); 627 return aa_get_newest_label(&profile->label);
618 628
619 error = aa_path_name(old_path, path_flags(profile, old_path), 629 error = aa_path_name(old_path, path_flags(profile, old_path),
diff --git a/security/commoncap.c b/security/commoncap.c
index 4f8e09340956..48620c93d697 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -348,21 +348,18 @@ static __u32 sansflags(__u32 m)
348 return m & ~VFS_CAP_FLAGS_EFFECTIVE; 348 return m & ~VFS_CAP_FLAGS_EFFECTIVE;
349} 349}
350 350
351static bool is_v2header(size_t size, __le32 magic) 351static bool is_v2header(size_t size, const struct vfs_cap_data *cap)
352{ 352{
353 __u32 m = le32_to_cpu(magic);
354 if (size != XATTR_CAPS_SZ_2) 353 if (size != XATTR_CAPS_SZ_2)
355 return false; 354 return false;
356 return sansflags(m) == VFS_CAP_REVISION_2; 355 return sansflags(le32_to_cpu(cap->magic_etc)) == VFS_CAP_REVISION_2;
357} 356}
358 357
359static bool is_v3header(size_t size, __le32 magic) 358static bool is_v3header(size_t size, const struct vfs_cap_data *cap)
360{ 359{
361 __u32 m = le32_to_cpu(magic);
362
363 if (size != XATTR_CAPS_SZ_3) 360 if (size != XATTR_CAPS_SZ_3)
364 return false; 361 return false;
365 return sansflags(m) == VFS_CAP_REVISION_3; 362 return sansflags(le32_to_cpu(cap->magic_etc)) == VFS_CAP_REVISION_3;
366} 363}
367 364
368/* 365/*
@@ -405,7 +402,7 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
405 402
406 fs_ns = inode->i_sb->s_user_ns; 403 fs_ns = inode->i_sb->s_user_ns;
407 cap = (struct vfs_cap_data *) tmpbuf; 404 cap = (struct vfs_cap_data *) tmpbuf;
408 if (is_v2header((size_t) ret, cap->magic_etc)) { 405 if (is_v2header((size_t) ret, cap)) {
409 /* If this is sizeof(vfs_cap_data) then we're ok with the 406 /* If this is sizeof(vfs_cap_data) then we're ok with the
410 * on-disk value, so return that. */ 407 * on-disk value, so return that. */
411 if (alloc) 408 if (alloc)
@@ -413,7 +410,7 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
413 else 410 else
414 kfree(tmpbuf); 411 kfree(tmpbuf);
415 return ret; 412 return ret;
416 } else if (!is_v3header((size_t) ret, cap->magic_etc)) { 413 } else if (!is_v3header((size_t) ret, cap)) {
417 kfree(tmpbuf); 414 kfree(tmpbuf);
418 return -EINVAL; 415 return -EINVAL;
419 } 416 }
@@ -470,9 +467,9 @@ static kuid_t rootid_from_xattr(const void *value, size_t size,
470 return make_kuid(task_ns, rootid); 467 return make_kuid(task_ns, rootid);
471} 468}
472 469
473static bool validheader(size_t size, __le32 magic) 470static bool validheader(size_t size, const struct vfs_cap_data *cap)
474{ 471{
475 return is_v2header(size, magic) || is_v3header(size, magic); 472 return is_v2header(size, cap) || is_v3header(size, cap);
476} 473}
477 474
478/* 475/*
@@ -495,7 +492,7 @@ int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size)
495 492
496 if (!*ivalue) 493 if (!*ivalue)
497 return -EINVAL; 494 return -EINVAL;
498 if (!validheader(size, cap->magic_etc)) 495 if (!validheader(size, cap))
499 return -EINVAL; 496 return -EINVAL;
500 if (!capable_wrt_inode_uidgid(inode, CAP_SETFCAP)) 497 if (!capable_wrt_inode_uidgid(inode, CAP_SETFCAP))
501 return -EPERM; 498 return -EPERM;
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index e49f448ee04f..c2db7e905f7d 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -455,7 +455,6 @@ static int snd_pcm_hw_param_near(struct snd_pcm_substream *pcm,
455 v = snd_pcm_hw_param_last(pcm, params, var, dir); 455 v = snd_pcm_hw_param_last(pcm, params, var, dir);
456 else 456 else
457 v = snd_pcm_hw_param_first(pcm, params, var, dir); 457 v = snd_pcm_hw_param_first(pcm, params, var, dir);
458 snd_BUG_ON(v < 0);
459 return v; 458 return v;
460} 459}
461 460
@@ -1335,8 +1334,11 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
1335 1334
1336 if ((tmp = snd_pcm_oss_make_ready(substream)) < 0) 1335 if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
1337 return tmp; 1336 return tmp;
1338 mutex_lock(&runtime->oss.params_lock);
1339 while (bytes > 0) { 1337 while (bytes > 0) {
1338 if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
1339 tmp = -ERESTARTSYS;
1340 break;
1341 }
1340 if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) { 1342 if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
1341 tmp = bytes; 1343 tmp = bytes;
1342 if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes) 1344 if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes)
@@ -1380,14 +1382,18 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
1380 xfer += tmp; 1382 xfer += tmp;
1381 if ((substream->f_flags & O_NONBLOCK) != 0 && 1383 if ((substream->f_flags & O_NONBLOCK) != 0 &&
1382 tmp != runtime->oss.period_bytes) 1384 tmp != runtime->oss.period_bytes)
1383 break; 1385 tmp = -EAGAIN;
1384 } 1386 }
1385 }
1386 mutex_unlock(&runtime->oss.params_lock);
1387 return xfer;
1388
1389 err: 1387 err:
1390 mutex_unlock(&runtime->oss.params_lock); 1388 mutex_unlock(&runtime->oss.params_lock);
1389 if (tmp < 0)
1390 break;
1391 if (signal_pending(current)) {
1392 tmp = -ERESTARTSYS;
1393 break;
1394 }
1395 tmp = 0;
1396 }
1391 return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp; 1397 return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
1392} 1398}
1393 1399
@@ -1435,8 +1441,11 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
1435 1441
1436 if ((tmp = snd_pcm_oss_make_ready(substream)) < 0) 1442 if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
1437 return tmp; 1443 return tmp;
1438 mutex_lock(&runtime->oss.params_lock);
1439 while (bytes > 0) { 1444 while (bytes > 0) {
1445 if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
1446 tmp = -ERESTARTSYS;
1447 break;
1448 }
1440 if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) { 1449 if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
1441 if (runtime->oss.buffer_used == 0) { 1450 if (runtime->oss.buffer_used == 0) {
1442 tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1); 1451 tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1);
@@ -1467,12 +1476,16 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
1467 bytes -= tmp; 1476 bytes -= tmp;
1468 xfer += tmp; 1477 xfer += tmp;
1469 } 1478 }
1470 }
1471 mutex_unlock(&runtime->oss.params_lock);
1472 return xfer;
1473
1474 err: 1479 err:
1475 mutex_unlock(&runtime->oss.params_lock); 1480 mutex_unlock(&runtime->oss.params_lock);
1481 if (tmp < 0)
1482 break;
1483 if (signal_pending(current)) {
1484 tmp = -ERESTARTSYS;
1485 break;
1486 }
1487 tmp = 0;
1488 }
1476 return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp; 1489 return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
1477} 1490}
1478 1491
diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c
index cadc93792868..85a56af104bd 100644
--- a/sound/core/oss/pcm_plugin.c
+++ b/sound/core/oss/pcm_plugin.c
@@ -592,18 +592,26 @@ snd_pcm_sframes_t snd_pcm_plug_write_transfer(struct snd_pcm_substream *plug, st
592 snd_pcm_sframes_t frames = size; 592 snd_pcm_sframes_t frames = size;
593 593
594 plugin = snd_pcm_plug_first(plug); 594 plugin = snd_pcm_plug_first(plug);
595 while (plugin && frames > 0) { 595 while (plugin) {
596 if (frames <= 0)
597 return frames;
596 if ((next = plugin->next) != NULL) { 598 if ((next = plugin->next) != NULL) {
597 snd_pcm_sframes_t frames1 = frames; 599 snd_pcm_sframes_t frames1 = frames;
598 if (plugin->dst_frames) 600 if (plugin->dst_frames) {
599 frames1 = plugin->dst_frames(plugin, frames); 601 frames1 = plugin->dst_frames(plugin, frames);
602 if (frames1 <= 0)
603 return frames1;
604 }
600 if ((err = next->client_channels(next, frames1, &dst_channels)) < 0) { 605 if ((err = next->client_channels(next, frames1, &dst_channels)) < 0) {
601 return err; 606 return err;
602 } 607 }
603 if (err != frames1) { 608 if (err != frames1) {
604 frames = err; 609 frames = err;
605 if (plugin->src_frames) 610 if (plugin->src_frames) {
606 frames = plugin->src_frames(plugin, frames1); 611 frames = plugin->src_frames(plugin, frames1);
612 if (frames <= 0)
613 return frames;
614 }
607 } 615 }
608 } else 616 } else
609 dst_channels = NULL; 617 dst_channels = NULL;
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 10e7ef7a8804..db7894bb028c 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1632,7 +1632,7 @@ int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
1632 return changed; 1632 return changed;
1633 if (params->rmask) { 1633 if (params->rmask) {
1634 int err = snd_pcm_hw_refine(pcm, params); 1634 int err = snd_pcm_hw_refine(pcm, params);
1635 if (snd_BUG_ON(err < 0)) 1635 if (err < 0)
1636 return err; 1636 return err;
1637 } 1637 }
1638 return snd_pcm_hw_param_value(params, var, dir); 1638 return snd_pcm_hw_param_value(params, var, dir);
@@ -1678,7 +1678,7 @@ int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
1678 return changed; 1678 return changed;
1679 if (params->rmask) { 1679 if (params->rmask) {
1680 int err = snd_pcm_hw_refine(pcm, params); 1680 int err = snd_pcm_hw_refine(pcm, params);
1681 if (snd_BUG_ON(err < 0)) 1681 if (err < 0)
1682 return err; 1682 return err;
1683 } 1683 }
1684 return snd_pcm_hw_param_value(params, var, dir); 1684 return snd_pcm_hw_param_value(params, var, dir);
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index a4d92e46c459..f08772568c17 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2580,7 +2580,7 @@ static snd_pcm_sframes_t forward_appl_ptr(struct snd_pcm_substream *substream,
2580 return ret < 0 ? ret : frames; 2580 return ret < 0 ? ret : frames;
2581} 2581}
2582 2582
2583/* decrease the appl_ptr; returns the processed frames or a negative error */ 2583/* decrease the appl_ptr; returns the processed frames or zero for error */
2584static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream, 2584static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
2585 snd_pcm_uframes_t frames, 2585 snd_pcm_uframes_t frames,
2586 snd_pcm_sframes_t avail) 2586 snd_pcm_sframes_t avail)
@@ -2597,7 +2597,12 @@ static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
2597 if (appl_ptr < 0) 2597 if (appl_ptr < 0)
2598 appl_ptr += runtime->boundary; 2598 appl_ptr += runtime->boundary;
2599 ret = pcm_lib_apply_appl_ptr(substream, appl_ptr); 2599 ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2600 return ret < 0 ? ret : frames; 2600 /* NOTE: we return zero for errors because PulseAudio gets depressed
2601 * upon receiving an error from rewind ioctl and stops processing
2602 * any longer. Returning zero means that no rewind is done, so
2603 * it's not absolutely wrong to answer like that.
2604 */
2605 return ret < 0 ? 0 : frames;
2601} 2606}
2602 2607
2603static snd_pcm_sframes_t snd_pcm_playback_rewind(struct snd_pcm_substream *substream, 2608static snd_pcm_sframes_t snd_pcm_playback_rewind(struct snd_pcm_substream *substream,
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index afac886ffa28..0333143a1fa7 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -39,6 +39,7 @@
39#include <sound/core.h> 39#include <sound/core.h>
40#include <sound/control.h> 40#include <sound/control.h>
41#include <sound/pcm.h> 41#include <sound/pcm.h>
42#include <sound/pcm_params.h>
42#include <sound/info.h> 43#include <sound/info.h>
43#include <sound/initval.h> 44#include <sound/initval.h>
44 45
@@ -305,19 +306,6 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
305 return 0; 306 return 0;
306} 307}
307 308
308static void params_change_substream(struct loopback_pcm *dpcm,
309 struct snd_pcm_runtime *runtime)
310{
311 struct snd_pcm_runtime *dst_runtime;
312
313 if (dpcm == NULL || dpcm->substream == NULL)
314 return;
315 dst_runtime = dpcm->substream->runtime;
316 if (dst_runtime == NULL)
317 return;
318 dst_runtime->hw = dpcm->cable->hw;
319}
320
321static void params_change(struct snd_pcm_substream *substream) 309static void params_change(struct snd_pcm_substream *substream)
322{ 310{
323 struct snd_pcm_runtime *runtime = substream->runtime; 311 struct snd_pcm_runtime *runtime = substream->runtime;
@@ -329,10 +317,6 @@ static void params_change(struct snd_pcm_substream *substream)
329 cable->hw.rate_max = runtime->rate; 317 cable->hw.rate_max = runtime->rate;
330 cable->hw.channels_min = runtime->channels; 318 cable->hw.channels_min = runtime->channels;
331 cable->hw.channels_max = runtime->channels; 319 cable->hw.channels_max = runtime->channels;
332 params_change_substream(cable->streams[SNDRV_PCM_STREAM_PLAYBACK],
333 runtime);
334 params_change_substream(cable->streams[SNDRV_PCM_STREAM_CAPTURE],
335 runtime);
336} 320}
337 321
338static int loopback_prepare(struct snd_pcm_substream *substream) 322static int loopback_prepare(struct snd_pcm_substream *substream)
@@ -620,26 +604,29 @@ static unsigned int get_cable_index(struct snd_pcm_substream *substream)
620static int rule_format(struct snd_pcm_hw_params *params, 604static int rule_format(struct snd_pcm_hw_params *params,
621 struct snd_pcm_hw_rule *rule) 605 struct snd_pcm_hw_rule *rule)
622{ 606{
607 struct loopback_pcm *dpcm = rule->private;
608 struct loopback_cable *cable = dpcm->cable;
609 struct snd_mask m;
623 610
624 struct snd_pcm_hardware *hw = rule->private; 611 snd_mask_none(&m);
625 struct snd_mask *maskp = hw_param_mask(params, rule->var); 612 mutex_lock(&dpcm->loopback->cable_lock);
626 613 m.bits[0] = (u_int32_t)cable->hw.formats;
627 maskp->bits[0] &= (u_int32_t)hw->formats; 614 m.bits[1] = (u_int32_t)(cable->hw.formats >> 32);
628 maskp->bits[1] &= (u_int32_t)(hw->formats >> 32); 615 mutex_unlock(&dpcm->loopback->cable_lock);
629 memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */ 616 return snd_mask_refine(hw_param_mask(params, rule->var), &m);
630 if (! maskp->bits[0] && ! maskp->bits[1])
631 return -EINVAL;
632 return 0;
633} 617}
634 618
635static int rule_rate(struct snd_pcm_hw_params *params, 619static int rule_rate(struct snd_pcm_hw_params *params,
636 struct snd_pcm_hw_rule *rule) 620 struct snd_pcm_hw_rule *rule)
637{ 621{
638 struct snd_pcm_hardware *hw = rule->private; 622 struct loopback_pcm *dpcm = rule->private;
623 struct loopback_cable *cable = dpcm->cable;
639 struct snd_interval t; 624 struct snd_interval t;
640 625
641 t.min = hw->rate_min; 626 mutex_lock(&dpcm->loopback->cable_lock);
642 t.max = hw->rate_max; 627 t.min = cable->hw.rate_min;
628 t.max = cable->hw.rate_max;
629 mutex_unlock(&dpcm->loopback->cable_lock);
643 t.openmin = t.openmax = 0; 630 t.openmin = t.openmax = 0;
644 t.integer = 0; 631 t.integer = 0;
645 return snd_interval_refine(hw_param_interval(params, rule->var), &t); 632 return snd_interval_refine(hw_param_interval(params, rule->var), &t);
@@ -648,22 +635,44 @@ static int rule_rate(struct snd_pcm_hw_params *params,
 static int rule_channels(struct snd_pcm_hw_params *params,
 			 struct snd_pcm_hw_rule *rule)
 {
-	struct snd_pcm_hardware *hw = rule->private;
+	struct loopback_pcm *dpcm = rule->private;
+	struct loopback_cable *cable = dpcm->cable;
 	struct snd_interval t;
 
-	t.min = hw->channels_min;
-	t.max = hw->channels_max;
+	mutex_lock(&dpcm->loopback->cable_lock);
+	t.min = cable->hw.channels_min;
+	t.max = cable->hw.channels_max;
+	mutex_unlock(&dpcm->loopback->cable_lock);
 	t.openmin = t.openmax = 0;
 	t.integer = 0;
 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
 }
 
+static void free_cable(struct snd_pcm_substream *substream)
+{
+	struct loopback *loopback = substream->private_data;
+	int dev = get_cable_index(substream);
+	struct loopback_cable *cable;
+
+	cable = loopback->cables[substream->number][dev];
+	if (!cable)
+		return;
+	if (cable->streams[!substream->stream]) {
+		/* other stream is still alive */
+		cable->streams[substream->stream] = NULL;
+	} else {
+		/* free the cable */
+		loopback->cables[substream->number][dev] = NULL;
+		kfree(cable);
+	}
+}
+
 static int loopback_open(struct snd_pcm_substream *substream)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct loopback *loopback = substream->private_data;
 	struct loopback_pcm *dpcm;
-	struct loopback_cable *cable;
+	struct loopback_cable *cable = NULL;
 	int err = 0;
 	int dev = get_cable_index(substream);
 
@@ -681,7 +690,6 @@ static int loopback_open(struct snd_pcm_substream *substream)
 	if (!cable) {
 		cable = kzalloc(sizeof(*cable), GFP_KERNEL);
 		if (!cable) {
-			kfree(dpcm);
 			err = -ENOMEM;
 			goto unlock;
 		}
@@ -699,19 +707,19 @@ static int loopback_open(struct snd_pcm_substream *substream)
 	/* are cached -> they do not reflect the actual state */
 	err = snd_pcm_hw_rule_add(runtime, 0,
 				  SNDRV_PCM_HW_PARAM_FORMAT,
-				  rule_format, &runtime->hw,
+				  rule_format, dpcm,
 				  SNDRV_PCM_HW_PARAM_FORMAT, -1);
 	if (err < 0)
 		goto unlock;
 	err = snd_pcm_hw_rule_add(runtime, 0,
 				  SNDRV_PCM_HW_PARAM_RATE,
-				  rule_rate, &runtime->hw,
+				  rule_rate, dpcm,
 				  SNDRV_PCM_HW_PARAM_RATE, -1);
 	if (err < 0)
 		goto unlock;
 	err = snd_pcm_hw_rule_add(runtime, 0,
 				  SNDRV_PCM_HW_PARAM_CHANNELS,
-				  rule_channels, &runtime->hw,
+				  rule_channels, dpcm,
 				  SNDRV_PCM_HW_PARAM_CHANNELS, -1);
 	if (err < 0)
 		goto unlock;
@@ -723,6 +731,10 @@ static int loopback_open(struct snd_pcm_substream *substream)
 	else
 		runtime->hw = cable->hw;
  unlock:
+	if (err < 0) {
+		free_cable(substream);
+		kfree(dpcm);
+	}
 	mutex_unlock(&loopback->cable_lock);
 	return err;
 }
@@ -731,20 +743,10 @@ static int loopback_close(struct snd_pcm_substream *substream)
 {
 	struct loopback *loopback = substream->private_data;
 	struct loopback_pcm *dpcm = substream->runtime->private_data;
-	struct loopback_cable *cable;
-	int dev = get_cable_index(substream);
 
 	loopback_timer_stop(dpcm);
 	mutex_lock(&loopback->cable_lock);
-	cable = loopback->cables[substream->number][dev];
-	if (cable->streams[!substream->stream]) {
-		/* other stream is still alive */
-		cable->streams[substream->stream] = NULL;
-	} else {
-		/* free the cable */
-		loopback->cables[substream->number][dev] = NULL;
-		kfree(cable);
-	}
+	free_cable(substream);
 	mutex_unlock(&loopback->cable_lock);
 	return 0;
 }
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index ae0272f9a091..e6acc281dd37 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -46,7 +46,7 @@ $(OBJTOOL_IN): fixdep FORCE
 	@$(MAKE) $(build)=objtool
 
 $(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN)
-	@./sync-check.sh
+	@$(CONFIG_SHELL) ./sync-check.sh
 	$(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@
 
 
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 9b341584eb1b..f40d46e24bcc 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -428,6 +428,40 @@ static void add_ignores(struct objtool_file *file)
 }
 
 /*
+ * FIXME: For now, just ignore any alternatives which add retpolines. This is
+ * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
+ * But it at least allows objtool to understand the control flow *around* the
+ * retpoline.
+ */
+static int add_nospec_ignores(struct objtool_file *file)
+{
+	struct section *sec;
+	struct rela *rela;
+	struct instruction *insn;
+
+	sec = find_section_by_name(file->elf, ".rela.discard.nospec");
+	if (!sec)
+		return 0;
+
+	list_for_each_entry(rela, &sec->rela_list, list) {
+		if (rela->sym->type != STT_SECTION) {
+			WARN("unexpected relocation symbol type in %s", sec->name);
+			return -1;
+		}
+
+		insn = find_insn(file, rela->sym->sec, rela->addend);
+		if (!insn) {
+			WARN("bad .discard.nospec entry");
+			return -1;
+		}
+
+		insn->ignore_alts = true;
+	}
+
+	return 0;
+}
+
+/*
  * Find the destination instructions for all jumps.
  */
 static int add_jump_destinations(struct objtool_file *file)
@@ -456,6 +490,13 @@ static int add_jump_destinations(struct objtool_file *file)
 		} else if (rela->sym->sec->idx) {
 			dest_sec = rela->sym->sec;
 			dest_off = rela->sym->sym.st_value + rela->addend + 4;
+		} else if (strstr(rela->sym->name, "_indirect_thunk_")) {
+			/*
+			 * Retpoline jumps are really dynamic jumps in
+			 * disguise, so convert them accordingly.
+			 */
+			insn->type = INSN_JUMP_DYNAMIC;
+			continue;
 		} else {
 			/* sibling call */
 			insn->jump_dest = 0;
@@ -502,11 +543,18 @@ static int add_call_destinations(struct objtool_file *file)
 		dest_off = insn->offset + insn->len + insn->immediate;
 		insn->call_dest = find_symbol_by_offset(insn->sec,
 							dest_off);
+		/*
+		 * FIXME: Thanks to retpolines, it's now considered
+		 * normal for a function to call within itself. So
+		 * disable this warning for now.
+		 */
+#if 0
 		if (!insn->call_dest) {
 			WARN_FUNC("can't find call dest symbol at offset 0x%lx",
 				  insn->sec, insn->offset, dest_off);
 			return -1;
 		}
+#endif
 	} else if (rela->sym->type == STT_SECTION) {
 		insn->call_dest = find_symbol_by_offset(rela->sym->sec,
 							rela->addend+4);
@@ -671,12 +719,6 @@ static int add_special_section_alts(struct objtool_file *file)
 		return ret;
 
 	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
-		alt = malloc(sizeof(*alt));
-		if (!alt) {
-			WARN("malloc failed");
-			ret = -1;
-			goto out;
-		}
 
 		orig_insn = find_insn(file, special_alt->orig_sec,
 				      special_alt->orig_off);
@@ -687,6 +729,10 @@ static int add_special_section_alts(struct objtool_file *file)
 			goto out;
 		}
 
+		/* Ignore retpoline alternatives. */
+		if (orig_insn->ignore_alts)
+			continue;
+
 		new_insn = NULL;
 		if (!special_alt->group || special_alt->new_len) {
 			new_insn = find_insn(file, special_alt->new_sec,
@@ -712,6 +758,13 @@ static int add_special_section_alts(struct objtool_file *file)
 			goto out;
 		}
 
+		alt = malloc(sizeof(*alt));
+		if (!alt) {
+			WARN("malloc failed");
+			ret = -1;
+			goto out;
+		}
+
 		alt->insn = new_insn;
 		list_add_tail(&alt->list, &orig_insn->alts);
 
@@ -1028,6 +1081,10 @@ static int decode_sections(struct objtool_file *file)
 
 	add_ignores(file);
 
+	ret = add_nospec_ignores(file);
+	if (ret)
+		return ret;
+
 	ret = add_jump_destinations(file);
 	if (ret)
 		return ret;
diff --git a/tools/objtool/check.h b/tools/objtool/check.h
index 47d9ea70a83d..dbadb304a410 100644
--- a/tools/objtool/check.h
+++ b/tools/objtool/check.h
@@ -44,7 +44,7 @@ struct instruction {
 	unsigned int len;
 	unsigned char type;
 	unsigned long immediate;
-	bool alt_group, visited, dead_end, ignore, hint, save, restore;
+	bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;
 	struct symbol *call_dest;
 	struct instruction *jump_dest;
 	struct list_head alts;
diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c
index 22c3b4ee1617..be418fba9441 100644
--- a/tools/power/acpi/tools/acpidump/apmain.c
+++ b/tools/power/acpi/tools/acpidump/apmain.c
@@ -79,7 +79,7 @@ struct ap_dump_action action_table[AP_MAX_ACTIONS];
 u32 current_action = 0;
 
 #define AP_UTILITY_NAME "ACPI Binary Table Dump Utility"
-#define AP_SUPPORTED_OPTIONS "?a:bc:f:hn:o:r:svxz"
+#define AP_SUPPORTED_OPTIONS "?a:bc:f:hn:o:r:sv^xz"
 
 /******************************************************************************
  *
@@ -100,6 +100,7 @@ static void ap_display_usage(void)
 	ACPI_OPTION("-r <Address>", "Dump tables from specified RSDP");
 	ACPI_OPTION("-s", "Print table summaries only");
 	ACPI_OPTION("-v", "Display version information");
+	ACPI_OPTION("-vd", "Display build date and time");
 	ACPI_OPTION("-z", "Verbose mode");
 
 	ACPI_USAGE_TEXT("\nTable Options:\n");
@@ -231,10 +232,29 @@ static int ap_do_options(int argc, char **argv)
 		}
 		continue;
 
-	case 'v':	/* Revision/version */
+	case 'v':	/* -v: (Version): signon already emitted, just exit */
 
-		acpi_os_printf(ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
-		return (1);
+		switch (acpi_gbl_optarg[0]) {
+		case '^':	/* -v: (Version) */
+
+			fprintf(stderr,
+				ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
+			return (1);
+
+		case 'd':
+
+			fprintf(stderr,
+				ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
+			printf(ACPI_COMMON_BUILD_TIME);
+			return (1);
+
+		default:
+
+			printf("Unknown option: -v%s\n",
+			       acpi_gbl_optarg);
+			return (-1);
+		}
+		break;
 
 	case 'z':	/* Verbose mode */
 
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 450b4cbba6b6..620fa78b3b1b 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -1250,7 +1250,8 @@ static int nfit_test0_alloc(struct nfit_test *t)
 			window_size) * NUM_DCR
 			+ sizeof(struct acpi_nfit_data_region) * NUM_BDW
 			+ (sizeof(struct acpi_nfit_flush_address)
-			+ sizeof(u64) * NUM_HINTS) * NUM_DCR;
+			+ sizeof(u64) * NUM_HINTS) * NUM_DCR
+			+ sizeof(struct acpi_nfit_capabilities);
 	int i;
 
 	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
@@ -1364,6 +1365,7 @@ static void nfit_test0_setup(struct nfit_test *t)
 	struct acpi_nfit_control_region *dcr;
 	struct acpi_nfit_data_region *bdw;
 	struct acpi_nfit_flush_address *flush;
+	struct acpi_nfit_capabilities *pcap;
 	unsigned int offset, i;
 
 	/*
@@ -1871,8 +1873,16 @@ static void nfit_test0_setup(struct nfit_test *t)
 	for (i = 0; i < NUM_HINTS; i++)
 		flush->hint_address[i] = t->flush_dma[3] + i * sizeof(u64);
 
+	/* platform capabilities */
+	pcap = nfit_buf + offset + flush_hint_size * 4;
+	pcap->header.type = ACPI_NFIT_TYPE_CAPABILITIES;
+	pcap->header.length = sizeof(*pcap);
+	pcap->highest_capability = 1;
+	pcap->capabilities = ACPI_NFIT_CAPABILITY_CACHE_FLUSH |
+		ACPI_NFIT_CAPABILITY_MEM_FLUSH;
+
 	if (t->setup_hotplug) {
-		offset = offset + flush_hint_size * 4;
+		offset = offset + flush_hint_size * 4 + sizeof(*pcap);
 		/* dcr-descriptor4: blk */
 		dcr = nfit_buf + offset;
 		dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c
index 8591c89c0828..471bbbdb94db 100644
--- a/tools/testing/selftests/bpf/test_align.c
+++ b/tools/testing/selftests/bpf/test_align.c
@@ -474,27 +474,7 @@ static struct bpf_align_test tests[] = {
 		.result = REJECT,
 		.matches = {
 			{4, "R5=pkt(id=0,off=0,r=0,imm=0)"},
-			/* ptr & 0x40 == either 0 or 0x40 */
-			{5, "R5=inv(id=0,umax_value=64,var_off=(0x0; 0x40))"},
-			/* ptr << 2 == unknown, (4n) */
-			{7, "R5=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
-			/* (4n) + 14 == (4n+2). We blow our bounds, because
-			 * the add could overflow.
-			 */
-			{8, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
-			/* Checked s>=0 */
-			{10, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
-			/* packet pointer + nonnegative (4n+2) */
-			{12, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
-			{14, "R4=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
-			/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
-			 * We checked the bounds, but it might have been able
-			 * to overflow if the packet pointer started in the
-			 * upper half of the address space.
-			 * So we did not get a 'range' on R6, and the access
-			 * attempt will fail.
-			 */
-			{16, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+			/* R5 bitwise operator &= on pointer prohibited */
 		}
 	},
 	{
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 939a337128db..5d4f10ac2af2 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -7,7 +7,7 @@ include ../lib.mk
 
 TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \
 			check_initial_reg_state sigreturn ldt_gdt iopl mpx-mini-test ioperm \
-			protection_keys test_vdso
+			protection_keys test_vdso test_vsyscall
 TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
 			test_FCMOV test_FCOMI test_FISTTP \
 			vdso_restorer
diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
new file mode 100644
index 000000000000..7a744fa7b786
--- /dev/null
+++ b/tools/testing/selftests/x86/test_vsyscall.c
@@ -0,0 +1,500 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#define _GNU_SOURCE
4
5#include <stdio.h>
6#include <sys/time.h>
7#include <time.h>
8#include <stdlib.h>
9#include <sys/syscall.h>
10#include <unistd.h>
11#include <dlfcn.h>
12#include <string.h>
13#include <inttypes.h>
14#include <signal.h>
15#include <sys/ucontext.h>
16#include <errno.h>
17#include <err.h>
18#include <sched.h>
19#include <stdbool.h>
20#include <setjmp.h>
21
22#ifdef __x86_64__
23# define VSYS(x) (x)
24#else
25# define VSYS(x) 0
26#endif
27
28#ifndef SYS_getcpu
29# ifdef __x86_64__
30# define SYS_getcpu 309
31# else
32# define SYS_getcpu 318
33# endif
34#endif
35
36static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
37 int flags)
38{
39 struct sigaction sa;
40 memset(&sa, 0, sizeof(sa));
41 sa.sa_sigaction = handler;
42 sa.sa_flags = SA_SIGINFO | flags;
43 sigemptyset(&sa.sa_mask);
44 if (sigaction(sig, &sa, 0))
45 err(1, "sigaction");
46}
47
48/* vsyscalls and vDSO */
49bool should_read_vsyscall = false;
50
51typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz);
52gtod_t vgtod = (gtod_t)VSYS(0xffffffffff600000);
53gtod_t vdso_gtod;
54
55typedef int (*vgettime_t)(clockid_t, struct timespec *);
56vgettime_t vdso_gettime;
57
58typedef long (*time_func_t)(time_t *t);
59time_func_t vtime = (time_func_t)VSYS(0xffffffffff600400);
60time_func_t vdso_time;
61
62typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
63getcpu_t vgetcpu = (getcpu_t)VSYS(0xffffffffff600800);
64getcpu_t vdso_getcpu;
65
66static void init_vdso(void)
67{
68 void *vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
69 if (!vdso)
70 vdso = dlopen("linux-gate.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
71 if (!vdso) {
72 printf("[WARN]\tfailed to find vDSO\n");
73 return;
74 }
75
76 vdso_gtod = (gtod_t)dlsym(vdso, "__vdso_gettimeofday");
77 if (!vdso_gtod)
78 printf("[WARN]\tfailed to find gettimeofday in vDSO\n");
79
80 vdso_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
81 if (!vdso_gettime)
82 printf("[WARN]\tfailed to find clock_gettime in vDSO\n");
83
84 vdso_time = (time_func_t)dlsym(vdso, "__vdso_time");
85 if (!vdso_time)
86 printf("[WARN]\tfailed to find time in vDSO\n");
87
88 vdso_getcpu = (getcpu_t)dlsym(vdso, "__vdso_getcpu");
89 if (!vdso_getcpu) {
90 /* getcpu() was never wired up in the 32-bit vDSO. */
91 printf("[%s]\tfailed to find getcpu in vDSO\n",
92 sizeof(long) == 8 ? "WARN" : "NOTE");
93 }
94}
95
96static int init_vsys(void)
97{
98#ifdef __x86_64__
99 int nerrs = 0;
100 FILE *maps;
101 char line[128];
102 bool found = false;
103
104 maps = fopen("/proc/self/maps", "r");
105 if (!maps) {
106 printf("[WARN]\tCould not open /proc/self/maps -- assuming vsyscall is r-x\n");
107 should_read_vsyscall = true;
108 return 0;
109 }
110
111 while (fgets(line, sizeof(line), maps)) {
112 char r, x;
113 void *start, *end;
114 char name[128];
115 if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
116 &start, &end, &r, &x, name) != 5)
117 continue;
118
119 if (strcmp(name, "[vsyscall]"))
120 continue;
121
122 printf("\tvsyscall map: %s", line);
123
124 if (start != (void *)0xffffffffff600000 ||
125 end != (void *)0xffffffffff601000) {
126 printf("[FAIL]\taddress range is nonsense\n");
127 nerrs++;
128 }
129
130 printf("\tvsyscall permissions are %c-%c\n", r, x);
131 should_read_vsyscall = (r == 'r');
132 if (x != 'x') {
133 vgtod = NULL;
134 vtime = NULL;
135 vgetcpu = NULL;
136 }
137
138 found = true;
139 break;
140 }
141
142 fclose(maps);
143
144 if (!found) {
145 printf("\tno vsyscall map in /proc/self/maps\n");
146 should_read_vsyscall = false;
147 vgtod = NULL;
148 vtime = NULL;
149 vgetcpu = NULL;
150 }
151
152 return nerrs;
153#else
154 return 0;
155#endif
156}
157
158/* syscalls */
159static inline long sys_gtod(struct timeval *tv, struct timezone *tz)
160{
161 return syscall(SYS_gettimeofday, tv, tz);
162}
163
164static inline int sys_clock_gettime(clockid_t id, struct timespec *ts)
165{
166 return syscall(SYS_clock_gettime, id, ts);
167}
168
169static inline long sys_time(time_t *t)
170{
171 return syscall(SYS_time, t);
172}
173
174static inline long sys_getcpu(unsigned * cpu, unsigned * node,
175 void* cache)
176{
177 return syscall(SYS_getcpu, cpu, node, cache);
178}
179
180static jmp_buf jmpbuf;
181
182static void sigsegv(int sig, siginfo_t *info, void *ctx_void)
183{
184 siglongjmp(jmpbuf, 1);
185}
186
187static double tv_diff(const struct timeval *a, const struct timeval *b)
188{
189 return (double)(a->tv_sec - b->tv_sec) +
190 (double)((int)a->tv_usec - (int)b->tv_usec) * 1e-6;
191}
192
193static int check_gtod(const struct timeval *tv_sys1,
194 const struct timeval *tv_sys2,
195 const struct timezone *tz_sys,
196 const char *which,
197 const struct timeval *tv_other,
198 const struct timezone *tz_other)
199{
200 int nerrs = 0;
201 double d1, d2;
202
203 if (tz_other && (tz_sys->tz_minuteswest != tz_other->tz_minuteswest || tz_sys->tz_dsttime != tz_other->tz_dsttime)) {
204 printf("[FAIL] %s tz mismatch\n", which);
205 nerrs++;
206 }
207
208 d1 = tv_diff(tv_other, tv_sys1);
209 d2 = tv_diff(tv_sys2, tv_other);
210 printf("\t%s time offsets: %lf %lf\n", which, d1, d2);
211
212 if (d1 < 0 || d2 < 0) {
213 printf("[FAIL]\t%s time was inconsistent with the syscall\n", which);
214 nerrs++;
215 } else {
216 printf("[OK]\t%s gettimeofday()'s timeval was okay\n", which);
217 }
218
219 return nerrs;
220}
221
222static int test_gtod(void)
223{
224 struct timeval tv_sys1, tv_sys2, tv_vdso, tv_vsys;
225 struct timezone tz_sys, tz_vdso, tz_vsys;
226 long ret_vdso = -1;
227 long ret_vsys = -1;
228 int nerrs = 0;
229
230 printf("[RUN]\ttest gettimeofday()\n");
231
232 if (sys_gtod(&tv_sys1, &tz_sys) != 0)
233 err(1, "syscall gettimeofday");
234 if (vdso_gtod)
235 ret_vdso = vdso_gtod(&tv_vdso, &tz_vdso);
236 if (vgtod)
237 ret_vsys = vgtod(&tv_vsys, &tz_vsys);
238 if (sys_gtod(&tv_sys2, &tz_sys) != 0)
239 err(1, "syscall gettimeofday");
240
241 if (vdso_gtod) {
242 if (ret_vdso == 0) {
243 nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vDSO", &tv_vdso, &tz_vdso);
244 } else {
245 printf("[FAIL]\tvDSO gettimeofday() failed: %ld\n", ret_vdso);
246 nerrs++;
247 }
248 }
249
250 if (vgtod) {
251 if (ret_vsys == 0) {
252 nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vsyscall", &tv_vsys, &tz_vsys);
253 } else {
254 printf("[FAIL]\tvsys gettimeofday() failed: %ld\n", ret_vsys);
255 nerrs++;
256 }
257 }
258
259 return nerrs;
260}
261
262static int test_time(void) {
263 int nerrs = 0;
264
265 printf("[RUN]\ttest time()\n");
266 long t_sys1, t_sys2, t_vdso = 0, t_vsys = 0;
267 long t2_sys1 = -1, t2_sys2 = -1, t2_vdso = -1, t2_vsys = -1;
268 t_sys1 = sys_time(&t2_sys1);
269 if (vdso_time)
270 t_vdso = vdso_time(&t2_vdso);
271 if (vtime)
272 t_vsys = vtime(&t2_vsys);
273 t_sys2 = sys_time(&t2_sys2);
274 if (t_sys1 < 0 || t_sys1 != t2_sys1 || t_sys2 < 0 || t_sys2 != t2_sys2) {
275 printf("[FAIL]\tsyscall failed (ret1:%ld output1:%ld ret2:%ld output2:%ld)\n", t_sys1, t2_sys1, t_sys2, t2_sys2);
276 nerrs++;
277 return nerrs;
278 }
279
280 if (vdso_time) {
281 if (t_vdso < 0 || t_vdso != t2_vdso) {
282 printf("[FAIL]\tvDSO failed (ret:%ld output:%ld)\n", t_vdso, t2_vdso);
283 nerrs++;
284 } else if (t_vdso < t_sys1 || t_vdso > t_sys2) {
285 printf("[FAIL]\tvDSO returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vdso, t_sys2);
286 nerrs++;
287 } else {
288 printf("[OK]\tvDSO time() is okay\n");
289 }
290 }
291
292 if (vtime) {
293 if (t_vsys < 0 || t_vsys != t2_vsys) {
294 printf("[FAIL]\tvsyscall failed (ret:%ld output:%ld)\n", t_vsys, t2_vsys);
295 nerrs++;
296 } else if (t_vsys < t_sys1 || t_vsys > t_sys2) {
297 printf("[FAIL]\tvsyscall returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vsys, t_sys2);
298 nerrs++;
299 } else {
300 printf("[OK]\tvsyscall time() is okay\n");
301 }
302 }
303
304 return nerrs;
305}
306
307static int test_getcpu(int cpu)
308{
309 int nerrs = 0;
310 long ret_sys, ret_vdso = -1, ret_vsys = -1;
311
312 printf("[RUN]\tgetcpu() on CPU %d\n", cpu);
313
314 cpu_set_t cpuset;
315 CPU_ZERO(&cpuset);
316 CPU_SET(cpu, &cpuset);
317 if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) {
318 printf("[SKIP]\tfailed to force CPU %d\n", cpu);
319 return nerrs;
320 }
321
322 unsigned cpu_sys, cpu_vdso, cpu_vsys, node_sys, node_vdso, node_vsys;
323 unsigned node = 0;
324 bool have_node = false;
325 ret_sys = sys_getcpu(&cpu_sys, &node_sys, 0);
326 if (vdso_getcpu)
327 ret_vdso = vdso_getcpu(&cpu_vdso, &node_vdso, 0);
328 if (vgetcpu)
329 ret_vsys = vgetcpu(&cpu_vsys, &node_vsys, 0);
330
331 if (ret_sys == 0) {
332 if (cpu_sys != cpu) {
333 printf("[FAIL]\tsyscall reported CPU %hu but should be %d\n", cpu_sys, cpu);
334 nerrs++;
335 }
336
337 have_node = true;
338 node = node_sys;
339 }
340
341 if (vdso_getcpu) {
342 if (ret_vdso) {
343 printf("[FAIL]\tvDSO getcpu() failed\n");
344 nerrs++;
345 } else {
346 if (!have_node) {
347 have_node = true;
348 node = node_vdso;
349 }
350
351 if (cpu_vdso != cpu) {
352 printf("[FAIL]\tvDSO reported CPU %hu but should be %d\n", cpu_vdso, cpu);
353 nerrs++;
354 } else {
355 printf("[OK]\tvDSO reported correct CPU\n");
356 }
357
358 if (node_vdso != node) {
359 printf("[FAIL]\tvDSO reported node %hu but should be %hu\n", node_vdso, node);
360 nerrs++;
361 } else {
362 printf("[OK]\tvDSO reported correct node\n");
363 }
364 }
365 }
366
367 if (vgetcpu) {
368 if (ret_vsys) {
369 printf("[FAIL]\tvsyscall getcpu() failed\n");
370 nerrs++;
371 } else {
372 if (!have_node) {
373 have_node = true;
374 node = node_vsys;
375 }
376
377 if (cpu_vsys != cpu) {
378 printf("[FAIL]\tvsyscall reported CPU %hu but should be %d\n", cpu_vsys, cpu);
379 nerrs++;
380 } else {
381 printf("[OK]\tvsyscall reported correct CPU\n");
382 }
383
384 if (node_vsys != node) {
385 printf("[FAIL]\tvsyscall reported node %hu but should be %hu\n", node_vsys, node);
386 nerrs++;
387 } else {
388 printf("[OK]\tvsyscall reported correct node\n");
389 }
390 }
391 }
392
393 return nerrs;
394}
395
396static int test_vsys_r(void)
397{
398#ifdef __x86_64__
399 printf("[RUN]\tChecking read access to the vsyscall page\n");
400 bool can_read;
401 if (sigsetjmp(jmpbuf, 1) == 0) {
402 *(volatile int *)0xffffffffff600000;
403 can_read = true;
404 } else {
405 can_read = false;
406 }
407
408 if (can_read && !should_read_vsyscall) {
409 printf("[FAIL]\tWe have read access, but we shouldn't\n");
410 return 1;
411 } else if (!can_read && should_read_vsyscall) {
412 printf("[FAIL]\tWe don't have read access, but we should\n");
413 return 1;
414 } else {
415 printf("[OK]\tgot expected result\n");
416 }
417#endif
418
419 return 0;
420}
421
422
423#ifdef __x86_64__
424#define X86_EFLAGS_TF (1UL << 8)
425static volatile sig_atomic_t num_vsyscall_traps;
426
427static unsigned long get_eflags(void)
428{
429 unsigned long eflags;
430 asm volatile ("pushfq\n\tpopq %0" : "=rm" (eflags));
431 return eflags;
432}
433
434static void set_eflags(unsigned long eflags)
435{
436 asm volatile ("pushq %0\n\tpopfq" : : "rm" (eflags) : "flags");
437}
438
439static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
440{
441 ucontext_t *ctx = (ucontext_t *)ctx_void;
442 unsigned long ip = ctx->uc_mcontext.gregs[REG_RIP];
443
444 if (((ip ^ 0xffffffffff600000UL) & ~0xfffUL) == 0)
445 num_vsyscall_traps++;
446}
447
448static int test_native_vsyscall(void)
449{
450 time_t tmp;
451 bool is_native;
452
453 if (!vtime)
454 return 0;
455
456 printf("[RUN]\tchecking for native vsyscall\n");
457 sethandler(SIGTRAP, sigtrap, 0);
458 set_eflags(get_eflags() | X86_EFLAGS_TF);
459 vtime(&tmp);
460 set_eflags(get_eflags() & ~X86_EFLAGS_TF);
461
462 /*
463 * If vsyscalls are emulated, we expect a single trap in the
464 * vsyscall page -- the call instruction will trap with RIP
465 * pointing to the entry point before emulation takes over.
466 * In native mode, we expect two traps, since whatever code
467 * the vsyscall page contains will be more than just a ret
468 * instruction.
469 */
470 is_native = (num_vsyscall_traps > 1);
471
472 printf("\tvsyscalls are %s (%d instructions in vsyscall page)\n",
473 (is_native ? "native" : "emulated"),
474 (int)num_vsyscall_traps);
475
476 return 0;
477}
478#endif
479
480int main(int argc, char **argv)
481{
482 int nerrs = 0;
483
484 init_vdso();
485 nerrs += init_vsys();
486
487 nerrs += test_gtod();
488 nerrs += test_time();
489 nerrs += test_getcpu(0);
490 nerrs += test_getcpu(1);
491
492 sethandler(SIGSEGV, sigsegv, 0);
493 nerrs += test_vsys_r();
494
495#ifdef __x86_64__
496 nerrs += test_native_vsyscall();
497#endif
498
499 return nerrs ? 1 : 0;
500}