aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/core-api/kernel-api.rst13
-rw-r--r--Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt2
-rw-r--r--Documentation/devicetree/bindings/serial/mvebu-uart.txt2
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,sci-serial.txt2
-rw-r--r--Documentation/devicetree/bindings/thermal/exynos-thermal.txt23
-rw-r--r--Documentation/devicetree/bindings/thermal/thermal.txt16
-rw-r--r--Documentation/devicetree/bindings/timer/nuvoton,npcm7xx-timer.txt21
-rw-r--r--Documentation/devicetree/bindings/timer/nxp,tpm-timer.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/usb-xhci.txt5
-rw-r--r--Documentation/driver-api/firmware/request_firmware.rst16
-rw-r--r--Documentation/driver-api/infrastructure.rst2
-rw-r--r--Documentation/driver-api/usb/typec.rst2
-rw-r--r--Documentation/i2c/dev-interface32
-rw-r--r--Documentation/ioctl/ioctl-number.txt2
-rw-r--r--Documentation/livepatch/shadow-vars.txt41
-rw-r--r--Documentation/networking/filter.txt6
-rw-r--r--Documentation/networking/ip-sysctl.txt23
-rw-r--r--Documentation/power/suspend-and-cpuhotplug.txt2
-rw-r--r--Documentation/process/magic-number.rst3
-rw-r--r--Documentation/trace/ftrace.rst14
-rw-r--r--Documentation/virtual/kvm/api.txt9
-rw-r--r--Documentation/virtual/kvm/arm/psci.txt30
-rw-r--r--MAINTAINERS68
-rw-r--r--Makefile2
-rw-r--r--arch/arm/boot/dts/gemini-nas4220b.dts28
-rw-r--r--arch/arm/boot/dts/omap4.dtsi8
-rw-r--r--arch/arm/configs/gemini_defconfig27
-rw-r--r--arch/arm/configs/socfpga_defconfig1
-rw-r--r--arch/arm/include/asm/kvm_host.h3
-rw-r--r--arch/arm/include/uapi/asm/kvm.h6
-rw-r--r--arch/arm/kvm/guest.c13
-rw-r--r--arch/arm/mach-omap2/Makefile6
-rw-r--r--arch/arm/mach-omap2/pm-asm-offsets.c3
-rw-r--r--arch/arm/mach-omap2/sleep33xx.S1
-rw-r--r--arch/arm/mach-omap2/sleep43xx.S1
-rw-r--r--arch/arm/mach-s3c24xx/mach-jive.c4
-rw-r--r--arch/arm64/Makefile4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts12
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl.dtsi61
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm.dtsi17
-rw-r--r--arch/arm64/boot/dts/arm/juno-motherboard.dtsi2
-rw-r--r--arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi80
-rw-r--r--arch/arm64/include/asm/kvm_host.h3
-rw-r--r--arch/arm64/include/asm/module.h2
-rw-r--r--arch/arm64/include/asm/pgtable.h4
-rw-r--r--arch/arm64/include/uapi/asm/kvm.h6
-rw-r--r--arch/arm64/kernel/cpufeature.c1
-rw-r--r--arch/arm64/kernel/module-plts.c2
-rw-r--r--arch/arm64/kernel/module.c2
-rw-r--r--arch/arm64/kernel/ptrace.c20
-rw-r--r--arch/arm64/kernel/traps.c5
-rw-r--r--arch/arm64/kvm/guest.c14
-rw-r--r--arch/arm64/kvm/sys_regs.c6
-rw-r--r--arch/arm64/lib/Makefile4
-rw-r--r--arch/arm64/mm/flush.c2
-rw-r--r--arch/arm64/mm/kasan_init.c4
-rw-r--r--arch/hexagon/include/asm/io.h6
-rw-r--r--arch/hexagon/lib/checksum.c1
-rw-r--r--arch/mips/boot/dts/img/boston.dts6
-rw-r--r--arch/mips/include/asm/io.h4
-rw-r--r--arch/mips/include/asm/uaccess.h11
-rw-r--r--arch/mips/lib/memset.S11
-rw-r--r--arch/parisc/kernel/Makefile2
-rw-r--r--arch/powerpc/include/asm/powernv.h2
-rw-r--r--arch/powerpc/kernel/eeh_pe.c3
-rw-r--r--arch/powerpc/kernel/idle_book3s.S4
-rw-r--r--arch/powerpc/kernel/mce_power.c7
-rw-r--r--arch/powerpc/kernel/setup_64.c11
-rw-r--r--arch/powerpc/kernel/smp.c49
-rw-r--r--arch/powerpc/kvm/booke.c7
-rw-r--r--arch/powerpc/lib/feature-fixups.c2
-rw-r--r--arch/powerpc/mm/mem.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/sched.c2
-rw-r--r--arch/powerpc/platforms/powernv/memtrace.c17
-rw-r--r--arch/powerpc/platforms/powernv/npu-dma.c88
-rw-r--r--arch/powerpc/platforms/powernv/opal-rtc.c8
-rw-r--r--arch/powerpc/sysdev/xive/native.c4
-rw-r--r--arch/riscv/Kconfig4
-rw-r--r--arch/riscv/include/asm/Kbuild1
-rw-r--r--arch/riscv/kernel/vdso/Makefile2
-rw-r--r--arch/s390/Kbuild1
-rw-r--r--arch/s390/Kconfig32
-rw-r--r--arch/s390/boot/Makefile6
-rw-r--r--arch/s390/boot/compressed/.gitignore1
-rw-r--r--arch/s390/configs/debug_defconfig (renamed from arch/s390/configs/default_defconfig)30
-rw-r--r--arch/s390/configs/gcov_defconfig661
-rw-r--r--arch/s390/configs/performance_defconfig20
-rw-r--r--arch/s390/defconfig13
-rw-r--r--arch/s390/hypfs/inode.c2
-rw-r--r--arch/s390/include/asm/kexec.h23
-rw-r--r--arch/s390/include/asm/purgatory.h17
-rw-r--r--arch/s390/include/asm/setup.h40
-rw-r--r--arch/s390/include/asm/thread_info.h3
-rw-r--r--arch/s390/include/uapi/asm/signal.h23
-rw-r--r--arch/s390/kernel/Makefile3
-rw-r--r--arch/s390/kernel/asm-offsets.c5
-rw-r--r--arch/s390/kernel/compat_wrapper.c1
-rw-r--r--arch/s390/kernel/kexec_elf.c147
-rw-r--r--arch/s390/kernel/kexec_image.c76
-rw-r--r--arch/s390/kernel/machine_kexec_file.c245
-rw-r--r--arch/s390/kernel/module.c4
-rw-r--r--arch/s390/kernel/nospec-branch.c1
-rw-r--r--arch/s390/kernel/perf_cpum_cf_events.c9
-rw-r--r--arch/s390/kernel/process.c10
-rw-r--r--arch/s390/kernel/setup.c1
-rw-r--r--arch/s390/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/s390/kernel/uprobes.c9
-rw-r--r--arch/s390/purgatory/.gitignore2
-rw-r--r--arch/s390/purgatory/Makefile37
-rw-r--r--arch/s390/purgatory/head.S279
-rw-r--r--arch/s390/purgatory/purgatory.c42
-rw-r--r--arch/sparc/include/uapi/asm/oradax.h2
-rw-r--r--arch/sparc/kernel/vio.c2
-rw-r--r--arch/x86/Kconfig4
-rw-r--r--arch/x86/entry/entry_64_compat.S8
-rw-r--r--arch/x86/events/intel/core.c9
-rw-r--r--arch/x86/events/intel/uncore_snbep.c37
-rw-r--r--arch/x86/include/asm/asm.h2
-rw-r--r--arch/x86/include/asm/cpufeatures.h1
-rw-r--r--arch/x86/include/asm/ftrace.h19
-rw-r--r--arch/x86/include/asm/irq_vectors.h7
-rw-r--r--arch/x86/include/asm/jailhouse_para.h2
-rw-r--r--arch/x86/include/asm/kvm_host.h1
-rw-r--r--arch/x86/include/asm/pgtable.h5
-rw-r--r--arch/x86/include/asm/pgtable_64_types.h8
-rw-r--r--arch/x86/include/asm/processor.h2
-rw-r--r--arch/x86/include/uapi/asm/msgbuf.h31
-rw-r--r--arch/x86/include/uapi/asm/shmbuf.h42
-rw-r--r--arch/x86/kernel/acpi/boot.c4
-rw-r--r--arch/x86/kernel/cpu/intel.c3
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c6
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c2
-rw-r--r--arch/x86/kernel/jailhouse.c2
-rw-r--r--arch/x86/kernel/kexec-bzimage64.c5
-rw-r--r--arch/x86/kernel/ldt.c2
-rw-r--r--arch/x86/kernel/pci-nommu.c90
-rw-r--r--arch/x86/kernel/setup.c6
-rw-r--r--arch/x86/kernel/smpboot.c47
-rw-r--r--arch/x86/kernel/tsc.c2
-rw-r--r--arch/x86/kvm/svm.c31
-rw-r--r--arch/x86/kvm/vmx.c109
-rw-r--r--arch/x86/kvm/x86.c15
-rw-r--r--arch/x86/kvm/x86.h7
-rw-r--r--arch/x86/mm/dump_pagetables.c11
-rw-r--r--arch/x86/mm/pageattr.c44
-rw-r--r--arch/x86/mm/pti.c26
-rw-r--r--arch/x86/power/hibernate_64.c2
-rw-r--r--block/bfq-iosched.c10
-rw-r--r--block/blk-cgroup.c28
-rw-r--r--block/blk-core.c15
-rw-r--r--block/blk-mq.c41
-rw-r--r--block/blk-mq.h3
-rw-r--r--crypto/api.c11
-rw-r--r--crypto/drbg.c2
-rw-r--r--drivers/acpi/acpi_video.c27
-rw-r--r--drivers/acpi/acpi_watchdog.c59
-rw-r--r--drivers/acpi/button.c24
-rw-r--r--drivers/acpi/scan.c2
-rw-r--r--drivers/acpi/sleep.c13
-rw-r--r--drivers/amba/bus.c17
-rw-r--r--drivers/android/binder.c8
-rw-r--r--drivers/atm/iphase.c4
-rw-r--r--drivers/base/dma-coherent.c5
-rw-r--r--drivers/base/dma-mapping.c6
-rw-r--r--drivers/base/firmware_loader/fallback.c4
-rw-r--r--drivers/base/firmware_loader/fallback.h2
-rw-r--r--drivers/block/loop.c64
-rw-r--r--drivers/block/loop.h1
-rw-r--r--drivers/block/rbd.c101
-rw-r--r--drivers/block/swim.c49
-rw-r--r--drivers/block/swim3.c6
-rw-r--r--drivers/bus/Kconfig1
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/char/random.c172
-rw-r--r--drivers/char/virtio_console.c157
-rw-r--r--drivers/clocksource/Kconfig8
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/timer-imx-tpm.c45
-rw-r--r--drivers/clocksource/timer-npcm7xx.c215
-rw-r--r--drivers/cpufreq/Kconfig.arm10
-rw-r--r--drivers/cpufreq/brcmstb-avs-cpufreq.c323
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c14
-rw-r--r--drivers/dax/device.c2
-rw-r--r--drivers/firmware/arm_scmi/clock.c2
-rw-r--r--drivers/fpga/altera-ps-spi.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Kconfig1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c17
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c10
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c7
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c54
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c16
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h4
-rw-r--r--drivers/gpu/drm/drm_dp_dual_mode_helper.c39
-rw-r--r--drivers/gpu/drm/drm_edid.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c73
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c10
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c27
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c52
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c27
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c2
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c37
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c2
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c13
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c16
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h4
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c9
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c11
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c1
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c1
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_format.c3
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_kms.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c16
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c109
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c28
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c3
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c11
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c20
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h5
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c6
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h1
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c18
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c55
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate_shaders.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c4
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-input.c24
-rw-r--r--drivers/hid/hidraw.c5
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c13
-rw-r--r--drivers/hid/wacom_wac.c76
-rw-r--r--drivers/hwmon/k10temp.c17
-rw-r--r--drivers/hwmon/nct6683.c4
-rw-r--r--drivers/hwmon/scmi-hwmon.c5
-rw-r--r--drivers/i2c/busses/Kconfig3
-rw-r--r--drivers/i2c/busses/i2c-sprd.c22
-rw-r--r--drivers/i2c/i2c-dev.c2
-rw-r--r--drivers/input/evdev.c7
-rw-r--r--drivers/isdn/mISDN/dsp_hwec.c8
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c14
-rw-r--r--drivers/md/md.c6
-rw-r--r--drivers/md/raid1.c25
-rw-r--r--drivers/memory/emif-asm-offsets.c72
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c39
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c25
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c33
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c9
-rw-r--r--drivers/mtd/nand/core.c3
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c25
-rw-r--r--drivers/mtd/nand/raw/tango_nand.c2
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c19
-rw-r--r--drivers/net/bonding/bond_main.c3
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.c12
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h8
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c16
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c1
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c24
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c196
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h9
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c49
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c89
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c17
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c14
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c44
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c3
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c11
-rw-r--r--drivers/net/ethernet/sfc/ef10.c85
-rw-r--r--drivers/net/ethernet/sfc/efx.c143
-rw-r--r--drivers/net/ethernet/sfc/efx.h21
-rw-r--r--drivers/net/ethernet/sfc/farch.c41
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h61
-rw-r--r--drivers/net/ethernet/sfc/rx.c122
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c7
-rw-r--r--drivers/net/ethernet/ti/cpsw.c2
-rw-r--r--drivers/net/macsec.c5
-rw-r--r--drivers/net/phy/marvell.c9
-rw-r--r--drivers/net/phy/microchip.c178
-rw-r--r--drivers/net/ppp/pppoe.c4
-rw-r--r--drivers/net/team/team.c38
-rw-r--r--drivers/net/tun.c7
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/virtio_net.c79
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c17
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h4
-rw-r--r--drivers/nvdimm/Kconfig3
-rw-r--r--drivers/nvdimm/dimm_devs.c22
-rw-r--r--drivers/nvdimm/of_pmem.c2
-rw-r--r--drivers/of/fdt.c7
-rw-r--r--drivers/pci/dwc/pcie-kirin.c2
-rw-r--r--drivers/pci/host/pci-aardvark.c53
-rw-r--r--drivers/pci/pci-driver.c5
-rw-r--r--drivers/pci/pci.c4
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c19
-rw-r--r--drivers/rtc/rtc-opal.c37
-rw-r--r--drivers/s390/block/dasd_alias.c13
-rw-r--r--drivers/s390/block/dasd_diag.c1
-rw-r--r--drivers/s390/char/sclp_early_core.c2
-rw-r--r--drivers/s390/cio/chsc.c14
-rw-r--r--drivers/s390/cio/vfio_ccw_fsm.c19
-rw-r--r--drivers/s390/net/qeth_core.h2
-rw-r--r--drivers/s390/net/qeth_core_main.c158
-rw-r--r--drivers/s390/net/qeth_core_mpc.h12
-rw-r--r--drivers/s390/net/qeth_l2_main.c60
-rw-r--r--drivers/s390/net/smsgiucv.c2
-rw-r--r--drivers/sbus/char/oradax.c2
-rw-r--r--drivers/slimbus/messaging.c2
-rw-r--r--drivers/soc/bcm/raspberrypi-power.c2
-rw-r--r--drivers/staging/wilc1000/host_interface.c2
-rw-r--r--drivers/tty/n_gsm.c23
-rw-r--r--drivers/tty/serial/earlycon.c6
-rw-r--r--drivers/tty/serial/imx.c19
-rw-r--r--drivers/tty/serial/mvebu-uart.c1
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c10
-rw-r--r--drivers/tty/serial/xilinx_uartps.c2
-rw-r--r--drivers/tty/tty_io.c5
-rw-r--r--drivers/tty/tty_ldisc.c29
-rw-r--r--drivers/uio/uio_hv_generic.c72
-rw-r--r--drivers/usb/Kconfig1
-rw-r--r--drivers/usb/core/hcd.c19
-rw-r--r--drivers/usb/core/hub.c10
-rw-r--r--drivers/usb/core/phy.c93
-rw-r--r--drivers/usb/core/phy.h22
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/host/xhci-dbgtty.c8
-rw-r--r--drivers/usb/host/xhci-pci.c5
-rw-r--r--drivers/usb/host/xhci-plat.c32
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/musb/musb_dsps.c2
-rw-r--r--drivers/usb/musb/musb_host.c1
-rw-r--r--drivers/usb/serial/Kconfig1
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/serial/usb-serial-simple.c7
-rw-r--r--drivers/usb/typec/ucsi/Makefile2
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c2
-rw-r--r--drivers/usb/usbip/stub_main.c5
-rw-r--r--drivers/usb/usbip/usbip_common.h2
-rw-r--r--drivers/usb/usbip/usbip_event.c4
-rw-r--r--drivers/usb/usbip/vhci_hcd.c13
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c70
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.h9
-rw-r--r--drivers/virt/vboxguest/vboxguest_linux.c19
-rw-r--r--drivers/virt/vboxguest/vboxguest_utils.c17
-rw-r--r--drivers/watchdog/aspeed_wdt.c7
-rw-r--r--drivers/watchdog/renesas_wdt.c6
-rw-r--r--drivers/watchdog/sch311x_wdt.c2
-rw-r--r--drivers/watchdog/w83977f_wdt.c2
-rw-r--r--drivers/watchdog/wafer5823wdt.c2
-rw-r--r--drivers/xen/xen-pciback/conf_space_quirks.c2
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c8
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c3
-rw-r--r--fs/afs/server.c9
-rw-r--r--fs/autofs4/root.c2
-rw-r--r--fs/binfmt_elf.c8
-rw-r--r--fs/btrfs/ctree.h25
-rw-r--r--fs/btrfs/delayed-inode.c20
-rw-r--r--fs/btrfs/delayed-ref.c19
-rw-r--r--fs/btrfs/delayed-ref.h1
-rw-r--r--fs/btrfs/disk-io.c1
-rw-r--r--fs/btrfs/extent-tree.c73
-rw-r--r--fs/btrfs/file.c2
-rw-r--r--fs/btrfs/inode.c20
-rw-r--r--fs/btrfs/print-tree.c25
-rw-r--r--fs/btrfs/print-tree.h2
-rw-r--r--fs/btrfs/qgroup.c43
-rw-r--r--fs/btrfs/transaction.c1
-rw-r--r--fs/btrfs/transaction.h14
-rw-r--r--fs/ceph/inode.c10
-rw-r--r--fs/ceph/xattr.c28
-rw-r--r--fs/cifs/cifs_debug.h2
-rw-r--r--fs/cifs/cifssmb.c3
-rw-r--r--fs/cifs/connect.c32
-rw-r--r--fs/cifs/dir.c9
-rw-r--r--fs/cifs/file.c2
-rw-r--r--fs/cifs/smb2ops.c22
-rw-r--r--fs/cifs/smb2pdu.c13
-rw-r--r--fs/cifs/smb2pdu.h2
-rw-r--r--fs/cifs/smbdirect.c42
-rw-r--r--fs/cifs/transport.c9
-rw-r--r--fs/ecryptfs/crypto.c41
-rw-r--r--fs/ecryptfs/file.c21
-rw-r--r--fs/ecryptfs/inode.c3
-rw-r--r--fs/ecryptfs/keystore.c2
-rw-r--r--fs/ext2/file.c4
-rw-r--r--fs/ext4/balloc.c9
-rw-r--r--fs/ext4/extents.c16
-rw-r--r--fs/ext4/super.c1
-rw-r--r--fs/fs-writeback.c7
-rw-r--r--fs/isofs/compress.c19
-rw-r--r--fs/isofs/inode.c3
-rw-r--r--fs/jbd2/transaction.c1
-rw-r--r--fs/jffs2/super.c2
-rw-r--r--fs/namespace.c5
-rw-r--r--fs/notify/fanotify/fanotify.c34
-rw-r--r--fs/notify/fsnotify.c25
-rw-r--r--fs/orangefs/super.c5
-rw-r--r--fs/proc/base.c6
-rw-r--r--fs/proc/loadavg.c2
-rw-r--r--fs/proc/task_mmu.c6
-rw-r--r--fs/quota/dquot.c2
-rw-r--r--fs/super.c9
-rw-r--r--fs/udf/unicode.c6
-rw-r--r--fs/xfs/libxfs/xfs_attr.c9
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c4
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c21
-rw-r--r--fs/xfs/xfs_file.c14
-rw-r--r--include/asm-generic/vmlinux.lds.h2
-rw-r--r--include/drm/drm_hdcp.h2
-rw-r--r--include/kvm/arm_psci.h16
-rw-r--r--include/linux/backing-dev-defs.h5
-rw-r--r--include/linux/backing-dev.h30
-rw-r--r--include/linux/blk-mq.h3
-rw-r--r--include/linux/blkdev.h1
-rw-r--r--include/linux/bpf.h4
-rw-r--r--include/linux/compiler-clang.h3
-rw-r--r--include/linux/coresight-pmu.h13
-rw-r--r--include/linux/device.h6
-rw-r--r--include/linux/ethtool.h2
-rw-r--r--include/linux/fsnotify_backend.h6
-rw-r--r--include/linux/hid.h9
-rw-r--r--include/linux/hrtimer.h2
-rw-r--r--include/linux/if_vlan.h7
-rw-r--r--include/linux/livepatch.h19
-rw-r--r--include/linux/microchipphy.h8
-rw-r--r--include/linux/mtd/flashchip.h1
-rw-r--r--include/linux/serial_core.h21
-rw-r--r--include/linux/shrinker.h7
-rw-r--r--include/linux/stringhash.h4
-rw-r--r--include/linux/textsearch.h4
-rw-r--r--include/linux/thread_info.h6
-rw-r--r--include/linux/ti-emif-sram.h75
-rw-r--r--include/linux/timekeeper_internal.h2
-rw-r--r--include/linux/timekeeping.h37
-rw-r--r--include/linux/timekeeping32.h3
-rw-r--r--include/linux/timer.h2
-rw-r--r--include/linux/tty.h2
-rw-r--r--include/linux/vbox_utils.h23
-rw-r--r--include/linux/virtio.h3
-rw-r--r--include/net/ife.h3
-rw-r--r--include/net/llc_conn.h1
-rw-r--r--include/soc/bcm2835/raspberrypi-firmware.h4
-rw-r--r--include/sound/control.h7
-rw-r--r--include/trace/events/workqueue.h2
-rw-r--r--include/uapi/linux/kvm.h7
-rw-r--r--include/uapi/linux/perf_event.h18
-rw-r--r--include/uapi/linux/random.h3
-rw-r--r--include/uapi/linux/sysctl.h18
-rw-r--r--include/uapi/linux/time.h1
-rw-r--r--include/uapi/linux/virtio_balloon.h15
-rw-r--r--include/xen/interface/io/sndif.h322
-rw-r--r--kernel/bpf/core.c45
-rw-r--r--kernel/bpf/sockmap.c3
-rw-r--r--kernel/events/callchain.c25
-rw-r--r--kernel/events/core.c8
-rw-r--r--kernel/fork.c3
-rw-r--r--kernel/kprobes.c2
-rw-r--r--kernel/livepatch/shadow.c108
-rw-r--r--kernel/module.c3
-rw-r--r--kernel/sysctl_binary.c20
-rw-r--r--kernel/time/hrtimer.c16
-rw-r--r--kernel/time/posix-cpu-timers.c4
-rw-r--r--kernel/time/posix-stubs.c2
-rw-r--r--kernel/time/posix-timers.c26
-rw-r--r--kernel/time/tick-common.c15
-rw-r--r--kernel/time/tick-internal.h6
-rw-r--r--kernel/time/tick-oneshot.c11
-rw-r--r--kernel/time/tick-sched.c19
-rw-r--r--kernel/time/timekeeping.c85
-rw-r--r--kernel/time/timekeeping.h1
-rw-r--r--kernel/trace/bpf_trace.c25
-rw-r--r--kernel/trace/trace.c2
-rw-r--r--kernel/trace/trace_entries.h2
-rw-r--r--kernel/trace/trace_events_filter.c14
-rw-r--r--kernel/trace/trace_kprobe.c2
-rw-r--r--lib/dma-direct.c3
-rw-r--r--lib/errseq.c23
-rw-r--r--lib/kobject.c11
-rw-r--r--lib/textsearch.c40
-rw-r--r--mm/filemap.c9
-rw-r--r--mm/huge_memory.c5
-rw-r--r--mm/memcontrol.c2
-rw-r--r--mm/migrate.c22
-rw-r--r--mm/mmap.c11
-rw-r--r--mm/page-writeback.c18
-rw-r--r--mm/rmap.c3
-rw-r--r--mm/vmscan.c21
-rw-r--r--net/bridge/netfilter/ebtables.c11
-rw-r--r--net/caif/chnl_net.c2
-rw-r--r--net/ceph/messenger.c7
-rw-r--r--net/ceph/mon_client.c14
-rw-r--r--net/core/dev.c2
-rw-r--r--net/core/dev_addr_lists.c2
-rw-r--r--net/core/neighbour.c40
-rw-r--r--net/dns_resolver/dns_key.c12
-rw-r--r--net/ife/ife.c38
-rw-r--r--net/ipv4/ip_output.c8
-rw-r--r--net/ipv4/tcp.c8
-rw-r--r--net/ipv4/tcp_input.c7
-rw-r--r--net/ipv6/netfilter/Kconfig55
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/ipv6/seg6_iptunnel.c2
-rw-r--r--net/l2tp/l2tp_core.c40
-rw-r--r--net/l2tp/l2tp_core.h3
-rw-r--r--net/l2tp/l2tp_debugfs.c18
-rw-r--r--net/l2tp/l2tp_netlink.c11
-rw-r--r--net/l2tp/l2tp_ppp.c34
-rw-r--r--net/llc/af_llc.c14
-rw-r--r--net/llc/llc_c_ac.c9
-rw-r--r--net/llc/llc_conn.c22
-rw-r--r--net/netfilter/Kconfig1
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c8
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c155
-rw-r--r--net/netfilter/nf_conntrack_expect.c5
-rw-r--r--net/netfilter/nf_conntrack_extend.c2
-rw-r--r--net/netfilter/nf_conntrack_sip.c16
-rw-r--r--net/netfilter/nf_tables_api.c69
-rw-r--r--net/netfilter/xt_connmark.c49
-rw-r--r--net/packet/af_packet.c83
-rw-r--r--net/packet/internal.h10
-rw-r--r--net/qrtr/qrtr.c1
-rw-r--r--net/sched/act_ife.c9
-rw-r--r--net/sctp/ipv6.c60
-rw-r--r--net/smc/af_smc.c10
-rw-r--r--net/strparser/strparser.c9
-rw-r--r--net/sunrpc/rpc_pipe.c1
-rw-r--r--net/tipc/monitor.c2
-rw-r--r--net/tipc/name_table.c34
-rw-r--r--net/tipc/name_table.h2
-rw-r--r--net/tipc/net.c2
-rw-r--r--net/tipc/netlink.c5
-rw-r--r--net/tipc/node.c11
-rw-r--r--net/tipc/socket.c4
-rw-r--r--net/tipc/subscr.c5
-rw-r--r--net/tls/tls_sw.c10
-rw-r--r--net/vmw_vsock/af_vsock.c6
-rw-r--r--samples/livepatch/livepatch-shadow-fix1.c43
-rw-r--r--samples/livepatch/livepatch-shadow-fix2.c33
-rw-r--r--security/commoncap.c2
-rw-r--r--sound/core/control.c2
-rw-r--r--sound/core/pcm_compat.c7
-rw-r--r--sound/core/pcm_native.c30
-rw-r--r--sound/core/rawmidi_compat.c18
-rw-r--r--sound/core/seq/oss/seq_oss_event.c15
-rw-r--r--sound/core/seq/oss/seq_oss_midi.c2
-rw-r--r--sound/core/seq/oss/seq_oss_synth.c85
-rw-r--r--sound/core/seq/oss/seq_oss_synth.h3
-rw-r--r--sound/drivers/opl3/opl3_synth.c7
-rw-r--r--sound/firewire/dice/dice-stream.c2
-rw-r--r--sound/firewire/dice/dice.c2
-rw-r--r--sound/pci/asihpi/hpimsginit.c13
-rw-r--r--sound/pci/asihpi/hpioctl.c4
-rw-r--r--sound/pci/hda/hda_hwdep.c12
-rw-r--r--sound/pci/hda/hda_intel.c3
-rw-r--r--sound/pci/hda/patch_hdmi.c9
-rw-r--r--sound/pci/hda/patch_realtek.c8
-rw-r--r--sound/pci/rme9652/hdspm.c24
-rw-r--r--sound/pci/rme9652/rme9652.c6
-rw-r--r--sound/soc/amd/acp-da7219-max98357a.c2
-rw-r--r--sound/soc/codecs/adau17x1.c26
-rw-r--r--sound/soc/codecs/adau17x1.h3
-rw-r--r--sound/soc/codecs/msm8916-wcd-analog.c9
-rw-r--r--sound/soc/codecs/rt5514.c3
-rw-r--r--sound/soc/fsl/fsl_esai.c7
-rw-r--r--sound/soc/fsl/fsl_ssi.c14
-rw-r--r--sound/soc/intel/Kconfig22
-rw-r--r--sound/soc/omap/omap-dmic.c14
-rw-r--r--sound/soc/sh/rcar/core.c4
-rw-r--r--sound/soc/soc-topology.c14
-rw-r--r--sound/usb/line6/midi.c2
-rw-r--r--sound/usb/mixer.c7
-rw-r--r--sound/usb/mixer_maps.c3
-rw-r--r--sound/usb/stream.c2
-rw-r--r--sound/usb/usx2y/us122l.c2
-rw-r--r--sound/usb/usx2y/usX2Yhwdep.c2
-rw-r--r--sound/usb/usx2y/usx2yhwdeppcm.c2
-rw-r--r--tools/arch/arm/include/uapi/asm/kvm.h9
-rw-r--r--tools/arch/x86/include/asm/required-features.h8
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h19
-rw-r--r--tools/include/linux/compiler.h20
-rw-r--r--tools/include/linux/coresight-pmu.h13
-rw-r--r--tools/include/uapi/asm-generic/mman-common.h3
-rw-r--r--tools/include/uapi/linux/bpf.h1
-rw-r--r--tools/include/uapi/linux/if_link.h39
-rw-r--r--tools/include/uapi/linux/kvm.h21
-rw-r--r--tools/include/uapi/linux/perf_event.h18
-rw-r--r--tools/include/uapi/sound/asound.h1
-rw-r--r--tools/lib/subcmd/parse-options.c6
-rw-r--r--tools/objtool/Makefile4
-rw-r--r--tools/perf/Documentation/perf-config.txt5
-rw-r--r--tools/perf/Documentation/perf-mem.txt42
-rw-r--r--tools/perf/Documentation/perf-sched.txt4
-rw-r--r--tools/perf/Documentation/perf-script.txt17
-rw-r--r--tools/perf/Documentation/perf-stat.txt2
-rw-r--r--tools/perf/Makefile.config4
-rw-r--r--tools/perf/arch/arm/include/arch-tests.h12
-rw-r--r--tools/perf/arch/arm/tests/Build2
-rw-r--r--tools/perf/arch/arm/tests/arch-tests.c16
-rw-r--r--tools/perf/arch/arm/util/auxtrace.c13
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c13
-rw-r--r--tools/perf/arch/arm/util/cs-etm.h13
-rw-r--r--tools/perf/arch/arm/util/pmu.c13
-rw-r--r--tools/perf/arch/s390/util/auxtrace.c1
-rw-r--r--tools/perf/arch/s390/util/header.c18
-rw-r--r--tools/perf/arch/x86/Makefile2
-rw-r--r--tools/perf/arch/x86/annotate/instructions.c67
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_64.tbl712
-rw-r--r--tools/perf/builtin-help.c2
-rw-r--r--tools/perf/builtin-mem.c4
-rw-r--r--tools/perf/builtin-script.c9
-rw-r--r--tools/perf/builtin-stat.c54
-rw-r--r--tools/perf/builtin-version.c3
-rw-r--r--tools/perf/perf.c4
-rw-r--r--tools/perf/pmu-events/arch/s390/mapfile.csv10
-rw-r--r--tools/perf/tests/attr/test-record-group-sampling3
-rw-r--r--tools/perf/tests/bpf-script-example.c2
-rw-r--r--tools/perf/tests/bpf-script-test-kbuild.c1
-rw-r--r--tools/perf/tests/builtin-test.c1
-rw-r--r--tools/perf/tests/mmap-basic.c2
-rwxr-xr-xtools/perf/tests/shell/record+probe_libc_inet_pton.sh6
-rw-r--r--tools/perf/trace/beauty/mmap.c3
-rw-r--r--tools/perf/ui/browsers/annotate.c5
-rw-r--r--tools/perf/ui/browsers/hists.c2
-rw-r--r--tools/perf/util/annotate.c26
-rw-r--r--tools/perf/util/annotate.h9
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c3
-rw-r--r--tools/perf/util/cs-etm.c3
-rw-r--r--tools/perf/util/cs-etm.h13
-rw-r--r--tools/perf/util/event.c4
-rw-r--r--tools/perf/util/evsel.c24
-rw-r--r--tools/perf/util/evsel.h1
-rwxr-xr-xtools/perf/util/generate-cmdlist.sh2
-rw-r--r--tools/perf/util/header.c3
-rw-r--r--tools/perf/util/machine.c30
-rw-r--r--tools/perf/util/parse-events.y8
-rw-r--r--tools/perf/util/pmu.c28
-rw-r--r--tools/perf/util/symbol.c8
-rw-r--r--tools/perf/util/syscalltbl.c6
-rw-r--r--tools/perf/util/trace-event-scripting.c4
-rw-r--r--tools/testing/nvdimm/test/nfit.c84
-rw-r--r--tools/testing/selftests/bpf/.gitignore3
-rw-r--r--tools/testing/selftests/bpf/test_sock.c1
-rw-r--r--tools/testing/selftests/bpf/test_sock_addr.c1
-rwxr-xr-xtools/testing/selftests/bpf/test_sock_addr.sh4
-rw-r--r--tools/testing/selftests/filesystems/Makefile8
-rw-r--r--tools/testing/selftests/firmware/Makefile1
-rwxr-xr-xtools/testing/selftests/firmware/fw_lib.sh10
-rwxr-xr-xtools/testing/selftests/firmware/fw_run_tests.sh2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc44
-rw-r--r--tools/testing/selftests/kvm/Makefile5
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h15
-rw-r--r--tools/testing/selftests/kvm/include/vmx.h494
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c20
-rw-r--r--tools/testing/selftests/kvm/lib/sparsebit.c4
-rw-r--r--tools/testing/selftests/kvm/lib/vmx.c243
-rw-r--r--tools/testing/selftests/kvm/vmx_tsc_adjust_test.c231
-rw-r--r--tools/testing/selftests/net/Makefile2
-rw-r--r--tools/testing/selftests/x86/test_syscall_vdso.c35
-rw-r--r--virt/kvm/arm/arm.c15
-rw-r--r--virt/kvm/arm/psci.c60
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v2.c5
-rw-r--r--virt/kvm/arm/vgic/vgic.c22
691 files changed, 9711 insertions, 4822 deletions
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index ff335f8aeb39..92f30006adae 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -136,6 +136,19 @@ Sorting
136.. kernel-doc:: lib/list_sort.c 136.. kernel-doc:: lib/list_sort.c
137 :export: 137 :export:
138 138
139Text Searching
140--------------
141
142.. kernel-doc:: lib/textsearch.c
143 :doc: ts_intro
144
145.. kernel-doc:: lib/textsearch.c
146 :export:
147
148.. kernel-doc:: include/linux/textsearch.h
149 :functions: textsearch_find textsearch_next \
150 textsearch_get_pattern textsearch_get_pattern_len
151
139UUID/GUID 152UUID/GUID
140--------- 153---------
141 154
diff --git a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt
index 8ff65fa632fd..c06c045126fc 100644
--- a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt
+++ b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt
@@ -21,7 +21,7 @@ Required properties:
21- interrupts : identifier to the device interrupt 21- interrupts : identifier to the device interrupt
22- clocks : a list of phandle + clock-specifier pairs, one for each 22- clocks : a list of phandle + clock-specifier pairs, one for each
23 entry in clock names. 23 entry in clock names.
24- clocks-names : 24- clock-names :
25 * "xtal" for external xtal clock identifier 25 * "xtal" for external xtal clock identifier
26 * "pclk" for the bus core clock, either the clk81 clock or the gate clock 26 * "pclk" for the bus core clock, either the clk81 clock or the gate clock
27 * "baud" for the source of the baudrate generator, can be either the xtal 27 * "baud" for the source of the baudrate generator, can be either the xtal
diff --git a/Documentation/devicetree/bindings/serial/mvebu-uart.txt b/Documentation/devicetree/bindings/serial/mvebu-uart.txt
index 2ae2fee7e023..b7e0e32b9ac6 100644
--- a/Documentation/devicetree/bindings/serial/mvebu-uart.txt
+++ b/Documentation/devicetree/bindings/serial/mvebu-uart.txt
@@ -24,7 +24,7 @@ Required properties:
24 - Must contain two elements for the extended variant of the IP 24 - Must contain two elements for the extended variant of the IP
25 (marvell,armada-3700-uart-ext): "uart-tx" and "uart-rx", 25 (marvell,armada-3700-uart-ext): "uart-tx" and "uart-rx",
26 respectively the UART TX interrupt and the UART RX interrupt. A 26 respectively the UART TX interrupt and the UART RX interrupt. A
27 corresponding interrupts-names property must be defined. 27 corresponding interrupt-names property must be defined.
28 - For backward compatibility reasons, a single element interrupts 28 - For backward compatibility reasons, a single element interrupts
29 property is also supported for the standard variant of the IP, 29 property is also supported for the standard variant of the IP,
30 containing only the UART sum interrupt. This form is deprecated 30 containing only the UART sum interrupt. This form is deprecated
diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
index ad962f4ec3aa..a006ea4d065f 100644
--- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
+++ b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
@@ -17,6 +17,8 @@ Required properties:
17 - "renesas,scifa-r8a7745" for R8A7745 (RZ/G1E) SCIFA compatible UART. 17 - "renesas,scifa-r8a7745" for R8A7745 (RZ/G1E) SCIFA compatible UART.
18 - "renesas,scifb-r8a7745" for R8A7745 (RZ/G1E) SCIFB compatible UART. 18 - "renesas,scifb-r8a7745" for R8A7745 (RZ/G1E) SCIFB compatible UART.
19 - "renesas,hscif-r8a7745" for R8A7745 (RZ/G1E) HSCIF compatible UART. 19 - "renesas,hscif-r8a7745" for R8A7745 (RZ/G1E) HSCIF compatible UART.
20 - "renesas,scif-r8a77470" for R8A77470 (RZ/G1C) SCIF compatible UART.
21 - "renesas,hscif-r8a77470" for R8A77470 (RZ/G1C) HSCIF compatible UART.
20 - "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART. 22 - "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART.
21 - "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART. 23 - "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART.
22 - "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART. 24 - "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART.
diff --git a/Documentation/devicetree/bindings/thermal/exynos-thermal.txt b/Documentation/devicetree/bindings/thermal/exynos-thermal.txt
index 1b596fd38dc4..b957acff57aa 100644
--- a/Documentation/devicetree/bindings/thermal/exynos-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/exynos-thermal.txt
@@ -49,19 +49,6 @@ on the SoC (only first trip points defined in DT will be configured):
49 - samsung,exynos5433-tmu: 8 49 - samsung,exynos5433-tmu: 8
50 - samsung,exynos7-tmu: 8 50 - samsung,exynos7-tmu: 8
51 51
52Following properties are mandatory (depending on SoC):
53- samsung,tmu_gain: Gain value for internal TMU operation.
54- samsung,tmu_reference_voltage: Value of TMU IP block's reference voltage
55- samsung,tmu_noise_cancel_mode: Mode for noise cancellation
56- samsung,tmu_efuse_value: Default level of temperature - it is needed when
57 in factory fusing produced wrong value
58- samsung,tmu_min_efuse_value: Minimum temperature fused value
59- samsung,tmu_max_efuse_value: Maximum temperature fused value
60- samsung,tmu_first_point_trim: First point trimming value
61- samsung,tmu_second_point_trim: Second point trimming value
62- samsung,tmu_default_temp_offset: Default temperature offset
63- samsung,tmu_cal_type: Callibration type
64
65** Optional properties: 52** Optional properties:
66 53
67- vtmu-supply: This entry is optional and provides the regulator node supplying 54- vtmu-supply: This entry is optional and provides the regulator node supplying
@@ -78,7 +65,7 @@ Example 1):
78 clocks = <&clock 383>; 65 clocks = <&clock 383>;
79 clock-names = "tmu_apbif"; 66 clock-names = "tmu_apbif";
80 vtmu-supply = <&tmu_regulator_node>; 67 vtmu-supply = <&tmu_regulator_node>;
81 #include "exynos4412-tmu-sensor-conf.dtsi" 68 #thermal-sensor-cells = <0>;
82 }; 69 };
83 70
84Example 2): 71Example 2):
@@ -89,7 +76,7 @@ Example 2):
89 interrupts = <0 58 0>; 76 interrupts = <0 58 0>;
90 clocks = <&clock 21>; 77 clocks = <&clock 21>;
91 clock-names = "tmu_apbif"; 78 clock-names = "tmu_apbif";
92 #include "exynos5440-tmu-sensor-conf.dtsi" 79 #thermal-sensor-cells = <0>;
93 }; 80 };
94 81
95Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register") 82Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register")
@@ -99,7 +86,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register")
99 interrupts = <0 184 0>; 86 interrupts = <0 184 0>;
100 clocks = <&clock 318>, <&clock 318>; 87 clocks = <&clock 318>, <&clock 318>;
101 clock-names = "tmu_apbif", "tmu_triminfo_apbif"; 88 clock-names = "tmu_apbif", "tmu_triminfo_apbif";
102 #include "exynos4412-tmu-sensor-conf.dtsi" 89 #thermal-sensor-cells = <0>;
103 }; 90 };
104 91
105 tmu_cpu3: tmu@1006c000 { 92 tmu_cpu3: tmu@1006c000 {
@@ -108,7 +95,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register")
108 interrupts = <0 185 0>; 95 interrupts = <0 185 0>;
109 clocks = <&clock 318>, <&clock 319>; 96 clocks = <&clock 318>, <&clock 319>;
110 clock-names = "tmu_apbif", "tmu_triminfo_apbif"; 97 clock-names = "tmu_apbif", "tmu_triminfo_apbif";
111 #include "exynos4412-tmu-sensor-conf.dtsi" 98 #thermal-sensor-cells = <0>;
112 }; 99 };
113 100
114 tmu_gpu: tmu@100a0000 { 101 tmu_gpu: tmu@100a0000 {
@@ -117,7 +104,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register")
117 interrupts = <0 215 0>; 104 interrupts = <0 215 0>;
118 clocks = <&clock 319>, <&clock 318>; 105 clocks = <&clock 319>, <&clock 318>;
119 clock-names = "tmu_apbif", "tmu_triminfo_apbif"; 106 clock-names = "tmu_apbif", "tmu_triminfo_apbif";
120 #include "exynos4412-tmu-sensor-conf.dtsi" 107 #thermal-sensor-cells = <0>;
121 }; 108 };
122 109
123Note: For multi-instance tmu each instance should have an alias correctly 110Note: For multi-instance tmu each instance should have an alias correctly
diff --git a/Documentation/devicetree/bindings/thermal/thermal.txt b/Documentation/devicetree/bindings/thermal/thermal.txt
index 1719d47a5e2f..cc553f0952c5 100644
--- a/Documentation/devicetree/bindings/thermal/thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/thermal.txt
@@ -55,8 +55,7 @@ of heat dissipation). For example a fan's cooling states correspond to
55the different fan speeds possible. Cooling states are referred to by 55the different fan speeds possible. Cooling states are referred to by
56single unsigned integers, where larger numbers mean greater heat 56single unsigned integers, where larger numbers mean greater heat
57dissipation. The precise set of cooling states associated with a device 57dissipation. The precise set of cooling states associated with a device
58(as referred to by the cooling-min-level and cooling-max-level 58should be defined in a particular device's binding.
59properties) should be defined in a particular device's binding.
60For more examples of cooling devices, refer to the example sections below. 59For more examples of cooling devices, refer to the example sections below.
61 60
62Required properties: 61Required properties:
@@ -69,15 +68,6 @@ Required properties:
69 See Cooling device maps section below for more details 68 See Cooling device maps section below for more details
70 on how consumers refer to cooling devices. 69 on how consumers refer to cooling devices.
71 70
72Optional properties:
73- cooling-min-level: An integer indicating the smallest
74 Type: unsigned cooling state accepted. Typically 0.
75 Size: one cell
76
77- cooling-max-level: An integer indicating the largest
78 Type: unsigned cooling state accepted.
79 Size: one cell
80
81* Trip points 71* Trip points
82 72
83The trip node is a node to describe a point in the temperature domain 73The trip node is a node to describe a point in the temperature domain
@@ -226,8 +216,6 @@ cpus {
226 396000 950000 216 396000 950000
227 198000 850000 217 198000 850000
228 >; 218 >;
229 cooling-min-level = <0>;
230 cooling-max-level = <3>;
231 #cooling-cells = <2>; /* min followed by max */ 219 #cooling-cells = <2>; /* min followed by max */
232 }; 220 };
233 ... 221 ...
@@ -241,8 +229,6 @@ cpus {
241 */ 229 */
242 fan0: fan@48 { 230 fan0: fan@48 {
243 ... 231 ...
244 cooling-min-level = <0>;
245 cooling-max-level = <9>;
246 #cooling-cells = <2>; /* min followed by max */ 232 #cooling-cells = <2>; /* min followed by max */
247 }; 233 };
248}; 234};
diff --git a/Documentation/devicetree/bindings/timer/nuvoton,npcm7xx-timer.txt b/Documentation/devicetree/bindings/timer/nuvoton,npcm7xx-timer.txt
new file mode 100644
index 000000000000..ea22dfe485be
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/nuvoton,npcm7xx-timer.txt
@@ -0,0 +1,21 @@
1Nuvoton NPCM7xx timer
2
3Nuvoton NPCM7xx have three timer modules, each timer module provides five 24-bit
4timer counters.
5
6Required properties:
7- compatible : "nuvoton,npcm750-timer" for Poleg NPCM750.
8- reg : Offset and length of the register set for the device.
9- interrupts : Contain the timer interrupt with flags for
10 falling edge.
11- clocks : phandle of timer reference clock (usually a 25 MHz clock).
12
13Example:
14
15timer@f0008000 {
16 compatible = "nuvoton,npcm750-timer";
17 interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
18 reg = <0xf0008000 0x50>;
19 clocks = <&clk NPCM7XX_CLK_TIMER>;
20};
21
diff --git a/Documentation/devicetree/bindings/timer/nxp,tpm-timer.txt b/Documentation/devicetree/bindings/timer/nxp,tpm-timer.txt
index b4aa7ddb5b13..f82087b220f4 100644
--- a/Documentation/devicetree/bindings/timer/nxp,tpm-timer.txt
+++ b/Documentation/devicetree/bindings/timer/nxp,tpm-timer.txt
@@ -15,7 +15,7 @@ Required properties:
15- interrupts : Should be the clock event device interrupt. 15- interrupts : Should be the clock event device interrupt.
16- clocks : The clocks provided by the SoC to drive the timer, must contain 16- clocks : The clocks provided by the SoC to drive the timer, must contain
17 an entry for each entry in clock-names. 17 an entry for each entry in clock-names.
18- clock-names : Must include the following entries: "igp" and "per". 18- clock-names : Must include the following entries: "ipg" and "per".
19 19
20Example: 20Example:
21tpm5: tpm@40260000 { 21tpm5: tpm@40260000 {
diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt
index c4c00dff4b56..bd1dd316fb23 100644
--- a/Documentation/devicetree/bindings/usb/usb-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt
@@ -28,7 +28,10 @@ Required properties:
28 - interrupts: one XHCI interrupt should be described here. 28 - interrupts: one XHCI interrupt should be described here.
29 29
30Optional properties: 30Optional properties:
31 - clocks: reference to a clock 31 - clocks: reference to the clocks
32 - clock-names: mandatory if there is a second clock, in this case
33 the name must be "core" for the first clock and "reg" for the
34 second one
32 - usb2-lpm-disable: indicate if we don't want to enable USB2 HW LPM 35 - usb2-lpm-disable: indicate if we don't want to enable USB2 HW LPM
33 - usb3-lpm-capable: determines if platform is USB3 LPM capable 36 - usb3-lpm-capable: determines if platform is USB3 LPM capable
34 - quirk-broken-port-ped: set if the controller has broken port disable mechanism 37 - quirk-broken-port-ped: set if the controller has broken port disable mechanism
diff --git a/Documentation/driver-api/firmware/request_firmware.rst b/Documentation/driver-api/firmware/request_firmware.rst
index cf4516dfbf96..d5ec95a7195b 100644
--- a/Documentation/driver-api/firmware/request_firmware.rst
+++ b/Documentation/driver-api/firmware/request_firmware.rst
@@ -17,17 +17,17 @@ an error is returned.
17 17
18request_firmware 18request_firmware
19---------------- 19----------------
20.. kernel-doc:: drivers/base/firmware_class.c 20.. kernel-doc:: drivers/base/firmware_loader/main.c
21 :functions: request_firmware 21 :functions: request_firmware
22 22
23request_firmware_direct 23request_firmware_direct
24----------------------- 24-----------------------
25.. kernel-doc:: drivers/base/firmware_class.c 25.. kernel-doc:: drivers/base/firmware_loader/main.c
26 :functions: request_firmware_direct 26 :functions: request_firmware_direct
27 27
28request_firmware_into_buf 28request_firmware_into_buf
29------------------------- 29-------------------------
30.. kernel-doc:: drivers/base/firmware_class.c 30.. kernel-doc:: drivers/base/firmware_loader/main.c
31 :functions: request_firmware_into_buf 31 :functions: request_firmware_into_buf
32 32
33Asynchronous firmware requests 33Asynchronous firmware requests
@@ -41,7 +41,7 @@ in atomic contexts.
41 41
42request_firmware_nowait 42request_firmware_nowait
43----------------------- 43-----------------------
44.. kernel-doc:: drivers/base/firmware_class.c 44.. kernel-doc:: drivers/base/firmware_loader/main.c
45 :functions: request_firmware_nowait 45 :functions: request_firmware_nowait
46 46
47Special optimizations on reboot 47Special optimizations on reboot
@@ -50,12 +50,12 @@ Special optimizations on reboot
50Some devices have an optimization in place to enable the firmware to be 50Some devices have an optimization in place to enable the firmware to be
51retained during system reboot. When such optimizations are used the driver 51retained during system reboot. When such optimizations are used the driver
52author must ensure the firmware is still available on resume from suspend, 52author must ensure the firmware is still available on resume from suspend,
53this can be done with firmware_request_cache() insted of requesting for the 53this can be done with firmware_request_cache() instead of requesting for the
54firmare to be loaded. 54firmware to be loaded.
55 55
56firmware_request_cache() 56firmware_request_cache()
57----------------------- 57------------------------
58.. kernel-doc:: drivers/base/firmware_class.c 58.. kernel-doc:: drivers/base/firmware_loader/main.c
59 :functions: firmware_request_cache 59 :functions: firmware_request_cache
60 60
61request firmware API expected driver use 61request firmware API expected driver use
diff --git a/Documentation/driver-api/infrastructure.rst b/Documentation/driver-api/infrastructure.rst
index 6d9ff316b608..bee1b9a1702f 100644
--- a/Documentation/driver-api/infrastructure.rst
+++ b/Documentation/driver-api/infrastructure.rst
@@ -28,7 +28,7 @@ Device Drivers Base
28.. kernel-doc:: drivers/base/node.c 28.. kernel-doc:: drivers/base/node.c
29 :internal: 29 :internal:
30 30
31.. kernel-doc:: drivers/base/firmware_class.c 31.. kernel-doc:: drivers/base/firmware_loader/main.c
32 :export: 32 :export:
33 33
34.. kernel-doc:: drivers/base/transport_class.c 34.. kernel-doc:: drivers/base/transport_class.c
diff --git a/Documentation/driver-api/usb/typec.rst b/Documentation/driver-api/usb/typec.rst
index feb31946490b..48ff58095f11 100644
--- a/Documentation/driver-api/usb/typec.rst
+++ b/Documentation/driver-api/usb/typec.rst
@@ -210,7 +210,7 @@ If the connector is dual-role capable, there may also be a switch for the data
210role. USB Type-C Connector Class does not supply separate API for them. The 210role. USB Type-C Connector Class does not supply separate API for them. The
211port drivers can use USB Role Class API with those. 211port drivers can use USB Role Class API with those.
212 212
213Illustration of the muxes behind a connector that supports an alternate mode: 213Illustration of the muxes behind a connector that supports an alternate mode::
214 214
215 ------------------------ 215 ------------------------
216 | Connector | 216 | Connector |
diff --git a/Documentation/i2c/dev-interface b/Documentation/i2c/dev-interface
index d04e6e4964ee..fbed645ccd75 100644
--- a/Documentation/i2c/dev-interface
+++ b/Documentation/i2c/dev-interface
@@ -9,8 +9,8 @@ i2c adapters present on your system at a given time. i2cdetect is part of
9the i2c-tools package. 9the i2c-tools package.
10 10
11I2C device files are character device files with major device number 89 11I2C device files are character device files with major device number 89
12and a minor device number corresponding to the number assigned as 12and a minor device number corresponding to the number assigned as
13explained above. They should be called "i2c-%d" (i2c-0, i2c-1, ..., 13explained above. They should be called "i2c-%d" (i2c-0, i2c-1, ...,
14i2c-10, ...). All 256 minor device numbers are reserved for i2c. 14i2c-10, ...). All 256 minor device numbers are reserved for i2c.
15 15
16 16
@@ -23,11 +23,6 @@ First, you need to include these two headers:
23 #include <linux/i2c-dev.h> 23 #include <linux/i2c-dev.h>
24 #include <i2c/smbus.h> 24 #include <i2c/smbus.h>
25 25
26(Please note that there are two files named "i2c-dev.h" out there. One is
27distributed with the Linux kernel and the other one is included in the
28source tree of i2c-tools. They used to be different in content but since 2012
29they're identical. You should use "linux/i2c-dev.h").
30
31Now, you have to decide which adapter you want to access. You should 26Now, you have to decide which adapter you want to access. You should
32inspect /sys/class/i2c-dev/ or run "i2cdetect -l" to decide this. 27inspect /sys/class/i2c-dev/ or run "i2cdetect -l" to decide this.
33Adapter numbers are assigned somewhat dynamically, so you can not 28Adapter numbers are assigned somewhat dynamically, so you can not
@@ -38,7 +33,7 @@ Next thing, open the device file, as follows:
38 int file; 33 int file;
39 int adapter_nr = 2; /* probably dynamically determined */ 34 int adapter_nr = 2; /* probably dynamically determined */
40 char filename[20]; 35 char filename[20];
41 36
42 snprintf(filename, 19, "/dev/i2c-%d", adapter_nr); 37 snprintf(filename, 19, "/dev/i2c-%d", adapter_nr);
43 file = open(filename, O_RDWR); 38 file = open(filename, O_RDWR);
44 if (file < 0) { 39 if (file < 0) {
@@ -72,8 +67,10 @@ the device supports them. Both are illustrated below.
72 /* res contains the read word */ 67 /* res contains the read word */
73 } 68 }
74 69
75 /* Using I2C Write, equivalent of 70 /*
76 i2c_smbus_write_word_data(file, reg, 0x6543) */ 71 * Using I2C Write, equivalent of
72 * i2c_smbus_write_word_data(file, reg, 0x6543)
73 */
77 buf[0] = reg; 74 buf[0] = reg;
78 buf[1] = 0x43; 75 buf[1] = 0x43;
79 buf[2] = 0x65; 76 buf[2] = 0x65;
@@ -140,14 +137,14 @@ ioctl(file, I2C_RDWR, struct i2c_rdwr_ioctl_data *msgset)
140 set in each message, overriding the values set with the above ioctl's. 137 set in each message, overriding the values set with the above ioctl's.
141 138
142ioctl(file, I2C_SMBUS, struct i2c_smbus_ioctl_data *args) 139ioctl(file, I2C_SMBUS, struct i2c_smbus_ioctl_data *args)
143 Not meant to be called directly; instead, use the access functions 140 If possible, use the provided i2c_smbus_* methods described below instead
144 below. 141 of issuing direct ioctls.
145 142
146You can do plain i2c transactions by using read(2) and write(2) calls. 143You can do plain i2c transactions by using read(2) and write(2) calls.
147You do not need to pass the address byte; instead, set it through 144You do not need to pass the address byte; instead, set it through
148ioctl I2C_SLAVE before you try to access the device. 145ioctl I2C_SLAVE before you try to access the device.
149 146
150You can do SMBus level transactions (see documentation file smbus-protocol 147You can do SMBus level transactions (see documentation file smbus-protocol
151for details) through the following functions: 148for details) through the following functions:
152 __s32 i2c_smbus_write_quick(int file, __u8 value); 149 __s32 i2c_smbus_write_quick(int file, __u8 value);
153 __s32 i2c_smbus_read_byte(int file); 150 __s32 i2c_smbus_read_byte(int file);
@@ -158,7 +155,7 @@ for details) through the following functions:
158 __s32 i2c_smbus_write_word_data(int file, __u8 command, __u16 value); 155 __s32 i2c_smbus_write_word_data(int file, __u8 command, __u16 value);
159 __s32 i2c_smbus_process_call(int file, __u8 command, __u16 value); 156 __s32 i2c_smbus_process_call(int file, __u8 command, __u16 value);
160 __s32 i2c_smbus_read_block_data(int file, __u8 command, __u8 *values); 157 __s32 i2c_smbus_read_block_data(int file, __u8 command, __u8 *values);
161 __s32 i2c_smbus_write_block_data(int file, __u8 command, __u8 length, 158 __s32 i2c_smbus_write_block_data(int file, __u8 command, __u8 length,
162 __u8 *values); 159 __u8 *values);
163All these transactions return -1 on failure; you can read errno to see 160All these transactions return -1 on failure; you can read errno to see
164what happened. The 'write' transactions return 0 on success; the 161what happened. The 'write' transactions return 0 on success; the
@@ -166,10 +163,9 @@ what happened. The 'write' transactions return 0 on success; the
166returns the number of values read. The block buffers need not be longer 163returns the number of values read. The block buffers need not be longer
167than 32 bytes. 164than 32 bytes.
168 165
169The above functions are all inline functions, that resolve to calls to 166The above functions are made available by linking against the libi2c library,
170the i2c_smbus_access function, that on its turn calls a specific ioctl 167which is provided by the i2c-tools project. See:
171with the data in a specific format. Read the source code if you 168https://git.kernel.org/pub/scm/utils/i2c-tools/i2c-tools.git/.
172want to know what happens behind the screens.
173 169
174 170
175Implementation details 171Implementation details
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 84bb74dcae12..7f7413e597f3 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -217,7 +217,6 @@ Code Seq#(hex) Include File Comments
217'd' 02-40 pcmcia/ds.h conflict! 217'd' 02-40 pcmcia/ds.h conflict!
218'd' F0-FF linux/digi1.h 218'd' F0-FF linux/digi1.h
219'e' all linux/digi1.h conflict! 219'e' all linux/digi1.h conflict!
220'e' 00-1F drivers/net/irda/irtty-sir.h conflict!
221'f' 00-1F linux/ext2_fs.h conflict! 220'f' 00-1F linux/ext2_fs.h conflict!
222'f' 00-1F linux/ext3_fs.h conflict! 221'f' 00-1F linux/ext3_fs.h conflict!
223'f' 00-0F fs/jfs/jfs_dinode.h conflict! 222'f' 00-0F fs/jfs/jfs_dinode.h conflict!
@@ -247,7 +246,6 @@ Code Seq#(hex) Include File Comments
247'm' all linux/synclink.h conflict! 246'm' all linux/synclink.h conflict!
248'm' 00-19 drivers/message/fusion/mptctl.h conflict! 247'm' 00-19 drivers/message/fusion/mptctl.h conflict!
249'm' 00 drivers/scsi/megaraid/megaraid_ioctl.h conflict! 248'm' 00 drivers/scsi/megaraid/megaraid_ioctl.h conflict!
250'm' 00-1F net/irda/irmod.h conflict!
251'n' 00-7F linux/ncp_fs.h and fs/ncpfs/ioctl.c 249'n' 00-7F linux/ncp_fs.h and fs/ncpfs/ioctl.c
252'n' 80-8F uapi/linux/nilfs2_api.h NILFS2 250'n' 80-8F uapi/linux/nilfs2_api.h NILFS2
253'n' E0-FF linux/matroxfb.h matroxfb 251'n' E0-FF linux/matroxfb.h matroxfb
diff --git a/Documentation/livepatch/shadow-vars.txt b/Documentation/livepatch/shadow-vars.txt
index 89c66634d600..ecc09a7be5dd 100644
--- a/Documentation/livepatch/shadow-vars.txt
+++ b/Documentation/livepatch/shadow-vars.txt
@@ -34,9 +34,13 @@ meta-data and shadow-data:
34 - data[] - storage for shadow data 34 - data[] - storage for shadow data
35 35
36It is important to note that the klp_shadow_alloc() and 36It is important to note that the klp_shadow_alloc() and
37klp_shadow_get_or_alloc() calls, described below, store a *copy* of the 37klp_shadow_get_or_alloc() are zeroing the variable by default.
38data that the functions are provided. Callers should provide whatever 38They also allow to call a custom constructor function when a non-zero
39mutual exclusion is required of the shadow data. 39value is needed. Callers should provide whatever mutual exclusion
40is required.
41
42Note that the constructor is called under klp_shadow_lock spinlock. It allows
43to do actions that can be done only once when a new variable is allocated.
40 44
41* klp_shadow_get() - retrieve a shadow variable data pointer 45* klp_shadow_get() - retrieve a shadow variable data pointer
42 - search hashtable for <obj, id> pair 46 - search hashtable for <obj, id> pair
@@ -47,7 +51,7 @@ mutual exclusion is required of the shadow data.
47 - WARN and return NULL 51 - WARN and return NULL
48 - if <obj, id> doesn't already exist 52 - if <obj, id> doesn't already exist
49 - allocate a new shadow variable 53 - allocate a new shadow variable
50 - copy data into the new shadow variable 54 - initialize the variable using a custom constructor and data when provided
51 - add <obj, id> to the global hashtable 55 - add <obj, id> to the global hashtable
52 56
53* klp_shadow_get_or_alloc() - get existing or alloc a new shadow variable 57* klp_shadow_get_or_alloc() - get existing or alloc a new shadow variable
@@ -56,16 +60,20 @@ mutual exclusion is required of the shadow data.
56 - return existing shadow variable 60 - return existing shadow variable
57 - if <obj, id> doesn't already exist 61 - if <obj, id> doesn't already exist
58 - allocate a new shadow variable 62 - allocate a new shadow variable
59 - copy data into the new shadow variable 63 - initialize the variable using a custom constructor and data when provided
60 - add <obj, id> pair to the global hashtable 64 - add <obj, id> pair to the global hashtable
61 65
62* klp_shadow_free() - detach and free a <obj, id> shadow variable 66* klp_shadow_free() - detach and free a <obj, id> shadow variable
63 - find and remove a <obj, id> reference from global hashtable 67 - find and remove a <obj, id> reference from global hashtable
64 - if found, free shadow variable 68 - if found
69 - call destructor function if defined
70 - free shadow variable
65 71
66* klp_shadow_free_all() - detach and free all <*, id> shadow variables 72* klp_shadow_free_all() - detach and free all <*, id> shadow variables
67 - find and remove any <*, id> references from global hashtable 73 - find and remove any <*, id> references from global hashtable
68 - if found, free shadow variable 74 - if found
75 - call destructor function if defined
76 - free shadow variable
69 77
70 78
712. Use cases 792. Use cases
@@ -107,7 +115,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
107 sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp); 115 sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp);
108 116
109 /* Attach a corresponding shadow variable, then initialize it */ 117 /* Attach a corresponding shadow variable, then initialize it */
110 ps_lock = klp_shadow_alloc(sta, PS_LOCK, NULL, sizeof(*ps_lock), gfp); 118 ps_lock = klp_shadow_alloc(sta, PS_LOCK, sizeof(*ps_lock), gfp,
119 NULL, NULL);
111 if (!ps_lock) 120 if (!ps_lock)
112 goto shadow_fail; 121 goto shadow_fail;
113 spin_lock_init(ps_lock); 122 spin_lock_init(ps_lock);
@@ -131,7 +140,7 @@ variable:
131 140
132void sta_info_free(struct ieee80211_local *local, struct sta_info *sta) 141void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
133{ 142{
134 klp_shadow_free(sta, PS_LOCK); 143 klp_shadow_free(sta, PS_LOCK, NULL);
135 kfree(sta); 144 kfree(sta);
136 ... 145 ...
137 146
@@ -148,16 +157,24 @@ shadow variables to parents already in-flight.
148For commit 1d147bfa6429, a good spot to allocate a shadow spinlock is 157For commit 1d147bfa6429, a good spot to allocate a shadow spinlock is
149inside ieee80211_sta_ps_deliver_wakeup(): 158inside ieee80211_sta_ps_deliver_wakeup():
150 159
160int ps_lock_shadow_ctor(void *obj, void *shadow_data, void *ctor_data)
161{
162 spinlock_t *lock = shadow_data;
163
164 spin_lock_init(lock);
165 return 0;
166}
167
151#define PS_LOCK 1 168#define PS_LOCK 1
152void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) 169void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
153{ 170{
154 DEFINE_SPINLOCK(ps_lock_fallback);
155 spinlock_t *ps_lock; 171 spinlock_t *ps_lock;
156 172
157 /* sync with ieee80211_tx_h_unicast_ps_buf */ 173 /* sync with ieee80211_tx_h_unicast_ps_buf */
158 ps_lock = klp_shadow_get_or_alloc(sta, PS_LOCK, 174 ps_lock = klp_shadow_get_or_alloc(sta, PS_LOCK,
159 &ps_lock_fallback, sizeof(ps_lock_fallback), 175 sizeof(*ps_lock), GFP_ATOMIC,
160 GFP_ATOMIC); 176 ps_lock_shadow_ctor, NULL);
177
161 if (ps_lock) 178 if (ps_lock)
162 spin_lock(ps_lock); 179 spin_lock(ps_lock);
163 ... 180 ...
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index a4508ec1816b..fd55c7de9991 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -169,7 +169,7 @@ access to BPF code as well.
169BPF engine and instruction set 169BPF engine and instruction set
170------------------------------ 170------------------------------
171 171
172Under tools/net/ there's a small helper tool called bpf_asm which can 172Under tools/bpf/ there's a small helper tool called bpf_asm which can
173be used to write low-level filters for example scenarios mentioned in the 173be used to write low-level filters for example scenarios mentioned in the
174previous section. Asm-like syntax mentioned here has been implemented in 174previous section. Asm-like syntax mentioned here has been implemented in
175bpf_asm and will be used for further explanations (instead of dealing with 175bpf_asm and will be used for further explanations (instead of dealing with
@@ -359,7 +359,7 @@ $ ./bpf_asm -c foo
359In particular, as usage with xt_bpf or cls_bpf can result in more complex BPF 359In particular, as usage with xt_bpf or cls_bpf can result in more complex BPF
360filters that might not be obvious at first, it's good to test filters before 360filters that might not be obvious at first, it's good to test filters before
361attaching to a live system. For that purpose, there's a small tool called 361attaching to a live system. For that purpose, there's a small tool called
362bpf_dbg under tools/net/ in the kernel source directory. This debugger allows 362bpf_dbg under tools/bpf/ in the kernel source directory. This debugger allows
363for testing BPF filters against given pcap files, single stepping through the 363for testing BPF filters against given pcap files, single stepping through the
364BPF code on the pcap's packets and to do BPF machine register dumps. 364BPF code on the pcap's packets and to do BPF machine register dumps.
365 365
@@ -483,7 +483,7 @@ Example output from dmesg:
483[ 3389.935851] JIT code: 00000030: 00 e8 28 94 ff e0 83 f8 01 75 07 b8 ff ff 00 00 483[ 3389.935851] JIT code: 00000030: 00 e8 28 94 ff e0 83 f8 01 75 07 b8 ff ff 00 00
484[ 3389.935852] JIT code: 00000040: eb 02 31 c0 c9 c3 484[ 3389.935852] JIT code: 00000040: eb 02 31 c0 c9 c3
485 485
486In the kernel source tree under tools/net/, there's bpf_jit_disasm for 486In the kernel source tree under tools/bpf/, there's bpf_jit_disasm for
487generating disassembly out of the kernel log's hexdump: 487generating disassembly out of the kernel log's hexdump:
488 488
489# ./bpf_jit_disasm 489# ./bpf_jit_disasm
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 5dc1a040a2f1..35ffaa281b26 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1390,26 +1390,26 @@ mld_qrv - INTEGER
1390 Default: 2 (as specified by RFC3810 9.1) 1390 Default: 2 (as specified by RFC3810 9.1)
1391 Minimum: 1 (as specified by RFC6636 4.5) 1391 Minimum: 1 (as specified by RFC6636 4.5)
1392 1392
1393max_dst_opts_cnt - INTEGER 1393max_dst_opts_number - INTEGER
1394 Maximum number of non-padding TLVs allowed in a Destination 1394 Maximum number of non-padding TLVs allowed in a Destination
1395 options extension header. If this value is less than zero 1395 options extension header. If this value is less than zero
1396 then unknown options are disallowed and the number of known 1396 then unknown options are disallowed and the number of known
1397 TLVs allowed is the absolute value of this number. 1397 TLVs allowed is the absolute value of this number.
1398 Default: 8 1398 Default: 8
1399 1399
1400max_hbh_opts_cnt - INTEGER 1400max_hbh_opts_number - INTEGER
1401 Maximum number of non-padding TLVs allowed in a Hop-by-Hop 1401 Maximum number of non-padding TLVs allowed in a Hop-by-Hop
1402 options extension header. If this value is less than zero 1402 options extension header. If this value is less than zero
1403 then unknown options are disallowed and the number of known 1403 then unknown options are disallowed and the number of known
1404 TLVs allowed is the absolute value of this number. 1404 TLVs allowed is the absolute value of this number.
1405 Default: 8 1405 Default: 8
1406 1406
1407max dst_opts_len - INTEGER 1407max_dst_opts_length - INTEGER
1408 Maximum length allowed for a Destination options extension 1408 Maximum length allowed for a Destination options extension
1409 header. 1409 header.
1410 Default: INT_MAX (unlimited) 1410 Default: INT_MAX (unlimited)
1411 1411
1412max hbh_opts_len - INTEGER 1412max_hbh_length - INTEGER
1413 Maximum length allowed for a Hop-by-Hop options extension 1413 Maximum length allowed for a Hop-by-Hop options extension
1414 header. 1414 header.
1415 Default: INT_MAX (unlimited) 1415 Default: INT_MAX (unlimited)
@@ -2126,18 +2126,3 @@ max_dgram_qlen - INTEGER
2126 2126
2127 Default: 10 2127 Default: 10
2128 2128
2129
2130UNDOCUMENTED:
2131
2132/proc/sys/net/irda/*
2133 fast_poll_increase FIXME
2134 warn_noreply_time FIXME
2135 discovery_slots FIXME
2136 slot_timeout FIXME
2137 max_baud_rate FIXME
2138 discovery_timeout FIXME
2139 lap_keepalive_time FIXME
2140 max_noreply_time FIXME
2141 max_tx_data_size FIXME
2142 max_tx_window FIXME
2143 min_tx_turn_time FIXME
diff --git a/Documentation/power/suspend-and-cpuhotplug.txt b/Documentation/power/suspend-and-cpuhotplug.txt
index 31abd04b9572..6f55eb960a6d 100644
--- a/Documentation/power/suspend-and-cpuhotplug.txt
+++ b/Documentation/power/suspend-and-cpuhotplug.txt
@@ -168,7 +168,7 @@ update on the CPUs, as discussed below:
168 168
169[Please bear in mind that the kernel requests the microcode images from 169[Please bear in mind that the kernel requests the microcode images from
170userspace, using the request_firmware() function defined in 170userspace, using the request_firmware() function defined in
171drivers/base/firmware_class.c] 171drivers/base/firmware_loader/main.c]
172 172
173 173
174a. When all the CPUs are identical: 174a. When all the CPUs are identical:
diff --git a/Documentation/process/magic-number.rst b/Documentation/process/magic-number.rst
index 00cecf1fcba9..633be1043690 100644
--- a/Documentation/process/magic-number.rst
+++ b/Documentation/process/magic-number.rst
@@ -157,8 +157,5 @@ memory management. See ``include/sound/sndmagic.h`` for complete list of them. M
157OSS sound drivers have their magic numbers constructed from the soundcard PCI 157OSS sound drivers have their magic numbers constructed from the soundcard PCI
158ID - these are not listed here as well. 158ID - these are not listed here as well.
159 159
160IrDA subsystem also uses large number of own magic numbers, see
161``include/net/irda/irda.h`` for a complete list of them.
162
163HFS is another larger user of magic numbers - you can find them in 160HFS is another larger user of magic numbers - you can find them in
164``fs/hfs/hfs.h``. 161``fs/hfs/hfs.h``.
diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
index e45f0786f3f9..67d9c38e95eb 100644
--- a/Documentation/trace/ftrace.rst
+++ b/Documentation/trace/ftrace.rst
@@ -461,9 +461,17 @@ of ftrace. Here is a list of some of the key files:
461 and ticks at the same rate as the hardware clocksource. 461 and ticks at the same rate as the hardware clocksource.
462 462
463 boot: 463 boot:
464 Same as mono. Used to be a separate clock which accounted 464 This is the boot clock (CLOCK_BOOTTIME) and is based on the
465 for the time spent in suspend while CLOCK_MONOTONIC did 465 fast monotonic clock, but also accounts for time spent in
466 not. 466 suspend. Since the clock access is designed for use in
467 tracing in the suspend path, some side effects are possible
468 if clock is accessed after the suspend time is accounted before
469 the fast mono clock is updated. In this case, the clock update
470 appears to happen slightly sooner than it normally would have.
471 Also on 32-bit systems, it's possible that the 64-bit boot offset
472 sees a partial update. These effects are rare and post
473 processing should be able to handle them. See comments in the
474 ktime_get_boot_fast_ns() function for more information.
467 475
468 To set a clock, simply echo the clock name into this file:: 476 To set a clock, simply echo the clock name into this file::
469 477
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 1c7958b57fe9..758bf403a169 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1960,6 +1960,9 @@ ARM 32-bit VFP control registers have the following id bit patterns:
1960ARM 64-bit FP registers have the following id bit patterns: 1960ARM 64-bit FP registers have the following id bit patterns:
1961 0x4030 0000 0012 0 <regno:12> 1961 0x4030 0000 0012 0 <regno:12>
1962 1962
1963ARM firmware pseudo-registers have the following bit pattern:
1964 0x4030 0000 0014 <regno:16>
1965
1963 1966
1964arm64 registers are mapped using the lower 32 bits. The upper 16 of 1967arm64 registers are mapped using the lower 32 bits. The upper 16 of
1965that is the register group type, or coprocessor number: 1968that is the register group type, or coprocessor number:
@@ -1976,6 +1979,9 @@ arm64 CCSIDR registers are demultiplexed by CSSELR value:
1976arm64 system registers have the following id bit patterns: 1979arm64 system registers have the following id bit patterns:
1977 0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3> 1980 0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3>
1978 1981
1982arm64 firmware pseudo-registers have the following bit pattern:
1983 0x6030 0000 0014 <regno:16>
1984
1979 1985
1980MIPS registers are mapped using the lower 32 bits. The upper 16 of that is 1986MIPS registers are mapped using the lower 32 bits. The upper 16 of that is
1981the register group type: 1987the register group type:
@@ -2510,7 +2516,8 @@ Possible features:
2510 and execute guest code when KVM_RUN is called. 2516 and execute guest code when KVM_RUN is called.
2511 - KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode. 2517 - KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
2512 Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only). 2518 Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
2513 - KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU. 2519 - KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 (or a future revision
2520 backward compatible with v0.2) for the CPU.
2514 Depends on KVM_CAP_ARM_PSCI_0_2. 2521 Depends on KVM_CAP_ARM_PSCI_0_2.
2515 - KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU. 2522 - KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU.
2516 Depends on KVM_CAP_ARM_PMU_V3. 2523 Depends on KVM_CAP_ARM_PMU_V3.
diff --git a/Documentation/virtual/kvm/arm/psci.txt b/Documentation/virtual/kvm/arm/psci.txt
new file mode 100644
index 000000000000..aafdab887b04
--- /dev/null
+++ b/Documentation/virtual/kvm/arm/psci.txt
@@ -0,0 +1,30 @@
1KVM implements the PSCI (Power State Coordination Interface)
2specification in order to provide services such as CPU on/off, reset
3and power-off to the guest.
4
5The PSCI specification is regularly updated to provide new features,
6and KVM implements these updates if they make sense from a virtualization
7point of view.
8
9This means that a guest booted on two different versions of KVM can
10observe two different "firmware" revisions. This could cause issues if
11a given guest is tied to a particular PSCI revision (unlikely), or if
12a migration causes a different PSCI version to be exposed out of the
13blue to an unsuspecting guest.
14
15In order to remedy this situation, KVM exposes a set of "firmware
16pseudo-registers" that can be manipulated using the GET/SET_ONE_REG
17interface. These registers can be saved/restored by userspace, and set
18to a convenient value if required.
19
20The following register is defined:
21
22* KVM_REG_ARM_PSCI_VERSION:
23
24 - Only valid if the vcpu has the KVM_ARM_VCPU_PSCI_0_2 feature set
25 (and thus has already been initialized)
26 - Returns the current PSCI version on GET_ONE_REG (defaulting to the
27 highest PSCI version implemented by KVM and compatible with v0.2)
28 - Allows any PSCI version implemented by KVM and compatible with
29 v0.2 to be set with SET_ONE_REG
30 - Affects the whole VM (even if the register view is per-vcpu)
diff --git a/MAINTAINERS b/MAINTAINERS
index 0a1410d5a621..79bb02ff812f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -564,8 +564,9 @@ S: Maintained
564F: drivers/media/dvb-frontends/af9033* 564F: drivers/media/dvb-frontends/af9033*
565 565
566AFFS FILE SYSTEM 566AFFS FILE SYSTEM
567M: David Sterba <dsterba@suse.com>
567L: linux-fsdevel@vger.kernel.org 568L: linux-fsdevel@vger.kernel.org
568S: Orphan 569S: Odd Fixes
569F: Documentation/filesystems/affs.txt 570F: Documentation/filesystems/affs.txt
570F: fs/affs/ 571F: fs/affs/
571 572
@@ -905,6 +906,8 @@ ANDROID ION DRIVER
905M: Laura Abbott <labbott@redhat.com> 906M: Laura Abbott <labbott@redhat.com>
906M: Sumit Semwal <sumit.semwal@linaro.org> 907M: Sumit Semwal <sumit.semwal@linaro.org>
907L: devel@driverdev.osuosl.org 908L: devel@driverdev.osuosl.org
909L: dri-devel@lists.freedesktop.org
910L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)
908S: Supported 911S: Supported
909F: drivers/staging/android/ion 912F: drivers/staging/android/ion
910F: drivers/staging/android/uapi/ion.h 913F: drivers/staging/android/uapi/ion.h
@@ -1208,7 +1211,6 @@ F: drivers/*/*alpine*
1208ARM/ARTPEC MACHINE SUPPORT 1211ARM/ARTPEC MACHINE SUPPORT
1209M: Jesper Nilsson <jesper.nilsson@axis.com> 1212M: Jesper Nilsson <jesper.nilsson@axis.com>
1210M: Lars Persson <lars.persson@axis.com> 1213M: Lars Persson <lars.persson@axis.com>
1211M: Niklas Cassel <niklas.cassel@axis.com>
1212S: Maintained 1214S: Maintained
1213L: linux-arm-kernel@axis.com 1215L: linux-arm-kernel@axis.com
1214F: arch/arm/mach-artpec 1216F: arch/arm/mach-artpec
@@ -1373,7 +1375,8 @@ F: arch/arm/mach-ebsa110/
1373F: drivers/net/ethernet/amd/am79c961a.* 1375F: drivers/net/ethernet/amd/am79c961a.*
1374 1376
1375ARM/ENERGY MICRO (SILICON LABS) EFM32 SUPPORT 1377ARM/ENERGY MICRO (SILICON LABS) EFM32 SUPPORT
1376M: Uwe Kleine-König <kernel@pengutronix.de> 1378M: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
1379R: Pengutronix Kernel Team <kernel@pengutronix.de>
1377L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1380L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1378S: Maintained 1381S: Maintained
1379N: efm32 1382N: efm32
@@ -1401,7 +1404,8 @@ F: arch/arm/mach-footbridge/
1401 1404
1402ARM/FREESCALE IMX / MXC ARM ARCHITECTURE 1405ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
1403M: Shawn Guo <shawnguo@kernel.org> 1406M: Shawn Guo <shawnguo@kernel.org>
1404M: Sascha Hauer <kernel@pengutronix.de> 1407M: Sascha Hauer <s.hauer@pengutronix.de>
1408R: Pengutronix Kernel Team <kernel@pengutronix.de>
1405R: Fabio Estevam <fabio.estevam@nxp.com> 1409R: Fabio Estevam <fabio.estevam@nxp.com>
1406L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1410L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1407S: Maintained 1411S: Maintained
@@ -1416,7 +1420,8 @@ F: include/soc/imx/
1416 1420
1417ARM/FREESCALE VYBRID ARM ARCHITECTURE 1421ARM/FREESCALE VYBRID ARM ARCHITECTURE
1418M: Shawn Guo <shawnguo@kernel.org> 1422M: Shawn Guo <shawnguo@kernel.org>
1419M: Sascha Hauer <kernel@pengutronix.de> 1423M: Sascha Hauer <s.hauer@pengutronix.de>
1424R: Pengutronix Kernel Team <kernel@pengutronix.de>
1420R: Stefan Agner <stefan@agner.ch> 1425R: Stefan Agner <stefan@agner.ch>
1421L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1426L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1422S: Maintained 1427S: Maintained
@@ -2614,7 +2619,7 @@ S: Maintained
2614F: drivers/net/hamradio/baycom* 2619F: drivers/net/hamradio/baycom*
2615 2620
2616BCACHE (BLOCK LAYER CACHE) 2621BCACHE (BLOCK LAYER CACHE)
2617M: Michael Lyle <mlyle@lyle.org> 2622M: Coly Li <colyli@suse.de>
2618M: Kent Overstreet <kent.overstreet@gmail.com> 2623M: Kent Overstreet <kent.overstreet@gmail.com>
2619L: linux-bcache@vger.kernel.org 2624L: linux-bcache@vger.kernel.org
2620W: http://bcache.evilpiepirate.org 2625W: http://bcache.evilpiepirate.org
@@ -4245,6 +4250,9 @@ F: include/trace/events/fs_dax.h
4245 4250
4246DEVICE DIRECT ACCESS (DAX) 4251DEVICE DIRECT ACCESS (DAX)
4247M: Dan Williams <dan.j.williams@intel.com> 4252M: Dan Williams <dan.j.williams@intel.com>
4253M: Dave Jiang <dave.jiang@intel.com>
4254M: Ross Zwisler <ross.zwisler@linux.intel.com>
4255M: Vishal Verma <vishal.l.verma@intel.com>
4248L: linux-nvdimm@lists.01.org 4256L: linux-nvdimm@lists.01.org
4249S: Supported 4257S: Supported
4250F: drivers/dax/ 4258F: drivers/dax/
@@ -5652,7 +5660,8 @@ F: drivers/net/ethernet/freescale/fec.h
5652F: Documentation/devicetree/bindings/net/fsl-fec.txt 5660F: Documentation/devicetree/bindings/net/fsl-fec.txt
5653 5661
5654FREESCALE IMX / MXC FRAMEBUFFER DRIVER 5662FREESCALE IMX / MXC FRAMEBUFFER DRIVER
5655M: Sascha Hauer <kernel@pengutronix.de> 5663M: Sascha Hauer <s.hauer@pengutronix.de>
5664R: Pengutronix Kernel Team <kernel@pengutronix.de>
5656L: linux-fbdev@vger.kernel.org 5665L: linux-fbdev@vger.kernel.org
5657L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 5666L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
5658S: Maintained 5667S: Maintained
@@ -5784,6 +5793,14 @@ F: fs/crypto/
5784F: include/linux/fscrypt*.h 5793F: include/linux/fscrypt*.h
5785F: Documentation/filesystems/fscrypt.rst 5794F: Documentation/filesystems/fscrypt.rst
5786 5795
5796FSNOTIFY: FILESYSTEM NOTIFICATION INFRASTRUCTURE
5797M: Jan Kara <jack@suse.cz>
5798R: Amir Goldstein <amir73il@gmail.com>
5799L: linux-fsdevel@vger.kernel.org
5800S: Maintained
5801F: fs/notify/
5802F: include/linux/fsnotify*.h
5803
5787FUJITSU LAPTOP EXTRAS 5804FUJITSU LAPTOP EXTRAS
5788M: Jonathan Woithe <jwoithe@just42.net> 5805M: Jonathan Woithe <jwoithe@just42.net>
5789L: platform-driver-x86@vger.kernel.org 5806L: platform-driver-x86@vger.kernel.org
@@ -6256,7 +6273,7 @@ S: Odd Fixes
6256F: drivers/media/usb/hdpvr/ 6273F: drivers/media/usb/hdpvr/
6257 6274
6258HEWLETT PACKARD ENTERPRISE ILO NMI WATCHDOG DRIVER 6275HEWLETT PACKARD ENTERPRISE ILO NMI WATCHDOG DRIVER
6259M: Jimmy Vance <jimmy.vance@hpe.com> 6276M: Jerry Hoemann <jerry.hoemann@hpe.com>
6260S: Supported 6277S: Supported
6261F: Documentation/watchdog/hpwdt.txt 6278F: Documentation/watchdog/hpwdt.txt
6262F: drivers/watchdog/hpwdt.c 6279F: drivers/watchdog/hpwdt.c
@@ -7396,16 +7413,6 @@ S: Obsolete
7396F: include/uapi/linux/ipx.h 7413F: include/uapi/linux/ipx.h
7397F: drivers/staging/ipx/ 7414F: drivers/staging/ipx/
7398 7415
7399IRDA SUBSYSTEM
7400M: Samuel Ortiz <samuel@sortiz.org>
7401L: irda-users@lists.sourceforge.net (subscribers-only)
7402L: netdev@vger.kernel.org
7403W: http://irda.sourceforge.net/
7404S: Obsolete
7405T: git git://git.kernel.org/pub/scm/linux/kernel/git/sameo/irda-2.6.git
7406F: Documentation/networking/irda.txt
7407F: drivers/staging/irda/
7408
7409IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY) 7416IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
7410M: Marc Zyngier <marc.zyngier@arm.com> 7417M: Marc Zyngier <marc.zyngier@arm.com>
7411S: Maintained 7418S: Maintained
@@ -7738,7 +7745,7 @@ F: arch/x86/include/asm/svm.h
7738F: arch/x86/kvm/svm.c 7745F: arch/x86/kvm/svm.c
7739 7746
7740KERNEL VIRTUAL MACHINE FOR ARM (KVM/arm) 7747KERNEL VIRTUAL MACHINE FOR ARM (KVM/arm)
7741M: Christoffer Dall <christoffer.dall@linaro.org> 7748M: Christoffer Dall <christoffer.dall@arm.com>
7742M: Marc Zyngier <marc.zyngier@arm.com> 7749M: Marc Zyngier <marc.zyngier@arm.com>
7743L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 7750L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
7744L: kvmarm@lists.cs.columbia.edu 7751L: kvmarm@lists.cs.columbia.edu
@@ -7752,7 +7759,7 @@ F: virt/kvm/arm/
7752F: include/kvm/arm_* 7759F: include/kvm/arm_*
7753 7760
7754KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64) 7761KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
7755M: Christoffer Dall <christoffer.dall@linaro.org> 7762M: Christoffer Dall <christoffer.dall@arm.com>
7756M: Marc Zyngier <marc.zyngier@arm.com> 7763M: Marc Zyngier <marc.zyngier@arm.com>
7757L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 7764L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
7758L: kvmarm@lists.cs.columbia.edu 7765L: kvmarm@lists.cs.columbia.edu
@@ -8048,6 +8055,9 @@ F: tools/lib/lockdep/
8048 8055
8049LIBNVDIMM BLK: MMIO-APERTURE DRIVER 8056LIBNVDIMM BLK: MMIO-APERTURE DRIVER
8050M: Ross Zwisler <ross.zwisler@linux.intel.com> 8057M: Ross Zwisler <ross.zwisler@linux.intel.com>
8058M: Dan Williams <dan.j.williams@intel.com>
8059M: Vishal Verma <vishal.l.verma@intel.com>
8060M: Dave Jiang <dave.jiang@intel.com>
8051L: linux-nvdimm@lists.01.org 8061L: linux-nvdimm@lists.01.org
8052Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ 8062Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
8053S: Supported 8063S: Supported
@@ -8056,6 +8066,9 @@ F: drivers/nvdimm/region_devs.c
8056 8066
8057LIBNVDIMM BTT: BLOCK TRANSLATION TABLE 8067LIBNVDIMM BTT: BLOCK TRANSLATION TABLE
8058M: Vishal Verma <vishal.l.verma@intel.com> 8068M: Vishal Verma <vishal.l.verma@intel.com>
8069M: Dan Williams <dan.j.williams@intel.com>
8070M: Ross Zwisler <ross.zwisler@linux.intel.com>
8071M: Dave Jiang <dave.jiang@intel.com>
8059L: linux-nvdimm@lists.01.org 8072L: linux-nvdimm@lists.01.org
8060Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ 8073Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
8061S: Supported 8074S: Supported
@@ -8063,6 +8076,9 @@ F: drivers/nvdimm/btt*
8063 8076
8064LIBNVDIMM PMEM: PERSISTENT MEMORY DRIVER 8077LIBNVDIMM PMEM: PERSISTENT MEMORY DRIVER
8065M: Ross Zwisler <ross.zwisler@linux.intel.com> 8078M: Ross Zwisler <ross.zwisler@linux.intel.com>
8079M: Dan Williams <dan.j.williams@intel.com>
8080M: Vishal Verma <vishal.l.verma@intel.com>
8081M: Dave Jiang <dave.jiang@intel.com>
8066L: linux-nvdimm@lists.01.org 8082L: linux-nvdimm@lists.01.org
8067Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ 8083Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
8068S: Supported 8084S: Supported
@@ -8078,6 +8094,9 @@ F: Documentation/devicetree/bindings/pmem/pmem-region.txt
8078 8094
8079LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM 8095LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM
8080M: Dan Williams <dan.j.williams@intel.com> 8096M: Dan Williams <dan.j.williams@intel.com>
8097M: Ross Zwisler <ross.zwisler@linux.intel.com>
8098M: Vishal Verma <vishal.l.verma@intel.com>
8099M: Dave Jiang <dave.jiang@intel.com>
8081L: linux-nvdimm@lists.01.org 8100L: linux-nvdimm@lists.01.org
8082Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ 8101Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
8083T: git git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git 8102T: git git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git
@@ -9765,6 +9784,7 @@ F: include/uapi/linux/net_namespace.h
9765F: tools/testing/selftests/net/ 9784F: tools/testing/selftests/net/
9766F: lib/net_utils.c 9785F: lib/net_utils.c
9767F: lib/random32.c 9786F: lib/random32.c
9787F: Documentation/networking/
9768 9788
9769NETWORKING [IPSEC] 9789NETWORKING [IPSEC]
9770M: Steffen Klassert <steffen.klassert@secunet.com> 9790M: Steffen Klassert <steffen.klassert@secunet.com>
@@ -10881,7 +10901,6 @@ F: drivers/pci/host/
10881F: drivers/pci/dwc/ 10901F: drivers/pci/dwc/
10882 10902
10883PCIE DRIVER FOR AXIS ARTPEC 10903PCIE DRIVER FOR AXIS ARTPEC
10884M: Niklas Cassel <niklas.cassel@axis.com>
10885M: Jesper Nilsson <jesper.nilsson@axis.com> 10904M: Jesper Nilsson <jesper.nilsson@axis.com>
10886L: linux-arm-kernel@axis.com 10905L: linux-arm-kernel@axis.com
10887L: linux-pci@vger.kernel.org 10906L: linux-pci@vger.kernel.org
@@ -12816,7 +12835,8 @@ F: include/linux/siphash.h
12816 12835
12817SIOX 12836SIOX
12818M: Gavin Schenk <g.schenk@eckelmann.de> 12837M: Gavin Schenk <g.schenk@eckelmann.de>
12819M: Uwe Kleine-König <kernel@pengutronix.de> 12838M: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
12839R: Pengutronix Kernel Team <kernel@pengutronix.de>
12820S: Supported 12840S: Supported
12821F: drivers/siox/* 12841F: drivers/siox/*
12822F: include/trace/events/siox.h 12842F: include/trace/events/siox.h
@@ -13936,7 +13956,7 @@ THUNDERBOLT DRIVER
13936M: Andreas Noever <andreas.noever@gmail.com> 13956M: Andreas Noever <andreas.noever@gmail.com>
13937M: Michael Jamet <michael.jamet@intel.com> 13957M: Michael Jamet <michael.jamet@intel.com>
13938M: Mika Westerberg <mika.westerberg@linux.intel.com> 13958M: Mika Westerberg <mika.westerberg@linux.intel.com>
13939M: Yehezkel Bernat <yehezkel.bernat@intel.com> 13959M: Yehezkel Bernat <YehezkelShB@gmail.com>
13940T: git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git 13960T: git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git
13941S: Maintained 13961S: Maintained
13942F: Documentation/admin-guide/thunderbolt.rst 13962F: Documentation/admin-guide/thunderbolt.rst
@@ -13946,7 +13966,7 @@ F: include/linux/thunderbolt.h
13946THUNDERBOLT NETWORK DRIVER 13966THUNDERBOLT NETWORK DRIVER
13947M: Michael Jamet <michael.jamet@intel.com> 13967M: Michael Jamet <michael.jamet@intel.com>
13948M: Mika Westerberg <mika.westerberg@linux.intel.com> 13968M: Mika Westerberg <mika.westerberg@linux.intel.com>
13949M: Yehezkel Bernat <yehezkel.bernat@intel.com> 13969M: Yehezkel Bernat <YehezkelShB@gmail.com>
13950L: netdev@vger.kernel.org 13970L: netdev@vger.kernel.org
13951S: Maintained 13971S: Maintained
13952F: drivers/net/thunderbolt.c 13972F: drivers/net/thunderbolt.c
diff --git a/Makefile b/Makefile
index e811e0c509c5..619a85ad716b 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
2VERSION = 4 2VERSION = 4
3PATCHLEVEL = 17 3PATCHLEVEL = 17
4SUBLEVEL = 0 4SUBLEVEL = 0
5EXTRAVERSION = -rc1 5EXTRAVERSION = -rc3
6NAME = Fearless Coyote 6NAME = Fearless Coyote
7 7
8# *DOCUMENTATION* 8# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/gemini-nas4220b.dts b/arch/arm/boot/dts/gemini-nas4220b.dts
index 8bbb6f85d161..4785fbcc41ed 100644
--- a/arch/arm/boot/dts/gemini-nas4220b.dts
+++ b/arch/arm/boot/dts/gemini-nas4220b.dts
@@ -134,37 +134,37 @@
134 function = "gmii"; 134 function = "gmii";
135 groups = "gmii_gmac0_grp"; 135 groups = "gmii_gmac0_grp";
136 }; 136 };
137 /* Settings come from OpenWRT */ 137 /* Settings come from OpenWRT, pins on SL3516 */
138 conf0 { 138 conf0 {
139 pins = "R8 GMAC0 RXDV", "U11 GMAC1 RXDV"; 139 pins = "V8 GMAC0 RXDV", "T10 GMAC1 RXDV";
140 skew-delay = <0>; 140 skew-delay = <0>;
141 }; 141 };
142 conf1 { 142 conf1 {
143 pins = "T8 GMAC0 RXC", "T11 GMAC1 RXC"; 143 pins = "Y7 GMAC0 RXC", "Y11 GMAC1 RXC";
144 skew-delay = <15>; 144 skew-delay = <15>;
145 }; 145 };
146 conf2 { 146 conf2 {
147 pins = "P8 GMAC0 TXEN", "V11 GMAC1 TXEN"; 147 pins = "T8 GMAC0 TXEN", "W11 GMAC1 TXEN";
148 skew-delay = <7>; 148 skew-delay = <7>;
149 }; 149 };
150 conf3 { 150 conf3 {
151 pins = "V7 GMAC0 TXC"; 151 pins = "U8 GMAC0 TXC";
152 skew-delay = <11>; 152 skew-delay = <11>;
153 }; 153 };
154 conf4 { 154 conf4 {
155 pins = "P10 GMAC1 TXC"; 155 pins = "V11 GMAC1 TXC";
156 skew-delay = <10>; 156 skew-delay = <10>;
157 }; 157 };
158 conf5 { 158 conf5 {
159 /* The data lines all have default skew */ 159 /* The data lines all have default skew */
160 pins = "U8 GMAC0 RXD0", "V8 GMAC0 RXD1", 160 pins = "W8 GMAC0 RXD0", "V9 GMAC0 RXD1",
161 "P9 GMAC0 RXD2", "R9 GMAC0 RXD3", 161 "Y8 GMAC0 RXD2", "U9 GMAC0 RXD3",
162 "U7 GMAC0 TXD0", "T7 GMAC0 TXD1", 162 "T7 GMAC0 TXD0", "U6 GMAC0 TXD1",
163 "R7 GMAC0 TXD2", "P7 GMAC0 TXD3", 163 "V7 GMAC0 TXD2", "U7 GMAC0 TXD3",
164 "R11 GMAC1 RXD0", "P11 GMAC1 RXD1", 164 "Y12 GMAC1 RXD0", "V12 GMAC1 RXD1",
165 "V12 GMAC1 RXD2", "U12 GMAC1 RXD3", 165 "T11 GMAC1 RXD2", "W12 GMAC1 RXD3",
166 "R10 GMAC1 TXD0", "T10 GMAC1 TXD1", 166 "U10 GMAC1 TXD0", "Y10 GMAC1 TXD1",
167 "U10 GMAC1 TXD2", "V10 GMAC1 TXD3"; 167 "W10 GMAC1 TXD2", "T9 GMAC1 TXD3";
168 skew-delay = <7>; 168 skew-delay = <7>;
169 }; 169 };
170 /* Set up drive strength on GMAC0 to 16 mA */ 170 /* Set up drive strength on GMAC0 to 16 mA */
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 475904894b86..e554b6e039f3 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -163,10 +163,10 @@
163 163
164 cm2: cm2@8000 { 164 cm2: cm2@8000 {
165 compatible = "ti,omap4-cm2", "simple-bus"; 165 compatible = "ti,omap4-cm2", "simple-bus";
166 reg = <0x8000 0x3000>; 166 reg = <0x8000 0x2000>;
167 #address-cells = <1>; 167 #address-cells = <1>;
168 #size-cells = <1>; 168 #size-cells = <1>;
169 ranges = <0 0x8000 0x3000>; 169 ranges = <0 0x8000 0x2000>;
170 170
171 cm2_clocks: clocks { 171 cm2_clocks: clocks {
172 #address-cells = <1>; 172 #address-cells = <1>;
@@ -250,11 +250,11 @@
250 250
251 prm: prm@6000 { 251 prm: prm@6000 {
252 compatible = "ti,omap4-prm"; 252 compatible = "ti,omap4-prm";
253 reg = <0x6000 0x3000>; 253 reg = <0x6000 0x2000>;
254 interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>; 254 interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
255 #address-cells = <1>; 255 #address-cells = <1>;
256 #size-cells = <1>; 256 #size-cells = <1>;
257 ranges = <0 0x6000 0x3000>; 257 ranges = <0 0x6000 0x2000>;
258 258
259 prm_clocks: clocks { 259 prm_clocks: clocks {
260 #address-cells = <1>; 260 #address-cells = <1>;
diff --git a/arch/arm/configs/gemini_defconfig b/arch/arm/configs/gemini_defconfig
index 2a63fa10c813..553777ac2814 100644
--- a/arch/arm/configs/gemini_defconfig
+++ b/arch/arm/configs/gemini_defconfig
@@ -1,6 +1,7 @@
1# CONFIG_LOCALVERSION_AUTO is not set 1# CONFIG_LOCALVERSION_AUTO is not set
2CONFIG_SYSVIPC=y 2CONFIG_SYSVIPC=y
3CONFIG_NO_HZ_IDLE=y 3CONFIG_NO_HZ_IDLE=y
4CONFIG_HIGH_RES_TIMERS=y
4CONFIG_BSD_PROCESS_ACCT=y 5CONFIG_BSD_PROCESS_ACCT=y
5CONFIG_USER_NS=y 6CONFIG_USER_NS=y
6CONFIG_RELAY=y 7CONFIG_RELAY=y
@@ -12,15 +13,21 @@ CONFIG_ARCH_GEMINI=y
12CONFIG_PCI=y 13CONFIG_PCI=y
13CONFIG_PREEMPT=y 14CONFIG_PREEMPT=y
14CONFIG_AEABI=y 15CONFIG_AEABI=y
16CONFIG_HIGHMEM=y
17CONFIG_CMA=y
15CONFIG_CMDLINE="console=ttyS0,115200n8" 18CONFIG_CMDLINE="console=ttyS0,115200n8"
16CONFIG_KEXEC=y 19CONFIG_KEXEC=y
17CONFIG_BINFMT_MISC=y 20CONFIG_BINFMT_MISC=y
18CONFIG_PM=y 21CONFIG_PM=y
22CONFIG_NET=y
23CONFIG_UNIX=y
24CONFIG_INET=y
19CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 25CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
20CONFIG_DEVTMPFS=y 26CONFIG_DEVTMPFS=y
21CONFIG_MTD=y 27CONFIG_MTD=y
22CONFIG_MTD_BLOCK=y 28CONFIG_MTD_BLOCK=y
23CONFIG_MTD_CFI=y 29CONFIG_MTD_CFI=y
30CONFIG_MTD_JEDECPROBE=y
24CONFIG_MTD_CFI_INTELEXT=y 31CONFIG_MTD_CFI_INTELEXT=y
25CONFIG_MTD_CFI_AMDSTD=y 32CONFIG_MTD_CFI_AMDSTD=y
26CONFIG_MTD_CFI_STAA=y 33CONFIG_MTD_CFI_STAA=y
@@ -33,6 +40,11 @@ CONFIG_BLK_DEV_SD=y
33# CONFIG_SCSI_LOWLEVEL is not set 40# CONFIG_SCSI_LOWLEVEL is not set
34CONFIG_ATA=y 41CONFIG_ATA=y
35CONFIG_PATA_FTIDE010=y 42CONFIG_PATA_FTIDE010=y
43CONFIG_NETDEVICES=y
44CONFIG_GEMINI_ETHERNET=y
45CONFIG_MDIO_BITBANG=y
46CONFIG_MDIO_GPIO=y
47CONFIG_REALTEK_PHY=y
36CONFIG_INPUT_EVDEV=y 48CONFIG_INPUT_EVDEV=y
37CONFIG_KEYBOARD_GPIO=y 49CONFIG_KEYBOARD_GPIO=y
38# CONFIG_INPUT_MOUSE is not set 50# CONFIG_INPUT_MOUSE is not set
@@ -43,9 +55,19 @@ CONFIG_SERIAL_8250_NR_UARTS=1
43CONFIG_SERIAL_8250_RUNTIME_UARTS=1 55CONFIG_SERIAL_8250_RUNTIME_UARTS=1
44CONFIG_SERIAL_OF_PLATFORM=y 56CONFIG_SERIAL_OF_PLATFORM=y
45# CONFIG_HW_RANDOM is not set 57# CONFIG_HW_RANDOM is not set
46# CONFIG_HWMON is not set 58CONFIG_I2C_GPIO=y
59CONFIG_SPI=y
60CONFIG_SPI_GPIO=y
61CONFIG_SENSORS_GPIO_FAN=y
62CONFIG_SENSORS_LM75=y
63CONFIG_THERMAL=y
47CONFIG_WATCHDOG=y 64CONFIG_WATCHDOG=y
48CONFIG_GEMINI_WATCHDOG=y 65CONFIG_REGULATOR=y
66CONFIG_REGULATOR_FIXED_VOLTAGE=y
67CONFIG_DRM=y
68CONFIG_DRM_PANEL_ILITEK_IL9322=y
69CONFIG_DRM_TVE200=y
70CONFIG_LOGO=y
49CONFIG_USB=y 71CONFIG_USB=y
50CONFIG_USB_MON=y 72CONFIG_USB_MON=y
51CONFIG_USB_FOTG210_HCD=y 73CONFIG_USB_FOTG210_HCD=y
@@ -54,6 +76,7 @@ CONFIG_NEW_LEDS=y
54CONFIG_LEDS_CLASS=y 76CONFIG_LEDS_CLASS=y
55CONFIG_LEDS_GPIO=y 77CONFIG_LEDS_GPIO=y
56CONFIG_LEDS_TRIGGERS=y 78CONFIG_LEDS_TRIGGERS=y
79CONFIG_LEDS_TRIGGER_DISK=y
57CONFIG_LEDS_TRIGGER_HEARTBEAT=y 80CONFIG_LEDS_TRIGGER_HEARTBEAT=y
58CONFIG_RTC_CLASS=y 81CONFIG_RTC_CLASS=y
59CONFIG_DMADEVICES=y 82CONFIG_DMADEVICES=y
diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig
index 2620ce790db0..371fca4e1ab7 100644
--- a/arch/arm/configs/socfpga_defconfig
+++ b/arch/arm/configs/socfpga_defconfig
@@ -57,6 +57,7 @@ CONFIG_MTD_M25P80=y
57CONFIG_MTD_NAND=y 57CONFIG_MTD_NAND=y
58CONFIG_MTD_NAND_DENALI_DT=y 58CONFIG_MTD_NAND_DENALI_DT=y
59CONFIG_MTD_SPI_NOR=y 59CONFIG_MTD_SPI_NOR=y
60# CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set
60CONFIG_SPI_CADENCE_QUADSPI=y 61CONFIG_SPI_CADENCE_QUADSPI=y
61CONFIG_OF_OVERLAY=y 62CONFIG_OF_OVERLAY=y
62CONFIG_OF_CONFIGFS=y 63CONFIG_OF_CONFIGFS=y
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index c6a749568dd6..c7c28c885a19 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -77,6 +77,9 @@ struct kvm_arch {
77 /* Interrupt controller */ 77 /* Interrupt controller */
78 struct vgic_dist vgic; 78 struct vgic_dist vgic;
79 int max_vcpus; 79 int max_vcpus;
80
81 /* Mandated version of PSCI */
82 u32 psci_version;
80}; 83};
81 84
82#define KVM_NR_MEM_OBJS 40 85#define KVM_NR_MEM_OBJS 40
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 2ba95d6fe852..caae4843cb70 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -195,6 +195,12 @@ struct kvm_arch_memory_slot {
195#define KVM_REG_ARM_VFP_FPINST 0x1009 195#define KVM_REG_ARM_VFP_FPINST 0x1009
196#define KVM_REG_ARM_VFP_FPINST2 0x100A 196#define KVM_REG_ARM_VFP_FPINST2 0x100A
197 197
198/* KVM-as-firmware specific pseudo-registers */
199#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
200#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \
201 KVM_REG_ARM_FW | ((r) & 0xffff))
202#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
203
198/* Device Control API: ARM VGIC */ 204/* Device Control API: ARM VGIC */
199#define KVM_DEV_ARM_VGIC_GRP_ADDR 0 205#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
200#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 206#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 1e0784ebbfd6..a18f33edc471 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -22,6 +22,7 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/vmalloc.h> 23#include <linux/vmalloc.h>
24#include <linux/fs.h> 24#include <linux/fs.h>
25#include <kvm/arm_psci.h>
25#include <asm/cputype.h> 26#include <asm/cputype.h>
26#include <linux/uaccess.h> 27#include <linux/uaccess.h>
27#include <asm/kvm.h> 28#include <asm/kvm.h>
@@ -176,6 +177,7 @@ static unsigned long num_core_regs(void)
176unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) 177unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
177{ 178{
178 return num_core_regs() + kvm_arm_num_coproc_regs(vcpu) 179 return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
180 + kvm_arm_get_fw_num_regs(vcpu)
179 + NUM_TIMER_REGS; 181 + NUM_TIMER_REGS;
180} 182}
181 183
@@ -196,6 +198,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
196 uindices++; 198 uindices++;
197 } 199 }
198 200
201 ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
202 if (ret)
203 return ret;
204 uindices += kvm_arm_get_fw_num_regs(vcpu);
205
199 ret = copy_timer_indices(vcpu, uindices); 206 ret = copy_timer_indices(vcpu, uindices);
200 if (ret) 207 if (ret)
201 return ret; 208 return ret;
@@ -214,6 +221,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
214 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) 221 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
215 return get_core_reg(vcpu, reg); 222 return get_core_reg(vcpu, reg);
216 223
224 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
225 return kvm_arm_get_fw_reg(vcpu, reg);
226
217 if (is_timer_reg(reg->id)) 227 if (is_timer_reg(reg->id))
218 return get_timer_reg(vcpu, reg); 228 return get_timer_reg(vcpu, reg);
219 229
@@ -230,6 +240,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
230 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) 240 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
231 return set_core_reg(vcpu, reg); 241 return set_core_reg(vcpu, reg);
232 242
243 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
244 return kvm_arm_set_fw_reg(vcpu, reg);
245
233 if (is_timer_reg(reg->id)) 246 if (is_timer_reg(reg->id))
234 return set_timer_reg(vcpu, reg); 247 return set_timer_reg(vcpu, reg);
235 248
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 4603c30fef73..0d9ce58bc464 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -243,8 +243,4 @@ arch/arm/mach-omap2/pm-asm-offsets.s: arch/arm/mach-omap2/pm-asm-offsets.c
243include/generated/ti-pm-asm-offsets.h: arch/arm/mach-omap2/pm-asm-offsets.s FORCE 243include/generated/ti-pm-asm-offsets.h: arch/arm/mach-omap2/pm-asm-offsets.s FORCE
244 $(call filechk,offsets,__TI_PM_ASM_OFFSETS_H__) 244 $(call filechk,offsets,__TI_PM_ASM_OFFSETS_H__)
245 245
246# For rule to generate ti-emif-asm-offsets.h dependency 246$(obj)/sleep33xx.o $(obj)/sleep43xx.o: include/generated/ti-pm-asm-offsets.h
247include drivers/memory/Makefile.asm-offsets
248
249arch/arm/mach-omap2/sleep33xx.o: include/generated/ti-pm-asm-offsets.h include/generated/ti-emif-asm-offsets.h
250arch/arm/mach-omap2/sleep43xx.o: include/generated/ti-pm-asm-offsets.h include/generated/ti-emif-asm-offsets.h
diff --git a/arch/arm/mach-omap2/pm-asm-offsets.c b/arch/arm/mach-omap2/pm-asm-offsets.c
index 6d4392da7c11..b9846b19e5e2 100644
--- a/arch/arm/mach-omap2/pm-asm-offsets.c
+++ b/arch/arm/mach-omap2/pm-asm-offsets.c
@@ -7,9 +7,12 @@
7 7
8#include <linux/kbuild.h> 8#include <linux/kbuild.h>
9#include <linux/platform_data/pm33xx.h> 9#include <linux/platform_data/pm33xx.h>
10#include <linux/ti-emif-sram.h>
10 11
11int main(void) 12int main(void)
12{ 13{
14 ti_emif_asm_offsets();
15
13 DEFINE(AMX3_PM_WFI_FLAGS_OFFSET, 16 DEFINE(AMX3_PM_WFI_FLAGS_OFFSET,
14 offsetof(struct am33xx_pm_sram_data, wfi_flags)); 17 offsetof(struct am33xx_pm_sram_data, wfi_flags));
15 DEFINE(AMX3_PM_L2_AUX_CTRL_VAL_OFFSET, 18 DEFINE(AMX3_PM_L2_AUX_CTRL_VAL_OFFSET,
diff --git a/arch/arm/mach-omap2/sleep33xx.S b/arch/arm/mach-omap2/sleep33xx.S
index 218d79930b04..322b3bb868b4 100644
--- a/arch/arm/mach-omap2/sleep33xx.S
+++ b/arch/arm/mach-omap2/sleep33xx.S
@@ -6,7 +6,6 @@
6 * Dave Gerlach, Vaibhav Bedia 6 * Dave Gerlach, Vaibhav Bedia
7 */ 7 */
8 8
9#include <generated/ti-emif-asm-offsets.h>
10#include <generated/ti-pm-asm-offsets.h> 9#include <generated/ti-pm-asm-offsets.h>
11#include <linux/linkage.h> 10#include <linux/linkage.h>
12#include <linux/ti-emif-sram.h> 11#include <linux/ti-emif-sram.h>
diff --git a/arch/arm/mach-omap2/sleep43xx.S b/arch/arm/mach-omap2/sleep43xx.S
index b24be624e8b9..8903814a6677 100644
--- a/arch/arm/mach-omap2/sleep43xx.S
+++ b/arch/arm/mach-omap2/sleep43xx.S
@@ -6,7 +6,6 @@
6 * Dave Gerlach, Vaibhav Bedia 6 * Dave Gerlach, Vaibhav Bedia
7 */ 7 */
8 8
9#include <generated/ti-emif-asm-offsets.h>
10#include <generated/ti-pm-asm-offsets.h> 9#include <generated/ti-pm-asm-offsets.h>
11#include <linux/linkage.h> 10#include <linux/linkage.h>
12#include <linux/ti-emif-sram.h> 11#include <linux/ti-emif-sram.h>
diff --git a/arch/arm/mach-s3c24xx/mach-jive.c b/arch/arm/mach-s3c24xx/mach-jive.c
index 59589a4a0d4b..885e8f12e4b9 100644
--- a/arch/arm/mach-s3c24xx/mach-jive.c
+++ b/arch/arm/mach-s3c24xx/mach-jive.c
@@ -427,9 +427,9 @@ static struct gpiod_lookup_table jive_wm8750_gpiod_table = {
427 .dev_id = "spi_gpio", 427 .dev_id = "spi_gpio",
428 .table = { 428 .table = {
429 GPIO_LOOKUP("GPIOB", 4, 429 GPIO_LOOKUP("GPIOB", 4,
430 "gpio-sck", GPIO_ACTIVE_HIGH), 430 "sck", GPIO_ACTIVE_HIGH),
431 GPIO_LOOKUP("GPIOB", 9, 431 GPIO_LOOKUP("GPIOB", 9,
432 "gpio-mosi", GPIO_ACTIVE_HIGH), 432 "mosi", GPIO_ACTIVE_HIGH),
433 GPIO_LOOKUP("GPIOH", 10, 433 GPIO_LOOKUP("GPIOH", 10,
434 "cs", GPIO_ACTIVE_HIGH), 434 "cs", GPIO_ACTIVE_HIGH),
435 { }, 435 { },
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 15402861bb59..87f7d2f9f17c 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -56,7 +56,11 @@ KBUILD_AFLAGS += $(lseinstr) $(brokengasinst)
56KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) 56KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
57KBUILD_AFLAGS += $(call cc-option,-mabi=lp64) 57KBUILD_AFLAGS += $(call cc-option,-mabi=lp64)
58 58
59ifeq ($(cc-name),clang)
60KBUILD_CFLAGS += -DCONFIG_ARCH_SUPPORTS_INT128
61else
59KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0500, -DCONFIG_ARCH_SUPPORTS_INT128) 62KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0500, -DCONFIG_ARCH_SUPPORTS_INT128)
63endif
60 64
61ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) 65ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
62KBUILD_CPPFLAGS += -mbig-endian 66KBUILD_CPPFLAGS += -mbig-endian
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
index 4eef36b22538..88e712ea757a 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
@@ -212,3 +212,7 @@
212 pinctrl-0 = <&uart_ao_a_pins>; 212 pinctrl-0 = <&uart_ao_a_pins>;
213 pinctrl-names = "default"; 213 pinctrl-names = "default";
214}; 214};
215
216&usb0 {
217 status = "okay";
218};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
index 22bf37404ff1..3e3eb31748a3 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
@@ -271,3 +271,15 @@
271 pinctrl-0 = <&uart_ao_a_pins>; 271 pinctrl-0 = <&uart_ao_a_pins>;
272 pinctrl-names = "default"; 272 pinctrl-names = "default";
273}; 273};
274
275&usb0 {
276 status = "okay";
277};
278
279&usb2_phy0 {
280 /*
281 * even though the schematics don't show it:
282 * HDMI_5V is also used as supply for the USB VBUS.
283 */
284 phy-supply = <&hdmi_5v>;
285};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
index 69c721a70e44..6739697be1de 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
@@ -215,3 +215,7 @@
215 pinctrl-0 = <&uart_ao_a_pins>; 215 pinctrl-0 = <&uart_ao_a_pins>;
216 pinctrl-names = "default"; 216 pinctrl-names = "default";
217}; 217};
218
219&usb0 {
220 status = "okay";
221};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
index 0a0953fbc7d4..0cfd701809de 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
@@ -185,3 +185,7 @@
185 pinctrl-0 = <&uart_ao_a_pins>; 185 pinctrl-0 = <&uart_ao_a_pins>;
186 pinctrl-names = "default"; 186 pinctrl-names = "default";
187}; 187};
188
189&usb0 {
190 status = "okay";
191};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
index e1a39cbed8c9..dba365ed4bd5 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
@@ -20,6 +20,67 @@
20 no-map; 20 no-map;
21 }; 21 };
22 }; 22 };
23
24 soc {
25 usb0: usb@c9000000 {
26 status = "disabled";
27 compatible = "amlogic,meson-gxl-dwc3";
28 #address-cells = <2>;
29 #size-cells = <2>;
30 ranges;
31
32 clocks = <&clkc CLKID_USB>;
33 clock-names = "usb_general";
34 resets = <&reset RESET_USB_OTG>;
35 reset-names = "usb_otg";
36
37 dwc3: dwc3@c9000000 {
38 compatible = "snps,dwc3";
39 reg = <0x0 0xc9000000 0x0 0x100000>;
40 interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
41 dr_mode = "host";
42 maximum-speed = "high-speed";
43 snps,dis_u2_susphy_quirk;
44 phys = <&usb3_phy>, <&usb2_phy0>, <&usb2_phy1>;
45 };
46 };
47 };
48};
49
50&apb {
51 usb2_phy0: phy@78000 {
52 compatible = "amlogic,meson-gxl-usb2-phy";
53 #phy-cells = <0>;
54 reg = <0x0 0x78000 0x0 0x20>;
55 clocks = <&clkc CLKID_USB>;
56 clock-names = "phy";
57 resets = <&reset RESET_USB_OTG>;
58 reset-names = "phy";
59 status = "okay";
60 };
61
62 usb2_phy1: phy@78020 {
63 compatible = "amlogic,meson-gxl-usb2-phy";
64 #phy-cells = <0>;
65 reg = <0x0 0x78020 0x0 0x20>;
66 clocks = <&clkc CLKID_USB>;
67 clock-names = "phy";
68 resets = <&reset RESET_USB_OTG>;
69 reset-names = "phy";
70 status = "okay";
71 };
72
73 usb3_phy: phy@78080 {
74 compatible = "amlogic,meson-gxl-usb3-phy";
75 #phy-cells = <0>;
76 reg = <0x0 0x78080 0x0 0x20>;
77 interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
78 clocks = <&clkc CLKID_USB>, <&clkc_AO CLKID_AO_CEC_32K>;
79 clock-names = "phy", "peripheral";
80 resets = <&reset RESET_USB_OTG>, <&reset RESET_USB_OTG>;
81 reset-names = "phy", "peripheral";
82 status = "okay";
83 };
23}; 84};
24 85
25&ethmac { 86&ethmac {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
index 4fd46c1546a7..0868da476e41 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
@@ -406,3 +406,7 @@
406 status = "okay"; 406 status = "okay";
407 vref-supply = <&vddio_ao18>; 407 vref-supply = <&vddio_ao18>;
408}; 408};
409
410&usb0 {
411 status = "okay";
412};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
index d076a7c425dd..247888d68a3a 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
@@ -80,6 +80,19 @@
80 }; 80 };
81}; 81};
82 82
83&apb {
84 usb2_phy2: phy@78040 {
85 compatible = "amlogic,meson-gxl-usb2-phy";
86 #phy-cells = <0>;
87 reg = <0x0 0x78040 0x0 0x20>;
88 clocks = <&clkc CLKID_USB>;
89 clock-names = "phy";
90 resets = <&reset RESET_USB_OTG>;
91 reset-names = "phy";
92 status = "okay";
93 };
94};
95
83&clkc_AO { 96&clkc_AO {
84 compatible = "amlogic,meson-gxm-aoclkc", "amlogic,meson-gx-aoclkc"; 97 compatible = "amlogic,meson-gxm-aoclkc", "amlogic,meson-gx-aoclkc";
85}; 98};
@@ -100,3 +113,7 @@
100&hdmi_tx { 113&hdmi_tx {
101 compatible = "amlogic,meson-gxm-dw-hdmi", "amlogic,meson-gx-dw-hdmi"; 114 compatible = "amlogic,meson-gxm-dw-hdmi", "amlogic,meson-gx-dw-hdmi";
102}; 115};
116
117&dwc3 {
118 phys = <&usb3_phy>, <&usb2_phy0>, <&usb2_phy1>, <&usb2_phy2>;
119};
diff --git a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
index 2ac43221ddb6..69804c5f1197 100644
--- a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
@@ -56,8 +56,6 @@
56 56
57 gpio_keys { 57 gpio_keys {
58 compatible = "gpio-keys"; 58 compatible = "gpio-keys";
59 #address-cells = <1>;
60 #size-cells = <0>;
61 59
62 power-button { 60 power-button {
63 debounce_interval = <50>; 61 debounce_interval = <50>;
diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi
index 4b5465da81d8..8c68e0c26f1b 100644
--- a/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi
+++ b/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi
@@ -36,11 +36,11 @@
36 #size-cells = <1>; 36 #size-cells = <1>;
37 ranges = <0x0 0x0 0x67d00000 0x00800000>; 37 ranges = <0x0 0x0 0x67d00000 0x00800000>;
38 38
39 sata0: ahci@210000 { 39 sata0: ahci@0 {
40 compatible = "brcm,iproc-ahci", "generic-ahci"; 40 compatible = "brcm,iproc-ahci", "generic-ahci";
41 reg = <0x00210000 0x1000>; 41 reg = <0x00000000 0x1000>;
42 reg-names = "ahci"; 42 reg-names = "ahci";
43 interrupts = <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>; 43 interrupts = <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>;
44 #address-cells = <1>; 44 #address-cells = <1>;
45 #size-cells = <0>; 45 #size-cells = <0>;
46 status = "disabled"; 46 status = "disabled";
@@ -52,9 +52,9 @@
52 }; 52 };
53 }; 53 };
54 54
55 sata_phy0: sata_phy@212100 { 55 sata_phy0: sata_phy@2100 {
56 compatible = "brcm,iproc-sr-sata-phy"; 56 compatible = "brcm,iproc-sr-sata-phy";
57 reg = <0x00212100 0x1000>; 57 reg = <0x00002100 0x1000>;
58 reg-names = "phy"; 58 reg-names = "phy";
59 #address-cells = <1>; 59 #address-cells = <1>;
60 #size-cells = <0>; 60 #size-cells = <0>;
@@ -66,11 +66,11 @@
66 }; 66 };
67 }; 67 };
68 68
69 sata1: ahci@310000 { 69 sata1: ahci@10000 {
70 compatible = "brcm,iproc-ahci", "generic-ahci"; 70 compatible = "brcm,iproc-ahci", "generic-ahci";
71 reg = <0x00310000 0x1000>; 71 reg = <0x00010000 0x1000>;
72 reg-names = "ahci"; 72 reg-names = "ahci";
73 interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>; 73 interrupts = <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>;
74 #address-cells = <1>; 74 #address-cells = <1>;
75 #size-cells = <0>; 75 #size-cells = <0>;
76 status = "disabled"; 76 status = "disabled";
@@ -82,9 +82,9 @@
82 }; 82 };
83 }; 83 };
84 84
85 sata_phy1: sata_phy@312100 { 85 sata_phy1: sata_phy@12100 {
86 compatible = "brcm,iproc-sr-sata-phy"; 86 compatible = "brcm,iproc-sr-sata-phy";
87 reg = <0x00312100 0x1000>; 87 reg = <0x00012100 0x1000>;
88 reg-names = "phy"; 88 reg-names = "phy";
89 #address-cells = <1>; 89 #address-cells = <1>;
90 #size-cells = <0>; 90 #size-cells = <0>;
@@ -96,11 +96,11 @@
96 }; 96 };
97 }; 97 };
98 98
99 sata2: ahci@120000 { 99 sata2: ahci@20000 {
100 compatible = "brcm,iproc-ahci", "generic-ahci"; 100 compatible = "brcm,iproc-ahci", "generic-ahci";
101 reg = <0x00120000 0x1000>; 101 reg = <0x00020000 0x1000>;
102 reg-names = "ahci"; 102 reg-names = "ahci";
103 interrupts = <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>; 103 interrupts = <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>;
104 #address-cells = <1>; 104 #address-cells = <1>;
105 #size-cells = <0>; 105 #size-cells = <0>;
106 status = "disabled"; 106 status = "disabled";
@@ -112,9 +112,9 @@
112 }; 112 };
113 }; 113 };
114 114
115 sata_phy2: sata_phy@122100 { 115 sata_phy2: sata_phy@22100 {
116 compatible = "brcm,iproc-sr-sata-phy"; 116 compatible = "brcm,iproc-sr-sata-phy";
117 reg = <0x00122100 0x1000>; 117 reg = <0x00022100 0x1000>;
118 reg-names = "phy"; 118 reg-names = "phy";
119 #address-cells = <1>; 119 #address-cells = <1>;
120 #size-cells = <0>; 120 #size-cells = <0>;
@@ -126,11 +126,11 @@
126 }; 126 };
127 }; 127 };
128 128
129 sata3: ahci@130000 { 129 sata3: ahci@30000 {
130 compatible = "brcm,iproc-ahci", "generic-ahci"; 130 compatible = "brcm,iproc-ahci", "generic-ahci";
131 reg = <0x00130000 0x1000>; 131 reg = <0x00030000 0x1000>;
132 reg-names = "ahci"; 132 reg-names = "ahci";
133 interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>; 133 interrupts = <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>;
134 #address-cells = <1>; 134 #address-cells = <1>;
135 #size-cells = <0>; 135 #size-cells = <0>;
136 status = "disabled"; 136 status = "disabled";
@@ -142,9 +142,9 @@
142 }; 142 };
143 }; 143 };
144 144
145 sata_phy3: sata_phy@132100 { 145 sata_phy3: sata_phy@32100 {
146 compatible = "brcm,iproc-sr-sata-phy"; 146 compatible = "brcm,iproc-sr-sata-phy";
147 reg = <0x00132100 0x1000>; 147 reg = <0x00032100 0x1000>;
148 reg-names = "phy"; 148 reg-names = "phy";
149 #address-cells = <1>; 149 #address-cells = <1>;
150 #size-cells = <0>; 150 #size-cells = <0>;
@@ -156,11 +156,11 @@
156 }; 156 };
157 }; 157 };
158 158
159 sata4: ahci@330000 { 159 sata4: ahci@100000 {
160 compatible = "brcm,iproc-ahci", "generic-ahci"; 160 compatible = "brcm,iproc-ahci", "generic-ahci";
161 reg = <0x00330000 0x1000>; 161 reg = <0x00100000 0x1000>;
162 reg-names = "ahci"; 162 reg-names = "ahci";
163 interrupts = <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH>; 163 interrupts = <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>;
164 #address-cells = <1>; 164 #address-cells = <1>;
165 #size-cells = <0>; 165 #size-cells = <0>;
166 status = "disabled"; 166 status = "disabled";
@@ -172,9 +172,9 @@
172 }; 172 };
173 }; 173 };
174 174
175 sata_phy4: sata_phy@332100 { 175 sata_phy4: sata_phy@102100 {
176 compatible = "brcm,iproc-sr-sata-phy"; 176 compatible = "brcm,iproc-sr-sata-phy";
177 reg = <0x00332100 0x1000>; 177 reg = <0x00102100 0x1000>;
178 reg-names = "phy"; 178 reg-names = "phy";
179 #address-cells = <1>; 179 #address-cells = <1>;
180 #size-cells = <0>; 180 #size-cells = <0>;
@@ -186,11 +186,11 @@
186 }; 186 };
187 }; 187 };
188 188
189 sata5: ahci@400000 { 189 sata5: ahci@110000 {
190 compatible = "brcm,iproc-ahci", "generic-ahci"; 190 compatible = "brcm,iproc-ahci", "generic-ahci";
191 reg = <0x00400000 0x1000>; 191 reg = <0x00110000 0x1000>;
192 reg-names = "ahci"; 192 reg-names = "ahci";
193 interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>; 193 interrupts = <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>;
194 #address-cells = <1>; 194 #address-cells = <1>;
195 #size-cells = <0>; 195 #size-cells = <0>;
196 status = "disabled"; 196 status = "disabled";
@@ -202,9 +202,9 @@
202 }; 202 };
203 }; 203 };
204 204
205 sata_phy5: sata_phy@402100 { 205 sata_phy5: sata_phy@112100 {
206 compatible = "brcm,iproc-sr-sata-phy"; 206 compatible = "brcm,iproc-sr-sata-phy";
207 reg = <0x00402100 0x1000>; 207 reg = <0x00112100 0x1000>;
208 reg-names = "phy"; 208 reg-names = "phy";
209 #address-cells = <1>; 209 #address-cells = <1>;
210 #size-cells = <0>; 210 #size-cells = <0>;
@@ -216,11 +216,11 @@
216 }; 216 };
217 }; 217 };
218 218
219 sata6: ahci@410000 { 219 sata6: ahci@120000 {
220 compatible = "brcm,iproc-ahci", "generic-ahci"; 220 compatible = "brcm,iproc-ahci", "generic-ahci";
221 reg = <0x00410000 0x1000>; 221 reg = <0x00120000 0x1000>;
222 reg-names = "ahci"; 222 reg-names = "ahci";
223 interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>; 223 interrupts = <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>;
224 #address-cells = <1>; 224 #address-cells = <1>;
225 #size-cells = <0>; 225 #size-cells = <0>;
226 status = "disabled"; 226 status = "disabled";
@@ -232,9 +232,9 @@
232 }; 232 };
233 }; 233 };
234 234
235 sata_phy6: sata_phy@412100 { 235 sata_phy6: sata_phy@122100 {
236 compatible = "brcm,iproc-sr-sata-phy"; 236 compatible = "brcm,iproc-sr-sata-phy";
237 reg = <0x00412100 0x1000>; 237 reg = <0x00122100 0x1000>;
238 reg-names = "phy"; 238 reg-names = "phy";
239 #address-cells = <1>; 239 #address-cells = <1>;
240 #size-cells = <0>; 240 #size-cells = <0>;
@@ -246,11 +246,11 @@
246 }; 246 };
247 }; 247 };
248 248
249 sata7: ahci@420000 { 249 sata7: ahci@130000 {
250 compatible = "brcm,iproc-ahci", "generic-ahci"; 250 compatible = "brcm,iproc-ahci", "generic-ahci";
251 reg = <0x00420000 0x1000>; 251 reg = <0x00130000 0x1000>;
252 reg-names = "ahci"; 252 reg-names = "ahci";
253 interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>; 253 interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
254 #address-cells = <1>; 254 #address-cells = <1>;
255 #size-cells = <0>; 255 #size-cells = <0>;
256 status = "disabled"; 256 status = "disabled";
@@ -262,9 +262,9 @@
262 }; 262 };
263 }; 263 };
264 264
265 sata_phy7: sata_phy@422100 { 265 sata_phy7: sata_phy@132100 {
266 compatible = "brcm,iproc-sr-sata-phy"; 266 compatible = "brcm,iproc-sr-sata-phy";
267 reg = <0x00422100 0x1000>; 267 reg = <0x00132100 0x1000>;
268 reg-names = "phy"; 268 reg-names = "phy";
269 #address-cells = <1>; 269 #address-cells = <1>;
270 #size-cells = <0>; 270 #size-cells = <0>;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index ab46bc70add6..469de8acd06f 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -75,6 +75,9 @@ struct kvm_arch {
75 75
76 /* Interrupt controller */ 76 /* Interrupt controller */
77 struct vgic_dist vgic; 77 struct vgic_dist vgic;
78
79 /* Mandated version of PSCI */
80 u32 psci_version;
78}; 81};
79 82
80#define KVM_NR_MEM_OBJS 40 83#define KVM_NR_MEM_OBJS 40
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index b6dbbe3123a9..97d0ef12e2ff 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -39,7 +39,7 @@ struct mod_arch_specific {
39u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela, 39u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
40 Elf64_Sym *sym); 40 Elf64_Sym *sym);
41 41
42u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val); 42u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val);
43 43
44#ifdef CONFIG_RANDOMIZE_BASE 44#ifdef CONFIG_RANDOMIZE_BASE
45extern u64 module_alloc_base; 45extern u64 module_alloc_base;
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 7e2c27e63cd8..7c4c8f318ba9 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -230,7 +230,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
230 } 230 }
231} 231}
232 232
233extern void __sync_icache_dcache(pte_t pteval, unsigned long addr); 233extern void __sync_icache_dcache(pte_t pteval);
234 234
235/* 235/*
236 * PTE bits configuration in the presence of hardware Dirty Bit Management 236 * PTE bits configuration in the presence of hardware Dirty Bit Management
@@ -253,7 +253,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
253 pte_t old_pte; 253 pte_t old_pte;
254 254
255 if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte)) 255 if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
256 __sync_icache_dcache(pte, addr); 256 __sync_icache_dcache(pte);
257 257
258 /* 258 /*
259 * If the existing pte is valid, check for potential race with 259 * If the existing pte is valid, check for potential race with
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 9abbf3044654..04b3256f8e6d 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -206,6 +206,12 @@ struct kvm_arch_memory_slot {
206#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2) 206#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
207#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2) 207#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
208 208
209/* KVM-as-firmware specific pseudo-registers */
210#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
211#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
212 KVM_REG_ARM_FW | ((r) & 0xffff))
213#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
214
209/* Device Control API: ARM VGIC */ 215/* Device Control API: ARM VGIC */
210#define KVM_DEV_ARM_VGIC_GRP_ADDR 0 216#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
211#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 217#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 536d572e5596..9d1b06d67c53 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -868,6 +868,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
868 static const struct midr_range kpti_safe_list[] = { 868 static const struct midr_range kpti_safe_list[] = {
869 MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), 869 MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
870 MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), 870 MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
871 { /* sentinel */ }
871 }; 872 };
872 char const *str = "command line option"; 873 char const *str = "command line option";
873 874
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
index fa3637284a3d..f0690c2ca3e0 100644
--- a/arch/arm64/kernel/module-plts.c
+++ b/arch/arm64/kernel/module-plts.c
@@ -43,7 +43,7 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
43} 43}
44 44
45#ifdef CONFIG_ARM64_ERRATUM_843419 45#ifdef CONFIG_ARM64_ERRATUM_843419
46u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val) 46u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val)
47{ 47{
48 struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core : 48 struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
49 &mod->arch.init; 49 &mod->arch.init;
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 719fde8dcc19..155fd91e78f4 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -215,7 +215,7 @@ static int reloc_insn_adrp(struct module *mod, __le32 *place, u64 val)
215 insn &= ~BIT(31); 215 insn &= ~BIT(31);
216 } else { 216 } else {
217 /* out of range for ADR -> emit a veneer */ 217 /* out of range for ADR -> emit a veneer */
218 val = module_emit_adrp_veneer(mod, place, val & ~0xfff); 218 val = module_emit_veneer_for_adrp(mod, place, val & ~0xfff);
219 if (!val) 219 if (!val)
220 return -ENOEXEC; 220 return -ENOEXEC;
221 insn = aarch64_insn_gen_branch_imm((u64)place, val, 221 insn = aarch64_insn_gen_branch_imm((u64)place, val,
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 71d99af24ef2..7ff81fed46e1 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -25,6 +25,7 @@
25#include <linux/sched/signal.h> 25#include <linux/sched/signal.h>
26#include <linux/sched/task_stack.h> 26#include <linux/sched/task_stack.h>
27#include <linux/mm.h> 27#include <linux/mm.h>
28#include <linux/nospec.h>
28#include <linux/smp.h> 29#include <linux/smp.h>
29#include <linux/ptrace.h> 30#include <linux/ptrace.h>
30#include <linux/user.h> 31#include <linux/user.h>
@@ -249,15 +250,20 @@ static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
249 250
250 switch (note_type) { 251 switch (note_type) {
251 case NT_ARM_HW_BREAK: 252 case NT_ARM_HW_BREAK:
252 if (idx < ARM_MAX_BRP) 253 if (idx >= ARM_MAX_BRP)
253 bp = tsk->thread.debug.hbp_break[idx]; 254 goto out;
255 idx = array_index_nospec(idx, ARM_MAX_BRP);
256 bp = tsk->thread.debug.hbp_break[idx];
254 break; 257 break;
255 case NT_ARM_HW_WATCH: 258 case NT_ARM_HW_WATCH:
256 if (idx < ARM_MAX_WRP) 259 if (idx >= ARM_MAX_WRP)
257 bp = tsk->thread.debug.hbp_watch[idx]; 260 goto out;
261 idx = array_index_nospec(idx, ARM_MAX_WRP);
262 bp = tsk->thread.debug.hbp_watch[idx];
258 break; 263 break;
259 } 264 }
260 265
266out:
261 return bp; 267 return bp;
262} 268}
263 269
@@ -1458,9 +1464,7 @@ static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
1458{ 1464{
1459 int ret; 1465 int ret;
1460 u32 kdata; 1466 u32 kdata;
1461 mm_segment_t old_fs = get_fs();
1462 1467
1463 set_fs(KERNEL_DS);
1464 /* Watchpoint */ 1468 /* Watchpoint */
1465 if (num < 0) { 1469 if (num < 0) {
1466 ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata); 1470 ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
@@ -1471,7 +1475,6 @@ static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
1471 } else { 1475 } else {
1472 ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata); 1476 ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
1473 } 1477 }
1474 set_fs(old_fs);
1475 1478
1476 if (!ret) 1479 if (!ret)
1477 ret = put_user(kdata, data); 1480 ret = put_user(kdata, data);
@@ -1484,7 +1487,6 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
1484{ 1487{
1485 int ret; 1488 int ret;
1486 u32 kdata = 0; 1489 u32 kdata = 0;
1487 mm_segment_t old_fs = get_fs();
1488 1490
1489 if (num == 0) 1491 if (num == 0)
1490 return 0; 1492 return 0;
@@ -1493,12 +1495,10 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
1493 if (ret) 1495 if (ret)
1494 return ret; 1496 return ret;
1495 1497
1496 set_fs(KERNEL_DS);
1497 if (num < 0) 1498 if (num < 0)
1498 ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata); 1499 ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
1499 else 1500 else
1500 ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata); 1501 ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
1501 set_fs(old_fs);
1502 1502
1503 return ret; 1503 return ret;
1504} 1504}
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index ba964da31a25..8bbdc17e49df 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -277,7 +277,8 @@ void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
277 * If we were single stepping, we want to get the step exception after 277 * If we were single stepping, we want to get the step exception after
278 * we return from the trap. 278 * we return from the trap.
279 */ 279 */
280 user_fastforward_single_step(current); 280 if (user_mode(regs))
281 user_fastforward_single_step(current);
281} 282}
282 283
283static LIST_HEAD(undef_hook); 284static LIST_HEAD(undef_hook);
@@ -366,7 +367,7 @@ void force_signal_inject(int signal, int code, unsigned long address)
366 } 367 }
367 368
368 /* Force signals we don't understand to SIGKILL */ 369 /* Force signals we don't understand to SIGKILL */
369 if (WARN_ON(signal != SIGKILL || 370 if (WARN_ON(signal != SIGKILL &&
370 siginfo_layout(signal, code) != SIL_FAULT)) { 371 siginfo_layout(signal, code) != SIL_FAULT)) {
371 signal = SIGKILL; 372 signal = SIGKILL;
372 } 373 }
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 959e50d2588c..56a0260ceb11 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -25,6 +25,7 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/vmalloc.h> 26#include <linux/vmalloc.h>
27#include <linux/fs.h> 27#include <linux/fs.h>
28#include <kvm/arm_psci.h>
28#include <asm/cputype.h> 29#include <asm/cputype.h>
29#include <linux/uaccess.h> 30#include <linux/uaccess.h>
30#include <asm/kvm.h> 31#include <asm/kvm.h>
@@ -205,7 +206,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
205unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) 206unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
206{ 207{
207 return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu) 208 return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
208 + NUM_TIMER_REGS; 209 + kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS;
209} 210}
210 211
211/** 212/**
@@ -225,6 +226,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
225 uindices++; 226 uindices++;
226 } 227 }
227 228
229 ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
230 if (ret)
231 return ret;
232 uindices += kvm_arm_get_fw_num_regs(vcpu);
233
228 ret = copy_timer_indices(vcpu, uindices); 234 ret = copy_timer_indices(vcpu, uindices);
229 if (ret) 235 if (ret)
230 return ret; 236 return ret;
@@ -243,6 +249,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
243 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) 249 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
244 return get_core_reg(vcpu, reg); 250 return get_core_reg(vcpu, reg);
245 251
252 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
253 return kvm_arm_get_fw_reg(vcpu, reg);
254
246 if (is_timer_reg(reg->id)) 255 if (is_timer_reg(reg->id))
247 return get_timer_reg(vcpu, reg); 256 return get_timer_reg(vcpu, reg);
248 257
@@ -259,6 +268,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
259 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) 268 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
260 return set_core_reg(vcpu, reg); 269 return set_core_reg(vcpu, reg);
261 270
271 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
272 return kvm_arm_set_fw_reg(vcpu, reg);
273
262 if (is_timer_reg(reg->id)) 274 if (is_timer_reg(reg->id))
263 return set_timer_reg(vcpu, reg); 275 return set_timer_reg(vcpu, reg);
264 276
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 806b0b126a64..6e3b969391fd 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -996,14 +996,12 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
996 996
997 if (id == SYS_ID_AA64PFR0_EL1) { 997 if (id == SYS_ID_AA64PFR0_EL1) {
998 if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT)) 998 if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT))
999 pr_err_once("kvm [%i]: SVE unsupported for guests, suppressing\n", 999 kvm_debug("SVE unsupported for guests, suppressing\n");
1000 task_pid_nr(current));
1001 1000
1002 val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT); 1001 val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
1003 } else if (id == SYS_ID_AA64MMFR1_EL1) { 1002 } else if (id == SYS_ID_AA64MMFR1_EL1) {
1004 if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT)) 1003 if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
1005 pr_err_once("kvm [%i]: LORegions unsupported for guests, suppressing\n", 1004 kvm_debug("LORegions unsupported for guests, suppressing\n");
1006 task_pid_nr(current));
1007 1005
1008 val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT); 1006 val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT);
1009 } 1007 }
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 0ead8a1d1679..137710f4dac3 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -19,5 +19,9 @@ CFLAGS_atomic_ll_sc.o := -fcall-used-x0 -ffixed-x1 -ffixed-x2 \
19 -fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15 \ 19 -fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15 \
20 -fcall-saved-x18 -fomit-frame-pointer 20 -fcall-saved-x18 -fomit-frame-pointer
21CFLAGS_REMOVE_atomic_ll_sc.o := -pg 21CFLAGS_REMOVE_atomic_ll_sc.o := -pg
22GCOV_PROFILE_atomic_ll_sc.o := n
23KASAN_SANITIZE_atomic_ll_sc.o := n
24KCOV_INSTRUMENT_atomic_ll_sc.o := n
25UBSAN_SANITIZE_atomic_ll_sc.o := n
22 26
23lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o 27lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index e36ed5087b5c..1059884f9a6f 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -58,7 +58,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
58 flush_ptrace_access(vma, page, uaddr, dst, len); 58 flush_ptrace_access(vma, page, uaddr, dst, len);
59} 59}
60 60
61void __sync_icache_dcache(pte_t pte, unsigned long addr) 61void __sync_icache_dcache(pte_t pte)
62{ 62{
63 struct page *page = pte_page(pte); 63 struct page *page = pte_page(pte);
64 64
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index dabfc1ecda3d..12145874c02b 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -204,7 +204,7 @@ void __init kasan_init(void)
204 clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); 204 clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
205 205
206 kasan_map_populate(kimg_shadow_start, kimg_shadow_end, 206 kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
207 pfn_to_nid(virt_to_pfn(lm_alias(_text)))); 207 early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));
208 208
209 kasan_populate_zero_shadow((void *)KASAN_SHADOW_START, 209 kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
210 (void *)mod_shadow_start); 210 (void *)mod_shadow_start);
@@ -224,7 +224,7 @@ void __init kasan_init(void)
224 224
225 kasan_map_populate((unsigned long)kasan_mem_to_shadow(start), 225 kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
226 (unsigned long)kasan_mem_to_shadow(end), 226 (unsigned long)kasan_mem_to_shadow(end),
227 pfn_to_nid(virt_to_pfn(start))); 227 early_pfn_to_nid(virt_to_pfn(start)));
228 } 228 }
229 229
230 /* 230 /*
diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h
index 9e8621d94ee9..e17262ad125e 100644
--- a/arch/hexagon/include/asm/io.h
+++ b/arch/hexagon/include/asm/io.h
@@ -216,6 +216,12 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
216 memcpy((void *) dst, src, count); 216 memcpy((void *) dst, src, count);
217} 217}
218 218
219static inline void memset_io(volatile void __iomem *addr, int value,
220 size_t size)
221{
222 memset((void __force *)addr, value, size);
223}
224
219#define PCI_IO_ADDR (volatile void __iomem *) 225#define PCI_IO_ADDR (volatile void __iomem *)
220 226
221/* 227/*
diff --git a/arch/hexagon/lib/checksum.c b/arch/hexagon/lib/checksum.c
index 617506d1a559..7cd0a2259269 100644
--- a/arch/hexagon/lib/checksum.c
+++ b/arch/hexagon/lib/checksum.c
@@ -199,3 +199,4 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
199 memcpy(dst, src, len); 199 memcpy(dst, src, len);
200 return csum_partial(dst, len, sum); 200 return csum_partial(dst, len, sum);
201} 201}
202EXPORT_SYMBOL(csum_partial_copy_nocheck);
diff --git a/arch/mips/boot/dts/img/boston.dts b/arch/mips/boot/dts/img/boston.dts
index 1bd105428f61..65af3f6ba81c 100644
--- a/arch/mips/boot/dts/img/boston.dts
+++ b/arch/mips/boot/dts/img/boston.dts
@@ -51,6 +51,8 @@
51 ranges = <0x02000000 0 0x40000000 51 ranges = <0x02000000 0 0x40000000
52 0x40000000 0 0x40000000>; 52 0x40000000 0 0x40000000>;
53 53
54 bus-range = <0x00 0xff>;
55
54 interrupt-map-mask = <0 0 0 7>; 56 interrupt-map-mask = <0 0 0 7>;
55 interrupt-map = <0 0 0 1 &pci0_intc 1>, 57 interrupt-map = <0 0 0 1 &pci0_intc 1>,
56 <0 0 0 2 &pci0_intc 2>, 58 <0 0 0 2 &pci0_intc 2>,
@@ -79,6 +81,8 @@
79 ranges = <0x02000000 0 0x20000000 81 ranges = <0x02000000 0 0x20000000
80 0x20000000 0 0x20000000>; 82 0x20000000 0 0x20000000>;
81 83
84 bus-range = <0x00 0xff>;
85
82 interrupt-map-mask = <0 0 0 7>; 86 interrupt-map-mask = <0 0 0 7>;
83 interrupt-map = <0 0 0 1 &pci1_intc 1>, 87 interrupt-map = <0 0 0 1 &pci1_intc 1>,
84 <0 0 0 2 &pci1_intc 2>, 88 <0 0 0 2 &pci1_intc 2>,
@@ -107,6 +111,8 @@
107 ranges = <0x02000000 0 0x16000000 111 ranges = <0x02000000 0 0x16000000
108 0x16000000 0 0x100000>; 112 0x16000000 0 0x100000>;
109 113
114 bus-range = <0x00 0xff>;
115
110 interrupt-map-mask = <0 0 0 7>; 116 interrupt-map-mask = <0 0 0 7>;
111 interrupt-map = <0 0 0 1 &pci2_intc 1>, 117 interrupt-map = <0 0 0 1 &pci2_intc 1>,
112 <0 0 0 2 &pci2_intc 2>, 118 <0 0 0 2 &pci2_intc 2>,
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 0cbf3af37eca..a7d0b836f2f7 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -307,7 +307,7 @@ static inline void iounmap(const volatile void __iomem *addr)
307#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_LOONGSON3_ENHANCEMENT) 307#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_LOONGSON3_ENHANCEMENT)
308#define war_io_reorder_wmb() wmb() 308#define war_io_reorder_wmb() wmb()
309#else 309#else
310#define war_io_reorder_wmb() do { } while (0) 310#define war_io_reorder_wmb() barrier()
311#endif 311#endif
312 312
313#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \ 313#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \
@@ -377,6 +377,8 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
377 BUG(); \ 377 BUG(); \
378 } \ 378 } \
379 \ 379 \
380 /* prevent prefetching of coherent DMA data prematurely */ \
381 rmb(); \
380 return pfx##ioswab##bwlq(__mem, __val); \ 382 return pfx##ioswab##bwlq(__mem, __val); \
381} 383}
382 384
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index b71306947290..06629011a434 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -654,6 +654,13 @@ __clear_user(void __user *addr, __kernel_size_t size)
654{ 654{
655 __kernel_size_t res; 655 __kernel_size_t res;
656 656
657#ifdef CONFIG_CPU_MICROMIPS
658/* micromips memset / bzero also clobbers t7 & t8 */
659#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31"
660#else
661#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
662#endif /* CONFIG_CPU_MICROMIPS */
663
657 if (eva_kernel_access()) { 664 if (eva_kernel_access()) {
658 __asm__ __volatile__( 665 __asm__ __volatile__(
659 "move\t$4, %1\n\t" 666 "move\t$4, %1\n\t"
@@ -663,7 +670,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
663 "move\t%0, $6" 670 "move\t%0, $6"
664 : "=r" (res) 671 : "=r" (res)
665 : "r" (addr), "r" (size) 672 : "r" (addr), "r" (size)
666 : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); 673 : bzero_clobbers);
667 } else { 674 } else {
668 might_fault(); 675 might_fault();
669 __asm__ __volatile__( 676 __asm__ __volatile__(
@@ -674,7 +681,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
674 "move\t%0, $6" 681 "move\t%0, $6"
675 : "=r" (res) 682 : "=r" (res)
676 : "r" (addr), "r" (size) 683 : "r" (addr), "r" (size)
677 : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); 684 : bzero_clobbers);
678 } 685 }
679 686
680 return res; 687 return res;
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index a1456664d6c2..f7327979a8f8 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -219,7 +219,7 @@
2191: PTR_ADDIU a0, 1 /* fill bytewise */ 2191: PTR_ADDIU a0, 1 /* fill bytewise */
220 R10KCBARRIER(0(ra)) 220 R10KCBARRIER(0(ra))
221 bne t1, a0, 1b 221 bne t1, a0, 1b
222 sb a1, -1(a0) 222 EX(sb, a1, -1(a0), .Lsmall_fixup\@)
223 223
2242: jr ra /* done */ 2242: jr ra /* done */
225 move a2, zero 225 move a2, zero
@@ -252,13 +252,18 @@
252 PTR_L t0, TI_TASK($28) 252 PTR_L t0, TI_TASK($28)
253 andi a2, STORMASK 253 andi a2, STORMASK
254 LONG_L t0, THREAD_BUADDR(t0) 254 LONG_L t0, THREAD_BUADDR(t0)
255 LONG_ADDU a2, t1 255 LONG_ADDU a2, a0
256 jr ra 256 jr ra
257 LONG_SUBU a2, t0 257 LONG_SUBU a2, t0
258 258
259.Llast_fixup\@: 259.Llast_fixup\@:
260 jr ra 260 jr ra
261 andi v1, a2, STORMASK 261 nop
262
263.Lsmall_fixup\@:
264 PTR_SUBU a2, t1, a0
265 jr ra
266 PTR_ADDIU a2, 1
262 267
263 .endm 268 .endm
264 269
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile
index eafd06ab59ef..e5de34d00b1a 100644
--- a/arch/parisc/kernel/Makefile
+++ b/arch/parisc/kernel/Makefile
@@ -23,7 +23,7 @@ obj-$(CONFIG_SMP) += smp.o
23obj-$(CONFIG_PA11) += pci-dma.o 23obj-$(CONFIG_PA11) += pci-dma.o
24obj-$(CONFIG_PCI) += pci.o 24obj-$(CONFIG_PCI) += pci.o
25obj-$(CONFIG_MODULES) += module.o 25obj-$(CONFIG_MODULES) += module.o
26obj-$(CONFIG_64BIT) += binfmt_elf32.o sys_parisc32.o signal32.o 26obj-$(CONFIG_64BIT) += sys_parisc32.o signal32.o
27obj-$(CONFIG_STACKTRACE)+= stacktrace.o 27obj-$(CONFIG_STACKTRACE)+= stacktrace.o
28obj-$(CONFIG_AUDIT) += audit.o 28obj-$(CONFIG_AUDIT) += audit.o
29obj64-$(CONFIG_AUDIT) += compat_audit.o 29obj64-$(CONFIG_AUDIT) += compat_audit.o
diff --git a/arch/powerpc/include/asm/powernv.h b/arch/powerpc/include/asm/powernv.h
index d1c2d2e658cf..2f3ff7a27881 100644
--- a/arch/powerpc/include/asm/powernv.h
+++ b/arch/powerpc/include/asm/powernv.h
@@ -15,7 +15,7 @@
15extern void powernv_set_nmmu_ptcr(unsigned long ptcr); 15extern void powernv_set_nmmu_ptcr(unsigned long ptcr);
16extern struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, 16extern struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
17 unsigned long flags, 17 unsigned long flags,
18 struct npu_context *(*cb)(struct npu_context *, void *), 18 void (*cb)(struct npu_context *, void *),
19 void *priv); 19 void *priv);
20extern void pnv_npu2_destroy_context(struct npu_context *context, 20extern void pnv_npu2_destroy_context(struct npu_context *context,
21 struct pci_dev *gpdev); 21 struct pci_dev *gpdev);
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 2d4956e97aa9..ee5a67d57aab 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -807,7 +807,8 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev)
807 eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]); 807 eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
808 808
809 /* PCI Command: 0x4 */ 809 /* PCI Command: 0x4 */
810 eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]); 810 eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] |
811 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
811 812
812 /* Check the PCIe link is ready */ 813 /* Check the PCIe link is ready */
813 eeh_bridge_check_link(edev); 814 eeh_bridge_check_link(edev);
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 79d005445c6c..e734f6e45abc 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -553,12 +553,12 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
553#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 553#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
554 lbz r0,HSTATE_HWTHREAD_STATE(r13) 554 lbz r0,HSTATE_HWTHREAD_STATE(r13)
555 cmpwi r0,KVM_HWTHREAD_IN_KERNEL 555 cmpwi r0,KVM_HWTHREAD_IN_KERNEL
556 beq 1f 556 beq 0f
557 li r0,KVM_HWTHREAD_IN_KERNEL 557 li r0,KVM_HWTHREAD_IN_KERNEL
558 stb r0,HSTATE_HWTHREAD_STATE(r13) 558 stb r0,HSTATE_HWTHREAD_STATE(r13)
559 /* Order setting hwthread_state vs. testing hwthread_req */ 559 /* Order setting hwthread_state vs. testing hwthread_req */
560 sync 560 sync
561 lbz r0,HSTATE_HWTHREAD_REQ(r13) 5610: lbz r0,HSTATE_HWTHREAD_REQ(r13)
562 cmpwi r0,0 562 cmpwi r0,0
563 beq 1f 563 beq 1f
564 b kvm_start_guest 564 b kvm_start_guest
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index fe6fc63251fe..38c5b4764bfe 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -441,7 +441,6 @@ static int mce_handle_ierror(struct pt_regs *regs,
441 if (pfn != ULONG_MAX) { 441 if (pfn != ULONG_MAX) {
442 *phys_addr = 442 *phys_addr =
443 (pfn << PAGE_SHIFT); 443 (pfn << PAGE_SHIFT);
444 handled = 1;
445 } 444 }
446 } 445 }
447 } 446 }
@@ -532,9 +531,7 @@ static int mce_handle_derror(struct pt_regs *regs,
532 * kernel/exception-64s.h 531 * kernel/exception-64s.h
533 */ 532 */
534 if (get_paca()->in_mce < MAX_MCE_DEPTH) 533 if (get_paca()->in_mce < MAX_MCE_DEPTH)
535 if (!mce_find_instr_ea_and_pfn(regs, addr, 534 mce_find_instr_ea_and_pfn(regs, addr, phys_addr);
536 phys_addr))
537 handled = 1;
538 } 535 }
539 found = 1; 536 found = 1;
540 } 537 }
@@ -572,7 +569,7 @@ static long mce_handle_error(struct pt_regs *regs,
572 const struct mce_ierror_table itable[]) 569 const struct mce_ierror_table itable[])
573{ 570{
574 struct mce_error_info mce_err = { 0 }; 571 struct mce_error_info mce_err = { 0 };
575 uint64_t addr, phys_addr; 572 uint64_t addr, phys_addr = ULONG_MAX;
576 uint64_t srr1 = regs->msr; 573 uint64_t srr1 = regs->msr;
577 long handled; 574 long handled;
578 575
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 44c30dd38067..b78f142a4148 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -890,6 +890,17 @@ static void __ref init_fallback_flush(void)
890 return; 890 return;
891 891
892 l1d_size = ppc64_caches.l1d.size; 892 l1d_size = ppc64_caches.l1d.size;
893
894 /*
895 * If there is no d-cache-size property in the device tree, l1d_size
896 * could be zero. That leads to the loop in the asm wrapping around to
897 * 2^64-1, and then walking off the end of the fallback area and
898 * eventually causing a page fault which is fatal. Just default to
899 * something vaguely sane.
900 */
901 if (!l1d_size)
902 l1d_size = (64 * 1024);
903
893 limit = min(ppc64_bolted_size(), ppc64_rma_size); 904 limit = min(ppc64_bolted_size(), ppc64_rma_size);
894 905
895 /* 906 /*
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index e16ec7b3b427..9ca7148b5881 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -566,10 +566,35 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
566#endif 566#endif
567 567
568#ifdef CONFIG_NMI_IPI 568#ifdef CONFIG_NMI_IPI
569static void stop_this_cpu(struct pt_regs *regs) 569static void nmi_stop_this_cpu(struct pt_regs *regs)
570#else 570{
571 /*
572 * This is a special case because it never returns, so the NMI IPI
573 * handling would never mark it as done, which makes any later
574 * smp_send_nmi_ipi() call spin forever. Mark it done now.
575 *
576 * IRQs are already hard disabled by the smp_handle_nmi_ipi.
577 */
578 nmi_ipi_lock();
579 nmi_ipi_busy_count--;
580 nmi_ipi_unlock();
581
582 /* Remove this CPU */
583 set_cpu_online(smp_processor_id(), false);
584
585 spin_begin();
586 while (1)
587 spin_cpu_relax();
588}
589
590void smp_send_stop(void)
591{
592 smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
593}
594
595#else /* CONFIG_NMI_IPI */
596
571static void stop_this_cpu(void *dummy) 597static void stop_this_cpu(void *dummy)
572#endif
573{ 598{
574 /* Remove this CPU */ 599 /* Remove this CPU */
575 set_cpu_online(smp_processor_id(), false); 600 set_cpu_online(smp_processor_id(), false);
@@ -582,12 +607,22 @@ static void stop_this_cpu(void *dummy)
582 607
583void smp_send_stop(void) 608void smp_send_stop(void)
584{ 609{
585#ifdef CONFIG_NMI_IPI 610 static bool stopped = false;
586 smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, stop_this_cpu, 1000000); 611
587#else 612 /*
613 * Prevent waiting on csd lock from a previous smp_send_stop.
614 * This is racy, but in general callers try to do the right
615 * thing and only fire off one smp_send_stop (e.g., see
616 * kernel/panic.c)
617 */
618 if (stopped)
619 return;
620
621 stopped = true;
622
588 smp_call_function(stop_this_cpu, NULL, 0); 623 smp_call_function(stop_this_cpu, NULL, 0);
589#endif
590} 624}
625#endif /* CONFIG_NMI_IPI */
591 626
592struct thread_info *current_set[NR_CPUS]; 627struct thread_info *current_set[NR_CPUS];
593 628
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 6038e2e7aee0..876d4f294fdd 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -305,6 +305,13 @@ void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
305 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL); 305 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
306} 306}
307 307
308#ifdef CONFIG_ALTIVEC
309void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
310{
311 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
312}
313#endif
314
308void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu) 315void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
309{ 316{
310 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER); 317 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 35f80ab7cbd8..288fe4f0db4e 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -55,7 +55,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
55 unsigned int *target = (unsigned int *)branch_target(src); 55 unsigned int *target = (unsigned int *)branch_target(src);
56 56
57 /* Branch within the section doesn't need translating */ 57 /* Branch within the section doesn't need translating */
58 if (target < alt_start || target >= alt_end) { 58 if (target < alt_start || target > alt_end) {
59 instr = translate_branch(dest, src); 59 instr = translate_branch(dest, src);
60 if (!instr) 60 if (!instr)
61 return 1; 61 return 1;
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 737f8a4632cc..c3c39b02b2ba 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -133,6 +133,7 @@ int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *
133 start, start + size, rc); 133 start, start + size, rc);
134 return -EFAULT; 134 return -EFAULT;
135 } 135 }
136 flush_inval_dcache_range(start, start + size);
136 137
137 return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock); 138 return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
138} 139}
@@ -159,6 +160,7 @@ int __meminit arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap
159 160
160 /* Remove htab bolted mappings for this section of memory */ 161 /* Remove htab bolted mappings for this section of memory */
161 start = (unsigned long)__va(start); 162 start = (unsigned long)__va(start);
163 flush_inval_dcache_range(start, start + size);
162 ret = remove_section_mapping(start, start + size); 164 ret = remove_section_mapping(start, start + size);
163 165
164 /* Ensure all vmalloc mappings are flushed in case they also 166 /* Ensure all vmalloc mappings are flushed in case they also
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 9033c8194eda..ccc421503363 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -1093,7 +1093,7 @@ static int show_spu_loadavg(struct seq_file *s, void *private)
1093 LOAD_INT(c), LOAD_FRAC(c), 1093 LOAD_INT(c), LOAD_FRAC(c),
1094 count_active_contexts(), 1094 count_active_contexts(),
1095 atomic_read(&nr_spu_contexts), 1095 atomic_read(&nr_spu_contexts),
1096 idr_get_cursor(&task_active_pid_ns(current)->idr)); 1096 idr_get_cursor(&task_active_pid_ns(current)->idr) - 1);
1097 return 0; 1097 return 0;
1098} 1098}
1099 1099
diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
index de470caf0784..fc222a0c2ac4 100644
--- a/arch/powerpc/platforms/powernv/memtrace.c
+++ b/arch/powerpc/platforms/powernv/memtrace.c
@@ -82,19 +82,6 @@ static const struct file_operations memtrace_fops = {
82 .open = simple_open, 82 .open = simple_open,
83}; 83};
84 84
85static void flush_memory_region(u64 base, u64 size)
86{
87 unsigned long line_size = ppc64_caches.l1d.size;
88 u64 end = base + size;
89 u64 addr;
90
91 base = round_down(base, line_size);
92 end = round_up(end, line_size);
93
94 for (addr = base; addr < end; addr += line_size)
95 asm volatile("dcbf 0,%0" : "=r" (addr) :: "memory");
96}
97
98static int check_memblock_online(struct memory_block *mem, void *arg) 85static int check_memblock_online(struct memory_block *mem, void *arg)
99{ 86{
100 if (mem->state != MEM_ONLINE) 87 if (mem->state != MEM_ONLINE)
@@ -132,10 +119,6 @@ static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
132 walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE, 119 walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE,
133 change_memblock_state); 120 change_memblock_state);
134 121
135 /* RCU grace period? */
136 flush_memory_region((u64)__va(start_pfn << PAGE_SHIFT),
137 nr_pages << PAGE_SHIFT);
138
139 lock_device_hotplug(); 122 lock_device_hotplug();
140 remove_memory(nid, start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT); 123 remove_memory(nid, start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
141 unlock_device_hotplug(); 124 unlock_device_hotplug();
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 69a4f9e8bd55..525e966dce34 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -34,6 +34,19 @@
34#define npu_to_phb(x) container_of(x, struct pnv_phb, npu) 34#define npu_to_phb(x) container_of(x, struct pnv_phb, npu)
35 35
36/* 36/*
37 * spinlock to protect initialisation of an npu_context for a particular
38 * mm_struct.
39 */
40static DEFINE_SPINLOCK(npu_context_lock);
41
42/*
43 * When an address shootdown range exceeds this threshold we invalidate the
44 * entire TLB on the GPU for the given PID rather than each specific address in
45 * the range.
46 */
47#define ATSD_THRESHOLD (2*1024*1024)
48
49/*
37 * Other types of TCE cache invalidation are not functional in the 50 * Other types of TCE cache invalidation are not functional in the
38 * hardware. 51 * hardware.
39 */ 52 */
@@ -401,7 +414,7 @@ struct npu_context {
401 bool nmmu_flush; 414 bool nmmu_flush;
402 415
403 /* Callback to stop translation requests on a given GPU */ 416 /* Callback to stop translation requests on a given GPU */
404 struct npu_context *(*release_cb)(struct npu_context *, void *); 417 void (*release_cb)(struct npu_context *context, void *priv);
405 418
406 /* 419 /*
407 * Private pointer passed to the above callback for usage by 420 * Private pointer passed to the above callback for usage by
@@ -671,11 +684,19 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
671 struct npu_context *npu_context = mn_to_npu_context(mn); 684 struct npu_context *npu_context = mn_to_npu_context(mn);
672 unsigned long address; 685 unsigned long address;
673 686
674 for (address = start; address < end; address += PAGE_SIZE) 687 if (end - start > ATSD_THRESHOLD) {
675 mmio_invalidate(npu_context, 1, address, false); 688 /*
689 * Just invalidate the entire PID if the address range is too
690 * large.
691 */
692 mmio_invalidate(npu_context, 0, 0, true);
693 } else {
694 for (address = start; address < end; address += PAGE_SIZE)
695 mmio_invalidate(npu_context, 1, address, false);
676 696
677 /* Do the flush only on the final addess == end */ 697 /* Do the flush only on the final addess == end */
678 mmio_invalidate(npu_context, 1, address, true); 698 mmio_invalidate(npu_context, 1, address, true);
699 }
679} 700}
680 701
681static const struct mmu_notifier_ops nv_nmmu_notifier_ops = { 702static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
@@ -696,11 +717,12 @@ static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
696 * Returns an error if there no contexts are currently available or a 717 * Returns an error if there no contexts are currently available or a
697 * npu_context which should be passed to pnv_npu2_handle_fault(). 718 * npu_context which should be passed to pnv_npu2_handle_fault().
698 * 719 *
699 * mmap_sem must be held in write mode. 720 * mmap_sem must be held in write mode and must not be called from interrupt
721 * context.
700 */ 722 */
701struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, 723struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
702 unsigned long flags, 724 unsigned long flags,
703 struct npu_context *(*cb)(struct npu_context *, void *), 725 void (*cb)(struct npu_context *, void *),
704 void *priv) 726 void *priv)
705{ 727{
706 int rc; 728 int rc;
@@ -743,7 +765,9 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
743 /* 765 /*
744 * Setup the NPU context table for a particular GPU. These need to be 766 * Setup the NPU context table for a particular GPU. These need to be
745 * per-GPU as we need the tables to filter ATSDs when there are no 767 * per-GPU as we need the tables to filter ATSDs when there are no
746 * active contexts on a particular GPU. 768 * active contexts on a particular GPU. It is safe for these to be
769 * called concurrently with destroy as the OPAL call takes appropriate
770 * locks and refcounts on init/destroy.
747 */ 771 */
748 rc = opal_npu_init_context(nphb->opal_id, mm->context.id, flags, 772 rc = opal_npu_init_context(nphb->opal_id, mm->context.id, flags,
749 PCI_DEVID(gpdev->bus->number, gpdev->devfn)); 773 PCI_DEVID(gpdev->bus->number, gpdev->devfn));
@@ -754,8 +778,29 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
754 * We store the npu pci device so we can more easily get at the 778 * We store the npu pci device so we can more easily get at the
755 * associated npus. 779 * associated npus.
756 */ 780 */
781 spin_lock(&npu_context_lock);
757 npu_context = mm->context.npu_context; 782 npu_context = mm->context.npu_context;
783 if (npu_context) {
784 if (npu_context->release_cb != cb ||
785 npu_context->priv != priv) {
786 spin_unlock(&npu_context_lock);
787 opal_npu_destroy_context(nphb->opal_id, mm->context.id,
788 PCI_DEVID(gpdev->bus->number,
789 gpdev->devfn));
790 return ERR_PTR(-EINVAL);
791 }
792
793 WARN_ON(!kref_get_unless_zero(&npu_context->kref));
794 }
795 spin_unlock(&npu_context_lock);
796
758 if (!npu_context) { 797 if (!npu_context) {
798 /*
799 * We can set up these fields without holding the
800 * npu_context_lock as the npu_context hasn't been returned to
801 * the caller meaning it can't be destroyed. Parallel allocation
802 * is protected against by mmap_sem.
803 */
759 rc = -ENOMEM; 804 rc = -ENOMEM;
760 npu_context = kzalloc(sizeof(struct npu_context), GFP_KERNEL); 805 npu_context = kzalloc(sizeof(struct npu_context), GFP_KERNEL);
761 if (npu_context) { 806 if (npu_context) {
@@ -774,8 +819,6 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
774 } 819 }
775 820
776 mm->context.npu_context = npu_context; 821 mm->context.npu_context = npu_context;
777 } else {
778 WARN_ON(!kref_get_unless_zero(&npu_context->kref));
779 } 822 }
780 823
781 npu_context->release_cb = cb; 824 npu_context->release_cb = cb;
@@ -814,15 +857,16 @@ static void pnv_npu2_release_context(struct kref *kref)
814 mm_context_remove_copro(npu_context->mm); 857 mm_context_remove_copro(npu_context->mm);
815 858
816 npu_context->mm->context.npu_context = NULL; 859 npu_context->mm->context.npu_context = NULL;
817 mmu_notifier_unregister(&npu_context->mn,
818 npu_context->mm);
819
820 kfree(npu_context);
821} 860}
822 861
862/*
863 * Destroy a context on the given GPU. May free the npu_context if it is no
864 * longer active on any GPUs. Must not be called from interrupt context.
865 */
823void pnv_npu2_destroy_context(struct npu_context *npu_context, 866void pnv_npu2_destroy_context(struct npu_context *npu_context,
824 struct pci_dev *gpdev) 867 struct pci_dev *gpdev)
825{ 868{
869 int removed;
826 struct pnv_phb *nphb; 870 struct pnv_phb *nphb;
827 struct npu *npu; 871 struct npu *npu;
828 struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0); 872 struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
@@ -844,7 +888,21 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,
844 WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], NULL); 888 WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], NULL);
845 opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id, 889 opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
846 PCI_DEVID(gpdev->bus->number, gpdev->devfn)); 890 PCI_DEVID(gpdev->bus->number, gpdev->devfn));
847 kref_put(&npu_context->kref, pnv_npu2_release_context); 891 spin_lock(&npu_context_lock);
892 removed = kref_put(&npu_context->kref, pnv_npu2_release_context);
893 spin_unlock(&npu_context_lock);
894
895 /*
896 * We need to do this outside of pnv_npu2_release_context so that it is
897 * outside the spinlock as mmu_notifier_destroy uses SRCU.
898 */
899 if (removed) {
900 mmu_notifier_unregister(&npu_context->mn,
901 npu_context->mm);
902
903 kfree(npu_context);
904 }
905
848} 906}
849EXPORT_SYMBOL(pnv_npu2_destroy_context); 907EXPORT_SYMBOL(pnv_npu2_destroy_context);
850 908
diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c
index f8868864f373..aa2a5139462e 100644
--- a/arch/powerpc/platforms/powernv/opal-rtc.c
+++ b/arch/powerpc/platforms/powernv/opal-rtc.c
@@ -48,10 +48,12 @@ unsigned long __init opal_get_boot_time(void)
48 48
49 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { 49 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
50 rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); 50 rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
51 if (rc == OPAL_BUSY_EVENT) 51 if (rc == OPAL_BUSY_EVENT) {
52 mdelay(OPAL_BUSY_DELAY_MS);
52 opal_poll_events(NULL); 53 opal_poll_events(NULL);
53 else if (rc == OPAL_BUSY) 54 } else if (rc == OPAL_BUSY) {
54 mdelay(10); 55 mdelay(OPAL_BUSY_DELAY_MS);
56 }
55 } 57 }
56 if (rc != OPAL_SUCCESS) 58 if (rc != OPAL_SUCCESS)
57 return 0; 59 return 0;
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index d22aeb0b69e1..b48454be5b98 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -389,6 +389,10 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
389 if (xive_pool_vps == XIVE_INVALID_VP) 389 if (xive_pool_vps == XIVE_INVALID_VP)
390 return; 390 return;
391 391
392 /* Check if pool VP already active, if it is, pull it */
393 if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP)
394 in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);
395
392 /* Enable the pool VP */ 396 /* Enable the pool VP */
393 vp = xive_pool_vps + cpu; 397 vp = xive_pool_vps + cpu;
394 pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp); 398 pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp);
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 23d8acca5c90..cd4fd85fde84 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -11,6 +11,7 @@ config RISCV
11 select ARCH_WANT_FRAME_POINTERS 11 select ARCH_WANT_FRAME_POINTERS
12 select CLONE_BACKWARDS 12 select CLONE_BACKWARDS
13 select COMMON_CLK 13 select COMMON_CLK
14 select DMA_DIRECT_OPS
14 select GENERIC_CLOCKEVENTS 15 select GENERIC_CLOCKEVENTS
15 select GENERIC_CPU_DEVICES 16 select GENERIC_CPU_DEVICES
16 select GENERIC_IRQ_SHOW 17 select GENERIC_IRQ_SHOW
@@ -89,9 +90,6 @@ config PGTABLE_LEVELS
89config HAVE_KPROBES 90config HAVE_KPROBES
90 def_bool n 91 def_bool n
91 92
92config DMA_DIRECT_OPS
93 def_bool y
94
95menu "Platform type" 93menu "Platform type"
96 94
97choice 95choice
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index 1e5fd280fb4d..4286a5f83876 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -15,7 +15,6 @@ generic-y += fcntl.h
15generic-y += futex.h 15generic-y += futex.h
16generic-y += hardirq.h 16generic-y += hardirq.h
17generic-y += hash.h 17generic-y += hash.h
18generic-y += handle_irq.h
19generic-y += hw_irq.h 18generic-y += hw_irq.h
20generic-y += ioctl.h 19generic-y += ioctl.h
21generic-y += ioctls.h 20generic-y += ioctls.h
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index 324568d33921..f6561b783b61 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -52,7 +52,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
52# Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions. 52# Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions.
53# Make sure only to export the intended __vdso_xxx symbol offsets. 53# Make sure only to export the intended __vdso_xxx symbol offsets.
54quiet_cmd_vdsold = VDSOLD $@ 54quiet_cmd_vdsold = VDSOLD $@
55 cmd_vdsold = $(CC) $(KCFLAGS) -nostdlib $(SYSCFLAGS_$(@F)) \ 55 cmd_vdsold = $(CC) $(KCFLAGS) $(call cc-option, -no-pie) -nostdlib $(SYSCFLAGS_$(@F)) \
56 -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp -lgcc && \ 56 -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp -lgcc && \
57 $(CROSS_COMPILE)objcopy \ 57 $(CROSS_COMPILE)objcopy \
58 $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ 58 $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@
diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild
index 9fdff3fe1a42..e63940bb57cd 100644
--- a/arch/s390/Kbuild
+++ b/arch/s390/Kbuild
@@ -8,3 +8,4 @@ obj-$(CONFIG_APPLDATA_BASE) += appldata/
8obj-y += net/ 8obj-y += net/
9obj-$(CONFIG_PCI) += pci/ 9obj-$(CONFIG_PCI) += pci/
10obj-$(CONFIG_NUMA) += numa/ 10obj-$(CONFIG_NUMA) += numa/
11obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += purgatory/
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 32a0d5b958bf..199ac3e4da1d 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -47,10 +47,6 @@ config PGSTE
47config ARCH_SUPPORTS_DEBUG_PAGEALLOC 47config ARCH_SUPPORTS_DEBUG_PAGEALLOC
48 def_bool y 48 def_bool y
49 49
50config KEXEC
51 def_bool y
52 select KEXEC_CORE
53
54config AUDIT_ARCH 50config AUDIT_ARCH
55 def_bool y 51 def_bool y
56 52
@@ -290,12 +286,12 @@ config MARCH_Z13
290 older machines. 286 older machines.
291 287
292config MARCH_Z14 288config MARCH_Z14
293 bool "IBM z14" 289 bool "IBM z14 ZR1 and z14"
294 select HAVE_MARCH_Z14_FEATURES 290 select HAVE_MARCH_Z14_FEATURES
295 help 291 help
296 Select this to enable optimizations for IBM z14 (3906 series). 292 Select this to enable optimizations for IBM z14 ZR1 and z14 (3907
297 The kernel will be slightly faster but will not work on older 293 and 3906 series). The kernel will be slightly faster but will not
298 machines. 294 work on older machines.
299 295
300endchoice 296endchoice
301 297
@@ -525,6 +521,26 @@ source kernel/Kconfig.preempt
525 521
526source kernel/Kconfig.hz 522source kernel/Kconfig.hz
527 523
524config KEXEC
525 def_bool y
526 select KEXEC_CORE
527
528config KEXEC_FILE
529 bool "kexec file based system call"
530 select KEXEC_CORE
531 select BUILD_BIN2C
532 depends on CRYPTO
533 depends on CRYPTO_SHA256
534 depends on CRYPTO_SHA256_S390
535 help
536 Enable the kexec file based system call. In contrast to the normal
537 kexec system call this system call takes file descriptors for the
538 kernel and initramfs as arguments.
539
540config ARCH_HAS_KEXEC_PURGATORY
541 def_bool y
542 depends on KEXEC_FILE
543
528config ARCH_RANDOM 544config ARCH_RANDOM
529 def_bool y 545 def_bool y
530 prompt "s390 architectural random number generation API" 546 prompt "s390 architectural random number generation API"
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index da9dad35c28e..d1fa37fcce83 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -3,12 +3,6 @@
3# Makefile for the linux s390-specific parts of the memory manager. 3# Makefile for the linux s390-specific parts of the memory manager.
4# 4#
5 5
6COMPILE_VERSION := __linux_compile_version_id__`hostname | \
7 tr -c '[0-9A-Za-z]' '_'`__`date | \
8 tr -c '[0-9A-Za-z]' '_'`_t
9
10ccflags-y := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I.
11
12targets := image 6targets := image
13targets += bzImage 7targets += bzImage
14subdir- := compressed 8subdir- := compressed
diff --git a/arch/s390/boot/compressed/.gitignore b/arch/s390/boot/compressed/.gitignore
index ae06b9b4c02f..2088cc140629 100644
--- a/arch/s390/boot/compressed/.gitignore
+++ b/arch/s390/boot/compressed/.gitignore
@@ -1,3 +1,4 @@
1sizes.h 1sizes.h
2vmlinux 2vmlinux
3vmlinux.lds 3vmlinux.lds
4vmlinux.bin.full
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/debug_defconfig
index 5af8458951cf..6176fe9795ca 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -24,13 +24,13 @@ CONFIG_CPUSETS=y
24CONFIG_CGROUP_DEVICE=y 24CONFIG_CGROUP_DEVICE=y
25CONFIG_CGROUP_CPUACCT=y 25CONFIG_CGROUP_CPUACCT=y
26CONFIG_CGROUP_PERF=y 26CONFIG_CGROUP_PERF=y
27CONFIG_CHECKPOINT_RESTORE=y
28CONFIG_NAMESPACES=y 27CONFIG_NAMESPACES=y
29CONFIG_USER_NS=y 28CONFIG_USER_NS=y
30CONFIG_SCHED_AUTOGROUP=y 29CONFIG_SCHED_AUTOGROUP=y
31CONFIG_BLK_DEV_INITRD=y 30CONFIG_BLK_DEV_INITRD=y
32CONFIG_EXPERT=y 31CONFIG_EXPERT=y
33# CONFIG_SYSFS_SYSCALL is not set 32# CONFIG_SYSFS_SYSCALL is not set
33CONFIG_CHECKPOINT_RESTORE=y
34CONFIG_BPF_SYSCALL=y 34CONFIG_BPF_SYSCALL=y
35CONFIG_USERFAULTFD=y 35CONFIG_USERFAULTFD=y
36# CONFIG_COMPAT_BRK is not set 36# CONFIG_COMPAT_BRK is not set
@@ -59,10 +59,11 @@ CONFIG_CFQ_GROUP_IOSCHED=y
59CONFIG_DEFAULT_DEADLINE=y 59CONFIG_DEFAULT_DEADLINE=y
60CONFIG_LIVEPATCH=y 60CONFIG_LIVEPATCH=y
61CONFIG_TUNE_ZEC12=y 61CONFIG_TUNE_ZEC12=y
62CONFIG_NR_CPUS=256 62CONFIG_NR_CPUS=512
63CONFIG_NUMA=y 63CONFIG_NUMA=y
64CONFIG_PREEMPT=y 64CONFIG_PREEMPT=y
65CONFIG_HZ_100=y 65CONFIG_HZ_100=y
66CONFIG_KEXEC_FILE=y
66CONFIG_MEMORY_HOTPLUG=y 67CONFIG_MEMORY_HOTPLUG=y
67CONFIG_MEMORY_HOTREMOVE=y 68CONFIG_MEMORY_HOTREMOVE=y
68CONFIG_KSM=y 69CONFIG_KSM=y
@@ -305,7 +306,6 @@ CONFIG_IP6_NF_SECURITY=m
305CONFIG_IP6_NF_NAT=m 306CONFIG_IP6_NF_NAT=m
306CONFIG_IP6_NF_TARGET_MASQUERADE=m 307CONFIG_IP6_NF_TARGET_MASQUERADE=m
307CONFIG_NF_TABLES_BRIDGE=m 308CONFIG_NF_TABLES_BRIDGE=m
308CONFIG_NET_SCTPPROBE=m
309CONFIG_RDS=m 309CONFIG_RDS=m
310CONFIG_RDS_RDMA=m 310CONFIG_RDS_RDMA=m
311CONFIG_RDS_TCP=m 311CONFIG_RDS_TCP=m
@@ -364,11 +364,11 @@ CONFIG_NET_ACT_SIMP=m
364CONFIG_NET_ACT_SKBEDIT=m 364CONFIG_NET_ACT_SKBEDIT=m
365CONFIG_NET_ACT_CSUM=m 365CONFIG_NET_ACT_CSUM=m
366CONFIG_DNS_RESOLVER=y 366CONFIG_DNS_RESOLVER=y
367CONFIG_OPENVSWITCH=m
367CONFIG_NETLINK_DIAG=m 368CONFIG_NETLINK_DIAG=m
368CONFIG_CGROUP_NET_PRIO=y 369CONFIG_CGROUP_NET_PRIO=y
369CONFIG_BPF_JIT=y 370CONFIG_BPF_JIT=y
370CONFIG_NET_PKTGEN=m 371CONFIG_NET_PKTGEN=m
371CONFIG_NET_TCPPROBE=m
372CONFIG_DEVTMPFS=y 372CONFIG_DEVTMPFS=y
373CONFIG_DMA_CMA=y 373CONFIG_DMA_CMA=y
374CONFIG_CMA_SIZE_MBYTES=0 374CONFIG_CMA_SIZE_MBYTES=0
@@ -380,9 +380,9 @@ CONFIG_BLK_DEV_DRBD=m
380CONFIG_BLK_DEV_NBD=m 380CONFIG_BLK_DEV_NBD=m
381CONFIG_BLK_DEV_RAM=y 381CONFIG_BLK_DEV_RAM=y
382CONFIG_BLK_DEV_RAM_SIZE=32768 382CONFIG_BLK_DEV_RAM_SIZE=32768
383CONFIG_BLK_DEV_RAM_DAX=y
384CONFIG_VIRTIO_BLK=y 383CONFIG_VIRTIO_BLK=y
385CONFIG_BLK_DEV_RBD=m 384CONFIG_BLK_DEV_RBD=m
385CONFIG_BLK_DEV_NVME=m
386CONFIG_ENCLOSURE_SERVICES=m 386CONFIG_ENCLOSURE_SERVICES=m
387CONFIG_GENWQE=m 387CONFIG_GENWQE=m
388CONFIG_RAID_ATTRS=m 388CONFIG_RAID_ATTRS=m
@@ -461,6 +461,7 @@ CONFIG_PPTP=m
461CONFIG_PPPOL2TP=m 461CONFIG_PPPOL2TP=m
462CONFIG_PPP_ASYNC=m 462CONFIG_PPP_ASYNC=m
463CONFIG_PPP_SYNC_TTY=m 463CONFIG_PPP_SYNC_TTY=m
464CONFIG_INPUT_EVDEV=y
464# CONFIG_INPUT_KEYBOARD is not set 465# CONFIG_INPUT_KEYBOARD is not set
465# CONFIG_INPUT_MOUSE is not set 466# CONFIG_INPUT_MOUSE is not set
466# CONFIG_SERIO is not set 467# CONFIG_SERIO is not set
@@ -474,6 +475,9 @@ CONFIG_WATCHDOG=y
474CONFIG_WATCHDOG_NOWAYOUT=y 475CONFIG_WATCHDOG_NOWAYOUT=y
475CONFIG_SOFT_WATCHDOG=m 476CONFIG_SOFT_WATCHDOG=m
476CONFIG_DIAG288_WATCHDOG=m 477CONFIG_DIAG288_WATCHDOG=m
478CONFIG_DRM=y
479CONFIG_DRM_VIRTIO_GPU=y
480CONFIG_FRAMEBUFFER_CONSOLE=y
477# CONFIG_HID is not set 481# CONFIG_HID is not set
478# CONFIG_USB_SUPPORT is not set 482# CONFIG_USB_SUPPORT is not set
479CONFIG_INFINIBAND=m 483CONFIG_INFINIBAND=m
@@ -482,7 +486,9 @@ CONFIG_MLX4_INFINIBAND=m
482CONFIG_MLX5_INFINIBAND=m 486CONFIG_MLX5_INFINIBAND=m
483CONFIG_VFIO=m 487CONFIG_VFIO=m
484CONFIG_VFIO_PCI=m 488CONFIG_VFIO_PCI=m
489CONFIG_VIRTIO_PCI=m
485CONFIG_VIRTIO_BALLOON=m 490CONFIG_VIRTIO_BALLOON=m
491CONFIG_VIRTIO_INPUT=y
486CONFIG_EXT4_FS=y 492CONFIG_EXT4_FS=y
487CONFIG_EXT4_FS_POSIX_ACL=y 493CONFIG_EXT4_FS_POSIX_ACL=y
488CONFIG_EXT4_FS_SECURITY=y 494CONFIG_EXT4_FS_SECURITY=y
@@ -641,6 +647,8 @@ CONFIG_ATOMIC64_SELFTEST=y
641CONFIG_TEST_BPF=m 647CONFIG_TEST_BPF=m
642CONFIG_BUG_ON_DATA_CORRUPTION=y 648CONFIG_BUG_ON_DATA_CORRUPTION=y
643CONFIG_S390_PTDUMP=y 649CONFIG_S390_PTDUMP=y
650CONFIG_PERSISTENT_KEYRINGS=y
651CONFIG_BIG_KEYS=y
644CONFIG_ENCRYPTED_KEYS=m 652CONFIG_ENCRYPTED_KEYS=m
645CONFIG_SECURITY=y 653CONFIG_SECURITY=y
646CONFIG_SECURITY_NETWORK=y 654CONFIG_SECURITY_NETWORK=y
@@ -649,17 +657,20 @@ CONFIG_SECURITY_SELINUX=y
649CONFIG_SECURITY_SELINUX_BOOTPARAM=y 657CONFIG_SECURITY_SELINUX_BOOTPARAM=y
650CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 658CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
651CONFIG_SECURITY_SELINUX_DISABLE=y 659CONFIG_SECURITY_SELINUX_DISABLE=y
660CONFIG_INTEGRITY_SIGNATURE=y
661CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
652CONFIG_IMA=y 662CONFIG_IMA=y
663CONFIG_IMA_DEFAULT_HASH_SHA256=y
664CONFIG_IMA_WRITE_POLICY=y
653CONFIG_IMA_APPRAISE=y 665CONFIG_IMA_APPRAISE=y
654CONFIG_CRYPTO_RSA=m
655CONFIG_CRYPTO_DH=m 666CONFIG_CRYPTO_DH=m
656CONFIG_CRYPTO_ECDH=m 667CONFIG_CRYPTO_ECDH=m
657CONFIG_CRYPTO_USER=m 668CONFIG_CRYPTO_USER=m
669# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
658CONFIG_CRYPTO_PCRYPT=m 670CONFIG_CRYPTO_PCRYPT=m
659CONFIG_CRYPTO_CRYPTD=m 671CONFIG_CRYPTO_CRYPTD=m
660CONFIG_CRYPTO_MCRYPTD=m 672CONFIG_CRYPTO_MCRYPTD=m
661CONFIG_CRYPTO_TEST=m 673CONFIG_CRYPTO_TEST=m
662CONFIG_CRYPTO_GCM=m
663CONFIG_CRYPTO_CHACHA20POLY1305=m 674CONFIG_CRYPTO_CHACHA20POLY1305=m
664CONFIG_CRYPTO_LRW=m 675CONFIG_CRYPTO_LRW=m
665CONFIG_CRYPTO_PCBC=m 676CONFIG_CRYPTO_PCBC=m
@@ -707,9 +718,8 @@ CONFIG_CRYPTO_DES_S390=m
707CONFIG_CRYPTO_AES_S390=m 718CONFIG_CRYPTO_AES_S390=m
708CONFIG_CRYPTO_GHASH_S390=m 719CONFIG_CRYPTO_GHASH_S390=m
709CONFIG_CRYPTO_CRC32_S390=y 720CONFIG_CRYPTO_CRC32_S390=y
710CONFIG_ASYMMETRIC_KEY_TYPE=y 721CONFIG_PKCS7_MESSAGE_PARSER=y
711CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m 722CONFIG_SYSTEM_TRUSTED_KEYRING=y
712CONFIG_X509_CERTIFICATE_PARSER=m
713CONFIG_CRC7=m 723CONFIG_CRC7=m
714CONFIG_CRC8=m 724CONFIG_CRC8=m
715CONFIG_RANDOM32_SELFTEST=y 725CONFIG_RANDOM32_SELFTEST=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
deleted file mode 100644
index d52eafe57ae8..000000000000
--- a/arch/s390/configs/gcov_defconfig
+++ /dev/null
@@ -1,661 +0,0 @@
1CONFIG_SYSVIPC=y
2CONFIG_POSIX_MQUEUE=y
3CONFIG_AUDIT=y
4CONFIG_NO_HZ_IDLE=y
5CONFIG_HIGH_RES_TIMERS=y
6CONFIG_BSD_PROCESS_ACCT=y
7CONFIG_BSD_PROCESS_ACCT_V3=y
8CONFIG_TASKSTATS=y
9CONFIG_TASK_DELAY_ACCT=y
10CONFIG_TASK_XACCT=y
11CONFIG_TASK_IO_ACCOUNTING=y
12CONFIG_IKCONFIG=y
13CONFIG_IKCONFIG_PROC=y
14CONFIG_NUMA_BALANCING=y
15# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
16CONFIG_MEMCG=y
17CONFIG_MEMCG_SWAP=y
18CONFIG_BLK_CGROUP=y
19CONFIG_CFS_BANDWIDTH=y
20CONFIG_RT_GROUP_SCHED=y
21CONFIG_CGROUP_PIDS=y
22CONFIG_CGROUP_FREEZER=y
23CONFIG_CGROUP_HUGETLB=y
24CONFIG_CPUSETS=y
25CONFIG_CGROUP_DEVICE=y
26CONFIG_CGROUP_CPUACCT=y
27CONFIG_CGROUP_PERF=y
28CONFIG_CHECKPOINT_RESTORE=y
29CONFIG_NAMESPACES=y
30CONFIG_USER_NS=y
31CONFIG_SCHED_AUTOGROUP=y
32CONFIG_BLK_DEV_INITRD=y
33CONFIG_EXPERT=y
34# CONFIG_SYSFS_SYSCALL is not set
35CONFIG_BPF_SYSCALL=y
36CONFIG_USERFAULTFD=y
37# CONFIG_COMPAT_BRK is not set
38CONFIG_PROFILING=y
39CONFIG_OPROFILE=m
40CONFIG_KPROBES=y
41CONFIG_JUMP_LABEL=y
42CONFIG_GCOV_KERNEL=y
43CONFIG_GCOV_PROFILE_ALL=y
44CONFIG_MODULES=y
45CONFIG_MODULE_FORCE_LOAD=y
46CONFIG_MODULE_UNLOAD=y
47CONFIG_MODULE_FORCE_UNLOAD=y
48CONFIG_MODVERSIONS=y
49CONFIG_MODULE_SRCVERSION_ALL=y
50CONFIG_BLK_DEV_INTEGRITY=y
51CONFIG_BLK_DEV_THROTTLING=y
52CONFIG_BLK_WBT=y
53CONFIG_BLK_WBT_SQ=y
54CONFIG_PARTITION_ADVANCED=y
55CONFIG_IBM_PARTITION=y
56CONFIG_BSD_DISKLABEL=y
57CONFIG_MINIX_SUBPARTITION=y
58CONFIG_SOLARIS_X86_PARTITION=y
59CONFIG_UNIXWARE_DISKLABEL=y
60CONFIG_CFQ_GROUP_IOSCHED=y
61CONFIG_DEFAULT_DEADLINE=y
62CONFIG_LIVEPATCH=y
63CONFIG_TUNE_ZEC12=y
64CONFIG_NR_CPUS=512
65CONFIG_NUMA=y
66CONFIG_HZ_100=y
67CONFIG_MEMORY_HOTPLUG=y
68CONFIG_MEMORY_HOTREMOVE=y
69CONFIG_KSM=y
70CONFIG_TRANSPARENT_HUGEPAGE=y
71CONFIG_CLEANCACHE=y
72CONFIG_FRONTSWAP=y
73CONFIG_MEM_SOFT_DIRTY=y
74CONFIG_ZSWAP=y
75CONFIG_ZBUD=m
76CONFIG_ZSMALLOC=m
77CONFIG_ZSMALLOC_STAT=y
78CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
79CONFIG_IDLE_PAGE_TRACKING=y
80CONFIG_PCI=y
81CONFIG_HOTPLUG_PCI=y
82CONFIG_HOTPLUG_PCI_S390=y
83CONFIG_CHSC_SCH=y
84CONFIG_CRASH_DUMP=y
85CONFIG_BINFMT_MISC=m
86CONFIG_HIBERNATION=y
87CONFIG_NET=y
88CONFIG_PACKET=y
89CONFIG_PACKET_DIAG=m
90CONFIG_UNIX=y
91CONFIG_UNIX_DIAG=m
92CONFIG_XFRM_USER=m
93CONFIG_NET_KEY=m
94CONFIG_SMC=m
95CONFIG_SMC_DIAG=m
96CONFIG_INET=y
97CONFIG_IP_MULTICAST=y
98CONFIG_IP_ADVANCED_ROUTER=y
99CONFIG_IP_MULTIPLE_TABLES=y
100CONFIG_IP_ROUTE_MULTIPATH=y
101CONFIG_IP_ROUTE_VERBOSE=y
102CONFIG_NET_IPIP=m
103CONFIG_NET_IPGRE_DEMUX=m
104CONFIG_NET_IPGRE=m
105CONFIG_NET_IPGRE_BROADCAST=y
106CONFIG_IP_MROUTE=y
107CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
108CONFIG_IP_PIMSM_V1=y
109CONFIG_IP_PIMSM_V2=y
110CONFIG_SYN_COOKIES=y
111CONFIG_NET_IPVTI=m
112CONFIG_INET_AH=m
113CONFIG_INET_ESP=m
114CONFIG_INET_IPCOMP=m
115CONFIG_INET_XFRM_MODE_TRANSPORT=m
116CONFIG_INET_XFRM_MODE_TUNNEL=m
117CONFIG_INET_XFRM_MODE_BEET=m
118CONFIG_INET_DIAG=m
119CONFIG_INET_UDP_DIAG=m
120CONFIG_TCP_CONG_ADVANCED=y
121CONFIG_TCP_CONG_HSTCP=m
122CONFIG_TCP_CONG_HYBLA=m
123CONFIG_TCP_CONG_SCALABLE=m
124CONFIG_TCP_CONG_LP=m
125CONFIG_TCP_CONG_VENO=m
126CONFIG_TCP_CONG_YEAH=m
127CONFIG_TCP_CONG_ILLINOIS=m
128CONFIG_IPV6_ROUTER_PREF=y
129CONFIG_INET6_AH=m
130CONFIG_INET6_ESP=m
131CONFIG_INET6_IPCOMP=m
132CONFIG_IPV6_MIP6=m
133CONFIG_INET6_XFRM_MODE_TRANSPORT=m
134CONFIG_INET6_XFRM_MODE_TUNNEL=m
135CONFIG_INET6_XFRM_MODE_BEET=m
136CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
137CONFIG_IPV6_VTI=m
138CONFIG_IPV6_SIT=m
139CONFIG_IPV6_GRE=m
140CONFIG_IPV6_MULTIPLE_TABLES=y
141CONFIG_IPV6_SUBTREES=y
142CONFIG_NETFILTER=y
143CONFIG_NF_CONNTRACK=m
144CONFIG_NF_CONNTRACK_SECMARK=y
145CONFIG_NF_CONNTRACK_EVENTS=y
146CONFIG_NF_CONNTRACK_TIMEOUT=y
147CONFIG_NF_CONNTRACK_TIMESTAMP=y
148CONFIG_NF_CONNTRACK_AMANDA=m
149CONFIG_NF_CONNTRACK_FTP=m
150CONFIG_NF_CONNTRACK_H323=m
151CONFIG_NF_CONNTRACK_IRC=m
152CONFIG_NF_CONNTRACK_NETBIOS_NS=m
153CONFIG_NF_CONNTRACK_SNMP=m
154CONFIG_NF_CONNTRACK_PPTP=m
155CONFIG_NF_CONNTRACK_SANE=m
156CONFIG_NF_CONNTRACK_SIP=m
157CONFIG_NF_CONNTRACK_TFTP=m
158CONFIG_NF_CT_NETLINK=m
159CONFIG_NF_CT_NETLINK_TIMEOUT=m
160CONFIG_NF_TABLES=m
161CONFIG_NFT_EXTHDR=m
162CONFIG_NFT_META=m
163CONFIG_NFT_CT=m
164CONFIG_NFT_COUNTER=m
165CONFIG_NFT_LOG=m
166CONFIG_NFT_LIMIT=m
167CONFIG_NFT_NAT=m
168CONFIG_NFT_COMPAT=m
169CONFIG_NFT_HASH=m
170CONFIG_NETFILTER_XT_SET=m
171CONFIG_NETFILTER_XT_TARGET_AUDIT=m
172CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
173CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
174CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
175CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
176CONFIG_NETFILTER_XT_TARGET_CT=m
177CONFIG_NETFILTER_XT_TARGET_DSCP=m
178CONFIG_NETFILTER_XT_TARGET_HMARK=m
179CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
180CONFIG_NETFILTER_XT_TARGET_LOG=m
181CONFIG_NETFILTER_XT_TARGET_MARK=m
182CONFIG_NETFILTER_XT_TARGET_NFLOG=m
183CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
184CONFIG_NETFILTER_XT_TARGET_TEE=m
185CONFIG_NETFILTER_XT_TARGET_TPROXY=m
186CONFIG_NETFILTER_XT_TARGET_TRACE=m
187CONFIG_NETFILTER_XT_TARGET_SECMARK=m
188CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
189CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
190CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
191CONFIG_NETFILTER_XT_MATCH_BPF=m
192CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
193CONFIG_NETFILTER_XT_MATCH_COMMENT=m
194CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
195CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
196CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
197CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
198CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
199CONFIG_NETFILTER_XT_MATCH_CPU=m
200CONFIG_NETFILTER_XT_MATCH_DCCP=m
201CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
202CONFIG_NETFILTER_XT_MATCH_DSCP=m
203CONFIG_NETFILTER_XT_MATCH_ESP=m
204CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
205CONFIG_NETFILTER_XT_MATCH_HELPER=m
206CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
207CONFIG_NETFILTER_XT_MATCH_IPVS=m
208CONFIG_NETFILTER_XT_MATCH_LENGTH=m
209CONFIG_NETFILTER_XT_MATCH_LIMIT=m
210CONFIG_NETFILTER_XT_MATCH_MAC=m
211CONFIG_NETFILTER_XT_MATCH_MARK=m
212CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
213CONFIG_NETFILTER_XT_MATCH_NFACCT=m
214CONFIG_NETFILTER_XT_MATCH_OSF=m
215CONFIG_NETFILTER_XT_MATCH_OWNER=m
216CONFIG_NETFILTER_XT_MATCH_POLICY=m
217CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
218CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
219CONFIG_NETFILTER_XT_MATCH_QUOTA=m
220CONFIG_NETFILTER_XT_MATCH_RATEEST=m
221CONFIG_NETFILTER_XT_MATCH_REALM=m
222CONFIG_NETFILTER_XT_MATCH_RECENT=m
223CONFIG_NETFILTER_XT_MATCH_STATE=m
224CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
225CONFIG_NETFILTER_XT_MATCH_STRING=m
226CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
227CONFIG_NETFILTER_XT_MATCH_TIME=m
228CONFIG_NETFILTER_XT_MATCH_U32=m
229CONFIG_IP_SET=m
230CONFIG_IP_SET_BITMAP_IP=m
231CONFIG_IP_SET_BITMAP_IPMAC=m
232CONFIG_IP_SET_BITMAP_PORT=m
233CONFIG_IP_SET_HASH_IP=m
234CONFIG_IP_SET_HASH_IPPORT=m
235CONFIG_IP_SET_HASH_IPPORTIP=m
236CONFIG_IP_SET_HASH_IPPORTNET=m
237CONFIG_IP_SET_HASH_NETPORTNET=m
238CONFIG_IP_SET_HASH_NET=m
239CONFIG_IP_SET_HASH_NETNET=m
240CONFIG_IP_SET_HASH_NETPORT=m
241CONFIG_IP_SET_HASH_NETIFACE=m
242CONFIG_IP_SET_LIST_SET=m
243CONFIG_IP_VS=m
244CONFIG_IP_VS_PROTO_TCP=y
245CONFIG_IP_VS_PROTO_UDP=y
246CONFIG_IP_VS_PROTO_ESP=y
247CONFIG_IP_VS_PROTO_AH=y
248CONFIG_IP_VS_RR=m
249CONFIG_IP_VS_WRR=m
250CONFIG_IP_VS_LC=m
251CONFIG_IP_VS_WLC=m
252CONFIG_IP_VS_LBLC=m
253CONFIG_IP_VS_LBLCR=m
254CONFIG_IP_VS_DH=m
255CONFIG_IP_VS_SH=m
256CONFIG_IP_VS_SED=m
257CONFIG_IP_VS_NQ=m
258CONFIG_IP_VS_FTP=m
259CONFIG_IP_VS_PE_SIP=m
260CONFIG_NF_CONNTRACK_IPV4=m
261CONFIG_NF_TABLES_IPV4=m
262CONFIG_NFT_CHAIN_ROUTE_IPV4=m
263CONFIG_NF_TABLES_ARP=m
264CONFIG_NFT_CHAIN_NAT_IPV4=m
265CONFIG_IP_NF_IPTABLES=m
266CONFIG_IP_NF_MATCH_AH=m
267CONFIG_IP_NF_MATCH_ECN=m
268CONFIG_IP_NF_MATCH_RPFILTER=m
269CONFIG_IP_NF_MATCH_TTL=m
270CONFIG_IP_NF_FILTER=m
271CONFIG_IP_NF_TARGET_REJECT=m
272CONFIG_IP_NF_NAT=m
273CONFIG_IP_NF_TARGET_MASQUERADE=m
274CONFIG_IP_NF_MANGLE=m
275CONFIG_IP_NF_TARGET_CLUSTERIP=m
276CONFIG_IP_NF_TARGET_ECN=m
277CONFIG_IP_NF_TARGET_TTL=m
278CONFIG_IP_NF_RAW=m
279CONFIG_IP_NF_SECURITY=m
280CONFIG_IP_NF_ARPTABLES=m
281CONFIG_IP_NF_ARPFILTER=m
282CONFIG_IP_NF_ARP_MANGLE=m
283CONFIG_NF_CONNTRACK_IPV6=m
284CONFIG_NF_TABLES_IPV6=m
285CONFIG_NFT_CHAIN_ROUTE_IPV6=m
286CONFIG_NFT_CHAIN_NAT_IPV6=m
287CONFIG_IP6_NF_IPTABLES=m
288CONFIG_IP6_NF_MATCH_AH=m
289CONFIG_IP6_NF_MATCH_EUI64=m
290CONFIG_IP6_NF_MATCH_FRAG=m
291CONFIG_IP6_NF_MATCH_OPTS=m
292CONFIG_IP6_NF_MATCH_HL=m
293CONFIG_IP6_NF_MATCH_IPV6HEADER=m
294CONFIG_IP6_NF_MATCH_MH=m
295CONFIG_IP6_NF_MATCH_RPFILTER=m
296CONFIG_IP6_NF_MATCH_RT=m
297CONFIG_IP6_NF_TARGET_HL=m
298CONFIG_IP6_NF_FILTER=m
299CONFIG_IP6_NF_TARGET_REJECT=m
300CONFIG_IP6_NF_MANGLE=m
301CONFIG_IP6_NF_RAW=m
302CONFIG_IP6_NF_SECURITY=m
303CONFIG_IP6_NF_NAT=m
304CONFIG_IP6_NF_TARGET_MASQUERADE=m
305CONFIG_NF_TABLES_BRIDGE=m
306CONFIG_NET_SCTPPROBE=m
307CONFIG_RDS=m
308CONFIG_RDS_RDMA=m
309CONFIG_RDS_TCP=m
310CONFIG_L2TP=m
311CONFIG_L2TP_DEBUGFS=m
312CONFIG_L2TP_V3=y
313CONFIG_L2TP_IP=m
314CONFIG_L2TP_ETH=m
315CONFIG_BRIDGE=m
316CONFIG_VLAN_8021Q=m
317CONFIG_VLAN_8021Q_GVRP=y
318CONFIG_NET_SCHED=y
319CONFIG_NET_SCH_CBQ=m
320CONFIG_NET_SCH_HTB=m
321CONFIG_NET_SCH_HFSC=m
322CONFIG_NET_SCH_PRIO=m
323CONFIG_NET_SCH_MULTIQ=m
324CONFIG_NET_SCH_RED=m
325CONFIG_NET_SCH_SFB=m
326CONFIG_NET_SCH_SFQ=m
327CONFIG_NET_SCH_TEQL=m
328CONFIG_NET_SCH_TBF=m
329CONFIG_NET_SCH_GRED=m
330CONFIG_NET_SCH_DSMARK=m
331CONFIG_NET_SCH_NETEM=m
332CONFIG_NET_SCH_DRR=m
333CONFIG_NET_SCH_MQPRIO=m
334CONFIG_NET_SCH_CHOKE=m
335CONFIG_NET_SCH_QFQ=m
336CONFIG_NET_SCH_CODEL=m
337CONFIG_NET_SCH_FQ_CODEL=m
338CONFIG_NET_SCH_INGRESS=m
339CONFIG_NET_SCH_PLUG=m
340CONFIG_NET_CLS_BASIC=m
341CONFIG_NET_CLS_TCINDEX=m
342CONFIG_NET_CLS_ROUTE4=m
343CONFIG_NET_CLS_FW=m
344CONFIG_NET_CLS_U32=m
345CONFIG_CLS_U32_PERF=y
346CONFIG_CLS_U32_MARK=y
347CONFIG_NET_CLS_RSVP=m
348CONFIG_NET_CLS_RSVP6=m
349CONFIG_NET_CLS_FLOW=m
350CONFIG_NET_CLS_CGROUP=y
351CONFIG_NET_CLS_BPF=m
352CONFIG_NET_CLS_ACT=y
353CONFIG_NET_ACT_POLICE=m
354CONFIG_NET_ACT_GACT=m
355CONFIG_GACT_PROB=y
356CONFIG_NET_ACT_MIRRED=m
357CONFIG_NET_ACT_IPT=m
358CONFIG_NET_ACT_NAT=m
359CONFIG_NET_ACT_PEDIT=m
360CONFIG_NET_ACT_SIMP=m
361CONFIG_NET_ACT_SKBEDIT=m
362CONFIG_NET_ACT_CSUM=m
363CONFIG_DNS_RESOLVER=y
364CONFIG_NETLINK_DIAG=m
365CONFIG_CGROUP_NET_PRIO=y
366CONFIG_BPF_JIT=y
367CONFIG_NET_PKTGEN=m
368CONFIG_NET_TCPPROBE=m
369CONFIG_DEVTMPFS=y
370CONFIG_DMA_CMA=y
371CONFIG_CMA_SIZE_MBYTES=0
372CONFIG_CONNECTOR=y
373CONFIG_ZRAM=m
374CONFIG_BLK_DEV_LOOP=m
375CONFIG_BLK_DEV_CRYPTOLOOP=m
376CONFIG_BLK_DEV_DRBD=m
377CONFIG_BLK_DEV_NBD=m
378CONFIG_BLK_DEV_RAM=y
379CONFIG_BLK_DEV_RAM_SIZE=32768
380CONFIG_BLK_DEV_RAM_DAX=y
381CONFIG_VIRTIO_BLK=y
382CONFIG_ENCLOSURE_SERVICES=m
383CONFIG_GENWQE=m
384CONFIG_RAID_ATTRS=m
385CONFIG_SCSI=y
386CONFIG_BLK_DEV_SD=y
387CONFIG_CHR_DEV_ST=m
388CONFIG_CHR_DEV_OSST=m
389CONFIG_BLK_DEV_SR=m
390CONFIG_CHR_DEV_SG=y
391CONFIG_CHR_DEV_SCH=m
392CONFIG_SCSI_ENCLOSURE=m
393CONFIG_SCSI_CONSTANTS=y
394CONFIG_SCSI_LOGGING=y
395CONFIG_SCSI_SPI_ATTRS=m
396CONFIG_SCSI_FC_ATTRS=y
397CONFIG_SCSI_SAS_LIBSAS=m
398CONFIG_SCSI_SRP_ATTRS=m
399CONFIG_ISCSI_TCP=m
400CONFIG_SCSI_DEBUG=m
401CONFIG_ZFCP=y
402CONFIG_SCSI_VIRTIO=m
403CONFIG_SCSI_DH=y
404CONFIG_SCSI_DH_RDAC=m
405CONFIG_SCSI_DH_HP_SW=m
406CONFIG_SCSI_DH_EMC=m
407CONFIG_SCSI_DH_ALUA=m
408CONFIG_SCSI_OSD_INITIATOR=m
409CONFIG_SCSI_OSD_ULD=m
410CONFIG_MD=y
411CONFIG_BLK_DEV_MD=y
412CONFIG_MD_LINEAR=m
413CONFIG_MD_MULTIPATH=m
414CONFIG_MD_FAULTY=m
415CONFIG_BLK_DEV_DM=m
416CONFIG_DM_CRYPT=m
417CONFIG_DM_SNAPSHOT=m
418CONFIG_DM_THIN_PROVISIONING=m
419CONFIG_DM_MIRROR=m
420CONFIG_DM_LOG_USERSPACE=m
421CONFIG_DM_RAID=m
422CONFIG_DM_ZERO=m
423CONFIG_DM_MULTIPATH=m
424CONFIG_DM_MULTIPATH_QL=m
425CONFIG_DM_MULTIPATH_ST=m
426CONFIG_DM_DELAY=m
427CONFIG_DM_UEVENT=y
428CONFIG_DM_FLAKEY=m
429CONFIG_DM_VERITY=m
430CONFIG_DM_SWITCH=m
431CONFIG_NETDEVICES=y
432CONFIG_BONDING=m
433CONFIG_DUMMY=m
434CONFIG_EQUALIZER=m
435CONFIG_IFB=m
436CONFIG_MACVLAN=m
437CONFIG_MACVTAP=m
438CONFIG_VXLAN=m
439CONFIG_TUN=m
440CONFIG_VETH=m
441CONFIG_VIRTIO_NET=m
442CONFIG_NLMON=m
443# CONFIG_NET_VENDOR_ARC is not set
444# CONFIG_NET_VENDOR_CHELSIO is not set
445# CONFIG_NET_VENDOR_INTEL is not set
446# CONFIG_NET_VENDOR_MARVELL is not set
447CONFIG_MLX4_EN=m
448CONFIG_MLX5_CORE=m
449CONFIG_MLX5_CORE_EN=y
450# CONFIG_NET_VENDOR_NATSEMI is not set
451CONFIG_PPP=m
452CONFIG_PPP_BSDCOMP=m
453CONFIG_PPP_DEFLATE=m
454CONFIG_PPP_MPPE=m
455CONFIG_PPPOE=m
456CONFIG_PPTP=m
457CONFIG_PPPOL2TP=m
458CONFIG_PPP_ASYNC=m
459CONFIG_PPP_SYNC_TTY=m
460# CONFIG_INPUT_KEYBOARD is not set
461# CONFIG_INPUT_MOUSE is not set
462# CONFIG_SERIO is not set
463CONFIG_LEGACY_PTY_COUNT=0
464CONFIG_HW_RANDOM_VIRTIO=m
465CONFIG_RAW_DRIVER=m
466CONFIG_HANGCHECK_TIMER=m
467CONFIG_TN3270_FS=y
468# CONFIG_HWMON is not set
469CONFIG_WATCHDOG=y
470CONFIG_WATCHDOG_NOWAYOUT=y
471CONFIG_SOFT_WATCHDOG=m
472CONFIG_DIAG288_WATCHDOG=m
473# CONFIG_HID is not set
474# CONFIG_USB_SUPPORT is not set
475CONFIG_INFINIBAND=m
476CONFIG_INFINIBAND_USER_ACCESS=m
477CONFIG_MLX4_INFINIBAND=m
478CONFIG_MLX5_INFINIBAND=m
479CONFIG_VFIO=m
480CONFIG_VFIO_PCI=m
481CONFIG_VIRTIO_BALLOON=m
482CONFIG_EXT4_FS=y
483CONFIG_EXT4_FS_POSIX_ACL=y
484CONFIG_EXT4_FS_SECURITY=y
485CONFIG_EXT4_ENCRYPTION=y
486CONFIG_JBD2_DEBUG=y
487CONFIG_JFS_FS=m
488CONFIG_JFS_POSIX_ACL=y
489CONFIG_JFS_SECURITY=y
490CONFIG_JFS_STATISTICS=y
491CONFIG_XFS_FS=y
492CONFIG_XFS_QUOTA=y
493CONFIG_XFS_POSIX_ACL=y
494CONFIG_XFS_RT=y
495CONFIG_GFS2_FS=m
496CONFIG_GFS2_FS_LOCKING_DLM=y
497CONFIG_OCFS2_FS=m
498CONFIG_BTRFS_FS=y
499CONFIG_BTRFS_FS_POSIX_ACL=y
500CONFIG_NILFS2_FS=m
501CONFIG_FS_DAX=y
502CONFIG_EXPORTFS_BLOCK_OPS=y
503CONFIG_FANOTIFY=y
504CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
505CONFIG_QUOTA_NETLINK_INTERFACE=y
506CONFIG_QFMT_V1=m
507CONFIG_QFMT_V2=m
508CONFIG_AUTOFS4_FS=m
509CONFIG_FUSE_FS=y
510CONFIG_CUSE=m
511CONFIG_OVERLAY_FS=m
512CONFIG_OVERLAY_FS_REDIRECT_DIR=y
513CONFIG_FSCACHE=m
514CONFIG_CACHEFILES=m
515CONFIG_ISO9660_FS=y
516CONFIG_JOLIET=y
517CONFIG_ZISOFS=y
518CONFIG_UDF_FS=m
519CONFIG_MSDOS_FS=m
520CONFIG_VFAT_FS=m
521CONFIG_NTFS_FS=m
522CONFIG_NTFS_RW=y
523CONFIG_PROC_KCORE=y
524CONFIG_TMPFS=y
525CONFIG_TMPFS_POSIX_ACL=y
526CONFIG_HUGETLBFS=y
527CONFIG_CONFIGFS_FS=m
528CONFIG_ECRYPT_FS=m
529CONFIG_CRAMFS=m
530CONFIG_SQUASHFS=m
531CONFIG_SQUASHFS_XATTR=y
532CONFIG_SQUASHFS_LZO=y
533CONFIG_SQUASHFS_XZ=y
534CONFIG_ROMFS_FS=m
535CONFIG_NFS_FS=m
536CONFIG_NFS_V3_ACL=y
537CONFIG_NFS_V4=m
538CONFIG_NFS_SWAP=y
539CONFIG_NFSD=m
540CONFIG_NFSD_V3_ACL=y
541CONFIG_NFSD_V4=y
542CONFIG_NFSD_V4_SECURITY_LABEL=y
543CONFIG_CIFS=m
544CONFIG_CIFS_STATS=y
545CONFIG_CIFS_STATS2=y
546CONFIG_CIFS_WEAK_PW_HASH=y
547CONFIG_CIFS_UPCALL=y
548CONFIG_CIFS_XATTR=y
549CONFIG_CIFS_POSIX=y
550# CONFIG_CIFS_DEBUG is not set
551CONFIG_CIFS_DFS_UPCALL=y
552CONFIG_NLS_DEFAULT="utf8"
553CONFIG_NLS_CODEPAGE_437=m
554CONFIG_NLS_CODEPAGE_850=m
555CONFIG_NLS_ASCII=m
556CONFIG_NLS_ISO8859_1=m
557CONFIG_NLS_ISO8859_15=m
558CONFIG_NLS_UTF8=m
559CONFIG_DLM=m
560CONFIG_PRINTK_TIME=y
561CONFIG_DEBUG_INFO=y
562CONFIG_DEBUG_INFO_DWARF4=y
563CONFIG_GDB_SCRIPTS=y
564# CONFIG_ENABLE_MUST_CHECK is not set
565CONFIG_FRAME_WARN=1024
566CONFIG_UNUSED_SYMBOLS=y
567CONFIG_MAGIC_SYSRQ=y
568CONFIG_DEBUG_MEMORY_INIT=y
569CONFIG_PANIC_ON_OOPS=y
570CONFIG_RCU_TORTURE_TEST=m
571CONFIG_RCU_CPU_STALL_TIMEOUT=60
572CONFIG_LATENCYTOP=y
573CONFIG_SCHED_TRACER=y
574CONFIG_FTRACE_SYSCALLS=y
575CONFIG_STACK_TRACER=y
576CONFIG_BLK_DEV_IO_TRACE=y
577CONFIG_FUNCTION_PROFILER=y
578CONFIG_HIST_TRIGGERS=y
579CONFIG_LKDTM=m
580CONFIG_PERCPU_TEST=m
581CONFIG_ATOMIC64_SELFTEST=y
582CONFIG_TEST_BPF=m
583CONFIG_BUG_ON_DATA_CORRUPTION=y
584CONFIG_S390_PTDUMP=y
585CONFIG_PERSISTENT_KEYRINGS=y
586CONFIG_BIG_KEYS=y
587CONFIG_ENCRYPTED_KEYS=m
588CONFIG_SECURITY=y
589CONFIG_SECURITY_NETWORK=y
590CONFIG_SECURITY_SELINUX=y
591CONFIG_SECURITY_SELINUX_BOOTPARAM=y
592CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
593CONFIG_SECURITY_SELINUX_DISABLE=y
594CONFIG_INTEGRITY_SIGNATURE=y
595CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
596CONFIG_IMA=y
597CONFIG_IMA_WRITE_POLICY=y
598CONFIG_IMA_APPRAISE=y
599CONFIG_CRYPTO_DH=m
600CONFIG_CRYPTO_ECDH=m
601CONFIG_CRYPTO_USER=m
602# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
603CONFIG_CRYPTO_PCRYPT=m
604CONFIG_CRYPTO_CRYPTD=m
605CONFIG_CRYPTO_MCRYPTD=m
606CONFIG_CRYPTO_TEST=m
607CONFIG_CRYPTO_CHACHA20POLY1305=m
608CONFIG_CRYPTO_LRW=m
609CONFIG_CRYPTO_PCBC=m
610CONFIG_CRYPTO_KEYWRAP=m
611CONFIG_CRYPTO_XCBC=m
612CONFIG_CRYPTO_VMAC=m
613CONFIG_CRYPTO_CRC32=m
614CONFIG_CRYPTO_MICHAEL_MIC=m
615CONFIG_CRYPTO_RMD128=m
616CONFIG_CRYPTO_RMD160=m
617CONFIG_CRYPTO_RMD256=m
618CONFIG_CRYPTO_RMD320=m
619CONFIG_CRYPTO_SHA512=m
620CONFIG_CRYPTO_SHA3=m
621CONFIG_CRYPTO_TGR192=m
622CONFIG_CRYPTO_WP512=m
623CONFIG_CRYPTO_AES_TI=m
624CONFIG_CRYPTO_ANUBIS=m
625CONFIG_CRYPTO_BLOWFISH=m
626CONFIG_CRYPTO_CAMELLIA=m
627CONFIG_CRYPTO_CAST5=m
628CONFIG_CRYPTO_CAST6=m
629CONFIG_CRYPTO_FCRYPT=m
630CONFIG_CRYPTO_KHAZAD=m
631CONFIG_CRYPTO_SALSA20=m
632CONFIG_CRYPTO_SEED=m
633CONFIG_CRYPTO_SERPENT=m
634CONFIG_CRYPTO_TEA=m
635CONFIG_CRYPTO_TWOFISH=m
636CONFIG_CRYPTO_842=m
637CONFIG_CRYPTO_LZ4=m
638CONFIG_CRYPTO_LZ4HC=m
639CONFIG_CRYPTO_ANSI_CPRNG=m
640CONFIG_CRYPTO_USER_API_HASH=m
641CONFIG_CRYPTO_USER_API_SKCIPHER=m
642CONFIG_CRYPTO_USER_API_RNG=m
643CONFIG_CRYPTO_USER_API_AEAD=m
644CONFIG_ZCRYPT=m
645CONFIG_PKEY=m
646CONFIG_CRYPTO_PAES_S390=m
647CONFIG_CRYPTO_SHA1_S390=m
648CONFIG_CRYPTO_SHA256_S390=m
649CONFIG_CRYPTO_SHA512_S390=m
650CONFIG_CRYPTO_DES_S390=m
651CONFIG_CRYPTO_AES_S390=m
652CONFIG_CRYPTO_GHASH_S390=m
653CONFIG_CRYPTO_CRC32_S390=y
654CONFIG_CRC7=m
655CONFIG_CRC8=m
656CONFIG_CORDIC=m
657CONFIG_CMM=m
658CONFIG_APPLDATA_BASE=y
659CONFIG_KVM=m
660CONFIG_KVM_S390_UCONTROL=y
661CONFIG_VHOST_NET=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 20ed149e1137..c105bcc6d7a6 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -25,13 +25,13 @@ CONFIG_CPUSETS=y
25CONFIG_CGROUP_DEVICE=y 25CONFIG_CGROUP_DEVICE=y
26CONFIG_CGROUP_CPUACCT=y 26CONFIG_CGROUP_CPUACCT=y
27CONFIG_CGROUP_PERF=y 27CONFIG_CGROUP_PERF=y
28CONFIG_CHECKPOINT_RESTORE=y
29CONFIG_NAMESPACES=y 28CONFIG_NAMESPACES=y
30CONFIG_USER_NS=y 29CONFIG_USER_NS=y
31CONFIG_SCHED_AUTOGROUP=y 30CONFIG_SCHED_AUTOGROUP=y
32CONFIG_BLK_DEV_INITRD=y 31CONFIG_BLK_DEV_INITRD=y
33CONFIG_EXPERT=y 32CONFIG_EXPERT=y
34# CONFIG_SYSFS_SYSCALL is not set 33# CONFIG_SYSFS_SYSCALL is not set
34CONFIG_CHECKPOINT_RESTORE=y
35CONFIG_BPF_SYSCALL=y 35CONFIG_BPF_SYSCALL=y
36CONFIG_USERFAULTFD=y 36CONFIG_USERFAULTFD=y
37# CONFIG_COMPAT_BRK is not set 37# CONFIG_COMPAT_BRK is not set
@@ -45,6 +45,8 @@ CONFIG_MODULE_UNLOAD=y
45CONFIG_MODULE_FORCE_UNLOAD=y 45CONFIG_MODULE_FORCE_UNLOAD=y
46CONFIG_MODVERSIONS=y 46CONFIG_MODVERSIONS=y
47CONFIG_MODULE_SRCVERSION_ALL=y 47CONFIG_MODULE_SRCVERSION_ALL=y
48CONFIG_MODULE_SIG=y
49CONFIG_MODULE_SIG_SHA256=y
48CONFIG_BLK_DEV_INTEGRITY=y 50CONFIG_BLK_DEV_INTEGRITY=y
49CONFIG_BLK_DEV_THROTTLING=y 51CONFIG_BLK_DEV_THROTTLING=y
50CONFIG_BLK_WBT=y 52CONFIG_BLK_WBT=y
@@ -62,6 +64,7 @@ CONFIG_TUNE_ZEC12=y
62CONFIG_NR_CPUS=512 64CONFIG_NR_CPUS=512
63CONFIG_NUMA=y 65CONFIG_NUMA=y
64CONFIG_HZ_100=y 66CONFIG_HZ_100=y
67CONFIG_KEXEC_FILE=y
65CONFIG_MEMORY_HOTPLUG=y 68CONFIG_MEMORY_HOTPLUG=y
66CONFIG_MEMORY_HOTREMOVE=y 69CONFIG_MEMORY_HOTREMOVE=y
67CONFIG_KSM=y 70CONFIG_KSM=y
@@ -301,7 +304,6 @@ CONFIG_IP6_NF_SECURITY=m
301CONFIG_IP6_NF_NAT=m 304CONFIG_IP6_NF_NAT=m
302CONFIG_IP6_NF_TARGET_MASQUERADE=m 305CONFIG_IP6_NF_TARGET_MASQUERADE=m
303CONFIG_NF_TABLES_BRIDGE=m 306CONFIG_NF_TABLES_BRIDGE=m
304CONFIG_NET_SCTPPROBE=m
305CONFIG_RDS=m 307CONFIG_RDS=m
306CONFIG_RDS_RDMA=m 308CONFIG_RDS_RDMA=m
307CONFIG_RDS_TCP=m 309CONFIG_RDS_TCP=m
@@ -359,11 +361,11 @@ CONFIG_NET_ACT_SIMP=m
359CONFIG_NET_ACT_SKBEDIT=m 361CONFIG_NET_ACT_SKBEDIT=m
360CONFIG_NET_ACT_CSUM=m 362CONFIG_NET_ACT_CSUM=m
361CONFIG_DNS_RESOLVER=y 363CONFIG_DNS_RESOLVER=y
364CONFIG_OPENVSWITCH=m
362CONFIG_NETLINK_DIAG=m 365CONFIG_NETLINK_DIAG=m
363CONFIG_CGROUP_NET_PRIO=y 366CONFIG_CGROUP_NET_PRIO=y
364CONFIG_BPF_JIT=y 367CONFIG_BPF_JIT=y
365CONFIG_NET_PKTGEN=m 368CONFIG_NET_PKTGEN=m
366CONFIG_NET_TCPPROBE=m
367CONFIG_DEVTMPFS=y 369CONFIG_DEVTMPFS=y
368CONFIG_DMA_CMA=y 370CONFIG_DMA_CMA=y
369CONFIG_CMA_SIZE_MBYTES=0 371CONFIG_CMA_SIZE_MBYTES=0
@@ -375,8 +377,9 @@ CONFIG_BLK_DEV_DRBD=m
375CONFIG_BLK_DEV_NBD=m 377CONFIG_BLK_DEV_NBD=m
376CONFIG_BLK_DEV_RAM=y 378CONFIG_BLK_DEV_RAM=y
377CONFIG_BLK_DEV_RAM_SIZE=32768 379CONFIG_BLK_DEV_RAM_SIZE=32768
378CONFIG_BLK_DEV_RAM_DAX=y
379CONFIG_VIRTIO_BLK=y 380CONFIG_VIRTIO_BLK=y
381CONFIG_BLK_DEV_RBD=m
382CONFIG_BLK_DEV_NVME=m
380CONFIG_ENCLOSURE_SERVICES=m 383CONFIG_ENCLOSURE_SERVICES=m
381CONFIG_GENWQE=m 384CONFIG_GENWQE=m
382CONFIG_RAID_ATTRS=m 385CONFIG_RAID_ATTRS=m
@@ -455,6 +458,7 @@ CONFIG_PPTP=m
455CONFIG_PPPOL2TP=m 458CONFIG_PPPOL2TP=m
456CONFIG_PPP_ASYNC=m 459CONFIG_PPP_ASYNC=m
457CONFIG_PPP_SYNC_TTY=m 460CONFIG_PPP_SYNC_TTY=m
461CONFIG_INPUT_EVDEV=y
458# CONFIG_INPUT_KEYBOARD is not set 462# CONFIG_INPUT_KEYBOARD is not set
459# CONFIG_INPUT_MOUSE is not set 463# CONFIG_INPUT_MOUSE is not set
460# CONFIG_SERIO is not set 464# CONFIG_SERIO is not set
@@ -468,6 +472,9 @@ CONFIG_WATCHDOG=y
468CONFIG_WATCHDOG_NOWAYOUT=y 472CONFIG_WATCHDOG_NOWAYOUT=y
469CONFIG_SOFT_WATCHDOG=m 473CONFIG_SOFT_WATCHDOG=m
470CONFIG_DIAG288_WATCHDOG=m 474CONFIG_DIAG288_WATCHDOG=m
475CONFIG_DRM=y
476CONFIG_DRM_VIRTIO_GPU=y
477CONFIG_FRAMEBUFFER_CONSOLE=y
471# CONFIG_HID is not set 478# CONFIG_HID is not set
472# CONFIG_USB_SUPPORT is not set 479# CONFIG_USB_SUPPORT is not set
473CONFIG_INFINIBAND=m 480CONFIG_INFINIBAND=m
@@ -476,7 +483,9 @@ CONFIG_MLX4_INFINIBAND=m
476CONFIG_MLX5_INFINIBAND=m 483CONFIG_MLX5_INFINIBAND=m
477CONFIG_VFIO=m 484CONFIG_VFIO=m
478CONFIG_VFIO_PCI=m 485CONFIG_VFIO_PCI=m
486CONFIG_VIRTIO_PCI=m
479CONFIG_VIRTIO_BALLOON=m 487CONFIG_VIRTIO_BALLOON=m
488CONFIG_VIRTIO_INPUT=y
480CONFIG_EXT4_FS=y 489CONFIG_EXT4_FS=y
481CONFIG_EXT4_FS_POSIX_ACL=y 490CONFIG_EXT4_FS_POSIX_ACL=y
482CONFIG_EXT4_FS_SECURITY=y 491CONFIG_EXT4_FS_SECURITY=y
@@ -507,7 +516,6 @@ CONFIG_AUTOFS4_FS=m
507CONFIG_FUSE_FS=y 516CONFIG_FUSE_FS=y
508CONFIG_CUSE=m 517CONFIG_CUSE=m
509CONFIG_OVERLAY_FS=m 518CONFIG_OVERLAY_FS=m
510CONFIG_OVERLAY_FS_REDIRECT_DIR=y
511CONFIG_FSCACHE=m 519CONFIG_FSCACHE=m
512CONFIG_CACHEFILES=m 520CONFIG_CACHEFILES=m
513CONFIG_ISO9660_FS=y 521CONFIG_ISO9660_FS=y
@@ -592,8 +600,10 @@ CONFIG_SECURITY_SELINUX_DISABLE=y
592CONFIG_INTEGRITY_SIGNATURE=y 600CONFIG_INTEGRITY_SIGNATURE=y
593CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y 601CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
594CONFIG_IMA=y 602CONFIG_IMA=y
603CONFIG_IMA_DEFAULT_HASH_SHA256=y
595CONFIG_IMA_WRITE_POLICY=y 604CONFIG_IMA_WRITE_POLICY=y
596CONFIG_IMA_APPRAISE=y 605CONFIG_IMA_APPRAISE=y
606CONFIG_CRYPTO_FIPS=y
597CONFIG_CRYPTO_DH=m 607CONFIG_CRYPTO_DH=m
598CONFIG_CRYPTO_ECDH=m 608CONFIG_CRYPTO_ECDH=m
599CONFIG_CRYPTO_USER=m 609CONFIG_CRYPTO_USER=m
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 46a3178d8bc6..f40600eb1762 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -8,6 +8,7 @@ CONFIG_TASKSTATS=y
8CONFIG_TASK_DELAY_ACCT=y 8CONFIG_TASK_DELAY_ACCT=y
9CONFIG_TASK_XACCT=y 9CONFIG_TASK_XACCT=y
10CONFIG_TASK_IO_ACCOUNTING=y 10CONFIG_TASK_IO_ACCOUNTING=y
11# CONFIG_CPU_ISOLATION is not set
11CONFIG_IKCONFIG=y 12CONFIG_IKCONFIG=y
12CONFIG_IKCONFIG_PROC=y 13CONFIG_IKCONFIG_PROC=y
13CONFIG_CGROUPS=y 14CONFIG_CGROUPS=y
@@ -23,12 +24,12 @@ CONFIG_CPUSETS=y
23CONFIG_CGROUP_DEVICE=y 24CONFIG_CGROUP_DEVICE=y
24CONFIG_CGROUP_CPUACCT=y 25CONFIG_CGROUP_CPUACCT=y
25CONFIG_CGROUP_PERF=y 26CONFIG_CGROUP_PERF=y
26CONFIG_CHECKPOINT_RESTORE=y
27CONFIG_NAMESPACES=y 27CONFIG_NAMESPACES=y
28CONFIG_USER_NS=y 28CONFIG_USER_NS=y
29CONFIG_BLK_DEV_INITRD=y 29CONFIG_BLK_DEV_INITRD=y
30CONFIG_EXPERT=y 30CONFIG_EXPERT=y
31# CONFIG_SYSFS_SYSCALL is not set 31# CONFIG_SYSFS_SYSCALL is not set
32CONFIG_CHECKPOINT_RESTORE=y
32CONFIG_BPF_SYSCALL=y 33CONFIG_BPF_SYSCALL=y
33CONFIG_USERFAULTFD=y 34CONFIG_USERFAULTFD=y
34# CONFIG_COMPAT_BRK is not set 35# CONFIG_COMPAT_BRK is not set
@@ -47,6 +48,7 @@ CONFIG_LIVEPATCH=y
47CONFIG_NR_CPUS=256 48CONFIG_NR_CPUS=256
48CONFIG_NUMA=y 49CONFIG_NUMA=y
49CONFIG_HZ_100=y 50CONFIG_HZ_100=y
51CONFIG_KEXEC_FILE=y
50CONFIG_MEMORY_HOTPLUG=y 52CONFIG_MEMORY_HOTPLUG=y
51CONFIG_MEMORY_HOTREMOVE=y 53CONFIG_MEMORY_HOTREMOVE=y
52CONFIG_KSM=y 54CONFIG_KSM=y
@@ -129,10 +131,13 @@ CONFIG_EQUALIZER=m
129CONFIG_TUN=m 131CONFIG_TUN=m
130CONFIG_VIRTIO_NET=y 132CONFIG_VIRTIO_NET=y
131# CONFIG_NET_VENDOR_ALACRITECH is not set 133# CONFIG_NET_VENDOR_ALACRITECH is not set
134# CONFIG_NET_VENDOR_CORTINA is not set
132# CONFIG_NET_VENDOR_SOLARFLARE is not set 135# CONFIG_NET_VENDOR_SOLARFLARE is not set
136# CONFIG_NET_VENDOR_SOCIONEXT is not set
133# CONFIG_NET_VENDOR_SYNOPSYS is not set 137# CONFIG_NET_VENDOR_SYNOPSYS is not set
134# CONFIG_INPUT is not set 138# CONFIG_INPUT is not set
135# CONFIG_SERIO is not set 139# CONFIG_SERIO is not set
140# CONFIG_VT is not set
136CONFIG_DEVKMEM=y 141CONFIG_DEVKMEM=y
137CONFIG_RAW_DRIVER=m 142CONFIG_RAW_DRIVER=m
138CONFIG_VIRTIO_BALLOON=y 143CONFIG_VIRTIO_BALLOON=y
@@ -177,13 +182,15 @@ CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
177CONFIG_STACK_TRACER=y 182CONFIG_STACK_TRACER=y
178CONFIG_BLK_DEV_IO_TRACE=y 183CONFIG_BLK_DEV_IO_TRACE=y
179CONFIG_FUNCTION_PROFILER=y 184CONFIG_FUNCTION_PROFILER=y
180CONFIG_KPROBES_SANITY_TEST=y 185# CONFIG_RUNTIME_TESTING_MENU is not set
181CONFIG_S390_PTDUMP=y 186CONFIG_S390_PTDUMP=y
182CONFIG_CRYPTO_CRYPTD=m 187CONFIG_CRYPTO_CRYPTD=m
188CONFIG_CRYPTO_AUTHENC=m
183CONFIG_CRYPTO_TEST=m 189CONFIG_CRYPTO_TEST=m
184CONFIG_CRYPTO_CCM=m 190CONFIG_CRYPTO_CCM=m
185CONFIG_CRYPTO_GCM=m 191CONFIG_CRYPTO_GCM=m
186CONFIG_CRYPTO_CBC=y 192CONFIG_CRYPTO_CBC=y
193CONFIG_CRYPTO_CFB=m
187CONFIG_CRYPTO_CTS=m 194CONFIG_CRYPTO_CTS=m
188CONFIG_CRYPTO_LRW=m 195CONFIG_CRYPTO_LRW=m
189CONFIG_CRYPTO_PCBC=m 196CONFIG_CRYPTO_PCBC=m
@@ -213,6 +220,8 @@ CONFIG_CRYPTO_KHAZAD=m
213CONFIG_CRYPTO_SALSA20=m 220CONFIG_CRYPTO_SALSA20=m
214CONFIG_CRYPTO_SEED=m 221CONFIG_CRYPTO_SEED=m
215CONFIG_CRYPTO_SERPENT=m 222CONFIG_CRYPTO_SERPENT=m
223CONFIG_CRYPTO_SM4=m
224CONFIG_CRYPTO_SPECK=m
216CONFIG_CRYPTO_TEA=m 225CONFIG_CRYPTO_TEA=m
217CONFIG_CRYPTO_TWOFISH=m 226CONFIG_CRYPTO_TWOFISH=m
218CONFIG_CRYPTO_DEFLATE=m 227CONFIG_CRYPTO_DEFLATE=m
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 43bbe63e2992..06b513d192b9 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -320,7 +320,7 @@ static void hypfs_kill_super(struct super_block *sb)
320 320
321 if (sb->s_root) 321 if (sb->s_root)
322 hypfs_delete_tree(sb->s_root); 322 hypfs_delete_tree(sb->s_root);
323 if (sb_info->update_file) 323 if (sb_info && sb_info->update_file)
324 hypfs_remove(sb_info->update_file); 324 hypfs_remove(sb_info->update_file);
325 kfree(sb->s_fs_info); 325 kfree(sb->s_fs_info);
326 sb->s_fs_info = NULL; 326 sb->s_fs_info = NULL;
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
index 1d708a419326..825dd0f7f221 100644
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -46,4 +46,27 @@
46static inline void crash_setup_regs(struct pt_regs *newregs, 46static inline void crash_setup_regs(struct pt_regs *newregs,
47 struct pt_regs *oldregs) { } 47 struct pt_regs *oldregs) { }
48 48
49struct kimage;
50struct s390_load_data {
51 /* Pointer to the kernel buffer. Used to register cmdline etc.. */
52 void *kernel_buf;
53
54 /* Total size of loaded segments in memory. Used as an offset. */
55 size_t memsz;
56
57 /* Load address of initrd. Used to register INITRD_START in kernel. */
58 unsigned long initrd_load_addr;
59};
60
61int kexec_file_add_purgatory(struct kimage *image,
62 struct s390_load_data *data);
63int kexec_file_add_initrd(struct kimage *image,
64 struct s390_load_data *data,
65 char *initrd, unsigned long initrd_len);
66int *kexec_file_update_kernel(struct kimage *iamge,
67 struct s390_load_data *data);
68
69extern const struct kexec_file_ops s390_kexec_image_ops;
70extern const struct kexec_file_ops s390_kexec_elf_ops;
71
49#endif /*_S390_KEXEC_H */ 72#endif /*_S390_KEXEC_H */
diff --git a/arch/s390/include/asm/purgatory.h b/arch/s390/include/asm/purgatory.h
new file mode 100644
index 000000000000..e297bcfc476f
--- /dev/null
+++ b/arch/s390/include/asm/purgatory.h
@@ -0,0 +1,17 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright IBM Corp. 2018
4 *
5 * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
6 */
7
8#ifndef _S390_PURGATORY_H_
9#define _S390_PURGATORY_H_
10#ifndef __ASSEMBLY__
11
12#include <linux/purgatory.h>
13
14int verify_sha256_digest(void);
15
16#endif /* __ASSEMBLY__ */
17#endif /* _S390_PURGATORY_H_ */
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 124154fdfc97..9c30ebe046f3 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -1,7 +1,7 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * S390 version 3 * S390 version
4 * Copyright IBM Corp. 1999, 2010 4 * Copyright IBM Corp. 1999, 2017
5 */ 5 */
6#ifndef _ASM_S390_SETUP_H 6#ifndef _ASM_S390_SETUP_H
7#define _ASM_S390_SETUP_H 7#define _ASM_S390_SETUP_H
@@ -37,17 +37,31 @@
37#define LPP_MAGIC _BITUL(31) 37#define LPP_MAGIC _BITUL(31)
38#define LPP_PID_MASK _AC(0xffffffff, UL) 38#define LPP_PID_MASK _AC(0xffffffff, UL)
39 39
40/* Offsets to entry points in kernel/head.S */
41
42#define STARTUP_NORMAL_OFFSET 0x10000
43#define STARTUP_KDUMP_OFFSET 0x10010
44
45/* Offsets to parameters in kernel/head.S */
46
47#define IPL_DEVICE_OFFSET 0x10400
48#define INITRD_START_OFFSET 0x10408
49#define INITRD_SIZE_OFFSET 0x10410
50#define OLDMEM_BASE_OFFSET 0x10418
51#define OLDMEM_SIZE_OFFSET 0x10420
52#define COMMAND_LINE_OFFSET 0x10480
53
40#ifndef __ASSEMBLY__ 54#ifndef __ASSEMBLY__
41 55
42#include <asm/lowcore.h> 56#include <asm/lowcore.h>
43#include <asm/types.h> 57#include <asm/types.h>
44 58
45#define IPL_DEVICE (*(unsigned long *) (0x10400)) 59#define IPL_DEVICE (*(unsigned long *) (IPL_DEVICE_OFFSET))
46#define INITRD_START (*(unsigned long *) (0x10408)) 60#define INITRD_START (*(unsigned long *) (INITRD_START_OFFSET))
47#define INITRD_SIZE (*(unsigned long *) (0x10410)) 61#define INITRD_SIZE (*(unsigned long *) (INITRD_SIZE_OFFSET))
48#define OLDMEM_BASE (*(unsigned long *) (0x10418)) 62#define OLDMEM_BASE (*(unsigned long *) (OLDMEM_BASE_OFFSET))
49#define OLDMEM_SIZE (*(unsigned long *) (0x10420)) 63#define OLDMEM_SIZE (*(unsigned long *) (OLDMEM_SIZE_OFFSET))
50#define COMMAND_LINE ((char *) (0x10480)) 64#define COMMAND_LINE ((char *) (COMMAND_LINE_OFFSET))
51 65
52extern int memory_end_set; 66extern int memory_end_set;
53extern unsigned long memory_end; 67extern unsigned long memory_end;
@@ -121,12 +135,12 @@ extern void (*_machine_power_off)(void);
121 135
122#else /* __ASSEMBLY__ */ 136#else /* __ASSEMBLY__ */
123 137
124#define IPL_DEVICE 0x10400 138#define IPL_DEVICE (IPL_DEVICE_OFFSET)
125#define INITRD_START 0x10408 139#define INITRD_START (INITRD_START_OFFSET)
126#define INITRD_SIZE 0x10410 140#define INITRD_SIZE (INITRD_SIZE_OFFSET)
127#define OLDMEM_BASE 0x10418 141#define OLDMEM_BASE (OLDMEM_BASE_OFFSET)
128#define OLDMEM_SIZE 0x10420 142#define OLDMEM_SIZE (OLDMEM_SIZE_OFFSET)
129#define COMMAND_LINE 0x10480 143#define COMMAND_LINE (COMMAND_LINE_OFFSET)
130 144
131#endif /* __ASSEMBLY__ */ 145#endif /* __ASSEMBLY__ */
132#endif /* _ASM_S390_SETUP_H */ 146#endif /* _ASM_S390_SETUP_H */
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 83ba57533ce6..3c883c368eb0 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -45,6 +45,9 @@ struct thread_info {
45void arch_release_task_struct(struct task_struct *tsk); 45void arch_release_task_struct(struct task_struct *tsk);
46int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); 46int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
47 47
48void arch_setup_new_exec(void);
49#define arch_setup_new_exec arch_setup_new_exec
50
48#endif 51#endif
49 52
50/* 53/*
diff --git a/arch/s390/include/uapi/asm/signal.h b/arch/s390/include/uapi/asm/signal.h
index c57f9d28d894..9a14a611ed82 100644
--- a/arch/s390/include/uapi/asm/signal.h
+++ b/arch/s390/include/uapi/asm/signal.h
@@ -97,22 +97,31 @@ typedef unsigned long sigset_t;
97#include <asm-generic/signal-defs.h> 97#include <asm-generic/signal-defs.h>
98 98
99#ifndef __KERNEL__ 99#ifndef __KERNEL__
100/* Here we must cater to libcs that poke about in kernel headers. */
101 100
101/*
102 * There are two system calls in regard to sigaction, sys_rt_sigaction
103 * and sys_sigaction. Internally the kernel uses the struct old_sigaction
104 * for the older sys_sigaction system call, and the kernel version of the
105 * struct sigaction for the newer sys_rt_sigaction.
106 *
107 * The uapi definition for struct sigaction has made a strange distinction
108 * between 31-bit and 64-bit in the past. For 64-bit the uapi structure
109 * looks like the kernel struct sigaction, but for 31-bit it used to
110 * look like the kernel struct old_sigaction. That practically made the
111 * structure unusable for either system call. To get around this problem
112 * the glibc always had its own definitions for the sigaction structures.
113 *
114 * The current struct sigaction uapi definition below is suitable for the
115 * sys_rt_sigaction system call only.
116 */
102struct sigaction { 117struct sigaction {
103 union { 118 union {
104 __sighandler_t _sa_handler; 119 __sighandler_t _sa_handler;
105 void (*_sa_sigaction)(int, struct siginfo *, void *); 120 void (*_sa_sigaction)(int, struct siginfo *, void *);
106 } _u; 121 } _u;
107#ifndef __s390x__ /* lovely */
108 sigset_t sa_mask;
109 unsigned long sa_flags;
110 void (*sa_restorer)(void);
111#else /* __s390x__ */
112 unsigned long sa_flags; 122 unsigned long sa_flags;
113 void (*sa_restorer)(void); 123 void (*sa_restorer)(void);
114 sigset_t sa_mask; 124 sigset_t sa_mask;
115#endif /* __s390x__ */
116}; 125};
117 126
118#define sa_handler _u._sa_handler 127#define sa_handler _u._sa_handler
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index b06a6f79c1ec..84ea6225efb4 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -82,6 +82,9 @@ obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
82obj-$(CONFIG_CRASH_DUMP) += crash_dump.o 82obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
83obj-$(CONFIG_UPROBES) += uprobes.o 83obj-$(CONFIG_UPROBES) += uprobes.o
84 84
85obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o
86obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o
87
85obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o 88obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o
86obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o 89obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o
87 90
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index cfe2c45c5180..eb2a5c0443cd 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -10,6 +10,7 @@
10#include <linux/kbuild.h> 10#include <linux/kbuild.h>
11#include <linux/kvm_host.h> 11#include <linux/kvm_host.h>
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/purgatory.h>
13#include <asm/idle.h> 14#include <asm/idle.h>
14#include <asm/vdso.h> 15#include <asm/vdso.h>
15#include <asm/pgtable.h> 16#include <asm/pgtable.h>
@@ -204,5 +205,9 @@ int main(void)
204 OFFSET(__GMAP_ASCE, gmap, asce); 205 OFFSET(__GMAP_ASCE, gmap, asce);
205 OFFSET(__SIE_PROG0C, kvm_s390_sie_block, prog0c); 206 OFFSET(__SIE_PROG0C, kvm_s390_sie_block, prog0c);
206 OFFSET(__SIE_PROG20, kvm_s390_sie_block, prog20); 207 OFFSET(__SIE_PROG20, kvm_s390_sie_block, prog20);
208 /* kexec_sha_region */
209 OFFSET(__KEXEC_SHA_REGION_START, kexec_sha_region, start);
210 OFFSET(__KEXEC_SHA_REGION_LEN, kexec_sha_region, len);
211 DEFINE(__KEXEC_SHA_REGION_SIZE, sizeof(struct kexec_sha_region));
207 return 0; 212 return 0;
208} 213}
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index 11e9d8b5c1b0..607c5e9fba3d 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -182,3 +182,4 @@ COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int,
182COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb); 182COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb);
183COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer); 183COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer);
184COMPAT_SYSCALL_WRAP4(s390_sthyi, unsigned long, code, void __user *, info, u64 __user *, rc, unsigned long, flags); 184COMPAT_SYSCALL_WRAP4(s390_sthyi, unsigned long, code, void __user *, info, u64 __user *, rc, unsigned long, flags);
185COMPAT_SYSCALL_WRAP5(kexec_file_load, int, kernel_fd, int, initrd_fd, unsigned long, cmdline_len, const char __user *, cmdline_ptr, unsigned long, flags)
diff --git a/arch/s390/kernel/kexec_elf.c b/arch/s390/kernel/kexec_elf.c
new file mode 100644
index 000000000000..5a286b012043
--- /dev/null
+++ b/arch/s390/kernel/kexec_elf.c
@@ -0,0 +1,147 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * ELF loader for kexec_file_load system call.
4 *
5 * Copyright IBM Corp. 2018
6 *
7 * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
8 */
9
10#include <linux/errno.h>
11#include <linux/kernel.h>
12#include <linux/kexec.h>
13#include <asm/setup.h>
14
15static int kexec_file_add_elf_kernel(struct kimage *image,
16 struct s390_load_data *data,
17 char *kernel, unsigned long kernel_len)
18{
19 struct kexec_buf buf;
20 const Elf_Ehdr *ehdr;
21 const Elf_Phdr *phdr;
22 int i, ret;
23
24 ehdr = (Elf_Ehdr *)kernel;
25 buf.image = image;
26
27 phdr = (void *)ehdr + ehdr->e_phoff;
28 for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
29 if (phdr->p_type != PT_LOAD)
30 continue;
31
32 buf.buffer = kernel + phdr->p_offset;
33 buf.bufsz = phdr->p_filesz;
34
35 buf.mem = ALIGN(phdr->p_paddr, phdr->p_align);
36 buf.memsz = phdr->p_memsz;
37
38 if (phdr->p_paddr == 0) {
39 data->kernel_buf = buf.buffer;
40 data->memsz += STARTUP_NORMAL_OFFSET;
41
42 buf.buffer += STARTUP_NORMAL_OFFSET;
43 buf.bufsz -= STARTUP_NORMAL_OFFSET;
44
45 buf.mem += STARTUP_NORMAL_OFFSET;
46 buf.memsz -= STARTUP_NORMAL_OFFSET;
47 }
48
49 if (image->type == KEXEC_TYPE_CRASH)
50 buf.mem += crashk_res.start;
51
52 ret = kexec_add_buffer(&buf);
53 if (ret)
54 return ret;
55
56 data->memsz += buf.memsz;
57 }
58
59 return 0;
60}
61
62static void *s390_elf_load(struct kimage *image,
63 char *kernel, unsigned long kernel_len,
64 char *initrd, unsigned long initrd_len,
65 char *cmdline, unsigned long cmdline_len)
66{
67 struct s390_load_data data = {0};
68 const Elf_Ehdr *ehdr;
69 const Elf_Phdr *phdr;
70 size_t size;
71 int i, ret;
72
73 /* image->fobs->probe already checked for valid ELF magic number. */
74 ehdr = (Elf_Ehdr *)kernel;
75
76 if (ehdr->e_type != ET_EXEC ||
77 ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
78 !elf_check_arch(ehdr))
79 return ERR_PTR(-EINVAL);
80
81 if (!ehdr->e_phnum || ehdr->e_phentsize != sizeof(Elf_Phdr))
82 return ERR_PTR(-EINVAL);
83
84 size = ehdr->e_ehsize + ehdr->e_phoff;
85 size += ehdr->e_phentsize * ehdr->e_phnum;
86 if (size > kernel_len)
87 return ERR_PTR(-EINVAL);
88
89 phdr = (void *)ehdr + ehdr->e_phoff;
90 size = ALIGN(size, phdr->p_align);
91 for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
92 if (phdr->p_type == PT_INTERP)
93 return ERR_PTR(-EINVAL);
94
95 if (phdr->p_offset > kernel_len)
96 return ERR_PTR(-EINVAL);
97
98 size += ALIGN(phdr->p_filesz, phdr->p_align);
99 }
100
101 if (size > kernel_len)
102 return ERR_PTR(-EINVAL);
103
104 ret = kexec_file_add_elf_kernel(image, &data, kernel, kernel_len);
105 if (ret)
106 return ERR_PTR(ret);
107
108 if (!data.memsz)
109 return ERR_PTR(-EINVAL);
110
111 if (initrd) {
112 ret = kexec_file_add_initrd(image, &data, initrd, initrd_len);
113 if (ret)
114 return ERR_PTR(ret);
115 }
116
117 ret = kexec_file_add_purgatory(image, &data);
118 if (ret)
119 return ERR_PTR(ret);
120
121 return kexec_file_update_kernel(image, &data);
122}
123
124static int s390_elf_probe(const char *buf, unsigned long len)
125{
126 const Elf_Ehdr *ehdr;
127
128 if (len < sizeof(Elf_Ehdr))
129 return -ENOEXEC;
130
131 ehdr = (Elf_Ehdr *)buf;
132
133 /* Only check the ELF magic number here and do proper validity check
134 * in the loader. Any check here that fails would send the erroneous
135 * ELF file to the image loader that does not care what it gets.
136 * (Most likely) causing behavior not intended by the user.
137 */
138 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
139 return -ENOEXEC;
140
141 return 0;
142}
143
144const struct kexec_file_ops s390_kexec_elf_ops = {
145 .probe = s390_elf_probe,
146 .load = s390_elf_load,
147};
diff --git a/arch/s390/kernel/kexec_image.c b/arch/s390/kernel/kexec_image.c
new file mode 100644
index 000000000000..3800852595e8
--- /dev/null
+++ b/arch/s390/kernel/kexec_image.c
@@ -0,0 +1,76 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Image loader for kexec_file_load system call.
4 *
5 * Copyright IBM Corp. 2018
6 *
7 * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
8 */
9
10#include <linux/errno.h>
11#include <linux/kernel.h>
12#include <linux/kexec.h>
13#include <asm/setup.h>
14
15static int kexec_file_add_image_kernel(struct kimage *image,
16 struct s390_load_data *data,
17 char *kernel, unsigned long kernel_len)
18{
19 struct kexec_buf buf;
20 int ret;
21
22 buf.image = image;
23
24 buf.buffer = kernel + STARTUP_NORMAL_OFFSET;
25 buf.bufsz = kernel_len - STARTUP_NORMAL_OFFSET;
26
27 buf.mem = STARTUP_NORMAL_OFFSET;
28 if (image->type == KEXEC_TYPE_CRASH)
29 buf.mem += crashk_res.start;
30 buf.memsz = buf.bufsz;
31
32 ret = kexec_add_buffer(&buf);
33
34 data->kernel_buf = kernel;
35 data->memsz += buf.memsz + STARTUP_NORMAL_OFFSET;
36
37 return ret;
38}
39
40static void *s390_image_load(struct kimage *image,
41 char *kernel, unsigned long kernel_len,
42 char *initrd, unsigned long initrd_len,
43 char *cmdline, unsigned long cmdline_len)
44{
45 struct s390_load_data data = {0};
46 int ret;
47
48 ret = kexec_file_add_image_kernel(image, &data, kernel, kernel_len);
49 if (ret)
50 return ERR_PTR(ret);
51
52 if (initrd) {
53 ret = kexec_file_add_initrd(image, &data, initrd, initrd_len);
54 if (ret)
55 return ERR_PTR(ret);
56 }
57
58 ret = kexec_file_add_purgatory(image, &data);
59 if (ret)
60 return ERR_PTR(ret);
61
62 return kexec_file_update_kernel(image, &data);
63}
64
65static int s390_image_probe(const char *buf, unsigned long len)
66{
67 /* Can't reliably tell if an image is valid. Therefore give the
68 * user whatever he wants.
69 */
70 return 0;
71}
72
73const struct kexec_file_ops s390_kexec_image_ops = {
74 .probe = s390_image_probe,
75 .load = s390_image_load,
76};
diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c
new file mode 100644
index 000000000000..f413f57f8d20
--- /dev/null
+++ b/arch/s390/kernel/machine_kexec_file.c
@@ -0,0 +1,245 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * s390 code for kexec_file_load system call
4 *
5 * Copyright IBM Corp. 2018
6 *
7 * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
8 */
9
10#include <linux/elf.h>
11#include <linux/kexec.h>
12#include <asm/setup.h>
13
14const struct kexec_file_ops * const kexec_file_loaders[] = {
15 &s390_kexec_elf_ops,
16 &s390_kexec_image_ops,
17 NULL,
18};
19
20int *kexec_file_update_kernel(struct kimage *image,
21 struct s390_load_data *data)
22{
23 unsigned long *loc;
24
25 if (image->cmdline_buf_len >= ARCH_COMMAND_LINE_SIZE)
26 return ERR_PTR(-EINVAL);
27
28 if (image->cmdline_buf_len)
29 memcpy(data->kernel_buf + COMMAND_LINE_OFFSET,
30 image->cmdline_buf, image->cmdline_buf_len);
31
32 if (image->type == KEXEC_TYPE_CRASH) {
33 loc = (unsigned long *)(data->kernel_buf + OLDMEM_BASE_OFFSET);
34 *loc = crashk_res.start;
35
36 loc = (unsigned long *)(data->kernel_buf + OLDMEM_SIZE_OFFSET);
37 *loc = crashk_res.end - crashk_res.start + 1;
38 }
39
40 if (image->initrd_buf) {
41 loc = (unsigned long *)(data->kernel_buf + INITRD_START_OFFSET);
42 *loc = data->initrd_load_addr;
43
44 loc = (unsigned long *)(data->kernel_buf + INITRD_SIZE_OFFSET);
45 *loc = image->initrd_buf_len;
46 }
47
48 return NULL;
49}
50
51static int kexec_file_update_purgatory(struct kimage *image)
52{
53 u64 entry, type;
54 int ret;
55
56 if (image->type == KEXEC_TYPE_CRASH) {
57 entry = STARTUP_KDUMP_OFFSET;
58 type = KEXEC_TYPE_CRASH;
59 } else {
60 entry = STARTUP_NORMAL_OFFSET;
61 type = KEXEC_TYPE_DEFAULT;
62 }
63
64 ret = kexec_purgatory_get_set_symbol(image, "kernel_entry", &entry,
65 sizeof(entry), false);
66 if (ret)
67 return ret;
68
69 ret = kexec_purgatory_get_set_symbol(image, "kernel_type", &type,
70 sizeof(type), false);
71 if (ret)
72 return ret;
73
74 if (image->type == KEXEC_TYPE_CRASH) {
75 u64 crash_size;
76
77 ret = kexec_purgatory_get_set_symbol(image, "crash_start",
78 &crashk_res.start,
79 sizeof(crashk_res.start),
80 false);
81 if (ret)
82 return ret;
83
84 crash_size = crashk_res.end - crashk_res.start + 1;
85 ret = kexec_purgatory_get_set_symbol(image, "crash_size",
86 &crash_size,
87 sizeof(crash_size),
88 false);
89 }
90 return ret;
91}
92
93int kexec_file_add_purgatory(struct kimage *image, struct s390_load_data *data)
94{
95 struct kexec_buf buf;
96 int ret;
97
98 buf.image = image;
99
100 data->memsz = ALIGN(data->memsz, PAGE_SIZE);
101 buf.mem = data->memsz;
102 if (image->type == KEXEC_TYPE_CRASH)
103 buf.mem += crashk_res.start;
104
105 ret = kexec_load_purgatory(image, &buf);
106 if (ret)
107 return ret;
108
109 ret = kexec_file_update_purgatory(image);
110 return ret;
111}
112
113int kexec_file_add_initrd(struct kimage *image, struct s390_load_data *data,
114 char *initrd, unsigned long initrd_len)
115{
116 struct kexec_buf buf;
117 int ret;
118
119 buf.image = image;
120
121 buf.buffer = initrd;
122 buf.bufsz = initrd_len;
123
124 data->memsz = ALIGN(data->memsz, PAGE_SIZE);
125 buf.mem = data->memsz;
126 if (image->type == KEXEC_TYPE_CRASH)
127 buf.mem += crashk_res.start;
128 buf.memsz = buf.bufsz;
129
130 data->initrd_load_addr = buf.mem;
131 data->memsz += buf.memsz;
132
133 ret = kexec_add_buffer(&buf);
134 return ret;
135}
136
137/*
138 * The kernel is loaded to a fixed location. Turn off kexec_locate_mem_hole
139 * and provide kbuf->mem by hand.
140 */
141int arch_kexec_walk_mem(struct kexec_buf *kbuf,
142 int (*func)(struct resource *, void *))
143{
144 return 1;
145}
146
147int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
148 Elf_Shdr *section,
149 const Elf_Shdr *relsec,
150 const Elf_Shdr *symtab)
151{
152 Elf_Rela *relas;
153 int i;
154
155 relas = (void *)pi->ehdr + relsec->sh_offset;
156
157 for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) {
158 const Elf_Sym *sym; /* symbol to relocate */
159 unsigned long addr; /* final location after relocation */
160 unsigned long val; /* relocated symbol value */
161 void *loc; /* tmp location to modify */
162
163 sym = (void *)pi->ehdr + symtab->sh_offset;
164 sym += ELF64_R_SYM(relas[i].r_info);
165
166 if (sym->st_shndx == SHN_UNDEF)
167 return -ENOEXEC;
168
169 if (sym->st_shndx == SHN_COMMON)
170 return -ENOEXEC;
171
172 if (sym->st_shndx >= pi->ehdr->e_shnum &&
173 sym->st_shndx != SHN_ABS)
174 return -ENOEXEC;
175
176 loc = pi->purgatory_buf;
177 loc += section->sh_offset;
178 loc += relas[i].r_offset;
179
180 val = sym->st_value;
181 if (sym->st_shndx != SHN_ABS)
182 val += pi->sechdrs[sym->st_shndx].sh_addr;
183 val += relas[i].r_addend;
184
185 addr = section->sh_addr + relas[i].r_offset;
186
187 switch (ELF64_R_TYPE(relas[i].r_info)) {
188 case R_390_8: /* Direct 8 bit. */
189 *(u8 *)loc = val;
190 break;
191 case R_390_12: /* Direct 12 bit. */
192 *(u16 *)loc &= 0xf000;
193 *(u16 *)loc |= val & 0xfff;
194 break;
195 case R_390_16: /* Direct 16 bit. */
196 *(u16 *)loc = val;
197 break;
198 case R_390_20: /* Direct 20 bit. */
199 *(u32 *)loc &= 0xf00000ff;
200 *(u32 *)loc |= (val & 0xfff) << 16; /* DL */
201 *(u32 *)loc |= (val & 0xff000) >> 4; /* DH */
202 break;
203 case R_390_32: /* Direct 32 bit. */
204 *(u32 *)loc = val;
205 break;
206 case R_390_64: /* Direct 64 bit. */
207 *(u64 *)loc = val;
208 break;
209 case R_390_PC16: /* PC relative 16 bit. */
210 *(u16 *)loc = (val - addr);
211 break;
212 case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */
213 *(u16 *)loc = (val - addr) >> 1;
214 break;
215 case R_390_PC32DBL: /* PC relative 32 bit shifted by 1. */
216 *(u32 *)loc = (val - addr) >> 1;
217 break;
218 case R_390_PC32: /* PC relative 32 bit. */
219 *(u32 *)loc = (val - addr);
220 break;
221 case R_390_PC64: /* PC relative 64 bit. */
222 *(u64 *)loc = (val - addr);
223 break;
224 default:
225 break;
226 }
227 }
228 return 0;
229}
230
231int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
232 unsigned long buf_len)
233{
234 /* A kernel must be at least large enough to contain head.S. During
235 * load memory in head.S will be accessed, e.g. to register the next
236 * command line. If the next kernel were smaller the current kernel
237 * will panic at load.
238 *
239 * 0x11000 = sizeof(head.S)
240 */
241 if (buf_len < 0x11000)
242 return -ENOEXEC;
243
244 return kexec_image_probe_default(image, buf, buf_len);
245}
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 5a83be955c70..0dc8ac8548ee 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -465,11 +465,11 @@ int module_finalize(const Elf_Ehdr *hdr,
465 apply_alternatives(aseg, aseg + s->sh_size); 465 apply_alternatives(aseg, aseg + s->sh_size);
466 466
467 if (IS_ENABLED(CONFIG_EXPOLINE) && 467 if (IS_ENABLED(CONFIG_EXPOLINE) &&
468 (!strcmp(".nospec_call_table", secname))) 468 (!strncmp(".s390_indirect", secname, 14)))
469 nospec_revert(aseg, aseg + s->sh_size); 469 nospec_revert(aseg, aseg + s->sh_size);
470 470
471 if (IS_ENABLED(CONFIG_EXPOLINE) && 471 if (IS_ENABLED(CONFIG_EXPOLINE) &&
472 (!strcmp(".nospec_return_table", secname))) 472 (!strncmp(".s390_return", secname, 12)))
473 nospec_revert(aseg, aseg + s->sh_size); 473 nospec_revert(aseg, aseg + s->sh_size);
474 } 474 }
475 475
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
index f236ce8757e8..46d49a11663f 100644
--- a/arch/s390/kernel/nospec-branch.c
+++ b/arch/s390/kernel/nospec-branch.c
@@ -1,6 +1,7 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2#include <linux/module.h> 2#include <linux/module.h>
3#include <linux/device.h> 3#include <linux/device.h>
4#include <linux/cpu.h>
4#include <asm/nospec-branch.h> 5#include <asm/nospec-branch.h>
5 6
6static int __init nobp_setup_early(char *str) 7static int __init nobp_setup_early(char *str)
diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c
index c5bc3f209652..feebb2944882 100644
--- a/arch/s390/kernel/perf_cpum_cf_events.c
+++ b/arch/s390/kernel/perf_cpum_cf_events.c
@@ -123,7 +123,7 @@ CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV, 0x00a1);
123CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TABORT, 0x00b1); 123CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TABORT, 0x00b1);
124CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_NO_SPECIAL, 0x00b2); 124CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_NO_SPECIAL, 0x00b2);
125CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_SPECIAL, 0x00b3); 125CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_SPECIAL, 0x00b3);
126CPUMF_EVENT_ATTR(cf_z13, L1D_WRITES_RO_EXCL, 0x0080); 126CPUMF_EVENT_ATTR(cf_z13, L1D_RO_EXCL_WRITES, 0x0080);
127CPUMF_EVENT_ATTR(cf_z13, DTLB1_WRITES, 0x0081); 127CPUMF_EVENT_ATTR(cf_z13, DTLB1_WRITES, 0x0081);
128CPUMF_EVENT_ATTR(cf_z13, DTLB1_MISSES, 0x0082); 128CPUMF_EVENT_ATTR(cf_z13, DTLB1_MISSES, 0x0082);
129CPUMF_EVENT_ATTR(cf_z13, DTLB1_HPAGE_WRITES, 0x0083); 129CPUMF_EVENT_ATTR(cf_z13, DTLB1_HPAGE_WRITES, 0x0083);
@@ -179,7 +179,7 @@ CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_NO_SPECIAL, 0x00db);
179CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_SPECIAL, 0x00dc); 179CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_SPECIAL, 0x00dc);
180CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0); 180CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
181CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1); 181CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
182CPUMF_EVENT_ATTR(cf_z14, L1D_WRITES_RO_EXCL, 0x0080); 182CPUMF_EVENT_ATTR(cf_z14, L1D_RO_EXCL_WRITES, 0x0080);
183CPUMF_EVENT_ATTR(cf_z14, DTLB2_WRITES, 0x0081); 183CPUMF_EVENT_ATTR(cf_z14, DTLB2_WRITES, 0x0081);
184CPUMF_EVENT_ATTR(cf_z14, DTLB2_MISSES, 0x0082); 184CPUMF_EVENT_ATTR(cf_z14, DTLB2_MISSES, 0x0082);
185CPUMF_EVENT_ATTR(cf_z14, DTLB2_HPAGE_WRITES, 0x0083); 185CPUMF_EVENT_ATTR(cf_z14, DTLB2_HPAGE_WRITES, 0x0083);
@@ -371,7 +371,7 @@ static struct attribute *cpumcf_zec12_pmu_event_attr[] __initdata = {
371}; 371};
372 372
373static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = { 373static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = {
374 CPUMF_EVENT_PTR(cf_z13, L1D_WRITES_RO_EXCL), 374 CPUMF_EVENT_PTR(cf_z13, L1D_RO_EXCL_WRITES),
375 CPUMF_EVENT_PTR(cf_z13, DTLB1_WRITES), 375 CPUMF_EVENT_PTR(cf_z13, DTLB1_WRITES),
376 CPUMF_EVENT_PTR(cf_z13, DTLB1_MISSES), 376 CPUMF_EVENT_PTR(cf_z13, DTLB1_MISSES),
377 CPUMF_EVENT_PTR(cf_z13, DTLB1_HPAGE_WRITES), 377 CPUMF_EVENT_PTR(cf_z13, DTLB1_HPAGE_WRITES),
@@ -431,7 +431,7 @@ static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = {
431}; 431};
432 432
433static struct attribute *cpumcf_z14_pmu_event_attr[] __initdata = { 433static struct attribute *cpumcf_z14_pmu_event_attr[] __initdata = {
434 CPUMF_EVENT_PTR(cf_z14, L1D_WRITES_RO_EXCL), 434 CPUMF_EVENT_PTR(cf_z14, L1D_RO_EXCL_WRITES),
435 CPUMF_EVENT_PTR(cf_z14, DTLB2_WRITES), 435 CPUMF_EVENT_PTR(cf_z14, DTLB2_WRITES),
436 CPUMF_EVENT_PTR(cf_z14, DTLB2_MISSES), 436 CPUMF_EVENT_PTR(cf_z14, DTLB2_MISSES),
437 CPUMF_EVENT_PTR(cf_z14, DTLB2_HPAGE_WRITES), 437 CPUMF_EVENT_PTR(cf_z14, DTLB2_HPAGE_WRITES),
@@ -583,6 +583,7 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
583 model = cpumcf_z13_pmu_event_attr; 583 model = cpumcf_z13_pmu_event_attr;
584 break; 584 break;
585 case 0x3906: 585 case 0x3906:
586 case 0x3907:
586 model = cpumcf_z14_pmu_event_attr; 587 model = cpumcf_z14_pmu_event_attr;
587 break; 588 break;
588 default: 589 default:
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 70576a2f69cf..6e758bb6cd29 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -29,6 +29,7 @@
29#include <linux/random.h> 29#include <linux/random.h>
30#include <linux/export.h> 30#include <linux/export.h>
31#include <linux/init_task.h> 31#include <linux/init_task.h>
32#include <asm/cpu_mf.h>
32#include <asm/io.h> 33#include <asm/io.h>
33#include <asm/processor.h> 34#include <asm/processor.h>
34#include <asm/vtimer.h> 35#include <asm/vtimer.h>
@@ -48,6 +49,15 @@ void flush_thread(void)
48{ 49{
49} 50}
50 51
52void arch_setup_new_exec(void)
53{
54 if (S390_lowcore.current_pid != current->pid) {
55 S390_lowcore.current_pid = current->pid;
56 if (test_facility(40))
57 lpp(&S390_lowcore.lpp);
58 }
59}
60
51void arch_release_task_struct(struct task_struct *tsk) 61void arch_release_task_struct(struct task_struct *tsk)
52{ 62{
53 runtime_instr_release(tsk); 63 runtime_instr_release(tsk);
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index fc3b4aa185cc..d82a9ec64ea9 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -821,6 +821,7 @@ static int __init setup_hwcaps(void)
821 strcpy(elf_platform, "z13"); 821 strcpy(elf_platform, "z13");
822 break; 822 break;
823 case 0x3906: 823 case 0x3906:
824 case 0x3907:
824 strcpy(elf_platform, "z14"); 825 strcpy(elf_platform, "z14");
825 break; 826 break;
826 } 827 }
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index b38d48464368..8b210ead7956 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -388,3 +388,4 @@
388378 common s390_guarded_storage sys_s390_guarded_storage compat_sys_s390_guarded_storage 388378 common s390_guarded_storage sys_s390_guarded_storage compat_sys_s390_guarded_storage
389379 common statx sys_statx compat_sys_statx 389379 common statx sys_statx compat_sys_statx
390380 common s390_sthyi sys_s390_sthyi compat_sys_s390_sthyi 390380 common s390_sthyi sys_s390_sthyi compat_sys_s390_sthyi
391381 common kexec_file_load sys_kexec_file_load compat_sys_kexec_file_load
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index d9d1f512f019..5007fac01bb5 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -150,6 +150,15 @@ unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
150 return orig; 150 return orig;
151} 151}
152 152
153bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
154 struct pt_regs *regs)
155{
156 if (ctx == RP_CHECK_CHAIN_CALL)
157 return user_stack_pointer(regs) <= ret->stack;
158 else
159 return user_stack_pointer(regs) < ret->stack;
160}
161
153/* Instruction Emulation */ 162/* Instruction Emulation */
154 163
155static void adjust_psw_addr(psw_t *psw, unsigned long len) 164static void adjust_psw_addr(psw_t *psw, unsigned long len)
diff --git a/arch/s390/purgatory/.gitignore b/arch/s390/purgatory/.gitignore
new file mode 100644
index 000000000000..e9e66f178a6d
--- /dev/null
+++ b/arch/s390/purgatory/.gitignore
@@ -0,0 +1,2 @@
1kexec-purgatory.c
2purgatory.ro
diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile
new file mode 100644
index 000000000000..e9525bc1b4a6
--- /dev/null
+++ b/arch/s390/purgatory/Makefile
@@ -0,0 +1,37 @@
1# SPDX-License-Identifier: GPL-2.0
2
3OBJECT_FILES_NON_STANDARD := y
4
5purgatory-y := head.o purgatory.o string.o sha256.o mem.o
6
7targets += $(purgatory-y) purgatory.ro kexec-purgatory.c
8PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
9
10$(obj)/sha256.o: $(srctree)/lib/sha256.c
11 $(call if_changed_rule,cc_o_c)
12
13$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S
14 $(call if_changed_rule,as_o_S)
15
16$(obj)/string.o: $(srctree)/arch/s390/lib/string.c
17 $(call if_changed_rule,cc_o_c)
18
19LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib
20LDFLAGS_purgatory.ro += -z nodefaultlib
21KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
22KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
23KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
24KBUILD_CFLAGS += -c -MD -Os -m64
25KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
26
27$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
28 $(call if_changed,ld)
29
30CMD_BIN2C = $(objtree)/scripts/basic/bin2c
31quiet_cmd_bin2c = BIN2C $@
32 cmd_bin2c = $(CMD_BIN2C) kexec_purgatory < $< > $@
33
34$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
35 $(call if_changed,bin2c)
36
37obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += kexec-purgatory.o
diff --git a/arch/s390/purgatory/head.S b/arch/s390/purgatory/head.S
new file mode 100644
index 000000000000..660c96a05a9b
--- /dev/null
+++ b/arch/s390/purgatory/head.S
@@ -0,0 +1,279 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Purgatory setup code
4 *
5 * Copyright IBM Corp. 2018
6 *
7 * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
8 */
9
10#include <linux/linkage.h>
11#include <asm/asm-offsets.h>
12#include <asm/page.h>
13#include <asm/sigp.h>
14
15/* The purgatory is the code running between two kernels. It's main purpose
16 * is to verify that the next kernel was not corrupted after load and to
17 * start it.
18 *
19 * If the next kernel is a crash kernel there are some peculiarities to
20 * consider:
21 *
22 * First the purgatory is called twice. Once only to verify the
23 * sha digest. So if the crash kernel got corrupted the old kernel can try
24 * to trigger a stand-alone dumper. And once to actually load the crash kernel.
25 *
26 * Second the purgatory also has to swap the crash memory region with its
27 * destination at address 0. As the purgatory is part of crash memory this
28 * requires some finesse. The tactic here is that the purgatory first copies
29 * itself to the end of the destination and then swaps the rest of the
30 * memory running from there.
31 */
32
33#define bufsz purgatory_end-stack
34
35.macro MEMCPY dst,src,len
36 lgr %r0,\dst
37 lgr %r1,\len
38 lgr %r2,\src
39 lgr %r3,\len
40
4120: mvcle %r0,%r2,0
42 jo 20b
43.endm
44
45.macro MEMSWAP dst,src,buf,len
4610: cghi \len,bufsz
47 jh 11f
48 lgr %r4,\len
49 j 12f
5011: lghi %r4,bufsz
51
5212: MEMCPY \buf,\dst,%r4
53 MEMCPY \dst,\src,%r4
54 MEMCPY \src,\buf,%r4
55
56 agr \dst,%r4
57 agr \src,%r4
58 sgr \len,%r4
59
60 cghi \len,0
61 jh 10b
62.endm
63
64.macro START_NEXT_KERNEL base
65 lg %r4,kernel_entry-\base(%r13)
66 lg %r5,load_psw_mask-\base(%r13)
67 ogr %r4,%r5
68 stg %r4,0(%r0)
69
70 xgr %r0,%r0
71 diag %r0,%r0,0x308
72.endm
73
74.text
75.align PAGE_SIZE
76ENTRY(purgatory_start)
77 /* The purgatory might be called after a diag308 so better set
78 * architecture and addressing mode.
79 */
80 lhi %r1,1
81 sigp %r1,%r0,SIGP_SET_ARCHITECTURE
82 sam64
83
84 larl %r5,gprregs
85 stmg %r6,%r15,0(%r5)
86
87 basr %r13,0
88.base_crash:
89
90 /* Setup stack */
91 larl %r15,purgatory_end
92 aghi %r15,-160
93
94 /* If the next kernel is KEXEC_TYPE_CRASH the purgatory is called
95 * directly with a flag passed in %r2 whether the purgatory shall do
96 * checksum verification only (%r2 = 0 -> verification only).
97 *
98 * Check now and preserve over C function call by storing in
99 * %r10 whith
100 * 1 -> checksum verification only
101 * 0 -> load new kernel
102 */
103 lghi %r10,0
104 lg %r11,kernel_type-.base_crash(%r13)
105 cghi %r11,1 /* KEXEC_TYPE_CRASH */
106 jne .do_checksum_verification
107 cghi %r2,0 /* checksum verification only */
108 jne .do_checksum_verification
109 lghi %r10,1
110
111.do_checksum_verification:
112 brasl %r14,verify_sha256_digest
113
114 cghi %r10,1 /* checksum verification only */
115 je .return_old_kernel
116 cghi %r2,0 /* checksum match */
117 jne .disabled_wait
118
119 /* If the next kernel is a crash kernel the purgatory has to swap
120 * the mem regions first.
121 */
122 cghi %r11,1 /* KEXEC_TYPE_CRASH */
123 je .start_crash_kernel
124
125 /* start normal kernel */
126 START_NEXT_KERNEL .base_crash
127
128.return_old_kernel:
129 lmg %r6,%r15,gprregs-.base_crash(%r13)
130 br %r14
131
132.disabled_wait:
133 lpswe disabled_wait_psw-.base_crash(%r13)
134
135.start_crash_kernel:
136 /* Location of purgatory_start in crash memory */
137 lgr %r8,%r13
138 aghi %r8,-(.base_crash-purgatory_start)
139
140 /* Destination for this code i.e. end of memory to be swapped. */
141 lg %r9,crash_size-.base_crash(%r13)
142 aghi %r9,-(purgatory_end-purgatory_start)
143
144 /* Destination in crash memory, i.e. same as r9 but in crash memory. */
145 lg %r10,crash_start-.base_crash(%r13)
146 agr %r10,%r9
147
148 /* Buffer location (in crash memory) and size. As the purgatory is
149 * behind the point of no return it can re-use the stack as buffer.
150 */
151 lghi %r11,bufsz
152 larl %r12,stack
153
154 MEMCPY %r12,%r9,%r11 /* dst -> (crash) buf */
155 MEMCPY %r9,%r8,%r11 /* self -> dst */
156
157 /* Jump to new location. */
158 lgr %r7,%r9
159 aghi %r7,.jump_to_dst-purgatory_start
160 br %r7
161
162.jump_to_dst:
163 basr %r13,0
164.base_dst:
165
166 /* clear buffer */
167 MEMCPY %r12,%r10,%r11 /* (crash) buf -> (crash) dst */
168
169 /* Load new buffer location after jump */
170 larl %r7,stack
171 aghi %r10,stack-purgatory_start
172 MEMCPY %r10,%r7,%r11 /* (new) buf -> (crash) buf */
173
174 /* Now the code is set up to run from its designated location. Start
175 * swapping the rest of crash memory now.
176 *
177 * The registers will be used as follow:
178 *
179 * %r0-%r4 reserved for macros defined above
180 * %r5-%r6 tmp registers
181 * %r7 pointer to current struct sha region
182 * %r8 index to iterate over all sha regions
183 * %r9 pointer in crash memory
184 * %r10 pointer in old kernel
185 * %r11 total size (still) to be moved
186 * %r12 pointer to buffer
187 */
188 lgr %r12,%r7
189 lgr %r11,%r9
190 lghi %r10,0
191 lg %r9,crash_start-.base_dst(%r13)
192 lghi %r8,16 /* KEXEC_SEGMENTS_MAX */
193 larl %r7,purgatory_sha_regions
194
195 j .loop_first
196
197 /* Loop over all purgatory_sha_regions. */
198.loop_next:
199 aghi %r8,-1
200 cghi %r8,0
201 je .loop_out
202
203 aghi %r7,__KEXEC_SHA_REGION_SIZE
204
205.loop_first:
206 lg %r5,__KEXEC_SHA_REGION_START(%r7)
207 cghi %r5,0
208 je .loop_next
209
210 /* Copy [end last sha region, start current sha region) */
211 /* Note: kexec_sha_region->start points in crash memory */
212 sgr %r5,%r9
213 MEMCPY %r9,%r10,%r5
214
215 agr %r9,%r5
216 agr %r10,%r5
217 sgr %r11,%r5
218
219 /* Swap sha region */
220 lg %r6,__KEXEC_SHA_REGION_LEN(%r7)
221 MEMSWAP %r9,%r10,%r12,%r6
222 sg %r11,__KEXEC_SHA_REGION_LEN(%r7)
223 j .loop_next
224
225.loop_out:
226 /* Copy rest of crash memory */
227 MEMCPY %r9,%r10,%r11
228
229 /* start crash kernel */
230 START_NEXT_KERNEL .base_dst
231
232
233load_psw_mask:
234 .long 0x00080000,0x80000000
235
236 .align 8
237disabled_wait_psw:
238 .quad 0x0002000180000000
239 .quad 0x0000000000000000 + .do_checksum_verification
240
241gprregs:
242 .rept 10
243 .quad 0
244 .endr
245
246purgatory_sha256_digest:
247 .global purgatory_sha256_digest
248 .rept 32 /* SHA256_DIGEST_SIZE */
249 .byte 0
250 .endr
251
252purgatory_sha_regions:
253 .global purgatory_sha_regions
254 .rept 16 * __KEXEC_SHA_REGION_SIZE /* KEXEC_SEGMENTS_MAX */
255 .byte 0
256 .endr
257
258kernel_entry:
259 .global kernel_entry
260 .quad 0
261
262kernel_type:
263 .global kernel_type
264 .quad 0
265
266crash_start:
267 .global crash_start
268 .quad 0
269
270crash_size:
271 .global crash_size
272 .quad 0
273
274 .align PAGE_SIZE
275stack:
276 /* The buffer to move this code must be as big as the code. */
277 .skip stack-purgatory_start
278 .align PAGE_SIZE
279purgatory_end:
diff --git a/arch/s390/purgatory/purgatory.c b/arch/s390/purgatory/purgatory.c
new file mode 100644
index 000000000000..4e2beb3c29b7
--- /dev/null
+++ b/arch/s390/purgatory/purgatory.c
@@ -0,0 +1,42 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Purgatory code running between two kernels.
4 *
5 * Copyright IBM Corp. 2018
6 *
7 * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
8 */
9
10#include <linux/kexec.h>
11#include <linux/sha256.h>
12#include <linux/string.h>
13#include <asm/purgatory.h>
14
15struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX];
16u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE];
17
18u64 kernel_entry;
19u64 kernel_type;
20
21u64 crash_start;
22u64 crash_size;
23
24int verify_sha256_digest(void)
25{
26 struct kexec_sha_region *ptr, *end;
27 u8 digest[SHA256_DIGEST_SIZE];
28 struct sha256_state sctx;
29
30 sha256_init(&sctx);
31 end = purgatory_sha_regions + ARRAY_SIZE(purgatory_sha_regions);
32
33 for (ptr = purgatory_sha_regions; ptr < end; ptr++)
34 sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len);
35
36 sha256_final(&sctx, digest);
37
38 if (memcmp(digest, purgatory_sha256_digest, sizeof(digest)))
39 return 1;
40
41 return 0;
42}
diff --git a/arch/sparc/include/uapi/asm/oradax.h b/arch/sparc/include/uapi/asm/oradax.h
index 722951908b0a..4f6676fe4bcc 100644
--- a/arch/sparc/include/uapi/asm/oradax.h
+++ b/arch/sparc/include/uapi/asm/oradax.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * This program is free software: you can redistribute it and/or modify 4 * This program is free software: you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation, either version 3 of the License, or 6 * the Free Software Foundation, either version 2 of the License, or
7 * (at your option) any later version. 7 * (at your option) any later version.
8 * 8 *
9 * This program is distributed in the hope that it will be useful, 9 * This program is distributed in the hope that it will be useful,
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index 1a0fa10cb6b7..32bae68e34c1 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -403,7 +403,7 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
403 if (err) { 403 if (err) {
404 printk(KERN_ERR "VIO: Could not register device %s, err=%d\n", 404 printk(KERN_ERR "VIO: Could not register device %s, err=%d\n",
405 dev_name(&vdev->dev), err); 405 dev_name(&vdev->dev), err);
406 kfree(vdev); 406 put_device(&vdev->dev);
407 return NULL; 407 return NULL;
408 } 408 }
409 if (vdev->dp) 409 if (vdev->dp)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 00fcf81f2c56..c07f492b871a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -52,6 +52,7 @@ config X86
52 select ARCH_HAS_DEVMEM_IS_ALLOWED 52 select ARCH_HAS_DEVMEM_IS_ALLOWED
53 select ARCH_HAS_ELF_RANDOMIZE 53 select ARCH_HAS_ELF_RANDOMIZE
54 select ARCH_HAS_FAST_MULTIPLIER 54 select ARCH_HAS_FAST_MULTIPLIER
55 select ARCH_HAS_FILTER_PGPROT
55 select ARCH_HAS_FORTIFY_SOURCE 56 select ARCH_HAS_FORTIFY_SOURCE
56 select ARCH_HAS_GCOV_PROFILE_ALL 57 select ARCH_HAS_GCOV_PROFILE_ALL
57 select ARCH_HAS_KCOV if X86_64 58 select ARCH_HAS_KCOV if X86_64
@@ -273,6 +274,9 @@ config ARCH_HAS_CPU_RELAX
273config ARCH_HAS_CACHE_LINE_SIZE 274config ARCH_HAS_CACHE_LINE_SIZE
274 def_bool y 275 def_bool y
275 276
277config ARCH_HAS_FILTER_PGPROT
278 def_bool y
279
276config HAVE_SETUP_PER_CPU_AREA 280config HAVE_SETUP_PER_CPU_AREA
277 def_bool y 281 def_bool y
278 282
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 9af927e59d49..9de7f1e1dede 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -84,13 +84,13 @@ ENTRY(entry_SYSENTER_compat)
84 pushq %rdx /* pt_regs->dx */ 84 pushq %rdx /* pt_regs->dx */
85 pushq %rcx /* pt_regs->cx */ 85 pushq %rcx /* pt_regs->cx */
86 pushq $-ENOSYS /* pt_regs->ax */ 86 pushq $-ENOSYS /* pt_regs->ax */
87 pushq $0 /* pt_regs->r8 = 0 */ 87 pushq %r8 /* pt_regs->r8 */
88 xorl %r8d, %r8d /* nospec r8 */ 88 xorl %r8d, %r8d /* nospec r8 */
89 pushq $0 /* pt_regs->r9 = 0 */ 89 pushq %r9 /* pt_regs->r9 */
90 xorl %r9d, %r9d /* nospec r9 */ 90 xorl %r9d, %r9d /* nospec r9 */
91 pushq $0 /* pt_regs->r10 = 0 */ 91 pushq %r10 /* pt_regs->r10 */
92 xorl %r10d, %r10d /* nospec r10 */ 92 xorl %r10d, %r10d /* nospec r10 */
93 pushq $0 /* pt_regs->r11 = 0 */ 93 pushq %r11 /* pt_regs->r11 */
94 xorl %r11d, %r11d /* nospec r11 */ 94 xorl %r11d, %r11d /* nospec r11 */
95 pushq %rbx /* pt_regs->rbx */ 95 pushq %rbx /* pt_regs->rbx */
96 xorl %ebx, %ebx /* nospec rbx */ 96 xorl %ebx, %ebx /* nospec rbx */
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 607bf565a90c..707b2a96e516 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3339,7 +3339,8 @@ static void intel_pmu_cpu_starting(int cpu)
3339 3339
3340 cpuc->lbr_sel = NULL; 3340 cpuc->lbr_sel = NULL;
3341 3341
3342 flip_smm_bit(&x86_pmu.attr_freeze_on_smi); 3342 if (x86_pmu.version > 1)
3343 flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
3343 3344
3344 if (!cpuc->shared_regs) 3345 if (!cpuc->shared_regs)
3345 return; 3346 return;
@@ -3502,6 +3503,8 @@ static __initconst const struct x86_pmu core_pmu = {
3502 .cpu_dying = intel_pmu_cpu_dying, 3503 .cpu_dying = intel_pmu_cpu_dying,
3503}; 3504};
3504 3505
3506static struct attribute *intel_pmu_attrs[];
3507
3505static __initconst const struct x86_pmu intel_pmu = { 3508static __initconst const struct x86_pmu intel_pmu = {
3506 .name = "Intel", 3509 .name = "Intel",
3507 .handle_irq = intel_pmu_handle_irq, 3510 .handle_irq = intel_pmu_handle_irq,
@@ -3533,6 +3536,8 @@ static __initconst const struct x86_pmu intel_pmu = {
3533 .format_attrs = intel_arch3_formats_attr, 3536 .format_attrs = intel_arch3_formats_attr,
3534 .events_sysfs_show = intel_event_sysfs_show, 3537 .events_sysfs_show = intel_event_sysfs_show,
3535 3538
3539 .attrs = intel_pmu_attrs,
3540
3536 .cpu_prepare = intel_pmu_cpu_prepare, 3541 .cpu_prepare = intel_pmu_cpu_prepare,
3537 .cpu_starting = intel_pmu_cpu_starting, 3542 .cpu_starting = intel_pmu_cpu_starting,
3538 .cpu_dying = intel_pmu_cpu_dying, 3543 .cpu_dying = intel_pmu_cpu_dying,
@@ -3911,8 +3916,6 @@ __init int intel_pmu_init(void)
3911 3916
3912 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters); 3917 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
3913 3918
3914
3915 x86_pmu.attrs = intel_pmu_attrs;
3916 /* 3919 /*
3917 * Quirk: v2 perfmon does not report fixed-purpose events, so 3920 * Quirk: v2 perfmon does not report fixed-purpose events, so
3918 * assume at least 3 events, when not running in a hypervisor: 3921 * assume at least 3 events, when not running in a hypervisor:
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index c98b943e58b4..77076a102e34 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -3028,10 +3028,27 @@ static struct intel_uncore_type bdx_uncore_cbox = {
3028 .format_group = &hswep_uncore_cbox_format_group, 3028 .format_group = &hswep_uncore_cbox_format_group,
3029}; 3029};
3030 3030
3031static struct intel_uncore_type bdx_uncore_sbox = {
3032 .name = "sbox",
3033 .num_counters = 4,
3034 .num_boxes = 4,
3035 .perf_ctr_bits = 48,
3036 .event_ctl = HSWEP_S0_MSR_PMON_CTL0,
3037 .perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
3038 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3039 .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
3040 .msr_offset = HSWEP_SBOX_MSR_OFFSET,
3041 .ops = &hswep_uncore_sbox_msr_ops,
3042 .format_group = &hswep_uncore_sbox_format_group,
3043};
3044
3045#define BDX_MSR_UNCORE_SBOX 3
3046
3031static struct intel_uncore_type *bdx_msr_uncores[] = { 3047static struct intel_uncore_type *bdx_msr_uncores[] = {
3032 &bdx_uncore_ubox, 3048 &bdx_uncore_ubox,
3033 &bdx_uncore_cbox, 3049 &bdx_uncore_cbox,
3034 &hswep_uncore_pcu, 3050 &hswep_uncore_pcu,
3051 &bdx_uncore_sbox,
3035 NULL, 3052 NULL,
3036}; 3053};
3037 3054
@@ -3043,10 +3060,25 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {
3043 3060
3044void bdx_uncore_cpu_init(void) 3061void bdx_uncore_cpu_init(void)
3045{ 3062{
3063 int pkg = topology_phys_to_logical_pkg(0);
3064
3046 if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) 3065 if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
3047 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; 3066 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
3048 uncore_msr_uncores = bdx_msr_uncores; 3067 uncore_msr_uncores = bdx_msr_uncores;
3049 3068
3069 /* BDX-DE doesn't have SBOX */
3070 if (boot_cpu_data.x86_model == 86) {
3071 uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3072 /* Detect systems with no SBOXes */
3073 } else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
3074 struct pci_dev *pdev;
3075 u32 capid4;
3076
3077 pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3];
3078 pci_read_config_dword(pdev, 0x94, &capid4);
3079 if (((capid4 >> 6) & 0x3) == 0)
3080 bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3081 }
3050 hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints; 3082 hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
3051} 3083}
3052 3084
@@ -3264,6 +3296,11 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = {
3264 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46), 3296 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
3265 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2), 3297 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2),
3266 }, 3298 },
3299 { /* PCU.3 (for Capability registers) */
3300 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
3301 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3302 HSWEP_PCI_PCU_3),
3303 },
3267 { /* end: all zeroes */ } 3304 { /* end: all zeroes */ }
3268}; 3305};
3269 3306
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 386a6900e206..219faaec51df 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -136,7 +136,6 @@
136#endif 136#endif
137 137
138#ifndef __ASSEMBLY__ 138#ifndef __ASSEMBLY__
139#ifndef __BPF__
140/* 139/*
141 * This output constraint should be used for any inline asm which has a "call" 140 * This output constraint should be used for any inline asm which has a "call"
142 * instruction. Otherwise the asm may be inserted before the frame pointer 141 * instruction. Otherwise the asm may be inserted before the frame pointer
@@ -146,6 +145,5 @@
146register unsigned long current_stack_pointer asm(_ASM_SP); 145register unsigned long current_stack_pointer asm(_ASM_SP);
147#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer) 146#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
148#endif 147#endif
149#endif
150 148
151#endif /* _ASM_X86_ASM_H */ 149#endif /* _ASM_X86_ASM_H */
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index d554c11e01ff..578793e97431 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -320,6 +320,7 @@
320#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ 320#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
321#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ 321#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
322#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ 322#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
323#define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */
323 324
324/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */ 325/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
325#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */ 326#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 09ad88572746..cc8f8fcf9b4a 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -46,7 +46,21 @@ int ftrace_int3_handler(struct pt_regs *regs);
46#endif /* CONFIG_FUNCTION_TRACER */ 46#endif /* CONFIG_FUNCTION_TRACER */
47 47
48 48
49#if !defined(__ASSEMBLY__) && !defined(COMPILE_OFFSETS) 49#ifndef __ASSEMBLY__
50
51#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
52static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
53{
54 /*
55 * Compare the symbol name with the system call name. Skip the
56 * "__x64_sys", "__ia32_sys" or simple "sys" prefix.
57 */
58 return !strcmp(sym + 3, name + 3) ||
59 (!strncmp(sym, "__x64_", 6) && !strcmp(sym + 9, name + 3)) ||
60 (!strncmp(sym, "__ia32_", 7) && !strcmp(sym + 10, name + 3));
61}
62
63#ifndef COMPILE_OFFSETS
50 64
51#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION) 65#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION)
52#include <asm/compat.h> 66#include <asm/compat.h>
@@ -67,6 +81,7 @@ static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
67 return false; 81 return false;
68} 82}
69#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */ 83#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */
70#endif /* !__ASSEMBLY__ && !COMPILE_OFFSETS */ 84#endif /* !COMPILE_OFFSETS */
85#endif /* !__ASSEMBLY__ */
71 86
72#endif /* _ASM_X86_FTRACE_H */ 87#endif /* _ASM_X86_FTRACE_H */
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 404c5fdff859..548d90bbf919 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -34,11 +34,6 @@
34 * (0x80 is the syscall vector, 0x30-0x3f are for ISA) 34 * (0x80 is the syscall vector, 0x30-0x3f are for ISA)
35 */ 35 */
36#define FIRST_EXTERNAL_VECTOR 0x20 36#define FIRST_EXTERNAL_VECTOR 0x20
37/*
38 * We start allocating at 0x21 to spread out vectors evenly between
39 * priority levels. (0x80 is the syscall vector)
40 */
41#define VECTOR_OFFSET_START 1
42 37
43/* 38/*
44 * Reserve the lowest usable vector (and hence lowest priority) 0x20 for 39 * Reserve the lowest usable vector (and hence lowest priority) 0x20 for
@@ -119,8 +114,6 @@
119#define FIRST_SYSTEM_VECTOR NR_VECTORS 114#define FIRST_SYSTEM_VECTOR NR_VECTORS
120#endif 115#endif
121 116
122#define FPU_IRQ 13
123
124/* 117/*
125 * Size the maximum number of interrupts. 118 * Size the maximum number of interrupts.
126 * 119 *
diff --git a/arch/x86/include/asm/jailhouse_para.h b/arch/x86/include/asm/jailhouse_para.h
index b885a961a150..a34897aef2c2 100644
--- a/arch/x86/include/asm/jailhouse_para.h
+++ b/arch/x86/include/asm/jailhouse_para.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2 2
3/* 3/*
4 * Jailhouse paravirt detection 4 * Jailhouse paravirt detection
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 949c977bc4c9..c25775fad4ed 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1013,6 +1013,7 @@ struct kvm_x86_ops {
1013 1013
1014 bool (*has_wbinvd_exit)(void); 1014 bool (*has_wbinvd_exit)(void);
1015 1015
1016 u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
1016 void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset); 1017 void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
1017 1018
1018 void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2); 1019 void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 5f49b4ff0c24..f1633de5a675 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -601,6 +601,11 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
601 601
602#define canon_pgprot(p) __pgprot(massage_pgprot(p)) 602#define canon_pgprot(p) __pgprot(massage_pgprot(p))
603 603
604static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
605{
606 return canon_pgprot(prot);
607}
608
604static inline int is_new_memtype_allowed(u64 paddr, unsigned long size, 609static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
605 enum page_cache_mode pcm, 610 enum page_cache_mode pcm,
606 enum page_cache_mode new_pcm) 611 enum page_cache_mode new_pcm)
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index d5c21a382475..adb47552e6bb 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -105,14 +105,14 @@ extern unsigned int ptrs_per_p4d;
105#define LDT_PGD_ENTRY (pgtable_l5_enabled ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4) 105#define LDT_PGD_ENTRY (pgtable_l5_enabled ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4)
106#define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) 106#define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
107 107
108#define __VMALLOC_BASE_L4 0xffffc90000000000 108#define __VMALLOC_BASE_L4 0xffffc90000000000UL
109#define __VMALLOC_BASE_L5 0xffa0000000000000 109#define __VMALLOC_BASE_L5 0xffa0000000000000UL
110 110
111#define VMALLOC_SIZE_TB_L4 32UL 111#define VMALLOC_SIZE_TB_L4 32UL
112#define VMALLOC_SIZE_TB_L5 12800UL 112#define VMALLOC_SIZE_TB_L5 12800UL
113 113
114#define __VMEMMAP_BASE_L4 0xffffea0000000000 114#define __VMEMMAP_BASE_L4 0xffffea0000000000UL
115#define __VMEMMAP_BASE_L5 0xffd4000000000000 115#define __VMEMMAP_BASE_L5 0xffd4000000000000UL
116 116
117#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT 117#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
118# define VMALLOC_START vmalloc_base 118# define VMALLOC_START vmalloc_base
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 4fa4206029e3..21a114914ba4 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -749,13 +749,11 @@ enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
749extern void enable_sep_cpu(void); 749extern void enable_sep_cpu(void);
750extern int sysenter_setup(void); 750extern int sysenter_setup(void);
751 751
752extern void early_trap_init(void);
753void early_trap_pf_init(void); 752void early_trap_pf_init(void);
754 753
755/* Defined in head.S */ 754/* Defined in head.S */
756extern struct desc_ptr early_gdt_descr; 755extern struct desc_ptr early_gdt_descr;
757 756
758extern void cpu_set_gdt(int);
759extern void switch_to_new_gdt(int); 757extern void switch_to_new_gdt(int);
760extern void load_direct_gdt(int); 758extern void load_direct_gdt(int);
761extern void load_fixmap_gdt(int); 759extern void load_fixmap_gdt(int);
diff --git a/arch/x86/include/uapi/asm/msgbuf.h b/arch/x86/include/uapi/asm/msgbuf.h
index 809134c644a6..90ab9a795b49 100644
--- a/arch/x86/include/uapi/asm/msgbuf.h
+++ b/arch/x86/include/uapi/asm/msgbuf.h
@@ -1 +1,32 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef __ASM_X64_MSGBUF_H
3#define __ASM_X64_MSGBUF_H
4
5#if !defined(__x86_64__) || !defined(__ILP32__)
1#include <asm-generic/msgbuf.h> 6#include <asm-generic/msgbuf.h>
7#else
8/*
9 * The msqid64_ds structure for x86 architecture with x32 ABI.
10 *
11 * On x86-32 and x86-64 we can just use the generic definition, but
12 * x32 uses the same binary layout as x86_64, which is differnet
13 * from other 32-bit architectures.
14 */
15
16struct msqid64_ds {
17 struct ipc64_perm msg_perm;
18 __kernel_time_t msg_stime; /* last msgsnd time */
19 __kernel_time_t msg_rtime; /* last msgrcv time */
20 __kernel_time_t msg_ctime; /* last change time */
21 __kernel_ulong_t msg_cbytes; /* current number of bytes on queue */
22 __kernel_ulong_t msg_qnum; /* number of messages in queue */
23 __kernel_ulong_t msg_qbytes; /* max number of bytes on queue */
24 __kernel_pid_t msg_lspid; /* pid of last msgsnd */
25 __kernel_pid_t msg_lrpid; /* last receive pid */
26 __kernel_ulong_t __unused4;
27 __kernel_ulong_t __unused5;
28};
29
30#endif
31
32#endif /* __ASM_GENERIC_MSGBUF_H */
diff --git a/arch/x86/include/uapi/asm/shmbuf.h b/arch/x86/include/uapi/asm/shmbuf.h
index 83c05fc2de38..644421f3823b 100644
--- a/arch/x86/include/uapi/asm/shmbuf.h
+++ b/arch/x86/include/uapi/asm/shmbuf.h
@@ -1 +1,43 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef __ASM_X86_SHMBUF_H
3#define __ASM_X86_SHMBUF_H
4
5#if !defined(__x86_64__) || !defined(__ILP32__)
1#include <asm-generic/shmbuf.h> 6#include <asm-generic/shmbuf.h>
7#else
8/*
9 * The shmid64_ds structure for x86 architecture with x32 ABI.
10 *
11 * On x86-32 and x86-64 we can just use the generic definition, but
12 * x32 uses the same binary layout as x86_64, which is differnet
13 * from other 32-bit architectures.
14 */
15
16struct shmid64_ds {
17 struct ipc64_perm shm_perm; /* operation perms */
18 size_t shm_segsz; /* size of segment (bytes) */
19 __kernel_time_t shm_atime; /* last attach time */
20 __kernel_time_t shm_dtime; /* last detach time */
21 __kernel_time_t shm_ctime; /* last change time */
22 __kernel_pid_t shm_cpid; /* pid of creator */
23 __kernel_pid_t shm_lpid; /* pid of last operator */
24 __kernel_ulong_t shm_nattch; /* no. of current attaches */
25 __kernel_ulong_t __unused4;
26 __kernel_ulong_t __unused5;
27};
28
29struct shminfo64 {
30 __kernel_ulong_t shmmax;
31 __kernel_ulong_t shmmin;
32 __kernel_ulong_t shmmni;
33 __kernel_ulong_t shmseg;
34 __kernel_ulong_t shmall;
35 __kernel_ulong_t __unused1;
36 __kernel_ulong_t __unused2;
37 __kernel_ulong_t __unused3;
38 __kernel_ulong_t __unused4;
39};
40
41#endif
42
43#endif /* __ASM_X86_SHMBUF_H */
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index dde444f932c1..3b20607d581b 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -215,6 +215,10 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
215 apic_id = processor->local_apic_id; 215 apic_id = processor->local_apic_id;
216 enabled = processor->lapic_flags & ACPI_MADT_ENABLED; 216 enabled = processor->lapic_flags & ACPI_MADT_ENABLED;
217 217
218 /* Ignore invalid ID */
219 if (apic_id == 0xffffffff)
220 return 0;
221
218 /* 222 /*
219 * We need to register disabled CPU as well to permit 223 * We need to register disabled CPU as well to permit
220 * counting disabled CPUs. This allows us to size 224 * counting disabled CPUs. This allows us to size
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index b9693b80fc21..60d1897041da 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -835,6 +835,9 @@ static const struct _tlb_table intel_tlb_table[] = {
835 { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" }, 835 { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" },
836 { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" }, 836 { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" },
837 { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" }, 837 { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" },
838 { 0x6b, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 8-way associative" },
839 { 0x6c, TLB_DATA_2M_4M, 128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
840 { 0x6d, TLB_DATA_1G, 16, " TLB_DATA 1 GByte pages, fully associative" },
838 { 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" }, 841 { 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
839 { 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" }, 842 { 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" },
840 { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" }, 843 { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 10c4fc2c91f8..77e201301528 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -564,14 +564,12 @@ static int __reload_late(void *info)
564 apply_microcode_local(&err); 564 apply_microcode_local(&err);
565 spin_unlock(&update_lock); 565 spin_unlock(&update_lock);
566 566
567 /* siblings return UCODE_OK because their engine got updated already */
567 if (err > UCODE_NFOUND) { 568 if (err > UCODE_NFOUND) {
568 pr_warn("Error reloading microcode on CPU %d\n", cpu); 569 pr_warn("Error reloading microcode on CPU %d\n", cpu);
569 return -1; 570 ret = -1;
570 /* siblings return UCODE_OK because their engine got updated already */
571 } else if (err == UCODE_UPDATED || err == UCODE_OK) { 571 } else if (err == UCODE_UPDATED || err == UCODE_OK) {
572 ret = 1; 572 ret = 1;
573 } else {
574 return ret;
575 } 573 }
576 574
577 /* 575 /*
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 32b8e5724f96..1c2cfa0644aa 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -485,7 +485,6 @@ static void show_saved_mc(void)
485 */ 485 */
486static void save_mc_for_early(u8 *mc, unsigned int size) 486static void save_mc_for_early(u8 *mc, unsigned int size)
487{ 487{
488#ifdef CONFIG_HOTPLUG_CPU
489 /* Synchronization during CPU hotplug. */ 488 /* Synchronization during CPU hotplug. */
490 static DEFINE_MUTEX(x86_cpu_microcode_mutex); 489 static DEFINE_MUTEX(x86_cpu_microcode_mutex);
491 490
@@ -495,7 +494,6 @@ static void save_mc_for_early(u8 *mc, unsigned int size)
495 show_saved_mc(); 494 show_saved_mc();
496 495
497 mutex_unlock(&x86_cpu_microcode_mutex); 496 mutex_unlock(&x86_cpu_microcode_mutex);
498#endif
499} 497}
500 498
501static bool load_builtin_intel_microcode(struct cpio_data *cp) 499static bool load_builtin_intel_microcode(struct cpio_data *cp)
diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c
index fa183a131edc..a15fe0e92cf9 100644
--- a/arch/x86/kernel/jailhouse.c
+++ b/arch/x86/kernel/jailhouse.c
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * Jailhouse paravirt_ops implementation 3 * Jailhouse paravirt_ops implementation
4 * 4 *
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 3182908b7e6c..7326078eaa7a 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -398,11 +398,10 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
398 * little bit simple 398 * little bit simple
399 */ 399 */
400 efi_map_sz = efi_get_runtime_map_size(); 400 efi_map_sz = efi_get_runtime_map_size();
401 efi_map_sz = ALIGN(efi_map_sz, 16);
402 params_cmdline_sz = sizeof(struct boot_params) + cmdline_len + 401 params_cmdline_sz = sizeof(struct boot_params) + cmdline_len +
403 MAX_ELFCOREHDR_STR_LEN; 402 MAX_ELFCOREHDR_STR_LEN;
404 params_cmdline_sz = ALIGN(params_cmdline_sz, 16); 403 params_cmdline_sz = ALIGN(params_cmdline_sz, 16);
405 kbuf.bufsz = params_cmdline_sz + efi_map_sz + 404 kbuf.bufsz = params_cmdline_sz + ALIGN(efi_map_sz, 16) +
406 sizeof(struct setup_data) + 405 sizeof(struct setup_data) +
407 sizeof(struct efi_setup_data); 406 sizeof(struct efi_setup_data);
408 407
@@ -410,7 +409,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
410 if (!params) 409 if (!params)
411 return ERR_PTR(-ENOMEM); 410 return ERR_PTR(-ENOMEM);
412 efi_map_offset = params_cmdline_sz; 411 efi_map_offset = params_cmdline_sz;
413 efi_setup_data_offset = efi_map_offset + efi_map_sz; 412 efi_setup_data_offset = efi_map_offset + ALIGN(efi_map_sz, 16);
414 413
415 /* Copy setup header onto bootparams. Documentation/x86/boot.txt */ 414 /* Copy setup header onto bootparams. Documentation/x86/boot.txt */
416 setup_header_size = 0x0202 + kernel[0x0201] - setup_hdr_offset; 415 setup_header_size = 0x0202 + kernel[0x0201] - setup_hdr_offset;
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index d41d896481b8..c9b14020f4dd 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -166,7 +166,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
166 */ 166 */
167 pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL); 167 pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL);
168 /* Filter out unsuppored __PAGE_KERNEL* bits: */ 168 /* Filter out unsuppored __PAGE_KERNEL* bits: */
169 pgprot_val(pte_prot) |= __supported_pte_mask; 169 pgprot_val(pte_prot) &= __supported_pte_mask;
170 pte = pfn_pte(pfn, pte_prot); 170 pte = pfn_pte(pfn, pte_prot);
171 set_pte_at(mm, va, ptep, pte); 171 set_pte_at(mm, va, ptep, pte);
172 pte_unmap_unlock(ptep, ptl); 172 pte_unmap_unlock(ptep, ptl);
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
deleted file mode 100644
index ac7ea3a8242f..000000000000
--- a/arch/x86/kernel/pci-nommu.c
+++ /dev/null
@@ -1,90 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Fallback functions when the main IOMMU code is not compiled in. This
3 code is roughly equivalent to i386. */
4#include <linux/dma-direct.h>
5#include <linux/scatterlist.h>
6#include <linux/string.h>
7#include <linux/gfp.h>
8#include <linux/pci.h>
9#include <linux/mm.h>
10
11#include <asm/processor.h>
12#include <asm/iommu.h>
13#include <asm/dma.h>
14
15#define NOMMU_MAPPING_ERROR 0
16
17static int
18check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
19{
20 if (hwdev && !dma_capable(hwdev, bus, size)) {
21 if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
22 printk(KERN_ERR
23 "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
24 name, (long long)bus, size,
25 (long long)*hwdev->dma_mask);
26 return 0;
27 }
28 return 1;
29}
30
31static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
32 unsigned long offset, size_t size,
33 enum dma_data_direction dir,
34 unsigned long attrs)
35{
36 dma_addr_t bus = phys_to_dma(dev, page_to_phys(page)) + offset;
37 WARN_ON(size == 0);
38 if (!check_addr("map_single", dev, bus, size))
39 return NOMMU_MAPPING_ERROR;
40 return bus;
41}
42
43/* Map a set of buffers described by scatterlist in streaming
44 * mode for DMA. This is the scatter-gather version of the
45 * above pci_map_single interface. Here the scatter gather list
46 * elements are each tagged with the appropriate dma address
47 * and length. They are obtained via sg_dma_{address,length}(SG).
48 *
49 * NOTE: An implementation may be able to use a smaller number of
50 * DMA address/length pairs than there are SG table elements.
51 * (for example via virtual mapping capabilities)
52 * The routine returns the number of addr/length pairs actually
53 * used, at most nents.
54 *
55 * Device ownership issues as mentioned above for pci_map_single are
56 * the same here.
57 */
58static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
59 int nents, enum dma_data_direction dir,
60 unsigned long attrs)
61{
62 struct scatterlist *s;
63 int i;
64
65 WARN_ON(nents == 0 || sg[0].length == 0);
66
67 for_each_sg(sg, s, nents, i) {
68 BUG_ON(!sg_page(s));
69 s->dma_address = sg_phys(s);
70 if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
71 return 0;
72 s->dma_length = s->length;
73 }
74 return nents;
75}
76
77static int nommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
78{
79 return dma_addr == NOMMU_MAPPING_ERROR;
80}
81
82const struct dma_map_ops nommu_dma_ops = {
83 .alloc = dma_generic_alloc_coherent,
84 .free = dma_generic_free_coherent,
85 .map_sg = nommu_map_sg,
86 .map_page = nommu_map_page,
87 .is_phys = 1,
88 .mapping_error = nommu_mapping_error,
89 .dma_supported = x86_dma_supported,
90};
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 6285697b6e56..5c623dfe39d1 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -50,6 +50,7 @@
50#include <linux/init_ohci1394_dma.h> 50#include <linux/init_ohci1394_dma.h>
51#include <linux/kvm_para.h> 51#include <linux/kvm_para.h>
52#include <linux/dma-contiguous.h> 52#include <linux/dma-contiguous.h>
53#include <xen/xen.h>
53 54
54#include <linux/errno.h> 55#include <linux/errno.h>
55#include <linux/kernel.h> 56#include <linux/kernel.h>
@@ -534,6 +535,11 @@ static void __init reserve_crashkernel(void)
534 high = true; 535 high = true;
535 } 536 }
536 537
538 if (xen_pv_domain()) {
539 pr_info("Ignoring crashkernel for a Xen PV domain\n");
540 return;
541 }
542
537 /* 0 means: find the address automatically */ 543 /* 0 means: find the address automatically */
538 if (crash_base <= 0) { 544 if (crash_base <= 0) {
539 /* 545 /*
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ff99e2b6fc54..0f1cbb042f49 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -77,6 +77,8 @@
77#include <asm/i8259.h> 77#include <asm/i8259.h>
78#include <asm/misc.h> 78#include <asm/misc.h>
79#include <asm/qspinlock.h> 79#include <asm/qspinlock.h>
80#include <asm/intel-family.h>
81#include <asm/cpu_device_id.h>
80 82
81/* Number of siblings per CPU package */ 83/* Number of siblings per CPU package */
82int smp_num_siblings = 1; 84int smp_num_siblings = 1;
@@ -390,15 +392,47 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
390 return false; 392 return false;
391} 393}
392 394
395/*
396 * Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs.
397 *
398 * These are Intel CPUs that enumerate an LLC that is shared by
399 * multiple NUMA nodes. The LLC on these systems is shared for
400 * off-package data access but private to the NUMA node (half
401 * of the package) for on-package access.
402 *
403 * CPUID (the source of the information about the LLC) can only
404 * enumerate the cache as being shared *or* unshared, but not
405 * this particular configuration. The CPU in this case enumerates
406 * the cache to be shared across the entire package (spanning both
407 * NUMA nodes).
408 */
409
410static const struct x86_cpu_id snc_cpu[] = {
411 { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X },
412 {}
413};
414
393static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) 415static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
394{ 416{
395 int cpu1 = c->cpu_index, cpu2 = o->cpu_index; 417 int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
396 418
397 if (per_cpu(cpu_llc_id, cpu1) != BAD_APICID && 419 /* Do not match if we do not have a valid APICID for cpu: */
398 per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) 420 if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
399 return topology_sane(c, o, "llc"); 421 return false;
400 422
401 return false; 423 /* Do not match if LLC id does not match: */
424 if (per_cpu(cpu_llc_id, cpu1) != per_cpu(cpu_llc_id, cpu2))
425 return false;
426
427 /*
428 * Allow the SNC topology without warning. Return of false
429 * means 'c' does not share the LLC of 'o'. This will be
430 * reflected to userspace.
431 */
432 if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu))
433 return false;
434
435 return topology_sane(c, o, "llc");
402} 436}
403 437
404/* 438/*
@@ -456,7 +490,8 @@ static struct sched_domain_topology_level x86_topology[] = {
456 490
457/* 491/*
458 * Set if a package/die has multiple NUMA nodes inside. 492 * Set if a package/die has multiple NUMA nodes inside.
459 * AMD Magny-Cours and Intel Cluster-on-Die have this. 493 * AMD Magny-Cours, Intel Cluster-on-Die, and Intel
494 * Sub-NUMA Clustering have this.
460 */ 495 */
461static bool x86_has_numa_in_package; 496static bool x86_has_numa_in_package;
462 497
@@ -1536,6 +1571,8 @@ static inline void mwait_play_dead(void)
1536 void *mwait_ptr; 1571 void *mwait_ptr;
1537 int i; 1572 int i;
1538 1573
1574 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
1575 return;
1539 if (!this_cpu_has(X86_FEATURE_MWAIT)) 1576 if (!this_cpu_has(X86_FEATURE_MWAIT))
1540 return; 1577 return;
1541 if (!this_cpu_has(X86_FEATURE_CLFLUSH)) 1578 if (!this_cpu_has(X86_FEATURE_CLFLUSH))
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index ef32297ff17e..91e6da48cbb6 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -317,7 +317,7 @@ static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
317 hpet2 -= hpet1; 317 hpet2 -= hpet1;
318 tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD)); 318 tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
319 do_div(tmp, 1000000); 319 do_div(tmp, 1000000);
320 do_div(deltatsc, tmp); 320 deltatsc = div64_u64(deltatsc, tmp);
321 321
322 return (unsigned long) deltatsc; 322 return (unsigned long) deltatsc;
323} 323}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b58787daf9f8..1fc05e428aba 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1423,12 +1423,23 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
1423 seg->base = 0; 1423 seg->base = 0;
1424} 1424}
1425 1425
1426static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
1427{
1428 struct vcpu_svm *svm = to_svm(vcpu);
1429
1430 if (is_guest_mode(vcpu))
1431 return svm->nested.hsave->control.tsc_offset;
1432
1433 return vcpu->arch.tsc_offset;
1434}
1435
1426static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) 1436static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1427{ 1437{
1428 struct vcpu_svm *svm = to_svm(vcpu); 1438 struct vcpu_svm *svm = to_svm(vcpu);
1429 u64 g_tsc_offset = 0; 1439 u64 g_tsc_offset = 0;
1430 1440
1431 if (is_guest_mode(vcpu)) { 1441 if (is_guest_mode(vcpu)) {
1442 /* Write L1's TSC offset. */
1432 g_tsc_offset = svm->vmcb->control.tsc_offset - 1443 g_tsc_offset = svm->vmcb->control.tsc_offset -
1433 svm->nested.hsave->control.tsc_offset; 1444 svm->nested.hsave->control.tsc_offset;
1434 svm->nested.hsave->control.tsc_offset = offset; 1445 svm->nested.hsave->control.tsc_offset = offset;
@@ -3322,6 +3333,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
3322 /* Restore the original control entries */ 3333 /* Restore the original control entries */
3323 copy_vmcb_control_area(vmcb, hsave); 3334 copy_vmcb_control_area(vmcb, hsave);
3324 3335
3336 svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset;
3325 kvm_clear_exception_queue(&svm->vcpu); 3337 kvm_clear_exception_queue(&svm->vcpu);
3326 kvm_clear_interrupt_queue(&svm->vcpu); 3338 kvm_clear_interrupt_queue(&svm->vcpu);
3327 3339
@@ -3482,10 +3494,12 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
3482 /* We don't want to see VMMCALLs from a nested guest */ 3494 /* We don't want to see VMMCALLs from a nested guest */
3483 clr_intercept(svm, INTERCEPT_VMMCALL); 3495 clr_intercept(svm, INTERCEPT_VMMCALL);
3484 3496
3497 svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset;
3498 svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
3499
3485 svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext; 3500 svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
3486 svm->vmcb->control.int_vector = nested_vmcb->control.int_vector; 3501 svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
3487 svm->vmcb->control.int_state = nested_vmcb->control.int_state; 3502 svm->vmcb->control.int_state = nested_vmcb->control.int_state;
3488 svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
3489 svm->vmcb->control.event_inj = nested_vmcb->control.event_inj; 3503 svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
3490 svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err; 3504 svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
3491 3505
@@ -4035,12 +4049,6 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
4035 struct vcpu_svm *svm = to_svm(vcpu); 4049 struct vcpu_svm *svm = to_svm(vcpu);
4036 4050
4037 switch (msr_info->index) { 4051 switch (msr_info->index) {
4038 case MSR_IA32_TSC: {
4039 msr_info->data = svm->vmcb->control.tsc_offset +
4040 kvm_scale_tsc(vcpu, rdtsc());
4041
4042 break;
4043 }
4044 case MSR_STAR: 4052 case MSR_STAR:
4045 msr_info->data = svm->vmcb->save.star; 4053 msr_info->data = svm->vmcb->save.star;
4046 break; 4054 break;
@@ -4193,9 +4201,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
4193 svm->vmcb->save.g_pat = data; 4201 svm->vmcb->save.g_pat = data;
4194 mark_dirty(svm->vmcb, VMCB_NPT); 4202 mark_dirty(svm->vmcb, VMCB_NPT);
4195 break; 4203 break;
4196 case MSR_IA32_TSC:
4197 kvm_write_tsc(vcpu, msr);
4198 break;
4199 case MSR_IA32_SPEC_CTRL: 4204 case MSR_IA32_SPEC_CTRL:
4200 if (!msr->host_initiated && 4205 if (!msr->host_initiated &&
4201 !guest_cpuid_has(vcpu, X86_FEATURE_IBRS)) 4206 !guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
@@ -5265,9 +5270,8 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
5265 } 5270 }
5266 5271
5267 if (!ret && svm) { 5272 if (!ret && svm) {
5268 trace_kvm_pi_irte_update(svm->vcpu.vcpu_id, 5273 trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id,
5269 host_irq, e->gsi, 5274 e->gsi, vcpu_info.vector,
5270 vcpu_info.vector,
5271 vcpu_info.pi_desc_addr, set); 5275 vcpu_info.pi_desc_addr, set);
5272 } 5276 }
5273 5277
@@ -7102,6 +7106,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
7102 7106
7103 .has_wbinvd_exit = svm_has_wbinvd_exit, 7107 .has_wbinvd_exit = svm_has_wbinvd_exit,
7104 7108
7109 .read_l1_tsc_offset = svm_read_l1_tsc_offset,
7105 .write_tsc_offset = svm_write_tsc_offset, 7110 .write_tsc_offset = svm_write_tsc_offset,
7106 7111
7107 .set_tdp_cr3 = set_tdp_cr3, 7112 .set_tdp_cr3 = set_tdp_cr3,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index aafcc9881e88..c7668806163f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2880,18 +2880,15 @@ static void setup_msrs(struct vcpu_vmx *vmx)
2880 vmx_update_msr_bitmap(&vmx->vcpu); 2880 vmx_update_msr_bitmap(&vmx->vcpu);
2881} 2881}
2882 2882
2883/* 2883static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
2884 * reads and returns guest's timestamp counter "register"
2885 * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset
2886 * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3
2887 */
2888static u64 guest_read_tsc(struct kvm_vcpu *vcpu)
2889{ 2884{
2890 u64 host_tsc, tsc_offset; 2885 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2891 2886
2892 host_tsc = rdtsc(); 2887 if (is_guest_mode(vcpu) &&
2893 tsc_offset = vmcs_read64(TSC_OFFSET); 2888 (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
2894 return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset; 2889 return vcpu->arch.tsc_offset - vmcs12->tsc_offset;
2890
2891 return vcpu->arch.tsc_offset;
2895} 2892}
2896 2893
2897/* 2894/*
@@ -3524,9 +3521,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3524#endif 3521#endif
3525 case MSR_EFER: 3522 case MSR_EFER:
3526 return kvm_get_msr_common(vcpu, msr_info); 3523 return kvm_get_msr_common(vcpu, msr_info);
3527 case MSR_IA32_TSC:
3528 msr_info->data = guest_read_tsc(vcpu);
3529 break;
3530 case MSR_IA32_SPEC_CTRL: 3524 case MSR_IA32_SPEC_CTRL:
3531 if (!msr_info->host_initiated && 3525 if (!msr_info->host_initiated &&
3532 !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) && 3526 !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
@@ -3646,9 +3640,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3646 return 1; 3640 return 1;
3647 vmcs_write64(GUEST_BNDCFGS, data); 3641 vmcs_write64(GUEST_BNDCFGS, data);
3648 break; 3642 break;
3649 case MSR_IA32_TSC:
3650 kvm_write_tsc(vcpu, msr_info);
3651 break;
3652 case MSR_IA32_SPEC_CTRL: 3643 case MSR_IA32_SPEC_CTRL:
3653 if (!msr_info->host_initiated && 3644 if (!msr_info->host_initiated &&
3654 !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) && 3645 !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
@@ -4553,12 +4544,6 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
4553 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa); 4544 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
4554} 4545}
4555 4546
4556static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
4557{
4558 if (enable_ept)
4559 vmx_flush_tlb(vcpu, true);
4560}
4561
4562static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) 4547static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
4563{ 4548{
4564 ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; 4549 ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
@@ -9287,7 +9272,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
9287 } else { 9272 } else {
9288 sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; 9273 sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
9289 sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 9274 sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
9290 vmx_flush_tlb_ept_only(vcpu); 9275 vmx_flush_tlb(vcpu, true);
9291 } 9276 }
9292 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); 9277 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
9293 9278
@@ -9315,7 +9300,7 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
9315 !nested_cpu_has2(get_vmcs12(&vmx->vcpu), 9300 !nested_cpu_has2(get_vmcs12(&vmx->vcpu),
9316 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 9301 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
9317 vmcs_write64(APIC_ACCESS_ADDR, hpa); 9302 vmcs_write64(APIC_ACCESS_ADDR, hpa);
9318 vmx_flush_tlb_ept_only(vcpu); 9303 vmx_flush_tlb(vcpu, true);
9319 } 9304 }
9320} 9305}
9321 9306
@@ -10608,6 +10593,16 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
10608 return true; 10593 return true;
10609} 10594}
10610 10595
10596static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
10597 struct vmcs12 *vmcs12)
10598{
10599 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
10600 !page_address_valid(vcpu, vmcs12->apic_access_addr))
10601 return -EINVAL;
10602 else
10603 return 0;
10604}
10605
10611static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, 10606static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
10612 struct vmcs12 *vmcs12) 10607 struct vmcs12 *vmcs12)
10613{ 10608{
@@ -11176,11 +11171,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
11176 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); 11171 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
11177 } 11172 }
11178 11173
11179 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) 11174 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
11180 vmcs_write64(TSC_OFFSET, 11175
11181 vcpu->arch.tsc_offset + vmcs12->tsc_offset);
11182 else
11183 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
11184 if (kvm_has_tsc_control) 11176 if (kvm_has_tsc_control)
11185 decache_tsc_multiplier(vmx); 11177 decache_tsc_multiplier(vmx);
11186 11178
@@ -11222,7 +11214,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
11222 } 11214 }
11223 } else if (nested_cpu_has2(vmcs12, 11215 } else if (nested_cpu_has2(vmcs12,
11224 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 11216 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
11225 vmx_flush_tlb_ept_only(vcpu); 11217 vmx_flush_tlb(vcpu, true);
11226 } 11218 }
11227 11219
11228 /* 11220 /*
@@ -11299,6 +11291,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
11299 if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) 11291 if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12))
11300 return VMXERR_ENTRY_INVALID_CONTROL_FIELD; 11292 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
11301 11293
11294 if (nested_vmx_check_apic_access_controls(vcpu, vmcs12))
11295 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
11296
11302 if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12)) 11297 if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12))
11303 return VMXERR_ENTRY_INVALID_CONTROL_FIELD; 11298 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
11304 11299
@@ -11420,6 +11415,7 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
11420 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 11415 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
11421 u32 msr_entry_idx; 11416 u32 msr_entry_idx;
11422 u32 exit_qual; 11417 u32 exit_qual;
11418 int r;
11423 11419
11424 enter_guest_mode(vcpu); 11420 enter_guest_mode(vcpu);
11425 11421
@@ -11429,26 +11425,21 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
11429 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); 11425 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
11430 vmx_segment_cache_clear(vmx); 11426 vmx_segment_cache_clear(vmx);
11431 11427
11432 if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) { 11428 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
11433 leave_guest_mode(vcpu); 11429 vcpu->arch.tsc_offset += vmcs12->tsc_offset;
11434 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 11430
11435 nested_vmx_entry_failure(vcpu, vmcs12, 11431 r = EXIT_REASON_INVALID_STATE;
11436 EXIT_REASON_INVALID_STATE, exit_qual); 11432 if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual))
11437 return 1; 11433 goto fail;
11438 }
11439 11434
11440 nested_get_vmcs12_pages(vcpu, vmcs12); 11435 nested_get_vmcs12_pages(vcpu, vmcs12);
11441 11436
11437 r = EXIT_REASON_MSR_LOAD_FAIL;
11442 msr_entry_idx = nested_vmx_load_msr(vcpu, 11438 msr_entry_idx = nested_vmx_load_msr(vcpu,
11443 vmcs12->vm_entry_msr_load_addr, 11439 vmcs12->vm_entry_msr_load_addr,
11444 vmcs12->vm_entry_msr_load_count); 11440 vmcs12->vm_entry_msr_load_count);
11445 if (msr_entry_idx) { 11441 if (msr_entry_idx)
11446 leave_guest_mode(vcpu); 11442 goto fail;
11447 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
11448 nested_vmx_entry_failure(vcpu, vmcs12,
11449 EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx);
11450 return 1;
11451 }
11452 11443
11453 /* 11444 /*
11454 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point 11445 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
@@ -11457,6 +11448,14 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
11457 * the success flag) when L2 exits (see nested_vmx_vmexit()). 11448 * the success flag) when L2 exits (see nested_vmx_vmexit()).
11458 */ 11449 */
11459 return 0; 11450 return 0;
11451
11452fail:
11453 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
11454 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
11455 leave_guest_mode(vcpu);
11456 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
11457 nested_vmx_entry_failure(vcpu, vmcs12, r, exit_qual);
11458 return 1;
11460} 11459}
11461 11460
11462/* 11461/*
@@ -12028,6 +12027,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
12028 12027
12029 leave_guest_mode(vcpu); 12028 leave_guest_mode(vcpu);
12030 12029
12030 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
12031 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
12032
12031 if (likely(!vmx->fail)) { 12033 if (likely(!vmx->fail)) {
12032 if (exit_reason == -1) 12034 if (exit_reason == -1)
12033 sync_vmcs12(vcpu, vmcs12); 12035 sync_vmcs12(vcpu, vmcs12);
@@ -12065,7 +12067,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
12065 } else if (!nested_cpu_has_ept(vmcs12) && 12067 } else if (!nested_cpu_has_ept(vmcs12) &&
12066 nested_cpu_has2(vmcs12, 12068 nested_cpu_has2(vmcs12,
12067 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 12069 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
12068 vmx_flush_tlb_ept_only(vcpu); 12070 vmx_flush_tlb(vcpu, true);
12069 } 12071 }
12070 12072
12071 /* This is needed for same reason as it was needed in prepare_vmcs02 */ 12073 /* This is needed for same reason as it was needed in prepare_vmcs02 */
@@ -12224,10 +12226,16 @@ static inline int u64_shl_div_u64(u64 a, unsigned int shift,
12224 12226
12225static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc) 12227static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
12226{ 12228{
12227 struct vcpu_vmx *vmx = to_vmx(vcpu); 12229 struct vcpu_vmx *vmx;
12228 u64 tscl = rdtsc(); 12230 u64 tscl, guest_tscl, delta_tsc;
12229 u64 guest_tscl = kvm_read_l1_tsc(vcpu, tscl); 12231
12230 u64 delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl; 12232 if (kvm_mwait_in_guest(vcpu->kvm))
12233 return -EOPNOTSUPP;
12234
12235 vmx = to_vmx(vcpu);
12236 tscl = rdtsc();
12237 guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
12238 delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
12231 12239
12232 /* Convert to host delta tsc if tsc scaling is enabled */ 12240 /* Convert to host delta tsc if tsc scaling is enabled */
12233 if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio && 12241 if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
@@ -12533,7 +12541,7 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
12533 vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu)); 12541 vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
12534 vcpu_info.vector = irq.vector; 12542 vcpu_info.vector = irq.vector;
12535 12543
12536 trace_kvm_pi_irte_update(vcpu->vcpu_id, host_irq, e->gsi, 12544 trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
12537 vcpu_info.vector, vcpu_info.pi_desc_addr, set); 12545 vcpu_info.vector, vcpu_info.pi_desc_addr, set);
12538 12546
12539 if (set) 12547 if (set)
@@ -12712,6 +12720,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
12712 12720
12713 .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, 12721 .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
12714 12722
12723 .read_l1_tsc_offset = vmx_read_l1_tsc_offset,
12715 .write_tsc_offset = vmx_write_tsc_offset, 12724 .write_tsc_offset = vmx_write_tsc_offset,
12716 12725
12717 .set_tdp_cr3 = vmx_set_cr3, 12726 .set_tdp_cr3 = vmx_set_cr3,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b2ff74b12ec4..51ecd381793b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1490,7 +1490,7 @@ static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
1490 1490
1491static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset) 1491static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
1492{ 1492{
1493 u64 curr_offset = vcpu->arch.tsc_offset; 1493 u64 curr_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
1494 vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset; 1494 vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
1495} 1495}
1496 1496
@@ -1532,7 +1532,9 @@ static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
1532 1532
1533u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) 1533u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
1534{ 1534{
1535 return vcpu->arch.tsc_offset + kvm_scale_tsc(vcpu, host_tsc); 1535 u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
1536
1537 return tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
1536} 1538}
1537EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); 1539EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
1538 1540
@@ -2362,6 +2364,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2362 return 1; 2364 return 1;
2363 vcpu->arch.smbase = data; 2365 vcpu->arch.smbase = data;
2364 break; 2366 break;
2367 case MSR_IA32_TSC:
2368 kvm_write_tsc(vcpu, msr_info);
2369 break;
2365 case MSR_SMI_COUNT: 2370 case MSR_SMI_COUNT:
2366 if (!msr_info->host_initiated) 2371 if (!msr_info->host_initiated)
2367 return 1; 2372 return 1;
@@ -2605,6 +2610,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2605 case MSR_IA32_UCODE_REV: 2610 case MSR_IA32_UCODE_REV:
2606 msr_info->data = vcpu->arch.microcode_version; 2611 msr_info->data = vcpu->arch.microcode_version;
2607 break; 2612 break;
2613 case MSR_IA32_TSC:
2614 msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset;
2615 break;
2608 case MSR_MTRRcap: 2616 case MSR_MTRRcap:
2609 case 0x200 ... 0x2ff: 2617 case 0x200 ... 0x2ff:
2610 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); 2618 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
@@ -2819,7 +2827,8 @@ out:
2819static inline bool kvm_can_mwait_in_guest(void) 2827static inline bool kvm_can_mwait_in_guest(void)
2820{ 2828{
2821 return boot_cpu_has(X86_FEATURE_MWAIT) && 2829 return boot_cpu_has(X86_FEATURE_MWAIT) &&
2822 !boot_cpu_has_bug(X86_BUG_MONITOR); 2830 !boot_cpu_has_bug(X86_BUG_MONITOR) &&
2831 boot_cpu_has(X86_FEATURE_ARAT);
2823} 2832}
2824 2833
2825int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) 2834int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 7d35ce672989..c9492f764902 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -302,13 +302,6 @@ static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
302 __rem; \ 302 __rem; \
303 }) 303 })
304 304
305#define KVM_X86_DISABLE_EXITS_MWAIT (1 << 0)
306#define KVM_X86_DISABLE_EXITS_HTL (1 << 1)
307#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2)
308#define KVM_X86_DISABLE_VALID_EXITS (KVM_X86_DISABLE_EXITS_MWAIT | \
309 KVM_X86_DISABLE_EXITS_HTL | \
310 KVM_X86_DISABLE_EXITS_PAUSE)
311
312static inline bool kvm_mwait_in_guest(struct kvm *kvm) 305static inline bool kvm_mwait_in_guest(struct kvm *kvm)
313{ 306{
314 return kvm->arch.mwait_in_guest; 307 return kvm->arch.mwait_in_guest;
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 62a7e9f65dec..cc7ff5957194 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -18,6 +18,7 @@
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/seq_file.h> 20#include <linux/seq_file.h>
21#include <linux/highmem.h>
21 22
22#include <asm/pgtable.h> 23#include <asm/pgtable.h>
23 24
@@ -334,16 +335,16 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
334 pgprotval_t eff_in, unsigned long P) 335 pgprotval_t eff_in, unsigned long P)
335{ 336{
336 int i; 337 int i;
337 pte_t *start; 338 pte_t *pte;
338 pgprotval_t prot, eff; 339 pgprotval_t prot, eff;
339 340
340 start = (pte_t *)pmd_page_vaddr(addr);
341 for (i = 0; i < PTRS_PER_PTE; i++) { 341 for (i = 0; i < PTRS_PER_PTE; i++) {
342 prot = pte_flags(*start);
343 eff = effective_prot(eff_in, prot);
344 st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT); 342 st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
343 pte = pte_offset_map(&addr, st->current_address);
344 prot = pte_flags(*pte);
345 eff = effective_prot(eff_in, prot);
345 note_page(m, st, __pgprot(prot), eff, 5); 346 note_page(m, st, __pgprot(prot), eff, 5);
346 start++; 347 pte_unmap(pte);
347 } 348 }
348} 349}
349#ifdef CONFIG_KASAN 350#ifdef CONFIG_KASAN
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 0f3d50f4c48c..3bded76e8d5c 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -93,6 +93,18 @@ void arch_report_meminfo(struct seq_file *m)
93static inline void split_page_count(int level) { } 93static inline void split_page_count(int level) { }
94#endif 94#endif
95 95
96static inline int
97within(unsigned long addr, unsigned long start, unsigned long end)
98{
99 return addr >= start && addr < end;
100}
101
102static inline int
103within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
104{
105 return addr >= start && addr <= end;
106}
107
96#ifdef CONFIG_X86_64 108#ifdef CONFIG_X86_64
97 109
98static inline unsigned long highmap_start_pfn(void) 110static inline unsigned long highmap_start_pfn(void)
@@ -106,20 +118,25 @@ static inline unsigned long highmap_end_pfn(void)
106 return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT; 118 return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT;
107} 119}
108 120
109#endif 121static bool __cpa_pfn_in_highmap(unsigned long pfn)
110
111static inline int
112within(unsigned long addr, unsigned long start, unsigned long end)
113{ 122{
114 return addr >= start && addr < end; 123 /*
124 * Kernel text has an alias mapping at a high address, known
125 * here as "highmap".
126 */
127 return within_inclusive(pfn, highmap_start_pfn(), highmap_end_pfn());
115} 128}
116 129
117static inline int 130#else
118within_inclusive(unsigned long addr, unsigned long start, unsigned long end) 131
132static bool __cpa_pfn_in_highmap(unsigned long pfn)
119{ 133{
120 return addr >= start && addr <= end; 134 /* There is no highmap on 32-bit */
135 return false;
121} 136}
122 137
138#endif
139
123/* 140/*
124 * Flushing functions 141 * Flushing functions
125 */ 142 */
@@ -172,7 +189,7 @@ static void __cpa_flush_all(void *arg)
172 189
173static void cpa_flush_all(unsigned long cache) 190static void cpa_flush_all(unsigned long cache)
174{ 191{
175 BUG_ON(irqs_disabled()); 192 BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
176 193
177 on_each_cpu(__cpa_flush_all, (void *) cache, 1); 194 on_each_cpu(__cpa_flush_all, (void *) cache, 1);
178} 195}
@@ -236,7 +253,7 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache,
236 unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */ 253 unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
237#endif 254#endif
238 255
239 BUG_ON(irqs_disabled()); 256 BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
240 257
241 on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1); 258 on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);
242 259
@@ -1183,6 +1200,10 @@ static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
1183 cpa->numpages = 1; 1200 cpa->numpages = 1;
1184 cpa->pfn = __pa(vaddr) >> PAGE_SHIFT; 1201 cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
1185 return 0; 1202 return 0;
1203
1204 } else if (__cpa_pfn_in_highmap(cpa->pfn)) {
1205 /* Faults in the highmap are OK, so do not warn: */
1206 return -EFAULT;
1186 } else { 1207 } else {
1187 WARN(1, KERN_WARNING "CPA: called for zero pte. " 1208 WARN(1, KERN_WARNING "CPA: called for zero pte. "
1188 "vaddr = %lx cpa->vaddr = %lx\n", vaddr, 1209 "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
@@ -1335,8 +1356,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
1335 * to touch the high mapped kernel as well: 1356 * to touch the high mapped kernel as well:
1336 */ 1357 */
1337 if (!within(vaddr, (unsigned long)_text, _brk_end) && 1358 if (!within(vaddr, (unsigned long)_text, _brk_end) &&
1338 within_inclusive(cpa->pfn, highmap_start_pfn(), 1359 __cpa_pfn_in_highmap(cpa->pfn)) {
1339 highmap_end_pfn())) {
1340 unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + 1360 unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
1341 __START_KERNEL_map - phys_base; 1361 __START_KERNEL_map - phys_base;
1342 alias_cpa = *cpa; 1362 alias_cpa = *cpa;
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index f1fd52f449e0..4d418e705878 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -421,6 +421,16 @@ static inline bool pti_kernel_image_global_ok(void)
421 if (boot_cpu_has(X86_FEATURE_K8)) 421 if (boot_cpu_has(X86_FEATURE_K8))
422 return false; 422 return false;
423 423
424 /*
425 * RANDSTRUCT derives its hardening benefits from the
426 * attacker's lack of knowledge about the layout of kernel
427 * data structures. Keep the kernel image non-global in
428 * cases where RANDSTRUCT is in use to help keep the layout a
429 * secret.
430 */
431 if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT))
432 return false;
433
424 return true; 434 return true;
425} 435}
426 436
@@ -430,12 +440,24 @@ static inline bool pti_kernel_image_global_ok(void)
430 */ 440 */
431void pti_clone_kernel_text(void) 441void pti_clone_kernel_text(void)
432{ 442{
443 /*
444 * rodata is part of the kernel image and is normally
445 * readable on the filesystem or on the web. But, do not
446 * clone the areas past rodata, they might contain secrets.
447 */
433 unsigned long start = PFN_ALIGN(_text); 448 unsigned long start = PFN_ALIGN(_text);
434 unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE); 449 unsigned long end = (unsigned long)__end_rodata_hpage_align;
435 450
436 if (!pti_kernel_image_global_ok()) 451 if (!pti_kernel_image_global_ok())
437 return; 452 return;
438 453
454 pr_debug("mapping partial kernel image into user address space\n");
455
456 /*
457 * Note that this will undo _some_ of the work that
458 * pti_set_kernel_image_nonglobal() did to clear the
459 * global bit.
460 */
439 pti_clone_pmds(start, end, _PAGE_RW); 461 pti_clone_pmds(start, end, _PAGE_RW);
440} 462}
441 463
@@ -458,8 +480,6 @@ void pti_set_kernel_image_nonglobal(void)
458 if (pti_kernel_image_global_ok()) 480 if (pti_kernel_image_global_ok())
459 return; 481 return;
460 482
461 pr_debug("set kernel image non-global\n");
462
463 set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT); 483 set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
464} 484}
465 485
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index 48b14b534897..ccf4a49bb065 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -98,7 +98,7 @@ static int set_up_temporary_text_mapping(pgd_t *pgd)
98 set_pgd(pgd + pgd_index(restore_jump_address), new_pgd); 98 set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
99 } else { 99 } else {
100 /* No p4d for 4-level paging: point the pgd to the pud page table */ 100 /* No p4d for 4-level paging: point the pgd to the pud page table */
101 pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot)); 101 pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot));
102 set_pgd(pgd + pgd_index(restore_jump_address), new_pgd); 102 set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
103 } 103 }
104 104
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index f0ecd98509d8..771ae9730ac6 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -4934,8 +4934,16 @@ static void bfq_prepare_request(struct request *rq, struct bio *bio)
4934 bool new_queue = false; 4934 bool new_queue = false;
4935 bool bfqq_already_existing = false, split = false; 4935 bool bfqq_already_existing = false, split = false;
4936 4936
4937 if (!rq->elv.icq) 4937 /*
4938 * Even if we don't have an icq attached, we should still clear
4939 * the scheduler pointers, as they might point to previously
4940 * allocated bic/bfqq structs.
4941 */
4942 if (!rq->elv.icq) {
4943 rq->elv.priv[0] = rq->elv.priv[1] = NULL;
4938 return; 4944 return;
4945 }
4946
4939 bic = icq_to_bic(rq->elv.icq); 4947 bic = icq_to_bic(rq->elv.icq);
4940 4948
4941 spin_lock_irq(&bfqd->lock); 4949 spin_lock_irq(&bfqd->lock);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 1c16694ae145..eb85cb87c40f 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1177,26 +1177,20 @@ int blkcg_init_queue(struct request_queue *q)
1177 1177
1178 preloaded = !radix_tree_preload(GFP_KERNEL); 1178 preloaded = !radix_tree_preload(GFP_KERNEL);
1179 1179
1180 /* 1180 /* Make sure the root blkg exists. */
1181 * Make sure the root blkg exists and count the existing blkgs. As
1182 * @q is bypassing at this point, blkg_lookup_create() can't be
1183 * used. Open code insertion.
1184 */
1185 rcu_read_lock(); 1181 rcu_read_lock();
1186 spin_lock_irq(q->queue_lock); 1182 spin_lock_irq(q->queue_lock);
1187 blkg = blkg_create(&blkcg_root, q, new_blkg); 1183 blkg = blkg_create(&blkcg_root, q, new_blkg);
1184 if (IS_ERR(blkg))
1185 goto err_unlock;
1186 q->root_blkg = blkg;
1187 q->root_rl.blkg = blkg;
1188 spin_unlock_irq(q->queue_lock); 1188 spin_unlock_irq(q->queue_lock);
1189 rcu_read_unlock(); 1189 rcu_read_unlock();
1190 1190
1191 if (preloaded) 1191 if (preloaded)
1192 radix_tree_preload_end(); 1192 radix_tree_preload_end();
1193 1193
1194 if (IS_ERR(blkg))
1195 return PTR_ERR(blkg);
1196
1197 q->root_blkg = blkg;
1198 q->root_rl.blkg = blkg;
1199
1200 ret = blk_throtl_init(q); 1194 ret = blk_throtl_init(q);
1201 if (ret) { 1195 if (ret) {
1202 spin_lock_irq(q->queue_lock); 1196 spin_lock_irq(q->queue_lock);
@@ -1204,6 +1198,13 @@ int blkcg_init_queue(struct request_queue *q)
1204 spin_unlock_irq(q->queue_lock); 1198 spin_unlock_irq(q->queue_lock);
1205 } 1199 }
1206 return ret; 1200 return ret;
1201
1202err_unlock:
1203 spin_unlock_irq(q->queue_lock);
1204 rcu_read_unlock();
1205 if (preloaded)
1206 radix_tree_preload_end();
1207 return PTR_ERR(blkg);
1207} 1208}
1208 1209
1209/** 1210/**
@@ -1410,9 +1411,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
1410 __clear_bit(pol->plid, q->blkcg_pols); 1411 __clear_bit(pol->plid, q->blkcg_pols);
1411 1412
1412 list_for_each_entry(blkg, &q->blkg_list, q_node) { 1413 list_for_each_entry(blkg, &q->blkg_list, q_node) {
1413 /* grab blkcg lock too while removing @pd from @blkg */
1414 spin_lock(&blkg->blkcg->lock);
1415
1416 if (blkg->pd[pol->plid]) { 1414 if (blkg->pd[pol->plid]) {
1417 if (!blkg->pd[pol->plid]->offline && 1415 if (!blkg->pd[pol->plid]->offline &&
1418 pol->pd_offline_fn) { 1416 pol->pd_offline_fn) {
@@ -1422,8 +1420,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
1422 pol->pd_free_fn(blkg->pd[pol->plid]); 1420 pol->pd_free_fn(blkg->pd[pol->plid]);
1423 blkg->pd[pol->plid] = NULL; 1421 blkg->pd[pol->plid] = NULL;
1424 } 1422 }
1425
1426 spin_unlock(&blkg->blkcg->lock);
1427 } 1423 }
1428 1424
1429 spin_unlock_irq(q->queue_lock); 1425 spin_unlock_irq(q->queue_lock);
diff --git a/block/blk-core.c b/block/blk-core.c
index 806ce2442819..85909b431eb0 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -201,6 +201,10 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
201 rq->part = NULL; 201 rq->part = NULL;
202 seqcount_init(&rq->gstate_seq); 202 seqcount_init(&rq->gstate_seq);
203 u64_stats_init(&rq->aborted_gstate_sync); 203 u64_stats_init(&rq->aborted_gstate_sync);
204 /*
205 * See comment of blk_mq_init_request
206 */
207 WRITE_ONCE(rq->gstate, MQ_RQ_GEN_INC);
204} 208}
205EXPORT_SYMBOL(blk_rq_init); 209EXPORT_SYMBOL(blk_rq_init);
206 210
@@ -915,7 +919,6 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
915 919
916 while (true) { 920 while (true) {
917 bool success = false; 921 bool success = false;
918 int ret;
919 922
920 rcu_read_lock(); 923 rcu_read_lock();
921 if (percpu_ref_tryget_live(&q->q_usage_counter)) { 924 if (percpu_ref_tryget_live(&q->q_usage_counter)) {
@@ -947,14 +950,12 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
947 */ 950 */
948 smp_rmb(); 951 smp_rmb();
949 952
950 ret = wait_event_interruptible(q->mq_freeze_wq, 953 wait_event(q->mq_freeze_wq,
951 (atomic_read(&q->mq_freeze_depth) == 0 && 954 (atomic_read(&q->mq_freeze_depth) == 0 &&
952 (preempt || !blk_queue_preempt_only(q))) || 955 (preempt || !blk_queue_preempt_only(q))) ||
953 blk_queue_dying(q)); 956 blk_queue_dying(q));
954 if (blk_queue_dying(q)) 957 if (blk_queue_dying(q))
955 return -ENODEV; 958 return -ENODEV;
956 if (ret)
957 return ret;
958 } 959 }
959} 960}
960 961
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0dc9e341c2a7..c3621453ad87 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2042,6 +2042,13 @@ static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
2042 2042
2043 seqcount_init(&rq->gstate_seq); 2043 seqcount_init(&rq->gstate_seq);
2044 u64_stats_init(&rq->aborted_gstate_sync); 2044 u64_stats_init(&rq->aborted_gstate_sync);
2045 /*
2046 * start gstate with gen 1 instead of 0, otherwise it will be equal
2047 * to aborted_gstate, and be identified timed out by
2048 * blk_mq_terminate_expired.
2049 */
2050 WRITE_ONCE(rq->gstate, MQ_RQ_GEN_INC);
2051
2045 return 0; 2052 return 0;
2046} 2053}
2047 2054
@@ -2329,7 +2336,7 @@ static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2329 2336
2330static void blk_mq_map_swqueue(struct request_queue *q) 2337static void blk_mq_map_swqueue(struct request_queue *q)
2331{ 2338{
2332 unsigned int i; 2339 unsigned int i, hctx_idx;
2333 struct blk_mq_hw_ctx *hctx; 2340 struct blk_mq_hw_ctx *hctx;
2334 struct blk_mq_ctx *ctx; 2341 struct blk_mq_ctx *ctx;
2335 struct blk_mq_tag_set *set = q->tag_set; 2342 struct blk_mq_tag_set *set = q->tag_set;
@@ -2346,8 +2353,23 @@ static void blk_mq_map_swqueue(struct request_queue *q)
2346 2353
2347 /* 2354 /*
2348 * Map software to hardware queues. 2355 * Map software to hardware queues.
2356 *
2357 * If the cpu isn't present, the cpu is mapped to first hctx.
2349 */ 2358 */
2350 for_each_possible_cpu(i) { 2359 for_each_possible_cpu(i) {
2360 hctx_idx = q->mq_map[i];
2361 /* unmapped hw queue can be remapped after CPU topo changed */
2362 if (!set->tags[hctx_idx] &&
2363 !__blk_mq_alloc_rq_map(set, hctx_idx)) {
2364 /*
2365 * If tags initialization fail for some hctx,
2366 * that hctx won't be brought online. In this
2367 * case, remap the current ctx to hctx[0] which
2368 * is guaranteed to always have tags allocated
2369 */
2370 q->mq_map[i] = 0;
2371 }
2372
2351 ctx = per_cpu_ptr(q->queue_ctx, i); 2373 ctx = per_cpu_ptr(q->queue_ctx, i);
2352 hctx = blk_mq_map_queue(q, i); 2374 hctx = blk_mq_map_queue(q, i);
2353 2375
@@ -2359,8 +2381,21 @@ static void blk_mq_map_swqueue(struct request_queue *q)
2359 mutex_unlock(&q->sysfs_lock); 2381 mutex_unlock(&q->sysfs_lock);
2360 2382
2361 queue_for_each_hw_ctx(q, hctx, i) { 2383 queue_for_each_hw_ctx(q, hctx, i) {
2362 /* every hctx should get mapped by at least one CPU */ 2384 /*
2363 WARN_ON(!hctx->nr_ctx); 2385 * If no software queues are mapped to this hardware queue,
2386 * disable it and free the request entries.
2387 */
2388 if (!hctx->nr_ctx) {
2389 /* Never unmap queue 0. We need it as a
2390 * fallback in case of a new remap fails
2391 * allocation
2392 */
2393 if (i && set->tags[i])
2394 blk_mq_free_map_and_requests(set, i);
2395
2396 hctx->tags = NULL;
2397 continue;
2398 }
2364 2399
2365 hctx->tags = set->tags[i]; 2400 hctx->tags = set->tags[i];
2366 WARN_ON(!hctx->tags); 2401 WARN_ON(!hctx->tags);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 88c558f71819..89b5cd3a6c70 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -7,6 +7,9 @@
7 7
8struct blk_mq_tag_set; 8struct blk_mq_tag_set;
9 9
10/**
11 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
12 */
10struct blk_mq_ctx { 13struct blk_mq_ctx {
11 struct { 14 struct {
12 spinlock_t lock; 15 spinlock_t lock;
diff --git a/crypto/api.c b/crypto/api.c
index 1d5290c67108..0ee632bba064 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -204,9 +204,14 @@ static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
204 204
205 down_read(&crypto_alg_sem); 205 down_read(&crypto_alg_sem);
206 alg = __crypto_alg_lookup(name, type | test, mask | test); 206 alg = __crypto_alg_lookup(name, type | test, mask | test);
207 if (!alg && test) 207 if (!alg && test) {
208 alg = __crypto_alg_lookup(name, type, mask) ? 208 alg = __crypto_alg_lookup(name, type, mask);
209 ERR_PTR(-ELIBBAD) : NULL; 209 if (alg && !crypto_is_larval(alg)) {
210 /* Test failed */
211 crypto_mod_put(alg);
212 alg = ERR_PTR(-ELIBBAD);
213 }
214 }
210 up_read(&crypto_alg_sem); 215 up_read(&crypto_alg_sem);
211 216
212 return alg; 217 return alg;
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 4faa2781c964..466a112a4446 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1134,8 +1134,10 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
1134 if (!drbg) 1134 if (!drbg)
1135 return; 1135 return;
1136 kzfree(drbg->Vbuf); 1136 kzfree(drbg->Vbuf);
1137 drbg->Vbuf = NULL;
1137 drbg->V = NULL; 1138 drbg->V = NULL;
1138 kzfree(drbg->Cbuf); 1139 kzfree(drbg->Cbuf);
1140 drbg->Cbuf = NULL;
1139 drbg->C = NULL; 1141 drbg->C = NULL;
1140 kzfree(drbg->scratchpadbuf); 1142 kzfree(drbg->scratchpadbuf);
1141 drbg->scratchpadbuf = NULL; 1143 drbg->scratchpadbuf = NULL;
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 76fb96966f7b..2f2e737be0f8 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -2123,6 +2123,25 @@ static int __init intel_opregion_present(void)
2123 return opregion; 2123 return opregion;
2124} 2124}
2125 2125
2126static bool dmi_is_desktop(void)
2127{
2128 const char *chassis_type;
2129
2130 chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
2131 if (!chassis_type)
2132 return false;
2133
2134 if (!strcmp(chassis_type, "3") || /* 3: Desktop */
2135 !strcmp(chassis_type, "4") || /* 4: Low Profile Desktop */
2136 !strcmp(chassis_type, "5") || /* 5: Pizza Box */
2137 !strcmp(chassis_type, "6") || /* 6: Mini Tower */
2138 !strcmp(chassis_type, "7") || /* 7: Tower */
2139 !strcmp(chassis_type, "11")) /* 11: Main Server Chassis */
2140 return true;
2141
2142 return false;
2143}
2144
2126int acpi_video_register(void) 2145int acpi_video_register(void)
2127{ 2146{
2128 int ret = 0; 2147 int ret = 0;
@@ -2143,8 +2162,12 @@ int acpi_video_register(void)
2143 * win8 ready (where we also prefer the native backlight driver, so 2162 * win8 ready (where we also prefer the native backlight driver, so
2144 * normally the acpi_video code should not register there anyways). 2163 * normally the acpi_video code should not register there anyways).
2145 */ 2164 */
2146 if (only_lcd == -1) 2165 if (only_lcd == -1) {
2147 only_lcd = acpi_osi_is_win8(); 2166 if (dmi_is_desktop() && acpi_osi_is_win8())
2167 only_lcd = true;
2168 else
2169 only_lcd = false;
2170 }
2148 2171
2149 dmi_check_system(video_dmi_table); 2172 dmi_check_system(video_dmi_table);
2150 2173
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index ebb626ffb5fa..4bde16fb97d8 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -12,23 +12,64 @@
12#define pr_fmt(fmt) "ACPI: watchdog: " fmt 12#define pr_fmt(fmt) "ACPI: watchdog: " fmt
13 13
14#include <linux/acpi.h> 14#include <linux/acpi.h>
15#include <linux/dmi.h>
15#include <linux/ioport.h> 16#include <linux/ioport.h>
16#include <linux/platform_device.h> 17#include <linux/platform_device.h>
17 18
18#include "internal.h" 19#include "internal.h"
19 20
21static const struct dmi_system_id acpi_watchdog_skip[] = {
22 {
23 /*
24 * On Lenovo Z50-70 there are two issues with the WDAT
25 * table. First some of the instructions use RTC SRAM
26 * to store persistent information. This does not work well
27 * with Linux RTC driver. Second, more important thing is
28 * that the instructions do not actually reset the system.
29 *
30 * On this particular system iTCO_wdt seems to work just
31 * fine so we prefer that over WDAT for now.
32 *
33 * See also https://bugzilla.kernel.org/show_bug.cgi?id=199033.
34 */
35 .ident = "Lenovo Z50-70",
36 .matches = {
37 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
38 DMI_MATCH(DMI_PRODUCT_NAME, "20354"),
39 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Z50-70"),
40 },
41 },
42 {}
43};
44
45static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
46{
47 const struct acpi_table_wdat *wdat = NULL;
48 acpi_status status;
49
50 if (acpi_disabled)
51 return NULL;
52
53 if (dmi_check_system(acpi_watchdog_skip))
54 return NULL;
55
56 status = acpi_get_table(ACPI_SIG_WDAT, 0,
57 (struct acpi_table_header **)&wdat);
58 if (ACPI_FAILURE(status)) {
59 /* It is fine if there is no WDAT */
60 return NULL;
61 }
62
63 return wdat;
64}
65
20/** 66/**
21 * Returns true if this system should prefer ACPI based watchdog instead of 67 * Returns true if this system should prefer ACPI based watchdog instead of
22 * the native one (which are typically the same hardware). 68 * the native one (which are typically the same hardware).
23 */ 69 */
24bool acpi_has_watchdog(void) 70bool acpi_has_watchdog(void)
25{ 71{
26 struct acpi_table_header hdr; 72 return !!acpi_watchdog_get_wdat();
27
28 if (acpi_disabled)
29 return false;
30
31 return ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_WDAT, 0, &hdr));
32} 73}
33EXPORT_SYMBOL_GPL(acpi_has_watchdog); 74EXPORT_SYMBOL_GPL(acpi_has_watchdog);
34 75
@@ -41,12 +82,10 @@ void __init acpi_watchdog_init(void)
41 struct platform_device *pdev; 82 struct platform_device *pdev;
42 struct resource *resources; 83 struct resource *resources;
43 size_t nresources = 0; 84 size_t nresources = 0;
44 acpi_status status;
45 int i; 85 int i;
46 86
47 status = acpi_get_table(ACPI_SIG_WDAT, 0, 87 wdat = acpi_watchdog_get_wdat();
48 (struct acpi_table_header **)&wdat); 88 if (!wdat) {
49 if (ACPI_FAILURE(status)) {
50 /* It is fine if there is no WDAT */ 89 /* It is fine if there is no WDAT */
51 return; 90 return;
52 } 91 }
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index e1eee7a60fad..f1cc4f9d31cd 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -635,4 +635,26 @@ module_param_call(lid_init_state,
635 NULL, 0644); 635 NULL, 0644);
636MODULE_PARM_DESC(lid_init_state, "Behavior for reporting LID initial state"); 636MODULE_PARM_DESC(lid_init_state, "Behavior for reporting LID initial state");
637 637
638module_acpi_driver(acpi_button_driver); 638static int acpi_button_register_driver(struct acpi_driver *driver)
639{
640 /*
641 * Modules such as nouveau.ko and i915.ko have a link time dependency
642 * on acpi_lid_open(), and would therefore not be loadable on ACPI
643 * capable kernels booted in non-ACPI mode if the return value of
644 * acpi_bus_register_driver() is returned from here with ACPI disabled
645 * when this driver is built as a module.
646 */
647 if (acpi_disabled)
648 return 0;
649
650 return acpi_bus_register_driver(driver);
651}
652
653static void acpi_button_unregister_driver(struct acpi_driver *driver)
654{
655 if (!acpi_disabled)
656 acpi_bus_unregister_driver(driver);
657}
658
659module_driver(acpi_button_driver, acpi_button_register_driver,
660 acpi_button_unregister_driver);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index cc234e6a6297..970dd87d347c 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2166,10 +2166,10 @@ int __init acpi_scan_init(void)
2166 acpi_cmos_rtc_init(); 2166 acpi_cmos_rtc_init();
2167 acpi_container_init(); 2167 acpi_container_init();
2168 acpi_memory_hotplug_init(); 2168 acpi_memory_hotplug_init();
2169 acpi_watchdog_init();
2169 acpi_pnp_init(); 2170 acpi_pnp_init();
2170 acpi_int340x_thermal_init(); 2171 acpi_int340x_thermal_init();
2171 acpi_amba_init(); 2172 acpi_amba_init();
2172 acpi_watchdog_init();
2173 acpi_init_lpit(); 2173 acpi_init_lpit();
2174 2174
2175 acpi_scan_add_handler(&generic_device_handler); 2175 acpi_scan_add_handler(&generic_device_handler);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 99a1a650326d..974e58457697 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -364,6 +364,19 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
364 DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"), 364 DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
365 }, 365 },
366 }, 366 },
367 /*
368 * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
369 * the Low Power S0 Idle firmware interface (see
370 * https://bugzilla.kernel.org/show_bug.cgi?id=199057).
371 */
372 {
373 .callback = init_no_lps0,
374 .ident = "ThinkPad X1 Tablet(2016)",
375 .matches = {
376 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
377 DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
378 },
379 },
367 {}, 380 {},
368}; 381};
369 382
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 594c228d2f02..4a3ac31c07d0 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -69,11 +69,12 @@ static ssize_t driver_override_show(struct device *_dev,
69 struct device_attribute *attr, char *buf) 69 struct device_attribute *attr, char *buf)
70{ 70{
71 struct amba_device *dev = to_amba_device(_dev); 71 struct amba_device *dev = to_amba_device(_dev);
72 ssize_t len;
72 73
73 if (!dev->driver_override) 74 device_lock(_dev);
74 return 0; 75 len = sprintf(buf, "%s\n", dev->driver_override);
75 76 device_unlock(_dev);
76 return sprintf(buf, "%s\n", dev->driver_override); 77 return len;
77} 78}
78 79
79static ssize_t driver_override_store(struct device *_dev, 80static ssize_t driver_override_store(struct device *_dev,
@@ -81,9 +82,10 @@ static ssize_t driver_override_store(struct device *_dev,
81 const char *buf, size_t count) 82 const char *buf, size_t count)
82{ 83{
83 struct amba_device *dev = to_amba_device(_dev); 84 struct amba_device *dev = to_amba_device(_dev);
84 char *driver_override, *old = dev->driver_override, *cp; 85 char *driver_override, *old, *cp;
85 86
86 if (count > PATH_MAX) 87 /* We need to keep extra room for a newline */
88 if (count >= (PAGE_SIZE - 1))
87 return -EINVAL; 89 return -EINVAL;
88 90
89 driver_override = kstrndup(buf, count, GFP_KERNEL); 91 driver_override = kstrndup(buf, count, GFP_KERNEL);
@@ -94,12 +96,15 @@ static ssize_t driver_override_store(struct device *_dev,
94 if (cp) 96 if (cp)
95 *cp = '\0'; 97 *cp = '\0';
96 98
99 device_lock(_dev);
100 old = dev->driver_override;
97 if (strlen(driver_override)) { 101 if (strlen(driver_override)) {
98 dev->driver_override = driver_override; 102 dev->driver_override = driver_override;
99 } else { 103 } else {
100 kfree(driver_override); 104 kfree(driver_override);
101 dev->driver_override = NULL; 105 dev->driver_override = NULL;
102 } 106 }
107 device_unlock(_dev);
103 108
104 kfree(old); 109 kfree(old);
105 110
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 764b63a5aade..e578eee31589 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2839,6 +2839,14 @@ static void binder_transaction(struct binder_proc *proc,
2839 else 2839 else
2840 return_error = BR_DEAD_REPLY; 2840 return_error = BR_DEAD_REPLY;
2841 mutex_unlock(&context->context_mgr_node_lock); 2841 mutex_unlock(&context->context_mgr_node_lock);
2842 if (target_node && target_proc == proc) {
2843 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2844 proc->pid, thread->pid);
2845 return_error = BR_FAILED_REPLY;
2846 return_error_param = -EINVAL;
2847 return_error_line = __LINE__;
2848 goto err_invalid_target_handle;
2849 }
2842 } 2850 }
2843 if (!target_node) { 2851 if (!target_node) {
2844 /* 2852 /*
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 44abb8a0a5e5..be076606d30e 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -671,7 +671,7 @@ static void ia_tx_poll (IADEV *iadev) {
671 if ((vcc->pop) && (skb1->len != 0)) 671 if ((vcc->pop) && (skb1->len != 0))
672 { 672 {
673 vcc->pop(vcc, skb1); 673 vcc->pop(vcc, skb1);
674 IF_EVENT(printk("Tansmit Done - skb 0x%lx return\n", 674 IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
675 (long)skb1);) 675 (long)skb1);)
676 } 676 }
677 else 677 else
@@ -1665,7 +1665,7 @@ static void tx_intr(struct atm_dev *dev)
1665 status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG); 1665 status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
1666 if (status & TRANSMIT_DONE){ 1666 if (status & TRANSMIT_DONE){
1667 1667
1668 IF_EVENT(printk("Tansmit Done Intr logic run\n");) 1668 IF_EVENT(printk("Transmit Done Intr logic run\n");)
1669 spin_lock_irqsave(&iadev->tx_lock, flags); 1669 spin_lock_irqsave(&iadev->tx_lock, flags);
1670 ia_tx_poll(iadev); 1670 ia_tx_poll(iadev);
1671 spin_unlock_irqrestore(&iadev->tx_lock, flags); 1671 spin_unlock_irqrestore(&iadev->tx_lock, flags);
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 1e6396bb807b..597d40893862 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -312,8 +312,9 @@ static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
312 * This checks whether the memory was allocated from the per-device 312 * This checks whether the memory was allocated from the per-device
313 * coherent memory pool and if so, maps that memory to the provided vma. 313 * coherent memory pool and if so, maps that memory to the provided vma.
314 * 314 *
315 * Returns 1 if we correctly mapped the memory, or 0 if the caller should 315 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
316 * proceed with mapping memory from generic pools. 316 * should return @ret, or 0 if they should proceed with mapping memory from
317 * generic areas.
317 */ 318 */
318int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, 319int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
319 void *vaddr, size_t size, int *ret) 320 void *vaddr, size_t size, int *ret)
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 3b118353ea17..d82566d6e237 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -226,7 +226,6 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
226#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP 226#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
227 unsigned long user_count = vma_pages(vma); 227 unsigned long user_count = vma_pages(vma);
228 unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; 228 unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
229 unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
230 unsigned long off = vma->vm_pgoff; 229 unsigned long off = vma->vm_pgoff;
231 230
232 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 231 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -234,12 +233,11 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
234 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) 233 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
235 return ret; 234 return ret;
236 235
237 if (off < count && user_count <= (count - off)) { 236 if (off < count && user_count <= (count - off))
238 ret = remap_pfn_range(vma, vma->vm_start, 237 ret = remap_pfn_range(vma, vma->vm_start,
239 pfn + off, 238 page_to_pfn(virt_to_page(cpu_addr)) + off,
240 user_count << PAGE_SHIFT, 239 user_count << PAGE_SHIFT,
241 vma->vm_page_prot); 240 vma->vm_page_prot);
242 }
243#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */ 241#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
244 242
245 return ret; 243 return ret;
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index 31b5015b59fe..358354148dec 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -537,8 +537,8 @@ exit:
537} 537}
538 538
539/** 539/**
540 * fw_load_sysfs_fallback - load a firmware via the syfs fallback mechanism 540 * fw_load_sysfs_fallback - load a firmware via the sysfs fallback mechanism
541 * @fw_sysfs: firmware syfs information for the firmware to load 541 * @fw_sysfs: firmware sysfs information for the firmware to load
542 * @opt_flags: flags of options, FW_OPT_* 542 * @opt_flags: flags of options, FW_OPT_*
543 * @timeout: timeout to wait for the load 543 * @timeout: timeout to wait for the load
544 * 544 *
diff --git a/drivers/base/firmware_loader/fallback.h b/drivers/base/firmware_loader/fallback.h
index dfebc644ed35..f8255670a663 100644
--- a/drivers/base/firmware_loader/fallback.h
+++ b/drivers/base/firmware_loader/fallback.h
@@ -6,7 +6,7 @@
6#include <linux/device.h> 6#include <linux/device.h>
7 7
8/** 8/**
9 * struct firmware_fallback_config - firmware fallback configuratioon settings 9 * struct firmware_fallback_config - firmware fallback configuration settings
10 * 10 *
11 * Helps describe and fine tune the fallback mechanism. 11 * Helps describe and fine tune the fallback mechanism.
12 * 12 *
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c9d04497a415..5d4e31655d96 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -451,25 +451,47 @@ static int lo_req_flush(struct loop_device *lo, struct request *rq)
451static void lo_complete_rq(struct request *rq) 451static void lo_complete_rq(struct request *rq)
452{ 452{
453 struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); 453 struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
454 blk_status_t ret = BLK_STS_OK;
454 455
455 if (unlikely(req_op(cmd->rq) == REQ_OP_READ && cmd->use_aio && 456 if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
456 cmd->ret >= 0 && cmd->ret < blk_rq_bytes(cmd->rq))) { 457 req_op(rq) != REQ_OP_READ) {
457 struct bio *bio = cmd->rq->bio; 458 if (cmd->ret < 0)
458 459 ret = BLK_STS_IOERR;
459 bio_advance(bio, cmd->ret); 460 goto end_io;
460 zero_fill_bio(bio);
461 } 461 }
462 462
463 blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK); 463 /*
464 * Short READ - if we got some data, advance our request and
465 * retry it. If we got no data, end the rest with EIO.
466 */
467 if (cmd->ret) {
468 blk_update_request(rq, BLK_STS_OK, cmd->ret);
469 cmd->ret = 0;
470 blk_mq_requeue_request(rq, true);
471 } else {
472 if (cmd->use_aio) {
473 struct bio *bio = rq->bio;
474
475 while (bio) {
476 zero_fill_bio(bio);
477 bio = bio->bi_next;
478 }
479 }
480 ret = BLK_STS_IOERR;
481end_io:
482 blk_mq_end_request(rq, ret);
483 }
464} 484}
465 485
466static void lo_rw_aio_do_completion(struct loop_cmd *cmd) 486static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
467{ 487{
488 struct request *rq = blk_mq_rq_from_pdu(cmd);
489
468 if (!atomic_dec_and_test(&cmd->ref)) 490 if (!atomic_dec_and_test(&cmd->ref))
469 return; 491 return;
470 kfree(cmd->bvec); 492 kfree(cmd->bvec);
471 cmd->bvec = NULL; 493 cmd->bvec = NULL;
472 blk_mq_complete_request(cmd->rq); 494 blk_mq_complete_request(rq);
473} 495}
474 496
475static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2) 497static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
@@ -487,7 +509,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
487{ 509{
488 struct iov_iter iter; 510 struct iov_iter iter;
489 struct bio_vec *bvec; 511 struct bio_vec *bvec;
490 struct request *rq = cmd->rq; 512 struct request *rq = blk_mq_rq_from_pdu(cmd);
491 struct bio *bio = rq->bio; 513 struct bio *bio = rq->bio;
492 struct file *file = lo->lo_backing_file; 514 struct file *file = lo->lo_backing_file;
493 unsigned int offset; 515 unsigned int offset;
@@ -1702,15 +1724,16 @@ EXPORT_SYMBOL(loop_unregister_transfer);
1702static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx, 1724static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
1703 const struct blk_mq_queue_data *bd) 1725 const struct blk_mq_queue_data *bd)
1704{ 1726{
1705 struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); 1727 struct request *rq = bd->rq;
1706 struct loop_device *lo = cmd->rq->q->queuedata; 1728 struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
1729 struct loop_device *lo = rq->q->queuedata;
1707 1730
1708 blk_mq_start_request(bd->rq); 1731 blk_mq_start_request(rq);
1709 1732
1710 if (lo->lo_state != Lo_bound) 1733 if (lo->lo_state != Lo_bound)
1711 return BLK_STS_IOERR; 1734 return BLK_STS_IOERR;
1712 1735
1713 switch (req_op(cmd->rq)) { 1736 switch (req_op(rq)) {
1714 case REQ_OP_FLUSH: 1737 case REQ_OP_FLUSH:
1715 case REQ_OP_DISCARD: 1738 case REQ_OP_DISCARD:
1716 case REQ_OP_WRITE_ZEROES: 1739 case REQ_OP_WRITE_ZEROES:
@@ -1723,8 +1746,8 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
1723 1746
1724 /* always use the first bio's css */ 1747 /* always use the first bio's css */
1725#ifdef CONFIG_BLK_CGROUP 1748#ifdef CONFIG_BLK_CGROUP
1726 if (cmd->use_aio && cmd->rq->bio && cmd->rq->bio->bi_css) { 1749 if (cmd->use_aio && rq->bio && rq->bio->bi_css) {
1727 cmd->css = cmd->rq->bio->bi_css; 1750 cmd->css = rq->bio->bi_css;
1728 css_get(cmd->css); 1751 css_get(cmd->css);
1729 } else 1752 } else
1730#endif 1753#endif
@@ -1736,8 +1759,9 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
1736 1759
1737static void loop_handle_cmd(struct loop_cmd *cmd) 1760static void loop_handle_cmd(struct loop_cmd *cmd)
1738{ 1761{
1739 const bool write = op_is_write(req_op(cmd->rq)); 1762 struct request *rq = blk_mq_rq_from_pdu(cmd);
1740 struct loop_device *lo = cmd->rq->q->queuedata; 1763 const bool write = op_is_write(req_op(rq));
1764 struct loop_device *lo = rq->q->queuedata;
1741 int ret = 0; 1765 int ret = 0;
1742 1766
1743 if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { 1767 if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
@@ -1745,12 +1769,12 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
1745 goto failed; 1769 goto failed;
1746 } 1770 }
1747 1771
1748 ret = do_req_filebacked(lo, cmd->rq); 1772 ret = do_req_filebacked(lo, rq);
1749 failed: 1773 failed:
1750 /* complete non-aio request */ 1774 /* complete non-aio request */
1751 if (!cmd->use_aio || ret) { 1775 if (!cmd->use_aio || ret) {
1752 cmd->ret = ret ? -EIO : 0; 1776 cmd->ret = ret ? -EIO : 0;
1753 blk_mq_complete_request(cmd->rq); 1777 blk_mq_complete_request(rq);
1754 } 1778 }
1755} 1779}
1756 1780
@@ -1767,9 +1791,7 @@ static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
1767{ 1791{
1768 struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); 1792 struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
1769 1793
1770 cmd->rq = rq;
1771 kthread_init_work(&cmd->work, loop_queue_work); 1794 kthread_init_work(&cmd->work, loop_queue_work);
1772
1773 return 0; 1795 return 0;
1774} 1796}
1775 1797
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 0f45416e4fcf..b78de9879f4f 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -66,7 +66,6 @@ struct loop_device {
66 66
67struct loop_cmd { 67struct loop_cmd {
68 struct kthread_work work; 68 struct kthread_work work;
69 struct request *rq;
70 bool use_aio; /* use AIO interface to handle I/O */ 69 bool use_aio; /* use AIO interface to handle I/O */
71 atomic_t ref; /* only for aio */ 70 atomic_t ref; /* only for aio */
72 long ret; 71 long ret;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 07dc5419bd63..8e8b04cc569a 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -732,6 +732,7 @@ static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
732 */ 732 */
733enum { 733enum {
734 Opt_queue_depth, 734 Opt_queue_depth,
735 Opt_lock_timeout,
735 Opt_last_int, 736 Opt_last_int,
736 /* int args above */ 737 /* int args above */
737 Opt_last_string, 738 Opt_last_string,
@@ -740,11 +741,13 @@ enum {
740 Opt_read_write, 741 Opt_read_write,
741 Opt_lock_on_read, 742 Opt_lock_on_read,
742 Opt_exclusive, 743 Opt_exclusive,
744 Opt_notrim,
743 Opt_err 745 Opt_err
744}; 746};
745 747
746static match_table_t rbd_opts_tokens = { 748static match_table_t rbd_opts_tokens = {
747 {Opt_queue_depth, "queue_depth=%d"}, 749 {Opt_queue_depth, "queue_depth=%d"},
750 {Opt_lock_timeout, "lock_timeout=%d"},
748 /* int args above */ 751 /* int args above */
749 /* string args above */ 752 /* string args above */
750 {Opt_read_only, "read_only"}, 753 {Opt_read_only, "read_only"},
@@ -753,20 +756,25 @@ static match_table_t rbd_opts_tokens = {
753 {Opt_read_write, "rw"}, /* Alternate spelling */ 756 {Opt_read_write, "rw"}, /* Alternate spelling */
754 {Opt_lock_on_read, "lock_on_read"}, 757 {Opt_lock_on_read, "lock_on_read"},
755 {Opt_exclusive, "exclusive"}, 758 {Opt_exclusive, "exclusive"},
759 {Opt_notrim, "notrim"},
756 {Opt_err, NULL} 760 {Opt_err, NULL}
757}; 761};
758 762
759struct rbd_options { 763struct rbd_options {
760 int queue_depth; 764 int queue_depth;
765 unsigned long lock_timeout;
761 bool read_only; 766 bool read_only;
762 bool lock_on_read; 767 bool lock_on_read;
763 bool exclusive; 768 bool exclusive;
769 bool trim;
764}; 770};
765 771
766#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ 772#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
773#define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */
767#define RBD_READ_ONLY_DEFAULT false 774#define RBD_READ_ONLY_DEFAULT false
768#define RBD_LOCK_ON_READ_DEFAULT false 775#define RBD_LOCK_ON_READ_DEFAULT false
769#define RBD_EXCLUSIVE_DEFAULT false 776#define RBD_EXCLUSIVE_DEFAULT false
777#define RBD_TRIM_DEFAULT true
770 778
771static int parse_rbd_opts_token(char *c, void *private) 779static int parse_rbd_opts_token(char *c, void *private)
772{ 780{
@@ -796,6 +804,14 @@ static int parse_rbd_opts_token(char *c, void *private)
796 } 804 }
797 rbd_opts->queue_depth = intval; 805 rbd_opts->queue_depth = intval;
798 break; 806 break;
807 case Opt_lock_timeout:
808 /* 0 is "wait forever" (i.e. infinite timeout) */
809 if (intval < 0 || intval > INT_MAX / 1000) {
810 pr_err("lock_timeout out of range\n");
811 return -EINVAL;
812 }
813 rbd_opts->lock_timeout = msecs_to_jiffies(intval * 1000);
814 break;
799 case Opt_read_only: 815 case Opt_read_only:
800 rbd_opts->read_only = true; 816 rbd_opts->read_only = true;
801 break; 817 break;
@@ -808,6 +824,9 @@ static int parse_rbd_opts_token(char *c, void *private)
808 case Opt_exclusive: 824 case Opt_exclusive:
809 rbd_opts->exclusive = true; 825 rbd_opts->exclusive = true;
810 break; 826 break;
827 case Opt_notrim:
828 rbd_opts->trim = false;
829 break;
811 default: 830 default:
812 /* libceph prints "bad option" msg */ 831 /* libceph prints "bad option" msg */
813 return -EINVAL; 832 return -EINVAL;
@@ -1392,7 +1411,7 @@ static bool rbd_img_is_write(struct rbd_img_request *img_req)
1392 case OBJ_OP_DISCARD: 1411 case OBJ_OP_DISCARD:
1393 return true; 1412 return true;
1394 default: 1413 default:
1395 rbd_assert(0); 1414 BUG();
1396 } 1415 }
1397} 1416}
1398 1417
@@ -2466,7 +2485,7 @@ again:
2466 } 2485 }
2467 return false; 2486 return false;
2468 default: 2487 default:
2469 rbd_assert(0); 2488 BUG();
2470 } 2489 }
2471} 2490}
2472 2491
@@ -2494,7 +2513,7 @@ static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req)
2494 } 2513 }
2495 return false; 2514 return false;
2496 default: 2515 default:
2497 rbd_assert(0); 2516 BUG();
2498 } 2517 }
2499} 2518}
2500 2519
@@ -3533,9 +3552,22 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3533/* 3552/*
3534 * lock_rwsem must be held for read 3553 * lock_rwsem must be held for read
3535 */ 3554 */
3536static void rbd_wait_state_locked(struct rbd_device *rbd_dev) 3555static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire)
3537{ 3556{
3538 DEFINE_WAIT(wait); 3557 DEFINE_WAIT(wait);
3558 unsigned long timeout;
3559 int ret = 0;
3560
3561 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
3562 return -EBLACKLISTED;
3563
3564 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
3565 return 0;
3566
3567 if (!may_acquire) {
3568 rbd_warn(rbd_dev, "exclusive lock required");
3569 return -EROFS;
3570 }
3539 3571
3540 do { 3572 do {
3541 /* 3573 /*
@@ -3547,12 +3579,22 @@ static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
3547 prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait, 3579 prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
3548 TASK_UNINTERRUPTIBLE); 3580 TASK_UNINTERRUPTIBLE);
3549 up_read(&rbd_dev->lock_rwsem); 3581 up_read(&rbd_dev->lock_rwsem);
3550 schedule(); 3582 timeout = schedule_timeout(ceph_timeout_jiffies(
3583 rbd_dev->opts->lock_timeout));
3551 down_read(&rbd_dev->lock_rwsem); 3584 down_read(&rbd_dev->lock_rwsem);
3552 } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED && 3585 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
3553 !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)); 3586 ret = -EBLACKLISTED;
3587 break;
3588 }
3589 if (!timeout) {
3590 rbd_warn(rbd_dev, "timed out waiting for lock");
3591 ret = -ETIMEDOUT;
3592 break;
3593 }
3594 } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
3554 3595
3555 finish_wait(&rbd_dev->lock_waitq, &wait); 3596 finish_wait(&rbd_dev->lock_waitq, &wait);
3597 return ret;
3556} 3598}
3557 3599
3558static void rbd_queue_workfn(struct work_struct *work) 3600static void rbd_queue_workfn(struct work_struct *work)
@@ -3638,19 +3680,10 @@ static void rbd_queue_workfn(struct work_struct *work)
3638 (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read); 3680 (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read);
3639 if (must_be_locked) { 3681 if (must_be_locked) {
3640 down_read(&rbd_dev->lock_rwsem); 3682 down_read(&rbd_dev->lock_rwsem);
3641 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED && 3683 result = rbd_wait_state_locked(rbd_dev,
3642 !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { 3684 !rbd_dev->opts->exclusive);
3643 if (rbd_dev->opts->exclusive) { 3685 if (result)
3644 rbd_warn(rbd_dev, "exclusive lock required");
3645 result = -EROFS;
3646 goto err_unlock;
3647 }
3648 rbd_wait_state_locked(rbd_dev);
3649 }
3650 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
3651 result = -EBLACKLISTED;
3652 goto err_unlock; 3686 goto err_unlock;
3653 }
3654 } 3687 }
3655 3688
3656 img_request = rbd_img_request_create(rbd_dev, op_type, snapc); 3689 img_request = rbd_img_request_create(rbd_dev, op_type, snapc);
@@ -3902,7 +3935,8 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
3902{ 3935{
3903 struct gendisk *disk; 3936 struct gendisk *disk;
3904 struct request_queue *q; 3937 struct request_queue *q;
3905 u64 segment_size; 3938 unsigned int objset_bytes =
3939 rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
3906 int err; 3940 int err;
3907 3941
3908 /* create gendisk info */ 3942 /* create gendisk info */
@@ -3942,20 +3976,19 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
3942 blk_queue_flag_set(QUEUE_FLAG_NONROT, q); 3976 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
3943 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */ 3977 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
3944 3978
3945 /* set io sizes to object size */ 3979 blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
3946 segment_size = rbd_obj_bytes(&rbd_dev->header);
3947 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3948 q->limits.max_sectors = queue_max_hw_sectors(q); 3980 q->limits.max_sectors = queue_max_hw_sectors(q);
3949 blk_queue_max_segments(q, USHRT_MAX); 3981 blk_queue_max_segments(q, USHRT_MAX);
3950 blk_queue_max_segment_size(q, UINT_MAX); 3982 blk_queue_max_segment_size(q, UINT_MAX);
3951 blk_queue_io_min(q, segment_size); 3983 blk_queue_io_min(q, objset_bytes);
3952 blk_queue_io_opt(q, segment_size); 3984 blk_queue_io_opt(q, objset_bytes);
3953 3985
3954 /* enable the discard support */ 3986 if (rbd_dev->opts->trim) {
3955 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); 3987 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
3956 q->limits.discard_granularity = segment_size; 3988 q->limits.discard_granularity = objset_bytes;
3957 blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE); 3989 blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
3958 blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE); 3990 blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
3991 }
3959 3992
3960 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) 3993 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
3961 q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; 3994 q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
@@ -5179,8 +5212,10 @@ static int rbd_add_parse_args(const char *buf,
5179 5212
5180 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT; 5213 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
5181 rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT; 5214 rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
5215 rbd_opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
5182 rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT; 5216 rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
5183 rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT; 5217 rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
5218 rbd_opts->trim = RBD_TRIM_DEFAULT;
5184 5219
5185 copts = ceph_parse_options(options, mon_addrs, 5220 copts = ceph_parse_options(options, mon_addrs,
5186 mon_addrs + mon_addrs_size - 1, 5221 mon_addrs + mon_addrs_size - 1,
@@ -5216,6 +5251,8 @@ static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
5216 5251
5217static int rbd_add_acquire_lock(struct rbd_device *rbd_dev) 5252static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
5218{ 5253{
5254 int ret;
5255
5219 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) { 5256 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
5220 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled"); 5257 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
5221 return -EINVAL; 5258 return -EINVAL;
@@ -5223,9 +5260,9 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
5223 5260
5224 /* FIXME: "rbd map --exclusive" should be in interruptible */ 5261 /* FIXME: "rbd map --exclusive" should be in interruptible */
5225 down_read(&rbd_dev->lock_rwsem); 5262 down_read(&rbd_dev->lock_rwsem);
5226 rbd_wait_state_locked(rbd_dev); 5263 ret = rbd_wait_state_locked(rbd_dev, true);
5227 up_read(&rbd_dev->lock_rwsem); 5264 up_read(&rbd_dev->lock_rwsem);
5228 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { 5265 if (ret) {
5229 rbd_warn(rbd_dev, "failed to acquire exclusive lock"); 5266 rbd_warn(rbd_dev, "failed to acquire exclusive lock");
5230 return -EROFS; 5267 return -EROFS;
5231 } 5268 }
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 64e066eba72e..0e31884a9519 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -110,7 +110,7 @@ struct iwm {
110/* Select values for swim_select and swim_readbit */ 110/* Select values for swim_select and swim_readbit */
111 111
112#define READ_DATA_0 0x074 112#define READ_DATA_0 0x074
113#define TWOMEG_DRIVE 0x075 113#define ONEMEG_DRIVE 0x075
114#define SINGLE_SIDED 0x076 114#define SINGLE_SIDED 0x076
115#define DRIVE_PRESENT 0x077 115#define DRIVE_PRESENT 0x077
116#define DISK_IN 0x170 116#define DISK_IN 0x170
@@ -118,9 +118,9 @@ struct iwm {
118#define TRACK_ZERO 0x172 118#define TRACK_ZERO 0x172
119#define TACHO 0x173 119#define TACHO 0x173
120#define READ_DATA_1 0x174 120#define READ_DATA_1 0x174
121#define MFM_MODE 0x175 121#define GCR_MODE 0x175
122#define SEEK_COMPLETE 0x176 122#define SEEK_COMPLETE 0x176
123#define ONEMEG_MEDIA 0x177 123#define TWOMEG_MEDIA 0x177
124 124
125/* Bits in handshake register */ 125/* Bits in handshake register */
126 126
@@ -612,7 +612,6 @@ static void setup_medium(struct floppy_state *fs)
612 struct floppy_struct *g; 612 struct floppy_struct *g;
613 fs->disk_in = 1; 613 fs->disk_in = 1;
614 fs->write_protected = swim_readbit(base, WRITE_PROT); 614 fs->write_protected = swim_readbit(base, WRITE_PROT);
615 fs->type = swim_readbit(base, ONEMEG_MEDIA);
616 615
617 if (swim_track00(base)) 616 if (swim_track00(base))
618 printk(KERN_ERR 617 printk(KERN_ERR
@@ -620,6 +619,9 @@ static void setup_medium(struct floppy_state *fs)
620 619
621 swim_track00(base); 620 swim_track00(base);
622 621
622 fs->type = swim_readbit(base, TWOMEG_MEDIA) ?
623 HD_MEDIA : DD_MEDIA;
624 fs->head_number = swim_readbit(base, SINGLE_SIDED) ? 1 : 2;
623 get_floppy_geometry(fs, 0, &g); 625 get_floppy_geometry(fs, 0, &g);
624 fs->total_secs = g->size; 626 fs->total_secs = g->size;
625 fs->secpercyl = g->head * g->sect; 627 fs->secpercyl = g->head * g->sect;
@@ -646,7 +648,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
646 648
647 swim_write(base, setup, S_IBM_DRIVE | S_FCLK_DIV2); 649 swim_write(base, setup, S_IBM_DRIVE | S_FCLK_DIV2);
648 udelay(10); 650 udelay(10);
649 swim_drive(base, INTERNAL_DRIVE); 651 swim_drive(base, fs->location);
650 swim_motor(base, ON); 652 swim_motor(base, ON);
651 swim_action(base, SETMFM); 653 swim_action(base, SETMFM);
652 if (fs->ejected) 654 if (fs->ejected)
@@ -656,6 +658,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
656 goto out; 658 goto out;
657 } 659 }
658 660
661 set_capacity(fs->disk, fs->total_secs);
662
659 if (mode & FMODE_NDELAY) 663 if (mode & FMODE_NDELAY)
660 return 0; 664 return 0;
661 665
@@ -727,14 +731,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
727 if (copy_to_user((void __user *) param, (void *) &floppy_type, 731 if (copy_to_user((void __user *) param, (void *) &floppy_type,
728 sizeof(struct floppy_struct))) 732 sizeof(struct floppy_struct)))
729 return -EFAULT; 733 return -EFAULT;
730 break; 734 return 0;
731
732 default:
733 printk(KERN_DEBUG "SWIM floppy_ioctl: unknown cmd %d\n",
734 cmd);
735 return -ENOSYS;
736 } 735 }
737 return 0; 736 return -ENOTTY;
738} 737}
739 738
740static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo) 739static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -795,7 +794,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
795 struct swim_priv *swd = data; 794 struct swim_priv *swd = data;
796 int drive = (*part & 3); 795 int drive = (*part & 3);
797 796
798 if (drive > swd->floppy_count) 797 if (drive >= swd->floppy_count)
799 return NULL; 798 return NULL;
800 799
801 *part = 0; 800 *part = 0;
@@ -813,10 +812,9 @@ static int swim_add_floppy(struct swim_priv *swd, enum drive_location location)
813 812
814 swim_motor(base, OFF); 813 swim_motor(base, OFF);
815 814
816 if (swim_readbit(base, SINGLE_SIDED)) 815 fs->type = HD_MEDIA;
817 fs->head_number = 1; 816 fs->head_number = 2;
818 else 817
819 fs->head_number = 2;
820 fs->ref_count = 0; 818 fs->ref_count = 0;
821 fs->ejected = 1; 819 fs->ejected = 1;
822 820
@@ -834,10 +832,12 @@ static int swim_floppy_init(struct swim_priv *swd)
834 /* scan floppy drives */ 832 /* scan floppy drives */
835 833
836 swim_drive(base, INTERNAL_DRIVE); 834 swim_drive(base, INTERNAL_DRIVE);
837 if (swim_readbit(base, DRIVE_PRESENT)) 835 if (swim_readbit(base, DRIVE_PRESENT) &&
836 !swim_readbit(base, ONEMEG_DRIVE))
838 swim_add_floppy(swd, INTERNAL_DRIVE); 837 swim_add_floppy(swd, INTERNAL_DRIVE);
839 swim_drive(base, EXTERNAL_DRIVE); 838 swim_drive(base, EXTERNAL_DRIVE);
840 if (swim_readbit(base, DRIVE_PRESENT)) 839 if (swim_readbit(base, DRIVE_PRESENT) &&
840 !swim_readbit(base, ONEMEG_DRIVE))
841 swim_add_floppy(swd, EXTERNAL_DRIVE); 841 swim_add_floppy(swd, EXTERNAL_DRIVE);
842 842
843 /* register floppy drives */ 843 /* register floppy drives */
@@ -861,7 +861,6 @@ static int swim_floppy_init(struct swim_priv *swd)
861 &swd->lock); 861 &swd->lock);
862 if (!swd->unit[drive].disk->queue) { 862 if (!swd->unit[drive].disk->queue) {
863 err = -ENOMEM; 863 err = -ENOMEM;
864 put_disk(swd->unit[drive].disk);
865 goto exit_put_disks; 864 goto exit_put_disks;
866 } 865 }
867 blk_queue_bounce_limit(swd->unit[drive].disk->queue, 866 blk_queue_bounce_limit(swd->unit[drive].disk->queue,
@@ -911,7 +910,7 @@ static int swim_probe(struct platform_device *dev)
911 goto out; 910 goto out;
912 } 911 }
913 912
914 swim_base = ioremap(res->start, resource_size(res)); 913 swim_base = (struct swim __iomem *)res->start;
915 if (!swim_base) { 914 if (!swim_base) {
916 ret = -ENOMEM; 915 ret = -ENOMEM;
917 goto out_release_io; 916 goto out_release_io;
@@ -923,7 +922,7 @@ static int swim_probe(struct platform_device *dev)
923 if (!get_swim_mode(swim_base)) { 922 if (!get_swim_mode(swim_base)) {
924 printk(KERN_INFO "SWIM device not found !\n"); 923 printk(KERN_INFO "SWIM device not found !\n");
925 ret = -ENODEV; 924 ret = -ENODEV;
926 goto out_iounmap; 925 goto out_release_io;
927 } 926 }
928 927
929 /* set platform driver data */ 928 /* set platform driver data */
@@ -931,7 +930,7 @@ static int swim_probe(struct platform_device *dev)
931 swd = kzalloc(sizeof(struct swim_priv), GFP_KERNEL); 930 swd = kzalloc(sizeof(struct swim_priv), GFP_KERNEL);
932 if (!swd) { 931 if (!swd) {
933 ret = -ENOMEM; 932 ret = -ENOMEM;
934 goto out_iounmap; 933 goto out_release_io;
935 } 934 }
936 platform_set_drvdata(dev, swd); 935 platform_set_drvdata(dev, swd);
937 936
@@ -945,8 +944,6 @@ static int swim_probe(struct platform_device *dev)
945 944
946out_kfree: 945out_kfree:
947 kfree(swd); 946 kfree(swd);
948out_iounmap:
949 iounmap(swim_base);
950out_release_io: 947out_release_io:
951 release_mem_region(res->start, resource_size(res)); 948 release_mem_region(res->start, resource_size(res));
952out: 949out:
@@ -974,8 +971,6 @@ static int swim_remove(struct platform_device *dev)
974 for (drive = 0; drive < swd->floppy_count; drive++) 971 for (drive = 0; drive < swd->floppy_count; drive++)
975 floppy_eject(&swd->unit[drive]); 972 floppy_eject(&swd->unit[drive]);
976 973
977 iounmap(swd->base);
978
979 res = platform_get_resource(dev, IORESOURCE_MEM, 0); 974 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
980 if (res) 975 if (res)
981 release_mem_region(res->start, resource_size(res)); 976 release_mem_region(res->start, resource_size(res));
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index af51015d056e..469541c1e51e 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -148,7 +148,7 @@ struct swim3 {
148#define MOTOR_ON 2 148#define MOTOR_ON 2
149#define RELAX 3 /* also eject in progress */ 149#define RELAX 3 /* also eject in progress */
150#define READ_DATA_0 4 150#define READ_DATA_0 4
151#define TWOMEG_DRIVE 5 151#define ONEMEG_DRIVE 5
152#define SINGLE_SIDED 6 /* drive or diskette is 4MB type? */ 152#define SINGLE_SIDED 6 /* drive or diskette is 4MB type? */
153#define DRIVE_PRESENT 7 153#define DRIVE_PRESENT 7
154#define DISK_IN 8 154#define DISK_IN 8
@@ -156,9 +156,9 @@ struct swim3 {
156#define TRACK_ZERO 10 156#define TRACK_ZERO 10
157#define TACHO 11 157#define TACHO 11
158#define READ_DATA_1 12 158#define READ_DATA_1 12
159#define MFM_MODE 13 159#define GCR_MODE 13
160#define SEEK_COMPLETE 14 160#define SEEK_COMPLETE 14
161#define ONEMEG_MEDIA 15 161#define TWOMEG_MEDIA 15
162 162
163/* Definitions of values used in writing and formatting */ 163/* Definitions of values used in writing and formatting */
164#define DATA_ESCAPE 0x99 164#define DATA_ESCAPE 0x99
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index d1c0b60e9326..6dc177bf4c42 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -33,6 +33,7 @@ config HISILICON_LPC
33 bool "Support for ISA I/O space on HiSilicon Hip06/7" 33 bool "Support for ISA I/O space on HiSilicon Hip06/7"
34 depends on ARM64 && (ARCH_HISI || COMPILE_TEST) 34 depends on ARM64 && (ARCH_HISI || COMPILE_TEST)
35 select INDIRECT_PIO 35 select INDIRECT_PIO
36 select MFD_CORE if ACPI
36 help 37 help
37 Driver to enable I/O access to devices attached to the Low Pin 38 Driver to enable I/O access to devices attached to the Low Pin
38 Count bus on the HiSilicon Hip06/7 SoC. 39 Count bus on the HiSilicon Hip06/7 SoC.
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 8327478effd0..bfc566d3f31a 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2371,7 +2371,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
2371 if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT) 2371 if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
2372 return media_changed(cdi, 1); 2372 return media_changed(cdi, 1);
2373 2373
2374 if ((unsigned int)arg >= cdi->capacity) 2374 if (arg >= cdi->capacity)
2375 return -EINVAL; 2375 return -EINVAL;
2376 2376
2377 info = kmalloc(sizeof(*info), GFP_KERNEL); 2377 info = kmalloc(sizeof(*info), GFP_KERNEL);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index e027e7fa1472..cd888d4ee605 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -261,6 +261,7 @@
261#include <linux/ptrace.h> 261#include <linux/ptrace.h>
262#include <linux/workqueue.h> 262#include <linux/workqueue.h>
263#include <linux/irq.h> 263#include <linux/irq.h>
264#include <linux/ratelimit.h>
264#include <linux/syscalls.h> 265#include <linux/syscalls.h>
265#include <linux/completion.h> 266#include <linux/completion.h>
266#include <linux/uuid.h> 267#include <linux/uuid.h>
@@ -427,8 +428,9 @@ struct crng_state primary_crng = {
427 * its value (from 0->1->2). 428 * its value (from 0->1->2).
428 */ 429 */
429static int crng_init = 0; 430static int crng_init = 0;
430#define crng_ready() (likely(crng_init > 0)) 431#define crng_ready() (likely(crng_init > 1))
431static int crng_init_cnt = 0; 432static int crng_init_cnt = 0;
433static unsigned long crng_global_init_time = 0;
432#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE) 434#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
433static void _extract_crng(struct crng_state *crng, 435static void _extract_crng(struct crng_state *crng,
434 __u32 out[CHACHA20_BLOCK_WORDS]); 436 __u32 out[CHACHA20_BLOCK_WORDS]);
@@ -437,6 +439,16 @@ static void _crng_backtrack_protect(struct crng_state *crng,
437static void process_random_ready_list(void); 439static void process_random_ready_list(void);
438static void _get_random_bytes(void *buf, int nbytes); 440static void _get_random_bytes(void *buf, int nbytes);
439 441
442static struct ratelimit_state unseeded_warning =
443 RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
444static struct ratelimit_state urandom_warning =
445 RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
446
447static int ratelimit_disable __read_mostly;
448
449module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
450MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
451
440/********************************************************************** 452/**********************************************************************
441 * 453 *
442 * OS independent entropy store. Here are the functions which handle 454 * OS independent entropy store. Here are the functions which handle
@@ -787,6 +799,43 @@ static void crng_initialize(struct crng_state *crng)
787 crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; 799 crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
788} 800}
789 801
802#ifdef CONFIG_NUMA
803static void do_numa_crng_init(struct work_struct *work)
804{
805 int i;
806 struct crng_state *crng;
807 struct crng_state **pool;
808
809 pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
810 for_each_online_node(i) {
811 crng = kmalloc_node(sizeof(struct crng_state),
812 GFP_KERNEL | __GFP_NOFAIL, i);
813 spin_lock_init(&crng->lock);
814 crng_initialize(crng);
815 pool[i] = crng;
816 }
817 mb();
818 if (cmpxchg(&crng_node_pool, NULL, pool)) {
819 for_each_node(i)
820 kfree(pool[i]);
821 kfree(pool);
822 }
823}
824
825static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
826
827static void numa_crng_init(void)
828{
829 schedule_work(&numa_crng_init_work);
830}
831#else
832static void numa_crng_init(void) {}
833#endif
834
835/*
836 * crng_fast_load() can be called by code in the interrupt service
837 * path. So we can't afford to dilly-dally.
838 */
790static int crng_fast_load(const char *cp, size_t len) 839static int crng_fast_load(const char *cp, size_t len)
791{ 840{
792 unsigned long flags; 841 unsigned long flags;
@@ -794,7 +843,7 @@ static int crng_fast_load(const char *cp, size_t len)
794 843
795 if (!spin_trylock_irqsave(&primary_crng.lock, flags)) 844 if (!spin_trylock_irqsave(&primary_crng.lock, flags))
796 return 0; 845 return 0;
797 if (crng_ready()) { 846 if (crng_init != 0) {
798 spin_unlock_irqrestore(&primary_crng.lock, flags); 847 spin_unlock_irqrestore(&primary_crng.lock, flags);
799 return 0; 848 return 0;
800 } 849 }
@@ -813,6 +862,51 @@ static int crng_fast_load(const char *cp, size_t len)
813 return 1; 862 return 1;
814} 863}
815 864
865/*
866 * crng_slow_load() is called by add_device_randomness, which has two
867 * attributes. (1) We can't trust the buffer passed to it is
868 * guaranteed to be unpredictable (so it might not have any entropy at
869 * all), and (2) it doesn't have the performance constraints of
870 * crng_fast_load().
871 *
872 * So we do something more comprehensive which is guaranteed to touch
873 * all of the primary_crng's state, and which uses a LFSR with a
874 * period of 255 as part of the mixing algorithm. Finally, we do
875 * *not* advance crng_init_cnt since buffer we may get may be something
876 * like a fixed DMI table (for example), which might very well be
877 * unique to the machine, but is otherwise unvarying.
878 */
879static int crng_slow_load(const char *cp, size_t len)
880{
881 unsigned long flags;
882 static unsigned char lfsr = 1;
883 unsigned char tmp;
884 unsigned i, max = CHACHA20_KEY_SIZE;
885 const char * src_buf = cp;
886 char * dest_buf = (char *) &primary_crng.state[4];
887
888 if (!spin_trylock_irqsave(&primary_crng.lock, flags))
889 return 0;
890 if (crng_init != 0) {
891 spin_unlock_irqrestore(&primary_crng.lock, flags);
892 return 0;
893 }
894 if (len > max)
895 max = len;
896
897 for (i = 0; i < max ; i++) {
898 tmp = lfsr;
899 lfsr >>= 1;
900 if (tmp & 1)
901 lfsr ^= 0xE1;
902 tmp = dest_buf[i % CHACHA20_KEY_SIZE];
903 dest_buf[i % CHACHA20_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
904 lfsr += (tmp << 3) | (tmp >> 5);
905 }
906 spin_unlock_irqrestore(&primary_crng.lock, flags);
907 return 1;
908}
909
816static void crng_reseed(struct crng_state *crng, struct entropy_store *r) 910static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
817{ 911{
818 unsigned long flags; 912 unsigned long flags;
@@ -831,7 +925,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
831 _crng_backtrack_protect(&primary_crng, buf.block, 925 _crng_backtrack_protect(&primary_crng, buf.block,
832 CHACHA20_KEY_SIZE); 926 CHACHA20_KEY_SIZE);
833 } 927 }
834 spin_lock_irqsave(&primary_crng.lock, flags); 928 spin_lock_irqsave(&crng->lock, flags);
835 for (i = 0; i < 8; i++) { 929 for (i = 0; i < 8; i++) {
836 unsigned long rv; 930 unsigned long rv;
837 if (!arch_get_random_seed_long(&rv) && 931 if (!arch_get_random_seed_long(&rv) &&
@@ -841,13 +935,26 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
841 } 935 }
842 memzero_explicit(&buf, sizeof(buf)); 936 memzero_explicit(&buf, sizeof(buf));
843 crng->init_time = jiffies; 937 crng->init_time = jiffies;
844 spin_unlock_irqrestore(&primary_crng.lock, flags); 938 spin_unlock_irqrestore(&crng->lock, flags);
845 if (crng == &primary_crng && crng_init < 2) { 939 if (crng == &primary_crng && crng_init < 2) {
846 invalidate_batched_entropy(); 940 invalidate_batched_entropy();
941 numa_crng_init();
847 crng_init = 2; 942 crng_init = 2;
848 process_random_ready_list(); 943 process_random_ready_list();
849 wake_up_interruptible(&crng_init_wait); 944 wake_up_interruptible(&crng_init_wait);
850 pr_notice("random: crng init done\n"); 945 pr_notice("random: crng init done\n");
946 if (unseeded_warning.missed) {
947 pr_notice("random: %d get_random_xx warning(s) missed "
948 "due to ratelimiting\n",
949 unseeded_warning.missed);
950 unseeded_warning.missed = 0;
951 }
952 if (urandom_warning.missed) {
953 pr_notice("random: %d urandom warning(s) missed "
954 "due to ratelimiting\n",
955 urandom_warning.missed);
956 urandom_warning.missed = 0;
957 }
851 } 958 }
852} 959}
853 960
@@ -856,8 +963,9 @@ static void _extract_crng(struct crng_state *crng,
856{ 963{
857 unsigned long v, flags; 964 unsigned long v, flags;
858 965
859 if (crng_init > 1 && 966 if (crng_ready() &&
860 time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)) 967 (time_after(crng_global_init_time, crng->init_time) ||
968 time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
861 crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL); 969 crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
862 spin_lock_irqsave(&crng->lock, flags); 970 spin_lock_irqsave(&crng->lock, flags);
863 if (arch_get_random_long(&v)) 971 if (arch_get_random_long(&v))
@@ -981,10 +1089,8 @@ void add_device_randomness(const void *buf, unsigned int size)
981 unsigned long time = random_get_entropy() ^ jiffies; 1089 unsigned long time = random_get_entropy() ^ jiffies;
982 unsigned long flags; 1090 unsigned long flags;
983 1091
984 if (!crng_ready()) { 1092 if (!crng_ready() && size)
985 crng_fast_load(buf, size); 1093 crng_slow_load(buf, size);
986 return;
987 }
988 1094
989 trace_add_device_randomness(size, _RET_IP_); 1095 trace_add_device_randomness(size, _RET_IP_);
990 spin_lock_irqsave(&input_pool.lock, flags); 1096 spin_lock_irqsave(&input_pool.lock, flags);
@@ -1139,7 +1245,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
1139 fast_mix(fast_pool); 1245 fast_mix(fast_pool);
1140 add_interrupt_bench(cycles); 1246 add_interrupt_bench(cycles);
1141 1247
1142 if (!crng_ready()) { 1248 if (unlikely(crng_init == 0)) {
1143 if ((fast_pool->count >= 64) && 1249 if ((fast_pool->count >= 64) &&
1144 crng_fast_load((char *) fast_pool->pool, 1250 crng_fast_load((char *) fast_pool->pool,
1145 sizeof(fast_pool->pool))) { 1251 sizeof(fast_pool->pool))) {
@@ -1489,8 +1595,9 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
1489#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM 1595#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
1490 print_once = true; 1596 print_once = true;
1491#endif 1597#endif
1492 pr_notice("random: %s called from %pS with crng_init=%d\n", 1598 if (__ratelimit(&unseeded_warning))
1493 func_name, caller, crng_init); 1599 pr_notice("random: %s called from %pS with crng_init=%d\n",
1600 func_name, caller, crng_init);
1494} 1601}
1495 1602
1496/* 1603/*
@@ -1680,28 +1787,14 @@ static void init_std_data(struct entropy_store *r)
1680 */ 1787 */
1681static int rand_initialize(void) 1788static int rand_initialize(void)
1682{ 1789{
1683#ifdef CONFIG_NUMA
1684 int i;
1685 struct crng_state *crng;
1686 struct crng_state **pool;
1687#endif
1688
1689 init_std_data(&input_pool); 1790 init_std_data(&input_pool);
1690 init_std_data(&blocking_pool); 1791 init_std_data(&blocking_pool);
1691 crng_initialize(&primary_crng); 1792 crng_initialize(&primary_crng);
1692 1793 crng_global_init_time = jiffies;
1693#ifdef CONFIG_NUMA 1794 if (ratelimit_disable) {
1694 pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL); 1795 urandom_warning.interval = 0;
1695 for_each_online_node(i) { 1796 unseeded_warning.interval = 0;
1696 crng = kmalloc_node(sizeof(struct crng_state),
1697 GFP_KERNEL | __GFP_NOFAIL, i);
1698 spin_lock_init(&crng->lock);
1699 crng_initialize(crng);
1700 pool[i] = crng;
1701 } 1797 }
1702 mb();
1703 crng_node_pool = pool;
1704#endif
1705 return 0; 1798 return 0;
1706} 1799}
1707early_initcall(rand_initialize); 1800early_initcall(rand_initialize);
@@ -1769,9 +1862,10 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
1769 1862
1770 if (!crng_ready() && maxwarn > 0) { 1863 if (!crng_ready() && maxwarn > 0) {
1771 maxwarn--; 1864 maxwarn--;
1772 printk(KERN_NOTICE "random: %s: uninitialized urandom read " 1865 if (__ratelimit(&urandom_warning))
1773 "(%zd bytes read)\n", 1866 printk(KERN_NOTICE "random: %s: uninitialized "
1774 current->comm, nbytes); 1867 "urandom read (%zd bytes read)\n",
1868 current->comm, nbytes);
1775 spin_lock_irqsave(&primary_crng.lock, flags); 1869 spin_lock_irqsave(&primary_crng.lock, flags);
1776 crng_init_cnt = 0; 1870 crng_init_cnt = 0;
1777 spin_unlock_irqrestore(&primary_crng.lock, flags); 1871 spin_unlock_irqrestore(&primary_crng.lock, flags);
@@ -1875,6 +1969,14 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
1875 input_pool.entropy_count = 0; 1969 input_pool.entropy_count = 0;
1876 blocking_pool.entropy_count = 0; 1970 blocking_pool.entropy_count = 0;
1877 return 0; 1971 return 0;
1972 case RNDRESEEDCRNG:
1973 if (!capable(CAP_SYS_ADMIN))
1974 return -EPERM;
1975 if (crng_init < 2)
1976 return -ENODATA;
1977 crng_reseed(&primary_crng, NULL);
1978 crng_global_init_time = jiffies - 1;
1979 return 0;
1878 default: 1980 default:
1879 return -EINVAL; 1981 return -EINVAL;
1880 } 1982 }
@@ -2212,7 +2314,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
2212{ 2314{
2213 struct entropy_store *poolp = &input_pool; 2315 struct entropy_store *poolp = &input_pool;
2214 2316
2215 if (!crng_ready()) { 2317 if (unlikely(crng_init == 0)) {
2216 crng_fast_load(buffer, count); 2318 crng_fast_load(buffer, count);
2217 return; 2319 return;
2218 } 2320 }
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 468f06134012..21085515814f 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -422,7 +422,7 @@ static void reclaim_dma_bufs(void)
422 } 422 }
423} 423}
424 424
425static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size, 425static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size,
426 int pages) 426 int pages)
427{ 427{
428 struct port_buffer *buf; 428 struct port_buffer *buf;
@@ -445,16 +445,16 @@ static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
445 return buf; 445 return buf;
446 } 446 }
447 447
448 if (is_rproc_serial(vq->vdev)) { 448 if (is_rproc_serial(vdev)) {
449 /* 449 /*
450 * Allocate DMA memory from ancestor. When a virtio 450 * Allocate DMA memory from ancestor. When a virtio
451 * device is created by remoteproc, the DMA memory is 451 * device is created by remoteproc, the DMA memory is
452 * associated with the grandparent device: 452 * associated with the grandparent device:
453 * vdev => rproc => platform-dev. 453 * vdev => rproc => platform-dev.
454 */ 454 */
455 if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent) 455 if (!vdev->dev.parent || !vdev->dev.parent->parent)
456 goto free_buf; 456 goto free_buf;
457 buf->dev = vq->vdev->dev.parent->parent; 457 buf->dev = vdev->dev.parent->parent;
458 458
459 /* Increase device refcnt to avoid freeing it */ 459 /* Increase device refcnt to avoid freeing it */
460 get_device(buf->dev); 460 get_device(buf->dev);
@@ -838,7 +838,7 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
838 838
839 count = min((size_t)(32 * 1024), count); 839 count = min((size_t)(32 * 1024), count);
840 840
841 buf = alloc_buf(port->out_vq, count, 0); 841 buf = alloc_buf(port->portdev->vdev, count, 0);
842 if (!buf) 842 if (!buf)
843 return -ENOMEM; 843 return -ENOMEM;
844 844
@@ -957,7 +957,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
957 if (ret < 0) 957 if (ret < 0)
958 goto error_out; 958 goto error_out;
959 959
960 buf = alloc_buf(port->out_vq, 0, pipe->nrbufs); 960 buf = alloc_buf(port->portdev->vdev, 0, pipe->nrbufs);
961 if (!buf) { 961 if (!buf) {
962 ret = -ENOMEM; 962 ret = -ENOMEM;
963 goto error_out; 963 goto error_out;
@@ -1374,7 +1374,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
1374 1374
1375 nr_added_bufs = 0; 1375 nr_added_bufs = 0;
1376 do { 1376 do {
1377 buf = alloc_buf(vq, PAGE_SIZE, 0); 1377 buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
1378 if (!buf) 1378 if (!buf)
1379 break; 1379 break;
1380 1380
@@ -1402,7 +1402,6 @@ static int add_port(struct ports_device *portdev, u32 id)
1402{ 1402{
1403 char debugfs_name[16]; 1403 char debugfs_name[16];
1404 struct port *port; 1404 struct port *port;
1405 struct port_buffer *buf;
1406 dev_t devt; 1405 dev_t devt;
1407 unsigned int nr_added_bufs; 1406 unsigned int nr_added_bufs;
1408 int err; 1407 int err;
@@ -1513,8 +1512,6 @@ static int add_port(struct ports_device *portdev, u32 id)
1513 return 0; 1512 return 0;
1514 1513
1515free_inbufs: 1514free_inbufs:
1516 while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
1517 free_buf(buf, true);
1518free_device: 1515free_device:
1519 device_destroy(pdrvdata.class, port->dev->devt); 1516 device_destroy(pdrvdata.class, port->dev->devt);
1520free_cdev: 1517free_cdev:
@@ -1539,34 +1536,14 @@ static void remove_port(struct kref *kref)
1539 1536
1540static void remove_port_data(struct port *port) 1537static void remove_port_data(struct port *port)
1541{ 1538{
1542 struct port_buffer *buf;
1543
1544 spin_lock_irq(&port->inbuf_lock); 1539 spin_lock_irq(&port->inbuf_lock);
1545 /* Remove unused data this port might have received. */ 1540 /* Remove unused data this port might have received. */
1546 discard_port_data(port); 1541 discard_port_data(port);
1547 spin_unlock_irq(&port->inbuf_lock); 1542 spin_unlock_irq(&port->inbuf_lock);
1548 1543
1549 /* Remove buffers we queued up for the Host to send us data in. */
1550 do {
1551 spin_lock_irq(&port->inbuf_lock);
1552 buf = virtqueue_detach_unused_buf(port->in_vq);
1553 spin_unlock_irq(&port->inbuf_lock);
1554 if (buf)
1555 free_buf(buf, true);
1556 } while (buf);
1557
1558 spin_lock_irq(&port->outvq_lock); 1544 spin_lock_irq(&port->outvq_lock);
1559 reclaim_consumed_buffers(port); 1545 reclaim_consumed_buffers(port);
1560 spin_unlock_irq(&port->outvq_lock); 1546 spin_unlock_irq(&port->outvq_lock);
1561
1562 /* Free pending buffers from the out-queue. */
1563 do {
1564 spin_lock_irq(&port->outvq_lock);
1565 buf = virtqueue_detach_unused_buf(port->out_vq);
1566 spin_unlock_irq(&port->outvq_lock);
1567 if (buf)
1568 free_buf(buf, true);
1569 } while (buf);
1570} 1547}
1571 1548
1572/* 1549/*
@@ -1791,13 +1768,24 @@ static void control_work_handler(struct work_struct *work)
1791 spin_unlock(&portdev->c_ivq_lock); 1768 spin_unlock(&portdev->c_ivq_lock);
1792} 1769}
1793 1770
1771static void flush_bufs(struct virtqueue *vq, bool can_sleep)
1772{
1773 struct port_buffer *buf;
1774 unsigned int len;
1775
1776 while ((buf = virtqueue_get_buf(vq, &len)))
1777 free_buf(buf, can_sleep);
1778}
1779
1794static void out_intr(struct virtqueue *vq) 1780static void out_intr(struct virtqueue *vq)
1795{ 1781{
1796 struct port *port; 1782 struct port *port;
1797 1783
1798 port = find_port_by_vq(vq->vdev->priv, vq); 1784 port = find_port_by_vq(vq->vdev->priv, vq);
1799 if (!port) 1785 if (!port) {
1786 flush_bufs(vq, false);
1800 return; 1787 return;
1788 }
1801 1789
1802 wake_up_interruptible(&port->waitqueue); 1790 wake_up_interruptible(&port->waitqueue);
1803} 1791}
@@ -1808,8 +1796,10 @@ static void in_intr(struct virtqueue *vq)
1808 unsigned long flags; 1796 unsigned long flags;
1809 1797
1810 port = find_port_by_vq(vq->vdev->priv, vq); 1798 port = find_port_by_vq(vq->vdev->priv, vq);
1811 if (!port) 1799 if (!port) {
1800 flush_bufs(vq, false);
1812 return; 1801 return;
1802 }
1813 1803
1814 spin_lock_irqsave(&port->inbuf_lock, flags); 1804 spin_lock_irqsave(&port->inbuf_lock, flags);
1815 port->inbuf = get_inbuf(port); 1805 port->inbuf = get_inbuf(port);
@@ -1984,24 +1974,54 @@ static const struct file_operations portdev_fops = {
1984 1974
1985static void remove_vqs(struct ports_device *portdev) 1975static void remove_vqs(struct ports_device *portdev)
1986{ 1976{
1977 struct virtqueue *vq;
1978
1979 virtio_device_for_each_vq(portdev->vdev, vq) {
1980 struct port_buffer *buf;
1981
1982 flush_bufs(vq, true);
1983 while ((buf = virtqueue_detach_unused_buf(vq)))
1984 free_buf(buf, true);
1985 }
1987 portdev->vdev->config->del_vqs(portdev->vdev); 1986 portdev->vdev->config->del_vqs(portdev->vdev);
1988 kfree(portdev->in_vqs); 1987 kfree(portdev->in_vqs);
1989 kfree(portdev->out_vqs); 1988 kfree(portdev->out_vqs);
1990} 1989}
1991 1990
1992static void remove_controlq_data(struct ports_device *portdev) 1991static void virtcons_remove(struct virtio_device *vdev)
1993{ 1992{
1994 struct port_buffer *buf; 1993 struct ports_device *portdev;
1995 unsigned int len; 1994 struct port *port, *port2;
1996 1995
1997 if (!use_multiport(portdev)) 1996 portdev = vdev->priv;
1998 return;
1999 1997
2000 while ((buf = virtqueue_get_buf(portdev->c_ivq, &len))) 1998 spin_lock_irq(&pdrvdata_lock);
2001 free_buf(buf, true); 1999 list_del(&portdev->list);
2000 spin_unlock_irq(&pdrvdata_lock);
2002 2001
2003 while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq))) 2002 /* Disable interrupts for vqs */
2004 free_buf(buf, true); 2003 vdev->config->reset(vdev);
2004 /* Finish up work that's lined up */
2005 if (use_multiport(portdev))
2006 cancel_work_sync(&portdev->control_work);
2007 else
2008 cancel_work_sync(&portdev->config_work);
2009
2010 list_for_each_entry_safe(port, port2, &portdev->ports, list)
2011 unplug_port(port);
2012
2013 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
2014
2015 /*
2016 * When yanking out a device, we immediately lose the
2017 * (device-side) queues. So there's no point in keeping the
2018 * guest side around till we drop our final reference. This
2019 * also means that any ports which are in an open state will
2020 * have to just stop using the port, as the vqs are going
2021 * away.
2022 */
2023 remove_vqs(portdev);
2024 kfree(portdev);
2005} 2025}
2006 2026
2007/* 2027/*
@@ -2070,6 +2090,7 @@ static int virtcons_probe(struct virtio_device *vdev)
2070 2090
2071 spin_lock_init(&portdev->ports_lock); 2091 spin_lock_init(&portdev->ports_lock);
2072 INIT_LIST_HEAD(&portdev->ports); 2092 INIT_LIST_HEAD(&portdev->ports);
2093 INIT_LIST_HEAD(&portdev->list);
2073 2094
2074 virtio_device_ready(portdev->vdev); 2095 virtio_device_ready(portdev->vdev);
2075 2096
@@ -2087,8 +2108,15 @@ static int virtcons_probe(struct virtio_device *vdev)
2087 if (!nr_added_bufs) { 2108 if (!nr_added_bufs) {
2088 dev_err(&vdev->dev, 2109 dev_err(&vdev->dev,
2089 "Error allocating buffers for control queue\n"); 2110 "Error allocating buffers for control queue\n");
2090 err = -ENOMEM; 2111 /*
2091 goto free_vqs; 2112 * The host might want to notify mgmt sw about device
2113 * add failure.
2114 */
2115 __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
2116 VIRTIO_CONSOLE_DEVICE_READY, 0);
2117 /* Device was functional: we need full cleanup. */
2118 virtcons_remove(vdev);
2119 return -ENOMEM;
2092 } 2120 }
2093 } else { 2121 } else {
2094 /* 2122 /*
@@ -2119,11 +2147,6 @@ static int virtcons_probe(struct virtio_device *vdev)
2119 2147
2120 return 0; 2148 return 0;
2121 2149
2122free_vqs:
2123 /* The host might want to notify mgmt sw about device add failure */
2124 __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
2125 VIRTIO_CONSOLE_DEVICE_READY, 0);
2126 remove_vqs(portdev);
2127free_chrdev: 2150free_chrdev:
2128 unregister_chrdev(portdev->chr_major, "virtio-portsdev"); 2151 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
2129free: 2152free:
@@ -2132,43 +2155,6 @@ fail:
2132 return err; 2155 return err;
2133} 2156}
2134 2157
2135static void virtcons_remove(struct virtio_device *vdev)
2136{
2137 struct ports_device *portdev;
2138 struct port *port, *port2;
2139
2140 portdev = vdev->priv;
2141
2142 spin_lock_irq(&pdrvdata_lock);
2143 list_del(&portdev->list);
2144 spin_unlock_irq(&pdrvdata_lock);
2145
2146 /* Disable interrupts for vqs */
2147 vdev->config->reset(vdev);
2148 /* Finish up work that's lined up */
2149 if (use_multiport(portdev))
2150 cancel_work_sync(&portdev->control_work);
2151 else
2152 cancel_work_sync(&portdev->config_work);
2153
2154 list_for_each_entry_safe(port, port2, &portdev->ports, list)
2155 unplug_port(port);
2156
2157 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
2158
2159 /*
2160 * When yanking out a device, we immediately lose the
2161 * (device-side) queues. So there's no point in keeping the
2162 * guest side around till we drop our final reference. This
2163 * also means that any ports which are in an open state will
2164 * have to just stop using the port, as the vqs are going
2165 * away.
2166 */
2167 remove_controlq_data(portdev);
2168 remove_vqs(portdev);
2169 kfree(portdev);
2170}
2171
2172static struct virtio_device_id id_table[] = { 2158static struct virtio_device_id id_table[] = {
2173 { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, 2159 { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
2174 { 0 }, 2160 { 0 },
@@ -2209,7 +2195,6 @@ static int virtcons_freeze(struct virtio_device *vdev)
2209 */ 2195 */
2210 if (use_multiport(portdev)) 2196 if (use_multiport(portdev))
2211 virtqueue_disable_cb(portdev->c_ivq); 2197 virtqueue_disable_cb(portdev->c_ivq);
2212 remove_controlq_data(portdev);
2213 2198
2214 list_for_each_entry(port, &portdev->ports, list) { 2199 list_for_each_entry(port, &portdev->ports, list) {
2215 virtqueue_disable_cb(port->in_vq); 2200 virtqueue_disable_cb(port->in_vq);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 9ee2888275c1..8e8a09755d10 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -133,6 +133,14 @@ config VT8500_TIMER
133 help 133 help
134 Enables support for the VT8500 driver. 134 Enables support for the VT8500 driver.
135 135
136config NPCM7XX_TIMER
137 bool "NPCM7xx timer driver" if COMPILE_TEST
138 depends on HAS_IOMEM
139 select CLKSRC_MMIO
140 help
141 Enable 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture,
142 While TIMER0 serves as clockevent and TIMER1 serves as clocksource.
143
136config CADENCE_TTC_TIMER 144config CADENCE_TTC_TIMER
137 bool "Cadence TTC timer driver" if COMPILE_TEST 145 bool "Cadence TTC timer driver" if COMPILE_TEST
138 depends on COMMON_CLK 146 depends on COMMON_CLK
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index e8e76dfef00b..00caf37e52f9 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
56obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o 56obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
57obj-$(CONFIG_OWL_TIMER) += owl-timer.o 57obj-$(CONFIG_OWL_TIMER) += owl-timer.o
58obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o 58obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o
59obj-$(CONFIG_NPCM7XX_TIMER) += timer-npcm7xx.o
59 60
60obj-$(CONFIG_ARC_TIMERS) += arc_timer.o 61obj-$(CONFIG_ARC_TIMERS) += arc_timer.o
61obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o 62obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
index 21bffdcb2f20..6c8318470b48 100644
--- a/drivers/clocksource/timer-imx-tpm.c
+++ b/drivers/clocksource/timer-imx-tpm.c
@@ -17,9 +17,14 @@
17#include <linux/of_irq.h> 17#include <linux/of_irq.h>
18#include <linux/sched_clock.h> 18#include <linux/sched_clock.h>
19 19
20#define TPM_PARAM 0x4
21#define TPM_PARAM_WIDTH_SHIFT 16
22#define TPM_PARAM_WIDTH_MASK (0xff << 16)
20#define TPM_SC 0x10 23#define TPM_SC 0x10
21#define TPM_SC_CMOD_INC_PER_CNT (0x1 << 3) 24#define TPM_SC_CMOD_INC_PER_CNT (0x1 << 3)
22#define TPM_SC_CMOD_DIV_DEFAULT 0x3 25#define TPM_SC_CMOD_DIV_DEFAULT 0x3
26#define TPM_SC_CMOD_DIV_MAX 0x7
27#define TPM_SC_TOF_MASK (0x1 << 7)
23#define TPM_CNT 0x14 28#define TPM_CNT 0x14
24#define TPM_MOD 0x18 29#define TPM_MOD 0x18
25#define TPM_STATUS 0x1c 30#define TPM_STATUS 0x1c
@@ -29,8 +34,11 @@
29#define TPM_C0SC_MODE_SHIFT 2 34#define TPM_C0SC_MODE_SHIFT 2
30#define TPM_C0SC_MODE_MASK 0x3c 35#define TPM_C0SC_MODE_MASK 0x3c
31#define TPM_C0SC_MODE_SW_COMPARE 0x4 36#define TPM_C0SC_MODE_SW_COMPARE 0x4
37#define TPM_C0SC_CHF_MASK (0x1 << 7)
32#define TPM_C0V 0x24 38#define TPM_C0V 0x24
33 39
40static int counter_width;
41static int rating;
34static void __iomem *timer_base; 42static void __iomem *timer_base;
35static struct clock_event_device clockevent_tpm; 43static struct clock_event_device clockevent_tpm;
36 44
@@ -83,10 +91,11 @@ static int __init tpm_clocksource_init(unsigned long rate)
83 tpm_delay_timer.freq = rate; 91 tpm_delay_timer.freq = rate;
84 register_current_timer_delay(&tpm_delay_timer); 92 register_current_timer_delay(&tpm_delay_timer);
85 93
86 sched_clock_register(tpm_read_sched_clock, 32, rate); 94 sched_clock_register(tpm_read_sched_clock, counter_width, rate);
87 95
88 return clocksource_mmio_init(timer_base + TPM_CNT, "imx-tpm", 96 return clocksource_mmio_init(timer_base + TPM_CNT, "imx-tpm",
89 rate, 200, 32, clocksource_mmio_readl_up); 97 rate, rating, counter_width,
98 clocksource_mmio_readl_up);
90} 99}
91 100
92static int tpm_set_next_event(unsigned long delta, 101static int tpm_set_next_event(unsigned long delta,
@@ -105,7 +114,7 @@ static int tpm_set_next_event(unsigned long delta,
105 * of writing CNT registers which may cause the min_delta event got 114 * of writing CNT registers which may cause the min_delta event got
106 * missed, so we need add a ETIME check here in case it happened. 115 * missed, so we need add a ETIME check here in case it happened.
107 */ 116 */
108 return (int)((next - now) <= 0) ? -ETIME : 0; 117 return (int)(next - now) <= 0 ? -ETIME : 0;
109} 118}
110 119
111static int tpm_set_state_oneshot(struct clock_event_device *evt) 120static int tpm_set_state_oneshot(struct clock_event_device *evt)
@@ -139,7 +148,6 @@ static struct clock_event_device clockevent_tpm = {
139 .set_state_oneshot = tpm_set_state_oneshot, 148 .set_state_oneshot = tpm_set_state_oneshot,
140 .set_next_event = tpm_set_next_event, 149 .set_next_event = tpm_set_next_event,
141 .set_state_shutdown = tpm_set_state_shutdown, 150 .set_state_shutdown = tpm_set_state_shutdown,
142 .rating = 200,
143}; 151};
144 152
145static int __init tpm_clockevent_init(unsigned long rate, int irq) 153static int __init tpm_clockevent_init(unsigned long rate, int irq)
@@ -149,10 +157,11 @@ static int __init tpm_clockevent_init(unsigned long rate, int irq)
149 ret = request_irq(irq, tpm_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL, 157 ret = request_irq(irq, tpm_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
150 "i.MX7ULP TPM Timer", &clockevent_tpm); 158 "i.MX7ULP TPM Timer", &clockevent_tpm);
151 159
160 clockevent_tpm.rating = rating;
152 clockevent_tpm.cpumask = cpumask_of(0); 161 clockevent_tpm.cpumask = cpumask_of(0);
153 clockevent_tpm.irq = irq; 162 clockevent_tpm.irq = irq;
154 clockevents_config_and_register(&clockevent_tpm, 163 clockevents_config_and_register(&clockevent_tpm, rate, 300,
155 rate, 300, 0xfffffffe); 164 GENMASK(counter_width - 1, 1));
156 165
157 return ret; 166 return ret;
158} 167}
@@ -179,7 +188,7 @@ static int __init tpm_timer_init(struct device_node *np)
179 ipg = of_clk_get_by_name(np, "ipg"); 188 ipg = of_clk_get_by_name(np, "ipg");
180 per = of_clk_get_by_name(np, "per"); 189 per = of_clk_get_by_name(np, "per");
181 if (IS_ERR(ipg) || IS_ERR(per)) { 190 if (IS_ERR(ipg) || IS_ERR(per)) {
182 pr_err("tpm: failed to get igp or per clk\n"); 191 pr_err("tpm: failed to get ipg or per clk\n");
183 ret = -ENODEV; 192 ret = -ENODEV;
184 goto err_clk_get; 193 goto err_clk_get;
185 } 194 }
@@ -197,6 +206,11 @@ static int __init tpm_timer_init(struct device_node *np)
197 goto err_per_clk_enable; 206 goto err_per_clk_enable;
198 } 207 }
199 208
209 counter_width = (readl(timer_base + TPM_PARAM) & TPM_PARAM_WIDTH_MASK)
210 >> TPM_PARAM_WIDTH_SHIFT;
211 /* use rating 200 for 32-bit counter and 150 for 16-bit counter */
212 rating = counter_width == 0x20 ? 200 : 150;
213
200 /* 214 /*
201 * Initialize tpm module to a known state 215 * Initialize tpm module to a known state
202 * 1) Counter disabled 216 * 1) Counter disabled
@@ -205,16 +219,25 @@ static int __init tpm_timer_init(struct device_node *np)
205 * 4) Channel0 disabled 219 * 4) Channel0 disabled
206 * 5) DMA transfers disabled 220 * 5) DMA transfers disabled
207 */ 221 */
222 /* make sure counter is disabled */
208 writel(0, timer_base + TPM_SC); 223 writel(0, timer_base + TPM_SC);
224 /* TOF is W1C */
225 writel(TPM_SC_TOF_MASK, timer_base + TPM_SC);
209 writel(0, timer_base + TPM_CNT); 226 writel(0, timer_base + TPM_CNT);
210 writel(0, timer_base + TPM_C0SC); 227 /* CHF is W1C */
228 writel(TPM_C0SC_CHF_MASK, timer_base + TPM_C0SC);
211 229
212 /* increase per cnt, div 8 by default */ 230 /*
213 writel(TPM_SC_CMOD_INC_PER_CNT | TPM_SC_CMOD_DIV_DEFAULT, 231 * increase per cnt,
232 * div 8 for 32-bit counter and div 128 for 16-bit counter
233 */
234 writel(TPM_SC_CMOD_INC_PER_CNT |
235 (counter_width == 0x20 ?
236 TPM_SC_CMOD_DIV_DEFAULT : TPM_SC_CMOD_DIV_MAX),
214 timer_base + TPM_SC); 237 timer_base + TPM_SC);
215 238
216 /* set MOD register to maximum for free running mode */ 239 /* set MOD register to maximum for free running mode */
217 writel(0xffffffff, timer_base + TPM_MOD); 240 writel(GENMASK(counter_width - 1, 0), timer_base + TPM_MOD);
218 241
219 rate = clk_get_rate(per) >> 3; 242 rate = clk_get_rate(per) >> 3;
220 ret = tpm_clocksource_init(rate); 243 ret = tpm_clocksource_init(rate);
diff --git a/drivers/clocksource/timer-npcm7xx.c b/drivers/clocksource/timer-npcm7xx.c
new file mode 100644
index 000000000000..7a9bb5532d99
--- /dev/null
+++ b/drivers/clocksource/timer-npcm7xx.c
@@ -0,0 +1,215 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2014-2018 Nuvoton Technologies tomer.maimon@nuvoton.com
4 * All rights reserved.
5 *
6 * Copyright 2017 Google, Inc.
7 */
8
9#include <linux/kernel.h>
10#include <linux/sched.h>
11#include <linux/init.h>
12#include <linux/interrupt.h>
13#include <linux/err.h>
14#include <linux/clk.h>
15#include <linux/io.h>
16#include <linux/clockchips.h>
17#include <linux/of_irq.h>
18#include <linux/of_address.h>
19#include "timer-of.h"
20
/* Timers registers */
#define NPCM7XX_REG_TCSR0	0x0  /* Timer 0 Control and Status Register */
#define NPCM7XX_REG_TICR0	0x8  /* Timer 0 Initial Count Register */
#define NPCM7XX_REG_TCSR1	0x4  /* Timer 1 Control and Status Register */
#define NPCM7XX_REG_TICR1	0xc  /* Timer 1 Initial Count Register */
#define NPCM7XX_REG_TDR1	0x14 /* Timer 1 Data Register */
#define NPCM7XX_REG_TISR	0x18 /* Timer Interrupt Status Register */

/* Timers control */
#define NPCM7XX_Tx_RESETINT		0x1f
#define NPCM7XX_Tx_PERIOD		BIT(27)
#define NPCM7XX_Tx_INTEN		BIT(29)
#define NPCM7XX_Tx_COUNTEN		BIT(30)
#define NPCM7XX_Tx_ONESHOT		0x0
/*
 * Operating-mode field of TCSR is bits 28:27.  GENMASK(h, l) takes the
 * high bit first; the previous GENMASK(3, 27) was malformed (h < l) and
 * produced a garbage mask, so clearing the mode bits was a no-op.
 */
#define NPCM7XX_Tx_OPER			GENMASK(28, 27)
#define NPCM7XX_Tx_MIN_PRESCALE		0x1
#define NPCM7XX_Tx_TDR_MASK_BITS	24
#define NPCM7XX_Tx_MAX_CNT		0xFFFFFF
#define NPCM7XX_T0_CLR_INT		0x1
#define NPCM7XX_Tx_CLR_CSR		0x0

/* Timers operating mode */
#define NPCM7XX_START_PERIODIC_Tx (NPCM7XX_Tx_PERIOD | NPCM7XX_Tx_COUNTEN | \
					NPCM7XX_Tx_INTEN | \
					NPCM7XX_Tx_MIN_PRESCALE)

#define NPCM7XX_START_ONESHOT_Tx (NPCM7XX_Tx_ONESHOT | NPCM7XX_Tx_COUNTEN | \
					NPCM7XX_Tx_INTEN | \
					NPCM7XX_Tx_MIN_PRESCALE)

#define NPCM7XX_START_Tx (NPCM7XX_Tx_COUNTEN | NPCM7XX_Tx_PERIOD | \
				NPCM7XX_Tx_MIN_PRESCALE)

#define NPCM7XX_DEFAULT_CSR (NPCM7XX_Tx_CLR_CSR | NPCM7XX_Tx_MIN_PRESCALE)
55
56static int npcm7xx_timer_resume(struct clock_event_device *evt)
57{
58 struct timer_of *to = to_timer_of(evt);
59 u32 val;
60
61 val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
62 val |= NPCM7XX_Tx_COUNTEN;
63 writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
64
65 return 0;
66}
67
68static int npcm7xx_timer_shutdown(struct clock_event_device *evt)
69{
70 struct timer_of *to = to_timer_of(evt);
71 u32 val;
72
73 val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
74 val &= ~NPCM7XX_Tx_COUNTEN;
75 writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
76
77 return 0;
78}
79
80static int npcm7xx_timer_oneshot(struct clock_event_device *evt)
81{
82 struct timer_of *to = to_timer_of(evt);
83 u32 val;
84
85 val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
86 val &= ~NPCM7XX_Tx_OPER;
87
88 val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
89 val |= NPCM7XX_START_ONESHOT_Tx;
90 writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
91
92 return 0;
93}
94
95static int npcm7xx_timer_periodic(struct clock_event_device *evt)
96{
97 struct timer_of *to = to_timer_of(evt);
98 u32 val;
99
100 val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
101 val &= ~NPCM7XX_Tx_OPER;
102
103 writel(timer_of_period(to), timer_of_base(to) + NPCM7XX_REG_TICR0);
104 val |= NPCM7XX_START_PERIODIC_Tx;
105
106 writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
107
108 return 0;
109}
110
111static int npcm7xx_clockevent_set_next_event(unsigned long evt,
112 struct clock_event_device *clk)
113{
114 struct timer_of *to = to_timer_of(clk);
115 u32 val;
116
117 writel(evt, timer_of_base(to) + NPCM7XX_REG_TICR0);
118 val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
119 val |= NPCM7XX_START_Tx;
120 writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
121
122 return 0;
123}
124
125static irqreturn_t npcm7xx_timer0_interrupt(int irq, void *dev_id)
126{
127 struct clock_event_device *evt = (struct clock_event_device *)dev_id;
128 struct timer_of *to = to_timer_of(evt);
129
130 writel(NPCM7XX_T0_CLR_INT, timer_of_base(to) + NPCM7XX_REG_TISR);
131
132 evt->event_handler(evt);
133
134 return IRQ_HANDLED;
135}
136
/*
 * timer-of descriptor for the NPCM7xx timer block: timer 0 is the
 * clockevent (IRQ-driven), timer 1 is set up separately as clocksource.
 */
static struct timer_of npcm7xx_to = {
	/* Resolve IRQ, MMIO base and clock from the device tree node. */
	.flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,

	.clkevt = {
		.name		    = "npcm7xx-timer0",
		.features	    = CLOCK_EVT_FEAT_PERIODIC |
				      CLOCK_EVT_FEAT_ONESHOT,
		.set_next_event	    = npcm7xx_clockevent_set_next_event,
		.set_state_shutdown = npcm7xx_timer_shutdown,
		.set_state_periodic = npcm7xx_timer_periodic,
		.set_state_oneshot  = npcm7xx_timer_oneshot,
		.tick_resume	    = npcm7xx_timer_resume,
		.rating		    = 300,
	},

	.of_irq = {
		.handler = npcm7xx_timer0_interrupt,
		.flags = IRQF_TIMER | IRQF_IRQPOLL,
	},
};
157
/*
 * Register timer 0 as the boot-CPU clockevent.  The counter is 24 bits
 * wide, hence the NPCM7XX_Tx_MAX_CNT upper bound on the delta.
 */
static void __init npcm7xx_clockevents_init(void)
{
	/* Reset TCSR0 to a known state (prescale only, counter stopped). */
	writel(NPCM7XX_DEFAULT_CSR,
		timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR0);

	/* Clear any pending timer interrupt status bits. */
	writel(NPCM7XX_Tx_RESETINT,
		timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TISR);

	npcm7xx_to.clkevt.cpumask = cpumask_of(0);
	clockevents_config_and_register(&npcm7xx_to.clkevt,
					timer_of_rate(&npcm7xx_to),
					0x1, NPCM7XX_Tx_MAX_CNT);
}
171
172static void __init npcm7xx_clocksource_init(void)
173{
174 u32 val;
175
176 writel(NPCM7XX_DEFAULT_CSR,
177 timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1);
178 writel(NPCM7XX_Tx_MAX_CNT,
179 timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TICR1);
180
181 val = readl(timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1);
182 val |= NPCM7XX_START_Tx;
183 writel(val, timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1);
184
185 clocksource_mmio_init(timer_of_base(&npcm7xx_to) +
186 NPCM7XX_REG_TDR1,
187 "npcm7xx-timer1", timer_of_rate(&npcm7xx_to),
188 200, (unsigned int)NPCM7XX_Tx_TDR_MASK_BITS,
189 clocksource_mmio_readl_down);
190}
191
192static int __init npcm7xx_timer_init(struct device_node *np)
193{
194 int ret;
195
196 ret = timer_of_init(np, &npcm7xx_to);
197 if (ret)
198 return ret;
199
200 /* Clock input is divided by PRESCALE + 1 before it is fed */
201 /* to the counter */
202 npcm7xx_to.of_clk.rate = npcm7xx_to.of_clk.rate /
203 (NPCM7XX_Tx_MIN_PRESCALE + 1);
204
205 npcm7xx_clocksource_init();
206 npcm7xx_clockevents_init();
207
208 pr_info("Enabling NPCM7xx clocksource timer base: %px, IRQ: %d ",
209 timer_of_base(&npcm7xx_to), timer_of_irq(&npcm7xx_to));
210
211 return 0;
212}
213
214TIMER_OF_DECLARE(npcm7xx, "nuvoton,npcm750-timer", npcm7xx_timer_init);
215
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 7f56fe5183f2..de55c7d57438 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -71,16 +71,6 @@ config ARM_BRCMSTB_AVS_CPUFREQ
71 71
72 Say Y, if you have a Broadcom SoC with AVS support for DFS or DVFS. 72 Say Y, if you have a Broadcom SoC with AVS support for DFS or DVFS.
73 73
74config ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
75 bool "Broadcom STB AVS CPUfreq driver sysfs debug capability"
76 depends on ARM_BRCMSTB_AVS_CPUFREQ
77 help
78 Enabling this option turns on debug support via sysfs under
79 /sys/kernel/debug/brcmstb-avs-cpufreq. It is possible to read all and
80 write some AVS mailbox registers through sysfs entries.
81
82 If in doubt, say N.
83
84config ARM_EXYNOS5440_CPUFREQ 74config ARM_EXYNOS5440_CPUFREQ
85 tristate "SAMSUNG EXYNOS5440" 75 tristate "SAMSUNG EXYNOS5440"
86 depends on SOC_EXYNOS5440 76 depends on SOC_EXYNOS5440
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 6cdac1aaf23c..b07559b9ed99 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -49,13 +49,6 @@
49#include <linux/platform_device.h> 49#include <linux/platform_device.h>
50#include <linux/semaphore.h> 50#include <linux/semaphore.h>
51 51
52#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
53#include <linux/ctype.h>
54#include <linux/debugfs.h>
55#include <linux/slab.h>
56#include <linux/uaccess.h>
57#endif
58
59/* Max number of arguments AVS calls take */ 52/* Max number of arguments AVS calls take */
60#define AVS_MAX_CMD_ARGS 4 53#define AVS_MAX_CMD_ARGS 4
61/* 54/*
@@ -182,88 +175,11 @@ struct private_data {
182 void __iomem *base; 175 void __iomem *base;
183 void __iomem *avs_intr_base; 176 void __iomem *avs_intr_base;
184 struct device *dev; 177 struct device *dev;
185#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
186 struct dentry *debugfs;
187#endif
188 struct completion done; 178 struct completion done;
189 struct semaphore sem; 179 struct semaphore sem;
190 struct pmap pmap; 180 struct pmap pmap;
191}; 181};
192 182
193#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
194
195enum debugfs_format {
196 DEBUGFS_NORMAL,
197 DEBUGFS_FLOAT,
198 DEBUGFS_REV,
199};
200
201struct debugfs_data {
202 struct debugfs_entry *entry;
203 struct private_data *priv;
204};
205
206struct debugfs_entry {
207 char *name;
208 u32 offset;
209 fmode_t mode;
210 enum debugfs_format format;
211};
212
213#define DEBUGFS_ENTRY(name, mode, format) { \
214 #name, AVS_MBOX_##name, mode, format \
215}
216
217/*
218 * These are used for debugfs only. Otherwise we use AVS_MBOX_PARAM() directly.
219 */
220#define AVS_MBOX_PARAM1 AVS_MBOX_PARAM(0)
221#define AVS_MBOX_PARAM2 AVS_MBOX_PARAM(1)
222#define AVS_MBOX_PARAM3 AVS_MBOX_PARAM(2)
223#define AVS_MBOX_PARAM4 AVS_MBOX_PARAM(3)
224
225/*
226 * This table stores the name, access permissions and offset for each hardware
227 * register and is used to generate debugfs entries.
228 */
229static struct debugfs_entry debugfs_entries[] = {
230 DEBUGFS_ENTRY(COMMAND, S_IWUSR, DEBUGFS_NORMAL),
231 DEBUGFS_ENTRY(STATUS, S_IWUSR, DEBUGFS_NORMAL),
232 DEBUGFS_ENTRY(VOLTAGE0, 0, DEBUGFS_FLOAT),
233 DEBUGFS_ENTRY(TEMP0, 0, DEBUGFS_FLOAT),
234 DEBUGFS_ENTRY(PV0, 0, DEBUGFS_FLOAT),
235 DEBUGFS_ENTRY(MV0, 0, DEBUGFS_FLOAT),
236 DEBUGFS_ENTRY(PARAM1, S_IWUSR, DEBUGFS_NORMAL),
237 DEBUGFS_ENTRY(PARAM2, S_IWUSR, DEBUGFS_NORMAL),
238 DEBUGFS_ENTRY(PARAM3, S_IWUSR, DEBUGFS_NORMAL),
239 DEBUGFS_ENTRY(PARAM4, S_IWUSR, DEBUGFS_NORMAL),
240 DEBUGFS_ENTRY(REVISION, 0, DEBUGFS_REV),
241 DEBUGFS_ENTRY(PSTATE, 0, DEBUGFS_NORMAL),
242 DEBUGFS_ENTRY(HEARTBEAT, 0, DEBUGFS_NORMAL),
243 DEBUGFS_ENTRY(MAGIC, S_IWUSR, DEBUGFS_NORMAL),
244 DEBUGFS_ENTRY(SIGMA_HVT, 0, DEBUGFS_NORMAL),
245 DEBUGFS_ENTRY(SIGMA_SVT, 0, DEBUGFS_NORMAL),
246 DEBUGFS_ENTRY(VOLTAGE1, 0, DEBUGFS_FLOAT),
247 DEBUGFS_ENTRY(TEMP1, 0, DEBUGFS_FLOAT),
248 DEBUGFS_ENTRY(PV1, 0, DEBUGFS_FLOAT),
249 DEBUGFS_ENTRY(MV1, 0, DEBUGFS_FLOAT),
250 DEBUGFS_ENTRY(FREQUENCY, 0, DEBUGFS_NORMAL),
251};
252
253static int brcm_avs_target_index(struct cpufreq_policy *, unsigned int);
254
255static char *__strtolower(char *s)
256{
257 char *p;
258
259 for (p = s; *p; p++)
260 *p = tolower(*p);
261
262 return s;
263}
264
265#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */
266
267static void __iomem *__map_region(const char *name) 183static void __iomem *__map_region(const char *name)
268{ 184{
269 struct device_node *np; 185 struct device_node *np;
@@ -516,238 +432,6 @@ brcm_avs_get_freq_table(struct device *dev, struct private_data *priv)
516 return table; 432 return table;
517} 433}
518 434
519#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
520
521#define MANT(x) (unsigned int)(abs((x)) / 1000)
522#define FRAC(x) (unsigned int)(abs((x)) - abs((x)) / 1000 * 1000)
523
524static int brcm_avs_debug_show(struct seq_file *s, void *data)
525{
526 struct debugfs_data *dbgfs = s->private;
527 void __iomem *base;
528 u32 val, offset;
529
530 if (!dbgfs) {
531 seq_puts(s, "No device pointer\n");
532 return 0;
533 }
534
535 base = dbgfs->priv->base;
536 offset = dbgfs->entry->offset;
537 val = readl(base + offset);
538 switch (dbgfs->entry->format) {
539 case DEBUGFS_NORMAL:
540 seq_printf(s, "%u\n", val);
541 break;
542 case DEBUGFS_FLOAT:
543 seq_printf(s, "%d.%03d\n", MANT(val), FRAC(val));
544 break;
545 case DEBUGFS_REV:
546 seq_printf(s, "%c.%c.%c.%c\n", (val >> 24 & 0xff),
547 (val >> 16 & 0xff), (val >> 8 & 0xff),
548 val & 0xff);
549 break;
550 }
551 seq_printf(s, "0x%08x\n", val);
552
553 return 0;
554}
555
556#undef MANT
557#undef FRAC
558
559static ssize_t brcm_avs_seq_write(struct file *file, const char __user *buf,
560 size_t size, loff_t *ppos)
561{
562 struct seq_file *s = file->private_data;
563 struct debugfs_data *dbgfs = s->private;
564 struct private_data *priv = dbgfs->priv;
565 void __iomem *base, *avs_intr_base;
566 bool use_issue_command = false;
567 unsigned long val, offset;
568 char str[128];
569 int ret;
570 char *str_ptr = str;
571
572 if (size >= sizeof(str))
573 return -E2BIG;
574
575 memset(str, 0, sizeof(str));
576 ret = copy_from_user(str, buf, size);
577 if (ret)
578 return ret;
579
580 base = priv->base;
581 avs_intr_base = priv->avs_intr_base;
582 offset = dbgfs->entry->offset;
583 /*
584 * Special case writing to "command" entry only: if the string starts
585 * with a 'c', we use the driver's __issue_avs_command() function.
586 * Otherwise, we perform a raw write. This should allow testing of raw
587 * access as well as using the higher level function. (Raw access
588 * doesn't clear the firmware return status after issuing the command.)
589 */
590 if (str_ptr[0] == 'c' && offset == AVS_MBOX_COMMAND) {
591 use_issue_command = true;
592 str_ptr++;
593 }
594 if (kstrtoul(str_ptr, 0, &val) != 0)
595 return -EINVAL;
596
597 /*
598 * Setting the P-state is a special case. We need to update the CPU
599 * frequency we report.
600 */
601 if (val == AVS_CMD_SET_PSTATE) {
602 struct cpufreq_policy *policy;
603 unsigned int pstate;
604
605 policy = cpufreq_cpu_get(smp_processor_id());
606 /* Read back the P-state we are about to set */
607 pstate = readl(base + AVS_MBOX_PARAM(0));
608 if (use_issue_command) {
609 ret = brcm_avs_target_index(policy, pstate);
610 return ret ? ret : size;
611 }
612 policy->cur = policy->freq_table[pstate].frequency;
613 }
614
615 if (use_issue_command) {
616 ret = __issue_avs_command(priv, val, false, NULL);
617 } else {
618 /* Locking here is not perfect, but is only for debug. */
619 ret = down_interruptible(&priv->sem);
620 if (ret)
621 return ret;
622
623 writel(val, base + offset);
624 /* We have to wake up the firmware to process a command. */
625 if (offset == AVS_MBOX_COMMAND)
626 writel(AVS_CPU_L2_INT_MASK,
627 avs_intr_base + AVS_CPU_L2_SET0);
628 up(&priv->sem);
629 }
630
631 return ret ? ret : size;
632}
633
634static struct debugfs_entry *__find_debugfs_entry(const char *name)
635{
636 int i;
637
638 for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++)
639 if (strcasecmp(debugfs_entries[i].name, name) == 0)
640 return &debugfs_entries[i];
641
642 return NULL;
643}
644
645static int brcm_avs_debug_open(struct inode *inode, struct file *file)
646{
647 struct debugfs_data *data;
648 fmode_t fmode;
649 int ret;
650
651 /*
652 * seq_open(), which is called by single_open(), clears "write" access.
653 * We need write access to some files, so we preserve our access mode
654 * and restore it.
655 */
656 fmode = file->f_mode;
657 /*
658 * Check access permissions even for root. We don't want to be writing
659 * to read-only registers. Access for regular users has already been
660 * checked by the VFS layer.
661 */
662 if ((fmode & FMODE_WRITER) && !(inode->i_mode & S_IWUSR))
663 return -EACCES;
664
665 data = kmalloc(sizeof(*data), GFP_KERNEL);
666 if (!data)
667 return -ENOMEM;
668 /*
669 * We use the same file system operations for all our debug files. To
670 * produce specific output, we look up the file name upon opening a
671 * debugfs entry and map it to a memory offset. This offset is then used
672 * in the generic "show" function to read a specific register.
673 */
674 data->entry = __find_debugfs_entry(file->f_path.dentry->d_iname);
675 data->priv = inode->i_private;
676
677 ret = single_open(file, brcm_avs_debug_show, data);
678 if (ret)
679 kfree(data);
680 file->f_mode = fmode;
681
682 return ret;
683}
684
685static int brcm_avs_debug_release(struct inode *inode, struct file *file)
686{
687 struct seq_file *seq_priv = file->private_data;
688 struct debugfs_data *data = seq_priv->private;
689
690 kfree(data);
691 return single_release(inode, file);
692}
693
694static const struct file_operations brcm_avs_debug_ops = {
695 .open = brcm_avs_debug_open,
696 .read = seq_read,
697 .write = brcm_avs_seq_write,
698 .llseek = seq_lseek,
699 .release = brcm_avs_debug_release,
700};
701
702static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev)
703{
704 struct private_data *priv = platform_get_drvdata(pdev);
705 struct dentry *dir;
706 int i;
707
708 if (!priv)
709 return;
710
711 dir = debugfs_create_dir(BRCM_AVS_CPUFREQ_NAME, NULL);
712 if (IS_ERR_OR_NULL(dir))
713 return;
714 priv->debugfs = dir;
715
716 for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++) {
717 /*
718 * The DEBUGFS_ENTRY macro generates uppercase strings. We
719 * convert them to lowercase before creating the debugfs
720 * entries.
721 */
722 char *entry = __strtolower(debugfs_entries[i].name);
723 fmode_t mode = debugfs_entries[i].mode;
724
725 if (!debugfs_create_file(entry, S_IFREG | S_IRUGO | mode,
726 dir, priv, &brcm_avs_debug_ops)) {
727 priv->debugfs = NULL;
728 debugfs_remove_recursive(dir);
729 break;
730 }
731 }
732}
733
734static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev)
735{
736 struct private_data *priv = platform_get_drvdata(pdev);
737
738 if (priv && priv->debugfs) {
739 debugfs_remove_recursive(priv->debugfs);
740 priv->debugfs = NULL;
741 }
742}
743
744#else
745
746static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev) {}
747static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev) {}
748
749#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */
750
751/* 435/*
752 * To ensure the right firmware is running we need to 436 * To ensure the right firmware is running we need to
753 * - check the MAGIC matches what we expect 437 * - check the MAGIC matches what we expect
@@ -1016,11 +700,8 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
1016 return ret; 700 return ret;
1017 701
1018 brcm_avs_driver.driver_data = pdev; 702 brcm_avs_driver.driver_data = pdev;
1019 ret = cpufreq_register_driver(&brcm_avs_driver);
1020 if (!ret)
1021 brcm_avs_cpufreq_debug_init(pdev);
1022 703
1023 return ret; 704 return cpufreq_register_driver(&brcm_avs_driver);
1024} 705}
1025 706
1026static int brcm_avs_cpufreq_remove(struct platform_device *pdev) 707static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
@@ -1032,8 +713,6 @@ static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
1032 if (ret) 713 if (ret)
1033 return ret; 714 return ret;
1034 715
1035 brcm_avs_cpufreq_debug_exit(pdev);
1036
1037 priv = platform_get_drvdata(pdev); 716 priv = platform_get_drvdata(pdev);
1038 iounmap(priv->base); 717 iounmap(priv->base);
1039 iounmap(priv->avs_intr_base); 718 iounmap(priv->avs_intr_base);
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 0591874856d3..54edaec1e608 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -679,6 +679,16 @@ void gpstate_timer_handler(struct timer_list *t)
679 679
680 if (!spin_trylock(&gpstates->gpstate_lock)) 680 if (!spin_trylock(&gpstates->gpstate_lock))
681 return; 681 return;
682 /*
683 * If the timer has migrated to the different cpu then bring
684 * it back to one of the policy->cpus
685 */
686 if (!cpumask_test_cpu(raw_smp_processor_id(), policy->cpus)) {
687 gpstates->timer.expires = jiffies + msecs_to_jiffies(1);
688 add_timer_on(&gpstates->timer, cpumask_first(policy->cpus));
689 spin_unlock(&gpstates->gpstate_lock);
690 return;
691 }
682 692
683 /* 693 /*
684 * If PMCR was last updated was using fast_swtich then 694 * If PMCR was last updated was using fast_swtich then
@@ -718,10 +728,8 @@ void gpstate_timer_handler(struct timer_list *t)
718 if (gpstate_idx != gpstates->last_lpstate_idx) 728 if (gpstate_idx != gpstates->last_lpstate_idx)
719 queue_gpstate_timer(gpstates); 729 queue_gpstate_timer(gpstates);
720 730
731 set_pstate(&freq_data);
721 spin_unlock(&gpstates->gpstate_lock); 732 spin_unlock(&gpstates->gpstate_lock);
722
723 /* Timer may get migrated to a different cpu on cpu hot unplug */
724 smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
725} 733}
726 734
727/* 735/*
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index be8606457f27..aff2c1594220 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -19,6 +19,7 @@
19#include <linux/dax.h> 19#include <linux/dax.h>
20#include <linux/fs.h> 20#include <linux/fs.h>
21#include <linux/mm.h> 21#include <linux/mm.h>
22#include <linux/mman.h>
22#include "dax-private.h" 23#include "dax-private.h"
23#include "dax.h" 24#include "dax.h"
24 25
@@ -540,6 +541,7 @@ static const struct file_operations dax_fops = {
540 .release = dax_release, 541 .release = dax_release,
541 .get_unmapped_area = dax_get_unmapped_area, 542 .get_unmapped_area = dax_get_unmapped_area,
542 .mmap = dax_mmap, 543 .mmap = dax_mmap,
544 .mmap_supported_flags = MAP_SYNC,
543}; 545};
544 546
545static void dev_dax_release(struct device *dev) 547static void dev_dax_release(struct device *dev)
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index e6f17825db79..2b90606452a2 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -284,7 +284,7 @@ scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id)
284 struct clock_info *ci = handle->clk_priv; 284 struct clock_info *ci = handle->clk_priv;
285 struct scmi_clock_info *clk = ci->clk + clk_id; 285 struct scmi_clock_info *clk = ci->clk + clk_id;
286 286
287 if (!clk->name || !clk->name[0]) 287 if (!clk->name[0])
288 return NULL; 288 return NULL;
289 289
290 return clk; 290 return clk;
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
index 14f14efdf0d5..06d212a3d49d 100644
--- a/drivers/fpga/altera-ps-spi.c
+++ b/drivers/fpga/altera-ps-spi.c
@@ -249,7 +249,7 @@ static int altera_ps_probe(struct spi_device *spi)
249 249
250 conf->data = of_id->data; 250 conf->data = of_id->data;
251 conf->spi = spi; 251 conf->spi = spi;
252 conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_HIGH); 252 conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_LOW);
253 if (IS_ERR(conf->config)) { 253 if (IS_ERR(conf->config)) {
254 dev_err(&spi->dev, "Failed to get config gpio: %ld\n", 254 dev_err(&spi->dev, "Failed to get config gpio: %ld\n",
255 PTR_ERR(conf->config)); 255 PTR_ERR(conf->config));
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index b0e591eaa71a..e14263fca1c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1459,10 +1459,11 @@ static const u32 sgpr_init_compute_shader[] =
1459static const u32 vgpr_init_regs[] = 1459static const u32 vgpr_init_regs[] =
1460{ 1460{
1461 mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff, 1461 mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
1462 mmCOMPUTE_RESOURCE_LIMITS, 0, 1462 mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
1463 mmCOMPUTE_NUM_THREAD_X, 256*4, 1463 mmCOMPUTE_NUM_THREAD_X, 256*4,
1464 mmCOMPUTE_NUM_THREAD_Y, 1, 1464 mmCOMPUTE_NUM_THREAD_Y, 1,
1465 mmCOMPUTE_NUM_THREAD_Z, 1, 1465 mmCOMPUTE_NUM_THREAD_Z, 1,
1466 mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
1466 mmCOMPUTE_PGM_RSRC2, 20, 1467 mmCOMPUTE_PGM_RSRC2, 20,
1467 mmCOMPUTE_USER_DATA_0, 0xedcedc00, 1468 mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1468 mmCOMPUTE_USER_DATA_1, 0xedcedc01, 1469 mmCOMPUTE_USER_DATA_1, 0xedcedc01,
@@ -1479,10 +1480,11 @@ static const u32 vgpr_init_regs[] =
1479static const u32 sgpr1_init_regs[] = 1480static const u32 sgpr1_init_regs[] =
1480{ 1481{
1481 mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f, 1482 mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
1482 mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, 1483 mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
1483 mmCOMPUTE_NUM_THREAD_X, 256*5, 1484 mmCOMPUTE_NUM_THREAD_X, 256*5,
1484 mmCOMPUTE_NUM_THREAD_Y, 1, 1485 mmCOMPUTE_NUM_THREAD_Y, 1,
1485 mmCOMPUTE_NUM_THREAD_Z, 1, 1486 mmCOMPUTE_NUM_THREAD_Z, 1,
1487 mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
1486 mmCOMPUTE_PGM_RSRC2, 20, 1488 mmCOMPUTE_PGM_RSRC2, 20,
1487 mmCOMPUTE_USER_DATA_0, 0xedcedc00, 1489 mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1488 mmCOMPUTE_USER_DATA_1, 0xedcedc01, 1490 mmCOMPUTE_USER_DATA_1, 0xedcedc01,
@@ -1503,6 +1505,7 @@ static const u32 sgpr2_init_regs[] =
1503 mmCOMPUTE_NUM_THREAD_X, 256*5, 1505 mmCOMPUTE_NUM_THREAD_X, 256*5,
1504 mmCOMPUTE_NUM_THREAD_Y, 1, 1506 mmCOMPUTE_NUM_THREAD_Y, 1,
1505 mmCOMPUTE_NUM_THREAD_Z, 1, 1507 mmCOMPUTE_NUM_THREAD_Z, 1,
1508 mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
1506 mmCOMPUTE_PGM_RSRC2, 20, 1509 mmCOMPUTE_PGM_RSRC2, 20,
1507 mmCOMPUTE_USER_DATA_0, 0xedcedc00, 1510 mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1508 mmCOMPUTE_USER_DATA_1, 0xedcedc01, 1511 mmCOMPUTE_USER_DATA_1, 0xedcedc01,
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index ed2f06c9f346..3858820a0055 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -6,5 +6,6 @@ config HSA_AMD
6 tristate "HSA kernel driver for AMD GPU devices" 6 tristate "HSA kernel driver for AMD GPU devices"
7 depends on DRM_AMDGPU && X86_64 7 depends on DRM_AMDGPU && X86_64
8 imply AMD_IOMMU_V2 8 imply AMD_IOMMU_V2
9 select MMU_NOTIFIER
9 help 10 help
10 Enable this if you want to use HSA features on AMD GPU devices. 11 Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index cd679cf1fd30..59808a39ecf4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -749,12 +749,13 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
749 struct timespec64 time; 749 struct timespec64 time;
750 750
751 dev = kfd_device_by_id(args->gpu_id); 751 dev = kfd_device_by_id(args->gpu_id);
752 if (dev == NULL) 752 if (dev)
753 return -EINVAL; 753 /* Reading GPU clock counter from KGD */
754 754 args->gpu_clock_counter =
755 /* Reading GPU clock counter from KGD */ 755 dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
756 args->gpu_clock_counter = 756 else
757 dev->kfd2kgd->get_gpu_clock_counter(dev->kgd); 757 /* Node without GPU resource */
758 args->gpu_clock_counter = 0;
758 759
759 /* No access to rdtsc. Using raw monotonic time */ 760 /* No access to rdtsc. Using raw monotonic time */
760 getrawmonotonic64(&time); 761 getrawmonotonic64(&time);
@@ -1147,7 +1148,7 @@ err_unlock:
1147 return ret; 1148 return ret;
1148} 1149}
1149 1150
1150bool kfd_dev_is_large_bar(struct kfd_dev *dev) 1151static bool kfd_dev_is_large_bar(struct kfd_dev *dev)
1151{ 1152{
1152 struct kfd_local_mem_info mem_info; 1153 struct kfd_local_mem_info mem_info;
1153 1154
@@ -1421,7 +1422,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
1421 1422
1422 pdd = kfd_get_process_device_data(dev, p); 1423 pdd = kfd_get_process_device_data(dev, p);
1423 if (!pdd) { 1424 if (!pdd) {
1424 err = PTR_ERR(pdd); 1425 err = -EINVAL;
1425 goto bind_process_to_device_failed; 1426 goto bind_process_to_device_failed;
1426 } 1427 }
1427 1428
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4e2f379ce217..1dd1142246c2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4557,6 +4557,7 @@ static int dm_update_crtcs_state(struct dc *dc,
4557 struct amdgpu_dm_connector *aconnector = NULL; 4557 struct amdgpu_dm_connector *aconnector = NULL;
4558 struct drm_connector_state *new_con_state = NULL; 4558 struct drm_connector_state *new_con_state = NULL;
4559 struct dm_connector_state *dm_conn_state = NULL; 4559 struct dm_connector_state *dm_conn_state = NULL;
4560 struct drm_plane_state *new_plane_state = NULL;
4560 4561
4561 new_stream = NULL; 4562 new_stream = NULL;
4562 4563
@@ -4564,6 +4565,13 @@ static int dm_update_crtcs_state(struct dc *dc,
4564 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 4565 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4565 acrtc = to_amdgpu_crtc(crtc); 4566 acrtc = to_amdgpu_crtc(crtc);
4566 4567
4568 new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
4569
4570 if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
4571 ret = -EINVAL;
4572 goto fail;
4573 }
4574
4567 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); 4575 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
4568 4576
4569 /* TODO This hack should go away */ 4577 /* TODO This hack should go away */
@@ -4760,7 +4768,7 @@ static int dm_update_planes_state(struct dc *dc,
4760 if (!dm_old_crtc_state->stream) 4768 if (!dm_old_crtc_state->stream)
4761 continue; 4769 continue;
4762 4770
4763 DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n", 4771 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
4764 plane->base.id, old_plane_crtc->base.id); 4772 plane->base.id, old_plane_crtc->base.id);
4765 4773
4766 if (!dc_remove_plane_from_context( 4774 if (!dc_remove_plane_from_context(
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index f6cb502c303f..25f064c01038 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -138,13 +138,6 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc)
138 lut = (struct drm_color_lut *)blob->data; 138 lut = (struct drm_color_lut *)blob->data;
139 lut_size = blob->length / sizeof(struct drm_color_lut); 139 lut_size = blob->length / sizeof(struct drm_color_lut);
140 140
141 if (__is_lut_linear(lut, lut_size)) {
142 /* Set to bypass if lut is set to linear */
143 stream->out_transfer_func->type = TF_TYPE_BYPASS;
144 stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
145 return 0;
146 }
147
148 gamma = dc_create_gamma(); 141 gamma = dc_create_gamma();
149 if (!gamma) 142 if (!gamma)
150 return -ENOMEM; 143 return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 490017df371d..4be21bf54749 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -329,14 +329,15 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
329{ 329{
330 int src; 330 int src;
331 struct irq_list_head *lh; 331 struct irq_list_head *lh;
332 unsigned long irq_table_flags;
332 DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n"); 333 DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
333
334 for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) { 334 for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
335 335 DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
336 /* The handler was removed from the table, 336 /* The handler was removed from the table,
337 * it means it is safe to flush all the 'work' 337 * it means it is safe to flush all the 'work'
338 * (because no code can schedule a new one). */ 338 * (because no code can schedule a new one). */
339 lh = &adev->dm.irq_handler_list_low_tab[src]; 339 lh = &adev->dm.irq_handler_list_low_tab[src];
340 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
340 flush_work(&lh->work); 341 flush_work(&lh->work);
341 } 342 }
342} 343}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 8291d74f26bc..ace9ad578ca0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -161,6 +161,11 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)
161 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 161 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
162 struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder; 162 struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
163 163
164 if (amdgpu_dm_connector->edid) {
165 kfree(amdgpu_dm_connector->edid);
166 amdgpu_dm_connector->edid = NULL;
167 }
168
164 drm_encoder_cleanup(&amdgpu_encoder->base); 169 drm_encoder_cleanup(&amdgpu_encoder->base);
165 kfree(amdgpu_encoder); 170 kfree(amdgpu_encoder);
166 drm_connector_cleanup(connector); 171 drm_connector_cleanup(connector);
@@ -181,28 +186,22 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
181void dm_dp_mst_dc_sink_create(struct drm_connector *connector) 186void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
182{ 187{
183 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 188 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
184 struct edid *edid;
185 struct dc_sink *dc_sink; 189 struct dc_sink *dc_sink;
186 struct dc_sink_init_data init_params = { 190 struct dc_sink_init_data init_params = {
187 .link = aconnector->dc_link, 191 .link = aconnector->dc_link,
188 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; 192 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
189 193
194 /* FIXME none of this is safe. we shouldn't touch aconnector here in
195 * atomic_check
196 */
197
190 /* 198 /*
191 * TODO: Need to further figure out why ddc.algo is NULL while MST port exists 199 * TODO: Need to further figure out why ddc.algo is NULL while MST port exists
192 */ 200 */
193 if (!aconnector->port || !aconnector->port->aux.ddc.algo) 201 if (!aconnector->port || !aconnector->port->aux.ddc.algo)
194 return; 202 return;
195 203
196 edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); 204 ASSERT(aconnector->edid);
197
198 if (!edid) {
199 drm_mode_connector_update_edid_property(
200 &aconnector->base,
201 NULL);
202 return;
203 }
204
205 aconnector->edid = edid;
206 205
207 dc_sink = dc_link_add_remote_sink( 206 dc_sink = dc_link_add_remote_sink(
208 aconnector->dc_link, 207 aconnector->dc_link,
@@ -215,9 +214,6 @@ void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
215 214
216 amdgpu_dm_add_sink_to_freesync_module( 215 amdgpu_dm_add_sink_to_freesync_module(
217 connector, aconnector->edid); 216 connector, aconnector->edid);
218
219 drm_mode_connector_update_edid_property(
220 &aconnector->base, aconnector->edid);
221} 217}
222 218
223static int dm_dp_mst_get_modes(struct drm_connector *connector) 219static int dm_dp_mst_get_modes(struct drm_connector *connector)
@@ -230,10 +226,6 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
230 226
231 if (!aconnector->edid) { 227 if (!aconnector->edid) {
232 struct edid *edid; 228 struct edid *edid;
233 struct dc_sink *dc_sink;
234 struct dc_sink_init_data init_params = {
235 .link = aconnector->dc_link,
236 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
237 edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); 229 edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
238 230
239 if (!edid) { 231 if (!edid) {
@@ -244,11 +236,17 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
244 } 236 }
245 237
246 aconnector->edid = edid; 238 aconnector->edid = edid;
239 }
247 240
241 if (!aconnector->dc_sink) {
242 struct dc_sink *dc_sink;
243 struct dc_sink_init_data init_params = {
244 .link = aconnector->dc_link,
245 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
248 dc_sink = dc_link_add_remote_sink( 246 dc_sink = dc_link_add_remote_sink(
249 aconnector->dc_link, 247 aconnector->dc_link,
250 (uint8_t *)edid, 248 (uint8_t *)aconnector->edid,
251 (edid->extensions + 1) * EDID_LENGTH, 249 (aconnector->edid->extensions + 1) * EDID_LENGTH,
252 &init_params); 250 &init_params);
253 251
254 dc_sink->priv = aconnector; 252 dc_sink->priv = aconnector;
@@ -256,12 +254,12 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
256 254
257 if (aconnector->dc_sink) 255 if (aconnector->dc_sink)
258 amdgpu_dm_add_sink_to_freesync_module( 256 amdgpu_dm_add_sink_to_freesync_module(
259 connector, edid); 257 connector, aconnector->edid);
260
261 drm_mode_connector_update_edid_property(
262 &aconnector->base, edid);
263 } 258 }
264 259
260 drm_mode_connector_update_edid_property(
261 &aconnector->base, aconnector->edid);
262
265 ret = drm_add_edid_modes(connector, aconnector->edid); 263 ret = drm_add_edid_modes(connector, aconnector->edid);
266 264
267 return ret; 265 return ret;
@@ -424,14 +422,6 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
424 dc_sink_release(aconnector->dc_sink); 422 dc_sink_release(aconnector->dc_sink);
425 aconnector->dc_sink = NULL; 423 aconnector->dc_sink = NULL;
426 } 424 }
427 if (aconnector->edid) {
428 kfree(aconnector->edid);
429 aconnector->edid = NULL;
430 }
431
432 drm_mode_connector_update_edid_property(
433 &aconnector->base,
434 NULL);
435 425
436 aconnector->mst_connected = false; 426 aconnector->mst_connected = false;
437} 427}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index add90675fd2a..26fbeafc3c96 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -4743,23 +4743,27 @@ static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
4743 4743
4744 for (i=0; i < dep_table->count; i++) { 4744 for (i=0; i < dep_table->count; i++) {
4745 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { 4745 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
4746 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; 4746 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
4747 break; 4747 return;
4748 } 4748 }
4749 } 4749 }
4750 if (i == dep_table->count) 4750 if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
4751 data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; 4751 data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
4752 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
4753 }
4752 4754
4753 dep_table = table_info->vdd_dep_on_sclk; 4755 dep_table = table_info->vdd_dep_on_sclk;
4754 odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk); 4756 odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
4755 for (i=0; i < dep_table->count; i++) { 4757 for (i=0; i < dep_table->count; i++) {
4756 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { 4758 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
4757 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; 4759 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
4758 break; 4760 return;
4759 } 4761 }
4760 } 4762 }
4761 if (i == dep_table->count) 4763 if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
4762 data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; 4764 data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
4765 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
4766 }
4763} 4767}
4764 4768
4765static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, 4769static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
index fb696e3d06cf..2f8a3b983cce 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
@@ -412,8 +412,10 @@ typedef struct {
412 QuadraticInt_t ReservedEquation2; 412 QuadraticInt_t ReservedEquation2;
413 QuadraticInt_t ReservedEquation3; 413 QuadraticInt_t ReservedEquation3;
414 414
415 uint16_t MinVoltageUlvGfx;
416 uint16_t MinVoltageUlvSoc;
415 417
416 uint32_t Reserved[15]; 418 uint32_t Reserved[14];
417 419
418 420
419 421
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
index 02a50929af67..e7f4fe2848a5 100644
--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
@@ -350,19 +350,44 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
350{ 350{
351 uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE; 351 uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
352 ssize_t ret; 352 ssize_t ret;
353 int retry;
353 354
354 if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) 355 if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
355 return 0; 356 return 0;
356 357
357 ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN, 358 /*
358 &tmds_oen, sizeof(tmds_oen)); 359 * LSPCON adapters in low-power state may ignore the first write, so
359 if (ret) { 360 * read back and verify the written value a few times.
360 DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n", 361 */
361 enable ? "enable" : "disable"); 362 for (retry = 0; retry < 3; retry++) {
362 return ret; 363 uint8_t tmp;
364
365 ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
366 &tmds_oen, sizeof(tmds_oen));
367 if (ret) {
368 DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n",
369 enable ? "enable" : "disable",
370 retry + 1);
371 return ret;
372 }
373
374 ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
375 &tmp, sizeof(tmp));
376 if (ret) {
377 DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n",
378 enable ? "enabling" : "disabling",
379 retry + 1);
380 return ret;
381 }
382
383 if (tmp == tmds_oen)
384 return 0;
363 } 385 }
364 386
365 return 0; 387 DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n",
388 enable ? "enabling" : "disabling");
389
390 return -EIO;
366} 391}
367EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output); 392EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output);
368 393
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 134069f36482..39f1db4acda4 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -4451,6 +4451,7 @@ drm_reset_display_info(struct drm_connector *connector)
4451 info->max_tmds_clock = 0; 4451 info->max_tmds_clock = 0;
4452 info->dvi_dual = false; 4452 info->dvi_dual = false;
4453 info->has_hdmi_infoframe = false; 4453 info->has_hdmi_infoframe = false;
4454 memset(&info->hdmi, 0, sizeof(info->hdmi));
4454 4455
4455 info->non_desktop = 0; 4456 info->non_desktop = 0;
4456} 4457}
@@ -4462,17 +4463,11 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
4462 4463
4463 u32 quirks = edid_get_quirks(edid); 4464 u32 quirks = edid_get_quirks(edid);
4464 4465
4466 drm_reset_display_info(connector);
4467
4465 info->width_mm = edid->width_cm * 10; 4468 info->width_mm = edid->width_cm * 10;
4466 info->height_mm = edid->height_cm * 10; 4469 info->height_mm = edid->height_cm * 10;
4467 4470
4468 /* driver figures it out in this case */
4469 info->bpc = 0;
4470 info->color_formats = 0;
4471 info->cea_rev = 0;
4472 info->max_tmds_clock = 0;
4473 info->dvi_dual = false;
4474 info->has_hdmi_infoframe = false;
4475
4476 info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP); 4471 info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
4477 4472
4478 DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop); 4473 DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 0faaf829f5bf..f0e79178bde6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -18,6 +18,7 @@
18#include <drm/drm_fb_helper.h> 18#include <drm/drm_fb_helper.h>
19#include <drm/drm_atomic.h> 19#include <drm/drm_atomic.h>
20#include <drm/drm_atomic_helper.h> 20#include <drm/drm_atomic_helper.h>
21#include <drm/drm_gem_framebuffer_helper.h>
21#include <uapi/drm/exynos_drm.h> 22#include <uapi/drm/exynos_drm.h>
22 23
23#include "exynos_drm_drv.h" 24#include "exynos_drm_drv.h"
@@ -26,20 +27,6 @@
26#include "exynos_drm_iommu.h" 27#include "exynos_drm_iommu.h"
27#include "exynos_drm_crtc.h" 28#include "exynos_drm_crtc.h"
28 29
29#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb)
30
31/*
32 * exynos specific framebuffer structure.
33 *
34 * @fb: drm framebuffer obejct.
35 * @exynos_gem: array of exynos specific gem object containing a gem object.
36 */
37struct exynos_drm_fb {
38 struct drm_framebuffer fb;
39 struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
40 dma_addr_t dma_addr[MAX_FB_BUFFER];
41};
42
43static int check_fb_gem_memory_type(struct drm_device *drm_dev, 30static int check_fb_gem_memory_type(struct drm_device *drm_dev,
44 struct exynos_drm_gem *exynos_gem) 31 struct exynos_drm_gem *exynos_gem)
45{ 32{
@@ -66,40 +53,9 @@ static int check_fb_gem_memory_type(struct drm_device *drm_dev,
66 return 0; 53 return 0;
67} 54}
68 55
69static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
70{
71 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
72 unsigned int i;
73
74 drm_framebuffer_cleanup(fb);
75
76 for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem); i++) {
77 struct drm_gem_object *obj;
78
79 if (exynos_fb->exynos_gem[i] == NULL)
80 continue;
81
82 obj = &exynos_fb->exynos_gem[i]->base;
83 drm_gem_object_unreference_unlocked(obj);
84 }
85
86 kfree(exynos_fb);
87 exynos_fb = NULL;
88}
89
90static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
91 struct drm_file *file_priv,
92 unsigned int *handle)
93{
94 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
95
96 return drm_gem_handle_create(file_priv,
97 &exynos_fb->exynos_gem[0]->base, handle);
98}
99
100static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = { 56static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
101 .destroy = exynos_drm_fb_destroy, 57 .destroy = drm_gem_fb_destroy,
102 .create_handle = exynos_drm_fb_create_handle, 58 .create_handle = drm_gem_fb_create_handle,
103}; 59};
104 60
105struct drm_framebuffer * 61struct drm_framebuffer *
@@ -108,12 +64,12 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
108 struct exynos_drm_gem **exynos_gem, 64 struct exynos_drm_gem **exynos_gem,
109 int count) 65 int count)
110{ 66{
111 struct exynos_drm_fb *exynos_fb; 67 struct drm_framebuffer *fb;
112 int i; 68 int i;
113 int ret; 69 int ret;
114 70
115 exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); 71 fb = kzalloc(sizeof(*fb), GFP_KERNEL);
116 if (!exynos_fb) 72 if (!fb)
117 return ERR_PTR(-ENOMEM); 73 return ERR_PTR(-ENOMEM);
118 74
119 for (i = 0; i < count; i++) { 75 for (i = 0; i < count; i++) {
@@ -121,23 +77,21 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
121 if (ret < 0) 77 if (ret < 0)
122 goto err; 78 goto err;
123 79
124 exynos_fb->exynos_gem[i] = exynos_gem[i]; 80 fb->obj[i] = &exynos_gem[i]->base;
125 exynos_fb->dma_addr[i] = exynos_gem[i]->dma_addr
126 + mode_cmd->offsets[i];
127 } 81 }
128 82
129 drm_helper_mode_fill_fb_struct(dev, &exynos_fb->fb, mode_cmd); 83 drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
130 84
131 ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs); 85 ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs);
132 if (ret < 0) { 86 if (ret < 0) {
133 DRM_ERROR("failed to initialize framebuffer\n"); 87 DRM_ERROR("failed to initialize framebuffer\n");
134 goto err; 88 goto err;
135 } 89 }
136 90
137 return &exynos_fb->fb; 91 return fb;
138 92
139err: 93err:
140 kfree(exynos_fb); 94 kfree(fb);
141 return ERR_PTR(ret); 95 return ERR_PTR(ret);
142} 96}
143 97
@@ -191,12 +145,13 @@ err:
191 145
192dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index) 146dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
193{ 147{
194 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 148 struct exynos_drm_gem *exynos_gem;
195 149
196 if (WARN_ON_ONCE(index >= MAX_FB_BUFFER)) 150 if (WARN_ON_ONCE(index >= MAX_FB_BUFFER))
197 return 0; 151 return 0;
198 152
199 return exynos_fb->dma_addr[index]; 153 exynos_gem = to_exynos_gem(fb->obj[index]);
154 return exynos_gem->dma_addr + fb->offsets[index];
200} 155}
201 156
202static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = { 157static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = {
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index db6b94dda5df..d85939bd7b47 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1080,6 +1080,7 @@ static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
1080{ 1080{
1081 set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt, 1081 set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
1082 s->workload->pending_events); 1082 s->workload->pending_events);
1083 patch_value(s, cmd_ptr(s, 0), MI_NOOP);
1083 return 0; 1084 return 0;
1084} 1085}
1085 1086
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index dd96ffc878ac..6d8180e8d1e2 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -169,6 +169,8 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
169static void emulate_monitor_status_change(struct intel_vgpu *vgpu) 169static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
170{ 170{
171 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 171 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
172 int pipe;
173
172 vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT | 174 vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
173 SDE_PORTC_HOTPLUG_CPT | 175 SDE_PORTC_HOTPLUG_CPT |
174 SDE_PORTD_HOTPLUG_CPT); 176 SDE_PORTD_HOTPLUG_CPT);
@@ -267,6 +269,14 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
267 if (IS_BROADWELL(dev_priv)) 269 if (IS_BROADWELL(dev_priv))
268 vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK; 270 vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
269 271
272 /* Disable Primary/Sprite/Cursor plane */
273 for_each_pipe(dev_priv, pipe) {
274 vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE;
275 vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
276 vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~CURSOR_MODE;
277 vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= CURSOR_MODE_DISABLE;
278 }
279
270 vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; 280 vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
271} 281}
272 282
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index b555eb26f9ce..6f4f8e941fc2 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -323,6 +323,7 @@ static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
323 struct intel_vgpu_fb_info *fb_info) 323 struct intel_vgpu_fb_info *fb_info)
324{ 324{
325 gvt_dmabuf->drm_format = fb_info->drm_format; 325 gvt_dmabuf->drm_format = fb_info->drm_format;
326 gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
326 gvt_dmabuf->width = fb_info->width; 327 gvt_dmabuf->width = fb_info->width;
327 gvt_dmabuf->height = fb_info->height; 328 gvt_dmabuf->height = fb_info->height;
328 gvt_dmabuf->stride = fb_info->stride; 329 gvt_dmabuf->stride = fb_info->stride;
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 6b50fe78dc1b..1c120683e958 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -245,16 +245,13 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
245 plane->hw_format = fmt; 245 plane->hw_format = fmt;
246 246
247 plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK; 247 plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
248 if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { 248 if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
249 gvt_vgpu_err("invalid gma address: %lx\n",
250 (unsigned long)plane->base);
251 return -EINVAL; 249 return -EINVAL;
252 }
253 250
254 plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); 251 plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
255 if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { 252 if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
256 gvt_vgpu_err("invalid gma address: %lx\n", 253 gvt_vgpu_err("Translate primary plane gma 0x%x to gpa fail\n",
257 (unsigned long)plane->base); 254 plane->base);
258 return -EINVAL; 255 return -EINVAL;
259 } 256 }
260 257
@@ -371,16 +368,13 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
371 alpha_plane, alpha_force); 368 alpha_plane, alpha_force);
372 369
373 plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK; 370 plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
374 if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { 371 if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
375 gvt_vgpu_err("invalid gma address: %lx\n",
376 (unsigned long)plane->base);
377 return -EINVAL; 372 return -EINVAL;
378 }
379 373
380 plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); 374 plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
381 if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { 375 if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
382 gvt_vgpu_err("invalid gma address: %lx\n", 376 gvt_vgpu_err("Translate cursor plane gma 0x%x to gpa fail\n",
383 (unsigned long)plane->base); 377 plane->base);
384 return -EINVAL; 378 return -EINVAL;
385 } 379 }
386 380
@@ -476,16 +470,13 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
476 plane->drm_format = drm_format; 470 plane->drm_format = drm_format;
477 471
478 plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK; 472 plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
479 if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { 473 if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
480 gvt_vgpu_err("invalid gma address: %lx\n",
481 (unsigned long)plane->base);
482 return -EINVAL; 474 return -EINVAL;
483 }
484 475
485 plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); 476 plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
486 if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { 477 if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
487 gvt_vgpu_err("invalid gma address: %lx\n", 478 gvt_vgpu_err("Translate sprite plane gma 0x%x to gpa fail\n",
488 (unsigned long)plane->base); 479 plane->base);
489 return -EINVAL; 480 return -EINVAL;
490 } 481 }
491 482
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index d29281231507..78e55aafc8bc 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -530,6 +530,16 @@ static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
530 false, 0, mm->vgpu); 530 false, 0, mm->vgpu);
531} 531}
532 532
533static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
534 struct intel_gvt_gtt_entry *entry, unsigned long index)
535{
536 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
537
538 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
539
540 pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
541}
542
533static void ggtt_set_host_entry(struct intel_vgpu_mm *mm, 543static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
534 struct intel_gvt_gtt_entry *entry, unsigned long index) 544 struct intel_gvt_gtt_entry *entry, unsigned long index)
535{ 545{
@@ -1818,6 +1828,18 @@ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
1818 return ret; 1828 return ret;
1819} 1829}
1820 1830
1831static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
1832 struct intel_gvt_gtt_entry *entry)
1833{
1834 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
1835 unsigned long pfn;
1836
1837 pfn = pte_ops->get_pfn(entry);
1838 if (pfn != vgpu->gvt->gtt.scratch_mfn)
1839 intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
1840 pfn << PAGE_SHIFT);
1841}
1842
1821static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, 1843static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
1822 void *p_data, unsigned int bytes) 1844 void *p_data, unsigned int bytes)
1823{ 1845{
@@ -1844,10 +1866,10 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
1844 1866
1845 memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, 1867 memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
1846 bytes); 1868 bytes);
1847 m = e;
1848 1869
1849 if (ops->test_present(&e)) { 1870 if (ops->test_present(&e)) {
1850 gfn = ops->get_pfn(&e); 1871 gfn = ops->get_pfn(&e);
1872 m = e;
1851 1873
1852 /* one PTE update may be issued in multiple writes and the 1874 /* one PTE update may be issued in multiple writes and the
1853 * first write may not construct a valid gfn 1875 * first write may not construct a valid gfn
@@ -1868,8 +1890,12 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
1868 ops->set_pfn(&m, gvt->gtt.scratch_mfn); 1890 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
1869 } else 1891 } else
1870 ops->set_pfn(&m, dma_addr >> PAGE_SHIFT); 1892 ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
1871 } else 1893 } else {
1894 ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index);
1895 ggtt_invalidate_pte(vgpu, &m);
1872 ops->set_pfn(&m, gvt->gtt.scratch_mfn); 1896 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
1897 ops->clear_present(&m);
1898 }
1873 1899
1874out: 1900out:
1875 ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index); 1901 ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
@@ -2030,7 +2056,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
2030 return PTR_ERR(gtt->ggtt_mm); 2056 return PTR_ERR(gtt->ggtt_mm);
2031 } 2057 }
2032 2058
2033 intel_vgpu_reset_ggtt(vgpu); 2059 intel_vgpu_reset_ggtt(vgpu, false);
2034 2060
2035 return create_scratch_page_tree(vgpu); 2061 return create_scratch_page_tree(vgpu);
2036} 2062}
@@ -2315,17 +2341,19 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
2315/** 2341/**
2316 * intel_vgpu_reset_ggtt - reset the GGTT entry 2342 * intel_vgpu_reset_ggtt - reset the GGTT entry
2317 * @vgpu: a vGPU 2343 * @vgpu: a vGPU
2344 * @invalidate_old: invalidate old entries
2318 * 2345 *
2319 * This function is called at the vGPU create stage 2346 * This function is called at the vGPU create stage
2320 * to reset all the GGTT entries. 2347 * to reset all the GGTT entries.
2321 * 2348 *
2322 */ 2349 */
2323void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) 2350void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
2324{ 2351{
2325 struct intel_gvt *gvt = vgpu->gvt; 2352 struct intel_gvt *gvt = vgpu->gvt;
2326 struct drm_i915_private *dev_priv = gvt->dev_priv; 2353 struct drm_i915_private *dev_priv = gvt->dev_priv;
2327 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; 2354 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
2328 struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE}; 2355 struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
2356 struct intel_gvt_gtt_entry old_entry;
2329 u32 index; 2357 u32 index;
2330 u32 num_entries; 2358 u32 num_entries;
2331 2359
@@ -2334,13 +2362,23 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
2334 2362
2335 index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT; 2363 index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
2336 num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT; 2364 num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
2337 while (num_entries--) 2365 while (num_entries--) {
2366 if (invalidate_old) {
2367 ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
2368 ggtt_invalidate_pte(vgpu, &old_entry);
2369 }
2338 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); 2370 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
2371 }
2339 2372
2340 index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT; 2373 index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
2341 num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; 2374 num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
2342 while (num_entries--) 2375 while (num_entries--) {
2376 if (invalidate_old) {
2377 ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
2378 ggtt_invalidate_pte(vgpu, &old_entry);
2379 }
2343 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); 2380 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
2381 }
2344 2382
2345 ggtt_invalidate(dev_priv); 2383 ggtt_invalidate(dev_priv);
2346} 2384}
@@ -2360,5 +2398,5 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
2360 * removing the shadow pages. 2398 * removing the shadow pages.
2361 */ 2399 */
2362 intel_vgpu_destroy_all_ppgtt_mm(vgpu); 2400 intel_vgpu_destroy_all_ppgtt_mm(vgpu);
2363 intel_vgpu_reset_ggtt(vgpu); 2401 intel_vgpu_reset_ggtt(vgpu, true);
2364} 2402}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index a8b369cd352b..3792f2b7f4ff 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -193,7 +193,7 @@ struct intel_vgpu_gtt {
193 193
194extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); 194extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
195extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu); 195extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
196void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu); 196void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old);
197void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu); 197void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
198 198
199extern int intel_gvt_init_gtt(struct intel_gvt *gvt); 199extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 8c5d5d005854..a33c1c3e4a21 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1150,6 +1150,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
1150 switch (notification) { 1150 switch (notification) {
1151 case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE: 1151 case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
1152 root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY; 1152 root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
1153 /* fall through */
1153 case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE: 1154 case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
1154 mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps); 1155 mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
1155 return PTR_ERR_OR_ZERO(mm); 1156 return PTR_ERR_OR_ZERO(mm);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index c16a492449d7..1466d8769ec9 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1301,7 +1301,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
1301 1301
1302 } 1302 }
1303 1303
1304 return 0; 1304 return -ENOTTY;
1305} 1305}
1306 1306
1307static ssize_t 1307static ssize_t
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 84ca369f15a5..3b4daafebdcb 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1105,30 +1105,32 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
1105 1105
1106 ret = i915_ggtt_probe_hw(dev_priv); 1106 ret = i915_ggtt_probe_hw(dev_priv);
1107 if (ret) 1107 if (ret)
1108 return ret; 1108 goto err_perf;
1109 1109
1110 /* WARNING: Apparently we must kick fbdev drivers before vgacon, 1110 /*
1111 * otherwise the vga fbdev driver falls over. */ 1111 * WARNING: Apparently we must kick fbdev drivers before vgacon,
1112 * otherwise the vga fbdev driver falls over.
1113 */
1112 ret = i915_kick_out_firmware_fb(dev_priv); 1114 ret = i915_kick_out_firmware_fb(dev_priv);
1113 if (ret) { 1115 if (ret) {
1114 DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); 1116 DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
1115 goto out_ggtt; 1117 goto err_ggtt;
1116 } 1118 }
1117 1119
1118 ret = i915_kick_out_vgacon(dev_priv); 1120 ret = i915_kick_out_vgacon(dev_priv);
1119 if (ret) { 1121 if (ret) {
1120 DRM_ERROR("failed to remove conflicting VGA console\n"); 1122 DRM_ERROR("failed to remove conflicting VGA console\n");
1121 goto out_ggtt; 1123 goto err_ggtt;
1122 } 1124 }
1123 1125
1124 ret = i915_ggtt_init_hw(dev_priv); 1126 ret = i915_ggtt_init_hw(dev_priv);
1125 if (ret) 1127 if (ret)
1126 return ret; 1128 goto err_ggtt;
1127 1129
1128 ret = i915_ggtt_enable_hw(dev_priv); 1130 ret = i915_ggtt_enable_hw(dev_priv);
1129 if (ret) { 1131 if (ret) {
1130 DRM_ERROR("failed to enable GGTT\n"); 1132 DRM_ERROR("failed to enable GGTT\n");
1131 goto out_ggtt; 1133 goto err_ggtt;
1132 } 1134 }
1133 1135
1134 pci_set_master(pdev); 1136 pci_set_master(pdev);
@@ -1139,7 +1141,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
1139 if (ret) { 1141 if (ret) {
1140 DRM_ERROR("failed to set DMA mask\n"); 1142 DRM_ERROR("failed to set DMA mask\n");
1141 1143
1142 goto out_ggtt; 1144 goto err_ggtt;
1143 } 1145 }
1144 } 1146 }
1145 1147
@@ -1157,7 +1159,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
1157 if (ret) { 1159 if (ret) {
1158 DRM_ERROR("failed to set DMA mask\n"); 1160 DRM_ERROR("failed to set DMA mask\n");
1159 1161
1160 goto out_ggtt; 1162 goto err_ggtt;
1161 } 1163 }
1162 } 1164 }
1163 1165
@@ -1190,13 +1192,14 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
1190 1192
1191 ret = intel_gvt_init(dev_priv); 1193 ret = intel_gvt_init(dev_priv);
1192 if (ret) 1194 if (ret)
1193 goto out_ggtt; 1195 goto err_ggtt;
1194 1196
1195 return 0; 1197 return 0;
1196 1198
1197out_ggtt: 1199err_ggtt:
1198 i915_ggtt_cleanup_hw(dev_priv); 1200 i915_ggtt_cleanup_hw(dev_priv);
1199 1201err_perf:
1202 i915_perf_fini(dev_priv);
1200 return ret; 1203 return ret;
1201} 1204}
1202 1205
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 8c170db8495d..0414228cd2b5 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -728,7 +728,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
728 728
729 err = radix_tree_insert(handles_vma, handle, vma); 729 err = radix_tree_insert(handles_vma, handle, vma);
730 if (unlikely(err)) { 730 if (unlikely(err)) {
731 kfree(lut); 731 kmem_cache_free(eb->i915->luts, lut);
732 goto err_obj; 732 goto err_obj;
733 } 733 }
734 734
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index d8feb9053e0c..f0519e31543a 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -473,20 +473,37 @@ static u64 get_rc6(struct drm_i915_private *i915)
473 spin_lock_irqsave(&i915->pmu.lock, flags); 473 spin_lock_irqsave(&i915->pmu.lock, flags);
474 spin_lock(&kdev->power.lock); 474 spin_lock(&kdev->power.lock);
475 475
476 if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) 476 /*
477 i915->pmu.suspended_jiffies_last = 477 * After the above branch intel_runtime_pm_get_if_in_use failed
478 kdev->power.suspended_jiffies; 478 * to get the runtime PM reference we cannot assume we are in
479 * runtime suspend since we can either: a) race with coming out
480 * of it before we took the power.lock, or b) there are other
481 * states than suspended which can bring us here.
482 *
483 * We need to double-check that we are indeed currently runtime
484 * suspended and if not we cannot do better than report the last
485 * known RC6 value.
486 */
487 if (kdev->power.runtime_status == RPM_SUSPENDED) {
488 if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
489 i915->pmu.suspended_jiffies_last =
490 kdev->power.suspended_jiffies;
479 491
480 val = kdev->power.suspended_jiffies - 492 val = kdev->power.suspended_jiffies -
481 i915->pmu.suspended_jiffies_last; 493 i915->pmu.suspended_jiffies_last;
482 val += jiffies - kdev->power.accounting_timestamp; 494 val += jiffies - kdev->power.accounting_timestamp;
483 495
484 spin_unlock(&kdev->power.lock); 496 val = jiffies_to_nsecs(val);
497 val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
485 498
486 val = jiffies_to_nsecs(val); 499 i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
487 val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; 500 } else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
488 i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; 501 val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
502 } else {
503 val = i915->pmu.sample[__I915_SAMPLE_RC6].cur;
504 }
489 505
506 spin_unlock(&kdev->power.lock);
490 spin_unlock_irqrestore(&i915->pmu.lock, flags); 507 spin_unlock_irqrestore(&i915->pmu.lock, flags);
491 } 508 }
492 509
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 709d6ca68074..3ea566f99450 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -729,7 +729,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
729 struct drm_i915_private *dev_priv = kdev_to_i915(kdev); 729 struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
730 u32 tmp; 730 u32 tmp;
731 731
732 if (!IS_GEN9_BC(dev_priv)) 732 if (!IS_GEN9(dev_priv))
733 return; 733 return;
734 734
735 i915_audio_component_get_power(kdev); 735 i915_audio_component_get_power(kdev);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index c5c7530ba157..447b721c3be9 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1256,7 +1256,6 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
1256 return; 1256 return;
1257 1257
1258 aux_channel = child->aux_channel; 1258 aux_channel = child->aux_channel;
1259 ddc_pin = child->ddc_pin;
1260 1259
1261 is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING; 1260 is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
1262 is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT; 1261 is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
@@ -1303,9 +1302,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
1303 DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port)); 1302 DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
1304 1303
1305 if (is_dvi) { 1304 if (is_dvi) {
1306 info->alternate_ddc_pin = map_ddc_pin(dev_priv, ddc_pin); 1305 ddc_pin = map_ddc_pin(dev_priv, child->ddc_pin);
1307 1306 if (intel_gmbus_is_valid_pin(dev_priv, ddc_pin)) {
1308 sanitize_ddc_pin(dev_priv, port); 1307 info->alternate_ddc_pin = ddc_pin;
1308 sanitize_ddc_pin(dev_priv, port);
1309 } else {
1310 DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, "
1311 "sticking to defaults\n",
1312 port_name(port), ddc_pin);
1313 }
1309 } 1314 }
1310 1315
1311 if (is_dp) { 1316 if (is_dp) {
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index fc8b2c6e3508..32d24c69da3c 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -2140,10 +2140,22 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
2140 } 2140 }
2141 } 2141 }
2142 2142
2143 /* According to BSpec, "The CD clock frequency must be at least twice 2143 /*
2144 * According to BSpec, "The CD clock frequency must be at least twice
2144 * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default. 2145 * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
2146 *
2147 * FIXME: Check the actual, not default, BCLK being used.
2148 *
2149 * FIXME: This does not depend on ->has_audio because the higher CDCLK
2150 * is required for audio probe, also when there are no audio capable
2151 * displays connected at probe time. This leads to unnecessarily high
2152 * CDCLK when audio is not required.
2153 *
2154 * FIXME: This limit is only applied when there are displays connected
2155 * at probe time. If we probe without displays, we'll still end up using
2156 * the platform minimum CDCLK, failing audio probe.
2145 */ 2157 */
2146 if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9) 2158 if (INTEL_GEN(dev_priv) >= 9)
2147 min_cdclk = max(2 * 96000, min_cdclk); 2159 min_cdclk = max(2 * 96000, min_cdclk);
2148 2160
2149 /* 2161 /*
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index d4368589b355..a80fbad9be0f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -49,12 +49,12 @@
49 * check the condition before the timeout. 49 * check the condition before the timeout.
50 */ 50 */
51#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \ 51#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
52 unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1; \ 52 const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
53 long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \ 53 long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
54 int ret__; \ 54 int ret__; \
55 might_sleep(); \ 55 might_sleep(); \
56 for (;;) { \ 56 for (;;) { \
57 bool expired__ = time_after(jiffies, timeout__); \ 57 const bool expired__ = ktime_after(ktime_get_raw(), end__); \
58 OP; \ 58 OP; \
59 if (COND) { \ 59 if (COND) { \
60 ret__ = 0; \ 60 ret__ = 0; \
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 6f12adc06365..6467a5cc2ca3 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -806,7 +806,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
806 return; 806 return;
807 807
808 intel_fbdev_sync(ifbdev); 808 intel_fbdev_sync(ifbdev);
809 if (ifbdev->vma) 809 if (ifbdev->vma || ifbdev->helper.deferred_setup)
810 drm_fb_helper_hotplug_event(&ifbdev->helper); 810 drm_fb_helper_hotplug_event(&ifbdev->helper);
811} 811}
812 812
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 697af5add78b..e3a5f673ff67 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -577,6 +577,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
577 * know the next preemption status we see corresponds 577 * know the next preemption status we see corresponds
578 * to this ELSP update. 578 * to this ELSP update.
579 */ 579 */
580 GEM_BUG_ON(!execlists_is_active(execlists,
581 EXECLISTS_ACTIVE_USER));
580 GEM_BUG_ON(!port_count(&port[0])); 582 GEM_BUG_ON(!port_count(&port[0]));
581 if (port_count(&port[0]) > 1) 583 if (port_count(&port[0]) > 1)
582 goto unlock; 584 goto unlock;
@@ -738,6 +740,8 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
738 memset(port, 0, sizeof(*port)); 740 memset(port, 0, sizeof(*port));
739 port++; 741 port++;
740 } 742 }
743
744 execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
741} 745}
742 746
743static void execlists_cancel_requests(struct intel_engine_cs *engine) 747static void execlists_cancel_requests(struct intel_engine_cs *engine)
@@ -1001,6 +1005,11 @@ static void execlists_submission_tasklet(unsigned long data)
1001 1005
1002 if (fw) 1006 if (fw)
1003 intel_uncore_forcewake_put(dev_priv, execlists->fw_domains); 1007 intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
1008
1009 /* If the engine is now idle, so should be the flag; and vice versa. */
1010 GEM_BUG_ON(execlists_is_active(&engine->execlists,
1011 EXECLISTS_ACTIVE_USER) ==
1012 !port_isset(engine->execlists.port));
1004} 1013}
1005 1014
1006static void queue_request(struct intel_engine_cs *engine, 1015static void queue_request(struct intel_engine_cs *engine,
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 53ea564f971e..66de4b2dc8b7 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -641,19 +641,18 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)
641 641
642 DRM_DEBUG_KMS("Enabling DC6\n"); 642 DRM_DEBUG_KMS("Enabling DC6\n");
643 643
644 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); 644 /* Wa Display #1183: skl,kbl,cfl */
645 if (IS_GEN9_BC(dev_priv))
646 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
647 SKL_SELECT_ALTERNATE_DC_EXIT);
645 648
649 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
646} 650}
647 651
648void skl_disable_dc6(struct drm_i915_private *dev_priv) 652void skl_disable_dc6(struct drm_i915_private *dev_priv)
649{ 653{
650 DRM_DEBUG_KMS("Disabling DC6\n"); 654 DRM_DEBUG_KMS("Disabling DC6\n");
651 655
652 /* Wa Display #1183: skl,kbl,cfl */
653 if (IS_GEN9_BC(dev_priv))
654 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
655 SKL_SELECT_ALTERNATE_DC_EXIT);
656
657 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 656 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
658} 657}
659 658
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index 6e5e1aa54ce1..b001699297c4 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -351,6 +351,7 @@ static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
351 351
352 spin_lock_irqsave(&dev->event_lock, flags); 352 spin_lock_irqsave(&dev->event_lock, flags);
353 mdp4_crtc->event = crtc->state->event; 353 mdp4_crtc->event = crtc->state->event;
354 crtc->state->event = NULL;
354 spin_unlock_irqrestore(&dev->event_lock, flags); 355 spin_unlock_irqrestore(&dev->event_lock, flags);
355 356
356 blend_setup(crtc); 357 blend_setup(crtc);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 9893e43ba6c5..76b96081916f 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -708,6 +708,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
708 708
709 spin_lock_irqsave(&dev->event_lock, flags); 709 spin_lock_irqsave(&dev->event_lock, flags);
710 mdp5_crtc->event = crtc->state->event; 710 mdp5_crtc->event = crtc->state->event;
711 crtc->state->event = NULL;
711 spin_unlock_irqrestore(&dev->event_lock, flags); 712 spin_unlock_irqrestore(&dev->event_lock, flags);
712 713
713 /* 714 /*
diff --git a/drivers/gpu/drm/msm/disp/mdp_format.c b/drivers/gpu/drm/msm/disp/mdp_format.c
index b4a8aa4490ee..005760bee708 100644
--- a/drivers/gpu/drm/msm/disp/mdp_format.c
+++ b/drivers/gpu/drm/msm/disp/mdp_format.c
@@ -171,7 +171,8 @@ uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats,
171 return i; 171 return i;
172} 172}
173 173
174const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format) 174const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format,
175 uint64_t modifier)
175{ 176{
176 int i; 177 int i;
177 for (i = 0; i < ARRAY_SIZE(formats); i++) { 178 for (i = 0; i < ARRAY_SIZE(formats); i++) {
diff --git a/drivers/gpu/drm/msm/disp/mdp_kms.h b/drivers/gpu/drm/msm/disp/mdp_kms.h
index 1185487e7e5e..4fa8dbe4e165 100644
--- a/drivers/gpu/drm/msm/disp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp_kms.h
@@ -98,7 +98,7 @@ struct mdp_format {
98#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv) 98#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv)
99 99
100uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only); 100uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only);
101const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format); 101const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier);
102 102
103/* MDP capabilities */ 103/* MDP capabilities */
104#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */ 104#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 7a03a9489708..8baba30d6c65 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -173,6 +173,7 @@ struct msm_dsi_host {
173 173
174 bool registered; 174 bool registered;
175 bool power_on; 175 bool power_on;
176 bool enabled;
176 int irq; 177 int irq;
177}; 178};
178 179
@@ -775,7 +776,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
775 switch (mipi_fmt) { 776 switch (mipi_fmt) {
776 case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888; 777 case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
777 case MIPI_DSI_FMT_RGB666_PACKED: 778 case MIPI_DSI_FMT_RGB666_PACKED:
778 case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666; 779 case MIPI_DSI_FMT_RGB666: return CMD_DST_FORMAT_RGB666;
779 case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565; 780 case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565;
780 default: return CMD_DST_FORMAT_RGB888; 781 default: return CMD_DST_FORMAT_RGB888;
781 } 782 }
@@ -986,13 +987,19 @@ static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
986 987
987static void dsi_wait4video_done(struct msm_dsi_host *msm_host) 988static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
988{ 989{
990 u32 ret = 0;
991 struct device *dev = &msm_host->pdev->dev;
992
989 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1); 993 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);
990 994
991 reinit_completion(&msm_host->video_comp); 995 reinit_completion(&msm_host->video_comp);
992 996
993 wait_for_completion_timeout(&msm_host->video_comp, 997 ret = wait_for_completion_timeout(&msm_host->video_comp,
994 msecs_to_jiffies(70)); 998 msecs_to_jiffies(70));
995 999
1000 if (ret <= 0)
1001 dev_err(dev, "wait for video done timed out\n");
1002
996 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0); 1003 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
997} 1004}
998 1005
@@ -1001,7 +1008,7 @@ static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
1001 if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO)) 1008 if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
1002 return; 1009 return;
1003 1010
1004 if (msm_host->power_on) { 1011 if (msm_host->power_on && msm_host->enabled) {
1005 dsi_wait4video_done(msm_host); 1012 dsi_wait4video_done(msm_host);
1006 /* delay 4 ms to skip BLLP */ 1013 /* delay 4 ms to skip BLLP */
1007 usleep_range(2000, 4000); 1014 usleep_range(2000, 4000);
@@ -2203,7 +2210,7 @@ int msm_dsi_host_enable(struct mipi_dsi_host *host)
2203 * pm_runtime_put_autosuspend(&msm_host->pdev->dev); 2210 * pm_runtime_put_autosuspend(&msm_host->pdev->dev);
2204 * } 2211 * }
2205 */ 2212 */
2206 2213 msm_host->enabled = true;
2207 return 0; 2214 return 0;
2208} 2215}
2209 2216
@@ -2211,6 +2218,7 @@ int msm_dsi_host_disable(struct mipi_dsi_host *host)
2211{ 2218{
2212 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2219 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2213 2220
2221 msm_host->enabled = false;
2214 dsi_op_mode_config(msm_host, 2222 dsi_op_mode_config(msm_host,
2215 !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false); 2223 !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
2216 2224
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 8e9d5c255820..9a9fa0c75a13 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -265,6 +265,115 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
265 return 0; 265 return 0;
266} 266}
267 267
268int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
269 struct msm_dsi_phy_clk_request *clk_req)
270{
271 const unsigned long bit_rate = clk_req->bitclk_rate;
272 const unsigned long esc_rate = clk_req->escclk_rate;
273 s32 ui, ui_x8, lpx;
274 s32 tmax, tmin;
275 s32 pcnt0 = 50;
276 s32 pcnt1 = 50;
277 s32 pcnt2 = 10;
278 s32 pcnt3 = 30;
279 s32 pcnt4 = 10;
280 s32 pcnt5 = 2;
281 s32 coeff = 1000; /* Precision, should avoid overflow */
282 s32 hb_en, hb_en_ckln;
283 s32 temp;
284
285 if (!bit_rate || !esc_rate)
286 return -EINVAL;
287
288 timing->hs_halfbyte_en = 0;
289 hb_en = 0;
290 timing->hs_halfbyte_en_ckln = 0;
291 hb_en_ckln = 0;
292
293 ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
294 ui_x8 = ui << 3;
295 lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
296
297 temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
298 tmin = max_t(s32, temp, 0);
299 temp = (95 * coeff) / ui_x8;
300 tmax = max_t(s32, temp, 0);
301 timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);
302
303 temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
304 tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
305 tmax = (tmin > 255) ? 511 : 255;
306 timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);
307
308 tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
309 temp = 105 * coeff + 12 * ui - 20 * coeff;
310 tmax = (temp + 3 * ui) / ui_x8;
311 timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
312
313 temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
314 tmin = max_t(s32, temp, 0);
315 temp = (85 * coeff + 6 * ui) / ui_x8;
316 tmax = max_t(s32, temp, 0);
317 timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);
318
319 temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
320 tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
321 tmax = 255;
322 timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);
323
324 tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
325 temp = 105 * coeff + 12 * ui - 20 * coeff;
326 tmax = (temp / ui_x8) - 1;
327 timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
328
329 temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
330 timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
331
332 tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
333 tmax = 255;
334 timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);
335
336 temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
337 timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);
338
339 temp = 60 * coeff + 52 * ui - 43 * ui;
340 tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
341 tmax = 63;
342 timing->shared_timings.clk_post =
343 linear_inter(tmax, tmin, pcnt2, 0, false);
344
345 temp = 8 * ui + (timing->clk_prepare << 3) * ui;
346 temp += (((timing->clk_zero + 3) << 3) + 11) * ui;
347 temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
348 (((timing->hs_rqst_ckln << 3) + 8) * ui);
349 tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
350 tmax = 63;
351 if (tmin > tmax) {
352 temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
353 timing->shared_timings.clk_pre = temp >> 1;
354 timing->shared_timings.clk_pre_inc_by_2 = 1;
355 } else {
356 timing->shared_timings.clk_pre =
357 linear_inter(tmax, tmin, pcnt2, 0, false);
358 timing->shared_timings.clk_pre_inc_by_2 = 0;
359 }
360
361 timing->ta_go = 3;
362 timing->ta_sure = 0;
363 timing->ta_get = 4;
364
365 DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
366 timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
367 timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
368 timing->clk_trail, timing->clk_prepare, timing->hs_exit,
369 timing->hs_zero, timing->hs_prepare, timing->hs_trail,
370 timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
371 timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
372 timing->hs_prep_dly_ckln);
373
374 return 0;
375}
376
268void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, 377void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
269 u32 bit_mask) 378 u32 bit_mask)
270{ 379{
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index c56268cbdb3d..a24ab80994a3 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -101,6 +101,8 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
101 struct msm_dsi_phy_clk_request *clk_req); 101 struct msm_dsi_phy_clk_request *clk_req);
102int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing, 102int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
103 struct msm_dsi_phy_clk_request *clk_req); 103 struct msm_dsi_phy_clk_request *clk_req);
104int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
105 struct msm_dsi_phy_clk_request *clk_req);
104void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, 106void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
105 u32 bit_mask); 107 u32 bit_mask);
106int msm_dsi_phy_init_common(struct msm_dsi_phy *phy); 108int msm_dsi_phy_init_common(struct msm_dsi_phy *phy);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index 0af951aaeea1..b3fffc8dbb2a 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -79,34 +79,6 @@ static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
79 dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04); 79 dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
80} 80}
81 81
82static int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
83 struct msm_dsi_phy_clk_request *clk_req)
84{
85 /*
86 * TODO: These params need to be computed, they're currently hardcoded
87 * for a 1440x2560@60Hz panel with a byteclk of 100.618 Mhz, and a
88 * default escape clock of 19.2 Mhz.
89 */
90
91 timing->hs_halfbyte_en = 0;
92 timing->clk_zero = 0x1c;
93 timing->clk_prepare = 0x07;
94 timing->clk_trail = 0x07;
95 timing->hs_exit = 0x23;
96 timing->hs_zero = 0x21;
97 timing->hs_prepare = 0x07;
98 timing->hs_trail = 0x07;
99 timing->hs_rqst = 0x05;
100 timing->ta_sure = 0x00;
101 timing->ta_go = 0x03;
102 timing->ta_get = 0x04;
103
104 timing->shared_timings.clk_pre = 0x2d;
105 timing->shared_timings.clk_post = 0x0d;
106
107 return 0;
108}
109
110static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, 82static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
111 struct msm_dsi_phy_clk_request *clk_req) 83 struct msm_dsi_phy_clk_request *clk_req)
112{ 84{
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 0e0c87252ab0..7a16242bf8bf 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -183,7 +183,8 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
183 hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format); 183 hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
184 vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format); 184 vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
185 185
186 format = kms->funcs->get_format(kms, mode_cmd->pixel_format); 186 format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
187 mode_cmd->modifier[0]);
187 if (!format) { 188 if (!format) {
188 dev_err(dev->dev, "unsupported pixel format: %4.4s\n", 189 dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
189 (char *)&mode_cmd->pixel_format); 190 (char *)&mode_cmd->pixel_format);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index c178563fcd4d..456622b46335 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -92,8 +92,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
92 92
93 if (IS_ERR(fb)) { 93 if (IS_ERR(fb)) {
94 dev_err(dev->dev, "failed to allocate fb\n"); 94 dev_err(dev->dev, "failed to allocate fb\n");
95 ret = PTR_ERR(fb); 95 return PTR_ERR(fb);
96 goto fail;
97 } 96 }
98 97
99 bo = msm_framebuffer_bo(fb, 0); 98 bo = msm_framebuffer_bo(fb, 0);
@@ -151,13 +150,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
151 150
152fail_unlock: 151fail_unlock:
153 mutex_unlock(&dev->struct_mutex); 152 mutex_unlock(&dev->struct_mutex);
154fail: 153 drm_framebuffer_remove(fb);
155
156 if (ret) {
157 if (fb)
158 drm_framebuffer_remove(fb);
159 }
160
161 return ret; 154 return ret;
162} 155}
163 156
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 95196479f651..f583bb4222f9 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -132,17 +132,19 @@ static void put_pages(struct drm_gem_object *obj)
132 struct msm_gem_object *msm_obj = to_msm_bo(obj); 132 struct msm_gem_object *msm_obj = to_msm_bo(obj);
133 133
134 if (msm_obj->pages) { 134 if (msm_obj->pages) {
135 /* For non-cached buffers, ensure the new pages are clean 135 if (msm_obj->sgt) {
136 * because display controller, GPU, etc. are not coherent: 136 /* For non-cached buffers, ensure the new
137 */ 137 * pages are clean because display controller,
138 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) 138 * GPU, etc. are not coherent:
139 dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl, 139 */
140 msm_obj->sgt->nents, DMA_BIDIRECTIONAL); 140 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
141 dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
142 msm_obj->sgt->nents,
143 DMA_BIDIRECTIONAL);
141 144
142 if (msm_obj->sgt)
143 sg_free_table(msm_obj->sgt); 145 sg_free_table(msm_obj->sgt);
144 146 kfree(msm_obj->sgt);
145 kfree(msm_obj->sgt); 147 }
146 148
147 if (use_pages(obj)) 149 if (use_pages(obj))
148 drm_gem_put_pages(obj, msm_obj->pages, true, false); 150 drm_gem_put_pages(obj, msm_obj->pages, true, false);
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 17d5824417ad..aaa329dc020e 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -48,8 +48,11 @@ struct msm_kms_funcs {
48 /* functions to wait for atomic commit completed on each CRTC */ 48 /* functions to wait for atomic commit completed on each CRTC */
49 void (*wait_for_crtc_commit_done)(struct msm_kms *kms, 49 void (*wait_for_crtc_commit_done)(struct msm_kms *kms,
50 struct drm_crtc *crtc); 50 struct drm_crtc *crtc);
51 /* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */
52 const struct msm_format *(*get_format)(struct msm_kms *kms,
53 const uint32_t format,
54 const uint64_t modifiers);
51 /* misc: */ 55 /* misc: */
52 const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
53 long (*round_pixclk)(struct msm_kms *kms, unsigned long rate, 56 long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
54 struct drm_encoder *encoder); 57 struct drm_encoder *encoder);
55 int (*set_split_display)(struct msm_kms *kms, 58 int (*set_split_display)(struct msm_kms *kms,
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index c0fb52c6d4ca..01665b98c57e 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -179,10 +179,9 @@ qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *relea
179 uint32_t type, bool interruptible) 179 uint32_t type, bool interruptible)
180{ 180{
181 struct qxl_command cmd; 181 struct qxl_command cmd;
182 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
183 182
184 cmd.type = type; 183 cmd.type = type;
185 cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); 184 cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);
186 185
187 return qxl_ring_push(qdev->command_ring, &cmd, interruptible); 186 return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
188} 187}
@@ -192,10 +191,9 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas
192 uint32_t type, bool interruptible) 191 uint32_t type, bool interruptible)
193{ 192{
194 struct qxl_command cmd; 193 struct qxl_command cmd;
195 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
196 194
197 cmd.type = type; 195 cmd.type = type;
198 cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); 196 cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);
199 197
200 return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible); 198 return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
201} 199}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 00a1a66b052a..864b456080c4 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -167,6 +167,7 @@ struct qxl_release {
167 167
168 int id; 168 int id;
169 int type; 169 int type;
170 struct qxl_bo *release_bo;
170 uint32_t release_offset; 171 uint32_t release_offset;
171 uint32_t surface_release_id; 172 uint32_t surface_release_id;
172 struct ww_acquire_ctx ticket; 173 struct ww_acquire_ctx ticket;
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index e238a1a2eca1..6cc9f3367fa0 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -182,9 +182,9 @@ static int qxl_process_single_command(struct qxl_device *qdev,
182 goto out_free_reloc; 182 goto out_free_reloc;
183 183
184 /* TODO copy slow path code from i915 */ 184 /* TODO copy slow path code from i915 */
185 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); 185 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
186 unwritten = __copy_from_user_inatomic_nocache 186 unwritten = __copy_from_user_inatomic_nocache
187 (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), 187 (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK),
188 u64_to_user_ptr(cmd->command), cmd->command_size); 188 u64_to_user_ptr(cmd->command), cmd->command_size);
189 189
190 { 190 {
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 5d84a66fed36..7cb214577275 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -173,6 +173,7 @@ qxl_release_free_list(struct qxl_release *release)
173 list_del(&entry->tv.head); 173 list_del(&entry->tv.head);
174 kfree(entry); 174 kfree(entry);
175 } 175 }
176 release->release_bo = NULL;
176} 177}
177 178
178void 179void
@@ -296,7 +297,6 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
296{ 297{
297 if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) { 298 if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
298 int idr_ret; 299 int idr_ret;
299 struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
300 struct qxl_bo *bo; 300 struct qxl_bo *bo;
301 union qxl_release_info *info; 301 union qxl_release_info *info;
302 302
@@ -304,8 +304,9 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
304 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); 304 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
305 if (idr_ret < 0) 305 if (idr_ret < 0)
306 return idr_ret; 306 return idr_ret;
307 bo = to_qxl_bo(entry->tv.bo); 307 bo = create_rel->release_bo;
308 308
309 (*release)->release_bo = bo;
309 (*release)->release_offset = create_rel->release_offset + 64; 310 (*release)->release_offset = create_rel->release_offset + 64;
310 311
311 qxl_release_list_add(*release, bo); 312 qxl_release_list_add(*release, bo);
@@ -365,6 +366,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
365 366
366 bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]); 367 bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
367 368
369 (*release)->release_bo = bo;
368 (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx]; 370 (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
369 qdev->current_release_bo_offset[cur_idx]++; 371 qdev->current_release_bo_offset[cur_idx]++;
370 372
@@ -408,13 +410,12 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
408{ 410{
409 void *ptr; 411 void *ptr;
410 union qxl_release_info *info; 412 union qxl_release_info *info;
411 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); 413 struct qxl_bo *bo = release->release_bo;
412 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
413 414
414 ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE); 415 ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
415 if (!ptr) 416 if (!ptr)
416 return NULL; 417 return NULL;
417 info = ptr + (release->release_offset & ~PAGE_SIZE); 418 info = ptr + (release->release_offset & ~PAGE_MASK);
418 return info; 419 return info;
419} 420}
420 421
@@ -422,11 +423,10 @@ void qxl_release_unmap(struct qxl_device *qdev,
422 struct qxl_release *release, 423 struct qxl_release *release,
423 union qxl_release_info *info) 424 union qxl_release_info *info)
424{ 425{
425 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); 426 struct qxl_bo *bo = release->release_bo;
426 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
427 void *ptr; 427 void *ptr;
428 428
429 ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE); 429 ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
430 qxl_bo_kunmap_atomic_page(qdev, bo, ptr); 430 qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
431} 431}
432 432
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index bffff4c9fbf5..be3f14d7746d 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -94,64 +94,9 @@ static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder)
94 } 94 }
95} 95}
96 96
97static enum drm_mode_status sun4i_lvds_encoder_mode_valid(struct drm_encoder *crtc,
98 const struct drm_display_mode *mode)
99{
100 struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(crtc);
101 struct sun4i_tcon *tcon = lvds->tcon;
102 u32 hsync = mode->hsync_end - mode->hsync_start;
103 u32 vsync = mode->vsync_end - mode->vsync_start;
104 unsigned long rate = mode->clock * 1000;
105 long rounded_rate;
106
107 DRM_DEBUG_DRIVER("Validating modes...\n");
108
109 if (hsync < 1)
110 return MODE_HSYNC_NARROW;
111
112 if (hsync > 0x3ff)
113 return MODE_HSYNC_WIDE;
114
115 if ((mode->hdisplay < 1) || (mode->htotal < 1))
116 return MODE_H_ILLEGAL;
117
118 if ((mode->hdisplay > 0x7ff) || (mode->htotal > 0xfff))
119 return MODE_BAD_HVALUE;
120
121 DRM_DEBUG_DRIVER("Horizontal parameters OK\n");
122
123 if (vsync < 1)
124 return MODE_VSYNC_NARROW;
125
126 if (vsync > 0x3ff)
127 return MODE_VSYNC_WIDE;
128
129 if ((mode->vdisplay < 1) || (mode->vtotal < 1))
130 return MODE_V_ILLEGAL;
131
132 if ((mode->vdisplay > 0x7ff) || (mode->vtotal > 0xfff))
133 return MODE_BAD_VVALUE;
134
135 DRM_DEBUG_DRIVER("Vertical parameters OK\n");
136
137 tcon->dclk_min_div = 7;
138 tcon->dclk_max_div = 7;
139 rounded_rate = clk_round_rate(tcon->dclk, rate);
140 if (rounded_rate < rate)
141 return MODE_CLOCK_LOW;
142
143 if (rounded_rate > rate)
144 return MODE_CLOCK_HIGH;
145
146 DRM_DEBUG_DRIVER("Clock rate OK\n");
147
148 return MODE_OK;
149}
150
151static const struct drm_encoder_helper_funcs sun4i_lvds_enc_helper_funcs = { 97static const struct drm_encoder_helper_funcs sun4i_lvds_enc_helper_funcs = {
152 .disable = sun4i_lvds_encoder_disable, 98 .disable = sun4i_lvds_encoder_disable,
153 .enable = sun4i_lvds_encoder_enable, 99 .enable = sun4i_lvds_encoder_enable,
154 .mode_valid = sun4i_lvds_encoder_mode_valid,
155}; 100};
156 101
157static const struct drm_encoder_funcs sun4i_lvds_enc_funcs = { 102static const struct drm_encoder_funcs sun4i_lvds_enc_funcs = {
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 2decc8e2c79f..add9cc97a3b6 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -195,6 +195,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
195 vc4_bo_set_label(obj, -1); 195 vc4_bo_set_label(obj, -1);
196 196
197 if (bo->validated_shader) { 197 if (bo->validated_shader) {
198 kfree(bo->validated_shader->uniform_addr_offsets);
198 kfree(bo->validated_shader->texture_samples); 199 kfree(bo->validated_shader->texture_samples);
199 kfree(bo->validated_shader); 200 kfree(bo->validated_shader);
200 bo->validated_shader = NULL; 201 bo->validated_shader = NULL;
@@ -591,6 +592,7 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
591 } 592 }
592 593
593 if (bo->validated_shader) { 594 if (bo->validated_shader) {
595 kfree(bo->validated_shader->uniform_addr_offsets);
594 kfree(bo->validated_shader->texture_samples); 596 kfree(bo->validated_shader->texture_samples);
595 kfree(bo->validated_shader); 597 kfree(bo->validated_shader);
596 bo->validated_shader = NULL; 598 bo->validated_shader = NULL;
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index d3f15bf60900..7cf82b071de2 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -942,6 +942,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
942fail: 942fail:
943 kfree(validation_state.branch_targets); 943 kfree(validation_state.branch_targets);
944 if (validated_shader) { 944 if (validated_shader) {
945 kfree(validated_shader->uniform_addr_offsets);
945 kfree(validated_shader->texture_samples); 946 kfree(validated_shader->texture_samples);
946 kfree(validated_shader); 947 kfree(validated_shader);
947 } 948 }
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 48e4f1df6e5d..020070d483d3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -293,7 +293,7 @@ retry:
293 ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC); 293 ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
294 if (ret == -ENOSPC) { 294 if (ret == -ENOSPC) {
295 spin_unlock(&vgdev->ctrlq.qlock); 295 spin_unlock(&vgdev->ctrlq.qlock);
296 wait_event(vgdev->ctrlq.ack_queue, vq->num_free); 296 wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
297 spin_lock(&vgdev->ctrlq.qlock); 297 spin_lock(&vgdev->ctrlq.qlock);
298 goto retry; 298 goto retry;
299 } else { 299 } else {
@@ -368,7 +368,7 @@ retry:
368 ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC); 368 ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
369 if (ret == -ENOSPC) { 369 if (ret == -ENOSPC) {
370 spin_unlock(&vgdev->cursorq.qlock); 370 spin_unlock(&vgdev->cursorq.qlock);
371 wait_event(vgdev->cursorq.ack_queue, vq->num_free); 371 wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
372 spin_lock(&vgdev->cursorq.qlock); 372 spin_lock(&vgdev->cursorq.qlock);
373 goto retry; 373 goto retry;
374 } else { 374 } else {
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5a3a7ead3012..0b5cc910f62e 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -525,6 +525,9 @@
525#define I2C_VENDOR_ID_HANTICK 0x0911 525#define I2C_VENDOR_ID_HANTICK 0x0911
526#define I2C_PRODUCT_ID_HANTICK_5288 0x5288 526#define I2C_PRODUCT_ID_HANTICK_5288 0x5288
527 527
528#define I2C_VENDOR_ID_RAYD 0x2386
529#define I2C_PRODUCT_ID_RAYD_3118 0x3118
530
528#define USB_VENDOR_ID_HANWANG 0x0b57 531#define USB_VENDOR_ID_HANWANG 0x0b57
529#define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000 532#define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000
530#define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff 533#define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 6836a856c243..930652c25120 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -387,7 +387,8 @@ static int hidinput_get_battery_property(struct power_supply *psy,
387 break; 387 break;
388 388
389 case POWER_SUPPLY_PROP_CAPACITY: 389 case POWER_SUPPLY_PROP_CAPACITY:
390 if (dev->battery_report_type == HID_FEATURE_REPORT) { 390 if (dev->battery_status != HID_BATTERY_REPORTED &&
391 !dev->battery_avoid_query) {
391 value = hidinput_query_battery_capacity(dev); 392 value = hidinput_query_battery_capacity(dev);
392 if (value < 0) 393 if (value < 0)
393 return value; 394 return value;
@@ -403,17 +404,17 @@ static int hidinput_get_battery_property(struct power_supply *psy,
403 break; 404 break;
404 405
405 case POWER_SUPPLY_PROP_STATUS: 406 case POWER_SUPPLY_PROP_STATUS:
406 if (!dev->battery_reported && 407 if (dev->battery_status != HID_BATTERY_REPORTED &&
407 dev->battery_report_type == HID_FEATURE_REPORT) { 408 !dev->battery_avoid_query) {
408 value = hidinput_query_battery_capacity(dev); 409 value = hidinput_query_battery_capacity(dev);
409 if (value < 0) 410 if (value < 0)
410 return value; 411 return value;
411 412
412 dev->battery_capacity = value; 413 dev->battery_capacity = value;
413 dev->battery_reported = true; 414 dev->battery_status = HID_BATTERY_QUERIED;
414 } 415 }
415 416
416 if (!dev->battery_reported) 417 if (dev->battery_status == HID_BATTERY_UNKNOWN)
417 val->intval = POWER_SUPPLY_STATUS_UNKNOWN; 418 val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
418 else if (dev->battery_capacity == 100) 419 else if (dev->battery_capacity == 100)
419 val->intval = POWER_SUPPLY_STATUS_FULL; 420 val->intval = POWER_SUPPLY_STATUS_FULL;
@@ -486,6 +487,14 @@ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
486 dev->battery_report_type = report_type; 487 dev->battery_report_type = report_type;
487 dev->battery_report_id = field->report->id; 488 dev->battery_report_id = field->report->id;
488 489
490 /*
491 * Stylus is normally not connected to the device and thus we
492 * can't query the device and get meaningful battery strength.
493 * We have to wait for the device to report it on its own.
494 */
495 dev->battery_avoid_query = report_type == HID_INPUT_REPORT &&
496 field->physical == HID_DG_STYLUS;
497
489 dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg); 498 dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg);
490 if (IS_ERR(dev->battery)) { 499 if (IS_ERR(dev->battery)) {
491 error = PTR_ERR(dev->battery); 500 error = PTR_ERR(dev->battery);
@@ -530,9 +539,10 @@ static void hidinput_update_battery(struct hid_device *dev, int value)
530 539
531 capacity = hidinput_scale_battery_capacity(dev, value); 540 capacity = hidinput_scale_battery_capacity(dev, value);
532 541
533 if (!dev->battery_reported || capacity != dev->battery_capacity) { 542 if (dev->battery_status != HID_BATTERY_REPORTED ||
543 capacity != dev->battery_capacity) {
534 dev->battery_capacity = capacity; 544 dev->battery_capacity = capacity;
535 dev->battery_reported = true; 545 dev->battery_status = HID_BATTERY_REPORTED;
536 power_supply_changed(dev->battery); 546 power_supply_changed(dev->battery);
537 } 547 }
538} 548}
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index fbfcc8009432..b39844adea47 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -192,6 +192,11 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t
192 int ret = 0, len; 192 int ret = 0, len;
193 unsigned char report_number; 193 unsigned char report_number;
194 194
195 if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
196 ret = -ENODEV;
197 goto out;
198 }
199
195 dev = hidraw_table[minor]->hid; 200 dev = hidraw_table[minor]->hid;
196 201
197 if (!dev->ll_driver->raw_request) { 202 if (!dev->ll_driver->raw_request) {
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 97689e98e53f..963328674e93 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -47,6 +47,7 @@
47/* quirks to control the device */ 47/* quirks to control the device */
48#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0) 48#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
49#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1) 49#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
50#define I2C_HID_QUIRK_RESEND_REPORT_DESCR BIT(2)
50 51
51/* flags */ 52/* flags */
52#define I2C_HID_STARTED 0 53#define I2C_HID_STARTED 0
@@ -171,6 +172,8 @@ static const struct i2c_hid_quirks {
171 I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV }, 172 I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
172 { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288, 173 { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
173 I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, 174 I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
175 { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118,
176 I2C_HID_QUIRK_RESEND_REPORT_DESCR },
174 { 0, 0 } 177 { 0, 0 }
175}; 178};
176 179
@@ -1220,6 +1223,16 @@ static int i2c_hid_resume(struct device *dev)
1220 if (ret) 1223 if (ret)
1221 return ret; 1224 return ret;
1222 1225
1226 /* RAYDIUM device (2386:3118) need to re-send report descr cmd
1227 * after resume, after this it will be back normal.
1228 * otherwise it issues too many incomplete reports.
1229 */
1230 if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) {
1231 ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0);
1232 if (ret)
1233 return ret;
1234 }
1235
1223 if (hid->driver && hid->driver->reset_resume) { 1236 if (hid->driver && hid->driver->reset_resume) {
1224 ret = hid->driver->reset_resume(hid); 1237 ret = hid->driver->reset_resume(hid);
1225 return ret; 1238 return ret;
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 6da16a879c9f..5f947ec20dcb 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -689,6 +689,45 @@ static int wacom_intuos_get_tool_type(int tool_id)
689 return tool_type; 689 return tool_type;
690} 690}
691 691
692static void wacom_exit_report(struct wacom_wac *wacom)
693{
694 struct input_dev *input = wacom->pen_input;
695 struct wacom_features *features = &wacom->features;
696 unsigned char *data = wacom->data;
697 int idx = (features->type == INTUOS) ? (data[1] & 0x01) : 0;
698
699 /*
700 * Reset all states otherwise we lose the initial states
701 * when in-prox next time
702 */
703 input_report_abs(input, ABS_X, 0);
704 input_report_abs(input, ABS_Y, 0);
705 input_report_abs(input, ABS_DISTANCE, 0);
706 input_report_abs(input, ABS_TILT_X, 0);
707 input_report_abs(input, ABS_TILT_Y, 0);
708 if (wacom->tool[idx] >= BTN_TOOL_MOUSE) {
709 input_report_key(input, BTN_LEFT, 0);
710 input_report_key(input, BTN_MIDDLE, 0);
711 input_report_key(input, BTN_RIGHT, 0);
712 input_report_key(input, BTN_SIDE, 0);
713 input_report_key(input, BTN_EXTRA, 0);
714 input_report_abs(input, ABS_THROTTLE, 0);
715 input_report_abs(input, ABS_RZ, 0);
716 } else {
717 input_report_abs(input, ABS_PRESSURE, 0);
718 input_report_key(input, BTN_STYLUS, 0);
719 input_report_key(input, BTN_STYLUS2, 0);
720 input_report_key(input, BTN_TOUCH, 0);
721 input_report_abs(input, ABS_WHEEL, 0);
722 if (features->type >= INTUOS3S)
723 input_report_abs(input, ABS_Z, 0);
724 }
725 input_report_key(input, wacom->tool[idx], 0);
726 input_report_abs(input, ABS_MISC, 0); /* reset tool id */
727 input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
728 wacom->id[idx] = 0;
729}
730
692static int wacom_intuos_inout(struct wacom_wac *wacom) 731static int wacom_intuos_inout(struct wacom_wac *wacom)
693{ 732{
694 struct wacom_features *features = &wacom->features; 733 struct wacom_features *features = &wacom->features;
@@ -741,36 +780,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
741 if (!wacom->id[idx]) 780 if (!wacom->id[idx])
742 return 1; 781 return 1;
743 782
744 /* 783 wacom_exit_report(wacom);
745 * Reset all states otherwise we lose the initial states
746 * when in-prox next time
747 */
748 input_report_abs(input, ABS_X, 0);
749 input_report_abs(input, ABS_Y, 0);
750 input_report_abs(input, ABS_DISTANCE, 0);
751 input_report_abs(input, ABS_TILT_X, 0);
752 input_report_abs(input, ABS_TILT_Y, 0);
753 if (wacom->tool[idx] >= BTN_TOOL_MOUSE) {
754 input_report_key(input, BTN_LEFT, 0);
755 input_report_key(input, BTN_MIDDLE, 0);
756 input_report_key(input, BTN_RIGHT, 0);
757 input_report_key(input, BTN_SIDE, 0);
758 input_report_key(input, BTN_EXTRA, 0);
759 input_report_abs(input, ABS_THROTTLE, 0);
760 input_report_abs(input, ABS_RZ, 0);
761 } else {
762 input_report_abs(input, ABS_PRESSURE, 0);
763 input_report_key(input, BTN_STYLUS, 0);
764 input_report_key(input, BTN_STYLUS2, 0);
765 input_report_key(input, BTN_TOUCH, 0);
766 input_report_abs(input, ABS_WHEEL, 0);
767 if (features->type >= INTUOS3S)
768 input_report_abs(input, ABS_Z, 0);
769 }
770 input_report_key(input, wacom->tool[idx], 0);
771 input_report_abs(input, ABS_MISC, 0); /* reset tool id */
772 input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
773 wacom->id[idx] = 0;
774 return 2; 784 return 2;
775 } 785 }
776 786
@@ -1235,6 +1245,12 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
1235 if (!valid) 1245 if (!valid)
1236 continue; 1246 continue;
1237 1247
1248 if (!prox) {
1249 wacom->shared->stylus_in_proximity = false;
1250 wacom_exit_report(wacom);
1251 input_sync(pen_input);
1252 return;
1253 }
1238 if (range) { 1254 if (range) {
1239 input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1])); 1255 input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1]));
1240 input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3])); 1256 input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3]));
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 051a72eecb24..d2cc55e21374 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -40,6 +40,10 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
40#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 40#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
41#endif 41#endif
42 42
43#ifndef PCI_DEVICE_ID_AMD_17H_RR_NB
44#define PCI_DEVICE_ID_AMD_17H_RR_NB 0x15d0
45#endif
46
43/* CPUID function 0x80000001, ebx */ 47/* CPUID function 0x80000001, ebx */
44#define CPUID_PKGTYPE_MASK 0xf0000000 48#define CPUID_PKGTYPE_MASK 0xf0000000
45#define CPUID_PKGTYPE_F 0x00000000 49#define CPUID_PKGTYPE_F 0x00000000
@@ -72,6 +76,7 @@ struct k10temp_data {
72 struct pci_dev *pdev; 76 struct pci_dev *pdev;
73 void (*read_tempreg)(struct pci_dev *pdev, u32 *regval); 77 void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
74 int temp_offset; 78 int temp_offset;
79 u32 temp_adjust_mask;
75}; 80};
76 81
77struct tctl_offset { 82struct tctl_offset {
@@ -84,6 +89,7 @@ static const struct tctl_offset tctl_offset_table[] = {
84 { 0x17, "AMD Ryzen 5 1600X", 20000 }, 89 { 0x17, "AMD Ryzen 5 1600X", 20000 },
85 { 0x17, "AMD Ryzen 7 1700X", 20000 }, 90 { 0x17, "AMD Ryzen 7 1700X", 20000 },
86 { 0x17, "AMD Ryzen 7 1800X", 20000 }, 91 { 0x17, "AMD Ryzen 7 1800X", 20000 },
92 { 0x17, "AMD Ryzen 7 2700X", 10000 },
87 { 0x17, "AMD Ryzen Threadripper 1950X", 27000 }, 93 { 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
88 { 0x17, "AMD Ryzen Threadripper 1920X", 27000 }, 94 { 0x17, "AMD Ryzen Threadripper 1920X", 27000 },
89 { 0x17, "AMD Ryzen Threadripper 1900X", 27000 }, 95 { 0x17, "AMD Ryzen Threadripper 1900X", 27000 },
@@ -129,6 +135,8 @@ static ssize_t temp1_input_show(struct device *dev,
129 135
130 data->read_tempreg(data->pdev, &regval); 136 data->read_tempreg(data->pdev, &regval);
131 temp = (regval >> 21) * 125; 137 temp = (regval >> 21) * 125;
138 if (regval & data->temp_adjust_mask)
139 temp -= 49000;
132 if (temp > data->temp_offset) 140 if (temp > data->temp_offset)
133 temp -= data->temp_offset; 141 temp -= data->temp_offset;
134 else 142 else
@@ -259,12 +267,14 @@ static int k10temp_probe(struct pci_dev *pdev,
259 data->pdev = pdev; 267 data->pdev = pdev;
260 268
261 if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 || 269 if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 ||
262 boot_cpu_data.x86_model == 0x70)) 270 boot_cpu_data.x86_model == 0x70)) {
263 data->read_tempreg = read_tempreg_nb_f15; 271 data->read_tempreg = read_tempreg_nb_f15;
264 else if (boot_cpu_data.x86 == 0x17) 272 } else if (boot_cpu_data.x86 == 0x17) {
273 data->temp_adjust_mask = 0x80000;
265 data->read_tempreg = read_tempreg_nb_f17; 274 data->read_tempreg = read_tempreg_nb_f17;
266 else 275 } else {
267 data->read_tempreg = read_tempreg_pci; 276 data->read_tempreg = read_tempreg_pci;
277 }
268 278
269 for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) { 279 for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
270 const struct tctl_offset *entry = &tctl_offset_table[i]; 280 const struct tctl_offset *entry = &tctl_offset_table[i];
@@ -292,6 +302,7 @@ static const struct pci_device_id k10temp_id_table[] = {
292 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, 302 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
293 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, 303 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
294 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, 304 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
305 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_RR_NB) },
295 {} 306 {}
296}; 307};
297MODULE_DEVICE_TABLE(pci, k10temp_id_table); 308MODULE_DEVICE_TABLE(pci, k10temp_id_table);
diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
index 8b0bc4fc06e8..b0bc77bf2cd9 100644
--- a/drivers/hwmon/nct6683.c
+++ b/drivers/hwmon/nct6683.c
@@ -1380,8 +1380,8 @@ static int __init nct6683_find(int sioaddr, struct nct6683_sio_data *sio_data)
1380 /* Activate logical device if needed */ 1380 /* Activate logical device if needed */
1381 val = superio_inb(sioaddr, SIO_REG_ENABLE); 1381 val = superio_inb(sioaddr, SIO_REG_ENABLE);
1382 if (!(val & 0x01)) { 1382 if (!(val & 0x01)) {
1383 pr_err("EC is disabled\n"); 1383 pr_warn("Forcibly enabling EC access. Data may be unusable.\n");
1384 goto fail; 1384 superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01);
1385 } 1385 }
1386 1386
1387 superio_exit(sioaddr); 1387 superio_exit(sioaddr);
diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c
index 363bf56eb0f2..91976b6ca300 100644
--- a/drivers/hwmon/scmi-hwmon.c
+++ b/drivers/hwmon/scmi-hwmon.c
@@ -170,7 +170,10 @@ static int scmi_hwmon_probe(struct scmi_device *sdev)
170 scmi_chip_info.info = ptr_scmi_ci; 170 scmi_chip_info.info = ptr_scmi_ci;
171 chip_info = &scmi_chip_info; 171 chip_info = &scmi_chip_info;
172 172
173 for (type = 0; type < hwmon_max && nr_count[type]; type++) { 173 for (type = 0; type < hwmon_max; type++) {
174 if (!nr_count[type])
175 continue;
176
174 scmi_hwmon_add_chan_info(scmi_hwmon_chan, dev, nr_count[type], 177 scmi_hwmon_add_chan_info(scmi_hwmon_chan, dev, nr_count[type],
175 type, hwmon_attributes[type]); 178 type, hwmon_attributes[type]);
176 *ptr_scmi_ci++ = scmi_hwmon_chan++; 179 *ptr_scmi_ci++ = scmi_hwmon_chan++;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index c4865b08d7fb..8d21b9825d71 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -707,7 +707,6 @@ config I2C_MPC
707config I2C_MT65XX 707config I2C_MT65XX
708 tristate "MediaTek I2C adapter" 708 tristate "MediaTek I2C adapter"
709 depends on ARCH_MEDIATEK || COMPILE_TEST 709 depends on ARCH_MEDIATEK || COMPILE_TEST
710 depends on HAS_DMA
711 help 710 help
712 This selects the MediaTek(R) Integrated Inter Circuit bus driver 711 This selects the MediaTek(R) Integrated Inter Circuit bus driver
713 for MT65xx and MT81xx. 712 for MT65xx and MT81xx.
@@ -885,7 +884,6 @@ config I2C_SH7760
885 884
886config I2C_SH_MOBILE 885config I2C_SH_MOBILE
887 tristate "SuperH Mobile I2C Controller" 886 tristate "SuperH Mobile I2C Controller"
888 depends on HAS_DMA
889 depends on ARCH_SHMOBILE || ARCH_RENESAS || COMPILE_TEST 887 depends on ARCH_SHMOBILE || ARCH_RENESAS || COMPILE_TEST
890 help 888 help
891 If you say yes to this option, support will be included for the 889 If you say yes to this option, support will be included for the
@@ -1098,7 +1096,6 @@ config I2C_XLP9XX
1098 1096
1099config I2C_RCAR 1097config I2C_RCAR
1100 tristate "Renesas R-Car I2C Controller" 1098 tristate "Renesas R-Car I2C Controller"
1101 depends on HAS_DMA
1102 depends on ARCH_RENESAS || COMPILE_TEST 1099 depends on ARCH_RENESAS || COMPILE_TEST
1103 select I2C_SLAVE 1100 select I2C_SLAVE
1104 help 1101 help
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index 25fcc3c1e32b..4053259bccb8 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -86,6 +86,7 @@ struct sprd_i2c {
86 u32 count; 86 u32 count;
87 int irq; 87 int irq;
88 int err; 88 int err;
89 bool is_suspended;
89}; 90};
90 91
91static void sprd_i2c_set_count(struct sprd_i2c *i2c_dev, u32 count) 92static void sprd_i2c_set_count(struct sprd_i2c *i2c_dev, u32 count)
@@ -283,6 +284,9 @@ static int sprd_i2c_master_xfer(struct i2c_adapter *i2c_adap,
283 struct sprd_i2c *i2c_dev = i2c_adap->algo_data; 284 struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
284 int im, ret; 285 int im, ret;
285 286
287 if (i2c_dev->is_suspended)
288 return -EBUSY;
289
286 ret = pm_runtime_get_sync(i2c_dev->dev); 290 ret = pm_runtime_get_sync(i2c_dev->dev);
287 if (ret < 0) 291 if (ret < 0)
288 return ret; 292 return ret;
@@ -364,13 +368,12 @@ static irqreturn_t sprd_i2c_isr_thread(int irq, void *dev_id)
364 struct sprd_i2c *i2c_dev = dev_id; 368 struct sprd_i2c *i2c_dev = dev_id;
365 struct i2c_msg *msg = i2c_dev->msg; 369 struct i2c_msg *msg = i2c_dev->msg;
366 bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK); 370 bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK);
367 u32 i2c_count = readl(i2c_dev->base + I2C_COUNT);
368 u32 i2c_tran; 371 u32 i2c_tran;
369 372
370 if (msg->flags & I2C_M_RD) 373 if (msg->flags & I2C_M_RD)
371 i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD; 374 i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD;
372 else 375 else
373 i2c_tran = i2c_count; 376 i2c_tran = i2c_dev->count;
374 377
375 /* 378 /*
376 * If we got one ACK from slave when writing data, and we did not 379 * If we got one ACK from slave when writing data, and we did not
@@ -408,14 +411,13 @@ static irqreturn_t sprd_i2c_isr(int irq, void *dev_id)
408{ 411{
409 struct sprd_i2c *i2c_dev = dev_id; 412 struct sprd_i2c *i2c_dev = dev_id;
410 struct i2c_msg *msg = i2c_dev->msg; 413 struct i2c_msg *msg = i2c_dev->msg;
411 u32 i2c_count = readl(i2c_dev->base + I2C_COUNT);
412 bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK); 414 bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK);
413 u32 i2c_tran; 415 u32 i2c_tran;
414 416
415 if (msg->flags & I2C_M_RD) 417 if (msg->flags & I2C_M_RD)
416 i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD; 418 i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD;
417 else 419 else
418 i2c_tran = i2c_count; 420 i2c_tran = i2c_dev->count;
419 421
420 /* 422 /*
421 * If we did not get one ACK from slave when writing data, then we 423 * If we did not get one ACK from slave when writing data, then we
@@ -586,11 +588,23 @@ static int sprd_i2c_remove(struct platform_device *pdev)
586 588
587static int __maybe_unused sprd_i2c_suspend_noirq(struct device *pdev) 589static int __maybe_unused sprd_i2c_suspend_noirq(struct device *pdev)
588{ 590{
591 struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev);
592
593 i2c_lock_adapter(&i2c_dev->adap);
594 i2c_dev->is_suspended = true;
595 i2c_unlock_adapter(&i2c_dev->adap);
596
589 return pm_runtime_force_suspend(pdev); 597 return pm_runtime_force_suspend(pdev);
590} 598}
591 599
592static int __maybe_unused sprd_i2c_resume_noirq(struct device *pdev) 600static int __maybe_unused sprd_i2c_resume_noirq(struct device *pdev)
593{ 601{
602 struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev);
603
604 i2c_lock_adapter(&i2c_dev->adap);
605 i2c_dev->is_suspended = false;
606 i2c_unlock_adapter(&i2c_dev->adap);
607
594 return pm_runtime_force_resume(pdev); 608 return pm_runtime_force_resume(pdev);
595} 609}
596 610
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 036a03f0d0a6..1667b6e7674f 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -280,7 +280,7 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client,
280 */ 280 */
281 if (msgs[i].flags & I2C_M_RECV_LEN) { 281 if (msgs[i].flags & I2C_M_RECV_LEN) {
282 if (!(msgs[i].flags & I2C_M_RD) || 282 if (!(msgs[i].flags & I2C_M_RD) ||
283 msgs[i].buf[0] < 1 || 283 msgs[i].len < 1 || msgs[i].buf[0] < 1 ||
284 msgs[i].len < msgs[i].buf[0] + 284 msgs[i].len < msgs[i].buf[0] +
285 I2C_SMBUS_BLOCK_MAX) { 285 I2C_SMBUS_BLOCK_MAX) {
286 res = -EINVAL; 286 res = -EINVAL;
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 46115a392098..c81c79d01d93 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -31,6 +31,7 @@
31enum evdev_clock_type { 31enum evdev_clock_type {
32 EV_CLK_REAL = 0, 32 EV_CLK_REAL = 0,
33 EV_CLK_MONO, 33 EV_CLK_MONO,
34 EV_CLK_BOOT,
34 EV_CLK_MAX 35 EV_CLK_MAX
35}; 36};
36 37
@@ -197,10 +198,12 @@ static int evdev_set_clk_type(struct evdev_client *client, unsigned int clkid)
197 case CLOCK_REALTIME: 198 case CLOCK_REALTIME:
198 clk_type = EV_CLK_REAL; 199 clk_type = EV_CLK_REAL;
199 break; 200 break;
200 case CLOCK_BOOTTIME:
201 case CLOCK_MONOTONIC: 201 case CLOCK_MONOTONIC:
202 clk_type = EV_CLK_MONO; 202 clk_type = EV_CLK_MONO;
203 break; 203 break;
204 case CLOCK_BOOTTIME:
205 clk_type = EV_CLK_BOOT;
206 break;
204 default: 207 default:
205 return -EINVAL; 208 return -EINVAL;
206 } 209 }
@@ -311,6 +314,8 @@ static void evdev_events(struct input_handle *handle,
311 314
312 ev_time[EV_CLK_MONO] = ktime_get(); 315 ev_time[EV_CLK_MONO] = ktime_get();
313 ev_time[EV_CLK_REAL] = ktime_mono_to_real(ev_time[EV_CLK_MONO]); 316 ev_time[EV_CLK_REAL] = ktime_mono_to_real(ev_time[EV_CLK_MONO]);
317 ev_time[EV_CLK_BOOT] = ktime_mono_to_any(ev_time[EV_CLK_MONO],
318 TK_OFFS_BOOT);
314 319
315 rcu_read_lock(); 320 rcu_read_lock();
316 321
diff --git a/drivers/isdn/mISDN/dsp_hwec.c b/drivers/isdn/mISDN/dsp_hwec.c
index a6e87076acc2..5336bbdbfdc5 100644
--- a/drivers/isdn/mISDN/dsp_hwec.c
+++ b/drivers/isdn/mISDN/dsp_hwec.c
@@ -68,12 +68,12 @@ void dsp_hwec_enable(struct dsp *dsp, const char *arg)
68 goto _do; 68 goto _do;
69 69
70 { 70 {
71 char _dup[len + 1];
72 char *dup, *tok, *name, *val; 71 char *dup, *tok, *name, *val;
73 int tmp; 72 int tmp;
74 73
75 strcpy(_dup, arg); 74 dup = kstrdup(arg, GFP_ATOMIC);
76 dup = _dup; 75 if (!dup)
76 return;
77 77
78 while ((tok = strsep(&dup, ","))) { 78 while ((tok = strsep(&dup, ","))) {
79 if (!strlen(tok)) 79 if (!strlen(tok))
@@ -89,6 +89,8 @@ void dsp_hwec_enable(struct dsp *dsp, const char *arg)
89 deftaps = tmp; 89 deftaps = tmp;
90 } 90 }
91 } 91 }
92
93 kfree(dup);
92 } 94 }
93 95
94_do: 96_do:
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 21d50e4cc5e1..b05022f94f18 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -279,7 +279,7 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
279 u16 timebase, u8 *buf, int len) 279 u16 timebase, u8 *buf, int len)
280{ 280{
281 u8 *p; 281 u8 *p;
282 u8 frame[len + 32]; 282 u8 frame[MAX_DFRAME_LEN_L1 + 32];
283 struct socket *socket = NULL; 283 struct socket *socket = NULL;
284 284
285 if (debug & DEBUG_L1OIP_MSG) 285 if (debug & DEBUG_L1OIP_MSG)
@@ -902,7 +902,11 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
902 p = skb->data; 902 p = skb->data;
903 l = skb->len; 903 l = skb->len;
904 while (l) { 904 while (l) {
905 ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME; 905 /*
906 * This is technically bounded by L1OIP_MAX_PERFRAME but
907 * MAX_DFRAME_LEN_L1 < L1OIP_MAX_PERFRAME
908 */
909 ll = (l < MAX_DFRAME_LEN_L1) ? l : MAX_DFRAME_LEN_L1;
906 l1oip_socket_send(hc, 0, dch->slot, 0, 910 l1oip_socket_send(hc, 0, dch->slot, 0,
907 hc->chan[dch->slot].tx_counter++, p, ll); 911 hc->chan[dch->slot].tx_counter++, p, ll);
908 p += ll; 912 p += ll;
@@ -1140,7 +1144,11 @@ handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb)
1140 p = skb->data; 1144 p = skb->data;
1141 l = skb->len; 1145 l = skb->len;
1142 while (l) { 1146 while (l) {
1143 ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME; 1147 /*
1148 * This is technically bounded by L1OIP_MAX_PERFRAME but
1149 * MAX_DFRAME_LEN_L1 < L1OIP_MAX_PERFRAME
1150 */
1151 ll = (l < MAX_DFRAME_LEN_L1) ? l : MAX_DFRAME_LEN_L1;
1144 l1oip_socket_send(hc, hc->codec, bch->slot, 0, 1152 l1oip_socket_send(hc, hc->codec, bch->slot, 0,
1145 hc->chan[bch->slot].tx_counter, p, ll); 1153 hc->chan[bch->slot].tx_counter, p, ll);
1146 hc->chan[bch->slot].tx_counter += ll; 1154 hc->chan[bch->slot].tx_counter += ll;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 3bea45e8ccff..c208c01f63a5 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -9256,8 +9256,10 @@ void md_reload_sb(struct mddev *mddev, int nr)
9256 check_sb_changes(mddev, rdev); 9256 check_sb_changes(mddev, rdev);
9257 9257
9258 /* Read all rdev's to update recovery_offset */ 9258 /* Read all rdev's to update recovery_offset */
9259 rdev_for_each_rcu(rdev, mddev) 9259 rdev_for_each_rcu(rdev, mddev) {
9260 read_rdev(mddev, rdev); 9260 if (!test_bit(Faulty, &rdev->flags))
9261 read_rdev(mddev, rdev);
9262 }
9261} 9263}
9262EXPORT_SYMBOL(md_reload_sb); 9264EXPORT_SYMBOL(md_reload_sb);
9263 9265
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e2943fb74056..e9e3308cb0a7 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -854,7 +854,7 @@ static void flush_pending_writes(struct r1conf *conf)
854 * there is no normal IO happeing. It must arrange to call 854 * there is no normal IO happeing. It must arrange to call
855 * lower_barrier when the particular background IO completes. 855 * lower_barrier when the particular background IO completes.
856 */ 856 */
857static void raise_barrier(struct r1conf *conf, sector_t sector_nr) 857static sector_t raise_barrier(struct r1conf *conf, sector_t sector_nr)
858{ 858{
859 int idx = sector_to_idx(sector_nr); 859 int idx = sector_to_idx(sector_nr);
860 860
@@ -885,13 +885,23 @@ static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
885 * max resync count which allowed on current I/O barrier bucket. 885 * max resync count which allowed on current I/O barrier bucket.
886 */ 886 */
887 wait_event_lock_irq(conf->wait_barrier, 887 wait_event_lock_irq(conf->wait_barrier,
888 !conf->array_frozen && 888 (!conf->array_frozen &&
889 !atomic_read(&conf->nr_pending[idx]) && 889 !atomic_read(&conf->nr_pending[idx]) &&
890 atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH, 890 atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
891 test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
891 conf->resync_lock); 892 conf->resync_lock);
892 893
894 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
895 atomic_dec(&conf->barrier[idx]);
896 spin_unlock_irq(&conf->resync_lock);
897 wake_up(&conf->wait_barrier);
898 return -EINTR;
899 }
900
893 atomic_inc(&conf->nr_sync_pending); 901 atomic_inc(&conf->nr_sync_pending);
894 spin_unlock_irq(&conf->resync_lock); 902 spin_unlock_irq(&conf->resync_lock);
903
904 return 0;
895} 905}
896 906
897static void lower_barrier(struct r1conf *conf, sector_t sector_nr) 907static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
@@ -1092,6 +1102,8 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
1092 goto skip_copy; 1102 goto skip_copy;
1093 } 1103 }
1094 1104
1105 behind_bio->bi_write_hint = bio->bi_write_hint;
1106
1095 while (i < vcnt && size) { 1107 while (i < vcnt && size) {
1096 struct page *page; 1108 struct page *page;
1097 int len = min_t(int, PAGE_SIZE, size); 1109 int len = min_t(int, PAGE_SIZE, size);
@@ -2662,9 +2674,12 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2662 2674
2663 bitmap_cond_end_sync(mddev->bitmap, sector_nr, 2675 bitmap_cond_end_sync(mddev->bitmap, sector_nr,
2664 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); 2676 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
2665 r1_bio = raid1_alloc_init_r1buf(conf);
2666 2677
2667 raise_barrier(conf, sector_nr); 2678
2679 if (raise_barrier(conf, sector_nr))
2680 return 0;
2681
2682 r1_bio = raid1_alloc_init_r1buf(conf);
2668 2683
2669 rcu_read_lock(); 2684 rcu_read_lock();
2670 /* 2685 /*
diff --git a/drivers/memory/emif-asm-offsets.c b/drivers/memory/emif-asm-offsets.c
index 71a89d5d3efd..db8043019ec6 100644
--- a/drivers/memory/emif-asm-offsets.c
+++ b/drivers/memory/emif-asm-offsets.c
@@ -16,77 +16,7 @@
16 16
17int main(void) 17int main(void)
18{ 18{
19 DEFINE(EMIF_SDCFG_VAL_OFFSET, 19 ti_emif_asm_offsets();
20 offsetof(struct emif_regs_amx3, emif_sdcfg_val));
21 DEFINE(EMIF_TIMING1_VAL_OFFSET,
22 offsetof(struct emif_regs_amx3, emif_timing1_val));
23 DEFINE(EMIF_TIMING2_VAL_OFFSET,
24 offsetof(struct emif_regs_amx3, emif_timing2_val));
25 DEFINE(EMIF_TIMING3_VAL_OFFSET,
26 offsetof(struct emif_regs_amx3, emif_timing3_val));
27 DEFINE(EMIF_REF_CTRL_VAL_OFFSET,
28 offsetof(struct emif_regs_amx3, emif_ref_ctrl_val));
29 DEFINE(EMIF_ZQCFG_VAL_OFFSET,
30 offsetof(struct emif_regs_amx3, emif_zqcfg_val));
31 DEFINE(EMIF_PMCR_VAL_OFFSET,
32 offsetof(struct emif_regs_amx3, emif_pmcr_val));
33 DEFINE(EMIF_PMCR_SHDW_VAL_OFFSET,
34 offsetof(struct emif_regs_amx3, emif_pmcr_shdw_val));
35 DEFINE(EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET,
36 offsetof(struct emif_regs_amx3, emif_rd_wr_level_ramp_ctrl));
37 DEFINE(EMIF_RD_WR_EXEC_THRESH_OFFSET,
38 offsetof(struct emif_regs_amx3, emif_rd_wr_exec_thresh));
39 DEFINE(EMIF_COS_CONFIG_OFFSET,
40 offsetof(struct emif_regs_amx3, emif_cos_config));
41 DEFINE(EMIF_PRIORITY_TO_COS_MAPPING_OFFSET,
42 offsetof(struct emif_regs_amx3, emif_priority_to_cos_mapping));
43 DEFINE(EMIF_CONNECT_ID_SERV_1_MAP_OFFSET,
44 offsetof(struct emif_regs_amx3, emif_connect_id_serv_1_map));
45 DEFINE(EMIF_CONNECT_ID_SERV_2_MAP_OFFSET,
46 offsetof(struct emif_regs_amx3, emif_connect_id_serv_2_map));
47 DEFINE(EMIF_OCP_CONFIG_VAL_OFFSET,
48 offsetof(struct emif_regs_amx3, emif_ocp_config_val));
49 DEFINE(EMIF_LPDDR2_NVM_TIM_OFFSET,
50 offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim));
51 DEFINE(EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET,
52 offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim_shdw));
53 DEFINE(EMIF_DLL_CALIB_CTRL_VAL_OFFSET,
54 offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val));
55 DEFINE(EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET,
56 offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val_shdw));
57 DEFINE(EMIF_DDR_PHY_CTLR_1_OFFSET,
58 offsetof(struct emif_regs_amx3, emif_ddr_phy_ctlr_1));
59 DEFINE(EMIF_EXT_PHY_CTRL_VALS_OFFSET,
60 offsetof(struct emif_regs_amx3, emif_ext_phy_ctrl_vals));
61 DEFINE(EMIF_REGS_AMX3_SIZE, sizeof(struct emif_regs_amx3));
62
63 BLANK();
64
65 DEFINE(EMIF_PM_BASE_ADDR_VIRT_OFFSET,
66 offsetof(struct ti_emif_pm_data, ti_emif_base_addr_virt));
67 DEFINE(EMIF_PM_BASE_ADDR_PHYS_OFFSET,
68 offsetof(struct ti_emif_pm_data, ti_emif_base_addr_phys));
69 DEFINE(EMIF_PM_CONFIG_OFFSET,
70 offsetof(struct ti_emif_pm_data, ti_emif_sram_config));
71 DEFINE(EMIF_PM_REGS_VIRT_OFFSET,
72 offsetof(struct ti_emif_pm_data, regs_virt));
73 DEFINE(EMIF_PM_REGS_PHYS_OFFSET,
74 offsetof(struct ti_emif_pm_data, regs_phys));
75 DEFINE(EMIF_PM_DATA_SIZE, sizeof(struct ti_emif_pm_data));
76
77 BLANK();
78
79 DEFINE(EMIF_PM_SAVE_CONTEXT_OFFSET,
80 offsetof(struct ti_emif_pm_functions, save_context));
81 DEFINE(EMIF_PM_RESTORE_CONTEXT_OFFSET,
82 offsetof(struct ti_emif_pm_functions, restore_context));
83 DEFINE(EMIF_PM_ENTER_SR_OFFSET,
84 offsetof(struct ti_emif_pm_functions, enter_sr));
85 DEFINE(EMIF_PM_EXIT_SR_OFFSET,
86 offsetof(struct ti_emif_pm_functions, exit_sr));
87 DEFINE(EMIF_PM_ABORT_SR_OFFSET,
88 offsetof(struct ti_emif_pm_functions, abort_sr));
89 DEFINE(EMIF_PM_FUNCTIONS_SIZE, sizeof(struct ti_emif_pm_functions));
90 20
91 return 0; 21 return 0;
92} 22}
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 8e0acd197c43..6af946d16d24 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -9,6 +9,7 @@
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/bitops.h>
12#include <linux/device.h> 13#include <linux/device.h>
13#include <linux/dma-mapping.h> 14#include <linux/dma-mapping.h>
14#include <linux/io-64-nonatomic-hi-lo.h> 15#include <linux/io-64-nonatomic-hi-lo.h>
@@ -62,6 +63,17 @@
62 * need a custom accessor. 63 * need a custom accessor.
63 */ 64 */
64 65
66static unsigned long global_flags;
67/*
68 * Workaround for avoiding to use RX DMAC by multiple channels.
69 * On R-Car H3 ES1.* and M3-W ES1.0, when multiple SDHI channels use
70 * RX DMAC simultaneously, sometimes hundreds of bytes data are not
71 * stored into the system memory even if the DMAC interrupt happened.
72 * So, this driver then uses one RX DMAC channel only.
73 */
74#define SDHI_INTERNAL_DMAC_ONE_RX_ONLY 0
75#define SDHI_INTERNAL_DMAC_RX_IN_USE 1
76
65/* Definitions for sampling clocks */ 77/* Definitions for sampling clocks */
66static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = { 78static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
67 { 79 {
@@ -126,6 +138,9 @@ renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) {
126 renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST, 138 renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST,
127 RST_RESERVED_BITS | val); 139 RST_RESERVED_BITS | val);
128 140
141 if (host->data && host->data->flags & MMC_DATA_READ)
142 clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
143
129 renesas_sdhi_internal_dmac_enable_dma(host, true); 144 renesas_sdhi_internal_dmac_enable_dma(host, true);
130} 145}
131 146
@@ -155,6 +170,9 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
155 if (data->flags & MMC_DATA_READ) { 170 if (data->flags & MMC_DATA_READ) {
156 dtran_mode |= DTRAN_MODE_CH_NUM_CH1; 171 dtran_mode |= DTRAN_MODE_CH_NUM_CH1;
157 dir = DMA_FROM_DEVICE; 172 dir = DMA_FROM_DEVICE;
173 if (test_bit(SDHI_INTERNAL_DMAC_ONE_RX_ONLY, &global_flags) &&
174 test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags))
175 goto force_pio;
158 } else { 176 } else {
159 dtran_mode |= DTRAN_MODE_CH_NUM_CH0; 177 dtran_mode |= DTRAN_MODE_CH_NUM_CH0;
160 dir = DMA_TO_DEVICE; 178 dir = DMA_TO_DEVICE;
@@ -208,6 +226,9 @@ static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
208 renesas_sdhi_internal_dmac_enable_dma(host, false); 226 renesas_sdhi_internal_dmac_enable_dma(host, false);
209 dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->sg_len, dir); 227 dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->sg_len, dir);
210 228
229 if (dir == DMA_FROM_DEVICE)
230 clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
231
211 tmio_mmc_do_data_irq(host); 232 tmio_mmc_do_data_irq(host);
212out: 233out:
213 spin_unlock_irq(&host->lock); 234 spin_unlock_irq(&host->lock);
@@ -251,18 +272,24 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = {
251 * implementation as others may use a different implementation. 272 * implementation as others may use a different implementation.
252 */ 273 */
253static const struct soc_device_attribute gen3_soc_whitelist[] = { 274static const struct soc_device_attribute gen3_soc_whitelist[] = {
254 { .soc_id = "r8a7795", .revision = "ES1.*" }, 275 { .soc_id = "r8a7795", .revision = "ES1.*",
255 { .soc_id = "r8a7795", .revision = "ES2.0" }, 276 .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
256 { .soc_id = "r8a7796", .revision = "ES1.0" }, 277 { .soc_id = "r8a7795", .revision = "ES2.0" },
257 { .soc_id = "r8a77995", .revision = "ES1.0" }, 278 { .soc_id = "r8a7796", .revision = "ES1.0",
258 { /* sentinel */ } 279 .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
280 { .soc_id = "r8a77995", .revision = "ES1.0" },
281 { /* sentinel */ }
259}; 282};
260 283
261static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev) 284static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev)
262{ 285{
263 if (!soc_device_match(gen3_soc_whitelist)) 286 const struct soc_device_attribute *soc = soc_device_match(gen3_soc_whitelist);
287
288 if (!soc)
264 return -ENODEV; 289 return -ENODEV;
265 290
291 global_flags |= (unsigned long)soc->data;
292
266 return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops); 293 return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops);
267} 294}
268 295
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 787434e5589d..78c25ad35fd2 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -1312,7 +1312,7 @@ static void amd_enable_manual_tuning(struct pci_dev *pdev)
1312 pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val); 1312 pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val);
1313} 1313}
1314 1314
1315static int amd_execute_tuning(struct sdhci_host *host, u32 opcode) 1315static int amd_execute_tuning_hs200(struct sdhci_host *host, u32 opcode)
1316{ 1316{
1317 struct sdhci_pci_slot *slot = sdhci_priv(host); 1317 struct sdhci_pci_slot *slot = sdhci_priv(host);
1318 struct pci_dev *pdev = slot->chip->pdev; 1318 struct pci_dev *pdev = slot->chip->pdev;
@@ -1351,6 +1351,27 @@ static int amd_execute_tuning(struct sdhci_host *host, u32 opcode)
1351 return 0; 1351 return 0;
1352} 1352}
1353 1353
1354static int amd_execute_tuning(struct mmc_host *mmc, u32 opcode)
1355{
1356 struct sdhci_host *host = mmc_priv(mmc);
1357
1358 /* AMD requires custom HS200 tuning */
1359 if (host->timing == MMC_TIMING_MMC_HS200)
1360 return amd_execute_tuning_hs200(host, opcode);
1361
1362 /* Otherwise perform standard SDHCI tuning */
1363 return sdhci_execute_tuning(mmc, opcode);
1364}
1365
1366static int amd_probe_slot(struct sdhci_pci_slot *slot)
1367{
1368 struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
1369
1370 ops->execute_tuning = amd_execute_tuning;
1371
1372 return 0;
1373}
1374
1354static int amd_probe(struct sdhci_pci_chip *chip) 1375static int amd_probe(struct sdhci_pci_chip *chip)
1355{ 1376{
1356 struct pci_dev *smbus_dev; 1377 struct pci_dev *smbus_dev;
@@ -1385,12 +1406,12 @@ static const struct sdhci_ops amd_sdhci_pci_ops = {
1385 .set_bus_width = sdhci_set_bus_width, 1406 .set_bus_width = sdhci_set_bus_width,
1386 .reset = sdhci_reset, 1407 .reset = sdhci_reset,
1387 .set_uhs_signaling = sdhci_set_uhs_signaling, 1408 .set_uhs_signaling = sdhci_set_uhs_signaling,
1388 .platform_execute_tuning = amd_execute_tuning,
1389}; 1409};
1390 1410
1391static const struct sdhci_pci_fixes sdhci_amd = { 1411static const struct sdhci_pci_fixes sdhci_amd = {
1392 .probe = amd_probe, 1412 .probe = amd_probe,
1393 .ops = &amd_sdhci_pci_ops, 1413 .ops = &amd_sdhci_pci_ops,
1414 .probe_slot = amd_probe_slot,
1394}; 1415};
1395 1416
1396static const struct pci_device_id pci_ids[] = { 1417static const struct pci_device_id pci_ids[] = {
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index d4c07b85f18e..f5695be14499 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -45,6 +45,7 @@
45#define I82802AB 0x00ad 45#define I82802AB 0x00ad
46#define I82802AC 0x00ac 46#define I82802AC 0x00ac
47#define PF38F4476 0x881c 47#define PF38F4476 0x881c
48#define M28F00AP30 0x8963
48/* STMicroelectronics chips */ 49/* STMicroelectronics chips */
49#define M50LPW080 0x002F 50#define M50LPW080 0x002F
50#define M50FLW080A 0x0080 51#define M50FLW080A 0x0080
@@ -375,6 +376,17 @@ static void cfi_fixup_major_minor(struct cfi_private *cfi,
375 extp->MinorVersion = '1'; 376 extp->MinorVersion = '1';
376} 377}
377 378
379static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
380{
381 /*
382 * Micron(was Numonyx) 1Gbit bottom boot are buggy w.r.t
383 * Erase Supend for their small Erase Blocks(0x8000)
384 */
385 if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
386 return 1;
387 return 0;
388}
389
378static inline struct cfi_pri_intelext * 390static inline struct cfi_pri_intelext *
379read_pri_intelext(struct map_info *map, __u16 adr) 391read_pri_intelext(struct map_info *map, __u16 adr)
380{ 392{
@@ -831,21 +843,30 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
831 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1)))) 843 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
832 goto sleep; 844 goto sleep;
833 845
846 /* Do not allow suspend iff read/write to EB address */
847 if ((adr & chip->in_progress_block_mask) ==
848 chip->in_progress_block_addr)
849 goto sleep;
850
851 /* do not suspend small EBs, buggy Micron Chips */
852 if (cfi_is_micron_28F00AP30(cfi, chip) &&
853 (chip->in_progress_block_mask == ~(0x8000-1)))
854 goto sleep;
834 855
835 /* Erase suspend */ 856 /* Erase suspend */
836 map_write(map, CMD(0xB0), adr); 857 map_write(map, CMD(0xB0), chip->in_progress_block_addr);
837 858
838 /* If the flash has finished erasing, then 'erase suspend' 859 /* If the flash has finished erasing, then 'erase suspend'
839 * appears to make some (28F320) flash devices switch to 860 * appears to make some (28F320) flash devices switch to
840 * 'read' mode. Make sure that we switch to 'read status' 861 * 'read' mode. Make sure that we switch to 'read status'
841 * mode so we get the right data. --rmk 862 * mode so we get the right data. --rmk
842 */ 863 */
843 map_write(map, CMD(0x70), adr); 864 map_write(map, CMD(0x70), chip->in_progress_block_addr);
844 chip->oldstate = FL_ERASING; 865 chip->oldstate = FL_ERASING;
845 chip->state = FL_ERASE_SUSPENDING; 866 chip->state = FL_ERASE_SUSPENDING;
846 chip->erase_suspended = 1; 867 chip->erase_suspended = 1;
847 for (;;) { 868 for (;;) {
848 status = map_read(map, adr); 869 status = map_read(map, chip->in_progress_block_addr);
849 if (map_word_andequal(map, status, status_OK, status_OK)) 870 if (map_word_andequal(map, status, status_OK, status_OK))
850 break; 871 break;
851 872
@@ -1041,8 +1062,8 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
1041 sending the 0x70 (Read Status) command to an erasing 1062 sending the 0x70 (Read Status) command to an erasing
1042 chip and expecting it to be ignored, that's what we 1063 chip and expecting it to be ignored, that's what we
1043 do. */ 1064 do. */
1044 map_write(map, CMD(0xd0), adr); 1065 map_write(map, CMD(0xd0), chip->in_progress_block_addr);
1045 map_write(map, CMD(0x70), adr); 1066 map_write(map, CMD(0x70), chip->in_progress_block_addr);
1046 chip->oldstate = FL_READY; 1067 chip->oldstate = FL_READY;
1047 chip->state = FL_ERASING; 1068 chip->state = FL_ERASING;
1048 break; 1069 break;
@@ -1933,6 +1954,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1933 map_write(map, CMD(0xD0), adr); 1954 map_write(map, CMD(0xD0), adr);
1934 chip->state = FL_ERASING; 1955 chip->state = FL_ERASING;
1935 chip->erase_suspended = 0; 1956 chip->erase_suspended = 0;
1957 chip->in_progress_block_addr = adr;
1958 chip->in_progress_block_mask = ~(len - 1);
1936 1959
1937 ret = INVAL_CACHE_AND_WAIT(map, chip, adr, 1960 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1938 adr, len, 1961 adr, len,
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 668e2cbc155b..692902df2598 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -816,9 +816,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
816 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2)))) 816 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
817 goto sleep; 817 goto sleep;
818 818
819 /* We could check to see if we're trying to access the sector 819 /* Do not allow suspend iff read/write to EB address */
820 * that is currently being erased. However, no user will try 820 if ((adr & chip->in_progress_block_mask) ==
821 * anything like that so we just wait for the timeout. */ 821 chip->in_progress_block_addr)
822 goto sleep;
822 823
823 /* Erase suspend */ 824 /* Erase suspend */
824 /* It's harmless to issue the Erase-Suspend and Erase-Resume 825 /* It's harmless to issue the Erase-Suspend and Erase-Resume
@@ -2267,6 +2268,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
2267 chip->state = FL_ERASING; 2268 chip->state = FL_ERASING;
2268 chip->erase_suspended = 0; 2269 chip->erase_suspended = 0;
2269 chip->in_progress_block_addr = adr; 2270 chip->in_progress_block_addr = adr;
2271 chip->in_progress_block_mask = ~(map->size - 1);
2270 2272
2271 INVALIDATE_CACHE_UDELAY(map, chip, 2273 INVALIDATE_CACHE_UDELAY(map, chip,
2272 adr, map->size, 2274 adr, map->size,
@@ -2356,6 +2358,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
2356 chip->state = FL_ERASING; 2358 chip->state = FL_ERASING;
2357 chip->erase_suspended = 0; 2359 chip->erase_suspended = 0;
2358 chip->in_progress_block_addr = adr; 2360 chip->in_progress_block_addr = adr;
2361 chip->in_progress_block_mask = ~(len - 1);
2359 2362
2360 INVALIDATE_CACHE_UDELAY(map, chip, 2363 INVALIDATE_CACHE_UDELAY(map, chip,
2361 adr, len, 2364 adr, len,
diff --git a/drivers/mtd/nand/core.c b/drivers/mtd/nand/core.c
index d0cd6f8635d7..9c9f8936b63b 100644
--- a/drivers/mtd/nand/core.c
+++ b/drivers/mtd/nand/core.c
@@ -162,7 +162,6 @@ int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
162 ret = nanddev_erase(nand, &pos); 162 ret = nanddev_erase(nand, &pos);
163 if (ret) { 163 if (ret) {
164 einfo->fail_addr = nanddev_pos_to_offs(nand, &pos); 164 einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);
165 einfo->state = MTD_ERASE_FAILED;
166 165
167 return ret; 166 return ret;
168 } 167 }
@@ -170,8 +169,6 @@ int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
170 nanddev_pos_next_eraseblock(nand, &pos); 169 nanddev_pos_next_eraseblock(nand, &pos);
171 } 170 }
172 171
173 einfo->state = MTD_ERASE_DONE;
174
175 return 0; 172 return 0;
176} 173}
177EXPORT_SYMBOL_GPL(nanddev_mtd_erase); 174EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 10e953218948..1d779a35ac8e 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -2299,29 +2299,20 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
2299 /* 2299 /*
2300 * The legacy "num-cs" property indicates the number of CS on the only 2300 * The legacy "num-cs" property indicates the number of CS on the only
2301 * chip connected to the controller (legacy bindings does not support 2301 * chip connected to the controller (legacy bindings does not support
2302 * more than one chip). CS are only incremented one by one while the RB 2302 * more than one chip). The CS and RB pins are always the #0.
2303 * pin is always the #0.
2304 * 2303 *
2305 * When not using legacy bindings, a couple of "reg" and "nand-rb" 2304 * When not using legacy bindings, a couple of "reg" and "nand-rb"
2306 * properties must be filled. For each chip, expressed as a subnode, 2305 * properties must be filled. For each chip, expressed as a subnode,
2307 * "reg" points to the CS lines and "nand-rb" to the RB line. 2306 * "reg" points to the CS lines and "nand-rb" to the RB line.
2308 */ 2307 */
2309 if (pdata) { 2308 if (pdata || nfc->caps->legacy_of_bindings) {
2310 nsels = 1; 2309 nsels = 1;
2311 } else if (nfc->caps->legacy_of_bindings && 2310 } else {
2312 !of_get_property(np, "num-cs", &nsels)) { 2311 nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
2313 dev_err(dev, "missing num-cs property\n"); 2312 if (nsels <= 0) {
2314 return -EINVAL; 2313 dev_err(dev, "missing/invalid reg property\n");
2315 } else if (!of_get_property(np, "reg", &nsels)) { 2314 return -EINVAL;
2316 dev_err(dev, "missing reg property\n"); 2315 }
2317 return -EINVAL;
2318 }
2319
2320 if (!pdata)
2321 nsels /= sizeof(u32);
2322 if (!nsels) {
2323 dev_err(dev, "invalid reg property size\n");
2324 return -EINVAL;
2325 } 2316 }
2326 2317
2327 /* Alloc the nand chip structure */ 2318 /* Alloc the nand chip structure */
diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c
index f54518ffb36a..f2052fae21c7 100644
--- a/drivers/mtd/nand/raw/tango_nand.c
+++ b/drivers/mtd/nand/raw/tango_nand.c
@@ -645,7 +645,7 @@ static int tango_nand_probe(struct platform_device *pdev)
645 645
646 writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE); 646 writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE);
647 647
648 clk = clk_get(&pdev->dev, NULL); 648 clk = devm_clk_get(&pdev->dev, NULL);
649 if (IS_ERR(clk)) 649 if (IS_ERR(clk))
650 return PTR_ERR(clk); 650 return PTR_ERR(clk);
651 651
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 4b8e9183489a..5872f31eaa60 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -501,7 +501,9 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
501 void __iomem *reg_base = cqspi->iobase; 501 void __iomem *reg_base = cqspi->iobase;
502 void __iomem *ahb_base = cqspi->ahb_base; 502 void __iomem *ahb_base = cqspi->ahb_base;
503 unsigned int remaining = n_rx; 503 unsigned int remaining = n_rx;
504 unsigned int mod_bytes = n_rx % 4;
504 unsigned int bytes_to_read = 0; 505 unsigned int bytes_to_read = 0;
506 u8 *rxbuf_end = rxbuf + n_rx;
505 int ret = 0; 507 int ret = 0;
506 508
507 writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR); 509 writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
@@ -530,11 +532,24 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
530 } 532 }
531 533
532 while (bytes_to_read != 0) { 534 while (bytes_to_read != 0) {
535 unsigned int word_remain = round_down(remaining, 4);
536
533 bytes_to_read *= cqspi->fifo_width; 537 bytes_to_read *= cqspi->fifo_width;
534 bytes_to_read = bytes_to_read > remaining ? 538 bytes_to_read = bytes_to_read > remaining ?
535 remaining : bytes_to_read; 539 remaining : bytes_to_read;
536 ioread32_rep(ahb_base, rxbuf, 540 bytes_to_read = round_down(bytes_to_read, 4);
537 DIV_ROUND_UP(bytes_to_read, 4)); 541 /* Read 4 byte word chunks then single bytes */
542 if (bytes_to_read) {
543 ioread32_rep(ahb_base, rxbuf,
544 (bytes_to_read / 4));
545 } else if (!word_remain && mod_bytes) {
546 unsigned int temp = ioread32(ahb_base);
547
548 bytes_to_read = mod_bytes;
549 memcpy(rxbuf, &temp, min((unsigned int)
550 (rxbuf_end - rxbuf),
551 bytes_to_read));
552 }
538 rxbuf += bytes_to_read; 553 rxbuf += bytes_to_read;
539 remaining -= bytes_to_read; 554 remaining -= bytes_to_read;
540 bytes_to_read = cqspi_get_rd_sram_level(cqspi); 555 bytes_to_read = cqspi_get_rd_sram_level(cqspi);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b7b113018853..718e4914e3a0 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1660,8 +1660,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
1660 } /* switch(bond_mode) */ 1660 } /* switch(bond_mode) */
1661 1661
1662#ifdef CONFIG_NET_POLL_CONTROLLER 1662#ifdef CONFIG_NET_POLL_CONTROLLER
1663 slave_dev->npinfo = bond->dev->npinfo; 1663 if (bond->dev->npinfo) {
1664 if (slave_dev->npinfo) {
1665 if (slave_enable_netpoll(new_slave)) { 1664 if (slave_enable_netpoll(new_slave)) {
1666 netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n"); 1665 netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
1667 res = -EBUSY; 1666 res = -EBUSY;
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index ac7694c71266..a036c490b7ce 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -285,10 +285,18 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
285 struct sk_buff_head *rxq) 285 struct sk_buff_head *rxq)
286{ 286{
287 u16 buf[4] = { 0 }, status, seq_id; 287 u16 buf[4] = { 0 }, status, seq_id;
288 u64 ns, timelo, timehi;
289 struct skb_shared_hwtstamps *shwt; 288 struct skb_shared_hwtstamps *shwt;
289 struct sk_buff_head received;
290 u64 ns, timelo, timehi;
291 unsigned long flags;
290 int err; 292 int err;
291 293
294 /* The latched timestamp belongs to one of the received frames. */
295 __skb_queue_head_init(&received);
296 spin_lock_irqsave(&rxq->lock, flags);
297 skb_queue_splice_tail_init(rxq, &received);
298 spin_unlock_irqrestore(&rxq->lock, flags);
299
292 mutex_lock(&chip->reg_lock); 300 mutex_lock(&chip->reg_lock);
293 err = mv88e6xxx_port_ptp_read(chip, ps->port_id, 301 err = mv88e6xxx_port_ptp_read(chip, ps->port_id,
294 reg, buf, ARRAY_SIZE(buf)); 302 reg, buf, ARRAY_SIZE(buf));
@@ -311,7 +319,7 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
311 /* Since the device can only handle one time stamp at a time, 319 /* Since the device can only handle one time stamp at a time,
312 * we purge any extra frames from the queue. 320 * we purge any extra frames from the queue.
313 */ 321 */
314 for ( ; skb; skb = skb_dequeue(rxq)) { 322 for ( ; skb; skb = __skb_dequeue(&received)) {
315 if (mv88e6xxx_ts_valid(status) && seq_match(skb, seq_id)) { 323 if (mv88e6xxx_ts_valid(status) && seq_match(skb, seq_id)) {
316 ns = timehi << 16 | timelo; 324 ns = timehi << 16 | timelo;
317 325
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 7ea72ef11a55..d272dc6984ac 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -1321,6 +1321,10 @@
1321#define MDIO_VEND2_AN_STAT 0x8002 1321#define MDIO_VEND2_AN_STAT 0x8002
1322#endif 1322#endif
1323 1323
1324#ifndef MDIO_VEND2_PMA_CDR_CONTROL
1325#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056
1326#endif
1327
1324#ifndef MDIO_CTRL1_SPEED1G 1328#ifndef MDIO_CTRL1_SPEED1G
1325#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100) 1329#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
1326#endif 1330#endif
@@ -1369,6 +1373,10 @@
1369#define XGBE_AN_CL37_TX_CONFIG_MASK 0x08 1373#define XGBE_AN_CL37_TX_CONFIG_MASK 0x08
1370#define XGBE_AN_CL37_MII_CTRL_8BIT 0x0100 1374#define XGBE_AN_CL37_MII_CTRL_8BIT 0x0100
1371 1375
1376#define XGBE_PMA_CDR_TRACK_EN_MASK 0x01
1377#define XGBE_PMA_CDR_TRACK_EN_OFF 0x00
1378#define XGBE_PMA_CDR_TRACK_EN_ON 0x01
1379
1372/* Bit setting and getting macros 1380/* Bit setting and getting macros
1373 * The get macro will extract the current bit field value from within 1381 * The get macro will extract the current bit field value from within
1374 * the variable 1382 * the variable
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index 7d128be61310..b91143947ed2 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -519,6 +519,22 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
519 "debugfs_create_file failed\n"); 519 "debugfs_create_file failed\n");
520 } 520 }
521 521
522 if (pdata->vdata->an_cdr_workaround) {
523 pfile = debugfs_create_bool("an_cdr_workaround", 0600,
524 pdata->xgbe_debugfs,
525 &pdata->debugfs_an_cdr_workaround);
526 if (!pfile)
527 netdev_err(pdata->netdev,
528 "debugfs_create_bool failed\n");
529
530 pfile = debugfs_create_bool("an_cdr_track_early", 0600,
531 pdata->xgbe_debugfs,
532 &pdata->debugfs_an_cdr_track_early);
533 if (!pfile)
534 netdev_err(pdata->netdev,
535 "debugfs_create_bool failed\n");
536 }
537
522 kfree(buf); 538 kfree(buf);
523} 539}
524 540
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 795e556d4a3f..441d0973957b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -349,6 +349,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
349 XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1); 349 XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
350 350
351 /* Call MDIO/PHY initialization routine */ 351 /* Call MDIO/PHY initialization routine */
352 pdata->debugfs_an_cdr_workaround = pdata->vdata->an_cdr_workaround;
352 ret = pdata->phy_if.phy_init(pdata); 353 ret = pdata->phy_if.phy_init(pdata);
353 if (ret) 354 if (ret)
354 return ret; 355 return ret;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 072b9f664597..1b45cd73a258 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -432,11 +432,16 @@ static void xgbe_an73_disable(struct xgbe_prv_data *pdata)
432 xgbe_an73_set(pdata, false, false); 432 xgbe_an73_set(pdata, false, false);
433 xgbe_an73_disable_interrupts(pdata); 433 xgbe_an73_disable_interrupts(pdata);
434 434
435 pdata->an_start = 0;
436
435 netif_dbg(pdata, link, pdata->netdev, "CL73 AN disabled\n"); 437 netif_dbg(pdata, link, pdata->netdev, "CL73 AN disabled\n");
436} 438}
437 439
438static void xgbe_an_restart(struct xgbe_prv_data *pdata) 440static void xgbe_an_restart(struct xgbe_prv_data *pdata)
439{ 441{
442 if (pdata->phy_if.phy_impl.an_pre)
443 pdata->phy_if.phy_impl.an_pre(pdata);
444
440 switch (pdata->an_mode) { 445 switch (pdata->an_mode) {
441 case XGBE_AN_MODE_CL73: 446 case XGBE_AN_MODE_CL73:
442 case XGBE_AN_MODE_CL73_REDRV: 447 case XGBE_AN_MODE_CL73_REDRV:
@@ -453,6 +458,9 @@ static void xgbe_an_restart(struct xgbe_prv_data *pdata)
453 458
454static void xgbe_an_disable(struct xgbe_prv_data *pdata) 459static void xgbe_an_disable(struct xgbe_prv_data *pdata)
455{ 460{
461 if (pdata->phy_if.phy_impl.an_post)
462 pdata->phy_if.phy_impl.an_post(pdata);
463
456 switch (pdata->an_mode) { 464 switch (pdata->an_mode) {
457 case XGBE_AN_MODE_CL73: 465 case XGBE_AN_MODE_CL73:
458 case XGBE_AN_MODE_CL73_REDRV: 466 case XGBE_AN_MODE_CL73_REDRV:
@@ -505,11 +513,11 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
505 XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, 513 XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
506 reg); 514 reg);
507 515
508 if (pdata->phy_if.phy_impl.kr_training_post)
509 pdata->phy_if.phy_impl.kr_training_post(pdata);
510
511 netif_dbg(pdata, link, pdata->netdev, 516 netif_dbg(pdata, link, pdata->netdev,
512 "KR training initiated\n"); 517 "KR training initiated\n");
518
519 if (pdata->phy_if.phy_impl.kr_training_post)
520 pdata->phy_if.phy_impl.kr_training_post(pdata);
513 } 521 }
514 522
515 return XGBE_AN_PAGE_RECEIVED; 523 return XGBE_AN_PAGE_RECEIVED;
@@ -637,11 +645,11 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
637 return XGBE_AN_NO_LINK; 645 return XGBE_AN_NO_LINK;
638 } 646 }
639 647
640 xgbe_an73_disable(pdata); 648 xgbe_an_disable(pdata);
641 649
642 xgbe_switch_mode(pdata); 650 xgbe_switch_mode(pdata);
643 651
644 xgbe_an73_restart(pdata); 652 xgbe_an_restart(pdata);
645 653
646 return XGBE_AN_INCOMPAT_LINK; 654 return XGBE_AN_INCOMPAT_LINK;
647} 655}
@@ -820,6 +828,9 @@ static void xgbe_an37_state_machine(struct xgbe_prv_data *pdata)
820 pdata->an_result = pdata->an_state; 828 pdata->an_result = pdata->an_state;
821 pdata->an_state = XGBE_AN_READY; 829 pdata->an_state = XGBE_AN_READY;
822 830
831 if (pdata->phy_if.phy_impl.an_post)
832 pdata->phy_if.phy_impl.an_post(pdata);
833
823 netif_dbg(pdata, link, pdata->netdev, "CL37 AN result: %s\n", 834 netif_dbg(pdata, link, pdata->netdev, "CL37 AN result: %s\n",
824 xgbe_state_as_string(pdata->an_result)); 835 xgbe_state_as_string(pdata->an_result));
825 } 836 }
@@ -903,6 +914,9 @@ again:
903 pdata->kx_state = XGBE_RX_BPA; 914 pdata->kx_state = XGBE_RX_BPA;
904 pdata->an_start = 0; 915 pdata->an_start = 0;
905 916
917 if (pdata->phy_if.phy_impl.an_post)
918 pdata->phy_if.phy_impl.an_post(pdata);
919
906 netif_dbg(pdata, link, pdata->netdev, "CL73 AN result: %s\n", 920 netif_dbg(pdata, link, pdata->netdev, "CL73 AN result: %s\n",
907 xgbe_state_as_string(pdata->an_result)); 921 xgbe_state_as_string(pdata->an_result));
908 } 922 }
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index eb23f9ba1a9a..82d1f416ee2a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -456,6 +456,7 @@ static const struct xgbe_version_data xgbe_v2a = {
456 .irq_reissue_support = 1, 456 .irq_reissue_support = 1,
457 .tx_desc_prefetch = 5, 457 .tx_desc_prefetch = 5,
458 .rx_desc_prefetch = 5, 458 .rx_desc_prefetch = 5,
459 .an_cdr_workaround = 1,
459}; 460};
460 461
461static const struct xgbe_version_data xgbe_v2b = { 462static const struct xgbe_version_data xgbe_v2b = {
@@ -470,6 +471,7 @@ static const struct xgbe_version_data xgbe_v2b = {
470 .irq_reissue_support = 1, 471 .irq_reissue_support = 1,
471 .tx_desc_prefetch = 5, 472 .tx_desc_prefetch = 5,
472 .rx_desc_prefetch = 5, 473 .rx_desc_prefetch = 5,
474 .an_cdr_workaround = 1,
473}; 475};
474 476
475static const struct pci_device_id xgbe_pci_table[] = { 477static const struct pci_device_id xgbe_pci_table[] = {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 3304a291aa96..aac884314000 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -147,6 +147,14 @@
147/* Rate-change complete wait/retry count */ 147/* Rate-change complete wait/retry count */
148#define XGBE_RATECHANGE_COUNT 500 148#define XGBE_RATECHANGE_COUNT 500
149 149
150/* CDR delay values for KR support (in usec) */
151#define XGBE_CDR_DELAY_INIT 10000
152#define XGBE_CDR_DELAY_INC 10000
153#define XGBE_CDR_DELAY_MAX 100000
154
155/* RRC frequency during link status check */
156#define XGBE_RRC_FREQUENCY 10
157
150enum xgbe_port_mode { 158enum xgbe_port_mode {
151 XGBE_PORT_MODE_RSVD = 0, 159 XGBE_PORT_MODE_RSVD = 0,
152 XGBE_PORT_MODE_BACKPLANE, 160 XGBE_PORT_MODE_BACKPLANE,
@@ -245,6 +253,10 @@ enum xgbe_sfp_speed {
245#define XGBE_SFP_BASE_VENDOR_SN 4 253#define XGBE_SFP_BASE_VENDOR_SN 4
246#define XGBE_SFP_BASE_VENDOR_SN_LEN 16 254#define XGBE_SFP_BASE_VENDOR_SN_LEN 16
247 255
256#define XGBE_SFP_EXTD_OPT1 1
257#define XGBE_SFP_EXTD_OPT1_RX_LOS BIT(1)
258#define XGBE_SFP_EXTD_OPT1_TX_FAULT BIT(3)
259
248#define XGBE_SFP_EXTD_DIAG 28 260#define XGBE_SFP_EXTD_DIAG 28
249#define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2) 261#define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2)
250 262
@@ -324,6 +336,7 @@ struct xgbe_phy_data {
324 336
325 unsigned int sfp_gpio_address; 337 unsigned int sfp_gpio_address;
326 unsigned int sfp_gpio_mask; 338 unsigned int sfp_gpio_mask;
339 unsigned int sfp_gpio_inputs;
327 unsigned int sfp_gpio_rx_los; 340 unsigned int sfp_gpio_rx_los;
328 unsigned int sfp_gpio_tx_fault; 341 unsigned int sfp_gpio_tx_fault;
329 unsigned int sfp_gpio_mod_absent; 342 unsigned int sfp_gpio_mod_absent;
@@ -355,6 +368,10 @@ struct xgbe_phy_data {
355 unsigned int redrv_addr; 368 unsigned int redrv_addr;
356 unsigned int redrv_lane; 369 unsigned int redrv_lane;
357 unsigned int redrv_model; 370 unsigned int redrv_model;
371
372 /* KR AN support */
373 unsigned int phy_cdr_notrack;
374 unsigned int phy_cdr_delay;
358}; 375};
359 376
360/* I2C, MDIO and GPIO lines are muxed, so only one device at a time */ 377/* I2C, MDIO and GPIO lines are muxed, so only one device at a time */
@@ -974,6 +991,49 @@ static void xgbe_phy_sfp_external_phy(struct xgbe_prv_data *pdata)
974 phy_data->sfp_phy_avail = 1; 991 phy_data->sfp_phy_avail = 1;
975} 992}
976 993
994static bool xgbe_phy_check_sfp_rx_los(struct xgbe_phy_data *phy_data)
995{
996 u8 *sfp_extd = phy_data->sfp_eeprom.extd;
997
998 if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_RX_LOS))
999 return false;
1000
1001 if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS)
1002 return false;
1003
1004 if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_rx_los))
1005 return true;
1006
1007 return false;
1008}
1009
1010static bool xgbe_phy_check_sfp_tx_fault(struct xgbe_phy_data *phy_data)
1011{
1012 u8 *sfp_extd = phy_data->sfp_eeprom.extd;
1013
1014 if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_TX_FAULT))
1015 return false;
1016
1017 if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT)
1018 return false;
1019
1020 if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_tx_fault))
1021 return true;
1022
1023 return false;
1024}
1025
1026static bool xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data)
1027{
1028 if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT)
1029 return false;
1030
1031 if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_mod_absent))
1032 return true;
1033
1034 return false;
1035}
1036
977static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata) 1037static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata)
978{ 1038{
979 struct xgbe_phy_data *phy_data = pdata->phy_data; 1039 struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -1019,6 +1079,10 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
1019 if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP) 1079 if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP)
1020 return; 1080 return;
1021 1081
1082 /* Update transceiver signals (eeprom extd/options) */
1083 phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data);
1084 phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data);
1085
1022 if (xgbe_phy_sfp_parse_quirks(pdata)) 1086 if (xgbe_phy_sfp_parse_quirks(pdata))
1023 return; 1087 return;
1024 1088
@@ -1184,7 +1248,6 @@ put:
1184static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata) 1248static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
1185{ 1249{
1186 struct xgbe_phy_data *phy_data = pdata->phy_data; 1250 struct xgbe_phy_data *phy_data = pdata->phy_data;
1187 unsigned int gpio_input;
1188 u8 gpio_reg, gpio_ports[2]; 1251 u8 gpio_reg, gpio_ports[2];
1189 int ret; 1252 int ret;
1190 1253
@@ -1199,23 +1262,9 @@ static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
1199 return; 1262 return;
1200 } 1263 }
1201 1264
1202 gpio_input = (gpio_ports[1] << 8) | gpio_ports[0]; 1265 phy_data->sfp_gpio_inputs = (gpio_ports[1] << 8) | gpio_ports[0];
1203
1204 if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) {
1205 /* No GPIO, just assume the module is present for now */
1206 phy_data->sfp_mod_absent = 0;
1207 } else {
1208 if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent)))
1209 phy_data->sfp_mod_absent = 0;
1210 }
1211
1212 if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) &&
1213 (gpio_input & (1 << phy_data->sfp_gpio_rx_los)))
1214 phy_data->sfp_rx_los = 1;
1215 1266
1216 if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT) && 1267 phy_data->sfp_mod_absent = xgbe_phy_check_sfp_mod_absent(phy_data);
1217 (gpio_input & (1 << phy_data->sfp_gpio_tx_fault)))
1218 phy_data->sfp_tx_fault = 1;
1219} 1268}
1220 1269
1221static void xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata) 1270static void xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata)
@@ -2361,7 +2410,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
2361 return 1; 2410 return 1;
2362 2411
2363 /* No link, attempt a receiver reset cycle */ 2412 /* No link, attempt a receiver reset cycle */
2364 if (phy_data->rrc_count++) { 2413 if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
2365 phy_data->rrc_count = 0; 2414 phy_data->rrc_count = 0;
2366 xgbe_phy_rrc(pdata); 2415 xgbe_phy_rrc(pdata);
2367 } 2416 }
@@ -2669,6 +2718,103 @@ static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata)
2669 return true; 2718 return true;
2670} 2719}
2671 2720
2721static void xgbe_phy_cdr_track(struct xgbe_prv_data *pdata)
2722{
2723 struct xgbe_phy_data *phy_data = pdata->phy_data;
2724
2725 if (!pdata->debugfs_an_cdr_workaround)
2726 return;
2727
2728 if (!phy_data->phy_cdr_notrack)
2729 return;
2730
2731 usleep_range(phy_data->phy_cdr_delay,
2732 phy_data->phy_cdr_delay + 500);
2733
2734 XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
2735 XGBE_PMA_CDR_TRACK_EN_MASK,
2736 XGBE_PMA_CDR_TRACK_EN_ON);
2737
2738 phy_data->phy_cdr_notrack = 0;
2739}
2740
2741static void xgbe_phy_cdr_notrack(struct xgbe_prv_data *pdata)
2742{
2743 struct xgbe_phy_data *phy_data = pdata->phy_data;
2744
2745 if (!pdata->debugfs_an_cdr_workaround)
2746 return;
2747
2748 if (phy_data->phy_cdr_notrack)
2749 return;
2750
2751 XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
2752 XGBE_PMA_CDR_TRACK_EN_MASK,
2753 XGBE_PMA_CDR_TRACK_EN_OFF);
2754
2755 xgbe_phy_rrc(pdata);
2756
2757 phy_data->phy_cdr_notrack = 1;
2758}
2759
2760static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata)
2761{
2762 if (!pdata->debugfs_an_cdr_track_early)
2763 xgbe_phy_cdr_track(pdata);
2764}
2765
2766static void xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata)
2767{
2768 if (pdata->debugfs_an_cdr_track_early)
2769 xgbe_phy_cdr_track(pdata);
2770}
2771
2772static void xgbe_phy_an_post(struct xgbe_prv_data *pdata)
2773{
2774 struct xgbe_phy_data *phy_data = pdata->phy_data;
2775
2776 switch (pdata->an_mode) {
2777 case XGBE_AN_MODE_CL73:
2778 case XGBE_AN_MODE_CL73_REDRV:
2779 if (phy_data->cur_mode != XGBE_MODE_KR)
2780 break;
2781
2782 xgbe_phy_cdr_track(pdata);
2783
2784 switch (pdata->an_result) {
2785 case XGBE_AN_READY:
2786 case XGBE_AN_COMPLETE:
2787 break;
2788 default:
2789 if (phy_data->phy_cdr_delay < XGBE_CDR_DELAY_MAX)
2790 phy_data->phy_cdr_delay += XGBE_CDR_DELAY_INC;
2791 else
2792 phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;
2793 break;
2794 }
2795 break;
2796 default:
2797 break;
2798 }
2799}
2800
2801static void xgbe_phy_an_pre(struct xgbe_prv_data *pdata)
2802{
2803 struct xgbe_phy_data *phy_data = pdata->phy_data;
2804
2805 switch (pdata->an_mode) {
2806 case XGBE_AN_MODE_CL73:
2807 case XGBE_AN_MODE_CL73_REDRV:
2808 if (phy_data->cur_mode != XGBE_MODE_KR)
2809 break;
2810
2811 xgbe_phy_cdr_notrack(pdata);
2812 break;
2813 default:
2814 break;
2815 }
2816}
2817
2672static void xgbe_phy_stop(struct xgbe_prv_data *pdata) 2818static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
2673{ 2819{
2674 struct xgbe_phy_data *phy_data = pdata->phy_data; 2820 struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -2680,6 +2826,9 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
2680 xgbe_phy_sfp_reset(phy_data); 2826 xgbe_phy_sfp_reset(phy_data);
2681 xgbe_phy_sfp_mod_absent(pdata); 2827 xgbe_phy_sfp_mod_absent(pdata);
2682 2828
2829 /* Reset CDR support */
2830 xgbe_phy_cdr_track(pdata);
2831
2683 /* Power off the PHY */ 2832 /* Power off the PHY */
2684 xgbe_phy_power_off(pdata); 2833 xgbe_phy_power_off(pdata);
2685 2834
@@ -2712,6 +2861,9 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
2712 /* Start in highest supported mode */ 2861 /* Start in highest supported mode */
2713 xgbe_phy_set_mode(pdata, phy_data->start_mode); 2862 xgbe_phy_set_mode(pdata, phy_data->start_mode);
2714 2863
2864 /* Reset CDR support */
2865 xgbe_phy_cdr_track(pdata);
2866
2715 /* After starting the I2C controller, we can check for an SFP */ 2867 /* After starting the I2C controller, we can check for an SFP */
2716 switch (phy_data->port_mode) { 2868 switch (phy_data->port_mode) {
2717 case XGBE_PORT_MODE_SFP: 2869 case XGBE_PORT_MODE_SFP:
@@ -3019,6 +3171,8 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
3019 } 3171 }
3020 } 3172 }
3021 3173
3174 phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;
3175
3022 /* Register for driving external PHYs */ 3176 /* Register for driving external PHYs */
3023 mii = devm_mdiobus_alloc(pdata->dev); 3177 mii = devm_mdiobus_alloc(pdata->dev);
3024 if (!mii) { 3178 if (!mii) {
@@ -3071,4 +3225,10 @@ void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if)
3071 phy_impl->an_advertising = xgbe_phy_an_advertising; 3225 phy_impl->an_advertising = xgbe_phy_an_advertising;
3072 3226
3073 phy_impl->an_outcome = xgbe_phy_an_outcome; 3227 phy_impl->an_outcome = xgbe_phy_an_outcome;
3228
3229 phy_impl->an_pre = xgbe_phy_an_pre;
3230 phy_impl->an_post = xgbe_phy_an_post;
3231
3232 phy_impl->kr_training_pre = xgbe_phy_kr_training_pre;
3233 phy_impl->kr_training_post = xgbe_phy_kr_training_post;
3074} 3234}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index ad102c8bac7b..95d4b56448c6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -833,6 +833,7 @@ struct xgbe_hw_if {
833/* This structure represents implementation specific routines for an 833/* This structure represents implementation specific routines for an
834 * implementation of a PHY. All routines are required unless noted below. 834 * implementation of a PHY. All routines are required unless noted below.
835 * Optional routines: 835 * Optional routines:
836 * an_pre, an_post
836 * kr_training_pre, kr_training_post 837 * kr_training_pre, kr_training_post
837 */ 838 */
838struct xgbe_phy_impl_if { 839struct xgbe_phy_impl_if {
@@ -875,6 +876,10 @@ struct xgbe_phy_impl_if {
875 /* Process results of auto-negotiation */ 876 /* Process results of auto-negotiation */
876 enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *); 877 enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *);
877 878
879 /* Pre/Post auto-negotiation support */
880 void (*an_pre)(struct xgbe_prv_data *);
881 void (*an_post)(struct xgbe_prv_data *);
882
878 /* Pre/Post KR training enablement support */ 883 /* Pre/Post KR training enablement support */
879 void (*kr_training_pre)(struct xgbe_prv_data *); 884 void (*kr_training_pre)(struct xgbe_prv_data *);
880 void (*kr_training_post)(struct xgbe_prv_data *); 885 void (*kr_training_post)(struct xgbe_prv_data *);
@@ -989,6 +994,7 @@ struct xgbe_version_data {
989 unsigned int irq_reissue_support; 994 unsigned int irq_reissue_support;
990 unsigned int tx_desc_prefetch; 995 unsigned int tx_desc_prefetch;
991 unsigned int rx_desc_prefetch; 996 unsigned int rx_desc_prefetch;
997 unsigned int an_cdr_workaround;
992}; 998};
993 999
994struct xgbe_vxlan_data { 1000struct xgbe_vxlan_data {
@@ -1257,6 +1263,9 @@ struct xgbe_prv_data {
1257 unsigned int debugfs_xprop_reg; 1263 unsigned int debugfs_xprop_reg;
1258 1264
1259 unsigned int debugfs_xi2c_reg; 1265 unsigned int debugfs_xi2c_reg;
1266
1267 bool debugfs_an_cdr_workaround;
1268 bool debugfs_an_cdr_track_early;
1260}; 1269};
1261 1270
1262/* Function prototypes*/ 1271/* Function prototypes*/
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 1f622ca2a64f..8ba14ae00e8f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1927,22 +1927,39 @@ static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
1927 return retval; 1927 return retval;
1928} 1928}
1929 1929
1930static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen) 1930static void bnxt_get_pkgver(struct net_device *dev)
1931{ 1931{
1932 struct bnxt *bp = netdev_priv(dev);
1932 u16 index = 0; 1933 u16 index = 0;
1933 u32 datalen; 1934 char *pkgver;
1935 u32 pkglen;
1936 u8 *pkgbuf;
1937 int len;
1934 1938
1935 if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG, 1939 if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
1936 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, 1940 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
1937 &index, NULL, &datalen) != 0) 1941 &index, NULL, &pkglen) != 0)
1938 return NULL; 1942 return;
1939 1943
1940 memset(buf, 0, buflen); 1944 pkgbuf = kzalloc(pkglen, GFP_KERNEL);
1941 if (bnxt_get_nvram_item(dev, index, 0, datalen, buf) != 0) 1945 if (!pkgbuf) {
1942 return NULL; 1946 dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
1947 pkglen);
1948 return;
1949 }
1950
1951 if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf))
1952 goto err;
1943 1953
1944 return bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, buf, 1954 pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
1945 datalen); 1955 pkglen);
1956 if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
1957 len = strlen(bp->fw_ver_str);
1958 snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
1959 "/pkg %s", pkgver);
1960 }
1961err:
1962 kfree(pkgbuf);
1946} 1963}
1947 1964
1948static int bnxt_get_eeprom(struct net_device *dev, 1965static int bnxt_get_eeprom(struct net_device *dev,
@@ -2615,22 +2632,10 @@ void bnxt_ethtool_init(struct bnxt *bp)
2615 struct hwrm_selftest_qlist_input req = {0}; 2632 struct hwrm_selftest_qlist_input req = {0};
2616 struct bnxt_test_info *test_info; 2633 struct bnxt_test_info *test_info;
2617 struct net_device *dev = bp->dev; 2634 struct net_device *dev = bp->dev;
2618 char *pkglog;
2619 int i, rc; 2635 int i, rc;
2620 2636
2621 pkglog = kzalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL); 2637 bnxt_get_pkgver(dev);
2622 if (pkglog) {
2623 char *pkgver;
2624 int len;
2625 2638
2626 pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH);
2627 if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
2628 len = strlen(bp->fw_ver_str);
2629 snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
2630 "/pkg %s", pkgver);
2631 }
2632 kfree(pkglog);
2633 }
2634 if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp)) 2639 if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
2635 return; 2640 return;
2636 2641
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
index 73f2249555b5..83444811d3c6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
@@ -59,8 +59,6 @@ enum bnxt_nvm_directory_type {
59#define BNX_DIR_ATTR_NO_CHKSUM (1 << 0) 59#define BNX_DIR_ATTR_NO_CHKSUM (1 << 0)
60#define BNX_DIR_ATTR_PROP_STREAM (1 << 1) 60#define BNX_DIR_ATTR_PROP_STREAM (1 << 1)
61 61
62#define BNX_PKG_LOG_MAX_LENGTH 4096
63
64enum bnxnvm_pkglog_field_index { 62enum bnxnvm_pkglog_field_index {
65 BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0, 63 BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0,
66 BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1, 64 BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1,
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 3e62692af011..fa5b30f547f6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -87,7 +87,7 @@ do { \
87 87
88#define HNAE_AE_REGISTER 0x1 88#define HNAE_AE_REGISTER 0x1
89 89
90#define RCB_RING_NAME_LEN 16 90#define RCB_RING_NAME_LEN (IFNAMSIZ + 4)
91 91
92#define HNAE_LOWEST_LATENCY_COAL_PARAM 30 92#define HNAE_LOWEST_LATENCY_COAL_PARAM 30
93#define HNAE_LOW_LATENCY_COAL_PARAM 80 93#define HNAE_LOW_LATENCY_COAL_PARAM 80
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index aad5658d79d5..6e8d6a6f6aaf 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -794,46 +794,61 @@ static int ibmvnic_login(struct net_device *netdev)
794{ 794{
795 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 795 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
796 unsigned long timeout = msecs_to_jiffies(30000); 796 unsigned long timeout = msecs_to_jiffies(30000);
797 struct device *dev = &adapter->vdev->dev; 797 int retry_count = 0;
798 int rc; 798 int rc;
799 799
800 do { 800 do {
801 if (adapter->renegotiate) { 801 if (retry_count > IBMVNIC_MAX_QUEUES) {
802 adapter->renegotiate = false; 802 netdev_warn(netdev, "Login attempts exceeded\n");
803 return -1;
804 }
805
806 adapter->init_done_rc = 0;
807 reinit_completion(&adapter->init_done);
808 rc = send_login(adapter);
809 if (rc) {
810 netdev_warn(netdev, "Unable to login\n");
811 return rc;
812 }
813
814 if (!wait_for_completion_timeout(&adapter->init_done,
815 timeout)) {
816 netdev_warn(netdev, "Login timed out\n");
817 return -1;
818 }
819
820 if (adapter->init_done_rc == PARTIALSUCCESS) {
821 retry_count++;
803 release_sub_crqs(adapter, 1); 822 release_sub_crqs(adapter, 1);
804 823
824 adapter->init_done_rc = 0;
805 reinit_completion(&adapter->init_done); 825 reinit_completion(&adapter->init_done);
806 send_cap_queries(adapter); 826 send_cap_queries(adapter);
807 if (!wait_for_completion_timeout(&adapter->init_done, 827 if (!wait_for_completion_timeout(&adapter->init_done,
808 timeout)) { 828 timeout)) {
809 dev_err(dev, "Capabilities query timeout\n"); 829 netdev_warn(netdev,
830 "Capabilities query timed out\n");
810 return -1; 831 return -1;
811 } 832 }
833
812 rc = init_sub_crqs(adapter); 834 rc = init_sub_crqs(adapter);
813 if (rc) { 835 if (rc) {
814 dev_err(dev, 836 netdev_warn(netdev,
815 "Initialization of SCRQ's failed\n"); 837 "SCRQ initialization failed\n");
816 return -1; 838 return -1;
817 } 839 }
840
818 rc = init_sub_crq_irqs(adapter); 841 rc = init_sub_crq_irqs(adapter);
819 if (rc) { 842 if (rc) {
820 dev_err(dev, 843 netdev_warn(netdev,
821 "Initialization of SCRQ's irqs failed\n"); 844 "SCRQ irq initialization failed\n");
822 return -1; 845 return -1;
823 } 846 }
824 } 847 } else if (adapter->init_done_rc) {
825 848 netdev_warn(netdev, "Adapter login failed\n");
826 reinit_completion(&adapter->init_done);
827 rc = send_login(adapter);
828 if (rc) {
829 dev_err(dev, "Unable to attempt device login\n");
830 return rc;
831 } else if (!wait_for_completion_timeout(&adapter->init_done,
832 timeout)) {
833 dev_err(dev, "Login timeout\n");
834 return -1; 849 return -1;
835 } 850 }
836 } while (adapter->renegotiate); 851 } while (adapter->init_done_rc == PARTIALSUCCESS);
837 852
838 /* handle pending MAC address changes after successful login */ 853 /* handle pending MAC address changes after successful login */
839 if (adapter->mac_change_pending) { 854 if (adapter->mac_change_pending) {
@@ -1034,16 +1049,14 @@ static int __ibmvnic_open(struct net_device *netdev)
1034 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i); 1049 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
1035 if (prev_state == VNIC_CLOSED) 1050 if (prev_state == VNIC_CLOSED)
1036 enable_irq(adapter->rx_scrq[i]->irq); 1051 enable_irq(adapter->rx_scrq[i]->irq);
1037 else 1052 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
1038 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
1039 } 1053 }
1040 1054
1041 for (i = 0; i < adapter->req_tx_queues; i++) { 1055 for (i = 0; i < adapter->req_tx_queues; i++) {
1042 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i); 1056 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
1043 if (prev_state == VNIC_CLOSED) 1057 if (prev_state == VNIC_CLOSED)
1044 enable_irq(adapter->tx_scrq[i]->irq); 1058 enable_irq(adapter->tx_scrq[i]->irq);
1045 else 1059 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
1046 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
1047 } 1060 }
1048 1061
1049 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); 1062 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
@@ -1115,7 +1128,7 @@ static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1115 if (!adapter->rx_pool) 1128 if (!adapter->rx_pool)
1116 return; 1129 return;
1117 1130
1118 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); 1131 rx_scrqs = adapter->num_active_rx_pools;
1119 rx_entries = adapter->req_rx_add_entries_per_subcrq; 1132 rx_entries = adapter->req_rx_add_entries_per_subcrq;
1120 1133
1121 /* Free any remaining skbs in the rx buffer pools */ 1134 /* Free any remaining skbs in the rx buffer pools */
@@ -1164,7 +1177,7 @@ static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1164 if (!adapter->tx_pool || !adapter->tso_pool) 1177 if (!adapter->tx_pool || !adapter->tso_pool)
1165 return; 1178 return;
1166 1179
1167 tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); 1180 tx_scrqs = adapter->num_active_tx_pools;
1168 1181
1169 /* Free any remaining skbs in the tx buffer pools */ 1182 /* Free any remaining skbs in the tx buffer pools */
1170 for (i = 0; i < tx_scrqs; i++) { 1183 for (i = 0; i < tx_scrqs; i++) {
@@ -1184,6 +1197,7 @@ static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
1184 if (adapter->tx_scrq[i]->irq) { 1197 if (adapter->tx_scrq[i]->irq) {
1185 netdev_dbg(netdev, 1198 netdev_dbg(netdev,
1186 "Disabling tx_scrq[%d] irq\n", i); 1199 "Disabling tx_scrq[%d] irq\n", i);
1200 disable_scrq_irq(adapter, adapter->tx_scrq[i]);
1187 disable_irq(adapter->tx_scrq[i]->irq); 1201 disable_irq(adapter->tx_scrq[i]->irq);
1188 } 1202 }
1189 } 1203 }
@@ -1193,6 +1207,7 @@ static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
1193 if (adapter->rx_scrq[i]->irq) { 1207 if (adapter->rx_scrq[i]->irq) {
1194 netdev_dbg(netdev, 1208 netdev_dbg(netdev,
1195 "Disabling rx_scrq[%d] irq\n", i); 1209 "Disabling rx_scrq[%d] irq\n", i);
1210 disable_scrq_irq(adapter, adapter->rx_scrq[i]);
1196 disable_irq(adapter->rx_scrq[i]->irq); 1211 disable_irq(adapter->rx_scrq[i]->irq);
1197 } 1212 }
1198 } 1213 }
@@ -1828,7 +1843,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
1828 for (i = 0; i < adapter->req_rx_queues; i++) 1843 for (i = 0; i < adapter->req_rx_queues; i++)
1829 napi_schedule(&adapter->napi[i]); 1844 napi_schedule(&adapter->napi[i]);
1830 1845
1831 if (adapter->reset_reason != VNIC_RESET_FAILOVER) 1846 if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
1847 adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
1832 netdev_notify_peers(netdev); 1848 netdev_notify_peers(netdev);
1833 1849
1834 netif_carrier_on(netdev); 1850 netif_carrier_on(netdev);
@@ -2601,12 +2617,19 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
2601{ 2617{
2602 struct device *dev = &adapter->vdev->dev; 2618 struct device *dev = &adapter->vdev->dev;
2603 unsigned long rc; 2619 unsigned long rc;
2620 u64 val;
2604 2621
2605 if (scrq->hw_irq > 0x100000000ULL) { 2622 if (scrq->hw_irq > 0x100000000ULL) {
2606 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); 2623 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
2607 return 1; 2624 return 1;
2608 } 2625 }
2609 2626
2627 val = (0xff000000) | scrq->hw_irq;
2628 rc = plpar_hcall_norets(H_EOI, val);
2629 if (rc)
2630 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
2631 val, rc);
2632
2610 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 2633 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2611 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 2634 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2612 if (rc) 2635 if (rc)
@@ -3170,7 +3193,7 @@ static int send_version_xchg(struct ibmvnic_adapter *adapter)
3170struct vnic_login_client_data { 3193struct vnic_login_client_data {
3171 u8 type; 3194 u8 type;
3172 __be16 len; 3195 __be16 len;
3173 char name; 3196 char name[];
3174} __packed; 3197} __packed;
3175 3198
3176static int vnic_client_data_len(struct ibmvnic_adapter *adapter) 3199static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
@@ -3199,21 +3222,21 @@ static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3199 vlcd->type = 1; 3222 vlcd->type = 1;
3200 len = strlen(os_name) + 1; 3223 len = strlen(os_name) + 1;
3201 vlcd->len = cpu_to_be16(len); 3224 vlcd->len = cpu_to_be16(len);
3202 strncpy(&vlcd->name, os_name, len); 3225 strncpy(vlcd->name, os_name, len);
3203 vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len); 3226 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3204 3227
3205 /* Type 2 - LPAR name */ 3228 /* Type 2 - LPAR name */
3206 vlcd->type = 2; 3229 vlcd->type = 2;
3207 len = strlen(utsname()->nodename) + 1; 3230 len = strlen(utsname()->nodename) + 1;
3208 vlcd->len = cpu_to_be16(len); 3231 vlcd->len = cpu_to_be16(len);
3209 strncpy(&vlcd->name, utsname()->nodename, len); 3232 strncpy(vlcd->name, utsname()->nodename, len);
3210 vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len); 3233 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3211 3234
3212 /* Type 3 - device name */ 3235 /* Type 3 - device name */
3213 vlcd->type = 3; 3236 vlcd->type = 3;
3214 len = strlen(adapter->netdev->name) + 1; 3237 len = strlen(adapter->netdev->name) + 1;
3215 vlcd->len = cpu_to_be16(len); 3238 vlcd->len = cpu_to_be16(len);
3216 strncpy(&vlcd->name, adapter->netdev->name, len); 3239 strncpy(vlcd->name, adapter->netdev->name, len);
3217} 3240}
3218 3241
3219static int send_login(struct ibmvnic_adapter *adapter) 3242static int send_login(struct ibmvnic_adapter *adapter)
@@ -3942,7 +3965,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
3942 * to resend the login buffer with fewer queues requested. 3965 * to resend the login buffer with fewer queues requested.
3943 */ 3966 */
3944 if (login_rsp_crq->generic.rc.code) { 3967 if (login_rsp_crq->generic.rc.code) {
3945 adapter->renegotiate = true; 3968 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
3946 complete(&adapter->init_done); 3969 complete(&adapter->init_done);
3947 return 0; 3970 return 0;
3948 } 3971 }
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 99c0b58c2c39..22391e8805f6 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1035,7 +1035,6 @@ struct ibmvnic_adapter {
1035 1035
1036 struct ibmvnic_sub_crq_queue **tx_scrq; 1036 struct ibmvnic_sub_crq_queue **tx_scrq;
1037 struct ibmvnic_sub_crq_queue **rx_scrq; 1037 struct ibmvnic_sub_crq_queue **rx_scrq;
1038 bool renegotiate;
1039 1038
1040 /* rx structs */ 1039 /* rx structs */
1041 struct napi_struct *napi; 1040 struct napi_struct *napi;
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 5b13ca1bd85f..7dc5f045e969 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -586,7 +586,7 @@ struct ice_sw_rule_lg_act {
586#define ICE_LG_ACT_MIRROR_VSI_ID_S 3 586#define ICE_LG_ACT_MIRROR_VSI_ID_S 3
587#define ICE_LG_ACT_MIRROR_VSI_ID_M (0x3FF << ICE_LG_ACT_MIRROR_VSI_ID_S) 587#define ICE_LG_ACT_MIRROR_VSI_ID_M (0x3FF << ICE_LG_ACT_MIRROR_VSI_ID_S)
588 588
589 /* Action type = 5 - Large Action */ 589 /* Action type = 5 - Generic Value */
590#define ICE_LG_ACT_GENERIC 0x5 590#define ICE_LG_ACT_GENERIC 0x5
591#define ICE_LG_ACT_GENERIC_VALUE_S 3 591#define ICE_LG_ACT_GENERIC_VALUE_S 3
592#define ICE_LG_ACT_GENERIC_VALUE_M (0xFFFF << ICE_LG_ACT_GENERIC_VALUE_S) 592#define ICE_LG_ACT_GENERIC_VALUE_M (0xFFFF << ICE_LG_ACT_GENERIC_VALUE_S)
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 21977ec984c4..71d032cc5fa7 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -78,6 +78,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
78 struct ice_aq_desc desc; 78 struct ice_aq_desc desc;
79 enum ice_status status; 79 enum ice_status status;
80 u16 flags; 80 u16 flags;
81 u8 i;
81 82
82 cmd = &desc.params.mac_read; 83 cmd = &desc.params.mac_read;
83 84
@@ -98,8 +99,16 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
98 return ICE_ERR_CFG; 99 return ICE_ERR_CFG;
99 } 100 }
100 101
101 ether_addr_copy(hw->port_info->mac.lan_addr, resp->mac_addr); 102 /* A single port can report up to two (LAN and WoL) addresses */
102 ether_addr_copy(hw->port_info->mac.perm_addr, resp->mac_addr); 103 for (i = 0; i < cmd->num_addr; i++)
104 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
105 ether_addr_copy(hw->port_info->mac.lan_addr,
106 resp[i].mac_addr);
107 ether_addr_copy(hw->port_info->mac.perm_addr,
108 resp[i].mac_addr);
109 break;
110 }
111
103 return 0; 112 return 0;
104} 113}
105 114
@@ -464,9 +473,12 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
464 if (status) 473 if (status)
465 goto err_unroll_sched; 474 goto err_unroll_sched;
466 475
467 /* Get port MAC information */ 476 /* Get MAC information */
468 mac_buf_len = sizeof(struct ice_aqc_manage_mac_read_resp); 477 /* A single port can report up to two (LAN and WoL) addresses */
469 mac_buf = devm_kzalloc(ice_hw_to_dev(hw), mac_buf_len, GFP_KERNEL); 478 mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
479 sizeof(struct ice_aqc_manage_mac_read_resp),
480 GFP_KERNEL);
481 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
470 482
471 if (!mac_buf) { 483 if (!mac_buf) {
472 status = ICE_ERR_NO_MEMORY; 484 status = ICE_ERR_NO_MEMORY;
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 1b9e2ef48a9d..499904874b3f 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -121,8 +121,6 @@
121#define PFINT_FW_CTL_CAUSE_ENA_S 30 121#define PFINT_FW_CTL_CAUSE_ENA_S 30
122#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S) 122#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S)
123#define PFINT_OICR 0x0016CA00 123#define PFINT_OICR 0x0016CA00
124#define PFINT_OICR_INTEVENT_S 0
125#define PFINT_OICR_INTEVENT_M BIT(PFINT_OICR_INTEVENT_S)
126#define PFINT_OICR_HLP_RDY_S 14 124#define PFINT_OICR_HLP_RDY_S 14
127#define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S) 125#define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S)
128#define PFINT_OICR_CPM_RDY_S 15 126#define PFINT_OICR_CPM_RDY_S 15
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 210b7910f1cd..5299caf55a7f 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1722,9 +1722,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
1722 oicr = rd32(hw, PFINT_OICR); 1722 oicr = rd32(hw, PFINT_OICR);
1723 ena_mask = rd32(hw, PFINT_OICR_ENA); 1723 ena_mask = rd32(hw, PFINT_OICR_ENA);
1724 1724
1725 if (!(oicr & PFINT_OICR_INTEVENT_M))
1726 goto ena_intr;
1727
1728 if (oicr & PFINT_OICR_GRST_M) { 1725 if (oicr & PFINT_OICR_GRST_M) {
1729 u32 reset; 1726 u32 reset;
1730 /* we have a reset warning */ 1727 /* we have a reset warning */
@@ -1782,7 +1779,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
1782 } 1779 }
1783 ret = IRQ_HANDLED; 1780 ret = IRQ_HANDLED;
1784 1781
1785ena_intr:
1786 /* re-enable interrupt causes that are not handled during this pass */ 1782 /* re-enable interrupt causes that are not handled during this pass */
1787 wr32(hw, PFINT_OICR_ENA, ena_mask); 1783 wr32(hw, PFINT_OICR_ENA, ena_mask);
1788 if (!test_bit(__ICE_DOWN, pf->state)) { 1784 if (!test_bit(__ICE_DOWN, pf->state)) {
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index f16ff3e4a840..2e6c1d92cc88 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -751,14 +751,14 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
751 u16 num_added = 0; 751 u16 num_added = 0;
752 u32 temp; 752 u32 temp;
753 753
754 *num_nodes_added = 0;
755
754 if (!num_nodes) 756 if (!num_nodes)
755 return status; 757 return status;
756 758
757 if (!parent || layer < hw->sw_entry_point_layer) 759 if (!parent || layer < hw->sw_entry_point_layer)
758 return ICE_ERR_PARAM; 760 return ICE_ERR_PARAM;
759 761
760 *num_nodes_added = 0;
761
762 /* max children per node per layer */ 762 /* max children per node per layer */
763 max_child_nodes = 763 max_child_nodes =
764 le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children); 764 le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index c1c0bc30a16d..cce7ada89255 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1700,7 +1700,22 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
1700 WARN_ON(hw->mac.type != e1000_i210); 1700 WARN_ON(hw->mac.type != e1000_i210);
1701 WARN_ON(queue < 0 || queue > 1); 1701 WARN_ON(queue < 0 || queue > 1);
1702 1702
1703 if (enable) { 1703 if (enable || queue == 0) {
1704 /* i210 does not allow the queue 0 to be in the Strict
1705 * Priority mode while the Qav mode is enabled, so,
1706 * instead of disabling strict priority mode, we give
1707 * queue 0 the maximum of credits possible.
1708 *
1709 * See section 8.12.19 of the i210 datasheet, "Note:
1710 * Queue0 QueueMode must be set to 1b when
1711 * TransmitMode is set to Qav."
1712 */
1713 if (queue == 0 && !enable) {
1714 /* max "linkspeed" idleslope in kbps */
1715 idleslope = 1000000;
1716 hicredit = ETH_FRAME_LEN;
1717 }
1718
1704 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH); 1719 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
1705 set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION); 1720 set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
1706 1721
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 3d9033f26eff..e3d04f226d57 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3420,7 +3420,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
3420 if (!err) 3420 if (!err)
3421 continue; 3421 continue;
3422 hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j); 3422 hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j);
3423 break; 3423 goto err_setup_tx;
3424 } 3424 }
3425 3425
3426 return 0; 3426 return 0;
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 54a038943c06..4202f9b5b966 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -663,7 +663,7 @@ enum mvpp2_tag_type {
663#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31) 663#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
664#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \ 664#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \
665 MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1) 665 MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1)
666#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_VID_FILT_RANGE_START - 1) 666#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1)
667#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30) 667#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
668#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29) 668#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
669#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) 669#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
@@ -916,6 +916,8 @@ static struct {
916 916
917#define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ) 917#define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ)
918 918
919#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40)
920
919/* Definitions */ 921/* Definitions */
920 922
921/* Shared Packet Processor resources */ 923/* Shared Packet Processor resources */
@@ -1429,7 +1431,7 @@ static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
1429 if (port->priv->hw_version == MVPP21) 1431 if (port->priv->hw_version == MVPP21)
1430 return tx_desc->pp21.buf_dma_addr; 1432 return tx_desc->pp21.buf_dma_addr;
1431 else 1433 else
1432 return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0); 1434 return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK;
1433} 1435}
1434 1436
1435static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, 1437static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
@@ -1447,7 +1449,7 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
1447 } else { 1449 } else {
1448 u64 val = (u64)addr; 1450 u64 val = (u64)addr;
1449 1451
1450 tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0); 1452 tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK;
1451 tx_desc->pp22.buf_dma_addr_ptp |= val; 1453 tx_desc->pp22.buf_dma_addr_ptp |= val;
1452 tx_desc->pp22.packet_offset = offset; 1454 tx_desc->pp22.packet_offset = offset;
1453 } 1455 }
@@ -1507,7 +1509,7 @@ static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
1507 if (port->priv->hw_version == MVPP21) 1509 if (port->priv->hw_version == MVPP21)
1508 return rx_desc->pp21.buf_dma_addr; 1510 return rx_desc->pp21.buf_dma_addr;
1509 else 1511 else
1510 return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0); 1512 return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK;
1511} 1513}
1512 1514
1513static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, 1515static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
@@ -1516,7 +1518,7 @@ static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
1516 if (port->priv->hw_version == MVPP21) 1518 if (port->priv->hw_version == MVPP21)
1517 return rx_desc->pp21.buf_cookie; 1519 return rx_desc->pp21.buf_cookie;
1518 else 1520 else
1519 return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0); 1521 return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK;
1520} 1522}
1521 1523
1522static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, 1524static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
@@ -8789,7 +8791,7 @@ static int mvpp2_probe(struct platform_device *pdev)
8789 } 8791 }
8790 8792
8791 if (priv->hw_version == MVPP22) { 8793 if (priv->hw_version == MVPP22) {
8792 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)); 8794 err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
8793 if (err) 8795 if (err)
8794 goto err_mg_clk; 8796 goto err_mg_clk;
8795 /* Sadly, the BM pools all share the same register to 8797 /* Sadly, the BM pools all share the same register to
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index 3735c09d2112..577659f332e4 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -258,9 +258,6 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
258 case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS: 258 case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
259 nfp_tunnel_keep_alive(app, skb); 259 nfp_tunnel_keep_alive(app, skb);
260 break; 260 break;
261 case NFP_FLOWER_CMSG_TYPE_TUN_NEIGH:
262 /* Acks from the NFP that the route is added - ignore. */
263 break;
264 default: 261 default:
265 nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n", 262 nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
266 type); 263 type);
@@ -275,18 +272,49 @@ out:
275 272
276void nfp_flower_cmsg_process_rx(struct work_struct *work) 273void nfp_flower_cmsg_process_rx(struct work_struct *work)
277{ 274{
275 struct sk_buff_head cmsg_joined;
278 struct nfp_flower_priv *priv; 276 struct nfp_flower_priv *priv;
279 struct sk_buff *skb; 277 struct sk_buff *skb;
280 278
281 priv = container_of(work, struct nfp_flower_priv, cmsg_work); 279 priv = container_of(work, struct nfp_flower_priv, cmsg_work);
280 skb_queue_head_init(&cmsg_joined);
281
282 spin_lock_bh(&priv->cmsg_skbs_high.lock);
283 skb_queue_splice_tail_init(&priv->cmsg_skbs_high, &cmsg_joined);
284 spin_unlock_bh(&priv->cmsg_skbs_high.lock);
282 285
283 while ((skb = skb_dequeue(&priv->cmsg_skbs))) 286 spin_lock_bh(&priv->cmsg_skbs_low.lock);
287 skb_queue_splice_tail_init(&priv->cmsg_skbs_low, &cmsg_joined);
288 spin_unlock_bh(&priv->cmsg_skbs_low.lock);
289
290 while ((skb = __skb_dequeue(&cmsg_joined)))
284 nfp_flower_cmsg_process_one_rx(priv->app, skb); 291 nfp_flower_cmsg_process_one_rx(priv->app, skb);
285} 292}
286 293
287void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) 294static void
295nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type)
288{ 296{
289 struct nfp_flower_priv *priv = app->priv; 297 struct nfp_flower_priv *priv = app->priv;
298 struct sk_buff_head *skb_head;
299
300 if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY ||
301 type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
302 skb_head = &priv->cmsg_skbs_high;
303 else
304 skb_head = &priv->cmsg_skbs_low;
305
306 if (skb_queue_len(skb_head) >= NFP_FLOWER_WORKQ_MAX_SKBS) {
307 nfp_flower_cmsg_warn(app, "Dropping queued control messages\n");
308 dev_kfree_skb_any(skb);
309 return;
310 }
311
312 skb_queue_tail(skb_head, skb);
313 schedule_work(&priv->cmsg_work);
314}
315
316void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
317{
290 struct nfp_flower_cmsg_hdr *cmsg_hdr; 318 struct nfp_flower_cmsg_hdr *cmsg_hdr;
291 319
292 cmsg_hdr = nfp_flower_cmsg_get_hdr(skb); 320 cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
@@ -306,8 +334,10 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
306 nfp_flower_process_mtu_ack(app, skb)) { 334 nfp_flower_process_mtu_ack(app, skb)) {
307 /* Handle MTU acks outside wq to prevent RTNL conflict. */ 335 /* Handle MTU acks outside wq to prevent RTNL conflict. */
308 dev_consume_skb_any(skb); 336 dev_consume_skb_any(skb);
337 } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) {
338 /* Acks from the NFP that the route is added - ignore. */
339 dev_consume_skb_any(skb);
309 } else { 340 } else {
310 skb_queue_tail(&priv->cmsg_skbs, skb); 341 nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type);
311 schedule_work(&priv->cmsg_work);
312 } 342 }
313} 343}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 96bc0e33980c..b6c0fd053a50 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -108,6 +108,8 @@
108#define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4) 108#define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4)
109#define NFP_FL_IPV4_PRE_TUN_INDEX GENMASK(2, 0) 109#define NFP_FL_IPV4_PRE_TUN_INDEX GENMASK(2, 0)
110 110
111#define NFP_FLOWER_WORKQ_MAX_SKBS 30000
112
111#define nfp_flower_cmsg_warn(app, fmt, args...) \ 113#define nfp_flower_cmsg_warn(app, fmt, args...) \
112 do { \ 114 do { \
113 if (net_ratelimit()) \ 115 if (net_ratelimit()) \
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 6357e0720f43..ad02592a82b7 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -519,7 +519,8 @@ static int nfp_flower_init(struct nfp_app *app)
519 519
520 app->priv = app_priv; 520 app->priv = app_priv;
521 app_priv->app = app; 521 app_priv->app = app;
522 skb_queue_head_init(&app_priv->cmsg_skbs); 522 skb_queue_head_init(&app_priv->cmsg_skbs_high);
523 skb_queue_head_init(&app_priv->cmsg_skbs_low);
523 INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx); 524 INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
524 init_waitqueue_head(&app_priv->reify_wait_queue); 525 init_waitqueue_head(&app_priv->reify_wait_queue);
525 526
@@ -549,7 +550,8 @@ static void nfp_flower_clean(struct nfp_app *app)
549{ 550{
550 struct nfp_flower_priv *app_priv = app->priv; 551 struct nfp_flower_priv *app_priv = app->priv;
551 552
552 skb_queue_purge(&app_priv->cmsg_skbs); 553 skb_queue_purge(&app_priv->cmsg_skbs_high);
554 skb_queue_purge(&app_priv->cmsg_skbs_low);
553 flush_work(&app_priv->cmsg_work); 555 flush_work(&app_priv->cmsg_work);
554 556
555 nfp_flower_metadata_cleanup(app); 557 nfp_flower_metadata_cleanup(app);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index e030b3ce4510..c67e1b54c614 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -107,7 +107,10 @@ struct nfp_mtu_conf {
107 * @mask_table: Hash table used to store masks 107 * @mask_table: Hash table used to store masks
108 * @flow_table: Hash table used to store flower rules 108 * @flow_table: Hash table used to store flower rules
109 * @cmsg_work: Workqueue for control messages processing 109 * @cmsg_work: Workqueue for control messages processing
110 * @cmsg_skbs: List of skbs for control message processing 110 * @cmsg_skbs_high: List of higher priority skbs for control message
111 * processing
112 * @cmsg_skbs_low: List of lower priority skbs for control message
113 * processing
111 * @nfp_mac_off_list: List of MAC addresses to offload 114 * @nfp_mac_off_list: List of MAC addresses to offload
112 * @nfp_mac_index_list: List of unique 8-bit indexes for non NFP netdevs 115 * @nfp_mac_index_list: List of unique 8-bit indexes for non NFP netdevs
113 * @nfp_ipv4_off_list: List of IPv4 addresses to offload 116 * @nfp_ipv4_off_list: List of IPv4 addresses to offload
@@ -136,7 +139,8 @@ struct nfp_flower_priv {
136 DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS); 139 DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
137 DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS); 140 DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS);
138 struct work_struct cmsg_work; 141 struct work_struct cmsg_work;
139 struct sk_buff_head cmsg_skbs; 142 struct sk_buff_head cmsg_skbs_high;
143 struct sk_buff_head cmsg_skbs_low;
140 struct list_head nfp_mac_off_list; 144 struct list_head nfp_mac_off_list;
141 struct list_head nfp_mac_index_list; 145 struct list_head nfp_mac_index_list;
142 struct list_head nfp_ipv4_off_list; 146 struct list_head nfp_ipv4_off_list;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
index f7b958181126..cb28ac03e4ca 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
@@ -211,8 +211,11 @@ int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
211 break; 211 break;
212 212
213 err = msleep_interruptible(timeout_ms); 213 err = msleep_interruptible(timeout_ms);
214 if (err != 0) 214 if (err != 0) {
215 nfp_info(mutex->cpp,
216 "interrupted waiting for NFP mutex\n");
215 return -ERESTARTSYS; 217 return -ERESTARTSYS;
218 }
216 219
217 if (time_is_before_eq_jiffies(warn_at)) { 220 if (time_is_before_eq_jiffies(warn_at)) {
218 warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ; 221 warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 99bb679a9801..2abee0fe3a7c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -281,8 +281,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr,
281 if ((*reg & mask) == val) 281 if ((*reg & mask) == val)
282 return 0; 282 return 0;
283 283
284 if (msleep_interruptible(25)) 284 msleep(25);
285 return -ERESTARTSYS;
286 285
287 if (time_after(start_time, wait_until)) 286 if (time_after(start_time, wait_until))
288 return -ETIMEDOUT; 287 return -ETIMEDOUT;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index d33988570217..5f4e447c5dce 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -350,15 +350,16 @@ static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)
350 350
351 real_dev = priv->real_dev; 351 real_dev = priv->real_dev;
352 352
353 if (!rmnet_is_real_dev_registered(real_dev))
354 return -ENODEV;
355
356 if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id)) 353 if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id))
357 goto nla_put_failure; 354 goto nla_put_failure;
358 355
359 port = rmnet_get_port_rtnl(real_dev); 356 if (rmnet_is_real_dev_registered(real_dev)) {
357 port = rmnet_get_port_rtnl(real_dev);
358 f.flags = port->data_format;
359 } else {
360 f.flags = 0;
361 }
360 362
361 f.flags = port->data_format;
362 f.mask = ~0; 363 f.mask = ~0;
363 364
364 if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f)) 365 if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f))
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 50daad0a1482..63036d9bf3e6 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3999,29 +3999,6 @@ static void efx_ef10_prepare_flr(struct efx_nic *efx)
3999 atomic_set(&efx->active_queues, 0); 3999 atomic_set(&efx->active_queues, 0);
4000} 4000}
4001 4001
4002static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
4003 const struct efx_filter_spec *right)
4004{
4005 if ((left->match_flags ^ right->match_flags) |
4006 ((left->flags ^ right->flags) &
4007 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
4008 return false;
4009
4010 return memcmp(&left->outer_vid, &right->outer_vid,
4011 sizeof(struct efx_filter_spec) -
4012 offsetof(struct efx_filter_spec, outer_vid)) == 0;
4013}
4014
4015static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
4016{
4017 BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
4018 return jhash2((const u32 *)&spec->outer_vid,
4019 (sizeof(struct efx_filter_spec) -
4020 offsetof(struct efx_filter_spec, outer_vid)) / 4,
4021 0);
4022 /* XXX should we randomise the initval? */
4023}
4024
4025/* Decide whether a filter should be exclusive or else should allow 4002/* Decide whether a filter should be exclusive or else should allow
4026 * delivery to additional recipients. Currently we decide that 4003 * delivery to additional recipients. Currently we decide that
4027 * filters for specific local unicast MAC and IP addresses are 4004 * filters for specific local unicast MAC and IP addresses are
@@ -4346,7 +4323,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
4346 goto out_unlock; 4323 goto out_unlock;
4347 match_pri = rc; 4324 match_pri = rc;
4348 4325
4349 hash = efx_ef10_filter_hash(spec); 4326 hash = efx_filter_spec_hash(spec);
4350 is_mc_recip = efx_filter_is_mc_recipient(spec); 4327 is_mc_recip = efx_filter_is_mc_recipient(spec);
4351 if (is_mc_recip) 4328 if (is_mc_recip)
4352 bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); 4329 bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
@@ -4378,7 +4355,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
4378 if (!saved_spec) { 4355 if (!saved_spec) {
4379 if (ins_index < 0) 4356 if (ins_index < 0)
4380 ins_index = i; 4357 ins_index = i;
4381 } else if (efx_ef10_filter_equal(spec, saved_spec)) { 4358 } else if (efx_filter_spec_equal(spec, saved_spec)) {
4382 if (spec->priority < saved_spec->priority && 4359 if (spec->priority < saved_spec->priority &&
4383 spec->priority != EFX_FILTER_PRI_AUTO) { 4360 spec->priority != EFX_FILTER_PRI_AUTO) {
4384 rc = -EPERM; 4361 rc = -EPERM;
@@ -4762,28 +4739,62 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
4762static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, 4739static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
4763 unsigned int filter_idx) 4740 unsigned int filter_idx)
4764{ 4741{
4742 struct efx_filter_spec *spec, saved_spec;
4765 struct efx_ef10_filter_table *table; 4743 struct efx_ef10_filter_table *table;
4766 struct efx_filter_spec *spec; 4744 struct efx_arfs_rule *rule = NULL;
4767 bool ret; 4745 bool ret = true, force = false;
4746 u16 arfs_id;
4768 4747
4769 down_read(&efx->filter_sem); 4748 down_read(&efx->filter_sem);
4770 table = efx->filter_state; 4749 table = efx->filter_state;
4771 down_write(&table->lock); 4750 down_write(&table->lock);
4772 spec = efx_ef10_filter_entry_spec(table, filter_idx); 4751 spec = efx_ef10_filter_entry_spec(table, filter_idx);
4773 4752
4774 if (!spec || spec->priority != EFX_FILTER_PRI_HINT) { 4753 if (!spec || spec->priority != EFX_FILTER_PRI_HINT)
4775 ret = true;
4776 goto out_unlock; 4754 goto out_unlock;
4777 }
4778 4755
4779 if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, 4756 spin_lock_bh(&efx->rps_hash_lock);
4780 flow_id, filter_idx)) { 4757 if (!efx->rps_hash_table) {
4781 ret = false; 4758 /* In the absence of the table, we always return 0 to ARFS. */
4782 goto out_unlock; 4759 arfs_id = 0;
4760 } else {
4761 rule = efx_rps_hash_find(efx, spec);
4762 if (!rule)
4763 /* ARFS table doesn't know of this filter, so remove it */
4764 goto expire;
4765 arfs_id = rule->arfs_id;
4766 ret = efx_rps_check_rule(rule, filter_idx, &force);
4767 if (force)
4768 goto expire;
4769 if (!ret) {
4770 spin_unlock_bh(&efx->rps_hash_lock);
4771 goto out_unlock;
4772 }
4783 } 4773 }
4784 4774 if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, arfs_id))
4775 ret = false;
4776 else if (rule)
4777 rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
4778expire:
4779 saved_spec = *spec; /* remove operation will kfree spec */
4780 spin_unlock_bh(&efx->rps_hash_lock);
4781 /* At this point (since we dropped the lock), another thread might queue
4782 * up a fresh insertion request (but the actual insertion will be held
4783 * up by our possession of the filter table lock). In that case, it
4784 * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that
4785 * the rule is not removed by efx_rps_hash_del() below.
4786 */
4785 ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority, 4787 ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority,
4786 filter_idx, true) == 0; 4788 filter_idx, true) == 0;
4789 /* While we can't safely dereference rule (we dropped the lock), we can
4790 * still test it for NULL.
4791 */
4792 if (ret && rule) {
4793 /* Expiring, so remove entry from ARFS table */
4794 spin_lock_bh(&efx->rps_hash_lock);
4795 efx_rps_hash_del(efx, &saved_spec);
4796 spin_unlock_bh(&efx->rps_hash_lock);
4797 }
4787out_unlock: 4798out_unlock:
4788 up_write(&table->lock); 4799 up_write(&table->lock);
4789 up_read(&efx->filter_sem); 4800 up_read(&efx->filter_sem);
@@ -5265,7 +5276,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
5265 ids = vlan->uc; 5276 ids = vlan->uc;
5266 } 5277 }
5267 5278
5268 filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; 5279 filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
5269 5280
5270 /* Insert/renew filters */ 5281 /* Insert/renew filters */
5271 for (i = 0; i < addr_count; i++) { 5282 for (i = 0; i < addr_count; i++) {
@@ -5334,7 +5345,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
5334 int rc; 5345 int rc;
5335 u16 *id; 5346 u16 *id;
5336 5347
5337 filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; 5348 filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
5338 5349
5339 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 5350 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
5340 5351
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 692dd729ee2a..a4ebd8715494 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -3027,6 +3027,10 @@ static int efx_init_struct(struct efx_nic *efx,
3027 mutex_init(&efx->mac_lock); 3027 mutex_init(&efx->mac_lock);
3028#ifdef CONFIG_RFS_ACCEL 3028#ifdef CONFIG_RFS_ACCEL
3029 mutex_init(&efx->rps_mutex); 3029 mutex_init(&efx->rps_mutex);
3030 spin_lock_init(&efx->rps_hash_lock);
3031 /* Failure to allocate is not fatal, but may degrade ARFS performance */
3032 efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
3033 sizeof(*efx->rps_hash_table), GFP_KERNEL);
3030#endif 3034#endif
3031 efx->phy_op = &efx_dummy_phy_operations; 3035 efx->phy_op = &efx_dummy_phy_operations;
3032 efx->mdio.dev = net_dev; 3036 efx->mdio.dev = net_dev;
@@ -3070,6 +3074,10 @@ static void efx_fini_struct(struct efx_nic *efx)
3070{ 3074{
3071 int i; 3075 int i;
3072 3076
3077#ifdef CONFIG_RFS_ACCEL
3078 kfree(efx->rps_hash_table);
3079#endif
3080
3073 for (i = 0; i < EFX_MAX_CHANNELS; i++) 3081 for (i = 0; i < EFX_MAX_CHANNELS; i++)
3074 kfree(efx->channel[i]); 3082 kfree(efx->channel[i]);
3075 3083
@@ -3092,6 +3100,141 @@ void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
3092 stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops); 3100 stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
3093} 3101}
3094 3102
3103bool efx_filter_spec_equal(const struct efx_filter_spec *left,
3104 const struct efx_filter_spec *right)
3105{
3106 if ((left->match_flags ^ right->match_flags) |
3107 ((left->flags ^ right->flags) &
3108 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
3109 return false;
3110
3111 return memcmp(&left->outer_vid, &right->outer_vid,
3112 sizeof(struct efx_filter_spec) -
3113 offsetof(struct efx_filter_spec, outer_vid)) == 0;
3114}
3115
3116u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
3117{
3118 BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
3119 return jhash2((const u32 *)&spec->outer_vid,
3120 (sizeof(struct efx_filter_spec) -
3121 offsetof(struct efx_filter_spec, outer_vid)) / 4,
3122 0);
3123}
3124
3125#ifdef CONFIG_RFS_ACCEL
3126bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
3127 bool *force)
3128{
3129 if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
3130 /* ARFS is currently updating this entry, leave it */
3131 return false;
3132 }
3133 if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
3134 /* ARFS tried and failed to update this, so it's probably out
3135 * of date. Remove the filter and the ARFS rule entry.
3136 */
3137 rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
3138 *force = true;
3139 return true;
3140 } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
3141 /* ARFS has moved on, so old filter is not needed. Since we did
3142 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
3143 * not be removed by efx_rps_hash_del() subsequently.
3144 */
3145 *force = true;
3146 return true;
3147 }
3148 /* Remove it iff ARFS wants to. */
3149 return true;
3150}
3151
3152struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
3153 const struct efx_filter_spec *spec)
3154{
3155 u32 hash = efx_filter_spec_hash(spec);
3156
3157 WARN_ON(!spin_is_locked(&efx->rps_hash_lock));
3158 if (!efx->rps_hash_table)
3159 return NULL;
3160 return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
3161}
3162
3163struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
3164 const struct efx_filter_spec *spec)
3165{
3166 struct efx_arfs_rule *rule;
3167 struct hlist_head *head;
3168 struct hlist_node *node;
3169
3170 head = efx_rps_hash_bucket(efx, spec);
3171 if (!head)
3172 return NULL;
3173 hlist_for_each(node, head) {
3174 rule = container_of(node, struct efx_arfs_rule, node);
3175 if (efx_filter_spec_equal(spec, &rule->spec))
3176 return rule;
3177 }
3178 return NULL;
3179}
3180
3181struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
3182 const struct efx_filter_spec *spec,
3183 bool *new)
3184{
3185 struct efx_arfs_rule *rule;
3186 struct hlist_head *head;
3187 struct hlist_node *node;
3188
3189 head = efx_rps_hash_bucket(efx, spec);
3190 if (!head)
3191 return NULL;
3192 hlist_for_each(node, head) {
3193 rule = container_of(node, struct efx_arfs_rule, node);
3194 if (efx_filter_spec_equal(spec, &rule->spec)) {
3195 *new = false;
3196 return rule;
3197 }
3198 }
3199 rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
3200 *new = true;
3201 if (rule) {
3202 memcpy(&rule->spec, spec, sizeof(rule->spec));
3203 hlist_add_head(&rule->node, head);
3204 }
3205 return rule;
3206}
3207
3208void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
3209{
3210 struct efx_arfs_rule *rule;
3211 struct hlist_head *head;
3212 struct hlist_node *node;
3213
3214 head = efx_rps_hash_bucket(efx, spec);
3215 if (WARN_ON(!head))
3216 return;
3217 hlist_for_each(node, head) {
3218 rule = container_of(node, struct efx_arfs_rule, node);
3219 if (efx_filter_spec_equal(spec, &rule->spec)) {
3220 /* Someone already reused the entry. We know that if
3221 * this check doesn't fire (i.e. filter_id == REMOVING)
3222 * then the REMOVING mark was put there by our caller,
3223 * because caller is holding a lock on filter table and
3224 * only holders of that lock set REMOVING.
3225 */
3226 if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
3227 return;
3228 hlist_del(node);
3229 kfree(rule);
3230 return;
3231 }
3232 }
3233 /* We didn't find it. */
3234 WARN_ON(1);
3235}
3236#endif
3237
3095/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because 3238/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
3096 * (a) this is an infrequent control-plane operation and (b) n is small (max 64) 3239 * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
3097 */ 3240 */
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index a3140e16fcef..3f759ebdcf10 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -186,6 +186,27 @@ static inline void efx_filter_rfs_expire(struct work_struct *data) {}
186#endif 186#endif
187bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec); 187bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
188 188
189bool efx_filter_spec_equal(const struct efx_filter_spec *left,
190 const struct efx_filter_spec *right);
191u32 efx_filter_spec_hash(const struct efx_filter_spec *spec);
192
193#ifdef CONFIG_RFS_ACCEL
194bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
195 bool *force);
196
197struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
198 const struct efx_filter_spec *spec);
199
200/* @new is written to indicate if entry was newly added (true) or if an old
201 * entry was found and returned (false).
202 */
203struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
204 const struct efx_filter_spec *spec,
205 bool *new);
206
207void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec);
208#endif
209
189/* RSS contexts */ 210/* RSS contexts */
190struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx); 211struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
191struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id); 212struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 4a19c7efdf8d..c72adf8b52ea 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -2905,18 +2905,45 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
2905{ 2905{
2906 struct efx_farch_filter_state *state = efx->filter_state; 2906 struct efx_farch_filter_state *state = efx->filter_state;
2907 struct efx_farch_filter_table *table; 2907 struct efx_farch_filter_table *table;
2908 bool ret = false; 2908 bool ret = false, force = false;
2909 u16 arfs_id;
2909 2910
2910 down_write(&state->lock); 2911 down_write(&state->lock);
2912 spin_lock_bh(&efx->rps_hash_lock);
2911 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; 2913 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
2912 if (test_bit(index, table->used_bitmap) && 2914 if (test_bit(index, table->used_bitmap) &&
2913 table->spec[index].priority == EFX_FILTER_PRI_HINT && 2915 table->spec[index].priority == EFX_FILTER_PRI_HINT) {
2914 rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id, 2916 struct efx_arfs_rule *rule = NULL;
2915 flow_id, index)) { 2917 struct efx_filter_spec spec;
2916 efx_farch_filter_table_clear_entry(efx, table, index); 2918
2917 ret = true; 2919 efx_farch_filter_to_gen_spec(&spec, &table->spec[index]);
2920 if (!efx->rps_hash_table) {
2921 /* In the absence of the table, we always returned 0 to
2922 * ARFS, so use the same to query it.
2923 */
2924 arfs_id = 0;
2925 } else {
2926 rule = efx_rps_hash_find(efx, &spec);
2927 if (!rule) {
2928 /* ARFS table doesn't know of this filter, remove it */
2929 force = true;
2930 } else {
2931 arfs_id = rule->arfs_id;
2932 if (!efx_rps_check_rule(rule, index, &force))
2933 goto out_unlock;
2934 }
2935 }
2936 if (force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id,
2937 flow_id, arfs_id)) {
2938 if (rule)
2939 rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
2940 efx_rps_hash_del(efx, &spec);
2941 efx_farch_filter_table_clear_entry(efx, table, index);
2942 ret = true;
2943 }
2918 } 2944 }
2919 2945out_unlock:
2946 spin_unlock_bh(&efx->rps_hash_lock);
2920 up_write(&state->lock); 2947 up_write(&state->lock);
2921 return ret; 2948 return ret;
2922} 2949}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 5e379a83c729..65568925c3ef 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -733,6 +733,56 @@ struct efx_rss_context {
733 u32 rx_indir_table[128]; 733 u32 rx_indir_table[128];
734}; 734};
735 735
736#ifdef CONFIG_RFS_ACCEL
737/* Order of these is important, since filter_id >= %EFX_ARFS_FILTER_ID_PENDING
738 * is used to test if filter does or will exist.
739 */
740#define EFX_ARFS_FILTER_ID_PENDING -1
741#define EFX_ARFS_FILTER_ID_ERROR -2
742#define EFX_ARFS_FILTER_ID_REMOVING -3
743/**
744 * struct efx_arfs_rule - record of an ARFS filter and its IDs
745 * @node: linkage into hash table
746 * @spec: details of the filter (used as key for hash table). Use efx->type to
747 * determine which member to use.
748 * @rxq_index: channel to which the filter will steer traffic.
749 * @arfs_id: filter ID which was returned to ARFS
750 * @filter_id: index in software filter table. May be
751 * %EFX_ARFS_FILTER_ID_PENDING if filter was not inserted yet,
752 * %EFX_ARFS_FILTER_ID_ERROR if filter insertion failed, or
753 * %EFX_ARFS_FILTER_ID_REMOVING if expiry is currently removing the filter.
754 */
755struct efx_arfs_rule {
756 struct hlist_node node;
757 struct efx_filter_spec spec;
758 u16 rxq_index;
759 u16 arfs_id;
760 s32 filter_id;
761};
762
763/* Size chosen so that the table is one page (4kB) */
764#define EFX_ARFS_HASH_TABLE_SIZE 512
765
766/**
767 * struct efx_async_filter_insertion - Request to asynchronously insert a filter
768 * @net_dev: Reference to the netdevice
769 * @spec: The filter to insert
770 * @work: Workitem for this request
771 * @rxq_index: Identifies the channel for which this request was made
772 * @flow_id: Identifies the kernel-side flow for which this request was made
773 */
774struct efx_async_filter_insertion {
775 struct net_device *net_dev;
776 struct efx_filter_spec spec;
777 struct work_struct work;
778 u16 rxq_index;
779 u32 flow_id;
780};
781
782/* Maximum number of ARFS workitems that may be in flight on an efx_nic */
783#define EFX_RPS_MAX_IN_FLIGHT 8
784#endif /* CONFIG_RFS_ACCEL */
785
736/** 786/**
737 * struct efx_nic - an Efx NIC 787 * struct efx_nic - an Efx NIC
738 * @name: Device name (net device name or bus id before net device registered) 788 * @name: Device name (net device name or bus id before net device registered)
@@ -850,6 +900,12 @@ struct efx_rss_context {
850 * @rps_expire_channel: Next channel to check for expiry 900 * @rps_expire_channel: Next channel to check for expiry
851 * @rps_expire_index: Next index to check for expiry in 901 * @rps_expire_index: Next index to check for expiry in
852 * @rps_expire_channel's @rps_flow_id 902 * @rps_expire_channel's @rps_flow_id
903 * @rps_slot_map: bitmap of in-flight entries in @rps_slot
904 * @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work()
905 * @rps_hash_lock: Protects ARFS filter mapping state (@rps_hash_table and
906 * @rps_next_id).
907 * @rps_hash_table: Mapping between ARFS filters and their various IDs
908 * @rps_next_id: next arfs_id for an ARFS filter
853 * @active_queues: Count of RX and TX queues that haven't been flushed and drained. 909 * @active_queues: Count of RX and TX queues that haven't been flushed and drained.
854 * @rxq_flush_pending: Count of number of receive queues that need to be flushed. 910 * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
855 * Decremented when the efx_flush_rx_queue() is called. 911 * Decremented when the efx_flush_rx_queue() is called.
@@ -1004,6 +1060,11 @@ struct efx_nic {
1004 struct mutex rps_mutex; 1060 struct mutex rps_mutex;
1005 unsigned int rps_expire_channel; 1061 unsigned int rps_expire_channel;
1006 unsigned int rps_expire_index; 1062 unsigned int rps_expire_index;
1063 unsigned long rps_slot_map;
1064 struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT];
1065 spinlock_t rps_hash_lock;
1066 struct hlist_head *rps_hash_table;
1067 u32 rps_next_id;
1007#endif 1068#endif
1008 1069
1009 atomic_t active_queues; 1070 atomic_t active_queues;
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 95682831484e..64a94f242027 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -827,31 +827,36 @@ MODULE_PARM_DESC(rx_refill_threshold,
827 827
828#ifdef CONFIG_RFS_ACCEL 828#ifdef CONFIG_RFS_ACCEL
829 829
830/**
831 * struct efx_async_filter_insertion - Request to asynchronously insert a filter
832 * @net_dev: Reference to the netdevice
833 * @spec: The filter to insert
834 * @work: Workitem for this request
835 * @rxq_index: Identifies the channel for which this request was made
836 * @flow_id: Identifies the kernel-side flow for which this request was made
837 */
838struct efx_async_filter_insertion {
839 struct net_device *net_dev;
840 struct efx_filter_spec spec;
841 struct work_struct work;
842 u16 rxq_index;
843 u32 flow_id;
844};
845
846static void efx_filter_rfs_work(struct work_struct *data) 830static void efx_filter_rfs_work(struct work_struct *data)
847{ 831{
848 struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion, 832 struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
849 work); 833 work);
850 struct efx_nic *efx = netdev_priv(req->net_dev); 834 struct efx_nic *efx = netdev_priv(req->net_dev);
851 struct efx_channel *channel = efx_get_channel(efx, req->rxq_index); 835 struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
836 int slot_idx = req - efx->rps_slot;
837 struct efx_arfs_rule *rule;
838 u16 arfs_id = 0;
852 int rc; 839 int rc;
853 840
854 rc = efx->type->filter_insert(efx, &req->spec, false); 841 rc = efx->type->filter_insert(efx, &req->spec, true);
842 if (efx->rps_hash_table) {
843 spin_lock_bh(&efx->rps_hash_lock);
844 rule = efx_rps_hash_find(efx, &req->spec);
845 /* The rule might have already gone, if someone else's request
846 * for the same spec was already worked and then expired before
847 * we got around to our work. In that case we have nothing
848 * tying us to an arfs_id, meaning that as soon as the filter
849 * is considered for expiry it will be removed.
850 */
851 if (rule) {
852 if (rc < 0)
853 rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
854 else
855 rule->filter_id = rc;
856 arfs_id = rule->arfs_id;
857 }
858 spin_unlock_bh(&efx->rps_hash_lock);
859 }
855 if (rc >= 0) { 860 if (rc >= 0) {
856 /* Remember this so we can check whether to expire the filter 861 /* Remember this so we can check whether to expire the filter
857 * later. 862 * later.
@@ -863,23 +868,23 @@ static void efx_filter_rfs_work(struct work_struct *data)
863 868
864 if (req->spec.ether_type == htons(ETH_P_IP)) 869 if (req->spec.ether_type == htons(ETH_P_IP))
865 netif_info(efx, rx_status, efx->net_dev, 870 netif_info(efx, rx_status, efx->net_dev,
866 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", 871 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
867 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", 872 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
868 req->spec.rem_host, ntohs(req->spec.rem_port), 873 req->spec.rem_host, ntohs(req->spec.rem_port),
869 req->spec.loc_host, ntohs(req->spec.loc_port), 874 req->spec.loc_host, ntohs(req->spec.loc_port),
870 req->rxq_index, req->flow_id, rc); 875 req->rxq_index, req->flow_id, rc, arfs_id);
871 else 876 else
872 netif_info(efx, rx_status, efx->net_dev, 877 netif_info(efx, rx_status, efx->net_dev,
873 "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n", 878 "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
874 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", 879 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
875 req->spec.rem_host, ntohs(req->spec.rem_port), 880 req->spec.rem_host, ntohs(req->spec.rem_port),
876 req->spec.loc_host, ntohs(req->spec.loc_port), 881 req->spec.loc_host, ntohs(req->spec.loc_port),
877 req->rxq_index, req->flow_id, rc); 882 req->rxq_index, req->flow_id, rc, arfs_id);
878 } 883 }
879 884
880 /* Release references */ 885 /* Release references */
886 clear_bit(slot_idx, &efx->rps_slot_map);
881 dev_put(req->net_dev); 887 dev_put(req->net_dev);
882 kfree(req);
883} 888}
884 889
885int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, 890int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
@@ -887,23 +892,39 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
887{ 892{
888 struct efx_nic *efx = netdev_priv(net_dev); 893 struct efx_nic *efx = netdev_priv(net_dev);
889 struct efx_async_filter_insertion *req; 894 struct efx_async_filter_insertion *req;
895 struct efx_arfs_rule *rule;
890 struct flow_keys fk; 896 struct flow_keys fk;
897 int slot_idx;
898 bool new;
899 int rc;
891 900
892 if (flow_id == RPS_FLOW_ID_INVALID) 901 /* find a free slot */
893 return -EINVAL; 902 for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
903 if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
904 break;
905 if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
906 return -EBUSY;
894 907
895 if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) 908 if (flow_id == RPS_FLOW_ID_INVALID) {
896 return -EPROTONOSUPPORT; 909 rc = -EINVAL;
910 goto out_clear;
911 }
897 912
898 if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) 913 if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
899 return -EPROTONOSUPPORT; 914 rc = -EPROTONOSUPPORT;
900 if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) 915 goto out_clear;
901 return -EPROTONOSUPPORT; 916 }
902 917
903 req = kmalloc(sizeof(*req), GFP_ATOMIC); 918 if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
904 if (!req) 919 rc = -EPROTONOSUPPORT;
905 return -ENOMEM; 920 goto out_clear;
921 }
922 if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
923 rc = -EPROTONOSUPPORT;
924 goto out_clear;
925 }
906 926
927 req = efx->rps_slot + slot_idx;
907 efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT, 928 efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
908 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, 929 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
909 rxq_index); 930 rxq_index);
@@ -927,12 +948,45 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
927 req->spec.rem_port = fk.ports.src; 948 req->spec.rem_port = fk.ports.src;
928 req->spec.loc_port = fk.ports.dst; 949 req->spec.loc_port = fk.ports.dst;
929 950
951 if (efx->rps_hash_table) {
952 /* Add it to ARFS hash table */
953 spin_lock(&efx->rps_hash_lock);
954 rule = efx_rps_hash_add(efx, &req->spec, &new);
955 if (!rule) {
956 rc = -ENOMEM;
957 goto out_unlock;
958 }
959 if (new)
960 rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
961 rc = rule->arfs_id;
962 /* Skip if existing or pending filter already does the right thing */
963 if (!new && rule->rxq_index == rxq_index &&
964 rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
965 goto out_unlock;
966 rule->rxq_index = rxq_index;
967 rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
968 spin_unlock(&efx->rps_hash_lock);
969 } else {
970 /* Without an ARFS hash table, we just use arfs_id 0 for all
971 * filters. This means if multiple flows hash to the same
972 * flow_id, all but the most recently touched will be eligible
973 * for expiry.
974 */
975 rc = 0;
976 }
977
978 /* Queue the request */
930 dev_hold(req->net_dev = net_dev); 979 dev_hold(req->net_dev = net_dev);
931 INIT_WORK(&req->work, efx_filter_rfs_work); 980 INIT_WORK(&req->work, efx_filter_rfs_work);
932 req->rxq_index = rxq_index; 981 req->rxq_index = rxq_index;
933 req->flow_id = flow_id; 982 req->flow_id = flow_id;
934 schedule_work(&req->work); 983 schedule_work(&req->work);
935 return 0; 984 return rc;
985out_unlock:
986 spin_unlock(&efx->rps_hash_lock);
987out_clear:
988 clear_bit(slot_idx, &efx->rps_slot_map);
989 return rc;
936} 990}
937 991
938bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) 992bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index c7bff596c665..dedd40613090 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -347,7 +347,7 @@ enum power_event {
347#define MTL_RX_OVERFLOW_INT BIT(16) 347#define MTL_RX_OVERFLOW_INT BIT(16)
348 348
349/* Default operating mode of the MAC */ 349/* Default operating mode of the MAC */
350#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | GMAC_CONFIG_ACS | \ 350#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | \
351 GMAC_CONFIG_BE | GMAC_CONFIG_DCRS) 351 GMAC_CONFIG_BE | GMAC_CONFIG_DCRS)
352 352
353/* To dump the core regs excluding the Address Registers */ 353/* To dump the core regs excluding the Address Registers */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index a3af92ebbca8..517b1f6736a8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -31,13 +31,6 @@ static void dwmac4_core_init(struct mac_device_info *hw,
31 31
32 value |= GMAC_CORE_INIT; 32 value |= GMAC_CORE_INIT;
33 33
34 /* Clear ACS bit because Ethernet switch tagging formats such as
35 * Broadcom tags can look like invalid LLC/SNAP packets and cause the
36 * hardware to truncate packets on reception.
37 */
38 if (netdev_uses_dsa(dev))
39 value &= ~GMAC_CONFIG_ACS;
40
41 if (mtu > 1500) 34 if (mtu > 1500)
42 value |= GMAC_CONFIG_2K; 35 value |= GMAC_CONFIG_2K;
43 if (mtu > 2000) 36 if (mtu > 2000)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 9a16931ce39d..b65e2d144698 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3495,8 +3495,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3495 3495
3496 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 3496 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3497 * Type frames (LLC/LLC-SNAP) 3497 * Type frames (LLC/LLC-SNAP)
3498 *
3499 * llc_snap is never checked in GMAC >= 4, so this ACS
3500 * feature is always disabled and packets need to be
3501 * stripped manually.
3498 */ 3502 */
3499 if (unlikely(status != llc_snap)) 3503 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3504 unlikely(status != llc_snap))
3500 frame_len -= ETH_FCS_LEN; 3505 frame_len -= ETH_FCS_LEN;
3501 3506
3502 if (netif_msg_rx_status(priv)) { 3507 if (netif_msg_rx_status(priv)) {
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 30371274409d..74f828412055 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -129,7 +129,7 @@ do { \
129 129
130#define RX_PRIORITY_MAPPING 0x76543210 130#define RX_PRIORITY_MAPPING 0x76543210
131#define TX_PRIORITY_MAPPING 0x33221100 131#define TX_PRIORITY_MAPPING 0x33221100
132#define CPDMA_TX_PRIORITY_MAP 0x01234567 132#define CPDMA_TX_PRIORITY_MAP 0x76543210
133 133
134#define CPSW_VLAN_AWARE BIT(1) 134#define CPSW_VLAN_AWARE BIT(1)
135#define CPSW_RX_VLAN_ENCAP BIT(2) 135#define CPSW_RX_VLAN_ENCAP BIT(2)
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 9cbb0c8a896a..7de88b33d5b9 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3277,7 +3277,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
3277 3277
3278 err = netdev_upper_dev_link(real_dev, dev, extack); 3278 err = netdev_upper_dev_link(real_dev, dev, extack);
3279 if (err < 0) 3279 if (err < 0)
3280 goto put_dev; 3280 goto unregister;
3281 3281
3282 /* need to be already registered so that ->init has run and 3282 /* need to be already registered so that ->init has run and
3283 * the MAC addr is set 3283 * the MAC addr is set
@@ -3316,8 +3316,7 @@ del_dev:
3316 macsec_del_dev(macsec); 3316 macsec_del_dev(macsec);
3317unlink: 3317unlink:
3318 netdev_upper_dev_unlink(real_dev, dev); 3318 netdev_upper_dev_unlink(real_dev, dev);
3319put_dev: 3319unregister:
3320 dev_put(real_dev);
3321 unregister_netdevice(dev); 3320 unregister_netdevice(dev);
3322 return err; 3321 return err;
3323} 3322}
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index c22e8e383247..25e2a099b71c 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1393,6 +1393,15 @@ static int m88e1318_set_wol(struct phy_device *phydev,
1393 if (err < 0) 1393 if (err < 0)
1394 goto error; 1394 goto error;
1395 1395
1396 /* If WOL event happened once, the LED[2] interrupt pin
1397 * will not be cleared unless we reading the interrupt status
1398 * register. If interrupts are in use, the normal interrupt
1399 * handling will clear the WOL event. Clear the WOL event
1400 * before enabling it if !phy_interrupt_is_valid()
1401 */
1402 if (!phy_interrupt_is_valid(phydev))
1403 phy_read(phydev, MII_M1011_IEVENT);
1404
1396 /* Enable the WOL interrupt */ 1405 /* Enable the WOL interrupt */
1397 err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0, 1406 err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
1398 MII_88E1318S_PHY_CSIER_WOL_EIE); 1407 MII_88E1318S_PHY_CSIER_WOL_EIE);
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 0f293ef28935..a97ac8c12c4c 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -20,6 +20,7 @@
20#include <linux/ethtool.h> 20#include <linux/ethtool.h>
21#include <linux/phy.h> 21#include <linux/phy.h>
22#include <linux/microchipphy.h> 22#include <linux/microchipphy.h>
23#include <linux/delay.h>
23 24
24#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>" 25#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
25#define DRIVER_DESC "Microchip LAN88XX PHY driver" 26#define DRIVER_DESC "Microchip LAN88XX PHY driver"
@@ -30,6 +31,16 @@ struct lan88xx_priv {
30 __u32 wolopts; 31 __u32 wolopts;
31}; 32};
32 33
34static int lan88xx_read_page(struct phy_device *phydev)
35{
36 return __phy_read(phydev, LAN88XX_EXT_PAGE_ACCESS);
37}
38
39static int lan88xx_write_page(struct phy_device *phydev, int page)
40{
41 return __phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, page);
42}
43
33static int lan88xx_phy_config_intr(struct phy_device *phydev) 44static int lan88xx_phy_config_intr(struct phy_device *phydev)
34{ 45{
35 int rc; 46 int rc;
@@ -66,6 +77,150 @@ static int lan88xx_suspend(struct phy_device *phydev)
66 return 0; 77 return 0;
67} 78}
68 79
80static int lan88xx_TR_reg_set(struct phy_device *phydev, u16 regaddr,
81 u32 data)
82{
83 int val, save_page, ret = 0;
84 u16 buf;
85
86 /* Save current page */
87 save_page = phy_save_page(phydev);
88 if (save_page < 0) {
89 pr_warn("Failed to get current page\n");
90 goto err;
91 }
92
93 /* Switch to TR page */
94 lan88xx_write_page(phydev, LAN88XX_EXT_PAGE_ACCESS_TR);
95
96 ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_LOW_DATA,
97 (data & 0xFFFF));
98 if (ret < 0) {
99 pr_warn("Failed to write TR low data\n");
100 goto err;
101 }
102
103 ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_HIGH_DATA,
104 (data & 0x00FF0000) >> 16);
105 if (ret < 0) {
106 pr_warn("Failed to write TR high data\n");
107 goto err;
108 }
109
110 /* Config control bits [15:13] of register */
111 buf = (regaddr & ~(0x3 << 13));/* Clr [14:13] to write data in reg */
112 buf |= 0x8000; /* Set [15] to Packet transmit */
113
114 ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_CR, buf);
115 if (ret < 0) {
116 pr_warn("Failed to write data in reg\n");
117 goto err;
118 }
119
120 usleep_range(1000, 2000);/* Wait for Data to be written */
121 val = __phy_read(phydev, LAN88XX_EXT_PAGE_TR_CR);
122 if (!(val & 0x8000))
123 pr_warn("TR Register[0x%X] configuration failed\n", regaddr);
124err:
125 return phy_restore_page(phydev, save_page, ret);
126}
127
128static void lan88xx_config_TR_regs(struct phy_device *phydev)
129{
130 int err;
131
132 /* Get access to Channel 0x1, Node 0xF , Register 0x01.
133 * Write 24-bit value 0x12B00A to register. Setting MrvlTrFix1000Kf,
134 * MrvlTrFix1000Kp, MasterEnableTR bits.
135 */
136 err = lan88xx_TR_reg_set(phydev, 0x0F82, 0x12B00A);
137 if (err < 0)
138 pr_warn("Failed to Set Register[0x0F82]\n");
139
140 /* Get access to Channel b'10, Node b'1101, Register 0x06.
141 * Write 24-bit value 0xD2C46F to register. Setting SSTrKf1000Slv,
142 * SSTrKp1000Mas bits.
143 */
144 err = lan88xx_TR_reg_set(phydev, 0x168C, 0xD2C46F);
145 if (err < 0)
146 pr_warn("Failed to Set Register[0x168C]\n");
147
148 /* Get access to Channel b'10, Node b'1111, Register 0x11.
149 * Write 24-bit value 0x620 to register. Setting rem_upd_done_thresh
150 * bits
151 */
152 err = lan88xx_TR_reg_set(phydev, 0x17A2, 0x620);
153 if (err < 0)
154 pr_warn("Failed to Set Register[0x17A2]\n");
155
156 /* Get access to Channel b'10, Node b'1101, Register 0x10.
157 * Write 24-bit value 0xEEFFDD to register. Setting
158 * eee_TrKp1Long_1000, eee_TrKp2Long_1000, eee_TrKp3Long_1000,
159 * eee_TrKp1Short_1000,eee_TrKp2Short_1000, eee_TrKp3Short_1000 bits.
160 */
161 err = lan88xx_TR_reg_set(phydev, 0x16A0, 0xEEFFDD);
162 if (err < 0)
163 pr_warn("Failed to Set Register[0x16A0]\n");
164
165 /* Get access to Channel b'10, Node b'1101, Register 0x13.
166 * Write 24-bit value 0x071448 to register. Setting
167 * slv_lpi_tr_tmr_val1, slv_lpi_tr_tmr_val2 bits.
168 */
169 err = lan88xx_TR_reg_set(phydev, 0x16A6, 0x071448);
170 if (err < 0)
171 pr_warn("Failed to Set Register[0x16A6]\n");
172
173 /* Get access to Channel b'10, Node b'1101, Register 0x12.
174 * Write 24-bit value 0x13132F to register. Setting
175 * slv_sigdet_timer_val1, slv_sigdet_timer_val2 bits.
176 */
177 err = lan88xx_TR_reg_set(phydev, 0x16A4, 0x13132F);
178 if (err < 0)
179 pr_warn("Failed to Set Register[0x16A4]\n");
180
181 /* Get access to Channel b'10, Node b'1101, Register 0x14.
182 * Write 24-bit value 0x0 to register. Setting eee_3level_delay,
183 * eee_TrKf_freeze_delay bits.
184 */
185 err = lan88xx_TR_reg_set(phydev, 0x16A8, 0x0);
186 if (err < 0)
187 pr_warn("Failed to Set Register[0x16A8]\n");
188
189 /* Get access to Channel b'01, Node b'1111, Register 0x34.
190 * Write 24-bit value 0x91B06C to register. Setting
191 * FastMseSearchThreshLong1000, FastMseSearchThreshShort1000,
192 * FastMseSearchUpdGain1000 bits.
193 */
194 err = lan88xx_TR_reg_set(phydev, 0x0FE8, 0x91B06C);
195 if (err < 0)
196 pr_warn("Failed to Set Register[0x0FE8]\n");
197
198 /* Get access to Channel b'01, Node b'1111, Register 0x3E.
199 * Write 24-bit value 0xC0A028 to register. Setting
200 * FastMseKp2ThreshLong1000, FastMseKp2ThreshShort1000,
201 * FastMseKp2UpdGain1000, FastMseKp2ExitEn1000 bits.
202 */
203 err = lan88xx_TR_reg_set(phydev, 0x0FFC, 0xC0A028);
204 if (err < 0)
205 pr_warn("Failed to Set Register[0x0FFC]\n");
206
207 /* Get access to Channel b'01, Node b'1111, Register 0x35.
208 * Write 24-bit value 0x041600 to register. Setting
209 * FastMseSearchPhShNum1000, FastMseSearchClksPerPh1000,
210 * FastMsePhChangeDelay1000 bits.
211 */
212 err = lan88xx_TR_reg_set(phydev, 0x0FEA, 0x041600);
213 if (err < 0)
214 pr_warn("Failed to Set Register[0x0FEA]\n");
215
216 /* Get access to Channel b'10, Node b'1101, Register 0x03.
217 * Write 24-bit value 0x000004 to register. Setting TrFreeze bits.
218 */
219 err = lan88xx_TR_reg_set(phydev, 0x1686, 0x000004);
220 if (err < 0)
221 pr_warn("Failed to Set Register[0x1686]\n");
222}
223
69static int lan88xx_probe(struct phy_device *phydev) 224static int lan88xx_probe(struct phy_device *phydev)
70{ 225{
71 struct device *dev = &phydev->mdio.dev; 226 struct device *dev = &phydev->mdio.dev;
@@ -132,6 +287,25 @@ static void lan88xx_set_mdix(struct phy_device *phydev)
132 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0); 287 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);
133} 288}
134 289
290static int lan88xx_config_init(struct phy_device *phydev)
291{
292 int val;
293
294 genphy_config_init(phydev);
295 /*Zerodetect delay enable */
296 val = phy_read_mmd(phydev, MDIO_MMD_PCS,
297 PHY_ARDENNES_MMD_DEV_3_PHY_CFG);
298 val |= PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_;
299
300 phy_write_mmd(phydev, MDIO_MMD_PCS, PHY_ARDENNES_MMD_DEV_3_PHY_CFG,
301 val);
302
303 /* Config DSP registers */
304 lan88xx_config_TR_regs(phydev);
305
306 return 0;
307}
308
135static int lan88xx_config_aneg(struct phy_device *phydev) 309static int lan88xx_config_aneg(struct phy_device *phydev)
136{ 310{
137 lan88xx_set_mdix(phydev); 311 lan88xx_set_mdix(phydev);
@@ -151,7 +325,7 @@ static struct phy_driver microchip_phy_driver[] = {
151 .probe = lan88xx_probe, 325 .probe = lan88xx_probe,
152 .remove = lan88xx_remove, 326 .remove = lan88xx_remove,
153 327
154 .config_init = genphy_config_init, 328 .config_init = lan88xx_config_init,
155 .config_aneg = lan88xx_config_aneg, 329 .config_aneg = lan88xx_config_aneg,
156 330
157 .ack_interrupt = lan88xx_phy_ack_interrupt, 331 .ack_interrupt = lan88xx_phy_ack_interrupt,
@@ -160,6 +334,8 @@ static struct phy_driver microchip_phy_driver[] = {
160 .suspend = lan88xx_suspend, 334 .suspend = lan88xx_suspend,
161 .resume = genphy_resume, 335 .resume = genphy_resume,
162 .set_wol = lan88xx_set_wol, 336 .set_wol = lan88xx_set_wol,
337 .read_page = lan88xx_read_page,
338 .write_page = lan88xx_write_page,
163} }; 339} };
164 340
165module_phy_driver(microchip_phy_driver); 341module_phy_driver(microchip_phy_driver);
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 1483bc7b01e1..7df07337d69c 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -620,6 +620,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
620 lock_sock(sk); 620 lock_sock(sk);
621 621
622 error = -EINVAL; 622 error = -EINVAL;
623
624 if (sockaddr_len != sizeof(struct sockaddr_pppox))
625 goto end;
626
623 if (sp->sa_protocol != PX_PROTO_OE) 627 if (sp->sa_protocol != PX_PROTO_OE)
624 goto end; 628 goto end;
625 629
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index a6c6ce19eeee..ddb6bf85a59c 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -261,6 +261,17 @@ static void __team_option_inst_mark_removed_port(struct team *team,
261 } 261 }
262} 262}
263 263
264static bool __team_option_inst_tmp_find(const struct list_head *opts,
265 const struct team_option_inst *needle)
266{
267 struct team_option_inst *opt_inst;
268
269 list_for_each_entry(opt_inst, opts, tmp_list)
270 if (opt_inst == needle)
271 return true;
272 return false;
273}
274
264static int __team_options_register(struct team *team, 275static int __team_options_register(struct team *team,
265 const struct team_option *option, 276 const struct team_option *option,
266 size_t option_count) 277 size_t option_count)
@@ -1061,14 +1072,11 @@ static void team_port_leave(struct team *team, struct team_port *port)
1061} 1072}
1062 1073
1063#ifdef CONFIG_NET_POLL_CONTROLLER 1074#ifdef CONFIG_NET_POLL_CONTROLLER
1064static int team_port_enable_netpoll(struct team *team, struct team_port *port) 1075static int __team_port_enable_netpoll(struct team_port *port)
1065{ 1076{
1066 struct netpoll *np; 1077 struct netpoll *np;
1067 int err; 1078 int err;
1068 1079
1069 if (!team->dev->npinfo)
1070 return 0;
1071
1072 np = kzalloc(sizeof(*np), GFP_KERNEL); 1080 np = kzalloc(sizeof(*np), GFP_KERNEL);
1073 if (!np) 1081 if (!np)
1074 return -ENOMEM; 1082 return -ENOMEM;
@@ -1082,6 +1090,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port)
1082 return err; 1090 return err;
1083} 1091}
1084 1092
1093static int team_port_enable_netpoll(struct team_port *port)
1094{
1095 if (!port->team->dev->npinfo)
1096 return 0;
1097
1098 return __team_port_enable_netpoll(port);
1099}
1100
1085static void team_port_disable_netpoll(struct team_port *port) 1101static void team_port_disable_netpoll(struct team_port *port)
1086{ 1102{
1087 struct netpoll *np = port->np; 1103 struct netpoll *np = port->np;
@@ -1096,7 +1112,7 @@ static void team_port_disable_netpoll(struct team_port *port)
1096 kfree(np); 1112 kfree(np);
1097} 1113}
1098#else 1114#else
1099static int team_port_enable_netpoll(struct team *team, struct team_port *port) 1115static int team_port_enable_netpoll(struct team_port *port)
1100{ 1116{
1101 return 0; 1117 return 0;
1102} 1118}
@@ -1210,7 +1226,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
1210 goto err_vids_add; 1226 goto err_vids_add;
1211 } 1227 }
1212 1228
1213 err = team_port_enable_netpoll(team, port); 1229 err = team_port_enable_netpoll(port);
1214 if (err) { 1230 if (err) {
1215 netdev_err(dev, "Failed to enable netpoll on device %s\n", 1231 netdev_err(dev, "Failed to enable netpoll on device %s\n",
1216 portname); 1232 portname);
@@ -1907,7 +1923,7 @@ static int team_netpoll_setup(struct net_device *dev,
1907 1923
1908 mutex_lock(&team->lock); 1924 mutex_lock(&team->lock);
1909 list_for_each_entry(port, &team->port_list, list) { 1925 list_for_each_entry(port, &team->port_list, list) {
1910 err = team_port_enable_netpoll(team, port); 1926 err = __team_port_enable_netpoll(port);
1911 if (err) { 1927 if (err) {
1912 __team_netpoll_cleanup(team); 1928 __team_netpoll_cleanup(team);
1913 break; 1929 break;
@@ -2568,6 +2584,14 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2568 if (err) 2584 if (err)
2569 goto team_put; 2585 goto team_put;
2570 opt_inst->changed = true; 2586 opt_inst->changed = true;
2587
2588 /* dumb/evil user-space can send us duplicate opt,
2589 * keep only the last one
2590 */
2591 if (__team_option_inst_tmp_find(&opt_inst_list,
2592 opt_inst))
2593 continue;
2594
2571 list_add(&opt_inst->tmp_list, &opt_inst_list); 2595 list_add(&opt_inst->tmp_list, &opt_inst_list);
2572 } 2596 }
2573 if (!opt_found) { 2597 if (!opt_found) {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 28583aa0c17d..ef33950a45d9 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1102,12 +1102,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
1102 goto drop; 1102 goto drop;
1103 1103
1104 len = run_ebpf_filter(tun, skb, len); 1104 len = run_ebpf_filter(tun, skb, len);
1105 1105 if (len == 0 || pskb_trim(skb, len))
1106 /* Trim extra bytes since we may insert vlan proto & TCI
1107 * in tun_put_user().
1108 */
1109 len -= skb_vlan_tag_present(skb) ? sizeof(struct veth) : 0;
1110 if (len <= 0 || pskb_trim(skb, len))
1111 goto drop; 1106 goto drop;
1112 1107
1113 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 1108 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index ca066b785e9f..c853e7410f5a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1107,6 +1107,7 @@ static const struct usb_device_id products[] = {
1107 {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */ 1107 {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
1108 {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */ 1108 {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */
1109 {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */ 1109 {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */
1110 {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */
1110 {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ 1111 {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
1111 {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */ 1112 {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
1112 {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */ 1113 {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7b187ec7411e..770422e953f7 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -147,6 +147,17 @@ struct receive_queue {
147 struct xdp_rxq_info xdp_rxq; 147 struct xdp_rxq_info xdp_rxq;
148}; 148};
149 149
150/* Control VQ buffers: protected by the rtnl lock */
151struct control_buf {
152 struct virtio_net_ctrl_hdr hdr;
153 virtio_net_ctrl_ack status;
154 struct virtio_net_ctrl_mq mq;
155 u8 promisc;
156 u8 allmulti;
157 __virtio16 vid;
158 __virtio64 offloads;
159};
160
150struct virtnet_info { 161struct virtnet_info {
151 struct virtio_device *vdev; 162 struct virtio_device *vdev;
152 struct virtqueue *cvq; 163 struct virtqueue *cvq;
@@ -192,14 +203,7 @@ struct virtnet_info {
192 struct hlist_node node; 203 struct hlist_node node;
193 struct hlist_node node_dead; 204 struct hlist_node node_dead;
194 205
195 /* Control VQ buffers: protected by the rtnl lock */ 206 struct control_buf *ctrl;
196 struct virtio_net_ctrl_hdr ctrl_hdr;
197 virtio_net_ctrl_ack ctrl_status;
198 struct virtio_net_ctrl_mq ctrl_mq;
199 u8 ctrl_promisc;
200 u8 ctrl_allmulti;
201 u16 ctrl_vid;
202 u64 ctrl_offloads;
203 207
204 /* Ethtool settings */ 208 /* Ethtool settings */
205 u8 duplex; 209 u8 duplex;
@@ -1269,7 +1273,9 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
1269{ 1273{
1270 struct receive_queue *rq = 1274 struct receive_queue *rq =
1271 container_of(napi, struct receive_queue, napi); 1275 container_of(napi, struct receive_queue, napi);
1272 unsigned int received; 1276 struct virtnet_info *vi = rq->vq->vdev->priv;
1277 struct send_queue *sq;
1278 unsigned int received, qp;
1273 bool xdp_xmit = false; 1279 bool xdp_xmit = false;
1274 1280
1275 virtnet_poll_cleantx(rq); 1281 virtnet_poll_cleantx(rq);
@@ -1280,8 +1286,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
1280 if (received < budget) 1286 if (received < budget)
1281 virtqueue_napi_complete(napi, rq->vq, received); 1287 virtqueue_napi_complete(napi, rq->vq, received);
1282 1288
1283 if (xdp_xmit) 1289 if (xdp_xmit) {
1290 qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
1291 smp_processor_id();
1292 sq = &vi->sq[qp];
1293 virtqueue_kick(sq->vq);
1284 xdp_do_flush_map(); 1294 xdp_do_flush_map();
1295 }
1285 1296
1286 return received; 1297 return received;
1287} 1298}
@@ -1454,25 +1465,25 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
1454 /* Caller should know better */ 1465 /* Caller should know better */
1455 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); 1466 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
1456 1467
1457 vi->ctrl_status = ~0; 1468 vi->ctrl->status = ~0;
1458 vi->ctrl_hdr.class = class; 1469 vi->ctrl->hdr.class = class;
1459 vi->ctrl_hdr.cmd = cmd; 1470 vi->ctrl->hdr.cmd = cmd;
1460 /* Add header */ 1471 /* Add header */
1461 sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr)); 1472 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
1462 sgs[out_num++] = &hdr; 1473 sgs[out_num++] = &hdr;
1463 1474
1464 if (out) 1475 if (out)
1465 sgs[out_num++] = out; 1476 sgs[out_num++] = out;
1466 1477
1467 /* Add return status. */ 1478 /* Add return status. */
1468 sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status)); 1479 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
1469 sgs[out_num] = &stat; 1480 sgs[out_num] = &stat;
1470 1481
1471 BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); 1482 BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
1472 virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); 1483 virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
1473 1484
1474 if (unlikely(!virtqueue_kick(vi->cvq))) 1485 if (unlikely(!virtqueue_kick(vi->cvq)))
1475 return vi->ctrl_status == VIRTIO_NET_OK; 1486 return vi->ctrl->status == VIRTIO_NET_OK;
1476 1487
1477 /* Spin for a response, the kick causes an ioport write, trapping 1488 /* Spin for a response, the kick causes an ioport write, trapping
1478 * into the hypervisor, so the request should be handled immediately. 1489 * into the hypervisor, so the request should be handled immediately.
@@ -1481,7 +1492,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
1481 !virtqueue_is_broken(vi->cvq)) 1492 !virtqueue_is_broken(vi->cvq))
1482 cpu_relax(); 1493 cpu_relax();
1483 1494
1484 return vi->ctrl_status == VIRTIO_NET_OK; 1495 return vi->ctrl->status == VIRTIO_NET_OK;
1485} 1496}
1486 1497
1487static int virtnet_set_mac_address(struct net_device *dev, void *p) 1498static int virtnet_set_mac_address(struct net_device *dev, void *p)
@@ -1593,8 +1604,8 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
1593 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) 1604 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
1594 return 0; 1605 return 0;
1595 1606
1596 vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); 1607 vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
1597 sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq)); 1608 sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
1598 1609
1599 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, 1610 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
1600 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { 1611 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
@@ -1653,22 +1664,22 @@ static void virtnet_set_rx_mode(struct net_device *dev)
1653 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) 1664 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
1654 return; 1665 return;
1655 1666
1656 vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0); 1667 vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
1657 vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0); 1668 vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
1658 1669
1659 sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc)); 1670 sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
1660 1671
1661 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 1672 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1662 VIRTIO_NET_CTRL_RX_PROMISC, sg)) 1673 VIRTIO_NET_CTRL_RX_PROMISC, sg))
1663 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", 1674 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
1664 vi->ctrl_promisc ? "en" : "dis"); 1675 vi->ctrl->promisc ? "en" : "dis");
1665 1676
1666 sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti)); 1677 sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
1667 1678
1668 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 1679 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1669 VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) 1680 VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
1670 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", 1681 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
1671 vi->ctrl_allmulti ? "en" : "dis"); 1682 vi->ctrl->allmulti ? "en" : "dis");
1672 1683
1673 uc_count = netdev_uc_count(dev); 1684 uc_count = netdev_uc_count(dev);
1674 mc_count = netdev_mc_count(dev); 1685 mc_count = netdev_mc_count(dev);
@@ -1714,8 +1725,8 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
1714 struct virtnet_info *vi = netdev_priv(dev); 1725 struct virtnet_info *vi = netdev_priv(dev);
1715 struct scatterlist sg; 1726 struct scatterlist sg;
1716 1727
1717 vi->ctrl_vid = vid; 1728 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
1718 sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); 1729 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
1719 1730
1720 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 1731 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
1721 VIRTIO_NET_CTRL_VLAN_ADD, &sg)) 1732 VIRTIO_NET_CTRL_VLAN_ADD, &sg))
@@ -1729,8 +1740,8 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
1729 struct virtnet_info *vi = netdev_priv(dev); 1740 struct virtnet_info *vi = netdev_priv(dev);
1730 struct scatterlist sg; 1741 struct scatterlist sg;
1731 1742
1732 vi->ctrl_vid = vid; 1743 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
1733 sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); 1744 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
1734 1745
1735 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 1746 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
1736 VIRTIO_NET_CTRL_VLAN_DEL, &sg)) 1747 VIRTIO_NET_CTRL_VLAN_DEL, &sg))
@@ -2126,9 +2137,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
2126static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) 2137static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
2127{ 2138{
2128 struct scatterlist sg; 2139 struct scatterlist sg;
2129 vi->ctrl_offloads = cpu_to_virtio64(vi->vdev, offloads); 2140 vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
2130 2141
2131 sg_init_one(&sg, &vi->ctrl_offloads, sizeof(vi->ctrl_offloads)); 2142 sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
2132 2143
2133 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, 2144 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
2134 VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { 2145 VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
@@ -2351,6 +2362,7 @@ static void virtnet_free_queues(struct virtnet_info *vi)
2351 2362
2352 kfree(vi->rq); 2363 kfree(vi->rq);
2353 kfree(vi->sq); 2364 kfree(vi->sq);
2365 kfree(vi->ctrl);
2354} 2366}
2355 2367
2356static void _free_receive_bufs(struct virtnet_info *vi) 2368static void _free_receive_bufs(struct virtnet_info *vi)
@@ -2543,6 +2555,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
2543{ 2555{
2544 int i; 2556 int i;
2545 2557
2558 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
2559 if (!vi->ctrl)
2560 goto err_ctrl;
2546 vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); 2561 vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
2547 if (!vi->sq) 2562 if (!vi->sq)
2548 goto err_sq; 2563 goto err_sq;
@@ -2571,6 +2586,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
2571err_rq: 2586err_rq:
2572 kfree(vi->sq); 2587 kfree(vi->sq);
2573err_sq: 2588err_sq:
2589 kfree(vi->ctrl);
2590err_ctrl:
2574 return -ENOMEM; 2591 return -ENOMEM;
2575} 2592}
2576 2593
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index e04937f44f33..9ebe2a689966 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1218,6 +1218,7 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
1218 union { 1218 union {
1219 void *ptr; 1219 void *ptr;
1220 struct ethhdr *eth; 1220 struct ethhdr *eth;
1221 struct vlan_ethhdr *veth;
1221 struct iphdr *ipv4; 1222 struct iphdr *ipv4;
1222 struct ipv6hdr *ipv6; 1223 struct ipv6hdr *ipv6;
1223 struct tcphdr *tcp; 1224 struct tcphdr *tcp;
@@ -1228,16 +1229,24 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
1228 if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen)) 1229 if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
1229 return 0; 1230 return 0;
1230 1231
1232 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
1233 skb->protocol == cpu_to_be16(ETH_P_8021AD))
1234 hlen = sizeof(struct vlan_ethhdr);
1235 else
1236 hlen = sizeof(struct ethhdr);
1237
1231 hdr.eth = eth_hdr(skb); 1238 hdr.eth = eth_hdr(skb);
1232 if (gdesc->rcd.v4) { 1239 if (gdesc->rcd.v4) {
1233 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP)); 1240 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) &&
1234 hdr.ptr += sizeof(struct ethhdr); 1241 hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP));
1242 hdr.ptr += hlen;
1235 BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP); 1243 BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
1236 hlen = hdr.ipv4->ihl << 2; 1244 hlen = hdr.ipv4->ihl << 2;
1237 hdr.ptr += hdr.ipv4->ihl << 2; 1245 hdr.ptr += hdr.ipv4->ihl << 2;
1238 } else if (gdesc->rcd.v6) { 1246 } else if (gdesc->rcd.v6) {
1239 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6)); 1247 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) &&
1240 hdr.ptr += sizeof(struct ethhdr); 1248 hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6));
1249 hdr.ptr += hlen;
1241 /* Use an estimated value, since we also need to handle 1250 /* Use an estimated value, since we also need to handle
1242 * TSO case. 1251 * TSO case.
1243 */ 1252 */
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 59ec34052a65..a3326463b71f 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
69/* 69/*
70 * Version numbers 70 * Version numbers
71 */ 71 */
72#define VMXNET3_DRIVER_VERSION_STRING "1.4.13.0-k" 72#define VMXNET3_DRIVER_VERSION_STRING "1.4.14.0-k"
73 73
74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
75#define VMXNET3_DRIVER_VERSION_NUM 0x01040d00 75#define VMXNET3_DRIVER_VERSION_NUM 0x01040e00
76 76
77#if defined(CONFIG_PCI_MSI) 77#if defined(CONFIG_PCI_MSI)
78 /* RSS only makes sense if MSI-X is supported. */ 78 /* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 85997184e047..9d36473dc2a2 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -103,8 +103,7 @@ config NVDIMM_DAX
103 Select Y if unsure 103 Select Y if unsure
104 104
105config OF_PMEM 105config OF_PMEM
106 # FIXME: make tristate once OF_NUMA dependency removed 106 tristate "Device-tree support for persistent memory regions"
107 bool "Device-tree support for persistent memory regions"
108 depends on OF 107 depends on OF
109 default LIBNVDIMM 108 default LIBNVDIMM
110 help 109 help
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index e00d45522b80..8d348b22ba45 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -88,9 +88,9 @@ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
88int nvdimm_init_config_data(struct nvdimm_drvdata *ndd) 88int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
89{ 89{
90 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); 90 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
91 int rc = validate_dimm(ndd), cmd_rc = 0;
91 struct nd_cmd_get_config_data_hdr *cmd; 92 struct nd_cmd_get_config_data_hdr *cmd;
92 struct nvdimm_bus_descriptor *nd_desc; 93 struct nvdimm_bus_descriptor *nd_desc;
93 int rc = validate_dimm(ndd);
94 u32 max_cmd_size, config_size; 94 u32 max_cmd_size, config_size;
95 size_t offset; 95 size_t offset;
96 96
@@ -124,9 +124,11 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
124 cmd->in_offset = offset; 124 cmd->in_offset = offset;
125 rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev), 125 rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
126 ND_CMD_GET_CONFIG_DATA, cmd, 126 ND_CMD_GET_CONFIG_DATA, cmd,
127 cmd->in_length + sizeof(*cmd), NULL); 127 cmd->in_length + sizeof(*cmd), &cmd_rc);
128 if (rc || cmd->status) { 128 if (rc < 0)
129 rc = -ENXIO; 129 break;
130 if (cmd_rc < 0) {
131 rc = cmd_rc;
130 break; 132 break;
131 } 133 }
132 memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length); 134 memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
@@ -140,9 +142,9 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
140int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, 142int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
141 void *buf, size_t len) 143 void *buf, size_t len)
142{ 144{
143 int rc = validate_dimm(ndd);
144 size_t max_cmd_size, buf_offset; 145 size_t max_cmd_size, buf_offset;
145 struct nd_cmd_set_config_hdr *cmd; 146 struct nd_cmd_set_config_hdr *cmd;
147 int rc = validate_dimm(ndd), cmd_rc = 0;
146 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); 148 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
147 struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; 149 struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
148 150
@@ -164,7 +166,6 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
164 for (buf_offset = 0; len; len -= cmd->in_length, 166 for (buf_offset = 0; len; len -= cmd->in_length,
165 buf_offset += cmd->in_length) { 167 buf_offset += cmd->in_length) {
166 size_t cmd_size; 168 size_t cmd_size;
167 u32 *status;
168 169
169 cmd->in_offset = offset + buf_offset; 170 cmd->in_offset = offset + buf_offset;
170 cmd->in_length = min(max_cmd_size, len); 171 cmd->in_length = min(max_cmd_size, len);
@@ -172,12 +173,13 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
172 173
173 /* status is output in the last 4-bytes of the command buffer */ 174 /* status is output in the last 4-bytes of the command buffer */
174 cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32); 175 cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);
175 status = ((void *) cmd) + cmd_size - sizeof(u32);
176 176
177 rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev), 177 rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
178 ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL); 178 ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
179 if (rc || *status) { 179 if (rc < 0)
180 rc = rc ? rc : -ENXIO; 180 break;
181 if (cmd_rc < 0) {
182 rc = cmd_rc;
181 break; 183 break;
182 } 184 }
183 } 185 }
diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
index 85013bad35de..0a701837dfc0 100644
--- a/drivers/nvdimm/of_pmem.c
+++ b/drivers/nvdimm/of_pmem.c
@@ -67,7 +67,7 @@ static int of_pmem_region_probe(struct platform_device *pdev)
67 */ 67 */
68 memset(&ndr_desc, 0, sizeof(ndr_desc)); 68 memset(&ndr_desc, 0, sizeof(ndr_desc));
69 ndr_desc.attr_groups = region_attr_groups; 69 ndr_desc.attr_groups = region_attr_groups;
70 ndr_desc.numa_node = of_node_to_nid(np); 70 ndr_desc.numa_node = dev_to_node(&pdev->dev);
71 ndr_desc.res = &pdev->resource[i]; 71 ndr_desc.res = &pdev->resource[i];
72 ndr_desc.of_node = np; 72 ndr_desc.of_node = np;
73 set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags); 73 set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 84aa9d676375..6da20b9688f7 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -942,7 +942,7 @@ int __init early_init_dt_scan_chosen_stdout(void)
942 int offset; 942 int offset;
943 const char *p, *q, *options = NULL; 943 const char *p, *q, *options = NULL;
944 int l; 944 int l;
945 const struct earlycon_id *match; 945 const struct earlycon_id **p_match;
946 const void *fdt = initial_boot_params; 946 const void *fdt = initial_boot_params;
947 947
948 offset = fdt_path_offset(fdt, "/chosen"); 948 offset = fdt_path_offset(fdt, "/chosen");
@@ -969,7 +969,10 @@ int __init early_init_dt_scan_chosen_stdout(void)
969 return 0; 969 return 0;
970 } 970 }
971 971
972 for (match = __earlycon_table; match < __earlycon_table_end; match++) { 972 for (p_match = __earlycon_table; p_match < __earlycon_table_end;
973 p_match++) {
974 const struct earlycon_id *match = *p_match;
975
973 if (!match->compatible[0]) 976 if (!match->compatible[0])
974 continue; 977 continue;
975 978
diff --git a/drivers/pci/dwc/pcie-kirin.c b/drivers/pci/dwc/pcie-kirin.c
index a6b88c7f6e3e..d2970a009eb5 100644
--- a/drivers/pci/dwc/pcie-kirin.c
+++ b/drivers/pci/dwc/pcie-kirin.c
@@ -486,7 +486,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
486 return ret; 486 return ret;
487 487
488 kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node, 488 kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node,
489 "reset-gpio", 0); 489 "reset-gpios", 0);
490 if (kirin_pcie->gpio_id_reset < 0) 490 if (kirin_pcie->gpio_id_reset < 0)
491 return -ENODEV; 491 return -ENODEV;
492 492
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index b04d37b3c5de..9abf549631b4 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -29,6 +29,7 @@
29#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5 29#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5
30#define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11) 30#define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11)
31#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12 31#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12
32#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ 0x2
32#define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0 33#define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0
33#define PCIE_CORE_LINK_L0S_ENTRY BIT(0) 34#define PCIE_CORE_LINK_L0S_ENTRY BIT(0)
34#define PCIE_CORE_LINK_TRAINING BIT(5) 35#define PCIE_CORE_LINK_TRAINING BIT(5)
@@ -100,7 +101,8 @@
100#define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C) 101#define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C)
101#define PCIE_ISR1_POWER_STATE_CHANGE BIT(4) 102#define PCIE_ISR1_POWER_STATE_CHANGE BIT(4)
102#define PCIE_ISR1_FLUSH BIT(5) 103#define PCIE_ISR1_FLUSH BIT(5)
103#define PCIE_ISR1_ALL_MASK GENMASK(5, 4) 104#define PCIE_ISR1_INTX_ASSERT(val) BIT(8 + (val))
105#define PCIE_ISR1_ALL_MASK GENMASK(11, 4)
104#define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50) 106#define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50)
105#define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54) 107#define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54)
106#define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58) 108#define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58)
@@ -172,8 +174,6 @@
172#define PCIE_CONFIG_WR_TYPE0 0xa 174#define PCIE_CONFIG_WR_TYPE0 0xa
173#define PCIE_CONFIG_WR_TYPE1 0xb 175#define PCIE_CONFIG_WR_TYPE1 0xb
174 176
175/* PCI_BDF shifts 8bit, so we need extra 4bit shift */
176#define PCIE_BDF(dev) (dev << 4)
177#define PCIE_CONF_BUS(bus) (((bus) & 0xff) << 20) 177#define PCIE_CONF_BUS(bus) (((bus) & 0xff) << 20)
178#define PCIE_CONF_DEV(dev) (((dev) & 0x1f) << 15) 178#define PCIE_CONF_DEV(dev) (((dev) & 0x1f) << 15)
179#define PCIE_CONF_FUNC(fun) (((fun) & 0x7) << 12) 179#define PCIE_CONF_FUNC(fun) (((fun) & 0x7) << 12)
@@ -296,7 +296,8 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
296 reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE | 296 reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE |
297 (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) | 297 (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) |
298 PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE | 298 PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE |
299 PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT; 299 (PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ <<
300 PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT);
300 advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG); 301 advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG);
301 302
302 /* Program PCIe Control 2 to disable strict ordering */ 303 /* Program PCIe Control 2 to disable strict ordering */
@@ -437,7 +438,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
437 u32 reg; 438 u32 reg;
438 int ret; 439 int ret;
439 440
440 if (PCI_SLOT(devfn) != 0) { 441 if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) {
441 *val = 0xffffffff; 442 *val = 0xffffffff;
442 return PCIBIOS_DEVICE_NOT_FOUND; 443 return PCIBIOS_DEVICE_NOT_FOUND;
443 } 444 }
@@ -456,7 +457,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
456 advk_writel(pcie, reg, PIO_CTRL); 457 advk_writel(pcie, reg, PIO_CTRL);
457 458
458 /* Program the address registers */ 459 /* Program the address registers */
459 reg = PCIE_BDF(devfn) | PCIE_CONF_REG(where); 460 reg = PCIE_CONF_ADDR(bus->number, devfn, where);
460 advk_writel(pcie, reg, PIO_ADDR_LS); 461 advk_writel(pcie, reg, PIO_ADDR_LS);
461 advk_writel(pcie, 0, PIO_ADDR_MS); 462 advk_writel(pcie, 0, PIO_ADDR_MS);
462 463
@@ -491,7 +492,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
491 int offset; 492 int offset;
492 int ret; 493 int ret;
493 494
494 if (PCI_SLOT(devfn) != 0) 495 if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
495 return PCIBIOS_DEVICE_NOT_FOUND; 496 return PCIBIOS_DEVICE_NOT_FOUND;
496 497
497 if (where % size) 498 if (where % size)
@@ -609,9 +610,9 @@ static void advk_pcie_irq_mask(struct irq_data *d)
609 irq_hw_number_t hwirq = irqd_to_hwirq(d); 610 irq_hw_number_t hwirq = irqd_to_hwirq(d);
610 u32 mask; 611 u32 mask;
611 612
612 mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); 613 mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
613 mask |= PCIE_ISR0_INTX_ASSERT(hwirq); 614 mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
614 advk_writel(pcie, mask, PCIE_ISR0_MASK_REG); 615 advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
615} 616}
616 617
617static void advk_pcie_irq_unmask(struct irq_data *d) 618static void advk_pcie_irq_unmask(struct irq_data *d)
@@ -620,9 +621,9 @@ static void advk_pcie_irq_unmask(struct irq_data *d)
620 irq_hw_number_t hwirq = irqd_to_hwirq(d); 621 irq_hw_number_t hwirq = irqd_to_hwirq(d);
621 u32 mask; 622 u32 mask;
622 623
623 mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); 624 mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
624 mask &= ~PCIE_ISR0_INTX_ASSERT(hwirq); 625 mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
625 advk_writel(pcie, mask, PCIE_ISR0_MASK_REG); 626 advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
626} 627}
627 628
628static int advk_pcie_irq_map(struct irq_domain *h, 629static int advk_pcie_irq_map(struct irq_domain *h,
@@ -765,29 +766,35 @@ static void advk_pcie_handle_msi(struct advk_pcie *pcie)
765 766
766static void advk_pcie_handle_int(struct advk_pcie *pcie) 767static void advk_pcie_handle_int(struct advk_pcie *pcie)
767{ 768{
768 u32 val, mask, status; 769 u32 isr0_val, isr0_mask, isr0_status;
770 u32 isr1_val, isr1_mask, isr1_status;
769 int i, virq; 771 int i, virq;
770 772
771 val = advk_readl(pcie, PCIE_ISR0_REG); 773 isr0_val = advk_readl(pcie, PCIE_ISR0_REG);
772 mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); 774 isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
773 status = val & ((~mask) & PCIE_ISR0_ALL_MASK); 775 isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK);
776
777 isr1_val = advk_readl(pcie, PCIE_ISR1_REG);
778 isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
779 isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);
774 780
775 if (!status) { 781 if (!isr0_status && !isr1_status) {
776 advk_writel(pcie, val, PCIE_ISR0_REG); 782 advk_writel(pcie, isr0_val, PCIE_ISR0_REG);
783 advk_writel(pcie, isr1_val, PCIE_ISR1_REG);
777 return; 784 return;
778 } 785 }
779 786
780 /* Process MSI interrupts */ 787 /* Process MSI interrupts */
781 if (status & PCIE_ISR0_MSI_INT_PENDING) 788 if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
782 advk_pcie_handle_msi(pcie); 789 advk_pcie_handle_msi(pcie);
783 790
784 /* Process legacy interrupts */ 791 /* Process legacy interrupts */
785 for (i = 0; i < PCI_NUM_INTX; i++) { 792 for (i = 0; i < PCI_NUM_INTX; i++) {
786 if (!(status & PCIE_ISR0_INTX_ASSERT(i))) 793 if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i)))
787 continue; 794 continue;
788 795
789 advk_writel(pcie, PCIE_ISR0_INTX_ASSERT(i), 796 advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
790 PCIE_ISR0_REG); 797 PCIE_ISR1_REG);
791 798
792 virq = irq_find_mapping(pcie->irq_domain, i); 799 virq = irq_find_mapping(pcie->irq_domain, i);
793 generic_handle_irq(virq); 800 generic_handle_irq(virq);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 6ace47099fc5..b9a131137e64 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -958,10 +958,11 @@ static int pci_pm_freeze(struct device *dev)
958 * devices should not be touched during freeze/thaw transitions, 958 * devices should not be touched during freeze/thaw transitions,
959 * however. 959 * however.
960 */ 960 */
961 if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) 961 if (!dev_pm_smart_suspend_and_suspended(dev)) {
962 pm_runtime_resume(dev); 962 pm_runtime_resume(dev);
963 pci_dev->state_saved = false;
964 }
963 965
964 pci_dev->state_saved = false;
965 if (pm->freeze) { 966 if (pm->freeze) {
966 int error; 967 int error;
967 968
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e597655a5643..a04197ce767d 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -5273,11 +5273,11 @@ void pcie_print_link_status(struct pci_dev *dev)
5273 bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width); 5273 bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
5274 5274
5275 if (bw_avail >= bw_cap) 5275 if (bw_avail >= bw_cap)
5276 pci_info(dev, "%u.%03u Gb/s available bandwidth (%s x%d link)\n", 5276 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
5277 bw_cap / 1000, bw_cap % 1000, 5277 bw_cap / 1000, bw_cap % 1000,
5278 PCIE_SPEED2STR(speed_cap), width_cap); 5278 PCIE_SPEED2STR(speed_cap), width_cap);
5279 else 5279 else
5280 pci_info(dev, "%u.%03u Gb/s available bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n", 5280 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
5281 bw_avail / 1000, bw_avail % 1000, 5281 bw_avail / 1000, bw_avail % 1000,
5282 PCIE_SPEED2STR(speed), width, 5282 PCIE_SPEED2STR(speed), width,
5283 limiting_dev ? pci_name(limiting_dev) : "<unknown>", 5283 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 9d27016c899e..0434ab7b6497 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -740,10 +740,7 @@ static int do_dma_request(struct mport_dma_req *req,
740 tx->callback = dma_xfer_callback; 740 tx->callback = dma_xfer_callback;
741 tx->callback_param = req; 741 tx->callback_param = req;
742 742
743 req->dmach = chan;
744 req->sync = sync;
745 req->status = DMA_IN_PROGRESS; 743 req->status = DMA_IN_PROGRESS;
746 init_completion(&req->req_comp);
747 kref_get(&req->refcount); 744 kref_get(&req->refcount);
748 745
749 cookie = dmaengine_submit(tx); 746 cookie = dmaengine_submit(tx);
@@ -831,13 +828,20 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
831 if (!req) 828 if (!req)
832 return -ENOMEM; 829 return -ENOMEM;
833 830
834 kref_init(&req->refcount);
835
836 ret = get_dma_channel(priv); 831 ret = get_dma_channel(priv);
837 if (ret) { 832 if (ret) {
838 kfree(req); 833 kfree(req);
839 return ret; 834 return ret;
840 } 835 }
836 chan = priv->dmach;
837
838 kref_init(&req->refcount);
839 init_completion(&req->req_comp);
840 req->dir = dir;
841 req->filp = filp;
842 req->priv = priv;
843 req->dmach = chan;
844 req->sync = sync;
841 845
842 /* 846 /*
843 * If parameter loc_addr != NULL, we are transferring data from/to 847 * If parameter loc_addr != NULL, we are transferring data from/to
@@ -925,11 +929,6 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
925 xfer->offset, xfer->length); 929 xfer->offset, xfer->length);
926 } 930 }
927 931
928 req->dir = dir;
929 req->filp = filp;
930 req->priv = priv;
931 chan = priv->dmach;
932
933 nents = dma_map_sg(chan->device->dev, 932 nents = dma_map_sg(chan->device->dev,
934 req->sgt.sgl, req->sgt.nents, dir); 933 req->sgt.sgl, req->sgt.nents, dir);
935 if (nents == 0) { 934 if (nents == 0) {
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
index 304e891e35fc..60f2250fd96b 100644
--- a/drivers/rtc/rtc-opal.c
+++ b/drivers/rtc/rtc-opal.c
@@ -57,7 +57,7 @@ static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms)
57 57
58static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) 58static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
59{ 59{
60 long rc = OPAL_BUSY; 60 s64 rc = OPAL_BUSY;
61 int retries = 10; 61 int retries = 10;
62 u32 y_m_d; 62 u32 y_m_d;
63 u64 h_m_s_ms; 63 u64 h_m_s_ms;
@@ -66,13 +66,17 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
66 66
67 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { 67 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
68 rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); 68 rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
69 if (rc == OPAL_BUSY_EVENT) 69 if (rc == OPAL_BUSY_EVENT) {
70 msleep(OPAL_BUSY_DELAY_MS);
70 opal_poll_events(NULL); 71 opal_poll_events(NULL);
71 else if (retries-- && (rc == OPAL_HARDWARE 72 } else if (rc == OPAL_BUSY) {
72 || rc == OPAL_INTERNAL_ERROR)) 73 msleep(OPAL_BUSY_DELAY_MS);
73 msleep(10); 74 } else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) {
74 else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT) 75 if (retries--) {
75 break; 76 msleep(10); /* Wait 10ms before retry */
77 rc = OPAL_BUSY; /* go around again */
78 }
79 }
76 } 80 }
77 81
78 if (rc != OPAL_SUCCESS) 82 if (rc != OPAL_SUCCESS)
@@ -87,21 +91,26 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
87 91
88static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm) 92static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
89{ 93{
90 long rc = OPAL_BUSY; 94 s64 rc = OPAL_BUSY;
91 int retries = 10; 95 int retries = 10;
92 u32 y_m_d = 0; 96 u32 y_m_d = 0;
93 u64 h_m_s_ms = 0; 97 u64 h_m_s_ms = 0;
94 98
95 tm_to_opal(tm, &y_m_d, &h_m_s_ms); 99 tm_to_opal(tm, &y_m_d, &h_m_s_ms);
100
96 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { 101 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
97 rc = opal_rtc_write(y_m_d, h_m_s_ms); 102 rc = opal_rtc_write(y_m_d, h_m_s_ms);
98 if (rc == OPAL_BUSY_EVENT) 103 if (rc == OPAL_BUSY_EVENT) {
104 msleep(OPAL_BUSY_DELAY_MS);
99 opal_poll_events(NULL); 105 opal_poll_events(NULL);
100 else if (retries-- && (rc == OPAL_HARDWARE 106 } else if (rc == OPAL_BUSY) {
101 || rc == OPAL_INTERNAL_ERROR)) 107 msleep(OPAL_BUSY_DELAY_MS);
102 msleep(10); 108 } else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) {
103 else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT) 109 if (retries--) {
104 break; 110 msleep(10); /* Wait 10ms before retry */
111 rc = OPAL_BUSY; /* go around again */
112 }
113 }
105 } 114 }
106 115
107 return rc == OPAL_SUCCESS ? 0 : -EIO; 116 return rc == OPAL_SUCCESS ? 0 : -EIO;
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 62f5f04d8f61..5e963fe0e38d 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -592,13 +592,22 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
592int dasd_alias_add_device(struct dasd_device *device) 592int dasd_alias_add_device(struct dasd_device *device)
593{ 593{
594 struct dasd_eckd_private *private = device->private; 594 struct dasd_eckd_private *private = device->private;
595 struct alias_lcu *lcu; 595 __u8 uaddr = private->uid.real_unit_addr;
596 struct alias_lcu *lcu = private->lcu;
596 unsigned long flags; 597 unsigned long flags;
597 int rc; 598 int rc;
598 599
599 lcu = private->lcu;
600 rc = 0; 600 rc = 0;
601 spin_lock_irqsave(&lcu->lock, flags); 601 spin_lock_irqsave(&lcu->lock, flags);
602 /*
603 * Check if device and lcu type differ. If so, the uac data may be
604 * outdated and needs to be updated.
605 */
606 if (private->uid.type != lcu->uac->unit[uaddr].ua_type) {
607 lcu->flags |= UPDATE_PENDING;
608 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
609 "uid type mismatch - trigger rescan");
610 }
602 if (!(lcu->flags & UPDATE_PENDING)) { 611 if (!(lcu->flags & UPDATE_PENDING)) {
603 rc = _add_device_to_lcu(lcu, device, device); 612 rc = _add_device_to_lcu(lcu, device, device);
604 if (rc) 613 if (rc)
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index f035c2f25d35..131f1989f6f3 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -27,7 +27,6 @@
27#include <asm/io.h> 27#include <asm/io.h>
28#include <asm/irq.h> 28#include <asm/irq.h>
29#include <asm/vtoc.h> 29#include <asm/vtoc.h>
30#include <asm/diag.h>
31 30
32#include "dasd_int.h" 31#include "dasd_int.h"
33#include "dasd_diag.h" 32#include "dasd_diag.h"
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
index 5f8d9ea69ebd..eceba3858cef 100644
--- a/drivers/s390/char/sclp_early_core.c
+++ b/drivers/s390/char/sclp_early_core.c
@@ -18,7 +18,7 @@ int sclp_init_state __section(.data) = sclp_init_state_uninitialized;
18 * Used to keep track of the size of the event masks. Qemu until version 2.11 18 * Used to keep track of the size of the event masks. Qemu until version 2.11
19 * only supports 4 and needs a workaround. 19 * only supports 4 and needs a workaround.
20 */ 20 */
21bool sclp_mask_compat_mode; 21bool sclp_mask_compat_mode __section(.data);
22 22
23void sclp_early_wait_irq(void) 23void sclp_early_wait_irq(void)
24{ 24{
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 6652a49a49b1..9029804dcd22 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -452,6 +452,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
452 452
453static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area) 453static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
454{ 454{
455 struct channel_path *chp;
455 struct chp_link link; 456 struct chp_link link;
456 struct chp_id chpid; 457 struct chp_id chpid;
457 int status; 458 int status;
@@ -464,10 +465,17 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
464 chpid.id = sei_area->rsid; 465 chpid.id = sei_area->rsid;
465 /* allocate a new channel path structure, if needed */ 466 /* allocate a new channel path structure, if needed */
466 status = chp_get_status(chpid); 467 status = chp_get_status(chpid);
467 if (status < 0) 468 if (!status)
468 chp_new(chpid);
469 else if (!status)
470 return; 469 return;
470
471 if (status < 0) {
472 chp_new(chpid);
473 } else {
474 chp = chpid_to_chp(chpid);
475 mutex_lock(&chp->lock);
476 chp_update_desc(chp);
477 mutex_unlock(&chp->lock);
478 }
471 memset(&link, 0, sizeof(struct chp_link)); 479 memset(&link, 0, sizeof(struct chp_link));
472 link.chpid = chpid; 480 link.chpid = chpid;
473 if ((sei_area->vf & 0xc0) != 0) { 481 if ((sei_area->vf & 0xc0) != 0) {
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index ff6963ad6e39..3c800642134e 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -20,12 +20,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
20 int ccode; 20 int ccode;
21 __u8 lpm; 21 __u8 lpm;
22 unsigned long flags; 22 unsigned long flags;
23 int ret;
23 24
24 sch = private->sch; 25 sch = private->sch;
25 26
26 spin_lock_irqsave(sch->lock, flags); 27 spin_lock_irqsave(sch->lock, flags);
27 private->state = VFIO_CCW_STATE_BUSY; 28 private->state = VFIO_CCW_STATE_BUSY;
28 spin_unlock_irqrestore(sch->lock, flags);
29 29
30 orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm); 30 orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
31 31
@@ -38,10 +38,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
38 * Initialize device status information 38 * Initialize device status information
39 */ 39 */
40 sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND; 40 sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
41 return 0; 41 ret = 0;
42 break;
42 case 1: /* Status pending */ 43 case 1: /* Status pending */
43 case 2: /* Busy */ 44 case 2: /* Busy */
44 return -EBUSY; 45 ret = -EBUSY;
46 break;
45 case 3: /* Device/path not operational */ 47 case 3: /* Device/path not operational */
46 { 48 {
47 lpm = orb->cmd.lpm; 49 lpm = orb->cmd.lpm;
@@ -51,13 +53,16 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
51 sch->lpm = 0; 53 sch->lpm = 0;
52 54
53 if (cio_update_schib(sch)) 55 if (cio_update_schib(sch))
54 return -ENODEV; 56 ret = -ENODEV;
55 57 else
56 return sch->lpm ? -EACCES : -ENODEV; 58 ret = sch->lpm ? -EACCES : -ENODEV;
59 break;
57 } 60 }
58 default: 61 default:
59 return ccode; 62 ret = ccode;
60 } 63 }
64 spin_unlock_irqrestore(sch->lock, flags);
65 return ret;
61} 66}
62 67
63static void fsm_notoper(struct vfio_ccw_private *private, 68static void fsm_notoper(struct vfio_ccw_private *private,
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 4326715dc13e..78b98b3e7efa 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -557,7 +557,6 @@ enum qeth_prot_versions {
557enum qeth_cmd_buffer_state { 557enum qeth_cmd_buffer_state {
558 BUF_STATE_FREE, 558 BUF_STATE_FREE,
559 BUF_STATE_LOCKED, 559 BUF_STATE_LOCKED,
560 BUF_STATE_PROCESSED,
561}; 560};
562 561
563enum qeth_cq { 562enum qeth_cq {
@@ -601,7 +600,6 @@ struct qeth_channel {
601 struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO]; 600 struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
602 atomic_t irq_pending; 601 atomic_t irq_pending;
603 int io_buf_no; 602 int io_buf_no;
604 int buf_no;
605}; 603};
606 604
607/** 605/**
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 04fefa5bb08d..dffd820731f2 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -706,7 +706,6 @@ void qeth_clear_ipacmd_list(struct qeth_card *card)
706 qeth_put_reply(reply); 706 qeth_put_reply(reply);
707 } 707 }
708 spin_unlock_irqrestore(&card->lock, flags); 708 spin_unlock_irqrestore(&card->lock, flags);
709 atomic_set(&card->write.irq_pending, 0);
710} 709}
711EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list); 710EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
712 711
@@ -818,7 +817,6 @@ void qeth_clear_cmd_buffers(struct qeth_channel *channel)
818 817
819 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) 818 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
820 qeth_release_buffer(channel, &channel->iob[cnt]); 819 qeth_release_buffer(channel, &channel->iob[cnt]);
821 channel->buf_no = 0;
822 channel->io_buf_no = 0; 820 channel->io_buf_no = 0;
823} 821}
824EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers); 822EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
@@ -924,7 +922,6 @@ static int qeth_setup_channel(struct qeth_channel *channel)
924 kfree(channel->iob[cnt].data); 922 kfree(channel->iob[cnt].data);
925 return -ENOMEM; 923 return -ENOMEM;
926 } 924 }
927 channel->buf_no = 0;
928 channel->io_buf_no = 0; 925 channel->io_buf_no = 0;
929 atomic_set(&channel->irq_pending, 0); 926 atomic_set(&channel->irq_pending, 0);
930 spin_lock_init(&channel->iob_lock); 927 spin_lock_init(&channel->iob_lock);
@@ -1100,16 +1097,9 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
1100{ 1097{
1101 int rc; 1098 int rc;
1102 int cstat, dstat; 1099 int cstat, dstat;
1103 struct qeth_cmd_buffer *buffer; 1100 struct qeth_cmd_buffer *iob = NULL;
1104 struct qeth_channel *channel; 1101 struct qeth_channel *channel;
1105 struct qeth_card *card; 1102 struct qeth_card *card;
1106 struct qeth_cmd_buffer *iob;
1107 __u8 index;
1108
1109 if (__qeth_check_irb_error(cdev, intparm, irb))
1110 return;
1111 cstat = irb->scsw.cmd.cstat;
1112 dstat = irb->scsw.cmd.dstat;
1113 1103
1114 card = CARD_FROM_CDEV(cdev); 1104 card = CARD_FROM_CDEV(cdev);
1115 if (!card) 1105 if (!card)
@@ -1127,6 +1117,19 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
1127 channel = &card->data; 1117 channel = &card->data;
1128 QETH_CARD_TEXT(card, 5, "data"); 1118 QETH_CARD_TEXT(card, 5, "data");
1129 } 1119 }
1120
1121 if (qeth_intparm_is_iob(intparm))
1122 iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
1123
1124 if (__qeth_check_irb_error(cdev, intparm, irb)) {
1125 /* IO was terminated, free its resources. */
1126 if (iob)
1127 qeth_release_buffer(iob->channel, iob);
1128 atomic_set(&channel->irq_pending, 0);
1129 wake_up(&card->wait_q);
1130 return;
1131 }
1132
1130 atomic_set(&channel->irq_pending, 0); 1133 atomic_set(&channel->irq_pending, 0);
1131 1134
1132 if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC)) 1135 if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
@@ -1150,6 +1153,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
1150 /* we don't have to handle this further */ 1153 /* we don't have to handle this further */
1151 intparm = 0; 1154 intparm = 0;
1152 } 1155 }
1156
1157 cstat = irb->scsw.cmd.cstat;
1158 dstat = irb->scsw.cmd.dstat;
1159
1153 if ((dstat & DEV_STAT_UNIT_EXCEP) || 1160 if ((dstat & DEV_STAT_UNIT_EXCEP) ||
1154 (dstat & DEV_STAT_UNIT_CHECK) || 1161 (dstat & DEV_STAT_UNIT_CHECK) ||
1155 (cstat)) { 1162 (cstat)) {
@@ -1182,25 +1189,15 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
1182 channel->state = CH_STATE_RCD_DONE; 1189 channel->state = CH_STATE_RCD_DONE;
1183 goto out; 1190 goto out;
1184 } 1191 }
1185 if (intparm) {
1186 buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
1187 buffer->state = BUF_STATE_PROCESSED;
1188 }
1189 if (channel == &card->data) 1192 if (channel == &card->data)
1190 return; 1193 return;
1191 if (channel == &card->read && 1194 if (channel == &card->read &&
1192 channel->state == CH_STATE_UP) 1195 channel->state == CH_STATE_UP)
1193 __qeth_issue_next_read(card); 1196 __qeth_issue_next_read(card);
1194 1197
1195 iob = channel->iob; 1198 if (iob && iob->callback)
1196 index = channel->buf_no; 1199 iob->callback(iob->channel, iob);
1197 while (iob[index].state == BUF_STATE_PROCESSED) {
1198 if (iob[index].callback != NULL)
1199 iob[index].callback(channel, iob + index);
1200 1200
1201 index = (index + 1) % QETH_CMD_BUFFER_NO;
1202 }
1203 channel->buf_no = index;
1204out: 1201out:
1205 wake_up(&card->wait_q); 1202 wake_up(&card->wait_q);
1206 return; 1203 return;
@@ -1870,8 +1867,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
1870 atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); 1867 atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
1871 QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); 1868 QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
1872 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); 1869 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1873 rc = ccw_device_start(channel->ccwdev, 1870 rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
1874 &channel->ccw, (addr_t) iob, 0, 0); 1871 (addr_t) iob, 0, 0, QETH_TIMEOUT);
1875 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); 1872 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1876 1873
1877 if (rc) { 1874 if (rc) {
@@ -1888,7 +1885,6 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
1888 if (channel->state != CH_STATE_UP) { 1885 if (channel->state != CH_STATE_UP) {
1889 rc = -ETIME; 1886 rc = -ETIME;
1890 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); 1887 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
1891 qeth_clear_cmd_buffers(channel);
1892 } else 1888 } else
1893 rc = 0; 1889 rc = 0;
1894 return rc; 1890 return rc;
@@ -1942,8 +1938,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
1942 atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); 1938 atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
1943 QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); 1939 QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
1944 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); 1940 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1945 rc = ccw_device_start(channel->ccwdev, 1941 rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
1946 &channel->ccw, (addr_t) iob, 0, 0); 1942 (addr_t) iob, 0, 0, QETH_TIMEOUT);
1947 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); 1943 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1948 1944
1949 if (rc) { 1945 if (rc) {
@@ -1964,7 +1960,6 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
1964 QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n", 1960 QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n",
1965 dev_name(&channel->ccwdev->dev)); 1961 dev_name(&channel->ccwdev->dev));
1966 QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME); 1962 QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
1967 qeth_clear_cmd_buffers(channel);
1968 return -ETIME; 1963 return -ETIME;
1969 } 1964 }
1970 return qeth_idx_activate_get_answer(channel, idx_reply_cb); 1965 return qeth_idx_activate_get_answer(channel, idx_reply_cb);
@@ -2166,8 +2161,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
2166 2161
2167 QETH_CARD_TEXT(card, 6, "noirqpnd"); 2162 QETH_CARD_TEXT(card, 6, "noirqpnd");
2168 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); 2163 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
2169 rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, 2164 rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw,
2170 (addr_t) iob, 0, 0); 2165 (addr_t) iob, 0, 0, event_timeout);
2171 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); 2166 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
2172 if (rc) { 2167 if (rc) {
2173 QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: " 2168 QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
@@ -2199,8 +2194,6 @@ int qeth_send_control_data(struct qeth_card *card, int len,
2199 } 2194 }
2200 } 2195 }
2201 2196
2202 if (reply->rc == -EIO)
2203 goto error;
2204 rc = reply->rc; 2197 rc = reply->rc;
2205 qeth_put_reply(reply); 2198 qeth_put_reply(reply);
2206 return rc; 2199 return rc;
@@ -2211,10 +2204,6 @@ time_err:
2211 list_del_init(&reply->list); 2204 list_del_init(&reply->list);
2212 spin_unlock_irqrestore(&reply->card->lock, flags); 2205 spin_unlock_irqrestore(&reply->card->lock, flags);
2213 atomic_inc(&reply->received); 2206 atomic_inc(&reply->received);
2214error:
2215 atomic_set(&card->write.irq_pending, 0);
2216 qeth_release_buffer(iob->channel, iob);
2217 card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO;
2218 rc = reply->rc; 2207 rc = reply->rc;
2219 qeth_put_reply(reply); 2208 qeth_put_reply(reply);
2220 return rc; 2209 return rc;
@@ -3033,28 +3022,23 @@ static int qeth_send_startlan(struct qeth_card *card)
3033 return rc; 3022 return rc;
3034} 3023}
3035 3024
3036static int qeth_default_setadapterparms_cb(struct qeth_card *card, 3025static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
3037 struct qeth_reply *reply, unsigned long data)
3038{ 3026{
3039 struct qeth_ipa_cmd *cmd; 3027 if (!cmd->hdr.return_code)
3040
3041 QETH_CARD_TEXT(card, 4, "defadpcb");
3042
3043 cmd = (struct qeth_ipa_cmd *) data;
3044 if (cmd->hdr.return_code == 0)
3045 cmd->hdr.return_code = 3028 cmd->hdr.return_code =
3046 cmd->data.setadapterparms.hdr.return_code; 3029 cmd->data.setadapterparms.hdr.return_code;
3047 return 0; 3030 return cmd->hdr.return_code;
3048} 3031}
3049 3032
3050static int qeth_query_setadapterparms_cb(struct qeth_card *card, 3033static int qeth_query_setadapterparms_cb(struct qeth_card *card,
3051 struct qeth_reply *reply, unsigned long data) 3034 struct qeth_reply *reply, unsigned long data)
3052{ 3035{
3053 struct qeth_ipa_cmd *cmd; 3036 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3054 3037
3055 QETH_CARD_TEXT(card, 3, "quyadpcb"); 3038 QETH_CARD_TEXT(card, 3, "quyadpcb");
3039 if (qeth_setadpparms_inspect_rc(cmd))
3040 return 0;
3056 3041
3057 cmd = (struct qeth_ipa_cmd *) data;
3058 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) { 3042 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
3059 card->info.link_type = 3043 card->info.link_type =
3060 cmd->data.setadapterparms.data.query_cmds_supp.lan_type; 3044 cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
@@ -3062,7 +3046,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
3062 } 3046 }
3063 card->options.adp.supported_funcs = 3047 card->options.adp.supported_funcs =
3064 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds; 3048 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
3065 return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); 3049 return 0;
3066} 3050}
3067 3051
3068static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, 3052static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
@@ -3154,22 +3138,20 @@ EXPORT_SYMBOL_GPL(qeth_query_ipassists);
3154static int qeth_query_switch_attributes_cb(struct qeth_card *card, 3138static int qeth_query_switch_attributes_cb(struct qeth_card *card,
3155 struct qeth_reply *reply, unsigned long data) 3139 struct qeth_reply *reply, unsigned long data)
3156{ 3140{
3157 struct qeth_ipa_cmd *cmd; 3141 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3158 struct qeth_switch_info *sw_info;
3159 struct qeth_query_switch_attributes *attrs; 3142 struct qeth_query_switch_attributes *attrs;
3143 struct qeth_switch_info *sw_info;
3160 3144
3161 QETH_CARD_TEXT(card, 2, "qswiatcb"); 3145 QETH_CARD_TEXT(card, 2, "qswiatcb");
3162 cmd = (struct qeth_ipa_cmd *) data; 3146 if (qeth_setadpparms_inspect_rc(cmd))
3163 sw_info = (struct qeth_switch_info *)reply->param; 3147 return 0;
3164 if (cmd->data.setadapterparms.hdr.return_code == 0) {
3165 attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
3166 sw_info->capabilities = attrs->capabilities;
3167 sw_info->settings = attrs->settings;
3168 QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
3169 sw_info->settings);
3170 }
3171 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
3172 3148
3149 sw_info = (struct qeth_switch_info *)reply->param;
3150 attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
3151 sw_info->capabilities = attrs->capabilities;
3152 sw_info->settings = attrs->settings;
3153 QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
3154 sw_info->settings);
3173 return 0; 3155 return 0;
3174} 3156}
3175 3157
@@ -4207,16 +4189,13 @@ EXPORT_SYMBOL_GPL(qeth_do_send_packet);
4207static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, 4189static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
4208 struct qeth_reply *reply, unsigned long data) 4190 struct qeth_reply *reply, unsigned long data)
4209{ 4191{
4210 struct qeth_ipa_cmd *cmd; 4192 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4211 struct qeth_ipacmd_setadpparms *setparms; 4193 struct qeth_ipacmd_setadpparms *setparms;
4212 4194
4213 QETH_CARD_TEXT(card, 4, "prmadpcb"); 4195 QETH_CARD_TEXT(card, 4, "prmadpcb");
4214 4196
4215 cmd = (struct qeth_ipa_cmd *) data;
4216 setparms = &(cmd->data.setadapterparms); 4197 setparms = &(cmd->data.setadapterparms);
4217 4198 if (qeth_setadpparms_inspect_rc(cmd)) {
4218 qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
4219 if (cmd->hdr.return_code) {
4220 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code); 4199 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
4221 setparms->data.mode = SET_PROMISC_MODE_OFF; 4200 setparms->data.mode = SET_PROMISC_MODE_OFF;
4222 } 4201 }
@@ -4286,18 +4265,18 @@ EXPORT_SYMBOL_GPL(qeth_get_stats);
4286static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, 4265static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
4287 struct qeth_reply *reply, unsigned long data) 4266 struct qeth_reply *reply, unsigned long data)
4288{ 4267{
4289 struct qeth_ipa_cmd *cmd; 4268 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4290 4269
4291 QETH_CARD_TEXT(card, 4, "chgmaccb"); 4270 QETH_CARD_TEXT(card, 4, "chgmaccb");
4271 if (qeth_setadpparms_inspect_rc(cmd))
4272 return 0;
4292 4273
4293 cmd = (struct qeth_ipa_cmd *) data;
4294 if (!card->options.layer2 || 4274 if (!card->options.layer2 ||
4295 !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) { 4275 !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
4296 ether_addr_copy(card->dev->dev_addr, 4276 ether_addr_copy(card->dev->dev_addr,
4297 cmd->data.setadapterparms.data.change_addr.addr); 4277 cmd->data.setadapterparms.data.change_addr.addr);
4298 card->info.mac_bits |= QETH_LAYER2_MAC_READ; 4278 card->info.mac_bits |= QETH_LAYER2_MAC_READ;
4299 } 4279 }
4300 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
4301 return 0; 4280 return 0;
4302} 4281}
4303 4282
@@ -4328,13 +4307,15 @@ EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
4328static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, 4307static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4329 struct qeth_reply *reply, unsigned long data) 4308 struct qeth_reply *reply, unsigned long data)
4330{ 4309{
4331 struct qeth_ipa_cmd *cmd; 4310 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4332 struct qeth_set_access_ctrl *access_ctrl_req; 4311 struct qeth_set_access_ctrl *access_ctrl_req;
4333 int fallback = *(int *)reply->param; 4312 int fallback = *(int *)reply->param;
4334 4313
4335 QETH_CARD_TEXT(card, 4, "setaccb"); 4314 QETH_CARD_TEXT(card, 4, "setaccb");
4315 if (cmd->hdr.return_code)
4316 return 0;
4317 qeth_setadpparms_inspect_rc(cmd);
4336 4318
4337 cmd = (struct qeth_ipa_cmd *) data;
4338 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; 4319 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4339 QETH_DBF_TEXT_(SETUP, 2, "setaccb"); 4320 QETH_DBF_TEXT_(SETUP, 2, "setaccb");
4340 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name); 4321 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
@@ -4407,7 +4388,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4407 card->options.isolation = card->options.prev_isolation; 4388 card->options.isolation = card->options.prev_isolation;
4408 break; 4389 break;
4409 } 4390 }
4410 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
4411 return 0; 4391 return 0;
4412} 4392}
4413 4393
@@ -4695,14 +4675,15 @@ out:
4695static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, 4675static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
4696 struct qeth_reply *reply, unsigned long data) 4676 struct qeth_reply *reply, unsigned long data)
4697{ 4677{
4698 struct qeth_ipa_cmd *cmd; 4678 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4699 struct qeth_qoat_priv *priv; 4679 struct qeth_qoat_priv *priv;
4700 char *resdata; 4680 char *resdata;
4701 int resdatalen; 4681 int resdatalen;
4702 4682
4703 QETH_CARD_TEXT(card, 3, "qoatcb"); 4683 QETH_CARD_TEXT(card, 3, "qoatcb");
4684 if (qeth_setadpparms_inspect_rc(cmd))
4685 return 0;
4704 4686
4705 cmd = (struct qeth_ipa_cmd *)data;
4706 priv = (struct qeth_qoat_priv *)reply->param; 4687 priv = (struct qeth_qoat_priv *)reply->param;
4707 resdatalen = cmd->data.setadapterparms.hdr.cmdlength; 4688 resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
4708 resdata = (char *)data + 28; 4689 resdata = (char *)data + 28;
@@ -4796,21 +4777,18 @@ out:
4796static int qeth_query_card_info_cb(struct qeth_card *card, 4777static int qeth_query_card_info_cb(struct qeth_card *card,
4797 struct qeth_reply *reply, unsigned long data) 4778 struct qeth_reply *reply, unsigned long data)
4798{ 4779{
4799 struct qeth_ipa_cmd *cmd; 4780 struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
4781 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4800 struct qeth_query_card_info *card_info; 4782 struct qeth_query_card_info *card_info;
4801 struct carrier_info *carrier_info;
4802 4783
4803 QETH_CARD_TEXT(card, 2, "qcrdincb"); 4784 QETH_CARD_TEXT(card, 2, "qcrdincb");
4804 carrier_info = (struct carrier_info *)reply->param; 4785 if (qeth_setadpparms_inspect_rc(cmd))
4805 cmd = (struct qeth_ipa_cmd *)data; 4786 return 0;
4806 card_info = &cmd->data.setadapterparms.data.card_info;
4807 if (cmd->data.setadapterparms.hdr.return_code == 0) {
4808 carrier_info->card_type = card_info->card_type;
4809 carrier_info->port_mode = card_info->port_mode;
4810 carrier_info->port_speed = card_info->port_speed;
4811 }
4812 4787
4813 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); 4788 card_info = &cmd->data.setadapterparms.data.card_info;
4789 carrier_info->card_type = card_info->card_type;
4790 carrier_info->port_mode = card_info->port_mode;
4791 carrier_info->port_speed = card_info->port_speed;
4814 return 0; 4792 return 0;
4815} 4793}
4816 4794
@@ -4857,7 +4835,7 @@ int qeth_vm_request_mac(struct qeth_card *card)
4857 goto out; 4835 goto out;
4858 } 4836 }
4859 4837
4860 ccw_device_get_id(CARD_DDEV(card), &id); 4838 ccw_device_get_id(CARD_RDEV(card), &id);
4861 request->resp_buf_len = sizeof(*response); 4839 request->resp_buf_len = sizeof(*response);
4862 request->resp_version = DIAG26C_VERSION2; 4840 request->resp_version = DIAG26C_VERSION2;
4863 request->op_code = DIAG26C_GET_MAC; 4841 request->op_code = DIAG26C_GET_MAC;
@@ -6563,10 +6541,14 @@ static int __init qeth_core_init(void)
6563 mutex_init(&qeth_mod_mutex); 6541 mutex_init(&qeth_mod_mutex);
6564 6542
6565 qeth_wq = create_singlethread_workqueue("qeth_wq"); 6543 qeth_wq = create_singlethread_workqueue("qeth_wq");
6544 if (!qeth_wq) {
6545 rc = -ENOMEM;
6546 goto out_err;
6547 }
6566 6548
6567 rc = qeth_register_dbf_views(); 6549 rc = qeth_register_dbf_views();
6568 if (rc) 6550 if (rc)
6569 goto out_err; 6551 goto dbf_err;
6570 qeth_core_root_dev = root_device_register("qeth"); 6552 qeth_core_root_dev = root_device_register("qeth");
6571 rc = PTR_ERR_OR_ZERO(qeth_core_root_dev); 6553 rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
6572 if (rc) 6554 if (rc)
@@ -6603,6 +6585,8 @@ slab_err:
6603 root_device_unregister(qeth_core_root_dev); 6585 root_device_unregister(qeth_core_root_dev);
6604register_err: 6586register_err:
6605 qeth_unregister_dbf_views(); 6587 qeth_unregister_dbf_views();
6588dbf_err:
6589 destroy_workqueue(qeth_wq);
6606out_err: 6590out_err:
6607 pr_err("Initializing the qeth device driver failed\n"); 6591 pr_err("Initializing the qeth device driver failed\n");
6608 return rc; 6592 return rc;
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 619f897b4bb0..f4d1ec0b8f5a 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -35,6 +35,18 @@ extern unsigned char IPA_PDU_HEADER[];
35#define QETH_HALT_CHANNEL_PARM -11 35#define QETH_HALT_CHANNEL_PARM -11
36#define QETH_RCD_PARM -12 36#define QETH_RCD_PARM -12
37 37
38static inline bool qeth_intparm_is_iob(unsigned long intparm)
39{
40 switch (intparm) {
41 case QETH_CLEAR_CHANNEL_PARM:
42 case QETH_HALT_CHANNEL_PARM:
43 case QETH_RCD_PARM:
44 case 0:
45 return false;
46 }
47 return true;
48}
49
38/*****************************************************************************/ 50/*****************************************************************************/
39/* IP Assist related definitions */ 51/* IP Assist related definitions */
40/*****************************************************************************/ 52/*****************************************************************************/
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 50a313806dde..b8079f2a65b3 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -21,7 +21,6 @@
21#include <linux/list.h> 21#include <linux/list.h>
22#include <linux/hash.h> 22#include <linux/hash.h>
23#include <linux/hashtable.h> 23#include <linux/hashtable.h>
24#include <linux/string.h>
25#include <asm/setup.h> 24#include <asm/setup.h>
26#include "qeth_core.h" 25#include "qeth_core.h"
27#include "qeth_l2.h" 26#include "qeth_l2.h"
@@ -122,13 +121,10 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
122 QETH_CARD_TEXT(card, 2, "L2Setmac"); 121 QETH_CARD_TEXT(card, 2, "L2Setmac");
123 rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC); 122 rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
124 if (rc == 0) { 123 if (rc == 0) {
125 card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
126 ether_addr_copy(card->dev->dev_addr, mac);
127 dev_info(&card->gdev->dev, 124 dev_info(&card->gdev->dev,
128 "MAC address %pM successfully registered on device %s\n", 125 "MAC address %pM successfully registered on device %s\n",
129 card->dev->dev_addr, card->dev->name); 126 mac, card->dev->name);
130 } else { 127 } else {
131 card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
132 switch (rc) { 128 switch (rc) {
133 case -EEXIST: 129 case -EEXIST:
134 dev_warn(&card->gdev->dev, 130 dev_warn(&card->gdev->dev,
@@ -143,19 +139,6 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
143 return rc; 139 return rc;
144} 140}
145 141
146static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
147{
148 int rc;
149
150 QETH_CARD_TEXT(card, 2, "L2Delmac");
151 if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
152 return 0;
153 rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC);
154 if (rc == 0)
155 card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
156 return rc;
157}
158
159static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) 142static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
160{ 143{
161 enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ? 144 enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
@@ -520,6 +503,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
520{ 503{
521 struct sockaddr *addr = p; 504 struct sockaddr *addr = p;
522 struct qeth_card *card = dev->ml_priv; 505 struct qeth_card *card = dev->ml_priv;
506 u8 old_addr[ETH_ALEN];
523 int rc = 0; 507 int rc = 0;
524 508
525 QETH_CARD_TEXT(card, 3, "setmac"); 509 QETH_CARD_TEXT(card, 3, "setmac");
@@ -531,14 +515,35 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
531 return -EOPNOTSUPP; 515 return -EOPNOTSUPP;
532 } 516 }
533 QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN); 517 QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN);
518 if (!is_valid_ether_addr(addr->sa_data))
519 return -EADDRNOTAVAIL;
520
534 if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { 521 if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
535 QETH_CARD_TEXT(card, 3, "setmcREC"); 522 QETH_CARD_TEXT(card, 3, "setmcREC");
536 return -ERESTARTSYS; 523 return -ERESTARTSYS;
537 } 524 }
538 rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); 525
539 if (!rc || (rc == -ENOENT)) 526 if (!qeth_card_hw_is_reachable(card)) {
540 rc = qeth_l2_send_setmac(card, addr->sa_data); 527 ether_addr_copy(dev->dev_addr, addr->sa_data);
541 return rc ? -EINVAL : 0; 528 return 0;
529 }
530
531 /* don't register the same address twice */
532 if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
533 (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
534 return 0;
535
536 /* add the new address, switch over, drop the old */
537 rc = qeth_l2_send_setmac(card, addr->sa_data);
538 if (rc)
539 return rc;
540 ether_addr_copy(old_addr, dev->dev_addr);
541 ether_addr_copy(dev->dev_addr, addr->sa_data);
542
543 if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
544 qeth_l2_remove_mac(card, old_addr);
545 card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
546 return 0;
542} 547}
543 548
544static void qeth_promisc_to_bridge(struct qeth_card *card) 549static void qeth_promisc_to_bridge(struct qeth_card *card)
@@ -1068,8 +1073,9 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
1068 goto out_remove; 1073 goto out_remove;
1069 } 1074 }
1070 1075
1071 if (card->info.type != QETH_CARD_TYPE_OSN) 1076 if (card->info.type != QETH_CARD_TYPE_OSN &&
1072 qeth_l2_send_setmac(card, &card->dev->dev_addr[0]); 1077 !qeth_l2_send_setmac(card, card->dev->dev_addr))
1078 card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
1073 1079
1074 if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) { 1080 if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
1075 if (card->info.hwtrap && 1081 if (card->info.hwtrap &&
@@ -1339,8 +1345,8 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
1339 qeth_prepare_control_data(card, len, iob); 1345 qeth_prepare_control_data(card, len, iob);
1340 QETH_CARD_TEXT(card, 6, "osnoirqp"); 1346 QETH_CARD_TEXT(card, 6, "osnoirqp");
1341 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); 1347 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
1342 rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, 1348 rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw,
1343 (addr_t) iob, 0, 0); 1349 (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT);
1344 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); 1350 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
1345 if (rc) { 1351 if (rc) {
1346 QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: " 1352 QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 3b0c8b8a7634..066b5c3aaae6 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -176,7 +176,7 @@ static struct device_driver smsg_driver = {
176 176
177static void __exit smsg_exit(void) 177static void __exit smsg_exit(void)
178{ 178{
179 cpcmd("SET SMSG IUCV", NULL, 0, NULL); 179 cpcmd("SET SMSG OFF", NULL, 0, NULL);
180 device_unregister(smsg_dev); 180 device_unregister(smsg_dev);
181 iucv_unregister(&smsg_handler, 1); 181 iucv_unregister(&smsg_handler, 1);
182 driver_unregister(&smsg_driver); 182 driver_unregister(&smsg_driver);
diff --git a/drivers/sbus/char/oradax.c b/drivers/sbus/char/oradax.c
index c44d7c7ffc92..1754f55e2fac 100644
--- a/drivers/sbus/char/oradax.c
+++ b/drivers/sbus/char/oradax.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * This program is free software: you can redistribute it and/or modify 4 * This program is free software: you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation, either version 3 of the License, or 6 * the Free Software Foundation, either version 2 of the License, or
7 * (at your option) any later version. 7 * (at your option) any later version.
8 * 8 *
9 * This program is distributed in the hope that it will be useful, 9 * This program is distributed in the hope that it will be useful,
diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
index 884419c37e84..457ea1f8db30 100644
--- a/drivers/slimbus/messaging.c
+++ b/drivers/slimbus/messaging.c
@@ -183,7 +183,7 @@ static u16 slim_slicesize(int code)
183 0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7 183 0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7
184 }; 184 };
185 185
186 clamp(code, 1, (int)ARRAY_SIZE(sizetocode)); 186 code = clamp(code, 1, (int)ARRAY_SIZE(sizetocode));
187 187
188 return sizetocode[code - 1]; 188 return sizetocode[code - 1];
189} 189}
diff --git a/drivers/soc/bcm/raspberrypi-power.c b/drivers/soc/bcm/raspberrypi-power.c
index fe96a8b956fb..f7ed1187518b 100644
--- a/drivers/soc/bcm/raspberrypi-power.c
+++ b/drivers/soc/bcm/raspberrypi-power.c
@@ -45,7 +45,7 @@ struct rpi_power_domains {
45struct rpi_power_domain_packet { 45struct rpi_power_domain_packet {
46 u32 domain; 46 u32 domain;
47 u32 on; 47 u32 on;
48} __packet; 48};
49 49
50/* 50/*
51 * Asks the firmware to enable or disable power on a specific power 51 * Asks the firmware to enable or disable power on a specific power
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 6b5300ca44a6..885f5fcead77 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -1390,7 +1390,7 @@ static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif,
1390 } 1390 }
1391 1391
1392 if (hif_drv->usr_conn_req.ies) { 1392 if (hif_drv->usr_conn_req.ies) {
1393 conn_info.req_ies = kmemdup(conn_info.req_ies, 1393 conn_info.req_ies = kmemdup(hif_drv->usr_conn_req.ies,
1394 hif_drv->usr_conn_req.ies_len, 1394 hif_drv->usr_conn_req.ies_len,
1395 GFP_KERNEL); 1395 GFP_KERNEL);
1396 if (conn_info.req_ies) 1396 if (conn_info.req_ies)
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 3b3e1f6632d7..1dbe27c9946c 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -121,6 +121,9 @@ struct gsm_dlci {
121 struct mutex mutex; 121 struct mutex mutex;
122 122
123 /* Link layer */ 123 /* Link layer */
124 int mode;
125#define DLCI_MODE_ABM 0 /* Normal Asynchronous Balanced Mode */
126#define DLCI_MODE_ADM 1 /* Asynchronous Disconnected Mode */
124 spinlock_t lock; /* Protects the internal state */ 127 spinlock_t lock; /* Protects the internal state */
125 struct timer_list t1; /* Retransmit timer for SABM and UA */ 128 struct timer_list t1; /* Retransmit timer for SABM and UA */
126 int retries; 129 int retries;
@@ -1364,7 +1367,13 @@ retry:
1364 ctrl->data = data; 1367 ctrl->data = data;
1365 ctrl->len = clen; 1368 ctrl->len = clen;
1366 gsm->pending_cmd = ctrl; 1369 gsm->pending_cmd = ctrl;
1367 gsm->cretries = gsm->n2; 1370
1371 /* If DLCI0 is in ADM mode skip retries, it won't respond */
1372 if (gsm->dlci[0]->mode == DLCI_MODE_ADM)
1373 gsm->cretries = 1;
1374 else
1375 gsm->cretries = gsm->n2;
1376
1368 mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100); 1377 mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
1369 gsm_control_transmit(gsm, ctrl); 1378 gsm_control_transmit(gsm, ctrl);
1370 spin_unlock_irqrestore(&gsm->control_lock, flags); 1379 spin_unlock_irqrestore(&gsm->control_lock, flags);
@@ -1472,6 +1481,7 @@ static void gsm_dlci_t1(struct timer_list *t)
1472 if (debug & 8) 1481 if (debug & 8)
1473 pr_info("DLCI %d opening in ADM mode.\n", 1482 pr_info("DLCI %d opening in ADM mode.\n",
1474 dlci->addr); 1483 dlci->addr);
1484 dlci->mode = DLCI_MODE_ADM;
1475 gsm_dlci_open(dlci); 1485 gsm_dlci_open(dlci);
1476 } else { 1486 } else {
1477 gsm_dlci_close(dlci); 1487 gsm_dlci_close(dlci);
@@ -2861,11 +2871,22 @@ static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk)
2861static int gsm_carrier_raised(struct tty_port *port) 2871static int gsm_carrier_raised(struct tty_port *port)
2862{ 2872{
2863 struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port); 2873 struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
2874 struct gsm_mux *gsm = dlci->gsm;
2875
2864 /* Not yet open so no carrier info */ 2876 /* Not yet open so no carrier info */
2865 if (dlci->state != DLCI_OPEN) 2877 if (dlci->state != DLCI_OPEN)
2866 return 0; 2878 return 0;
2867 if (debug & 2) 2879 if (debug & 2)
2868 return 1; 2880 return 1;
2881
2882 /*
2883 * Basic mode with control channel in ADM mode may not respond
2884 * to CMD_MSC at all and modem_rx is empty.
2885 */
2886 if (gsm->encoding == 0 && gsm->dlci[0]->mode == DLCI_MODE_ADM &&
2887 !dlci->modem_rx)
2888 return 1;
2889
2869 return dlci->modem_rx & TIOCM_CD; 2890 return dlci->modem_rx & TIOCM_CD;
2870} 2891}
2871 2892
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index a24278380fec..22683393a0f2 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -169,7 +169,7 @@ static int __init register_earlycon(char *buf, const struct earlycon_id *match)
169 */ 169 */
170int __init setup_earlycon(char *buf) 170int __init setup_earlycon(char *buf)
171{ 171{
172 const struct earlycon_id *match; 172 const struct earlycon_id **p_match;
173 173
174 if (!buf || !buf[0]) 174 if (!buf || !buf[0])
175 return -EINVAL; 175 return -EINVAL;
@@ -177,7 +177,9 @@ int __init setup_earlycon(char *buf)
177 if (early_con.flags & CON_ENABLED) 177 if (early_con.flags & CON_ENABLED)
178 return -EALREADY; 178 return -EALREADY;
179 179
180 for (match = __earlycon_table; match < __earlycon_table_end; match++) { 180 for (p_match = __earlycon_table; p_match < __earlycon_table_end;
181 p_match++) {
182 const struct earlycon_id *match = *p_match;
181 size_t len = strlen(match->name); 183 size_t len = strlen(match->name);
182 184
183 if (strncmp(buf, match->name, len)) 185 if (strncmp(buf, match->name, len))
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 91f3a1a5cb7f..c2fc6bef7a6f 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -316,7 +316,7 @@ static u32 imx_uart_readl(struct imx_port *sport, u32 offset)
316 * differ from the value that was last written. As it only 316 * differ from the value that was last written. As it only
317 * clears after being set, reread conditionally. 317 * clears after being set, reread conditionally.
318 */ 318 */
319 if (sport->ucr2 & UCR2_SRST) 319 if (!(sport->ucr2 & UCR2_SRST))
320 sport->ucr2 = readl(sport->port.membase + offset); 320 sport->ucr2 = readl(sport->port.membase + offset);
321 return sport->ucr2; 321 return sport->ucr2;
322 break; 322 break;
@@ -1833,6 +1833,11 @@ static int imx_uart_rs485_config(struct uart_port *port,
1833 rs485conf->flags &= ~SER_RS485_ENABLED; 1833 rs485conf->flags &= ~SER_RS485_ENABLED;
1834 1834
1835 if (rs485conf->flags & SER_RS485_ENABLED) { 1835 if (rs485conf->flags & SER_RS485_ENABLED) {
1836 /* Enable receiver if low-active RTS signal is requested */
1837 if (sport->have_rtscts && !sport->have_rtsgpio &&
1838 !(rs485conf->flags & SER_RS485_RTS_ON_SEND))
1839 rs485conf->flags |= SER_RS485_RX_DURING_TX;
1840
1836 /* disable transmitter */ 1841 /* disable transmitter */
1837 ucr2 = imx_uart_readl(sport, UCR2); 1842 ucr2 = imx_uart_readl(sport, UCR2);
1838 if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND) 1843 if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND)
@@ -2265,6 +2270,18 @@ static int imx_uart_probe(struct platform_device *pdev)
2265 (!sport->have_rtscts && !sport->have_rtsgpio)) 2270 (!sport->have_rtscts && !sport->have_rtsgpio))
2266 dev_err(&pdev->dev, "no RTS control, disabling rs485\n"); 2271 dev_err(&pdev->dev, "no RTS control, disabling rs485\n");
2267 2272
2273 /*
2274 * If using the i.MX UART RTS/CTS control then the RTS (CTS_B)
2275 * signal cannot be set low during transmission in case the
2276 * receiver is off (limitation of the i.MX UART IP).
2277 */
2278 if (sport->port.rs485.flags & SER_RS485_ENABLED &&
2279 sport->have_rtscts && !sport->have_rtsgpio &&
2280 (!(sport->port.rs485.flags & SER_RS485_RTS_ON_SEND) &&
2281 !(sport->port.rs485.flags & SER_RS485_RX_DURING_TX)))
2282 dev_err(&pdev->dev,
2283 "low-active RTS not possible when receiver is off, enabling receiver\n");
2284
2268 imx_uart_rs485_config(&sport->port, &sport->port.rs485); 2285 imx_uart_rs485_config(&sport->port, &sport->port.rs485);
2269 2286
2270 /* Disable interrupts before requesting them */ 2287 /* Disable interrupts before requesting them */
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 750e5645dc85..f503fab1e268 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -495,7 +495,6 @@ static void mvebu_uart_set_termios(struct uart_port *port,
495 termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR); 495 termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR);
496 termios->c_cflag &= CREAD | CBAUD; 496 termios->c_cflag &= CREAD | CBAUD;
497 termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD); 497 termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD);
498 termios->c_lflag = old->c_lflag;
499 } 498 }
500 499
501 spin_unlock_irqrestore(&port->lock, flags); 500 spin_unlock_irqrestore(&port->lock, flags);
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 65ff669373d4..a1b3eb04cb32 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -1022,6 +1022,7 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
1022 struct qcom_geni_serial_port *port; 1022 struct qcom_geni_serial_port *port;
1023 struct uart_port *uport; 1023 struct uart_port *uport;
1024 struct resource *res; 1024 struct resource *res;
1025 int irq;
1025 1026
1026 if (pdev->dev.of_node) 1027 if (pdev->dev.of_node)
1027 line = of_alias_get_id(pdev->dev.of_node, "serial"); 1028 line = of_alias_get_id(pdev->dev.of_node, "serial");
@@ -1061,11 +1062,12 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
1061 port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS; 1062 port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
1062 port->tx_fifo_width = DEF_FIFO_WIDTH_BITS; 1063 port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
1063 1064
1064 uport->irq = platform_get_irq(pdev, 0); 1065 irq = platform_get_irq(pdev, 0);
1065 if (uport->irq < 0) { 1066 if (irq < 0) {
1066 dev_err(&pdev->dev, "Failed to get IRQ %d\n", uport->irq); 1067 dev_err(&pdev->dev, "Failed to get IRQ %d\n", irq);
1067 return uport->irq; 1068 return irq;
1068 } 1069 }
1070 uport->irq = irq;
1069 1071
1070 uport->private_data = &qcom_geni_console_driver; 1072 uport->private_data = &qcom_geni_console_driver;
1071 platform_set_drvdata(pdev, port); 1073 platform_set_drvdata(pdev, port);
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index abcb4d09a2d8..bd72dd843338 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1181,7 +1181,7 @@ static int __init cdns_early_console_setup(struct earlycon_device *device,
1181 /* only set baud if specified on command line - otherwise 1181 /* only set baud if specified on command line - otherwise
1182 * assume it has been initialized by a boot loader. 1182 * assume it has been initialized by a boot loader.
1183 */ 1183 */
1184 if (device->baud) { 1184 if (port->uartclk && device->baud) {
1185 u32 cd = 0, bdiv = 0; 1185 u32 cd = 0, bdiv = 0;
1186 u32 mr; 1186 u32 mr;
1187 int div8; 1187 int div8;
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 63114ea35ec1..7c838b90a31d 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2816,7 +2816,10 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
2816 2816
2817 kref_init(&tty->kref); 2817 kref_init(&tty->kref);
2818 tty->magic = TTY_MAGIC; 2818 tty->magic = TTY_MAGIC;
2819 tty_ldisc_init(tty); 2819 if (tty_ldisc_init(tty)) {
2820 kfree(tty);
2821 return NULL;
2822 }
2820 tty->session = NULL; 2823 tty->session = NULL;
2821 tty->pgrp = NULL; 2824 tty->pgrp = NULL;
2822 mutex_init(&tty->legacy_mutex); 2825 mutex_init(&tty->legacy_mutex);
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 050f4d650891..fb7329ab2b37 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -176,12 +176,11 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
176 return ERR_CAST(ldops); 176 return ERR_CAST(ldops);
177 } 177 }
178 178
179 ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL); 179 /*
180 if (ld == NULL) { 180 * There is no way to handle allocation failure of only 16 bytes.
181 put_ldops(ldops); 181 * Let's simplify error handling and save more memory.
182 return ERR_PTR(-ENOMEM); 182 */
183 } 183 ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL | __GFP_NOFAIL);
184
185 ld->ops = ldops; 184 ld->ops = ldops;
186 ld->tty = tty; 185 ld->tty = tty;
187 186
@@ -527,19 +526,16 @@ static int tty_ldisc_failto(struct tty_struct *tty, int ld)
527static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old) 526static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
528{ 527{
529 /* There is an outstanding reference here so this is safe */ 528 /* There is an outstanding reference here so this is safe */
530 old = tty_ldisc_get(tty, old->ops->num); 529 if (tty_ldisc_failto(tty, old->ops->num) < 0) {
531 WARN_ON(IS_ERR(old)); 530 const char *name = tty_name(tty);
532 tty->ldisc = old; 531
533 tty_set_termios_ldisc(tty, old->ops->num); 532 pr_warn("Falling back ldisc for %s.\n", name);
534 if (tty_ldisc_open(tty, old) < 0) {
535 tty_ldisc_put(old);
536 /* The traditional behaviour is to fall back to N_TTY, we 533 /* The traditional behaviour is to fall back to N_TTY, we
537 want to avoid falling back to N_NULL unless we have no 534 want to avoid falling back to N_NULL unless we have no
538 choice to avoid the risk of breaking anything */ 535 choice to avoid the risk of breaking anything */
539 if (tty_ldisc_failto(tty, N_TTY) < 0 && 536 if (tty_ldisc_failto(tty, N_TTY) < 0 &&
540 tty_ldisc_failto(tty, N_NULL) < 0) 537 tty_ldisc_failto(tty, N_NULL) < 0)
541 panic("Couldn't open N_NULL ldisc for %s.", 538 panic("Couldn't open N_NULL ldisc for %s.", name);
542 tty_name(tty));
543 } 539 }
544} 540}
545 541
@@ -824,12 +820,13 @@ EXPORT_SYMBOL_GPL(tty_ldisc_release);
824 * the tty structure is not completely set up when this call is made. 820 * the tty structure is not completely set up when this call is made.
825 */ 821 */
826 822
827void tty_ldisc_init(struct tty_struct *tty) 823int tty_ldisc_init(struct tty_struct *tty)
828{ 824{
829 struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY); 825 struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY);
830 if (IS_ERR(ld)) 826 if (IS_ERR(ld))
831 panic("n_tty: init_tty"); 827 return PTR_ERR(ld);
832 tty->ldisc = ld; 828 tty->ldisc = ld;
829 return 0;
833} 830}
834 831
835/** 832/**
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index f695a7e8c314..c690d100adcd 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -19,7 +19,7 @@
19 * # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \ 19 * # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \
20 * > /sys/bus/vmbus/drivers/uio_hv_generic/bind 20 * > /sys/bus/vmbus/drivers/uio_hv_generic/bind
21 */ 21 */
22 22#define DEBUG 1
23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24 24
25#include <linux/device.h> 25#include <linux/device.h>
@@ -94,10 +94,11 @@ hv_uio_irqcontrol(struct uio_info *info, s32 irq_state)
94 */ 94 */
95static void hv_uio_channel_cb(void *context) 95static void hv_uio_channel_cb(void *context)
96{ 96{
97 struct hv_uio_private_data *pdata = context; 97 struct vmbus_channel *chan = context;
98 struct hv_device *dev = pdata->device; 98 struct hv_device *hv_dev = chan->device_obj;
99 struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
99 100
100 dev->channel->inbound.ring_buffer->interrupt_mask = 1; 101 chan->inbound.ring_buffer->interrupt_mask = 1;
101 virt_mb(); 102 virt_mb();
102 103
103 uio_event_notify(&pdata->info); 104 uio_event_notify(&pdata->info);
@@ -121,78 +122,46 @@ static void hv_uio_rescind(struct vmbus_channel *channel)
121 uio_event_notify(&pdata->info); 122 uio_event_notify(&pdata->info);
122} 123}
123 124
124/* 125/* Sysfs API to allow mmap of the ring buffers
125 * Handle fault when looking for sub channel ring buffer 126 * The ring buffer is allocated as contiguous memory by vmbus_open
126 * Subchannel ring buffer is same as resource 0 which is main ring buffer
127 * This is derived from uio_vma_fault
128 */ 127 */
129static int hv_uio_vma_fault(struct vm_fault *vmf)
130{
131 struct vm_area_struct *vma = vmf->vma;
132 void *ring_buffer = vma->vm_private_data;
133 struct page *page;
134 void *addr;
135
136 addr = ring_buffer + (vmf->pgoff << PAGE_SHIFT);
137 page = virt_to_page(addr);
138 get_page(page);
139 vmf->page = page;
140 return 0;
141}
142
143static const struct vm_operations_struct hv_uio_vm_ops = {
144 .fault = hv_uio_vma_fault,
145};
146
147/* Sysfs API to allow mmap of the ring buffers */
148static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj, 128static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj,
149 struct bin_attribute *attr, 129 struct bin_attribute *attr,
150 struct vm_area_struct *vma) 130 struct vm_area_struct *vma)
151{ 131{
152 struct vmbus_channel *channel 132 struct vmbus_channel *channel
153 = container_of(kobj, struct vmbus_channel, kobj); 133 = container_of(kobj, struct vmbus_channel, kobj);
154 unsigned long requested_pages, actual_pages; 134 struct hv_device *dev = channel->primary_channel->device_obj;
155 135 u16 q_idx = channel->offermsg.offer.sub_channel_index;
156 if (vma->vm_end < vma->vm_start)
157 return -EINVAL;
158
159 /* only allow 0 for now */
160 if (vma->vm_pgoff > 0)
161 return -EINVAL;
162 136
163 requested_pages = vma_pages(vma); 137 dev_dbg(&dev->device, "mmap channel %u pages %#lx at %#lx\n",
164 actual_pages = 2 * HV_RING_SIZE; 138 q_idx, vma_pages(vma), vma->vm_pgoff);
165 if (requested_pages > actual_pages)
166 return -EINVAL;
167 139
168 vma->vm_private_data = channel->ringbuffer_pages; 140 return vm_iomap_memory(vma, virt_to_phys(channel->ringbuffer_pages),
169 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 141 channel->ringbuffer_pagecount << PAGE_SHIFT);
170 vma->vm_ops = &hv_uio_vm_ops;
171 return 0;
172} 142}
173 143
174static struct bin_attribute ring_buffer_bin_attr __ro_after_init = { 144static const struct bin_attribute ring_buffer_bin_attr = {
175 .attr = { 145 .attr = {
176 .name = "ring", 146 .name = "ring",
177 .mode = 0600, 147 .mode = 0600,
178 /* size is set at init time */
179 }, 148 },
149 .size = 2 * HV_RING_SIZE * PAGE_SIZE,
180 .mmap = hv_uio_ring_mmap, 150 .mmap = hv_uio_ring_mmap,
181}; 151};
182 152
183/* Callback from VMBUS subystem when new channel created. */ 153/* Callback from VMBUS subsystem when new channel created. */
184static void 154static void
185hv_uio_new_channel(struct vmbus_channel *new_sc) 155hv_uio_new_channel(struct vmbus_channel *new_sc)
186{ 156{
187 struct hv_device *hv_dev = new_sc->primary_channel->device_obj; 157 struct hv_device *hv_dev = new_sc->primary_channel->device_obj;
188 struct device *device = &hv_dev->device; 158 struct device *device = &hv_dev->device;
189 struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
190 const size_t ring_bytes = HV_RING_SIZE * PAGE_SIZE; 159 const size_t ring_bytes = HV_RING_SIZE * PAGE_SIZE;
191 int ret; 160 int ret;
192 161
193 /* Create host communication ring */ 162 /* Create host communication ring */
194 ret = vmbus_open(new_sc, ring_bytes, ring_bytes, NULL, 0, 163 ret = vmbus_open(new_sc, ring_bytes, ring_bytes, NULL, 0,
195 hv_uio_channel_cb, pdata); 164 hv_uio_channel_cb, new_sc);
196 if (ret) { 165 if (ret) {
197 dev_err(device, "vmbus_open subchannel failed: %d\n", ret); 166 dev_err(device, "vmbus_open subchannel failed: %d\n", ret);
198 return; 167 return;
@@ -234,7 +203,7 @@ hv_uio_probe(struct hv_device *dev,
234 203
235 ret = vmbus_open(dev->channel, HV_RING_SIZE * PAGE_SIZE, 204 ret = vmbus_open(dev->channel, HV_RING_SIZE * PAGE_SIZE,
236 HV_RING_SIZE * PAGE_SIZE, NULL, 0, 205 HV_RING_SIZE * PAGE_SIZE, NULL, 0,
237 hv_uio_channel_cb, pdata); 206 hv_uio_channel_cb, dev->channel);
238 if (ret) 207 if (ret)
239 goto fail; 208 goto fail;
240 209
@@ -326,6 +295,11 @@ hv_uio_probe(struct hv_device *dev,
326 vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind); 295 vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind);
327 vmbus_set_sc_create_callback(dev->channel, hv_uio_new_channel); 296 vmbus_set_sc_create_callback(dev->channel, hv_uio_new_channel);
328 297
298 ret = sysfs_create_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr);
299 if (ret)
300 dev_notice(&dev->device,
301 "sysfs create ring bin file failed; %d\n", ret);
302
329 hv_set_drvdata(dev, pdata); 303 hv_set_drvdata(dev, pdata);
330 304
331 return 0; 305 return 0;
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 75f7fb151f71..987fc5ba6321 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -207,5 +207,6 @@ config USB_ULPI_BUS
207 207
208config USB_ROLE_SWITCH 208config USB_ROLE_SWITCH
209 tristate 209 tristate
210 select USB_COMMON
210 211
211endif # USB_SUPPORT 212endif # USB_SUPPORT
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 777036ae6367..0a42c5df3c0f 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2262,7 +2262,8 @@ int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg)
2262 hcd->state = HC_STATE_SUSPENDED; 2262 hcd->state = HC_STATE_SUSPENDED;
2263 2263
2264 if (!PMSG_IS_AUTO(msg)) 2264 if (!PMSG_IS_AUTO(msg))
2265 usb_phy_roothub_power_off(hcd->phy_roothub); 2265 usb_phy_roothub_suspend(hcd->self.sysdev,
2266 hcd->phy_roothub);
2266 2267
2267 /* Did we race with a root-hub wakeup event? */ 2268 /* Did we race with a root-hub wakeup event? */
2268 if (rhdev->do_remote_wakeup) { 2269 if (rhdev->do_remote_wakeup) {
@@ -2302,7 +2303,8 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
2302 } 2303 }
2303 2304
2304 if (!PMSG_IS_AUTO(msg)) { 2305 if (!PMSG_IS_AUTO(msg)) {
2305 status = usb_phy_roothub_power_on(hcd->phy_roothub); 2306 status = usb_phy_roothub_resume(hcd->self.sysdev,
2307 hcd->phy_roothub);
2306 if (status) 2308 if (status)
2307 return status; 2309 return status;
2308 } 2310 }
@@ -2344,7 +2346,7 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
2344 } 2346 }
2345 } else { 2347 } else {
2346 hcd->state = old_state; 2348 hcd->state = old_state;
2347 usb_phy_roothub_power_off(hcd->phy_roothub); 2349 usb_phy_roothub_suspend(hcd->self.sysdev, hcd->phy_roothub);
2348 dev_dbg(&rhdev->dev, "bus %s fail, err %d\n", 2350 dev_dbg(&rhdev->dev, "bus %s fail, err %d\n",
2349 "resume", status); 2351 "resume", status);
2350 if (status != -ESHUTDOWN) 2352 if (status != -ESHUTDOWN)
@@ -2377,6 +2379,7 @@ void usb_hcd_resume_root_hub (struct usb_hcd *hcd)
2377 2379
2378 spin_lock_irqsave (&hcd_root_hub_lock, flags); 2380 spin_lock_irqsave (&hcd_root_hub_lock, flags);
2379 if (hcd->rh_registered) { 2381 if (hcd->rh_registered) {
2382 pm_wakeup_event(&hcd->self.root_hub->dev, 0);
2380 set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags); 2383 set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
2381 queue_work(pm_wq, &hcd->wakeup_work); 2384 queue_work(pm_wq, &hcd->wakeup_work);
2382 } 2385 }
@@ -2758,12 +2761,16 @@ int usb_add_hcd(struct usb_hcd *hcd,
2758 } 2761 }
2759 2762
2760 if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) { 2763 if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
2761 hcd->phy_roothub = usb_phy_roothub_init(hcd->self.sysdev); 2764 hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
2762 if (IS_ERR(hcd->phy_roothub)) { 2765 if (IS_ERR(hcd->phy_roothub)) {
2763 retval = PTR_ERR(hcd->phy_roothub); 2766 retval = PTR_ERR(hcd->phy_roothub);
2764 goto err_phy_roothub_init; 2767 goto err_phy_roothub_alloc;
2765 } 2768 }
2766 2769
2770 retval = usb_phy_roothub_init(hcd->phy_roothub);
2771 if (retval)
2772 goto err_phy_roothub_alloc;
2773
2767 retval = usb_phy_roothub_power_on(hcd->phy_roothub); 2774 retval = usb_phy_roothub_power_on(hcd->phy_roothub);
2768 if (retval) 2775 if (retval)
2769 goto err_usb_phy_roothub_power_on; 2776 goto err_usb_phy_roothub_power_on;
@@ -2936,7 +2943,7 @@ err_create_buf:
2936 usb_phy_roothub_power_off(hcd->phy_roothub); 2943 usb_phy_roothub_power_off(hcd->phy_roothub);
2937err_usb_phy_roothub_power_on: 2944err_usb_phy_roothub_power_on:
2938 usb_phy_roothub_exit(hcd->phy_roothub); 2945 usb_phy_roothub_exit(hcd->phy_roothub);
2939err_phy_roothub_init: 2946err_phy_roothub_alloc:
2940 if (hcd->remove_phy && hcd->usb_phy) { 2947 if (hcd->remove_phy && hcd->usb_phy) {
2941 usb_phy_shutdown(hcd->usb_phy); 2948 usb_phy_shutdown(hcd->usb_phy);
2942 usb_put_phy(hcd->usb_phy); 2949 usb_put_phy(hcd->usb_phy);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index f6ea16e9f6bb..aa9968d90a48 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -653,12 +653,17 @@ void usb_wakeup_notification(struct usb_device *hdev,
653 unsigned int portnum) 653 unsigned int portnum)
654{ 654{
655 struct usb_hub *hub; 655 struct usb_hub *hub;
656 struct usb_port *port_dev;
656 657
657 if (!hdev) 658 if (!hdev)
658 return; 659 return;
659 660
660 hub = usb_hub_to_struct_hub(hdev); 661 hub = usb_hub_to_struct_hub(hdev);
661 if (hub) { 662 if (hub) {
663 port_dev = hub->ports[portnum - 1];
664 if (port_dev && port_dev->child)
665 pm_wakeup_event(&port_dev->child->dev, 0);
666
662 set_bit(portnum, hub->wakeup_bits); 667 set_bit(portnum, hub->wakeup_bits);
663 kick_hub_wq(hub); 668 kick_hub_wq(hub);
664 } 669 }
@@ -3434,8 +3439,11 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
3434 3439
3435 /* Skip the initial Clear-Suspend step for a remote wakeup */ 3440 /* Skip the initial Clear-Suspend step for a remote wakeup */
3436 status = hub_port_status(hub, port1, &portstatus, &portchange); 3441 status = hub_port_status(hub, port1, &portstatus, &portchange);
3437 if (status == 0 && !port_is_suspended(hub, portstatus)) 3442 if (status == 0 && !port_is_suspended(hub, portstatus)) {
3443 if (portchange & USB_PORT_STAT_C_SUSPEND)
3444 pm_wakeup_event(&udev->dev, 0);
3438 goto SuspendCleared; 3445 goto SuspendCleared;
3446 }
3439 3447
3440 /* see 7.1.7.7; affects power usage, but not budgeting */ 3448 /* see 7.1.7.7; affects power usage, but not budgeting */
3441 if (hub_is_superspeed(hub->hdev)) 3449 if (hub_is_superspeed(hub->hdev))
diff --git a/drivers/usb/core/phy.c b/drivers/usb/core/phy.c
index 09b7c43c0ea4..9879767452a2 100644
--- a/drivers/usb/core/phy.c
+++ b/drivers/usb/core/phy.c
@@ -19,19 +19,6 @@ struct usb_phy_roothub {
19 struct list_head list; 19 struct list_head list;
20}; 20};
21 21
22static struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev)
23{
24 struct usb_phy_roothub *roothub_entry;
25
26 roothub_entry = devm_kzalloc(dev, sizeof(*roothub_entry), GFP_KERNEL);
27 if (!roothub_entry)
28 return ERR_PTR(-ENOMEM);
29
30 INIT_LIST_HEAD(&roothub_entry->list);
31
32 return roothub_entry;
33}
34
35static int usb_phy_roothub_add_phy(struct device *dev, int index, 22static int usb_phy_roothub_add_phy(struct device *dev, int index,
36 struct list_head *list) 23 struct list_head *list)
37{ 24{
@@ -45,9 +32,11 @@ static int usb_phy_roothub_add_phy(struct device *dev, int index,
45 return PTR_ERR(phy); 32 return PTR_ERR(phy);
46 } 33 }
47 34
48 roothub_entry = usb_phy_roothub_alloc(dev); 35 roothub_entry = devm_kzalloc(dev, sizeof(*roothub_entry), GFP_KERNEL);
49 if (IS_ERR(roothub_entry)) 36 if (!roothub_entry)
50 return PTR_ERR(roothub_entry); 37 return -ENOMEM;
38
39 INIT_LIST_HEAD(&roothub_entry->list);
51 40
52 roothub_entry->phy = phy; 41 roothub_entry->phy = phy;
53 42
@@ -56,28 +45,44 @@ static int usb_phy_roothub_add_phy(struct device *dev, int index,
56 return 0; 45 return 0;
57} 46}
58 47
59struct usb_phy_roothub *usb_phy_roothub_init(struct device *dev) 48struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev)
60{ 49{
61 struct usb_phy_roothub *phy_roothub; 50 struct usb_phy_roothub *phy_roothub;
62 struct usb_phy_roothub *roothub_entry;
63 struct list_head *head;
64 int i, num_phys, err; 51 int i, num_phys, err;
65 52
53 if (!IS_ENABLED(CONFIG_GENERIC_PHY))
54 return NULL;
55
66 num_phys = of_count_phandle_with_args(dev->of_node, "phys", 56 num_phys = of_count_phandle_with_args(dev->of_node, "phys",
67 "#phy-cells"); 57 "#phy-cells");
68 if (num_phys <= 0) 58 if (num_phys <= 0)
69 return NULL; 59 return NULL;
70 60
71 phy_roothub = usb_phy_roothub_alloc(dev); 61 phy_roothub = devm_kzalloc(dev, sizeof(*phy_roothub), GFP_KERNEL);
72 if (IS_ERR(phy_roothub)) 62 if (!phy_roothub)
73 return phy_roothub; 63 return ERR_PTR(-ENOMEM);
64
65 INIT_LIST_HEAD(&phy_roothub->list);
74 66
75 for (i = 0; i < num_phys; i++) { 67 for (i = 0; i < num_phys; i++) {
76 err = usb_phy_roothub_add_phy(dev, i, &phy_roothub->list); 68 err = usb_phy_roothub_add_phy(dev, i, &phy_roothub->list);
77 if (err) 69 if (err)
78 goto err_out; 70 return ERR_PTR(err);
79 } 71 }
80 72
73 return phy_roothub;
74}
75EXPORT_SYMBOL_GPL(usb_phy_roothub_alloc);
76
77int usb_phy_roothub_init(struct usb_phy_roothub *phy_roothub)
78{
79 struct usb_phy_roothub *roothub_entry;
80 struct list_head *head;
81 int err;
82
83 if (!phy_roothub)
84 return 0;
85
81 head = &phy_roothub->list; 86 head = &phy_roothub->list;
82 87
83 list_for_each_entry(roothub_entry, head, list) { 88 list_for_each_entry(roothub_entry, head, list) {
@@ -86,14 +91,13 @@ struct usb_phy_roothub *usb_phy_roothub_init(struct device *dev)
86 goto err_exit_phys; 91 goto err_exit_phys;
87 } 92 }
88 93
89 return phy_roothub; 94 return 0;
90 95
91err_exit_phys: 96err_exit_phys:
92 list_for_each_entry_continue_reverse(roothub_entry, head, list) 97 list_for_each_entry_continue_reverse(roothub_entry, head, list)
93 phy_exit(roothub_entry->phy); 98 phy_exit(roothub_entry->phy);
94 99
95err_out: 100 return err;
96 return ERR_PTR(err);
97} 101}
98EXPORT_SYMBOL_GPL(usb_phy_roothub_init); 102EXPORT_SYMBOL_GPL(usb_phy_roothub_init);
99 103
@@ -111,7 +115,7 @@ int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub)
111 list_for_each_entry(roothub_entry, head, list) { 115 list_for_each_entry(roothub_entry, head, list) {
112 err = phy_exit(roothub_entry->phy); 116 err = phy_exit(roothub_entry->phy);
113 if (err) 117 if (err)
114 ret = ret; 118 ret = err;
115 } 119 }
116 120
117 return ret; 121 return ret;
@@ -156,3 +160,38 @@ void usb_phy_roothub_power_off(struct usb_phy_roothub *phy_roothub)
156 phy_power_off(roothub_entry->phy); 160 phy_power_off(roothub_entry->phy);
157} 161}
158EXPORT_SYMBOL_GPL(usb_phy_roothub_power_off); 162EXPORT_SYMBOL_GPL(usb_phy_roothub_power_off);
163
164int usb_phy_roothub_suspend(struct device *controller_dev,
165 struct usb_phy_roothub *phy_roothub)
166{
167 usb_phy_roothub_power_off(phy_roothub);
168
169 /* keep the PHYs initialized so the device can wake up the system */
170 if (device_may_wakeup(controller_dev))
171 return 0;
172
173 return usb_phy_roothub_exit(phy_roothub);
174}
175EXPORT_SYMBOL_GPL(usb_phy_roothub_suspend);
176
177int usb_phy_roothub_resume(struct device *controller_dev,
178 struct usb_phy_roothub *phy_roothub)
179{
180 int err;
181
182 /* if the device can't wake up the system _exit was called */
183 if (!device_may_wakeup(controller_dev)) {
184 err = usb_phy_roothub_init(phy_roothub);
185 if (err)
186 return err;
187 }
188
189 err = usb_phy_roothub_power_on(phy_roothub);
190
191 /* undo _init if _power_on failed */
192 if (err && !device_may_wakeup(controller_dev))
193 usb_phy_roothub_exit(phy_roothub);
194
195 return err;
196}
197EXPORT_SYMBOL_GPL(usb_phy_roothub_resume);
diff --git a/drivers/usb/core/phy.h b/drivers/usb/core/phy.h
index 6fde59bfbff8..88a3c037e9df 100644
--- a/drivers/usb/core/phy.h
+++ b/drivers/usb/core/phy.h
@@ -1,7 +1,27 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
2/*
3 * USB roothub wrapper
4 *
5 * Copyright (C) 2018 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
6 */
7
8#ifndef __USB_CORE_PHY_H_
9#define __USB_CORE_PHY_H_
10
11struct device;
1struct usb_phy_roothub; 12struct usb_phy_roothub;
2 13
3struct usb_phy_roothub *usb_phy_roothub_init(struct device *dev); 14struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev);
15
16int usb_phy_roothub_init(struct usb_phy_roothub *phy_roothub);
4int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub); 17int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub);
5 18
6int usb_phy_roothub_power_on(struct usb_phy_roothub *phy_roothub); 19int usb_phy_roothub_power_on(struct usb_phy_roothub *phy_roothub);
7void usb_phy_roothub_power_off(struct usb_phy_roothub *phy_roothub); 20void usb_phy_roothub_power_off(struct usb_phy_roothub *phy_roothub);
21
22int usb_phy_roothub_suspend(struct device *controller_dev,
23 struct usb_phy_roothub *phy_roothub);
24int usb_phy_roothub_resume(struct device *controller_dev,
25 struct usb_phy_roothub *phy_roothub);
26
27#endif /* __USB_CORE_PHY_H_ */
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 920f48a49a87..c55def2f1320 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -186,6 +186,9 @@ static const struct usb_device_id usb_quirk_list[] = {
186 { USB_DEVICE(0x03f0, 0x0701), .driver_info = 186 { USB_DEVICE(0x03f0, 0x0701), .driver_info =
187 USB_QUIRK_STRING_FETCH_255 }, 187 USB_QUIRK_STRING_FETCH_255 },
188 188
189 /* HP v222w 16GB Mini USB Drive */
190 { USB_DEVICE(0x03f0, 0x3f40), .driver_info = USB_QUIRK_DELAY_INIT },
191
189 /* Creative SB Audigy 2 NX */ 192 /* Creative SB Audigy 2 NX */
190 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, 193 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
191 194
diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
index 48779c44c361..eb494ec547e8 100644
--- a/drivers/usb/host/xhci-dbgtty.c
+++ b/drivers/usb/host/xhci-dbgtty.c
@@ -320,9 +320,11 @@ int xhci_dbc_tty_register_driver(struct xhci_hcd *xhci)
320 320
321void xhci_dbc_tty_unregister_driver(void) 321void xhci_dbc_tty_unregister_driver(void)
322{ 322{
323 tty_unregister_driver(dbc_tty_driver); 323 if (dbc_tty_driver) {
324 put_tty_driver(dbc_tty_driver); 324 tty_unregister_driver(dbc_tty_driver);
325 dbc_tty_driver = NULL; 325 put_tty_driver(dbc_tty_driver);
326 dbc_tty_driver = NULL;
327 }
326} 328}
327 329
328static void dbc_rx_push(unsigned long _port) 330static void dbc_rx_push(unsigned long _port)
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index f17b7eab66cf..85ffda85f8ab 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -126,7 +126,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
126 if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) 126 if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
127 xhci->quirks |= XHCI_AMD_PLL_FIX; 127 xhci->quirks |= XHCI_AMD_PLL_FIX;
128 128
129 if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x43bb) 129 if (pdev->vendor == PCI_VENDOR_ID_AMD &&
130 (pdev->device == 0x15e0 ||
131 pdev->device == 0x15e1 ||
132 pdev->device == 0x43bb))
130 xhci->quirks |= XHCI_SUSPEND_DELAY; 133 xhci->quirks |= XHCI_SUSPEND_DELAY;
131 134
132 if (pdev->vendor == PCI_VENDOR_ID_AMD) 135 if (pdev->vendor == PCI_VENDOR_ID_AMD)
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index df327dcc2bac..c1b22fc64e38 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -157,6 +157,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
157 struct resource *res; 157 struct resource *res;
158 struct usb_hcd *hcd; 158 struct usb_hcd *hcd;
159 struct clk *clk; 159 struct clk *clk;
160 struct clk *reg_clk;
160 int ret; 161 int ret;
161 int irq; 162 int irq;
162 163
@@ -226,17 +227,27 @@ static int xhci_plat_probe(struct platform_device *pdev)
226 hcd->rsrc_len = resource_size(res); 227 hcd->rsrc_len = resource_size(res);
227 228
228 /* 229 /*
229 * Not all platforms have a clk so it is not an error if the 230 * Not all platforms have clks so it is not an error if the
230 * clock does not exists. 231 * clock do not exist.
231 */ 232 */
233 reg_clk = devm_clk_get(&pdev->dev, "reg");
234 if (!IS_ERR(reg_clk)) {
235 ret = clk_prepare_enable(reg_clk);
236 if (ret)
237 goto put_hcd;
238 } else if (PTR_ERR(reg_clk) == -EPROBE_DEFER) {
239 ret = -EPROBE_DEFER;
240 goto put_hcd;
241 }
242
232 clk = devm_clk_get(&pdev->dev, NULL); 243 clk = devm_clk_get(&pdev->dev, NULL);
233 if (!IS_ERR(clk)) { 244 if (!IS_ERR(clk)) {
234 ret = clk_prepare_enable(clk); 245 ret = clk_prepare_enable(clk);
235 if (ret) 246 if (ret)
236 goto put_hcd; 247 goto disable_reg_clk;
237 } else if (PTR_ERR(clk) == -EPROBE_DEFER) { 248 } else if (PTR_ERR(clk) == -EPROBE_DEFER) {
238 ret = -EPROBE_DEFER; 249 ret = -EPROBE_DEFER;
239 goto put_hcd; 250 goto disable_reg_clk;
240 } 251 }
241 252
242 xhci = hcd_to_xhci(hcd); 253 xhci = hcd_to_xhci(hcd);
@@ -252,6 +263,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
252 device_wakeup_enable(hcd->self.controller); 263 device_wakeup_enable(hcd->self.controller);
253 264
254 xhci->clk = clk; 265 xhci->clk = clk;
266 xhci->reg_clk = reg_clk;
255 xhci->main_hcd = hcd; 267 xhci->main_hcd = hcd;
256 xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev, 268 xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
257 dev_name(&pdev->dev), hcd); 269 dev_name(&pdev->dev), hcd);
@@ -320,8 +332,10 @@ put_usb3_hcd:
320 usb_put_hcd(xhci->shared_hcd); 332 usb_put_hcd(xhci->shared_hcd);
321 333
322disable_clk: 334disable_clk:
323 if (!IS_ERR(clk)) 335 clk_disable_unprepare(clk);
324 clk_disable_unprepare(clk); 336
337disable_reg_clk:
338 clk_disable_unprepare(reg_clk);
325 339
326put_hcd: 340put_hcd:
327 usb_put_hcd(hcd); 341 usb_put_hcd(hcd);
@@ -338,6 +352,7 @@ static int xhci_plat_remove(struct platform_device *dev)
338 struct usb_hcd *hcd = platform_get_drvdata(dev); 352 struct usb_hcd *hcd = platform_get_drvdata(dev);
339 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 353 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
340 struct clk *clk = xhci->clk; 354 struct clk *clk = xhci->clk;
355 struct clk *reg_clk = xhci->reg_clk;
341 356
342 xhci->xhc_state |= XHCI_STATE_REMOVING; 357 xhci->xhc_state |= XHCI_STATE_REMOVING;
343 358
@@ -347,8 +362,8 @@ static int xhci_plat_remove(struct platform_device *dev)
347 usb_remove_hcd(hcd); 362 usb_remove_hcd(hcd);
348 usb_put_hcd(xhci->shared_hcd); 363 usb_put_hcd(xhci->shared_hcd);
349 364
350 if (!IS_ERR(clk)) 365 clk_disable_unprepare(clk);
351 clk_disable_unprepare(clk); 366 clk_disable_unprepare(reg_clk);
352 usb_put_hcd(hcd); 367 usb_put_hcd(hcd);
353 368
354 pm_runtime_set_suspended(&dev->dev); 369 pm_runtime_set_suspended(&dev->dev);
@@ -420,7 +435,6 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
420static struct platform_driver usb_xhci_driver = { 435static struct platform_driver usb_xhci_driver = {
421 .probe = xhci_plat_probe, 436 .probe = xhci_plat_probe,
422 .remove = xhci_plat_remove, 437 .remove = xhci_plat_remove,
423 .shutdown = usb_hcd_platform_shutdown,
424 .driver = { 438 .driver = {
425 .name = "xhci-hcd", 439 .name = "xhci-hcd",
426 .pm = &xhci_plat_pm_ops, 440 .pm = &xhci_plat_pm_ops,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 05c909b04f14..6dfc4867dbcf 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1729,8 +1729,9 @@ struct xhci_hcd {
1729 int page_shift; 1729 int page_shift;
1730 /* msi-x vectors */ 1730 /* msi-x vectors */
1731 int msix_count; 1731 int msix_count;
1732 /* optional clock */ 1732 /* optional clocks */
1733 struct clk *clk; 1733 struct clk *clk;
1734 struct clk *reg_clk;
1734 /* data structures */ 1735 /* data structures */
1735 struct xhci_device_context_array *dcbaa; 1736 struct xhci_device_context_array *dcbaa;
1736 struct xhci_ring *cmd_ring; 1737 struct xhci_ring *cmd_ring;
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 05a679d5e3a2..6a60bc0490c5 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -451,7 +451,6 @@ static int dsps_musb_init(struct musb *musb)
451 if (!rev) 451 if (!rev)
452 return -ENODEV; 452 return -ENODEV;
453 453
454 usb_phy_init(musb->xceiv);
455 if (IS_ERR(musb->phy)) { 454 if (IS_ERR(musb->phy)) {
456 musb->phy = NULL; 455 musb->phy = NULL;
457 } else { 456 } else {
@@ -501,7 +500,6 @@ static int dsps_musb_exit(struct musb *musb)
501 struct dsps_glue *glue = dev_get_drvdata(dev->parent); 500 struct dsps_glue *glue = dev_get_drvdata(dev->parent);
502 501
503 del_timer_sync(&musb->dev_timer); 502 del_timer_sync(&musb->dev_timer);
504 usb_phy_shutdown(musb->xceiv);
505 phy_power_off(musb->phy); 503 phy_power_off(musb->phy);
506 phy_exit(musb->phy); 504 phy_exit(musb->phy);
507 debugfs_remove_recursive(glue->dbgfs_root); 505 debugfs_remove_recursive(glue->dbgfs_root);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 3a8451a15f7f..4fa372c845e1 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2754,6 +2754,7 @@ int musb_host_setup(struct musb *musb, int power_budget)
2754 hcd->self.otg_port = 1; 2754 hcd->self.otg_port = 1;
2755 musb->xceiv->otg->host = &hcd->self; 2755 musb->xceiv->otg->host = &hcd->self;
2756 hcd->power_budget = 2 * (power_budget ? : 250); 2756 hcd->power_budget = 2 * (power_budget ? : 250);
2757 hcd->skip_phy_initialization = 1;
2757 2758
2758 ret = usb_add_hcd(hcd, 0, 0); 2759 ret = usb_add_hcd(hcd, 0, 0);
2759 if (ret < 0) 2760 if (ret < 0)
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index a646820f5a78..533f127c30ad 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -62,6 +62,7 @@ config USB_SERIAL_SIMPLE
62 - Fundamental Software dongle. 62 - Fundamental Software dongle.
63 - Google USB serial devices 63 - Google USB serial devices
64 - HP4x calculators 64 - HP4x calculators
65 - Libtransistor USB console
65 - a number of Motorola phones 66 - a number of Motorola phones
66 - Motorola Tetra devices 67 - Motorola Tetra devices
67 - Novatel Wireless GPS receivers 68 - Novatel Wireless GPS receivers
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index de1e759dd512..eb6c26cbe579 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -214,6 +214,7 @@ static const struct usb_device_id id_table[] = {
214 { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */ 214 { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
215 { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */ 215 { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
216 { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */ 216 { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
217 { USB_DEVICE(0x3923, 0x7A0B) }, /* National Instruments USB Serial Console */
217 { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */ 218 { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
218 { } /* Terminating Entry */ 219 { } /* Terminating Entry */
219}; 220};
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 87202ad5a50d..7ea221d42dba 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1898,7 +1898,8 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
1898 return ftdi_jtag_probe(serial); 1898 return ftdi_jtag_probe(serial);
1899 1899
1900 if (udev->product && 1900 if (udev->product &&
1901 (!strcmp(udev->product, "BeagleBone/XDS100V2") || 1901 (!strcmp(udev->product, "Arrow USB Blaster") ||
1902 !strcmp(udev->product, "BeagleBone/XDS100V2") ||
1902 !strcmp(udev->product, "SNAP Connect E10"))) 1903 !strcmp(udev->product, "SNAP Connect E10")))
1903 return ftdi_jtag_probe(serial); 1904 return ftdi_jtag_probe(serial);
1904 1905
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 4ef79e29cb26..40864c2bd9dc 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -63,6 +63,11 @@ DEVICE(flashloader, FLASHLOADER_IDS);
63 0x01) } 63 0x01) }
64DEVICE(google, GOOGLE_IDS); 64DEVICE(google, GOOGLE_IDS);
65 65
66/* Libtransistor USB console */
67#define LIBTRANSISTOR_IDS() \
68 { USB_DEVICE(0x1209, 0x8b00) }
69DEVICE(libtransistor, LIBTRANSISTOR_IDS);
70
66/* ViVOpay USB Serial Driver */ 71/* ViVOpay USB Serial Driver */
67#define VIVOPAY_IDS() \ 72#define VIVOPAY_IDS() \
68 { USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */ 73 { USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */
@@ -110,6 +115,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
110 &funsoft_device, 115 &funsoft_device,
111 &flashloader_device, 116 &flashloader_device,
112 &google_device, 117 &google_device,
118 &libtransistor_device,
113 &vivopay_device, 119 &vivopay_device,
114 &moto_modem_device, 120 &moto_modem_device,
115 &motorola_tetra_device, 121 &motorola_tetra_device,
@@ -126,6 +132,7 @@ static const struct usb_device_id id_table[] = {
126 FUNSOFT_IDS(), 132 FUNSOFT_IDS(),
127 FLASHLOADER_IDS(), 133 FLASHLOADER_IDS(),
128 GOOGLE_IDS(), 134 GOOGLE_IDS(),
135 LIBTRANSISTOR_IDS(),
129 VIVOPAY_IDS(), 136 VIVOPAY_IDS(),
130 MOTO_IDS(), 137 MOTO_IDS(),
131 MOTOROLA_TETRA_IDS(), 138 MOTOROLA_TETRA_IDS(),
diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile
index b57891c1fd31..7afbea512207 100644
--- a/drivers/usb/typec/ucsi/Makefile
+++ b/drivers/usb/typec/ucsi/Makefile
@@ -5,6 +5,6 @@ obj-$(CONFIG_TYPEC_UCSI) += typec_ucsi.o
5 5
6typec_ucsi-y := ucsi.o 6typec_ucsi-y := ucsi.o
7 7
8typec_ucsi-$(CONFIG_FTRACE) += trace.o 8typec_ucsi-$(CONFIG_TRACING) += trace.o
9 9
10obj-$(CONFIG_UCSI_ACPI) += ucsi_acpi.o 10obj-$(CONFIG_UCSI_ACPI) += ucsi_acpi.o
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index bf0977fbd100..bd5cca5632b3 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -28,7 +28,7 @@
28 * difficult to estimate the time it takes for the system to process the command 28 * difficult to estimate the time it takes for the system to process the command
29 * before it is actually passed to the PPM. 29 * before it is actually passed to the PPM.
30 */ 30 */
31#define UCSI_TIMEOUT_MS 1000 31#define UCSI_TIMEOUT_MS 5000
32 32
33/* 33/*
34 * UCSI_SWAP_TIMEOUT_MS - Timeout for role swap requests 34 * UCSI_SWAP_TIMEOUT_MS - Timeout for role swap requests
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index c31c8402a0c5..d41d0cdeec0f 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -186,7 +186,12 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf,
186 if (!bid) 186 if (!bid)
187 return -ENODEV; 187 return -ENODEV;
188 188
189 /* device_attach() callers should hold parent lock for USB */
190 if (bid->udev->dev.parent)
191 device_lock(bid->udev->dev.parent);
189 ret = device_attach(&bid->udev->dev); 192 ret = device_attach(&bid->udev->dev);
193 if (bid->udev->dev.parent)
194 device_unlock(bid->udev->dev.parent);
190 if (ret < 0) { 195 if (ret < 0) {
191 dev_err(&bid->udev->dev, "rebind failed\n"); 196 dev_err(&bid->udev->dev, "rebind failed\n");
192 return ret; 197 return ret;
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index 473fb8a87289..bf8afe9b5883 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -243,7 +243,7 @@ enum usbip_side {
243#define VUDC_EVENT_ERROR_USB (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE) 243#define VUDC_EVENT_ERROR_USB (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
244#define VUDC_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE) 244#define VUDC_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
245 245
246#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_BYE) 246#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_RESET | USBIP_EH_BYE)
247#define VDEV_EVENT_DOWN (USBIP_EH_SHUTDOWN | USBIP_EH_RESET) 247#define VDEV_EVENT_DOWN (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
248#define VDEV_EVENT_ERROR_TCP (USBIP_EH_SHUTDOWN | USBIP_EH_RESET) 248#define VDEV_EVENT_ERROR_TCP (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
249#define VDEV_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE) 249#define VDEV_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c
index 5b4c0864ad92..5d88917c9631 100644
--- a/drivers/usb/usbip/usbip_event.c
+++ b/drivers/usb/usbip/usbip_event.c
@@ -91,10 +91,6 @@ static void event_handler(struct work_struct *work)
91 unset_event(ud, USBIP_EH_UNUSABLE); 91 unset_event(ud, USBIP_EH_UNUSABLE);
92 } 92 }
93 93
94 /* Stop the error handler. */
95 if (ud->event & USBIP_EH_BYE)
96 usbip_dbg_eh("removed %p\n", ud);
97
98 wake_up(&ud->eh_waitq); 94 wake_up(&ud->eh_waitq);
99 } 95 }
100} 96}
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 20e3d4609583..d11f3f8dad40 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -354,6 +354,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
354 usbip_dbg_vhci_rh(" ClearHubFeature\n"); 354 usbip_dbg_vhci_rh(" ClearHubFeature\n");
355 break; 355 break;
356 case ClearPortFeature: 356 case ClearPortFeature:
357 if (rhport < 0)
358 goto error;
357 switch (wValue) { 359 switch (wValue) {
358 case USB_PORT_FEAT_SUSPEND: 360 case USB_PORT_FEAT_SUSPEND:
359 if (hcd->speed == HCD_USB3) { 361 if (hcd->speed == HCD_USB3) {
@@ -511,11 +513,16 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
511 goto error; 513 goto error;
512 } 514 }
513 515
516 if (rhport < 0)
517 goto error;
518
514 vhci_hcd->port_status[rhport] |= USB_PORT_STAT_SUSPEND; 519 vhci_hcd->port_status[rhport] |= USB_PORT_STAT_SUSPEND;
515 break; 520 break;
516 case USB_PORT_FEAT_POWER: 521 case USB_PORT_FEAT_POWER:
517 usbip_dbg_vhci_rh( 522 usbip_dbg_vhci_rh(
518 " SetPortFeature: USB_PORT_FEAT_POWER\n"); 523 " SetPortFeature: USB_PORT_FEAT_POWER\n");
524 if (rhport < 0)
525 goto error;
519 if (hcd->speed == HCD_USB3) 526 if (hcd->speed == HCD_USB3)
520 vhci_hcd->port_status[rhport] |= USB_SS_PORT_STAT_POWER; 527 vhci_hcd->port_status[rhport] |= USB_SS_PORT_STAT_POWER;
521 else 528 else
@@ -524,6 +531,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
524 case USB_PORT_FEAT_BH_PORT_RESET: 531 case USB_PORT_FEAT_BH_PORT_RESET:
525 usbip_dbg_vhci_rh( 532 usbip_dbg_vhci_rh(
526 " SetPortFeature: USB_PORT_FEAT_BH_PORT_RESET\n"); 533 " SetPortFeature: USB_PORT_FEAT_BH_PORT_RESET\n");
534 if (rhport < 0)
535 goto error;
527 /* Applicable only for USB3.0 hub */ 536 /* Applicable only for USB3.0 hub */
528 if (hcd->speed != HCD_USB3) { 537 if (hcd->speed != HCD_USB3) {
529 pr_err("USB_PORT_FEAT_BH_PORT_RESET req not " 538 pr_err("USB_PORT_FEAT_BH_PORT_RESET req not "
@@ -534,6 +543,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
534 case USB_PORT_FEAT_RESET: 543 case USB_PORT_FEAT_RESET:
535 usbip_dbg_vhci_rh( 544 usbip_dbg_vhci_rh(
536 " SetPortFeature: USB_PORT_FEAT_RESET\n"); 545 " SetPortFeature: USB_PORT_FEAT_RESET\n");
546 if (rhport < 0)
547 goto error;
537 /* if it's already enabled, disable */ 548 /* if it's already enabled, disable */
538 if (hcd->speed == HCD_USB3) { 549 if (hcd->speed == HCD_USB3) {
539 vhci_hcd->port_status[rhport] = 0; 550 vhci_hcd->port_status[rhport] = 0;
@@ -554,6 +565,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
554 default: 565 default:
555 usbip_dbg_vhci_rh(" SetPortFeature: default %d\n", 566 usbip_dbg_vhci_rh(" SetPortFeature: default %d\n",
556 wValue); 567 wValue);
568 if (rhport < 0)
569 goto error;
557 if (hcd->speed == HCD_USB3) { 570 if (hcd->speed == HCD_USB3) {
558 if ((vhci_hcd->port_status[rhport] & 571 if ((vhci_hcd->port_status[rhport] &
559 USB_SS_PORT_STAT_POWER) != 0) { 572 USB_SS_PORT_STAT_POWER) != 0) {
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index 190dbf8cfcb5..2f3856a95856 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -114,7 +114,7 @@ static void vbg_guest_mappings_init(struct vbg_dev *gdev)
114 } 114 }
115 115
116out: 116out:
117 kfree(req); 117 vbg_req_free(req, sizeof(*req));
118 kfree(pages); 118 kfree(pages);
119} 119}
120 120
@@ -144,7 +144,7 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
144 144
145 rc = vbg_req_perform(gdev, req); 145 rc = vbg_req_perform(gdev, req);
146 146
147 kfree(req); 147 vbg_req_free(req, sizeof(*req));
148 148
149 if (rc < 0) { 149 if (rc < 0) {
150 vbg_err("%s error: %d\n", __func__, rc); 150 vbg_err("%s error: %d\n", __func__, rc);
@@ -214,8 +214,8 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
214 ret = vbg_status_code_to_errno(rc); 214 ret = vbg_status_code_to_errno(rc);
215 215
216out_free: 216out_free:
217 kfree(req2); 217 vbg_req_free(req2, sizeof(*req2));
218 kfree(req1); 218 vbg_req_free(req1, sizeof(*req1));
219 return ret; 219 return ret;
220} 220}
221 221
@@ -245,7 +245,7 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
245 if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */ 245 if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
246 rc = VINF_SUCCESS; 246 rc = VINF_SUCCESS;
247 247
248 kfree(req); 248 vbg_req_free(req, sizeof(*req));
249 249
250 return vbg_status_code_to_errno(rc); 250 return vbg_status_code_to_errno(rc);
251} 251}
@@ -431,7 +431,7 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
431 rc = vbg_req_perform(gdev, req); 431 rc = vbg_req_perform(gdev, req);
432 do_div(req->interval_ns, 1000000); /* ns -> ms */ 432 do_div(req->interval_ns, 1000000); /* ns -> ms */
433 gdev->heartbeat_interval_ms = req->interval_ns; 433 gdev->heartbeat_interval_ms = req->interval_ns;
434 kfree(req); 434 vbg_req_free(req, sizeof(*req));
435 435
436 return vbg_status_code_to_errno(rc); 436 return vbg_status_code_to_errno(rc);
437} 437}
@@ -454,12 +454,6 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev)
454 if (ret < 0) 454 if (ret < 0)
455 return ret; 455 return ret;
456 456
457 /*
458 * Preallocate the request to use it from the timer callback because:
459 * 1) on Windows vbg_req_alloc must be called at IRQL <= APC_LEVEL
460 * and the timer callback runs at DISPATCH_LEVEL;
461 * 2) avoid repeated allocations.
462 */
463 gdev->guest_heartbeat_req = vbg_req_alloc( 457 gdev->guest_heartbeat_req = vbg_req_alloc(
464 sizeof(*gdev->guest_heartbeat_req), 458 sizeof(*gdev->guest_heartbeat_req),
465 VMMDEVREQ_GUEST_HEARTBEAT); 459 VMMDEVREQ_GUEST_HEARTBEAT);
@@ -481,8 +475,8 @@ static void vbg_heartbeat_exit(struct vbg_dev *gdev)
481{ 475{
482 del_timer_sync(&gdev->heartbeat_timer); 476 del_timer_sync(&gdev->heartbeat_timer);
483 vbg_heartbeat_host_config(gdev, false); 477 vbg_heartbeat_host_config(gdev, false);
484 kfree(gdev->guest_heartbeat_req); 478 vbg_req_free(gdev->guest_heartbeat_req,
485 479 sizeof(*gdev->guest_heartbeat_req));
486} 480}
487 481
488/** 482/**
@@ -543,7 +537,7 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
543 if (rc < 0) 537 if (rc < 0)
544 vbg_err("%s error, rc: %d\n", __func__, rc); 538 vbg_err("%s error, rc: %d\n", __func__, rc);
545 539
546 kfree(req); 540 vbg_req_free(req, sizeof(*req));
547 return vbg_status_code_to_errno(rc); 541 return vbg_status_code_to_errno(rc);
548} 542}
549 543
@@ -617,7 +611,7 @@ static int vbg_set_session_event_filter(struct vbg_dev *gdev,
617 611
618out: 612out:
619 mutex_unlock(&gdev->session_mutex); 613 mutex_unlock(&gdev->session_mutex);
620 kfree(req); 614 vbg_req_free(req, sizeof(*req));
621 615
622 return ret; 616 return ret;
623} 617}
@@ -642,7 +636,7 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
642 if (rc < 0) 636 if (rc < 0)
643 vbg_err("%s error, rc: %d\n", __func__, rc); 637 vbg_err("%s error, rc: %d\n", __func__, rc);
644 638
645 kfree(req); 639 vbg_req_free(req, sizeof(*req));
646 return vbg_status_code_to_errno(rc); 640 return vbg_status_code_to_errno(rc);
647} 641}
648 642
@@ -712,7 +706,7 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
712 706
713out: 707out:
714 mutex_unlock(&gdev->session_mutex); 708 mutex_unlock(&gdev->session_mutex);
715 kfree(req); 709 vbg_req_free(req, sizeof(*req));
716 710
717 return ret; 711 return ret;
718} 712}
@@ -733,8 +727,10 @@ static int vbg_query_host_version(struct vbg_dev *gdev)
733 727
734 rc = vbg_req_perform(gdev, req); 728 rc = vbg_req_perform(gdev, req);
735 ret = vbg_status_code_to_errno(rc); 729 ret = vbg_status_code_to_errno(rc);
736 if (ret) 730 if (ret) {
731 vbg_err("%s error: %d\n", __func__, rc);
737 goto out; 732 goto out;
733 }
738 734
739 snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u", 735 snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
740 req->major, req->minor, req->build, req->revision); 736 req->major, req->minor, req->build, req->revision);
@@ -749,7 +745,7 @@ static int vbg_query_host_version(struct vbg_dev *gdev)
749 } 745 }
750 746
751out: 747out:
752 kfree(req); 748 vbg_req_free(req, sizeof(*req));
753 return ret; 749 return ret;
754} 750}
755 751
@@ -847,11 +843,16 @@ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
847 return 0; 843 return 0;
848 844
849err_free_reqs: 845err_free_reqs:
850 kfree(gdev->mouse_status_req); 846 vbg_req_free(gdev->mouse_status_req,
851 kfree(gdev->ack_events_req); 847 sizeof(*gdev->mouse_status_req));
852 kfree(gdev->cancel_req); 848 vbg_req_free(gdev->ack_events_req,
853 kfree(gdev->mem_balloon.change_req); 849 sizeof(*gdev->ack_events_req));
854 kfree(gdev->mem_balloon.get_req); 850 vbg_req_free(gdev->cancel_req,
851 sizeof(*gdev->cancel_req));
852 vbg_req_free(gdev->mem_balloon.change_req,
853 sizeof(*gdev->mem_balloon.change_req));
854 vbg_req_free(gdev->mem_balloon.get_req,
855 sizeof(*gdev->mem_balloon.get_req));
855 return ret; 856 return ret;
856} 857}
857 858
@@ -872,11 +873,16 @@ void vbg_core_exit(struct vbg_dev *gdev)
872 vbg_reset_host_capabilities(gdev); 873 vbg_reset_host_capabilities(gdev);
873 vbg_core_set_mouse_status(gdev, 0); 874 vbg_core_set_mouse_status(gdev, 0);
874 875
875 kfree(gdev->mouse_status_req); 876 vbg_req_free(gdev->mouse_status_req,
876 kfree(gdev->ack_events_req); 877 sizeof(*gdev->mouse_status_req));
877 kfree(gdev->cancel_req); 878 vbg_req_free(gdev->ack_events_req,
878 kfree(gdev->mem_balloon.change_req); 879 sizeof(*gdev->ack_events_req));
879 kfree(gdev->mem_balloon.get_req); 880 vbg_req_free(gdev->cancel_req,
881 sizeof(*gdev->cancel_req));
882 vbg_req_free(gdev->mem_balloon.change_req,
883 sizeof(*gdev->mem_balloon.change_req));
884 vbg_req_free(gdev->mem_balloon.get_req,
885 sizeof(*gdev->mem_balloon.get_req));
880} 886}
881 887
882/** 888/**
@@ -1415,7 +1421,7 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
1415 req->flags = dump->u.in.flags; 1421 req->flags = dump->u.in.flags;
1416 dump->hdr.rc = vbg_req_perform(gdev, req); 1422 dump->hdr.rc = vbg_req_perform(gdev, req);
1417 1423
1418 kfree(req); 1424 vbg_req_free(req, sizeof(*req));
1419 return 0; 1425 return 0;
1420} 1426}
1421 1427
@@ -1513,7 +1519,7 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
1513 if (rc < 0) 1519 if (rc < 0)
1514 vbg_err("%s error, rc: %d\n", __func__, rc); 1520 vbg_err("%s error, rc: %d\n", __func__, rc);
1515 1521
1516 kfree(req); 1522 vbg_req_free(req, sizeof(*req));
1517 return vbg_status_code_to_errno(rc); 1523 return vbg_status_code_to_errno(rc);
1518} 1524}
1519 1525
diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
index 6c784bf4fa6d..7ad9ec45bfa9 100644
--- a/drivers/virt/vboxguest/vboxguest_core.h
+++ b/drivers/virt/vboxguest/vboxguest_core.h
@@ -171,4 +171,13 @@ irqreturn_t vbg_core_isr(int irq, void *dev_id);
171 171
172void vbg_linux_mouse_event(struct vbg_dev *gdev); 172void vbg_linux_mouse_event(struct vbg_dev *gdev);
173 173
174/* Private (non exported) functions form vboxguest_utils.c */
175void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
176void vbg_req_free(void *req, size_t len);
177int vbg_req_perform(struct vbg_dev *gdev, void *req);
178int vbg_hgcm_call32(
179 struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
180 struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
181 int *vbox_status);
182
174#endif 183#endif
diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
index 82e280d38cc2..398d22693234 100644
--- a/drivers/virt/vboxguest/vboxguest_linux.c
+++ b/drivers/virt/vboxguest/vboxguest_linux.c
@@ -87,6 +87,7 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
87 struct vbg_session *session = filp->private_data; 87 struct vbg_session *session = filp->private_data;
88 size_t returned_size, size; 88 size_t returned_size, size;
89 struct vbg_ioctl_hdr hdr; 89 struct vbg_ioctl_hdr hdr;
90 bool is_vmmdev_req;
90 int ret = 0; 91 int ret = 0;
91 void *buf; 92 void *buf;
92 93
@@ -106,8 +107,17 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
106 if (size > SZ_16M) 107 if (size > SZ_16M)
107 return -E2BIG; 108 return -E2BIG;
108 109
109 /* __GFP_DMA32 because IOCTL_VMMDEV_REQUEST passes this to the host */ 110 /*
110 buf = kmalloc(size, GFP_KERNEL | __GFP_DMA32); 111 * IOCTL_VMMDEV_REQUEST needs the buffer to be below 4G to avoid
112 * the need for a bounce-buffer and another copy later on.
113 */
114 is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) ||
115 req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
116
117 if (is_vmmdev_req)
118 buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT);
119 else
120 buf = kmalloc(size, GFP_KERNEL);
111 if (!buf) 121 if (!buf)
112 return -ENOMEM; 122 return -ENOMEM;
113 123
@@ -132,7 +142,10 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
132 ret = -EFAULT; 142 ret = -EFAULT;
133 143
134out: 144out:
135 kfree(buf); 145 if (is_vmmdev_req)
146 vbg_req_free(buf, size);
147 else
148 kfree(buf);
136 149
137 return ret; 150 return ret;
138} 151}
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
index 0f0dab8023cf..bf4474214b4d 100644
--- a/drivers/virt/vboxguest/vboxguest_utils.c
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -65,8 +65,9 @@ VBG_LOG(vbg_debug, pr_debug);
65void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type) 65void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
66{ 66{
67 struct vmmdev_request_header *req; 67 struct vmmdev_request_header *req;
68 int order = get_order(PAGE_ALIGN(len));
68 69
69 req = kmalloc(len, GFP_KERNEL | __GFP_DMA32); 70 req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
70 if (!req) 71 if (!req)
71 return NULL; 72 return NULL;
72 73
@@ -82,6 +83,14 @@ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
82 return req; 83 return req;
83} 84}
84 85
86void vbg_req_free(void *req, size_t len)
87{
88 if (!req)
89 return;
90
91 free_pages((unsigned long)req, get_order(PAGE_ALIGN(len)));
92}
93
85/* Note this function returns a VBox status code, not a negative errno!! */ 94/* Note this function returns a VBox status code, not a negative errno!! */
86int vbg_req_perform(struct vbg_dev *gdev, void *req) 95int vbg_req_perform(struct vbg_dev *gdev, void *req)
87{ 96{
@@ -137,7 +146,7 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
137 rc = hgcm_connect->header.result; 146 rc = hgcm_connect->header.result;
138 } 147 }
139 148
140 kfree(hgcm_connect); 149 vbg_req_free(hgcm_connect, sizeof(*hgcm_connect));
141 150
142 *vbox_status = rc; 151 *vbox_status = rc;
143 return 0; 152 return 0;
@@ -166,7 +175,7 @@ int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
166 if (rc >= 0) 175 if (rc >= 0)
167 rc = hgcm_disconnect->header.result; 176 rc = hgcm_disconnect->header.result;
168 177
169 kfree(hgcm_disconnect); 178 vbg_req_free(hgcm_disconnect, sizeof(*hgcm_disconnect));
170 179
171 *vbox_status = rc; 180 *vbox_status = rc;
172 return 0; 181 return 0;
@@ -623,7 +632,7 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
623 } 632 }
624 633
625 if (!leak_it) 634 if (!leak_it)
626 kfree(call); 635 vbg_req_free(call, size);
627 636
628free_bounce_bufs: 637free_bounce_bufs:
629 if (bounce_bufs) { 638 if (bounce_bufs) {
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
index a5b8eb21201f..1abe4d021fd2 100644
--- a/drivers/watchdog/aspeed_wdt.c
+++ b/drivers/watchdog/aspeed_wdt.c
@@ -55,6 +55,8 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
55#define WDT_CTRL_WDT_INTR BIT(2) 55#define WDT_CTRL_WDT_INTR BIT(2)
56#define WDT_CTRL_RESET_SYSTEM BIT(1) 56#define WDT_CTRL_RESET_SYSTEM BIT(1)
57#define WDT_CTRL_ENABLE BIT(0) 57#define WDT_CTRL_ENABLE BIT(0)
58#define WDT_TIMEOUT_STATUS 0x10
59#define WDT_TIMEOUT_STATUS_BOOT_SECONDARY BIT(1)
58 60
59/* 61/*
60 * WDT_RESET_WIDTH controls the characteristics of the external pulse (if 62 * WDT_RESET_WIDTH controls the characteristics of the external pulse (if
@@ -192,6 +194,7 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
192 struct device_node *np; 194 struct device_node *np;
193 const char *reset_type; 195 const char *reset_type;
194 u32 duration; 196 u32 duration;
197 u32 status;
195 int ret; 198 int ret;
196 199
197 wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); 200 wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
@@ -307,6 +310,10 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
307 writel(duration - 1, wdt->base + WDT_RESET_WIDTH); 310 writel(duration - 1, wdt->base + WDT_RESET_WIDTH);
308 } 311 }
309 312
313 status = readl(wdt->base + WDT_TIMEOUT_STATUS);
314 if (status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY)
315 wdt->wdd.bootstatus = WDIOF_CARDRESET;
316
310 ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdd); 317 ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdd);
311 if (ret) { 318 if (ret) {
312 dev_err(&pdev->dev, "failed to register\n"); 319 dev_err(&pdev->dev, "failed to register\n");
diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c
index 6b8c6ddfe30b..514db5cc1595 100644
--- a/drivers/watchdog/renesas_wdt.c
+++ b/drivers/watchdog/renesas_wdt.c
@@ -121,7 +121,8 @@ static int rwdt_restart(struct watchdog_device *wdev, unsigned long action,
121} 121}
122 122
123static const struct watchdog_info rwdt_ident = { 123static const struct watchdog_info rwdt_ident = {
124 .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT, 124 .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
125 WDIOF_CARDRESET,
125 .identity = "Renesas WDT Watchdog", 126 .identity = "Renesas WDT Watchdog",
126}; 127};
127 128
@@ -197,9 +198,10 @@ static int rwdt_probe(struct platform_device *pdev)
197 return PTR_ERR(clk); 198 return PTR_ERR(clk);
198 199
199 pm_runtime_enable(&pdev->dev); 200 pm_runtime_enable(&pdev->dev);
200
201 pm_runtime_get_sync(&pdev->dev); 201 pm_runtime_get_sync(&pdev->dev);
202 priv->clk_rate = clk_get_rate(clk); 202 priv->clk_rate = clk_get_rate(clk);
203 priv->wdev.bootstatus = (readb_relaxed(priv->base + RWTCSRA) &
204 RWTCSRA_WOVF) ? WDIOF_CARDRESET : 0;
203 pm_runtime_put(&pdev->dev); 205 pm_runtime_put(&pdev->dev);
204 206
205 if (!priv->clk_rate) { 207 if (!priv->clk_rate) {
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c
index 43d0cbb7ba0b..814cdf539b0f 100644
--- a/drivers/watchdog/sch311x_wdt.c
+++ b/drivers/watchdog/sch311x_wdt.c
@@ -299,7 +299,7 @@ static long sch311x_wdt_ioctl(struct file *file, unsigned int cmd,
299 if (sch311x_wdt_set_heartbeat(new_timeout)) 299 if (sch311x_wdt_set_heartbeat(new_timeout))
300 return -EINVAL; 300 return -EINVAL;
301 sch311x_wdt_keepalive(); 301 sch311x_wdt_keepalive();
302 /* Fall */ 302 /* Fall through */
303 case WDIOC_GETTIMEOUT: 303 case WDIOC_GETTIMEOUT:
304 return put_user(timeout, p); 304 return put_user(timeout, p);
305 default: 305 default:
diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c
index 20e2bba10400..672b61a7f9a3 100644
--- a/drivers/watchdog/w83977f_wdt.c
+++ b/drivers/watchdog/w83977f_wdt.c
@@ -427,7 +427,7 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
427 return -EINVAL; 427 return -EINVAL;
428 428
429 wdt_keepalive(); 429 wdt_keepalive();
430 /* Fall */ 430 /* Fall through */
431 431
432 case WDIOC_GETTIMEOUT: 432 case WDIOC_GETTIMEOUT:
433 return put_user(timeout, uarg.i); 433 return put_user(timeout, uarg.i);
diff --git a/drivers/watchdog/wafer5823wdt.c b/drivers/watchdog/wafer5823wdt.c
index db0da7ea4fd8..93c5b610e264 100644
--- a/drivers/watchdog/wafer5823wdt.c
+++ b/drivers/watchdog/wafer5823wdt.c
@@ -178,7 +178,7 @@ static long wafwdt_ioctl(struct file *file, unsigned int cmd,
178 timeout = new_timeout; 178 timeout = new_timeout;
179 wafwdt_stop(); 179 wafwdt_stop();
180 wafwdt_start(); 180 wafwdt_start();
181 /* Fall */ 181 /* Fall through */
182 case WDIOC_GETTIMEOUT: 182 case WDIOC_GETTIMEOUT:
183 return put_user(timeout, p); 183 return put_user(timeout, p);
184 184
diff --git a/drivers/xen/xen-pciback/conf_space_quirks.c b/drivers/xen/xen-pciback/conf_space_quirks.c
index 89d9744ece61..ed593d1042a6 100644
--- a/drivers/xen/xen-pciback/conf_space_quirks.c
+++ b/drivers/xen/xen-pciback/conf_space_quirks.c
@@ -95,7 +95,7 @@ int xen_pcibk_config_quirks_init(struct pci_dev *dev)
95 struct xen_pcibk_config_quirk *quirk; 95 struct xen_pcibk_config_quirk *quirk;
96 int ret = 0; 96 int ret = 0;
97 97
98 quirk = kzalloc(sizeof(*quirk), GFP_ATOMIC); 98 quirk = kzalloc(sizeof(*quirk), GFP_KERNEL);
99 if (!quirk) { 99 if (!quirk) {
100 ret = -ENOMEM; 100 ret = -ENOMEM;
101 goto out; 101 goto out;
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 9e480fdebe1f..59661db144e5 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -71,7 +71,7 @@ static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
71 71
72 dev_dbg(&dev->dev, "pcistub_device_alloc\n"); 72 dev_dbg(&dev->dev, "pcistub_device_alloc\n");
73 73
74 psdev = kzalloc(sizeof(*psdev), GFP_ATOMIC); 74 psdev = kzalloc(sizeof(*psdev), GFP_KERNEL);
75 if (!psdev) 75 if (!psdev)
76 return NULL; 76 return NULL;
77 77
@@ -364,7 +364,7 @@ static int pcistub_init_device(struct pci_dev *dev)
364 * here and then to call kfree(pci_get_drvdata(psdev->dev)). 364 * here and then to call kfree(pci_get_drvdata(psdev->dev)).
365 */ 365 */
366 dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]") 366 dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]")
367 + strlen(pci_name(dev)) + 1, GFP_ATOMIC); 367 + strlen(pci_name(dev)) + 1, GFP_KERNEL);
368 if (!dev_data) { 368 if (!dev_data) {
369 err = -ENOMEM; 369 err = -ENOMEM;
370 goto out; 370 goto out;
@@ -577,7 +577,7 @@ static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id)
577 } 577 }
578 578
579 if (!match) { 579 if (!match) {
580 pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_ATOMIC); 580 pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
581 if (!pci_dev_id) { 581 if (!pci_dev_id) {
582 err = -ENOMEM; 582 err = -ENOMEM;
583 goto out; 583 goto out;
@@ -1149,7 +1149,7 @@ static int pcistub_reg_add(int domain, int bus, int slot, int func,
1149 } 1149 }
1150 dev = psdev->dev; 1150 dev = psdev->dev;
1151 1151
1152 field = kzalloc(sizeof(*field), GFP_ATOMIC); 1152 field = kzalloc(sizeof(*field), GFP_KERNEL);
1153 if (!field) { 1153 if (!field) {
1154 err = -ENOMEM; 1154 err = -ENOMEM;
1155 goto out; 1155 goto out;
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 0d6d9264d6a9..c3e201025ef0 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -403,7 +403,7 @@ static int xenbus_command_reply(struct xenbus_file_priv *u,
403{ 403{
404 struct { 404 struct {
405 struct xsd_sockmsg hdr; 405 struct xsd_sockmsg hdr;
406 const char body[16]; 406 char body[16];
407 } msg; 407 } msg;
408 int rc; 408 int rc;
409 409
@@ -412,6 +412,7 @@ static int xenbus_command_reply(struct xenbus_file_priv *u,
412 msg.hdr.len = strlen(reply) + 1; 412 msg.hdr.len = strlen(reply) + 1;
413 if (msg.hdr.len > sizeof(msg.body)) 413 if (msg.hdr.len > sizeof(msg.body))
414 return -E2BIG; 414 return -E2BIG;
415 memcpy(&msg.body, reply, msg.hdr.len);
415 416
416 mutex_lock(&u->reply_mutex); 417 mutex_lock(&u->reply_mutex);
417 rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len); 418 rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len);
diff --git a/fs/afs/server.c b/fs/afs/server.c
index e23be63998a8..629c74986cff 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -428,8 +428,15 @@ static void afs_gc_servers(struct afs_net *net, struct afs_server *gc_list)
428 } 428 }
429 write_sequnlock(&net->fs_lock); 429 write_sequnlock(&net->fs_lock);
430 430
431 if (deleted) 431 if (deleted) {
432 write_seqlock(&net->fs_addr_lock);
433 if (!hlist_unhashed(&server->addr4_link))
434 hlist_del_rcu(&server->addr4_link);
435 if (!hlist_unhashed(&server->addr6_link))
436 hlist_del_rcu(&server->addr6_link);
437 write_sequnlock(&net->fs_addr_lock);
432 afs_destroy_server(net, server); 438 afs_destroy_server(net, server);
439 }
433 } 440 }
434} 441}
435 442
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 82e8f6edfb48..b12e37f27530 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -749,7 +749,7 @@ static int autofs4_dir_mkdir(struct inode *dir,
749 749
750 autofs4_del_active(dentry); 750 autofs4_del_active(dentry);
751 751
752 inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555); 752 inode = autofs4_get_inode(dir->i_sb, S_IFDIR | mode);
753 if (!inode) 753 if (!inode)
754 return -ENOMEM; 754 return -ENOMEM;
755 d_add(dentry, inode); 755 d_add(dentry, inode);
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 41e04183e4ce..4ad6f669fe34 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -377,10 +377,10 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
377 } else 377 } else
378 map_addr = vm_mmap(filep, addr, size, prot, type, off); 378 map_addr = vm_mmap(filep, addr, size, prot, type, off);
379 379
380 if ((type & MAP_FIXED_NOREPLACE) && BAD_ADDR(map_addr)) 380 if ((type & MAP_FIXED_NOREPLACE) &&
381 pr_info("%d (%s): Uhuuh, elf segment at %p requested but the memory is mapped already\n", 381 PTR_ERR((void *)map_addr) == -EEXIST)
382 task_pid_nr(current), current->comm, 382 pr_info("%d (%s): Uhuuh, elf segment at %px requested but the memory is mapped already\n",
383 (void *)addr); 383 task_pid_nr(current), current->comm, (void *)addr);
384 384
385 return(map_addr); 385 return(map_addr);
386} 386}
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 5474ef14d6e6..2771cc56a622 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -459,6 +459,25 @@ struct btrfs_block_rsv {
459 unsigned short full; 459 unsigned short full;
460 unsigned short type; 460 unsigned short type;
461 unsigned short failfast; 461 unsigned short failfast;
462
463 /*
464 * Qgroup equivalent for @size @reserved
465 *
466 * Unlike normal @size/@reserved for inode rsv, qgroup doesn't care
 467 * about things like csum size or how many tree blocks it will need to
468 * reserve.
469 *
470 * Qgroup cares more about net change of the extent usage.
471 *
472 * So for one newly inserted file extent, in worst case it will cause
473 * leaf split and level increase, nodesize for each file extent is
474 * already too much.
475 *
476 * In short, qgroup_size/reserved is the upper limit of possible needed
477 * qgroup metadata reservation.
478 */
479 u64 qgroup_rsv_size;
480 u64 qgroup_rsv_reserved;
462}; 481};
463 482
464/* 483/*
@@ -714,6 +733,12 @@ struct btrfs_delayed_root;
714 */ 733 */
715#define BTRFS_FS_EXCL_OP 16 734#define BTRFS_FS_EXCL_OP 16
716 735
736/*
737 * To info transaction_kthread we need an immediate commit so it doesn't
738 * need to wait for commit_interval
739 */
740#define BTRFS_FS_NEED_ASYNC_COMMIT 17
741
717struct btrfs_fs_info { 742struct btrfs_fs_info {
718 u8 fsid[BTRFS_FSID_SIZE]; 743 u8 fsid[BTRFS_FSID_SIZE];
719 u8 chunk_tree_uuid[BTRFS_UUID_SIZE]; 744 u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 06ec8ab6d9ba..a8d492dbd3e7 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -556,6 +556,12 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
556 dst_rsv = &fs_info->delayed_block_rsv; 556 dst_rsv = &fs_info->delayed_block_rsv;
557 557
558 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1); 558 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
559
560 /*
 561 * Here we migrate space rsv from transaction rsv, since we have already
562 * reserved space when starting a transaction. So no need to reserve
563 * qgroup space here.
564 */
559 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1); 565 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
560 if (!ret) { 566 if (!ret) {
561 trace_btrfs_space_reservation(fs_info, "delayed_item", 567 trace_btrfs_space_reservation(fs_info, "delayed_item",
@@ -577,7 +583,10 @@ static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
577 return; 583 return;
578 584
579 rsv = &fs_info->delayed_block_rsv; 585 rsv = &fs_info->delayed_block_rsv;
580 btrfs_qgroup_convert_reserved_meta(root, item->bytes_reserved); 586 /*
587 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
588 * to release/reserve qgroup space.
589 */
581 trace_btrfs_space_reservation(fs_info, "delayed_item", 590 trace_btrfs_space_reservation(fs_info, "delayed_item",
582 item->key.objectid, item->bytes_reserved, 591 item->key.objectid, item->bytes_reserved,
583 0); 592 0);
@@ -602,9 +611,6 @@ static int btrfs_delayed_inode_reserve_metadata(
602 611
603 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1); 612 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
604 613
605 ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
606 if (ret < 0)
607 return ret;
608 /* 614 /*
609 * btrfs_dirty_inode will update the inode under btrfs_join_transaction 615 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
610 * which doesn't reserve space for speed. This is a problem since we 616 * which doesn't reserve space for speed. This is a problem since we
@@ -616,6 +622,10 @@ static int btrfs_delayed_inode_reserve_metadata(
616 */ 622 */
617 if (!src_rsv || (!trans->bytes_reserved && 623 if (!src_rsv || (!trans->bytes_reserved &&
618 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) { 624 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
625 ret = btrfs_qgroup_reserve_meta_prealloc(root,
626 fs_info->nodesize, true);
627 if (ret < 0)
628 return ret;
619 ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes, 629 ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
620 BTRFS_RESERVE_NO_FLUSH); 630 BTRFS_RESERVE_NO_FLUSH);
621 /* 631 /*
@@ -634,6 +644,8 @@ static int btrfs_delayed_inode_reserve_metadata(
634 "delayed_inode", 644 "delayed_inode",
635 btrfs_ino(inode), 645 btrfs_ino(inode),
636 num_bytes, 1); 646 num_bytes, 1);
647 } else {
648 btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize);
637 } 649 }
638 return ret; 650 return ret;
639 } 651 }
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 9e98295de7ce..e1b0651686f7 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -540,8 +540,10 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
540 struct btrfs_delayed_ref_head *head_ref, 540 struct btrfs_delayed_ref_head *head_ref,
541 struct btrfs_qgroup_extent_record *qrecord, 541 struct btrfs_qgroup_extent_record *qrecord,
542 u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved, 542 u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
543 int action, int is_data, int *qrecord_inserted_ret, 543 int action, int is_data, int is_system,
544 int *qrecord_inserted_ret,
544 int *old_ref_mod, int *new_ref_mod) 545 int *old_ref_mod, int *new_ref_mod)
546
545{ 547{
546 struct btrfs_delayed_ref_head *existing; 548 struct btrfs_delayed_ref_head *existing;
547 struct btrfs_delayed_ref_root *delayed_refs; 549 struct btrfs_delayed_ref_root *delayed_refs;
@@ -585,6 +587,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
585 head_ref->ref_mod = count_mod; 587 head_ref->ref_mod = count_mod;
586 head_ref->must_insert_reserved = must_insert_reserved; 588 head_ref->must_insert_reserved = must_insert_reserved;
587 head_ref->is_data = is_data; 589 head_ref->is_data = is_data;
590 head_ref->is_system = is_system;
588 head_ref->ref_tree = RB_ROOT; 591 head_ref->ref_tree = RB_ROOT;
589 INIT_LIST_HEAD(&head_ref->ref_add_list); 592 INIT_LIST_HEAD(&head_ref->ref_add_list);
590 RB_CLEAR_NODE(&head_ref->href_node); 593 RB_CLEAR_NODE(&head_ref->href_node);
@@ -772,6 +775,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
772 struct btrfs_delayed_ref_root *delayed_refs; 775 struct btrfs_delayed_ref_root *delayed_refs;
773 struct btrfs_qgroup_extent_record *record = NULL; 776 struct btrfs_qgroup_extent_record *record = NULL;
774 int qrecord_inserted; 777 int qrecord_inserted;
778 int is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
775 779
776 BUG_ON(extent_op && extent_op->is_data); 780 BUG_ON(extent_op && extent_op->is_data);
777 ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS); 781 ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
@@ -800,8 +804,8 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
800 */ 804 */
801 head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record, 805 head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
802 bytenr, num_bytes, 0, 0, action, 0, 806 bytenr, num_bytes, 0, 0, action, 0,
803 &qrecord_inserted, old_ref_mod, 807 is_system, &qrecord_inserted,
804 new_ref_mod); 808 old_ref_mod, new_ref_mod);
805 809
806 add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr, 810 add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
807 num_bytes, parent, ref_root, level, action); 811 num_bytes, parent, ref_root, level, action);
@@ -868,7 +872,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
868 */ 872 */
869 head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record, 873 head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
870 bytenr, num_bytes, ref_root, reserved, 874 bytenr, num_bytes, ref_root, reserved,
871 action, 1, &qrecord_inserted, 875 action, 1, 0, &qrecord_inserted,
872 old_ref_mod, new_ref_mod); 876 old_ref_mod, new_ref_mod);
873 877
874 add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr, 878 add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
@@ -898,9 +902,14 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
898 delayed_refs = &trans->transaction->delayed_refs; 902 delayed_refs = &trans->transaction->delayed_refs;
899 spin_lock(&delayed_refs->lock); 903 spin_lock(&delayed_refs->lock);
900 904
905 /*
906 * extent_ops just modify the flags of an extent and they don't result
907 * in ref count changes, hence it's safe to pass false/0 for is_system
908 * argument
909 */
901 add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr, 910 add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr,
902 num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD, 911 num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
903 extent_op->is_data, NULL, NULL, NULL); 912 extent_op->is_data, 0, NULL, NULL, NULL);
904 913
905 spin_unlock(&delayed_refs->lock); 914 spin_unlock(&delayed_refs->lock);
906 return 0; 915 return 0;
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 741869dbc316..7f00db50bd24 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -127,6 +127,7 @@ struct btrfs_delayed_ref_head {
127 */ 127 */
128 unsigned int must_insert_reserved:1; 128 unsigned int must_insert_reserved:1;
129 unsigned int is_data:1; 129 unsigned int is_data:1;
130 unsigned int is_system:1;
130 unsigned int processing:1; 131 unsigned int processing:1;
131}; 132};
132 133
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 4ac8b1d21baf..60caa68c3618 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1824,6 +1824,7 @@ static int transaction_kthread(void *arg)
1824 1824
1825 now = get_seconds(); 1825 now = get_seconds();
1826 if (cur->state < TRANS_STATE_BLOCKED && 1826 if (cur->state < TRANS_STATE_BLOCKED &&
1827 !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) &&
1827 (now < cur->start_time || 1828 (now < cur->start_time ||
1828 now - cur->start_time < fs_info->commit_interval)) { 1829 now - cur->start_time < fs_info->commit_interval)) {
1829 spin_unlock(&fs_info->trans_lock); 1830 spin_unlock(&fs_info->trans_lock);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 75cfb80d2551..e2f16b68fcbf 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2601,13 +2601,19 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
2601 trace_run_delayed_ref_head(fs_info, head, 0); 2601 trace_run_delayed_ref_head(fs_info, head, 0);
2602 2602
2603 if (head->total_ref_mod < 0) { 2603 if (head->total_ref_mod < 0) {
2604 struct btrfs_block_group_cache *cache; 2604 struct btrfs_space_info *space_info;
2605 u64 flags;
2605 2606
2606 cache = btrfs_lookup_block_group(fs_info, head->bytenr); 2607 if (head->is_data)
2607 ASSERT(cache); 2608 flags = BTRFS_BLOCK_GROUP_DATA;
2608 percpu_counter_add(&cache->space_info->total_bytes_pinned, 2609 else if (head->is_system)
2610 flags = BTRFS_BLOCK_GROUP_SYSTEM;
2611 else
2612 flags = BTRFS_BLOCK_GROUP_METADATA;
2613 space_info = __find_space_info(fs_info, flags);
2614 ASSERT(space_info);
2615 percpu_counter_add(&space_info->total_bytes_pinned,
2609 -head->num_bytes); 2616 -head->num_bytes);
2610 btrfs_put_block_group(cache);
2611 2617
2612 if (head->is_data) { 2618 if (head->is_data) {
2613 spin_lock(&delayed_refs->lock); 2619 spin_lock(&delayed_refs->lock);
@@ -5559,14 +5565,18 @@ again:
5559 5565
5560static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info, 5566static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
5561 struct btrfs_block_rsv *block_rsv, 5567 struct btrfs_block_rsv *block_rsv,
5562 struct btrfs_block_rsv *dest, u64 num_bytes) 5568 struct btrfs_block_rsv *dest, u64 num_bytes,
5569 u64 *qgroup_to_release_ret)
5563{ 5570{
5564 struct btrfs_space_info *space_info = block_rsv->space_info; 5571 struct btrfs_space_info *space_info = block_rsv->space_info;
5572 u64 qgroup_to_release = 0;
5565 u64 ret; 5573 u64 ret;
5566 5574
5567 spin_lock(&block_rsv->lock); 5575 spin_lock(&block_rsv->lock);
5568 if (num_bytes == (u64)-1) 5576 if (num_bytes == (u64)-1) {
5569 num_bytes = block_rsv->size; 5577 num_bytes = block_rsv->size;
5578 qgroup_to_release = block_rsv->qgroup_rsv_size;
5579 }
5570 block_rsv->size -= num_bytes; 5580 block_rsv->size -= num_bytes;
5571 if (block_rsv->reserved >= block_rsv->size) { 5581 if (block_rsv->reserved >= block_rsv->size) {
5572 num_bytes = block_rsv->reserved - block_rsv->size; 5582 num_bytes = block_rsv->reserved - block_rsv->size;
@@ -5575,6 +5585,13 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
5575 } else { 5585 } else {
5576 num_bytes = 0; 5586 num_bytes = 0;
5577 } 5587 }
5588 if (block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
5589 qgroup_to_release = block_rsv->qgroup_rsv_reserved -
5590 block_rsv->qgroup_rsv_size;
5591 block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size;
5592 } else {
5593 qgroup_to_release = 0;
5594 }
5578 spin_unlock(&block_rsv->lock); 5595 spin_unlock(&block_rsv->lock);
5579 5596
5580 ret = num_bytes; 5597 ret = num_bytes;
@@ -5597,6 +5614,8 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
5597 space_info_add_old_bytes(fs_info, space_info, 5614 space_info_add_old_bytes(fs_info, space_info,
5598 num_bytes); 5615 num_bytes);
5599 } 5616 }
5617 if (qgroup_to_release_ret)
5618 *qgroup_to_release_ret = qgroup_to_release;
5600 return ret; 5619 return ret;
5601} 5620}
5602 5621
@@ -5738,17 +5757,21 @@ static int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
5738 struct btrfs_root *root = inode->root; 5757 struct btrfs_root *root = inode->root;
5739 struct btrfs_block_rsv *block_rsv = &inode->block_rsv; 5758 struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
5740 u64 num_bytes = 0; 5759 u64 num_bytes = 0;
5760 u64 qgroup_num_bytes = 0;
5741 int ret = -ENOSPC; 5761 int ret = -ENOSPC;
5742 5762
5743 spin_lock(&block_rsv->lock); 5763 spin_lock(&block_rsv->lock);
5744 if (block_rsv->reserved < block_rsv->size) 5764 if (block_rsv->reserved < block_rsv->size)
5745 num_bytes = block_rsv->size - block_rsv->reserved; 5765 num_bytes = block_rsv->size - block_rsv->reserved;
5766 if (block_rsv->qgroup_rsv_reserved < block_rsv->qgroup_rsv_size)
5767 qgroup_num_bytes = block_rsv->qgroup_rsv_size -
5768 block_rsv->qgroup_rsv_reserved;
5746 spin_unlock(&block_rsv->lock); 5769 spin_unlock(&block_rsv->lock);
5747 5770
5748 if (num_bytes == 0) 5771 if (num_bytes == 0)
5749 return 0; 5772 return 0;
5750 5773
5751 ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true); 5774 ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_num_bytes, true);
5752 if (ret) 5775 if (ret)
5753 return ret; 5776 return ret;
5754 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush); 5777 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
@@ -5756,7 +5779,13 @@ static int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
5756 block_rsv_add_bytes(block_rsv, num_bytes, 0); 5779 block_rsv_add_bytes(block_rsv, num_bytes, 0);
5757 trace_btrfs_space_reservation(root->fs_info, "delalloc", 5780 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5758 btrfs_ino(inode), num_bytes, 1); 5781 btrfs_ino(inode), num_bytes, 1);
5759 } 5782
5783 /* Don't forget to increase qgroup_rsv_reserved */
5784 spin_lock(&block_rsv->lock);
5785 block_rsv->qgroup_rsv_reserved += qgroup_num_bytes;
5786 spin_unlock(&block_rsv->lock);
5787 } else
5788 btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);
5760 return ret; 5789 return ret;
5761} 5790}
5762 5791
@@ -5777,20 +5806,23 @@ static void btrfs_inode_rsv_release(struct btrfs_inode *inode, bool qgroup_free)
5777 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; 5806 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5778 struct btrfs_block_rsv *block_rsv = &inode->block_rsv; 5807 struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
5779 u64 released = 0; 5808 u64 released = 0;
5809 u64 qgroup_to_release = 0;
5780 5810
5781 /* 5811 /*
5782 * Since we statically set the block_rsv->size we just want to say we 5812 * Since we statically set the block_rsv->size we just want to say we
5783 * are releasing 0 bytes, and then we'll just get the reservation over 5813 * are releasing 0 bytes, and then we'll just get the reservation over
5784 * the size free'd. 5814 * the size free'd.
5785 */ 5815 */
5786 released = block_rsv_release_bytes(fs_info, block_rsv, global_rsv, 0); 5816 released = block_rsv_release_bytes(fs_info, block_rsv, global_rsv, 0,
5817 &qgroup_to_release);
5787 if (released > 0) 5818 if (released > 0)
5788 trace_btrfs_space_reservation(fs_info, "delalloc", 5819 trace_btrfs_space_reservation(fs_info, "delalloc",
5789 btrfs_ino(inode), released, 0); 5820 btrfs_ino(inode), released, 0);
5790 if (qgroup_free) 5821 if (qgroup_free)
5791 btrfs_qgroup_free_meta_prealloc(inode->root, released); 5822 btrfs_qgroup_free_meta_prealloc(inode->root, qgroup_to_release);
5792 else 5823 else
5793 btrfs_qgroup_convert_reserved_meta(inode->root, released); 5824 btrfs_qgroup_convert_reserved_meta(inode->root,
5825 qgroup_to_release);
5794} 5826}
5795 5827
5796void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info, 5828void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
@@ -5802,7 +5834,7 @@ void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
5802 if (global_rsv == block_rsv || 5834 if (global_rsv == block_rsv ||
5803 block_rsv->space_info != global_rsv->space_info) 5835 block_rsv->space_info != global_rsv->space_info)
5804 global_rsv = NULL; 5836 global_rsv = NULL;
5805 block_rsv_release_bytes(fs_info, block_rsv, global_rsv, num_bytes); 5837 block_rsv_release_bytes(fs_info, block_rsv, global_rsv, num_bytes, NULL);
5806} 5838}
5807 5839
5808static void update_global_block_rsv(struct btrfs_fs_info *fs_info) 5840static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
@@ -5882,7 +5914,7 @@ static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
5882static void release_global_block_rsv(struct btrfs_fs_info *fs_info) 5914static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
5883{ 5915{
5884 block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL, 5916 block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
5885 (u64)-1); 5917 (u64)-1, NULL);
5886 WARN_ON(fs_info->trans_block_rsv.size > 0); 5918 WARN_ON(fs_info->trans_block_rsv.size > 0);
5887 WARN_ON(fs_info->trans_block_rsv.reserved > 0); 5919 WARN_ON(fs_info->trans_block_rsv.reserved > 0);
5888 WARN_ON(fs_info->chunk_block_rsv.size > 0); 5920 WARN_ON(fs_info->chunk_block_rsv.size > 0);
@@ -5906,7 +5938,7 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
5906 WARN_ON_ONCE(!list_empty(&trans->new_bgs)); 5938 WARN_ON_ONCE(!list_empty(&trans->new_bgs));
5907 5939
5908 block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL, 5940 block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
5909 trans->chunk_bytes_reserved); 5941 trans->chunk_bytes_reserved, NULL);
5910 trans->chunk_bytes_reserved = 0; 5942 trans->chunk_bytes_reserved = 0;
5911} 5943}
5912 5944
@@ -6011,6 +6043,7 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
6011{ 6043{
6012 struct btrfs_block_rsv *block_rsv = &inode->block_rsv; 6044 struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
6013 u64 reserve_size = 0; 6045 u64 reserve_size = 0;
6046 u64 qgroup_rsv_size = 0;
6014 u64 csum_leaves; 6047 u64 csum_leaves;
6015 unsigned outstanding_extents; 6048 unsigned outstanding_extents;
6016 6049
@@ -6023,9 +6056,17 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
6023 inode->csum_bytes); 6056 inode->csum_bytes);
6024 reserve_size += btrfs_calc_trans_metadata_size(fs_info, 6057 reserve_size += btrfs_calc_trans_metadata_size(fs_info,
6025 csum_leaves); 6058 csum_leaves);
6059 /*
6060 * For qgroup rsv, the calculation is very simple:
6061 * account one nodesize for each outstanding extent
6062 *
6063 * This is overestimating in most cases.
6064 */
6065 qgroup_rsv_size = outstanding_extents * fs_info->nodesize;
6026 6066
6027 spin_lock(&block_rsv->lock); 6067 spin_lock(&block_rsv->lock);
6028 block_rsv->size = reserve_size; 6068 block_rsv->size = reserve_size;
6069 block_rsv->qgroup_rsv_size = qgroup_rsv_size;
6029 spin_unlock(&block_rsv->lock); 6070 spin_unlock(&block_rsv->lock);
6030} 6071}
6031 6072
@@ -8403,7 +8444,7 @@ static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
8403 struct btrfs_block_rsv *block_rsv, u32 blocksize) 8444 struct btrfs_block_rsv *block_rsv, u32 blocksize)
8404{ 8445{
8405 block_rsv_add_bytes(block_rsv, blocksize, 0); 8446 block_rsv_add_bytes(block_rsv, blocksize, 0);
8406 block_rsv_release_bytes(fs_info, block_rsv, NULL, 0); 8447 block_rsv_release_bytes(fs_info, block_rsv, NULL, 0, NULL);
8407} 8448}
8408 8449
8409/* 8450/*
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 0167a9c97c9c..f660ba1e5e58 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1748,7 +1748,7 @@ again:
1748 unlock_extent_cached(&BTRFS_I(inode)->io_tree, 1748 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1749 lockstart, lockend, &cached_state); 1749 lockstart, lockend, &cached_state);
1750 btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes, 1750 btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes,
1751 (ret != 0)); 1751 true);
1752 if (ret) { 1752 if (ret) {
1753 btrfs_drop_pages(pages, num_pages); 1753 btrfs_drop_pages(pages, num_pages);
1754 break; 1754 break;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e064c49c9a9a..d241285a0d2a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -31,6 +31,7 @@
31#include <linux/uio.h> 31#include <linux/uio.h>
32#include <linux/magic.h> 32#include <linux/magic.h>
33#include <linux/iversion.h> 33#include <linux/iversion.h>
34#include <asm/unaligned.h>
34#include "ctree.h" 35#include "ctree.h"
35#include "disk-io.h" 36#include "disk-io.h"
36#include "transaction.h" 37#include "transaction.h"
@@ -5905,11 +5906,13 @@ static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
5905 struct dir_entry *entry = addr; 5906 struct dir_entry *entry = addr;
5906 char *name = (char *)(entry + 1); 5907 char *name = (char *)(entry + 1);
5907 5908
5908 ctx->pos = entry->offset; 5909 ctx->pos = get_unaligned(&entry->offset);
5909 if (!dir_emit(ctx, name, entry->name_len, entry->ino, 5910 if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
5910 entry->type)) 5911 get_unaligned(&entry->ino),
5912 get_unaligned(&entry->type)))
5911 return 1; 5913 return 1;
5912 addr += sizeof(struct dir_entry) + entry->name_len; 5914 addr += sizeof(struct dir_entry) +
5915 get_unaligned(&entry->name_len);
5913 ctx->pos++; 5916 ctx->pos++;
5914 } 5917 }
5915 return 0; 5918 return 0;
@@ -5999,14 +6002,15 @@ again:
5999 } 6002 }
6000 6003
6001 entry = addr; 6004 entry = addr;
6002 entry->name_len = name_len; 6005 put_unaligned(name_len, &entry->name_len);
6003 name_ptr = (char *)(entry + 1); 6006 name_ptr = (char *)(entry + 1);
6004 read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1), 6007 read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1),
6005 name_len); 6008 name_len);
6006 entry->type = btrfs_filetype_table[btrfs_dir_type(leaf, di)]; 6009 put_unaligned(btrfs_filetype_table[btrfs_dir_type(leaf, di)],
6010 &entry->type);
6007 btrfs_dir_item_key_to_cpu(leaf, di, &location); 6011 btrfs_dir_item_key_to_cpu(leaf, di, &location);
6008 entry->ino = location.objectid; 6012 put_unaligned(location.objectid, &entry->ino);
6009 entry->offset = found_key.offset; 6013 put_unaligned(found_key.offset, &entry->offset);
6010 entries++; 6014 entries++;
6011 addr += sizeof(struct dir_entry) + name_len; 6015 addr += sizeof(struct dir_entry) + name_len;
6012 total_len += sizeof(struct dir_entry) + name_len; 6016 total_len += sizeof(struct dir_entry) + name_len;
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 124276bba8cf..21a831d3d087 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -189,9 +189,10 @@ void btrfs_print_leaf(struct extent_buffer *l)
189 fs_info = l->fs_info; 189 fs_info = l->fs_info;
190 nr = btrfs_header_nritems(l); 190 nr = btrfs_header_nritems(l);
191 191
192 btrfs_info(fs_info, "leaf %llu total ptrs %d free space %d", 192 btrfs_info(fs_info,
193 btrfs_header_bytenr(l), nr, 193 "leaf %llu gen %llu total ptrs %d free space %d owner %llu",
194 btrfs_leaf_free_space(fs_info, l)); 194 btrfs_header_bytenr(l), btrfs_header_generation(l), nr,
195 btrfs_leaf_free_space(fs_info, l), btrfs_header_owner(l));
195 for (i = 0 ; i < nr ; i++) { 196 for (i = 0 ; i < nr ; i++) {
196 item = btrfs_item_nr(i); 197 item = btrfs_item_nr(i);
197 btrfs_item_key_to_cpu(l, &key, i); 198 btrfs_item_key_to_cpu(l, &key, i);
@@ -325,7 +326,7 @@ void btrfs_print_leaf(struct extent_buffer *l)
325 } 326 }
326} 327}
327 328
328void btrfs_print_tree(struct extent_buffer *c) 329void btrfs_print_tree(struct extent_buffer *c, bool follow)
329{ 330{
330 struct btrfs_fs_info *fs_info; 331 struct btrfs_fs_info *fs_info;
331 int i; u32 nr; 332 int i; u32 nr;
@@ -342,15 +343,19 @@ void btrfs_print_tree(struct extent_buffer *c)
342 return; 343 return;
343 } 344 }
344 btrfs_info(fs_info, 345 btrfs_info(fs_info,
345 "node %llu level %d total ptrs %d free spc %u", 346 "node %llu level %d gen %llu total ptrs %d free spc %u owner %llu",
346 btrfs_header_bytenr(c), level, nr, 347 btrfs_header_bytenr(c), level, btrfs_header_generation(c),
347 (u32)BTRFS_NODEPTRS_PER_BLOCK(fs_info) - nr); 348 nr, (u32)BTRFS_NODEPTRS_PER_BLOCK(fs_info) - nr,
349 btrfs_header_owner(c));
348 for (i = 0; i < nr; i++) { 350 for (i = 0; i < nr; i++) {
349 btrfs_node_key_to_cpu(c, &key, i); 351 btrfs_node_key_to_cpu(c, &key, i);
350 pr_info("\tkey %d (%llu %u %llu) block %llu\n", 352 pr_info("\tkey %d (%llu %u %llu) block %llu gen %llu\n",
351 i, key.objectid, key.type, key.offset, 353 i, key.objectid, key.type, key.offset,
352 btrfs_node_blockptr(c, i)); 354 btrfs_node_blockptr(c, i),
355 btrfs_node_ptr_generation(c, i));
353 } 356 }
357 if (!follow)
358 return;
354 for (i = 0; i < nr; i++) { 359 for (i = 0; i < nr; i++) {
355 struct btrfs_key first_key; 360 struct btrfs_key first_key;
356 struct extent_buffer *next; 361 struct extent_buffer *next;
@@ -372,7 +377,7 @@ void btrfs_print_tree(struct extent_buffer *c)
372 if (btrfs_header_level(next) != 377 if (btrfs_header_level(next) !=
373 level - 1) 378 level - 1)
374 BUG(); 379 BUG();
375 btrfs_print_tree(next); 380 btrfs_print_tree(next, follow);
376 free_extent_buffer(next); 381 free_extent_buffer(next);
377 } 382 }
378} 383}
diff --git a/fs/btrfs/print-tree.h b/fs/btrfs/print-tree.h
index 4a98481688f4..e6bb38fd75ad 100644
--- a/fs/btrfs/print-tree.h
+++ b/fs/btrfs/print-tree.h
@@ -7,6 +7,6 @@
7#define BTRFS_PRINT_TREE_H 7#define BTRFS_PRINT_TREE_H
8 8
9void btrfs_print_leaf(struct extent_buffer *l); 9void btrfs_print_leaf(struct extent_buffer *l);
10void btrfs_print_tree(struct extent_buffer *c); 10void btrfs_print_tree(struct extent_buffer *c, bool follow);
11 11
12#endif 12#endif
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 09c7e4fd550f..9fb758d5077a 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -11,6 +11,7 @@
11#include <linux/slab.h> 11#include <linux/slab.h>
12#include <linux/workqueue.h> 12#include <linux/workqueue.h>
13#include <linux/btrfs.h> 13#include <linux/btrfs.h>
14#include <linux/sizes.h>
14 15
15#include "ctree.h" 16#include "ctree.h"
16#include "transaction.h" 17#include "transaction.h"
@@ -2375,8 +2376,21 @@ out:
2375 return ret; 2376 return ret;
2376} 2377}
2377 2378
2378static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes) 2379/*
2380 * Two limits to commit transaction in advance.
2381 *
2382 * For RATIO, it will be 1/RATIO of the remaining limit
2383 * (excluding data and prealloc meta) as threshold.
2384 * For SIZE, it will be in byte unit as threshold.
2385 */
2386#define QGROUP_PERTRANS_RATIO 32
2387#define QGROUP_PERTRANS_SIZE SZ_32M
2388static bool qgroup_check_limits(struct btrfs_fs_info *fs_info,
2389 const struct btrfs_qgroup *qg, u64 num_bytes)
2379{ 2390{
2391 u64 limit;
2392 u64 threshold;
2393
2380 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) && 2394 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
2381 qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer) 2395 qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
2382 return false; 2396 return false;
@@ -2385,6 +2399,31 @@ static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
2385 qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl) 2399 qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
2386 return false; 2400 return false;
2387 2401
2402 /*
2403 * Even if we passed the check, it's better to check if reservation
2404 * for meta_pertrans is pushing us near limit.
2405 * If there is too much pertrans reservation or it's near the limit,
 2407 * let's try to commit the transaction to free some, using transaction_kthread
2407 */
2408 if ((qg->lim_flags & (BTRFS_QGROUP_LIMIT_MAX_RFER |
2409 BTRFS_QGROUP_LIMIT_MAX_EXCL))) {
2410 if (qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL)
2411 limit = qg->max_excl;
2412 else
2413 limit = qg->max_rfer;
2414 threshold = (limit - qg->rsv.values[BTRFS_QGROUP_RSV_DATA] -
2415 qg->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC]) /
2416 QGROUP_PERTRANS_RATIO;
2417 threshold = min_t(u64, threshold, QGROUP_PERTRANS_SIZE);
2418
2419 /*
 2420 * Use transaction_kthread to commit the transaction, so we no
 2421 * longer need to worry about nested transactions or lock context.
2422 */
2423 if (qg->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] > threshold)
2424 btrfs_commit_transaction_locksafe(fs_info);
2425 }
2426
2388 return true; 2427 return true;
2389} 2428}
2390 2429
@@ -2434,7 +2473,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
2434 2473
2435 qg = unode_aux_to_qgroup(unode); 2474 qg = unode_aux_to_qgroup(unode);
2436 2475
2437 if (enforce && !qgroup_check_limits(qg, num_bytes)) { 2476 if (enforce && !qgroup_check_limits(fs_info, qg, num_bytes)) {
2438 ret = -EDQUOT; 2477 ret = -EDQUOT;
2439 goto out; 2478 goto out;
2440 } 2479 }
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 63fdcab64b01..c944b4769e3c 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -2267,6 +2267,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
2267 */ 2267 */
2268 cur_trans->state = TRANS_STATE_COMPLETED; 2268 cur_trans->state = TRANS_STATE_COMPLETED;
2269 wake_up(&cur_trans->commit_wait); 2269 wake_up(&cur_trans->commit_wait);
2270 clear_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags);
2270 2271
2271 spin_lock(&fs_info->trans_lock); 2272 spin_lock(&fs_info->trans_lock);
2272 list_del_init(&cur_trans->list); 2273 list_del_init(&cur_trans->list);
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index c88fccd80bc5..d8c0826bc2c7 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -199,6 +199,20 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
199int btrfs_commit_transaction(struct btrfs_trans_handle *trans); 199int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
200int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, 200int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
201 int wait_for_unblock); 201 int wait_for_unblock);
202
203/*
204 * Try to commit transaction asynchronously, so this is safe to call
205 * even holding a spinlock.
206 *
207 * It's done by informing transaction_kthread to commit transaction without
208 * waiting for commit interval.
209 */
210static inline void btrfs_commit_transaction_locksafe(
211 struct btrfs_fs_info *fs_info)
212{
213 set_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags);
214 wake_up_process(fs_info->transaction_kthread);
215}
202int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans); 216int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
203int btrfs_should_end_transaction(struct btrfs_trans_handle *trans); 217int btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
204void btrfs_throttle(struct btrfs_fs_info *fs_info); 218void btrfs_throttle(struct btrfs_fs_info *fs_info);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 8bf60250309e..ae056927080d 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -669,13 +669,15 @@ void ceph_fill_file_time(struct inode *inode, int issued,
669 CEPH_CAP_FILE_BUFFER| 669 CEPH_CAP_FILE_BUFFER|
670 CEPH_CAP_AUTH_EXCL| 670 CEPH_CAP_AUTH_EXCL|
671 CEPH_CAP_XATTR_EXCL)) { 671 CEPH_CAP_XATTR_EXCL)) {
672 if (timespec_compare(ctime, &inode->i_ctime) > 0) { 672 if (ci->i_version == 0 ||
673 timespec_compare(ctime, &inode->i_ctime) > 0) {
673 dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n", 674 dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
674 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, 675 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
675 ctime->tv_sec, ctime->tv_nsec); 676 ctime->tv_sec, ctime->tv_nsec);
676 inode->i_ctime = *ctime; 677 inode->i_ctime = *ctime;
677 } 678 }
678 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) { 679 if (ci->i_version == 0 ||
680 ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
679 /* the MDS did a utimes() */ 681 /* the MDS did a utimes() */
680 dout("mtime %ld.%09ld -> %ld.%09ld " 682 dout("mtime %ld.%09ld -> %ld.%09ld "
681 "tw %d -> %d\n", 683 "tw %d -> %d\n",
@@ -795,7 +797,6 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
795 new_issued = ~issued & le32_to_cpu(info->cap.caps); 797 new_issued = ~issued & le32_to_cpu(info->cap.caps);
796 798
797 /* update inode */ 799 /* update inode */
798 ci->i_version = le64_to_cpu(info->version);
799 inode->i_rdev = le32_to_cpu(info->rdev); 800 inode->i_rdev = le32_to_cpu(info->rdev);
800 inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1; 801 inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
801 802
@@ -868,6 +869,9 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
868 xattr_blob = NULL; 869 xattr_blob = NULL;
869 } 870 }
870 871
872 /* finally update i_version */
873 ci->i_version = le64_to_cpu(info->version);
874
871 inode->i_mapping->a_ops = &ceph_aops; 875 inode->i_mapping->a_ops = &ceph_aops;
872 876
873 switch (inode->i_mode & S_IFMT) { 877 switch (inode->i_mode & S_IFMT) {
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 7e72348639e4..315f7e63e7cc 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -228,7 +228,15 @@ static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
228 228
229static bool ceph_vxattrcb_quota_exists(struct ceph_inode_info *ci) 229static bool ceph_vxattrcb_quota_exists(struct ceph_inode_info *ci)
230{ 230{
231 return (ci->i_max_files || ci->i_max_bytes); 231 bool ret = false;
232 spin_lock(&ci->i_ceph_lock);
233 if ((ci->i_max_files || ci->i_max_bytes) &&
234 ci->i_vino.snap == CEPH_NOSNAP &&
235 ci->i_snap_realm &&
236 ci->i_snap_realm->ino == ci->i_vino.ino)
237 ret = true;
238 spin_unlock(&ci->i_ceph_lock);
239 return ret;
232} 240}
233 241
234static size_t ceph_vxattrcb_quota(struct ceph_inode_info *ci, char *val, 242static size_t ceph_vxattrcb_quota(struct ceph_inode_info *ci, char *val,
@@ -1008,14 +1016,19 @@ int __ceph_setxattr(struct inode *inode, const char *name,
1008 char *newval = NULL; 1016 char *newval = NULL;
1009 struct ceph_inode_xattr *xattr = NULL; 1017 struct ceph_inode_xattr *xattr = NULL;
1010 int required_blob_size; 1018 int required_blob_size;
1019 bool check_realm = false;
1011 bool lock_snap_rwsem = false; 1020 bool lock_snap_rwsem = false;
1012 1021
1013 if (ceph_snap(inode) != CEPH_NOSNAP) 1022 if (ceph_snap(inode) != CEPH_NOSNAP)
1014 return -EROFS; 1023 return -EROFS;
1015 1024
1016 vxattr = ceph_match_vxattr(inode, name); 1025 vxattr = ceph_match_vxattr(inode, name);
1017 if (vxattr && vxattr->readonly) 1026 if (vxattr) {
1018 return -EOPNOTSUPP; 1027 if (vxattr->readonly)
1028 return -EOPNOTSUPP;
1029 if (value && !strncmp(vxattr->name, "ceph.quota", 10))
1030 check_realm = true;
1031 }
1019 1032
1020 /* pass any unhandled ceph.* xattrs through to the MDS */ 1033 /* pass any unhandled ceph.* xattrs through to the MDS */
1021 if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN)) 1034 if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
@@ -1109,6 +1122,15 @@ do_sync_unlocked:
1109 err = -EBUSY; 1122 err = -EBUSY;
1110 } else { 1123 } else {
1111 err = ceph_sync_setxattr(inode, name, value, size, flags); 1124 err = ceph_sync_setxattr(inode, name, value, size, flags);
1125 if (err >= 0 && check_realm) {
1126 /* check if snaprealm was created for quota inode */
1127 spin_lock(&ci->i_ceph_lock);
1128 if ((ci->i_max_files || ci->i_max_bytes) &&
1129 !(ci->i_snap_realm &&
1130 ci->i_snap_realm->ino == ci->i_vino.ino))
1131 err = -EOPNOTSUPP;
1132 spin_unlock(&ci->i_ceph_lock);
1133 }
1112 } 1134 }
1113out: 1135out:
1114 ceph_free_cap_flush(prealloc_cf); 1136 ceph_free_cap_flush(prealloc_cf);
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index fe5567655662..0e74690d11bc 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -54,7 +54,7 @@ do { \
54 pr_debug_ ## ratefunc("%s: " \ 54 pr_debug_ ## ratefunc("%s: " \
55 fmt, __FILE__, ##__VA_ARGS__); \ 55 fmt, __FILE__, ##__VA_ARGS__); \
56 } else if ((type) & VFS) { \ 56 } else if ((type) & VFS) { \
57 pr_err_ ## ratefunc("CuIFS VFS: " \ 57 pr_err_ ## ratefunc("CIFS VFS: " \
58 fmt, ##__VA_ARGS__); \ 58 fmt, ##__VA_ARGS__); \
59 } else if ((type) & NOISY && (NOISY != 0)) { \ 59 } else if ((type) & NOISY && (NOISY != 0)) { \
60 pr_debug_ ## ratefunc(fmt, ##__VA_ARGS__); \ 60 pr_debug_ ## ratefunc(fmt, ##__VA_ARGS__); \
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 6d3e40d7029c..1529a088383d 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -455,6 +455,9 @@ cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required)
455 server->sign = true; 455 server->sign = true;
456 } 456 }
457 457
458 if (cifs_rdma_enabled(server) && server->sign)
459 cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled");
460
458 return 0; 461 return 0;
459} 462}
460 463
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index e8830f076a7f..a5aa158d535a 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2959,6 +2959,22 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
2959 } 2959 }
2960 } 2960 }
2961 2961
2962 if (volume_info->seal) {
2963 if (ses->server->vals->protocol_id == 0) {
2964 cifs_dbg(VFS,
2965 "SMB3 or later required for encryption\n");
2966 rc = -EOPNOTSUPP;
2967 goto out_fail;
2968 } else if (tcon->ses->server->capabilities &
2969 SMB2_GLOBAL_CAP_ENCRYPTION)
2970 tcon->seal = true;
2971 else {
2972 cifs_dbg(VFS, "Encryption is not supported on share\n");
2973 rc = -EOPNOTSUPP;
2974 goto out_fail;
2975 }
2976 }
2977
2962 /* 2978 /*
2963 * BB Do we need to wrap session_mutex around this TCon call and Unix 2979 * BB Do we need to wrap session_mutex around this TCon call and Unix
2964 * SetFS as we do on SessSetup and reconnect? 2980 * SetFS as we do on SessSetup and reconnect?
@@ -3007,22 +3023,6 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
3007 tcon->use_resilient = true; 3023 tcon->use_resilient = true;
3008 } 3024 }
3009 3025
3010 if (volume_info->seal) {
3011 if (ses->server->vals->protocol_id == 0) {
3012 cifs_dbg(VFS,
3013 "SMB3 or later required for encryption\n");
3014 rc = -EOPNOTSUPP;
3015 goto out_fail;
3016 } else if (tcon->ses->server->capabilities &
3017 SMB2_GLOBAL_CAP_ENCRYPTION)
3018 tcon->seal = true;
3019 else {
3020 cifs_dbg(VFS, "Encryption is not supported on share\n");
3021 rc = -EOPNOTSUPP;
3022 goto out_fail;
3023 }
3024 }
3025
3026 /* 3026 /*
3027 * We can have only one retry value for a connection to a share so for 3027 * We can have only one retry value for a connection to a share so for
3028 * resources mounted more than once to the same server share the last 3028 * resources mounted more than once to the same server share the last
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 81ba6e0d88d8..925844343038 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -684,6 +684,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
684 goto mknod_out; 684 goto mknod_out;
685 } 685 }
686 686
687 if (!S_ISCHR(mode) && !S_ISBLK(mode))
688 goto mknod_out;
689
687 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) 690 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
688 goto mknod_out; 691 goto mknod_out;
689 692
@@ -692,10 +695,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
692 695
693 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); 696 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
694 if (buf == NULL) { 697 if (buf == NULL) {
695 kfree(full_path);
696 rc = -ENOMEM; 698 rc = -ENOMEM;
697 free_xid(xid); 699 goto mknod_out;
698 return rc;
699 } 700 }
700 701
701 if (backup_cred(cifs_sb)) 702 if (backup_cred(cifs_sb))
@@ -742,7 +743,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
742 pdev->minor = cpu_to_le64(MINOR(device_number)); 743 pdev->minor = cpu_to_le64(MINOR(device_number));
743 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms, 744 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
744 &bytes_written, iov, 1); 745 &bytes_written, iov, 1);
745 } /* else if (S_ISFIFO) */ 746 }
746 tcon->ses->server->ops->close(xid, tcon, &fid); 747 tcon->ses->server->ops->close(xid, tcon, &fid);
747 d_drop(direntry); 748 d_drop(direntry);
748 749
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 4bcd4e838b47..23fd430fe74a 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3462,7 +3462,7 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
3462 * If the page is mmap'ed into a process' page tables, then we need to make 3462 * If the page is mmap'ed into a process' page tables, then we need to make
3463 * sure that it doesn't change while being written back. 3463 * sure that it doesn't change while being written back.
3464 */ 3464 */
3465static int 3465static vm_fault_t
3466cifs_page_mkwrite(struct vm_fault *vmf) 3466cifs_page_mkwrite(struct vm_fault *vmf)
3467{ 3467{
3468 struct page *page = vmf->page; 3468 struct page *page = vmf->page;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index b4ae932ea134..b76b85881dcc 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -252,9 +252,14 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
252 wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE; 252 wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
253 wsize = min_t(unsigned int, wsize, server->max_write); 253 wsize = min_t(unsigned int, wsize, server->max_write);
254#ifdef CONFIG_CIFS_SMB_DIRECT 254#ifdef CONFIG_CIFS_SMB_DIRECT
255 if (server->rdma) 255 if (server->rdma) {
256 wsize = min_t(unsigned int, 256 if (server->sign)
257 wsize = min_t(unsigned int,
258 wsize, server->smbd_conn->max_fragmented_send_size);
259 else
260 wsize = min_t(unsigned int,
257 wsize, server->smbd_conn->max_readwrite_size); 261 wsize, server->smbd_conn->max_readwrite_size);
262 }
258#endif 263#endif
259 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) 264 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
260 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE); 265 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
@@ -272,9 +277,14 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
272 rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE; 277 rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
273 rsize = min_t(unsigned int, rsize, server->max_read); 278 rsize = min_t(unsigned int, rsize, server->max_read);
274#ifdef CONFIG_CIFS_SMB_DIRECT 279#ifdef CONFIG_CIFS_SMB_DIRECT
275 if (server->rdma) 280 if (server->rdma) {
276 rsize = min_t(unsigned int, 281 if (server->sign)
282 rsize = min_t(unsigned int,
283 rsize, server->smbd_conn->max_fragmented_recv_size);
284 else
285 rsize = min_t(unsigned int,
277 rsize, server->smbd_conn->max_readwrite_size); 286 rsize, server->smbd_conn->max_readwrite_size);
287 }
278#endif 288#endif
279 289
280 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) 290 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
@@ -1452,7 +1462,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
1452 struct cifs_open_parms oparms; 1462 struct cifs_open_parms oparms;
1453 struct cifs_fid fid; 1463 struct cifs_fid fid;
1454 struct kvec err_iov = {NULL, 0}; 1464 struct kvec err_iov = {NULL, 0};
1455 struct smb2_err_rsp *err_buf = NULL; 1465 struct smb2_err_rsp *err_buf;
1456 struct smb2_symlink_err_rsp *symlink; 1466 struct smb2_symlink_err_rsp *symlink;
1457 unsigned int sub_len; 1467 unsigned int sub_len;
1458 unsigned int sub_offset; 1468 unsigned int sub_offset;
@@ -1476,7 +1486,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
1476 1486
1477 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_iov); 1487 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_iov);
1478 1488
1479 if (!rc || !err_buf) { 1489 if (!rc || !err_iov.iov_base) {
1480 kfree(utf16_path); 1490 kfree(utf16_path);
1481 return -ENOENT; 1491 return -ENOENT;
1482 } 1492 }
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 0f044c4a2dc9..60db51bae0e3 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -383,10 +383,10 @@ static void
383build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt) 383build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
384{ 384{
385 pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES; 385 pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
386 pneg_ctxt->DataLength = cpu_to_le16(6); 386 pneg_ctxt->DataLength = cpu_to_le16(4); /* Cipher Count + le16 cipher */
387 pneg_ctxt->CipherCount = cpu_to_le16(2); 387 pneg_ctxt->CipherCount = cpu_to_le16(1);
388 pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM; 388/* pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;*/ /* not supported yet */
389 pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM; 389 pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_CCM;
390} 390}
391 391
392static void 392static void
@@ -444,6 +444,7 @@ static int decode_encrypt_ctx(struct TCP_Server_Info *server,
444 return -EINVAL; 444 return -EINVAL;
445 } 445 }
446 server->cipher_type = ctxt->Ciphers[0]; 446 server->cipher_type = ctxt->Ciphers[0];
447 server->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
447 return 0; 448 return 0;
448} 449}
449 450
@@ -2590,7 +2591,7 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
2590 * If we want to do a RDMA write, fill in and append 2591 * If we want to do a RDMA write, fill in and append
2591 * smbd_buffer_descriptor_v1 to the end of read request 2592 * smbd_buffer_descriptor_v1 to the end of read request
2592 */ 2593 */
2593 if (server->rdma && rdata && 2594 if (server->rdma && rdata && !server->sign &&
2594 rdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) { 2595 rdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) {
2595 2596
2596 struct smbd_buffer_descriptor_v1 *v1; 2597 struct smbd_buffer_descriptor_v1 *v1;
@@ -2968,7 +2969,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
2968 * If we want to do a server RDMA read, fill in and append 2969 * If we want to do a server RDMA read, fill in and append
2969 * smbd_buffer_descriptor_v1 to the end of write request 2970 * smbd_buffer_descriptor_v1 to the end of write request
2970 */ 2971 */
2971 if (server->rdma && wdata->bytes >= 2972 if (server->rdma && !server->sign && wdata->bytes >=
2972 server->smbd_conn->rdma_readwrite_threshold) { 2973 server->smbd_conn->rdma_readwrite_threshold) {
2973 2974
2974 struct smbd_buffer_descriptor_v1 *v1; 2975 struct smbd_buffer_descriptor_v1 *v1;
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 6093e5142b2b..d28f358022c5 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -297,7 +297,7 @@ struct smb2_encryption_neg_context {
297 __le16 DataLength; 297 __le16 DataLength;
298 __le32 Reserved; 298 __le32 Reserved;
299 __le16 CipherCount; /* AES-128-GCM and AES-128-CCM */ 299 __le16 CipherCount; /* AES-128-GCM and AES-128-CCM */
300 __le16 Ciphers[2]; /* Ciphers[0] since only one used now */ 300 __le16 Ciphers[1]; /* Ciphers[0] since only one used now */
301} __packed; 301} __packed;
302 302
303struct smb2_negotiate_rsp { 303struct smb2_negotiate_rsp {
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index 5008af546dd1..c62f7c95683c 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -1028,7 +1028,7 @@ static int smbd_post_send(struct smbd_connection *info,
1028 for (i = 0; i < request->num_sge; i++) { 1028 for (i = 0; i < request->num_sge; i++) {
1029 log_rdma_send(INFO, 1029 log_rdma_send(INFO,
1030 "rdma_request sge[%d] addr=%llu length=%u\n", 1030 "rdma_request sge[%d] addr=%llu length=%u\n",
1031 i, request->sge[0].addr, request->sge[0].length); 1031 i, request->sge[i].addr, request->sge[i].length);
1032 ib_dma_sync_single_for_device( 1032 ib_dma_sync_single_for_device(
1033 info->id->device, 1033 info->id->device,
1034 request->sge[i].addr, 1034 request->sge[i].addr,
@@ -2086,7 +2086,7 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
2086 int start, i, j; 2086 int start, i, j;
2087 int max_iov_size = 2087 int max_iov_size =
2088 info->max_send_size - sizeof(struct smbd_data_transfer); 2088 info->max_send_size - sizeof(struct smbd_data_transfer);
2089 struct kvec iov[SMBDIRECT_MAX_SGE]; 2089 struct kvec *iov;
2090 int rc; 2090 int rc;
2091 2091
2092 info->smbd_send_pending++; 2092 info->smbd_send_pending++;
@@ -2096,32 +2096,20 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
2096 } 2096 }
2097 2097
2098 /* 2098 /*
2099 * This usually means a configuration error 2099 * Skip the RFC1002 length defined in MS-SMB2 section 2.1
2100 * We use RDMA read/write for packet size > rdma_readwrite_threshold 2100 * It is used only for TCP transport in the iov[0]
2101 * as long as it's properly configured we should never get into this
2102 * situation
2103 */
2104 if (rqst->rq_nvec + rqst->rq_npages > SMBDIRECT_MAX_SGE) {
2105 log_write(ERR, "maximum send segment %x exceeding %x\n",
2106 rqst->rq_nvec + rqst->rq_npages, SMBDIRECT_MAX_SGE);
2107 rc = -EINVAL;
2108 goto done;
2109 }
2110
2111 /*
2112 * Remove the RFC1002 length defined in MS-SMB2 section 2.1
2113 * It is used only for TCP transport
2114 * In future we may want to add a transport layer under protocol 2101 * In future we may want to add a transport layer under protocol
2115 * layer so this will only be issued to TCP transport 2102 * layer so this will only be issued to TCP transport
2116 */ 2103 */
2117 iov[0].iov_base = (char *)rqst->rq_iov[0].iov_base + 4; 2104
2118 iov[0].iov_len = rqst->rq_iov[0].iov_len - 4; 2105 if (rqst->rq_iov[0].iov_len != 4) {
2119 buflen += iov[0].iov_len; 2106 log_write(ERR, "expected the pdu length in 1st iov, but got %zu\n", rqst->rq_iov[0].iov_len);
2107 return -EINVAL;
2108 }
2109 iov = &rqst->rq_iov[1];
2120 2110
2121 /* total up iov array first */ 2111 /* total up iov array first */
2122 for (i = 1; i < rqst->rq_nvec; i++) { 2112 for (i = 0; i < rqst->rq_nvec-1; i++) {
2123 iov[i].iov_base = rqst->rq_iov[i].iov_base;
2124 iov[i].iov_len = rqst->rq_iov[i].iov_len;
2125 buflen += iov[i].iov_len; 2113 buflen += iov[i].iov_len;
2126 } 2114 }
2127 2115
@@ -2139,6 +2127,10 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
2139 goto done; 2127 goto done;
2140 } 2128 }
2141 2129
2130 cifs_dbg(FYI, "Sending smb (RDMA): smb_len=%u\n", buflen);
2131 for (i = 0; i < rqst->rq_nvec-1; i++)
2132 dump_smb(iov[i].iov_base, iov[i].iov_len);
2133
2142 remaining_data_length = buflen; 2134 remaining_data_length = buflen;
2143 2135
2144 log_write(INFO, "rqst->rq_nvec=%d rqst->rq_npages=%d rq_pagesz=%d " 2136 log_write(INFO, "rqst->rq_nvec=%d rqst->rq_npages=%d rq_pagesz=%d "
@@ -2194,12 +2186,14 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
2194 goto done; 2186 goto done;
2195 } 2187 }
2196 i++; 2188 i++;
2189 if (i == rqst->rq_nvec-1)
2190 break;
2197 } 2191 }
2198 start = i; 2192 start = i;
2199 buflen = 0; 2193 buflen = 0;
2200 } else { 2194 } else {
2201 i++; 2195 i++;
2202 if (i == rqst->rq_nvec) { 2196 if (i == rqst->rq_nvec-1) {
2203 /* send out all remaining vecs */ 2197 /* send out all remaining vecs */
2204 remaining_data_length -= buflen; 2198 remaining_data_length -= buflen;
2205 log_write(INFO, 2199 log_write(INFO,
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 8f6f25918229..927226a2122f 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -753,7 +753,7 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
753 goto out; 753 goto out;
754 754
755#ifdef CONFIG_CIFS_SMB311 755#ifdef CONFIG_CIFS_SMB311
756 if (ses->status == CifsNew) 756 if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
757 smb311_update_preauth_hash(ses, rqst->rq_iov+1, 757 smb311_update_preauth_hash(ses, rqst->rq_iov+1,
758 rqst->rq_nvec-1); 758 rqst->rq_nvec-1);
759#endif 759#endif
@@ -798,7 +798,7 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
798 *resp_buf_type = CIFS_SMALL_BUFFER; 798 *resp_buf_type = CIFS_SMALL_BUFFER;
799 799
800#ifdef CONFIG_CIFS_SMB311 800#ifdef CONFIG_CIFS_SMB311
801 if (ses->status == CifsNew) { 801 if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
802 struct kvec iov = { 802 struct kvec iov = {
803 .iov_base = buf + 4, 803 .iov_base = buf + 4,
804 .iov_len = get_rfc1002_length(buf) 804 .iov_len = get_rfc1002_length(buf)
@@ -834,8 +834,11 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
834 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) { 834 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
835 new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1), 835 new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1),
836 GFP_KERNEL); 836 GFP_KERNEL);
837 if (!new_iov) 837 if (!new_iov) {
838 /* otherwise cifs_send_recv below sets resp_buf_type */
839 *resp_buf_type = CIFS_NO_BUFFER;
838 return -ENOMEM; 840 return -ENOMEM;
841 }
839 } else 842 } else
840 new_iov = s_iov; 843 new_iov = s_iov;
841 844
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 846ca150d52e..4dd842f72846 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1997,6 +1997,16 @@ out:
1997 return rc; 1997 return rc;
1998} 1998}
1999 1999
2000static bool is_dot_dotdot(const char *name, size_t name_size)
2001{
2002 if (name_size == 1 && name[0] == '.')
2003 return true;
2004 else if (name_size == 2 && name[0] == '.' && name[1] == '.')
2005 return true;
2006
2007 return false;
2008}
2009
2000/** 2010/**
2001 * ecryptfs_decode_and_decrypt_filename - converts the encoded cipher text name to decoded plaintext 2011 * ecryptfs_decode_and_decrypt_filename - converts the encoded cipher text name to decoded plaintext
2002 * @plaintext_name: The plaintext name 2012 * @plaintext_name: The plaintext name
@@ -2021,13 +2031,21 @@ int ecryptfs_decode_and_decrypt_filename(char **plaintext_name,
2021 size_t packet_size; 2031 size_t packet_size;
2022 int rc = 0; 2032 int rc = 0;
2023 2033
2024 if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) 2034 if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) &&
2025 && !(mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) 2035 !(mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)) {
2026 && (name_size > ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE) 2036 if (is_dot_dotdot(name, name_size)) {
2027 && (strncmp(name, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX, 2037 rc = ecryptfs_copy_filename(plaintext_name,
2028 ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE) == 0)) { 2038 plaintext_name_size,
2029 const char *orig_name = name; 2039 name, name_size);
2030 size_t orig_name_size = name_size; 2040 goto out;
2041 }
2042
2043 if (name_size <= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE ||
2044 strncmp(name, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX,
2045 ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE)) {
2046 rc = -EINVAL;
2047 goto out;
2048 }
2031 2049
2032 name += ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE; 2050 name += ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE;
2033 name_size -= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE; 2051 name_size -= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE;
@@ -2047,12 +2065,9 @@ int ecryptfs_decode_and_decrypt_filename(char **plaintext_name,
2047 decoded_name, 2065 decoded_name,
2048 decoded_name_size); 2066 decoded_name_size);
2049 if (rc) { 2067 if (rc) {
2050 printk(KERN_INFO "%s: Could not parse tag 70 packet " 2068 ecryptfs_printk(KERN_DEBUG,
2051 "from filename; copying through filename " 2069 "%s: Could not parse tag 70 packet from filename\n",
2052 "as-is\n", __func__); 2070 __func__);
2053 rc = ecryptfs_copy_filename(plaintext_name,
2054 plaintext_name_size,
2055 orig_name, orig_name_size);
2056 goto out_free; 2071 goto out_free;
2057 } 2072 }
2058 } else { 2073 } else {
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index c74ed3ca3372..b76a9853325e 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -82,17 +82,28 @@ ecryptfs_filldir(struct dir_context *ctx, const char *lower_name,
82 buf->sb, lower_name, 82 buf->sb, lower_name,
83 lower_namelen); 83 lower_namelen);
84 if (rc) { 84 if (rc) {
85 printk(KERN_ERR "%s: Error attempting to decode and decrypt " 85 if (rc != -EINVAL) {
86 "filename [%s]; rc = [%d]\n", __func__, lower_name, 86 ecryptfs_printk(KERN_DEBUG,
87 rc); 87 "%s: Error attempting to decode and decrypt filename [%s]; rc = [%d]\n",
88 goto out; 88 __func__, lower_name, rc);
89 return rc;
90 }
91
92 /* Mask -EINVAL errors as these are most likely due a plaintext
93 * filename present in the lower filesystem despite filename
94 * encryption being enabled. One unavoidable example would be
95 * the "lost+found" dentry in the root directory of an Ext4
96 * filesystem.
97 */
98 return 0;
89 } 99 }
100
90 buf->caller->pos = buf->ctx.pos; 101 buf->caller->pos = buf->ctx.pos;
91 rc = !dir_emit(buf->caller, name, name_size, ino, d_type); 102 rc = !dir_emit(buf->caller, name, name_size, ino, d_type);
92 kfree(name); 103 kfree(name);
93 if (!rc) 104 if (!rc)
94 buf->entries_written++; 105 buf->entries_written++;
95out: 106
96 return rc; 107 return rc;
97} 108}
98 109
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 847904aa63a9..97d17eaeba07 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -395,8 +395,7 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
395 395
396 mount_crypt_stat = &ecryptfs_superblock_to_private( 396 mount_crypt_stat = &ecryptfs_superblock_to_private(
397 ecryptfs_dentry->d_sb)->mount_crypt_stat; 397 ecryptfs_dentry->d_sb)->mount_crypt_stat;
398 if (mount_crypt_stat 398 if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) {
399 && (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)) {
400 rc = ecryptfs_encrypt_and_encode_filename( 399 rc = ecryptfs_encrypt_and_encode_filename(
401 &encrypted_and_encoded_name, &len, 400 &encrypted_and_encoded_name, &len,
402 mount_crypt_stat, name, len); 401 mount_crypt_stat, name, len);
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index c89a58cfc991..e74fe84d0886 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -1880,7 +1880,7 @@ find_next_matching_auth_tok:
1880 candidate_auth_tok = &auth_tok_list_item->auth_tok; 1880 candidate_auth_tok = &auth_tok_list_item->auth_tok;
1881 if (unlikely(ecryptfs_verbosity > 0)) { 1881 if (unlikely(ecryptfs_verbosity > 0)) {
1882 ecryptfs_printk(KERN_DEBUG, 1882 ecryptfs_printk(KERN_DEBUG,
1883 "Considering cadidate auth tok:\n"); 1883 "Considering candidate auth tok:\n");
1884 ecryptfs_dump_auth_tok(candidate_auth_tok); 1884 ecryptfs_dump_auth_tok(candidate_auth_tok);
1885 } 1885 }
1886 rc = ecryptfs_get_auth_tok_sig(&candidate_auth_tok_sig, 1886 rc = ecryptfs_get_auth_tok_sig(&candidate_auth_tok_sig,
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 09640220fda8..047c327a6b23 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -88,11 +88,11 @@ out_unlock:
88 * The default page_lock and i_size verification done by non-DAX fault paths 88 * The default page_lock and i_size verification done by non-DAX fault paths
89 * is sufficient because ext2 doesn't support hole punching. 89 * is sufficient because ext2 doesn't support hole punching.
90 */ 90 */
91static int ext2_dax_fault(struct vm_fault *vmf) 91static vm_fault_t ext2_dax_fault(struct vm_fault *vmf)
92{ 92{
93 struct inode *inode = file_inode(vmf->vma->vm_file); 93 struct inode *inode = file_inode(vmf->vma->vm_file);
94 struct ext2_inode_info *ei = EXT2_I(inode); 94 struct ext2_inode_info *ei = EXT2_I(inode);
95 int ret; 95 vm_fault_t ret;
96 96
97 if (vmf->flags & FAULT_FLAG_WRITE) { 97 if (vmf->flags & FAULT_FLAG_WRITE) {
98 sb_start_pagefault(inode->i_sb); 98 sb_start_pagefault(inode->i_sb);
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index a33d8fb1bf2a..508b905d744d 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -321,6 +321,7 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
321 struct ext4_sb_info *sbi = EXT4_SB(sb); 321 struct ext4_sb_info *sbi = EXT4_SB(sb);
322 ext4_grpblk_t offset; 322 ext4_grpblk_t offset;
323 ext4_grpblk_t next_zero_bit; 323 ext4_grpblk_t next_zero_bit;
324 ext4_grpblk_t max_bit = EXT4_CLUSTERS_PER_GROUP(sb);
324 ext4_fsblk_t blk; 325 ext4_fsblk_t blk;
325 ext4_fsblk_t group_first_block; 326 ext4_fsblk_t group_first_block;
326 327
@@ -338,7 +339,7 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
338 /* check whether block bitmap block number is set */ 339 /* check whether block bitmap block number is set */
339 blk = ext4_block_bitmap(sb, desc); 340 blk = ext4_block_bitmap(sb, desc);
340 offset = blk - group_first_block; 341 offset = blk - group_first_block;
341 if (offset < 0 || EXT4_B2C(sbi, offset) >= sb->s_blocksize || 342 if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
342 !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data)) 343 !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
343 /* bad block bitmap */ 344 /* bad block bitmap */
344 return blk; 345 return blk;
@@ -346,7 +347,7 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
346 /* check whether the inode bitmap block number is set */ 347 /* check whether the inode bitmap block number is set */
347 blk = ext4_inode_bitmap(sb, desc); 348 blk = ext4_inode_bitmap(sb, desc);
348 offset = blk - group_first_block; 349 offset = blk - group_first_block;
349 if (offset < 0 || EXT4_B2C(sbi, offset) >= sb->s_blocksize || 350 if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
350 !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data)) 351 !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
351 /* bad block bitmap */ 352 /* bad block bitmap */
352 return blk; 353 return blk;
@@ -354,8 +355,8 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
354 /* check whether the inode table block number is set */ 355 /* check whether the inode table block number is set */
355 blk = ext4_inode_table(sb, desc); 356 blk = ext4_inode_table(sb, desc);
356 offset = blk - group_first_block; 357 offset = blk - group_first_block;
357 if (offset < 0 || EXT4_B2C(sbi, offset) >= sb->s_blocksize || 358 if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
358 EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= sb->s_blocksize) 359 EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= max_bit)
359 return blk; 360 return blk;
360 next_zero_bit = ext4_find_next_zero_bit(bh->b_data, 361 next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
361 EXT4_B2C(sbi, offset + sbi->s_itb_per_group), 362 EXT4_B2C(sbi, offset + sbi->s_itb_per_group),
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 0a7315961bac..c969275ce3ee 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -5329,8 +5329,9 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
5329 stop = le32_to_cpu(extent->ee_block); 5329 stop = le32_to_cpu(extent->ee_block);
5330 5330
5331 /* 5331 /*
5332 * In case of left shift, Don't start shifting extents until we make 5332 * For left shifts, make sure the hole on the left is big enough to
5333 * sure the hole is big enough to accommodate the shift. 5333 * accommodate the shift. For right shifts, make sure the last extent
5334 * won't be shifted beyond EXT_MAX_BLOCKS.
5334 */ 5335 */
5335 if (SHIFT == SHIFT_LEFT) { 5336 if (SHIFT == SHIFT_LEFT) {
5336 path = ext4_find_extent(inode, start - 1, &path, 5337 path = ext4_find_extent(inode, start - 1, &path,
@@ -5350,9 +5351,14 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
5350 5351
5351 if ((start == ex_start && shift > ex_start) || 5352 if ((start == ex_start && shift > ex_start) ||
5352 (shift > start - ex_end)) { 5353 (shift > start - ex_end)) {
5353 ext4_ext_drop_refs(path); 5354 ret = -EINVAL;
5354 kfree(path); 5355 goto out;
5355 return -EINVAL; 5356 }
5357 } else {
5358 if (shift > EXT_MAX_BLOCKS -
5359 (stop + ext4_ext_get_actual_len(extent))) {
5360 ret = -EINVAL;
5361 goto out;
5356 } 5362 }
5357 } 5363 }
5358 5364
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 185f7e61f4cf..eb104e8476f0 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -5886,5 +5886,6 @@ static void __exit ext4_exit_fs(void)
5886MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others"); 5886MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
5887MODULE_DESCRIPTION("Fourth Extended Filesystem"); 5887MODULE_DESCRIPTION("Fourth Extended Filesystem");
5888MODULE_LICENSE("GPL"); 5888MODULE_LICENSE("GPL");
5889MODULE_SOFTDEP("pre: crc32c");
5889module_init(ext4_init_fs) 5890module_init(ext4_init_fs)
5890module_exit(ext4_exit_fs) 5891module_exit(ext4_exit_fs)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 4b12ba70a895..47d7c151fcba 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -745,11 +745,12 @@ int inode_congested(struct inode *inode, int cong_bits)
745 */ 745 */
746 if (inode && inode_to_wb_is_valid(inode)) { 746 if (inode && inode_to_wb_is_valid(inode)) {
747 struct bdi_writeback *wb; 747 struct bdi_writeback *wb;
748 bool locked, congested; 748 struct wb_lock_cookie lock_cookie = {};
749 bool congested;
749 750
750 wb = unlocked_inode_to_wb_begin(inode, &locked); 751 wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
751 congested = wb_congested(wb, cong_bits); 752 congested = wb_congested(wb, cong_bits);
752 unlocked_inode_to_wb_end(inode, locked); 753 unlocked_inode_to_wb_end(inode, &lock_cookie);
753 return congested; 754 return congested;
754 } 755 }
755 756
diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index 9bb2fe35799d..10205ececc27 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -20,6 +20,7 @@
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/bio.h> 21#include <linux/bio.h>
22 22
23#include <linux/slab.h>
23#include <linux/vmalloc.h> 24#include <linux/vmalloc.h>
24#include <linux/zlib.h> 25#include <linux/zlib.h>
25 26
@@ -59,7 +60,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
59 >> bufshift; 60 >> bufshift;
60 int haveblocks; 61 int haveblocks;
61 blkcnt_t blocknum; 62 blkcnt_t blocknum;
62 struct buffer_head *bhs[needblocks + 1]; 63 struct buffer_head **bhs;
63 int curbh, curpage; 64 int curbh, curpage;
64 65
65 if (block_size > deflateBound(1UL << zisofs_block_shift)) { 66 if (block_size > deflateBound(1UL << zisofs_block_shift)) {
@@ -80,7 +81,11 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
80 81
81 /* Because zlib is not thread-safe, do all the I/O at the top. */ 82 /* Because zlib is not thread-safe, do all the I/O at the top. */
82 blocknum = block_start >> bufshift; 83 blocknum = block_start >> bufshift;
83 memset(bhs, 0, (needblocks + 1) * sizeof(struct buffer_head *)); 84 bhs = kcalloc(needblocks + 1, sizeof(*bhs), GFP_KERNEL);
85 if (!bhs) {
86 *errp = -ENOMEM;
87 return 0;
88 }
84 haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks); 89 haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks);
85 ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs); 90 ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs);
86 91
@@ -190,6 +195,7 @@ z_eio:
190b_eio: 195b_eio:
191 for (i = 0; i < haveblocks; i++) 196 for (i = 0; i < haveblocks; i++)
192 brelse(bhs[i]); 197 brelse(bhs[i]);
198 kfree(bhs);
193 return stream.total_out; 199 return stream.total_out;
194} 200}
195 201
@@ -305,7 +311,7 @@ static int zisofs_readpage(struct file *file, struct page *page)
305 unsigned int zisofs_pages_per_cblock = 311 unsigned int zisofs_pages_per_cblock =
306 PAGE_SHIFT <= zisofs_block_shift ? 312 PAGE_SHIFT <= zisofs_block_shift ?
307 (1 << (zisofs_block_shift - PAGE_SHIFT)) : 0; 313 (1 << (zisofs_block_shift - PAGE_SHIFT)) : 0;
308 struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)]; 314 struct page **pages;
309 pgoff_t index = page->index, end_index; 315 pgoff_t index = page->index, end_index;
310 316
311 end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; 317 end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -330,6 +336,12 @@ static int zisofs_readpage(struct file *file, struct page *page)
330 full_page = 0; 336 full_page = 0;
331 pcount = 1; 337 pcount = 1;
332 } 338 }
339 pages = kcalloc(max_t(unsigned int, zisofs_pages_per_cblock, 1),
340 sizeof(*pages), GFP_KERNEL);
341 if (!pages) {
342 unlock_page(page);
343 return -ENOMEM;
344 }
333 pages[full_page] = page; 345 pages[full_page] = page;
334 346
335 for (i = 0; i < pcount; i++, index++) { 347 for (i = 0; i < pcount; i++, index++) {
@@ -357,6 +369,7 @@ static int zisofs_readpage(struct file *file, struct page *page)
357 } 369 }
358 370
359 /* At this point, err contains 0 or -EIO depending on the "critical" page */ 371 /* At this point, err contains 0 or -EIO depending on the "critical" page */
372 kfree(pages);
360 return err; 373 return err;
361} 374}
362 375
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index bc258a4402f6..ec3fba7d492f 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -394,7 +394,10 @@ static int parse_options(char *options, struct iso9660_options *popt)
394 break; 394 break;
395#ifdef CONFIG_JOLIET 395#ifdef CONFIG_JOLIET
396 case Opt_iocharset: 396 case Opt_iocharset:
397 kfree(popt->iocharset);
397 popt->iocharset = match_strdup(&args[0]); 398 popt->iocharset = match_strdup(&args[0]);
399 if (!popt->iocharset)
400 return 0;
398 break; 401 break;
399#endif 402#endif
400 case Opt_map_a: 403 case Opt_map_a:
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index ac311037d7a5..8aa453784402 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -532,6 +532,7 @@ int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
532 */ 532 */
533 ret = start_this_handle(journal, handle, GFP_NOFS); 533 ret = start_this_handle(journal, handle, GFP_NOFS);
534 if (ret < 0) { 534 if (ret < 0) {
535 handle->h_journal = journal;
535 jbd2_journal_free_reserved(handle); 536 jbd2_journal_free_reserved(handle);
536 return ret; 537 return ret;
537 } 538 }
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index f60dee7faf03..87bdf0f4cba1 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -342,7 +342,7 @@ static void jffs2_put_super (struct super_block *sb)
342static void jffs2_kill_sb(struct super_block *sb) 342static void jffs2_kill_sb(struct super_block *sb)
343{ 343{
344 struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); 344 struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
345 if (!sb_rdonly(sb)) 345 if (c && !sb_rdonly(sb))
346 jffs2_stop_garbage_collect_thread(c); 346 jffs2_stop_garbage_collect_thread(c);
347 kill_mtd_super(sb); 347 kill_mtd_super(sb);
348 kfree(c); 348 kfree(c);
diff --git a/fs/namespace.c b/fs/namespace.c
index e398f32d7541..5f75969adff1 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1089,7 +1089,8 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
1089 goto out_free; 1089 goto out_free;
1090 } 1090 }
1091 1091
1092 mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED); 1092 mnt->mnt.mnt_flags = old->mnt.mnt_flags;
1093 mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
1093 /* Don't allow unprivileged users to change mount flags */ 1094 /* Don't allow unprivileged users to change mount flags */
1094 if (flag & CL_UNPRIVILEGED) { 1095 if (flag & CL_UNPRIVILEGED) {
1095 mnt->mnt.mnt_flags |= MNT_LOCK_ATIME; 1096 mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
@@ -2814,7 +2815,7 @@ long do_mount(const char *dev_name, const char __user *dir_name,
2814 mnt_flags |= MNT_NODIRATIME; 2815 mnt_flags |= MNT_NODIRATIME;
2815 if (flags & MS_STRICTATIME) 2816 if (flags & MS_STRICTATIME)
2816 mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME); 2817 mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
2817 if (flags & SB_RDONLY) 2818 if (flags & MS_RDONLY)
2818 mnt_flags |= MNT_READONLY; 2819 mnt_flags |= MNT_READONLY;
2819 2820
2820 /* The default atime for remount is preservation */ 2821 /* The default atime for remount is preservation */
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index d51e1bb781cf..d94e8031fe5f 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -92,7 +92,7 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
92 u32 event_mask, 92 u32 event_mask,
93 const void *data, int data_type) 93 const void *data, int data_type)
94{ 94{
95 __u32 marks_mask, marks_ignored_mask; 95 __u32 marks_mask = 0, marks_ignored_mask = 0;
96 const struct path *path = data; 96 const struct path *path = data;
97 97
98 pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p" 98 pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p"
@@ -108,24 +108,20 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
108 !d_can_lookup(path->dentry)) 108 !d_can_lookup(path->dentry))
109 return false; 109 return false;
110 110
111 if (inode_mark && vfsmnt_mark) { 111 /*
112 marks_mask = (vfsmnt_mark->mask | inode_mark->mask); 112 * if the event is for a child and this inode doesn't care about
113 marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask); 113 * events on the child, don't send it!
114 } else if (inode_mark) { 114 */
115 /* 115 if (inode_mark &&
116 * if the event is for a child and this inode doesn't care about 116 (!(event_mask & FS_EVENT_ON_CHILD) ||
117 * events on the child, don't send it! 117 (inode_mark->mask & FS_EVENT_ON_CHILD))) {
118 */ 118 marks_mask |= inode_mark->mask;
119 if ((event_mask & FS_EVENT_ON_CHILD) && 119 marks_ignored_mask |= inode_mark->ignored_mask;
120 !(inode_mark->mask & FS_EVENT_ON_CHILD)) 120 }
121 return false; 121
122 marks_mask = inode_mark->mask; 122 if (vfsmnt_mark) {
123 marks_ignored_mask = inode_mark->ignored_mask; 123 marks_mask |= vfsmnt_mark->mask;
124 } else if (vfsmnt_mark) { 124 marks_ignored_mask |= vfsmnt_mark->ignored_mask;
125 marks_mask = vfsmnt_mark->mask;
126 marks_ignored_mask = vfsmnt_mark->ignored_mask;
127 } else {
128 BUG();
129 } 125 }
130 126
131 if (d_is_dir(path->dentry) && 127 if (d_is_dir(path->dentry) &&
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 219b269c737e..613ec7e5a465 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -192,8 +192,9 @@ static int send_to_group(struct inode *to_tell,
192 struct fsnotify_iter_info *iter_info) 192 struct fsnotify_iter_info *iter_info)
193{ 193{
194 struct fsnotify_group *group = NULL; 194 struct fsnotify_group *group = NULL;
195 __u32 inode_test_mask = 0; 195 __u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);
196 __u32 vfsmount_test_mask = 0; 196 __u32 marks_mask = 0;
197 __u32 marks_ignored_mask = 0;
197 198
198 if (unlikely(!inode_mark && !vfsmount_mark)) { 199 if (unlikely(!inode_mark && !vfsmount_mark)) {
199 BUG(); 200 BUG();
@@ -213,29 +214,25 @@ static int send_to_group(struct inode *to_tell,
213 /* does the inode mark tell us to do something? */ 214 /* does the inode mark tell us to do something? */
214 if (inode_mark) { 215 if (inode_mark) {
215 group = inode_mark->group; 216 group = inode_mark->group;
216 inode_test_mask = (mask & ~FS_EVENT_ON_CHILD); 217 marks_mask |= inode_mark->mask;
217 inode_test_mask &= inode_mark->mask; 218 marks_ignored_mask |= inode_mark->ignored_mask;
218 inode_test_mask &= ~inode_mark->ignored_mask;
219 } 219 }
220 220
221 /* does the vfsmount_mark tell us to do something? */ 221 /* does the vfsmount_mark tell us to do something? */
222 if (vfsmount_mark) { 222 if (vfsmount_mark) {
223 vfsmount_test_mask = (mask & ~FS_EVENT_ON_CHILD);
224 group = vfsmount_mark->group; 223 group = vfsmount_mark->group;
225 vfsmount_test_mask &= vfsmount_mark->mask; 224 marks_mask |= vfsmount_mark->mask;
226 vfsmount_test_mask &= ~vfsmount_mark->ignored_mask; 225 marks_ignored_mask |= vfsmount_mark->ignored_mask;
227 if (inode_mark)
228 vfsmount_test_mask &= ~inode_mark->ignored_mask;
229 } 226 }
230 227
231 pr_debug("%s: group=%p to_tell=%p mask=%x inode_mark=%p" 228 pr_debug("%s: group=%p to_tell=%p mask=%x inode_mark=%p"
232 " inode_test_mask=%x vfsmount_mark=%p vfsmount_test_mask=%x" 229 " vfsmount_mark=%p marks_mask=%x marks_ignored_mask=%x"
233 " data=%p data_is=%d cookie=%d\n", 230 " data=%p data_is=%d cookie=%d\n",
234 __func__, group, to_tell, mask, inode_mark, 231 __func__, group, to_tell, mask, inode_mark, vfsmount_mark,
235 inode_test_mask, vfsmount_mark, vfsmount_test_mask, data, 232 marks_mask, marks_ignored_mask, data,
236 data_is, cookie); 233 data_is, cookie);
237 234
238 if (!inode_test_mask && !vfsmount_test_mask) 235 if (!(test_mask & marks_mask & ~marks_ignored_mask))
239 return 0; 236 return 0;
240 237
241 return group->ops->handle_event(group, to_tell, inode_mark, 238 return group->ops->handle_event(group, to_tell, inode_mark,
diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
index 3ae5fdba0225..10796d3fe27d 100644
--- a/fs/orangefs/super.c
+++ b/fs/orangefs/super.c
@@ -579,6 +579,11 @@ void orangefs_kill_sb(struct super_block *sb)
579 /* provided sb cleanup */ 579 /* provided sb cleanup */
580 kill_anon_super(sb); 580 kill_anon_super(sb);
581 581
582 if (!ORANGEFS_SB(sb)) {
583 mutex_lock(&orangefs_request_mutex);
584 mutex_unlock(&orangefs_request_mutex);
585 return;
586 }
582 /* 587 /*
583 * issue the unmount to userspace to tell it to remove the 588 * issue the unmount to userspace to tell it to remove the
584 * dynamic mount info it has for this superblock 589 * dynamic mount info it has for this superblock
diff --git a/fs/proc/base.c b/fs/proc/base.c
index eafa39a3a88c..1b2ede6abcdf 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1693,6 +1693,12 @@ void task_dump_owner(struct task_struct *task, umode_t mode,
1693 kuid_t uid; 1693 kuid_t uid;
1694 kgid_t gid; 1694 kgid_t gid;
1695 1695
1696 if (unlikely(task->flags & PF_KTHREAD)) {
1697 *ruid = GLOBAL_ROOT_UID;
1698 *rgid = GLOBAL_ROOT_GID;
1699 return;
1700 }
1701
1696 /* Default to the tasks effective ownership */ 1702 /* Default to the tasks effective ownership */
1697 rcu_read_lock(); 1703 rcu_read_lock();
1698 cred = __task_cred(task); 1704 cred = __task_cred(task);
diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c
index a000d7547479..b572cc865b92 100644
--- a/fs/proc/loadavg.c
+++ b/fs/proc/loadavg.c
@@ -24,7 +24,7 @@ static int loadavg_proc_show(struct seq_file *m, void *v)
24 LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]), 24 LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
25 LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]), 25 LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]),
26 nr_running(), nr_threads, 26 nr_running(), nr_threads,
27 idr_get_cursor(&task_active_pid_ns(current)->idr)); 27 idr_get_cursor(&task_active_pid_ns(current)->idr) - 1);
28 return 0; 28 return 0;
29} 29}
30 30
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 65ae54659833..c486ad4b43f0 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1310,9 +1310,11 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
1310#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 1310#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1311 else if (is_swap_pmd(pmd)) { 1311 else if (is_swap_pmd(pmd)) {
1312 swp_entry_t entry = pmd_to_swp_entry(pmd); 1312 swp_entry_t entry = pmd_to_swp_entry(pmd);
1313 unsigned long offset = swp_offset(entry);
1313 1314
1315 offset += (addr & ~PMD_MASK) >> PAGE_SHIFT;
1314 frame = swp_type(entry) | 1316 frame = swp_type(entry) |
1315 (swp_offset(entry) << MAX_SWAPFILES_SHIFT); 1317 (offset << MAX_SWAPFILES_SHIFT);
1316 flags |= PM_SWAP; 1318 flags |= PM_SWAP;
1317 if (pmd_swp_soft_dirty(pmd)) 1319 if (pmd_swp_soft_dirty(pmd))
1318 flags |= PM_SOFT_DIRTY; 1320 flags |= PM_SOFT_DIRTY;
@@ -1332,6 +1334,8 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
1332 break; 1334 break;
1333 if (pm->show_pfn && (flags & PM_PRESENT)) 1335 if (pm->show_pfn && (flags & PM_PRESENT))
1334 frame++; 1336 frame++;
1337 else if (flags & PM_SWAP)
1338 frame += (1 << MAX_SWAPFILES_SHIFT);
1335 } 1339 }
1336 spin_unlock(ptl); 1340 spin_unlock(ptl);
1337 return err; 1341 return err;
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 020c597ef9b6..d88231e3b2be 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2966,7 +2966,7 @@ static int __init dquot_init(void)
2966 NULL); 2966 NULL);
2967 2967
2968 order = 0; 2968 order = 0;
2969 dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order); 2969 dquot_hash = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order);
2970 if (!dquot_hash) 2970 if (!dquot_hash)
2971 panic("Cannot create dquot hash table"); 2971 panic("Cannot create dquot hash table");
2972 2972
diff --git a/fs/super.c b/fs/super.c
index 5fa9a8d8d865..122c402049a2 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -167,6 +167,7 @@ static void destroy_unused_super(struct super_block *s)
167 security_sb_free(s); 167 security_sb_free(s);
168 put_user_ns(s->s_user_ns); 168 put_user_ns(s->s_user_ns);
169 kfree(s->s_subtype); 169 kfree(s->s_subtype);
170 free_prealloced_shrinker(&s->s_shrink);
170 /* no delays needed */ 171 /* no delays needed */
171 destroy_super_work(&s->destroy_work); 172 destroy_super_work(&s->destroy_work);
172} 173}
@@ -252,6 +253,8 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
252 s->s_shrink.count_objects = super_cache_count; 253 s->s_shrink.count_objects = super_cache_count;
253 s->s_shrink.batch = 1024; 254 s->s_shrink.batch = 1024;
254 s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE; 255 s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
256 if (prealloc_shrinker(&s->s_shrink))
257 goto fail;
255 return s; 258 return s;
256 259
257fail: 260fail:
@@ -518,11 +521,7 @@ retry:
518 hlist_add_head(&s->s_instances, &type->fs_supers); 521 hlist_add_head(&s->s_instances, &type->fs_supers);
519 spin_unlock(&sb_lock); 522 spin_unlock(&sb_lock);
520 get_filesystem(type); 523 get_filesystem(type);
521 err = register_shrinker(&s->s_shrink); 524 register_shrinker_prepared(&s->s_shrink);
522 if (err) {
523 deactivate_locked_super(s);
524 s = ERR_PTR(err);
525 }
526 return s; 525 return s;
527} 526}
528 527
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index f897e55f2cd0..16a8ad21b77e 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -28,6 +28,9 @@
28 28
29#include "udf_sb.h" 29#include "udf_sb.h"
30 30
31#define SURROGATE_MASK 0xfffff800
32#define SURROGATE_PAIR 0x0000d800
33
31static int udf_uni2char_utf8(wchar_t uni, 34static int udf_uni2char_utf8(wchar_t uni,
32 unsigned char *out, 35 unsigned char *out,
33 int boundlen) 36 int boundlen)
@@ -37,6 +40,9 @@ static int udf_uni2char_utf8(wchar_t uni,
37 if (boundlen <= 0) 40 if (boundlen <= 0)
38 return -ENAMETOOLONG; 41 return -ENAMETOOLONG;
39 42
43 if ((uni & SURROGATE_MASK) == SURROGATE_PAIR)
44 return -EINVAL;
45
40 if (uni < 0x80) { 46 if (uni < 0x80) {
41 out[u_len++] = (unsigned char)uni; 47 out[u_len++] = (unsigned char)uni;
42 } else if (uni < 0x800) { 48 } else if (uni < 0x800) {
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index ce4a34a2751d..35a124400d60 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -511,7 +511,14 @@ xfs_attr_shortform_addname(xfs_da_args_t *args)
511 if (args->flags & ATTR_CREATE) 511 if (args->flags & ATTR_CREATE)
512 return retval; 512 return retval;
513 retval = xfs_attr_shortform_remove(args); 513 retval = xfs_attr_shortform_remove(args);
514 ASSERT(retval == 0); 514 if (retval)
515 return retval;
516 /*
517 * Since we have removed the old attr, clear ATTR_REPLACE so
518 * that the leaf format add routine won't trip over the attr
519 * not being around.
520 */
521 args->flags &= ~ATTR_REPLACE;
515 } 522 }
516 523
517 if (args->namelen >= XFS_ATTR_SF_ENTSIZE_MAX || 524 if (args->namelen >= XFS_ATTR_SF_ENTSIZE_MAX ||
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 6a7c2f03ea11..040eeda8426f 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -725,12 +725,16 @@ xfs_bmap_extents_to_btree(
725 *logflagsp = 0; 725 *logflagsp = 0;
726 if ((error = xfs_alloc_vextent(&args))) { 726 if ((error = xfs_alloc_vextent(&args))) {
727 xfs_iroot_realloc(ip, -1, whichfork); 727 xfs_iroot_realloc(ip, -1, whichfork);
728 ASSERT(ifp->if_broot == NULL);
729 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
728 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 730 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
729 return error; 731 return error;
730 } 732 }
731 733
732 if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) { 734 if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
733 xfs_iroot_realloc(ip, -1, whichfork); 735 xfs_iroot_realloc(ip, -1, whichfork);
736 ASSERT(ifp->if_broot == NULL);
737 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
734 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 738 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
735 return -ENOSPC; 739 return -ENOSPC;
736 } 740 }
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index ef68b1de006a..1201107eabc6 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -466,6 +466,8 @@ xfs_dinode_verify(
466 return __this_address; 466 return __this_address;
467 if (di_size > XFS_DFORK_DSIZE(dip, mp)) 467 if (di_size > XFS_DFORK_DSIZE(dip, mp))
468 return __this_address; 468 return __this_address;
469 if (dip->di_nextents)
470 return __this_address;
469 /* fall through */ 471 /* fall through */
470 case XFS_DINODE_FMT_EXTENTS: 472 case XFS_DINODE_FMT_EXTENTS:
471 case XFS_DINODE_FMT_BTREE: 473 case XFS_DINODE_FMT_BTREE:
@@ -484,12 +486,31 @@ xfs_dinode_verify(
484 if (XFS_DFORK_Q(dip)) { 486 if (XFS_DFORK_Q(dip)) {
485 switch (dip->di_aformat) { 487 switch (dip->di_aformat) {
486 case XFS_DINODE_FMT_LOCAL: 488 case XFS_DINODE_FMT_LOCAL:
489 if (dip->di_anextents)
490 return __this_address;
491 /* fall through */
487 case XFS_DINODE_FMT_EXTENTS: 492 case XFS_DINODE_FMT_EXTENTS:
488 case XFS_DINODE_FMT_BTREE: 493 case XFS_DINODE_FMT_BTREE:
489 break; 494 break;
490 default: 495 default:
491 return __this_address; 496 return __this_address;
492 } 497 }
498 } else {
499 /*
500 * If there is no fork offset, this may be a freshly-made inode
501 * in a new disk cluster, in which case di_aformat is zeroed.
502 * Otherwise, such an inode must be in EXTENTS format; this goes
503 * for freed inodes as well.
504 */
505 switch (dip->di_aformat) {
506 case 0:
507 case XFS_DINODE_FMT_EXTENTS:
508 break;
509 default:
510 return __this_address;
511 }
512 if (dip->di_anextents)
513 return __this_address;
493 } 514 }
494 515
495 /* only version 3 or greater inodes are extensively verified here */ 516 /* only version 3 or greater inodes are extensively verified here */
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 299aee4b7b0b..eed073cc4778 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -778,22 +778,26 @@ xfs_file_fallocate(
778 if (error) 778 if (error)
779 goto out_unlock; 779 goto out_unlock;
780 } else if (mode & FALLOC_FL_INSERT_RANGE) { 780 } else if (mode & FALLOC_FL_INSERT_RANGE) {
781 unsigned int blksize_mask = i_blocksize(inode) - 1; 781 unsigned int blksize_mask = i_blocksize(inode) - 1;
782 loff_t isize = i_size_read(inode);
782 783
783 new_size = i_size_read(inode) + len;
784 if (offset & blksize_mask || len & blksize_mask) { 784 if (offset & blksize_mask || len & blksize_mask) {
785 error = -EINVAL; 785 error = -EINVAL;
786 goto out_unlock; 786 goto out_unlock;
787 } 787 }
788 788
789 /* check the new inode size does not wrap through zero */ 789 /*
790 if (new_size > inode->i_sb->s_maxbytes) { 790 * New inode size must not exceed ->s_maxbytes, accounting for
791 * possible signed overflow.
792 */
793 if (inode->i_sb->s_maxbytes - isize < len) {
791 error = -EFBIG; 794 error = -EFBIG;
792 goto out_unlock; 795 goto out_unlock;
793 } 796 }
797 new_size = isize + len;
794 798
795 /* Offset should be less than i_size */ 799 /* Offset should be less than i_size */
796 if (offset >= i_size_read(inode)) { 800 if (offset >= isize) {
797 error = -EINVAL; 801 error = -EINVAL;
798 goto out_unlock; 802 goto out_unlock;
799 } 803 }
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 278841c75b97..af240573e482 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -188,7 +188,7 @@
188#endif 188#endif
189 189
190#ifdef CONFIG_SERIAL_EARLYCON 190#ifdef CONFIG_SERIAL_EARLYCON
191#define EARLYCON_TABLE() STRUCT_ALIGN(); \ 191#define EARLYCON_TABLE() . = ALIGN(8); \
192 VMLINUX_SYMBOL(__earlycon_table) = .; \ 192 VMLINUX_SYMBOL(__earlycon_table) = .; \
193 KEEP(*(__earlycon_table)) \ 193 KEEP(*(__earlycon_table)) \
194 VMLINUX_SYMBOL(__earlycon_table_end) = .; 194 VMLINUX_SYMBOL(__earlycon_table_end) = .;
diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h
index 562fa7df2637..98e63d870139 100644
--- a/include/drm/drm_hdcp.h
+++ b/include/drm/drm_hdcp.h
@@ -19,7 +19,7 @@
19#define DRM_HDCP_RI_LEN 2 19#define DRM_HDCP_RI_LEN 2
20#define DRM_HDCP_V_PRIME_PART_LEN 4 20#define DRM_HDCP_V_PRIME_PART_LEN 4
21#define DRM_HDCP_V_PRIME_NUM_PARTS 5 21#define DRM_HDCP_V_PRIME_NUM_PARTS 5
22#define DRM_HDCP_NUM_DOWNSTREAM(x) (x & 0x3f) 22#define DRM_HDCP_NUM_DOWNSTREAM(x) (x & 0x7f)
23#define DRM_HDCP_MAX_CASCADE_EXCEEDED(x) (x & BIT(3)) 23#define DRM_HDCP_MAX_CASCADE_EXCEEDED(x) (x & BIT(3))
24#define DRM_HDCP_MAX_DEVICE_EXCEEDED(x) (x & BIT(7)) 24#define DRM_HDCP_MAX_DEVICE_EXCEEDED(x) (x & BIT(7))
25 25
diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
index e518e4e3dfb5..4b1548129fa2 100644
--- a/include/kvm/arm_psci.h
+++ b/include/kvm/arm_psci.h
@@ -37,10 +37,15 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
37 * Our PSCI implementation stays the same across versions from 37 * Our PSCI implementation stays the same across versions from
38 * v0.2 onward, only adding the few mandatory functions (such 38 * v0.2 onward, only adding the few mandatory functions (such
39 * as FEATURES with 1.0) that are required by newer 39 * as FEATURES with 1.0) that are required by newer
40 * revisions. It is thus safe to return the latest. 40 * revisions. It is thus safe to return the latest, unless
41 * userspace has instructed us otherwise.
41 */ 42 */
42 if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) 43 if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) {
44 if (vcpu->kvm->arch.psci_version)
45 return vcpu->kvm->arch.psci_version;
46
43 return KVM_ARM_PSCI_LATEST; 47 return KVM_ARM_PSCI_LATEST;
48 }
44 49
45 return KVM_ARM_PSCI_0_1; 50 return KVM_ARM_PSCI_0_1;
46} 51}
@@ -48,4 +53,11 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
48 53
49int kvm_hvc_call_handler(struct kvm_vcpu *vcpu); 54int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
50 55
56struct kvm_one_reg;
57
58int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu);
59int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
60int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
61int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
62
51#endif /* __KVM_ARM_PSCI_H__ */ 63#endif /* __KVM_ARM_PSCI_H__ */
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index bfe86b54f6c1..0bd432a4d7bd 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -223,6 +223,11 @@ static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
223 set_wb_congested(bdi->wb.congested, sync); 223 set_wb_congested(bdi->wb.congested, sync);
224} 224}
225 225
226struct wb_lock_cookie {
227 bool locked;
228 unsigned long flags;
229};
230
226#ifdef CONFIG_CGROUP_WRITEBACK 231#ifdef CONFIG_CGROUP_WRITEBACK
227 232
228/** 233/**
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index f6be4b0b6c18..72ca0f3d39f3 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -347,7 +347,7 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
347/** 347/**
348 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction 348 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
349 * @inode: target inode 349 * @inode: target inode
350 * @lockedp: temp bool output param, to be passed to the end function 350 * @cookie: output param, to be passed to the end function
351 * 351 *
352 * The caller wants to access the wb associated with @inode but isn't 352 * The caller wants to access the wb associated with @inode but isn't
353 * holding inode->i_lock, the i_pages lock or wb->list_lock. This 353 * holding inode->i_lock, the i_pages lock or wb->list_lock. This
@@ -355,12 +355,12 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
355 * association doesn't change until the transaction is finished with 355 * association doesn't change until the transaction is finished with
356 * unlocked_inode_to_wb_end(). 356 * unlocked_inode_to_wb_end().
357 * 357 *
358 * The caller must call unlocked_inode_to_wb_end() with *@lockdep 358 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
359 * afterwards and can't sleep during transaction. IRQ may or may not be 359 * can't sleep during the transaction. IRQs may or may not be disabled on
360 * disabled on return. 360 * return.
361 */ 361 */
362static inline struct bdi_writeback * 362static inline struct bdi_writeback *
363unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) 363unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
364{ 364{
365 rcu_read_lock(); 365 rcu_read_lock();
366 366
@@ -368,10 +368,10 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
368 * Paired with store_release in inode_switch_wb_work_fn() and 368 * Paired with store_release in inode_switch_wb_work_fn() and
369 * ensures that we see the new wb if we see cleared I_WB_SWITCH. 369 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
370 */ 370 */
371 *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; 371 cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
372 372
373 if (unlikely(*lockedp)) 373 if (unlikely(cookie->locked))
374 xa_lock_irq(&inode->i_mapping->i_pages); 374 xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);
375 375
376 /* 376 /*
377 * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages 377 * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
@@ -383,12 +383,13 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
383/** 383/**
384 * unlocked_inode_to_wb_end - end inode wb access transaction 384 * unlocked_inode_to_wb_end - end inode wb access transaction
385 * @inode: target inode 385 * @inode: target inode
386 * @locked: *@lockedp from unlocked_inode_to_wb_begin() 386 * @cookie: @cookie from unlocked_inode_to_wb_begin()
387 */ 387 */
388static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) 388static inline void unlocked_inode_to_wb_end(struct inode *inode,
389 struct wb_lock_cookie *cookie)
389{ 390{
390 if (unlikely(locked)) 391 if (unlikely(cookie->locked))
391 xa_unlock_irq(&inode->i_mapping->i_pages); 392 xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);
392 393
393 rcu_read_unlock(); 394 rcu_read_unlock();
394} 395}
@@ -435,12 +436,13 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
435} 436}
436 437
437static inline struct bdi_writeback * 438static inline struct bdi_writeback *
438unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) 439unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
439{ 440{
440 return inode_to_wb(inode); 441 return inode_to_wb(inode);
441} 442}
442 443
443static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) 444static inline void unlocked_inode_to_wb_end(struct inode *inode,
445 struct wb_lock_cookie *cookie)
444{ 446{
445} 447}
446 448
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index e3986f4b3461..ebc34a5686dc 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -9,6 +9,9 @@
9struct blk_mq_tags; 9struct blk_mq_tags;
10struct blk_flush_queue; 10struct blk_flush_queue;
11 11
12/**
13 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware block device
14 */
12struct blk_mq_hw_ctx { 15struct blk_mq_hw_ctx {
13 struct { 16 struct {
14 spinlock_t lock; 17 spinlock_t lock;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 21e21f273a21..5c4eee043191 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -742,6 +742,7 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
742#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags) 742#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
743#define blk_queue_preempt_only(q) \ 743#define blk_queue_preempt_only(q) \
744 test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags) 744 test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)
745#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
745 746
746extern int blk_set_preempt_only(struct request_queue *q); 747extern int blk_set_preempt_only(struct request_queue *q);
747extern void blk_clear_preempt_only(struct request_queue *q); 748extern void blk_clear_preempt_only(struct request_queue *q);
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 95a7abd0ee92..486e65e3db26 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -339,8 +339,8 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
339void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs, 339void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
340 struct bpf_prog *old_prog); 340 struct bpf_prog *old_prog);
341int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array, 341int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
342 __u32 __user *prog_ids, u32 request_cnt, 342 u32 *prog_ids, u32 request_cnt,
343 __u32 __user *prog_cnt); 343 u32 *prog_cnt);
344int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, 344int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
345 struct bpf_prog *exclude_prog, 345 struct bpf_prog *exclude_prog,
346 struct bpf_prog *include_prog, 346 struct bpf_prog *include_prog,
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index ceb96ecab96e..7d98e263e048 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -25,6 +25,9 @@
25#define __SANITIZE_ADDRESS__ 25#define __SANITIZE_ADDRESS__
26#endif 26#endif
27 27
28#undef __no_sanitize_address
29#define __no_sanitize_address __attribute__((no_sanitize("address")))
30
28/* Clang doesn't have a way to turn it off per-function, yet. */ 31/* Clang doesn't have a way to turn it off per-function, yet. */
29#ifdef __noretpoline 32#ifdef __noretpoline
30#undef __noretpoline 33#undef __noretpoline
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h
index edfeaba95429..a1a959ba24ff 100644
--- a/include/linux/coresight-pmu.h
+++ b/include/linux/coresight-pmu.h
@@ -1,18 +1,7 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * Copyright(C) 2015 Linaro Limited. All rights reserved. 3 * Copyright(C) 2015 Linaro Limited. All rights reserved.
3 * Author: Mathieu Poirier <mathieu.poirier@linaro.org> 4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 5 */
17 6
18#ifndef _LINUX_CORESIGHT_PMU_H 7#ifndef _LINUX_CORESIGHT_PMU_H
diff --git a/include/linux/device.h b/include/linux/device.h
index 0059b99e1f25..477956990f5e 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -256,7 +256,9 @@ enum probe_type {
256 * automatically. 256 * automatically.
257 * @pm: Power management operations of the device which matched 257 * @pm: Power management operations of the device which matched
258 * this driver. 258 * this driver.
259 * @coredump: Called through sysfs to initiate a device coredump. 259 * @coredump: Called when sysfs entry is written to. The device driver
260 * is expected to call the dev_coredump API resulting in a
261 * uevent.
260 * @p: Driver core's private data, no one other than the driver 262 * @p: Driver core's private data, no one other than the driver
261 * core can touch this. 263 * core can touch this.
262 * 264 *
@@ -288,7 +290,7 @@ struct device_driver {
288 const struct attribute_group **groups; 290 const struct attribute_group **groups;
289 291
290 const struct dev_pm_ops *pm; 292 const struct dev_pm_ops *pm;
291 int (*coredump) (struct device *dev); 293 void (*coredump) (struct device *dev);
292 294
293 struct driver_private *p; 295 struct driver_private *p;
294}; 296};
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index ebe41811ed34..b32cd2062f18 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -310,6 +310,8 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
310 * fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS 310 * fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS
311 * instead of the latter), any change to them will be overwritten 311 * instead of the latter), any change to them will be overwritten
312 * by kernel. Returns a negative error code or zero. 312 * by kernel. Returns a negative error code or zero.
313 * @get_fecparam: Get the network device Forward Error Correction parameters.
314 * @set_fecparam: Set the network device Forward Error Correction parameters.
313 * 315 *
314 * All operations are optional (i.e. the function pointer may be set 316 * All operations are optional (i.e. the function pointer may be set
315 * to %NULL) and callers must take this into account. Callers must 317 * to %NULL) and callers must take this into account. Callers must
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 9f1edb92c97e..e64c0294f50b 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -217,12 +217,10 @@ struct fsnotify_mark_connector {
217 union { /* Object pointer [lock] */ 217 union { /* Object pointer [lock] */
218 struct inode *inode; 218 struct inode *inode;
219 struct vfsmount *mnt; 219 struct vfsmount *mnt;
220 };
221 union {
222 struct hlist_head list;
223 /* Used listing heads to free after srcu period expires */ 220 /* Used listing heads to free after srcu period expires */
224 struct fsnotify_mark_connector *destroy_next; 221 struct fsnotify_mark_connector *destroy_next;
225 }; 222 };
223 struct hlist_head list;
226}; 224};
227 225
228/* 226/*
@@ -248,7 +246,7 @@ struct fsnotify_mark {
248 /* Group this mark is for. Set on mark creation, stable until last ref 246 /* Group this mark is for. Set on mark creation, stable until last ref
249 * is dropped */ 247 * is dropped */
250 struct fsnotify_group *group; 248 struct fsnotify_group *group;
251 /* List of marks by group->i_fsnotify_marks. Also reused for queueing 249 /* List of marks by group->marks_list. Also reused for queueing
252 * mark into destroy_list when it's waiting for the end of SRCU period 250 * mark into destroy_list when it's waiting for the end of SRCU period
253 * before it can be freed. [group->mark_mutex] */ 251 * before it can be freed. [group->mark_mutex] */
254 struct list_head g_list; 252 struct list_head g_list;
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 8da3e1f48195..26240a22978a 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -516,6 +516,12 @@ enum hid_type {
516 HID_TYPE_USBNONE 516 HID_TYPE_USBNONE
517}; 517};
518 518
519enum hid_battery_status {
520 HID_BATTERY_UNKNOWN = 0,
521 HID_BATTERY_QUERIED, /* Kernel explicitly queried battery strength */
522 HID_BATTERY_REPORTED, /* Device sent unsolicited battery strength report */
523};
524
519struct hid_driver; 525struct hid_driver;
520struct hid_ll_driver; 526struct hid_ll_driver;
521 527
@@ -558,7 +564,8 @@ struct hid_device { /* device report descriptor */
558 __s32 battery_max; 564 __s32 battery_max;
559 __s32 battery_report_type; 565 __s32 battery_report_type;
560 __s32 battery_report_id; 566 __s32 battery_report_id;
561 bool battery_reported; 567 enum hid_battery_status battery_status;
568 bool battery_avoid_query;
562#endif 569#endif
563 570
564 unsigned int status; /* see STAT flags above */ 571 unsigned int status; /* see STAT flags above */
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index a2656c3ebe81..3892e9c8b2de 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -161,9 +161,11 @@ struct hrtimer_clock_base {
161enum hrtimer_base_type { 161enum hrtimer_base_type {
162 HRTIMER_BASE_MONOTONIC, 162 HRTIMER_BASE_MONOTONIC,
163 HRTIMER_BASE_REALTIME, 163 HRTIMER_BASE_REALTIME,
164 HRTIMER_BASE_BOOTTIME,
164 HRTIMER_BASE_TAI, 165 HRTIMER_BASE_TAI,
165 HRTIMER_BASE_MONOTONIC_SOFT, 166 HRTIMER_BASE_MONOTONIC_SOFT,
166 HRTIMER_BASE_REALTIME_SOFT, 167 HRTIMER_BASE_REALTIME_SOFT,
168 HRTIMER_BASE_BOOTTIME_SOFT,
167 HRTIMER_BASE_TAI_SOFT, 169 HRTIMER_BASE_TAI_SOFT,
168 HRTIMER_MAX_CLOCK_BASES, 170 HRTIMER_MAX_CLOCK_BASES,
169}; 171};
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index d11f41d5269f..78a5a90b4267 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -663,7 +663,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb)
663 * Returns true if the skb is tagged with multiple vlan headers, regardless 663 * Returns true if the skb is tagged with multiple vlan headers, regardless
664 * of whether it is hardware accelerated or not. 664 * of whether it is hardware accelerated or not.
665 */ 665 */
666static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) 666static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
667{ 667{
668 __be16 protocol = skb->protocol; 668 __be16 protocol = skb->protocol;
669 669
@@ -673,6 +673,9 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
673 if (likely(!eth_type_vlan(protocol))) 673 if (likely(!eth_type_vlan(protocol)))
674 return false; 674 return false;
675 675
676 if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
677 return false;
678
676 veh = (struct vlan_ethhdr *)skb->data; 679 veh = (struct vlan_ethhdr *)skb->data;
677 protocol = veh->h_vlan_encapsulated_proto; 680 protocol = veh->h_vlan_encapsulated_proto;
678 } 681 }
@@ -690,7 +693,7 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
690 * 693 *
691 * Returns features without unsafe ones if the skb has multiple tags. 694 * Returns features without unsafe ones if the skb has multiple tags.
692 */ 695 */
693static inline netdev_features_t vlan_features_check(const struct sk_buff *skb, 696static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
694 netdev_features_t features) 697 netdev_features_t features)
695{ 698{
696 if (skb_vlan_tagged_multi(skb)) { 699 if (skb_vlan_tagged_multi(skb)) {
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 4754f01c1abb..aec44b1d9582 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -186,13 +186,20 @@ static inline bool klp_have_reliable_stack(void)
186 IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE); 186 IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE);
187} 187}
188 188
189typedef int (*klp_shadow_ctor_t)(void *obj,
190 void *shadow_data,
191 void *ctor_data);
192typedef void (*klp_shadow_dtor_t)(void *obj, void *shadow_data);
193
189void *klp_shadow_get(void *obj, unsigned long id); 194void *klp_shadow_get(void *obj, unsigned long id);
190void *klp_shadow_alloc(void *obj, unsigned long id, void *data, 195void *klp_shadow_alloc(void *obj, unsigned long id,
191 size_t size, gfp_t gfp_flags); 196 size_t size, gfp_t gfp_flags,
192void *klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, 197 klp_shadow_ctor_t ctor, void *ctor_data);
193 size_t size, gfp_t gfp_flags); 198void *klp_shadow_get_or_alloc(void *obj, unsigned long id,
194void klp_shadow_free(void *obj, unsigned long id); 199 size_t size, gfp_t gfp_flags,
195void klp_shadow_free_all(unsigned long id); 200 klp_shadow_ctor_t ctor, void *ctor_data);
201void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor);
202void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor);
196 203
197#else /* !CONFIG_LIVEPATCH */ 204#else /* !CONFIG_LIVEPATCH */
198 205
diff --git a/include/linux/microchipphy.h b/include/linux/microchipphy.h
index eb492d47f717..8f9c90379732 100644
--- a/include/linux/microchipphy.h
+++ b/include/linux/microchipphy.h
@@ -70,4 +70,12 @@
70#define LAN88XX_MMD3_CHIP_ID (32877) 70#define LAN88XX_MMD3_CHIP_ID (32877)
71#define LAN88XX_MMD3_CHIP_REV (32878) 71#define LAN88XX_MMD3_CHIP_REV (32878)
72 72
73/* DSP registers */
74#define PHY_ARDENNES_MMD_DEV_3_PHY_CFG (0x806A)
75#define PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_ (0x2000)
76#define LAN88XX_EXT_PAGE_ACCESS_TR (0x52B5)
77#define LAN88XX_EXT_PAGE_TR_CR 16
78#define LAN88XX_EXT_PAGE_TR_LOW_DATA 17
79#define LAN88XX_EXT_PAGE_TR_HIGH_DATA 18
80
73#endif /* _MICROCHIPPHY_H */ 81#endif /* _MICROCHIPPHY_H */
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index b63fa457febd..3529683f691e 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -85,6 +85,7 @@ struct flchip {
85 unsigned int write_suspended:1; 85 unsigned int write_suspended:1;
86 unsigned int erase_suspended:1; 86 unsigned int erase_suspended:1;
87 unsigned long in_progress_block_addr; 87 unsigned long in_progress_block_addr;
88 unsigned long in_progress_block_mask;
88 89
89 struct mutex mutex; 90 struct mutex mutex;
90 wait_queue_head_t wq; /* Wait on here when we're waiting for the chip 91 wait_queue_head_t wq; /* Wait on here when we're waiting for the chip
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 1d356105f25a..b4c9fda9d833 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -351,10 +351,10 @@ struct earlycon_id {
351 char name[16]; 351 char name[16];
352 char compatible[128]; 352 char compatible[128];
353 int (*setup)(struct earlycon_device *, const char *options); 353 int (*setup)(struct earlycon_device *, const char *options);
354} __aligned(32); 354};
355 355
356extern const struct earlycon_id __earlycon_table[]; 356extern const struct earlycon_id *__earlycon_table[];
357extern const struct earlycon_id __earlycon_table_end[]; 357extern const struct earlycon_id *__earlycon_table_end[];
358 358
359#if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE) 359#if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE)
360#define EARLYCON_USED_OR_UNUSED __used 360#define EARLYCON_USED_OR_UNUSED __used
@@ -362,12 +362,19 @@ extern const struct earlycon_id __earlycon_table_end[];
362#define EARLYCON_USED_OR_UNUSED __maybe_unused 362#define EARLYCON_USED_OR_UNUSED __maybe_unused
363#endif 363#endif
364 364
365#define OF_EARLYCON_DECLARE(_name, compat, fn) \ 365#define _OF_EARLYCON_DECLARE(_name, compat, fn, unique_id) \
366 static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \ 366 static const struct earlycon_id unique_id \
367 EARLYCON_USED_OR_UNUSED __section(__earlycon_table) \ 367 EARLYCON_USED_OR_UNUSED __initconst \
368 = { .name = __stringify(_name), \ 368 = { .name = __stringify(_name), \
369 .compatible = compat, \ 369 .compatible = compat, \
370 .setup = fn } 370 .setup = fn }; \
371 static const struct earlycon_id EARLYCON_USED_OR_UNUSED \
372 __section(__earlycon_table) \
373 * const __PASTE(__p, unique_id) = &unique_id
374
375#define OF_EARLYCON_DECLARE(_name, compat, fn) \
376 _OF_EARLYCON_DECLARE(_name, compat, fn, \
377 __UNIQUE_ID(__earlycon_##_name))
371 378
372#define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn) 379#define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn)
373 380
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 388ff2936a87..6794490f25b2 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -75,6 +75,9 @@ struct shrinker {
75#define SHRINKER_NUMA_AWARE (1 << 0) 75#define SHRINKER_NUMA_AWARE (1 << 0)
76#define SHRINKER_MEMCG_AWARE (1 << 1) 76#define SHRINKER_MEMCG_AWARE (1 << 1)
77 77
78extern int register_shrinker(struct shrinker *); 78extern int prealloc_shrinker(struct shrinker *shrinker);
79extern void unregister_shrinker(struct shrinker *); 79extern void register_shrinker_prepared(struct shrinker *shrinker);
80extern int register_shrinker(struct shrinker *shrinker);
81extern void unregister_shrinker(struct shrinker *shrinker);
82extern void free_prealloced_shrinker(struct shrinker *shrinker);
80#endif 83#endif
diff --git a/include/linux/stringhash.h b/include/linux/stringhash.h
index e8f0f852968f..c0c5c5b73dc0 100644
--- a/include/linux/stringhash.h
+++ b/include/linux/stringhash.h
@@ -50,9 +50,9 @@ partial_name_hash(unsigned long c, unsigned long prevhash)
50 * losing bits). This also has the property (wanted by the dcache) 50 * losing bits). This also has the property (wanted by the dcache)
51 * that the msbits make a good hash table index. 51 * that the msbits make a good hash table index.
52 */ 52 */
53static inline unsigned long end_name_hash(unsigned long hash) 53static inline unsigned int end_name_hash(unsigned long hash)
54{ 54{
55 return __hash_32((unsigned int)hash); 55 return hash_long(hash, 32);
56} 56}
57 57
58/* 58/*
diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h
index 0494db3fd9e8..13770cfe33ad 100644
--- a/include/linux/textsearch.h
+++ b/include/linux/textsearch.h
@@ -62,7 +62,7 @@ struct ts_config
62 int flags; 62 int flags;
63 63
64 /** 64 /**
65 * get_next_block - fetch next block of data 65 * @get_next_block: fetch next block of data
66 * @consumed: number of bytes consumed by the caller 66 * @consumed: number of bytes consumed by the caller
67 * @dst: destination buffer 67 * @dst: destination buffer
68 * @conf: search configuration 68 * @conf: search configuration
@@ -79,7 +79,7 @@ struct ts_config
79 struct ts_state *state); 79 struct ts_state *state);
80 80
81 /** 81 /**
82 * finish - finalize/clean a series of get_next_block() calls 82 * @finish: finalize/clean a series of get_next_block() calls
83 * @conf: search configuration 83 * @conf: search configuration
84 * @state: search state 84 * @state: search state
85 * 85 *
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 34f053a150a9..cf2862bd134a 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -43,11 +43,7 @@ enum {
43#define THREAD_ALIGN THREAD_SIZE 43#define THREAD_ALIGN THREAD_SIZE
44#endif 44#endif
45 45
46#if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK) 46#define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
47# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
48#else
49# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT)
50#endif
51 47
52/* 48/*
53 * flag set/clear/test wrappers 49 * flag set/clear/test wrappers
diff --git a/include/linux/ti-emif-sram.h b/include/linux/ti-emif-sram.h
index 45bc6b376492..53604b087f2c 100644
--- a/include/linux/ti-emif-sram.h
+++ b/include/linux/ti-emif-sram.h
@@ -60,6 +60,81 @@ struct ti_emif_pm_functions {
60 u32 abort_sr; 60 u32 abort_sr;
61} __packed __aligned(8); 61} __packed __aligned(8);
62 62
63static inline void ti_emif_asm_offsets(void)
64{
65 DEFINE(EMIF_SDCFG_VAL_OFFSET,
66 offsetof(struct emif_regs_amx3, emif_sdcfg_val));
67 DEFINE(EMIF_TIMING1_VAL_OFFSET,
68 offsetof(struct emif_regs_amx3, emif_timing1_val));
69 DEFINE(EMIF_TIMING2_VAL_OFFSET,
70 offsetof(struct emif_regs_amx3, emif_timing2_val));
71 DEFINE(EMIF_TIMING3_VAL_OFFSET,
72 offsetof(struct emif_regs_amx3, emif_timing3_val));
73 DEFINE(EMIF_REF_CTRL_VAL_OFFSET,
74 offsetof(struct emif_regs_amx3, emif_ref_ctrl_val));
75 DEFINE(EMIF_ZQCFG_VAL_OFFSET,
76 offsetof(struct emif_regs_amx3, emif_zqcfg_val));
77 DEFINE(EMIF_PMCR_VAL_OFFSET,
78 offsetof(struct emif_regs_amx3, emif_pmcr_val));
79 DEFINE(EMIF_PMCR_SHDW_VAL_OFFSET,
80 offsetof(struct emif_regs_amx3, emif_pmcr_shdw_val));
81 DEFINE(EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET,
82 offsetof(struct emif_regs_amx3, emif_rd_wr_level_ramp_ctrl));
83 DEFINE(EMIF_RD_WR_EXEC_THRESH_OFFSET,
84 offsetof(struct emif_regs_amx3, emif_rd_wr_exec_thresh));
85 DEFINE(EMIF_COS_CONFIG_OFFSET,
86 offsetof(struct emif_regs_amx3, emif_cos_config));
87 DEFINE(EMIF_PRIORITY_TO_COS_MAPPING_OFFSET,
88 offsetof(struct emif_regs_amx3, emif_priority_to_cos_mapping));
89 DEFINE(EMIF_CONNECT_ID_SERV_1_MAP_OFFSET,
90 offsetof(struct emif_regs_amx3, emif_connect_id_serv_1_map));
91 DEFINE(EMIF_CONNECT_ID_SERV_2_MAP_OFFSET,
92 offsetof(struct emif_regs_amx3, emif_connect_id_serv_2_map));
93 DEFINE(EMIF_OCP_CONFIG_VAL_OFFSET,
94 offsetof(struct emif_regs_amx3, emif_ocp_config_val));
95 DEFINE(EMIF_LPDDR2_NVM_TIM_OFFSET,
96 offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim));
97 DEFINE(EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET,
98 offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim_shdw));
99 DEFINE(EMIF_DLL_CALIB_CTRL_VAL_OFFSET,
100 offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val));
101 DEFINE(EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET,
102 offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val_shdw));
103 DEFINE(EMIF_DDR_PHY_CTLR_1_OFFSET,
104 offsetof(struct emif_regs_amx3, emif_ddr_phy_ctlr_1));
105 DEFINE(EMIF_EXT_PHY_CTRL_VALS_OFFSET,
106 offsetof(struct emif_regs_amx3, emif_ext_phy_ctrl_vals));
107 DEFINE(EMIF_REGS_AMX3_SIZE, sizeof(struct emif_regs_amx3));
108
109 BLANK();
110
111 DEFINE(EMIF_PM_BASE_ADDR_VIRT_OFFSET,
112 offsetof(struct ti_emif_pm_data, ti_emif_base_addr_virt));
113 DEFINE(EMIF_PM_BASE_ADDR_PHYS_OFFSET,
114 offsetof(struct ti_emif_pm_data, ti_emif_base_addr_phys));
115 DEFINE(EMIF_PM_CONFIG_OFFSET,
116 offsetof(struct ti_emif_pm_data, ti_emif_sram_config));
117 DEFINE(EMIF_PM_REGS_VIRT_OFFSET,
118 offsetof(struct ti_emif_pm_data, regs_virt));
119 DEFINE(EMIF_PM_REGS_PHYS_OFFSET,
120 offsetof(struct ti_emif_pm_data, regs_phys));
121 DEFINE(EMIF_PM_DATA_SIZE, sizeof(struct ti_emif_pm_data));
122
123 BLANK();
124
125 DEFINE(EMIF_PM_SAVE_CONTEXT_OFFSET,
126 offsetof(struct ti_emif_pm_functions, save_context));
127 DEFINE(EMIF_PM_RESTORE_CONTEXT_OFFSET,
128 offsetof(struct ti_emif_pm_functions, restore_context));
129 DEFINE(EMIF_PM_ENTER_SR_OFFSET,
130 offsetof(struct ti_emif_pm_functions, enter_sr));
131 DEFINE(EMIF_PM_EXIT_SR_OFFSET,
132 offsetof(struct ti_emif_pm_functions, exit_sr));
133 DEFINE(EMIF_PM_ABORT_SR_OFFSET,
134 offsetof(struct ti_emif_pm_functions, abort_sr));
135 DEFINE(EMIF_PM_FUNCTIONS_SIZE, sizeof(struct ti_emif_pm_functions));
136}
137
63struct gen_pool; 138struct gen_pool;
64 139
65int ti_emif_copy_pm_function_table(struct gen_pool *sram_pool, void *dst); 140int ti_emif_copy_pm_function_table(struct gen_pool *sram_pool, void *dst);
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 4b3dca173e89..7acb953298a7 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -52,7 +52,6 @@ struct tk_read_base {
52 * @offs_real: Offset clock monotonic -> clock realtime 52 * @offs_real: Offset clock monotonic -> clock realtime
53 * @offs_boot: Offset clock monotonic -> clock boottime 53 * @offs_boot: Offset clock monotonic -> clock boottime
54 * @offs_tai: Offset clock monotonic -> clock tai 54 * @offs_tai: Offset clock monotonic -> clock tai
55 * @time_suspended: Accumulated suspend time
56 * @tai_offset: The current UTC to TAI offset in seconds 55 * @tai_offset: The current UTC to TAI offset in seconds
57 * @clock_was_set_seq: The sequence number of clock was set events 56 * @clock_was_set_seq: The sequence number of clock was set events
58 * @cs_was_changed_seq: The sequence number of clocksource change events 57 * @cs_was_changed_seq: The sequence number of clocksource change events
@@ -95,7 +94,6 @@ struct timekeeper {
95 ktime_t offs_real; 94 ktime_t offs_real;
96 ktime_t offs_boot; 95 ktime_t offs_boot;
97 ktime_t offs_tai; 96 ktime_t offs_tai;
98 ktime_t time_suspended;
99 s32 tai_offset; 97 s32 tai_offset;
100 unsigned int clock_was_set_seq; 98 unsigned int clock_was_set_seq;
101 u8 cs_was_changed_seq; 99 u8 cs_was_changed_seq;
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 9737fbec7019..588a0e4b1ab9 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -33,25 +33,20 @@ extern void ktime_get_ts64(struct timespec64 *ts);
33extern time64_t ktime_get_seconds(void); 33extern time64_t ktime_get_seconds(void);
34extern time64_t __ktime_get_real_seconds(void); 34extern time64_t __ktime_get_real_seconds(void);
35extern time64_t ktime_get_real_seconds(void); 35extern time64_t ktime_get_real_seconds(void);
36extern void ktime_get_active_ts64(struct timespec64 *ts);
37 36
38extern int __getnstimeofday64(struct timespec64 *tv); 37extern int __getnstimeofday64(struct timespec64 *tv);
39extern void getnstimeofday64(struct timespec64 *tv); 38extern void getnstimeofday64(struct timespec64 *tv);
40extern void getboottime64(struct timespec64 *ts); 39extern void getboottime64(struct timespec64 *ts);
41 40
42#define ktime_get_real_ts64(ts) getnstimeofday64(ts) 41#define ktime_get_real_ts64(ts) getnstimeofday64(ts)
43
44/* Clock BOOTTIME compatibility wrappers */
45static inline void get_monotonic_boottime64(struct timespec64 *ts)
46{
47 ktime_get_ts64(ts);
48}
49 42
50/* 43/*
51 * ktime_t based interfaces 44 * ktime_t based interfaces
52 */ 45 */
46
53enum tk_offsets { 47enum tk_offsets {
54 TK_OFFS_REAL, 48 TK_OFFS_REAL,
49 TK_OFFS_BOOT,
55 TK_OFFS_TAI, 50 TK_OFFS_TAI,
56 TK_OFFS_MAX, 51 TK_OFFS_MAX,
57}; 52};
@@ -62,10 +57,6 @@ extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs);
62extern ktime_t ktime_get_raw(void); 57extern ktime_t ktime_get_raw(void);
63extern u32 ktime_get_resolution_ns(void); 58extern u32 ktime_get_resolution_ns(void);
64 59
65/* Clock BOOTTIME compatibility wrappers */
66static inline ktime_t ktime_get_boottime(void) { return ktime_get(); }
67static inline u64 ktime_get_boot_ns(void) { return ktime_get(); }
68
69/** 60/**
70 * ktime_get_real - get the real (wall-) time in ktime_t format 61 * ktime_get_real - get the real (wall-) time in ktime_t format
71 */ 62 */
@@ -75,6 +66,17 @@ static inline ktime_t ktime_get_real(void)
75} 66}
76 67
77/** 68/**
69 * ktime_get_boottime - Returns monotonic time since boot in ktime_t format
70 *
71 * This is similar to CLOCK_MONTONIC/ktime_get, but also includes the
72 * time spent in suspend.
73 */
74static inline ktime_t ktime_get_boottime(void)
75{
76 return ktime_get_with_offset(TK_OFFS_BOOT);
77}
78
79/**
78 * ktime_get_clocktai - Returns the TAI time of day in ktime_t format 80 * ktime_get_clocktai - Returns the TAI time of day in ktime_t format
79 */ 81 */
80static inline ktime_t ktime_get_clocktai(void) 82static inline ktime_t ktime_get_clocktai(void)
@@ -100,6 +102,11 @@ static inline u64 ktime_get_real_ns(void)
100 return ktime_to_ns(ktime_get_real()); 102 return ktime_to_ns(ktime_get_real());
101} 103}
102 104
105static inline u64 ktime_get_boot_ns(void)
106{
107 return ktime_to_ns(ktime_get_boottime());
108}
109
103static inline u64 ktime_get_tai_ns(void) 110static inline u64 ktime_get_tai_ns(void)
104{ 111{
105 return ktime_to_ns(ktime_get_clocktai()); 112 return ktime_to_ns(ktime_get_clocktai());
@@ -112,11 +119,17 @@ static inline u64 ktime_get_raw_ns(void)
112 119
113extern u64 ktime_get_mono_fast_ns(void); 120extern u64 ktime_get_mono_fast_ns(void);
114extern u64 ktime_get_raw_fast_ns(void); 121extern u64 ktime_get_raw_fast_ns(void);
122extern u64 ktime_get_boot_fast_ns(void);
115extern u64 ktime_get_real_fast_ns(void); 123extern u64 ktime_get_real_fast_ns(void);
116 124
117/* 125/*
118 * timespec64 interfaces utilizing the ktime based ones 126 * timespec64 interfaces utilizing the ktime based ones
119 */ 127 */
128static inline void get_monotonic_boottime64(struct timespec64 *ts)
129{
130 *ts = ktime_to_timespec64(ktime_get_boottime());
131}
132
120static inline void timekeeping_clocktai64(struct timespec64 *ts) 133static inline void timekeeping_clocktai64(struct timespec64 *ts)
121{ 134{
122 *ts = ktime_to_timespec64(ktime_get_clocktai()); 135 *ts = ktime_to_timespec64(ktime_get_clocktai());
diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h
index af4114d5dc17..3616b4becb59 100644
--- a/include/linux/timekeeping32.h
+++ b/include/linux/timekeeping32.h
@@ -9,9 +9,6 @@
9extern void do_gettimeofday(struct timeval *tv); 9extern void do_gettimeofday(struct timeval *tv);
10unsigned long get_seconds(void); 10unsigned long get_seconds(void);
11 11
12/* does not take xtime_lock */
13struct timespec __current_kernel_time(void);
14
15static inline struct timespec current_kernel_time(void) 12static inline struct timespec current_kernel_time(void)
16{ 13{
17 struct timespec64 now = current_kernel_time64(); 14 struct timespec64 now = current_kernel_time64();
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 2448f9cc48a3..7b066fd38248 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -8,8 +8,6 @@
8#include <linux/debugobjects.h> 8#include <linux/debugobjects.h>
9#include <linux/stringify.h> 9#include <linux/stringify.h>
10 10
11struct tvec_base;
12
13struct timer_list { 11struct timer_list {
14 /* 12 /*
15 * All fields that change during normal runtime grouped to the 13 * All fields that change during normal runtime grouped to the
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 47f8af22f216..1dd587ba6d88 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -701,7 +701,7 @@ extern int tty_unregister_ldisc(int disc);
701extern int tty_set_ldisc(struct tty_struct *tty, int disc); 701extern int tty_set_ldisc(struct tty_struct *tty, int disc);
702extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty); 702extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
703extern void tty_ldisc_release(struct tty_struct *tty); 703extern void tty_ldisc_release(struct tty_struct *tty);
704extern void tty_ldisc_init(struct tty_struct *tty); 704extern int __must_check tty_ldisc_init(struct tty_struct *tty);
705extern void tty_ldisc_deinit(struct tty_struct *tty); 705extern void tty_ldisc_deinit(struct tty_struct *tty);
706extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p, 706extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
707 char *f, int count); 707 char *f, int count);
diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h
index c71def6b310f..a240ed2a0372 100644
--- a/include/linux/vbox_utils.h
+++ b/include/linux/vbox_utils.h
@@ -24,24 +24,6 @@ __printf(1, 2) void vbg_debug(const char *fmt, ...);
24#define vbg_debug pr_debug 24#define vbg_debug pr_debug
25#endif 25#endif
26 26
27/**
28 * Allocate memory for generic request and initialize the request header.
29 *
30 * Return: the allocated memory
31 * @len: Size of memory block required for the request.
32 * @req_type: The generic request type.
33 */
34void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
35
36/**
37 * Perform a generic request.
38 *
39 * Return: VBox status code
40 * @gdev: The Guest extension device.
41 * @req: Pointer to the request structure.
42 */
43int vbg_req_perform(struct vbg_dev *gdev, void *req);
44
45int vbg_hgcm_connect(struct vbg_dev *gdev, 27int vbg_hgcm_connect(struct vbg_dev *gdev,
46 struct vmmdev_hgcm_service_location *loc, 28 struct vmmdev_hgcm_service_location *loc,
47 u32 *client_id, int *vbox_status); 29 u32 *client_id, int *vbox_status);
@@ -52,11 +34,6 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
52 u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms, 34 u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
53 u32 parm_count, int *vbox_status); 35 u32 parm_count, int *vbox_status);
54 36
55int vbg_hgcm_call32(
56 struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
57 struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
58 int *vbox_status);
59
60/** 37/**
61 * Convert a VirtualBox status code to a standard Linux kernel return value. 38 * Convert a VirtualBox status code to a standard Linux kernel return value.
62 * Return: 0 or negative errno value. 39 * Return: 0 or negative errno value.
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 988c7355bc22..fa1b5da2804e 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -157,6 +157,9 @@ int virtio_device_freeze(struct virtio_device *dev);
157int virtio_device_restore(struct virtio_device *dev); 157int virtio_device_restore(struct virtio_device *dev);
158#endif 158#endif
159 159
160#define virtio_device_for_each_vq(vdev, vq) \
161 list_for_each_entry(vq, &vdev->vqs, list)
162
160/** 163/**
161 * virtio_driver - operations for a virtio I/O driver 164 * virtio_driver - operations for a virtio I/O driver
162 * @driver: underlying device driver (populate name and owner). 165 * @driver: underlying device driver (populate name and owner).
diff --git a/include/net/ife.h b/include/net/ife.h
index 44b9c00f7223..e117617e3c34 100644
--- a/include/net/ife.h
+++ b/include/net/ife.h
@@ -12,7 +12,8 @@
12void *ife_encode(struct sk_buff *skb, u16 metalen); 12void *ife_encode(struct sk_buff *skb, u16 metalen);
13void *ife_decode(struct sk_buff *skb, u16 *metalen); 13void *ife_decode(struct sk_buff *skb, u16 *metalen);
14 14
15void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, u16 *totlen); 15void *ife_tlv_meta_decode(void *skbdata, const void *ifehdr_end, u16 *attrtype,
16 u16 *dlen, u16 *totlen);
16int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, 17int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
17 const void *dval); 18 const void *dval);
18 19
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index 5c40f118c0fa..df528a623548 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -97,6 +97,7 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
97 97
98struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, 98struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
99 struct proto *prot, int kern); 99 struct proto *prot, int kern);
100void llc_sk_stop_all_timers(struct sock *sk, bool sync);
100void llc_sk_free(struct sock *sk); 101void llc_sk_free(struct sock *sk);
101 102
102void llc_sk_reset(struct sock *sk); 103void llc_sk_reset(struct sock *sk);
diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h
index 50df5b28d2c9..8ee8991aa099 100644
--- a/include/soc/bcm2835/raspberrypi-firmware.h
+++ b/include/soc/bcm2835/raspberrypi-firmware.h
@@ -143,13 +143,13 @@ struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node);
143static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag, 143static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag,
144 void *data, size_t len) 144 void *data, size_t len)
145{ 145{
146 return 0; 146 return -ENOSYS;
147} 147}
148 148
149static inline int rpi_firmware_property_list(struct rpi_firmware *fw, 149static inline int rpi_firmware_property_list(struct rpi_firmware *fw,
150 void *data, size_t tag_size) 150 void *data, size_t tag_size)
151{ 151{
152 return 0; 152 return -ENOSYS;
153} 153}
154 154
155static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node) 155static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
diff --git a/include/sound/control.h b/include/sound/control.h
index ca13a44ae9d4..6011a58d3e20 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include <linux/wait.h> 25#include <linux/wait.h>
26#include <linux/nospec.h>
26#include <sound/asound.h> 27#include <sound/asound.h>
27 28
28#define snd_kcontrol_chip(kcontrol) ((kcontrol)->private_data) 29#define snd_kcontrol_chip(kcontrol) ((kcontrol)->private_data)
@@ -148,12 +149,14 @@ int snd_ctl_get_preferred_subdevice(struct snd_card *card, int type);
148 149
149static inline unsigned int snd_ctl_get_ioffnum(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id) 150static inline unsigned int snd_ctl_get_ioffnum(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
150{ 151{
151 return id->numid - kctl->id.numid; 152 unsigned int ioff = id->numid - kctl->id.numid;
153 return array_index_nospec(ioff, kctl->count);
152} 154}
153 155
154static inline unsigned int snd_ctl_get_ioffidx(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id) 156static inline unsigned int snd_ctl_get_ioffidx(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
155{ 157{
156 return id->index - kctl->id.index; 158 unsigned int ioff = id->index - kctl->id.index;
159 return array_index_nospec(ioff, kctl->count);
157} 160}
158 161
159static inline unsigned int snd_ctl_get_ioff(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id) 162static inline unsigned int snd_ctl_get_ioff(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
index 2f057a494d93..9a761bc6a251 100644
--- a/include/trace/events/workqueue.h
+++ b/include/trace/events/workqueue.h
@@ -25,6 +25,8 @@ DECLARE_EVENT_CLASS(workqueue_work,
25 TP_printk("work struct %p", __entry->work) 25 TP_printk("work struct %p", __entry->work)
26); 26);
27 27
28struct pool_workqueue;
29
28/** 30/**
29 * workqueue_queue_work - called when a work gets queued 31 * workqueue_queue_work - called when a work gets queued
30 * @req_cpu: the requested cpu 32 * @req_cpu: the requested cpu
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 1065006c9bf5..b02c41e53d56 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -676,6 +676,13 @@ struct kvm_ioeventfd {
676 __u8 pad[36]; 676 __u8 pad[36];
677}; 677};
678 678
679#define KVM_X86_DISABLE_EXITS_MWAIT (1 << 0)
680#define KVM_X86_DISABLE_EXITS_HTL (1 << 1)
681#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2)
682#define KVM_X86_DISABLE_VALID_EXITS (KVM_X86_DISABLE_EXITS_MWAIT | \
683 KVM_X86_DISABLE_EXITS_HTL | \
684 KVM_X86_DISABLE_EXITS_PAUSE)
685
679/* for KVM_ENABLE_CAP */ 686/* for KVM_ENABLE_CAP */
680struct kvm_enable_cap { 687struct kvm_enable_cap {
681 /* in */ 688 /* in */
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 912b85b52344..b8e288a1f740 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -650,11 +650,23 @@ struct perf_event_mmap_page {
650#define PERF_RECORD_MISC_COMM_EXEC (1 << 13) 650#define PERF_RECORD_MISC_COMM_EXEC (1 << 13)
651#define PERF_RECORD_MISC_SWITCH_OUT (1 << 13) 651#define PERF_RECORD_MISC_SWITCH_OUT (1 << 13)
652/* 652/*
653 * Indicates that the content of PERF_SAMPLE_IP points to 653 * These PERF_RECORD_MISC_* flags below are safely reused
654 * the actual instruction that triggered the event. See also 654 * for the following events:
655 * perf_event_attr::precise_ip. 655 *
656 * PERF_RECORD_MISC_EXACT_IP - PERF_RECORD_SAMPLE of precise events
657 * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events
658 *
659 *
660 * PERF_RECORD_MISC_EXACT_IP:
661 * Indicates that the content of PERF_SAMPLE_IP points to
662 * the actual instruction that triggered the event. See also
663 * perf_event_attr::precise_ip.
664 *
665 * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT:
666 * Indicates that thread was preempted in TASK_RUNNING state.
656 */ 667 */
657#define PERF_RECORD_MISC_EXACT_IP (1 << 14) 668#define PERF_RECORD_MISC_EXACT_IP (1 << 14)
669#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14)
658/* 670/*
659 * Reserve the last bit to indicate some extended misc field 671 * Reserve the last bit to indicate some extended misc field
660 */ 672 */
diff --git a/include/uapi/linux/random.h b/include/uapi/linux/random.h
index c34f4490d025..26ee91300e3e 100644
--- a/include/uapi/linux/random.h
+++ b/include/uapi/linux/random.h
@@ -35,6 +35,9 @@
35/* Clear the entropy pool and associated counters. (Superuser only.) */ 35/* Clear the entropy pool and associated counters. (Superuser only.) */
36#define RNDCLEARPOOL _IO( 'R', 0x06 ) 36#define RNDCLEARPOOL _IO( 'R', 0x06 )
37 37
38/* Reseed CRNG. (Superuser only.) */
39#define RNDRESEEDCRNG _IO( 'R', 0x07 )
40
38struct rand_pool_info { 41struct rand_pool_info {
39 int entropy_count; 42 int entropy_count;
40 int buf_size; 43 int buf_size;
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index 0f272818a4d2..6b58371b1f0d 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -780,24 +780,6 @@ enum {
780 NET_BRIDGE_NF_FILTER_PPPOE_TAGGED = 5, 780 NET_BRIDGE_NF_FILTER_PPPOE_TAGGED = 5,
781}; 781};
782 782
783/* proc/sys/net/irda */
784enum {
785 NET_IRDA_DISCOVERY=1,
786 NET_IRDA_DEVNAME=2,
787 NET_IRDA_DEBUG=3,
788 NET_IRDA_FAST_POLL=4,
789 NET_IRDA_DISCOVERY_SLOTS=5,
790 NET_IRDA_DISCOVERY_TIMEOUT=6,
791 NET_IRDA_SLOT_TIMEOUT=7,
792 NET_IRDA_MAX_BAUD_RATE=8,
793 NET_IRDA_MIN_TX_TURN_TIME=9,
794 NET_IRDA_MAX_TX_DATA_SIZE=10,
795 NET_IRDA_MAX_TX_WINDOW=11,
796 NET_IRDA_MAX_NOREPLY_TIME=12,
797 NET_IRDA_WARN_NOREPLY_TIME=13,
798 NET_IRDA_LAP_KEEPALIVE_TIME=14,
799};
800
801 783
802/* CTL_FS names: */ 784/* CTL_FS names: */
803enum 785enum
diff --git a/include/uapi/linux/time.h b/include/uapi/linux/time.h
index 16a296612ba4..4c0338ea308a 100644
--- a/include/uapi/linux/time.h
+++ b/include/uapi/linux/time.h
@@ -73,7 +73,6 @@ struct __kernel_old_timeval {
73 */ 73 */
74#define CLOCK_SGI_CYCLE 10 74#define CLOCK_SGI_CYCLE 10
75#define CLOCK_TAI 11 75#define CLOCK_TAI 11
76#define CLOCK_MONOTONIC_ACTIVE 12
77 76
78#define MAX_CLOCKS 16 77#define MAX_CLOCKS 16
79#define CLOCKS_MASK (CLOCK_REALTIME | CLOCK_MONOTONIC) 78#define CLOCKS_MASK (CLOCK_REALTIME | CLOCK_MONOTONIC)
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index 40297a3181ed..13b8cb563892 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -57,6 +57,21 @@ struct virtio_balloon_config {
57#define VIRTIO_BALLOON_S_HTLB_PGFAIL 9 /* Hugetlb page allocation failures */ 57#define VIRTIO_BALLOON_S_HTLB_PGFAIL 9 /* Hugetlb page allocation failures */
58#define VIRTIO_BALLOON_S_NR 10 58#define VIRTIO_BALLOON_S_NR 10
59 59
60#define VIRTIO_BALLOON_S_NAMES_WITH_PREFIX(VIRTIO_BALLOON_S_NAMES_prefix) { \
61 VIRTIO_BALLOON_S_NAMES_prefix "swap-in", \
62 VIRTIO_BALLOON_S_NAMES_prefix "swap-out", \
63 VIRTIO_BALLOON_S_NAMES_prefix "major-faults", \
64 VIRTIO_BALLOON_S_NAMES_prefix "minor-faults", \
65 VIRTIO_BALLOON_S_NAMES_prefix "free-memory", \
66 VIRTIO_BALLOON_S_NAMES_prefix "total-memory", \
67 VIRTIO_BALLOON_S_NAMES_prefix "available-memory", \
68 VIRTIO_BALLOON_S_NAMES_prefix "disk-caches", \
69 VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-allocations", \
70 VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-failures" \
71}
72
73#define VIRTIO_BALLOON_S_NAMES VIRTIO_BALLOON_S_NAMES_WITH_PREFIX("")
74
60/* 75/*
61 * Memory statistics structure. 76 * Memory statistics structure.
62 * Driver fills an array of these structures and passes to device. 77 * Driver fills an array of these structures and passes to device.
diff --git a/include/xen/interface/io/sndif.h b/include/xen/interface/io/sndif.h
index 5c918276835e..78bb5d9f8d83 100644
--- a/include/xen/interface/io/sndif.h
+++ b/include/xen/interface/io/sndif.h
@@ -38,6 +38,13 @@
38 38
39/* 39/*
40 ****************************************************************************** 40 ******************************************************************************
41 * Protocol version
42 ******************************************************************************
43 */
44#define XENSND_PROTOCOL_VERSION 2
45
46/*
47 ******************************************************************************
41 * Feature and Parameter Negotiation 48 * Feature and Parameter Negotiation
42 ****************************************************************************** 49 ******************************************************************************
43 * 50 *
@@ -106,6 +113,8 @@
106 * 113 *
107 * /local/domain/1/device/vsnd/0/0/0/ring-ref = "386" 114 * /local/domain/1/device/vsnd/0/0/0/ring-ref = "386"
108 * /local/domain/1/device/vsnd/0/0/0/event-channel = "15" 115 * /local/domain/1/device/vsnd/0/0/0/event-channel = "15"
116 * /local/domain/1/device/vsnd/0/0/0/evt-ring-ref = "1386"
117 * /local/domain/1/device/vsnd/0/0/0/evt-event-channel = "215"
109 * 118 *
110 *------------------------------ Stream 1, capture ---------------------------- 119 *------------------------------ Stream 1, capture ----------------------------
111 * 120 *
@@ -115,6 +124,8 @@
115 * 124 *
116 * /local/domain/1/device/vsnd/0/0/1/ring-ref = "384" 125 * /local/domain/1/device/vsnd/0/0/1/ring-ref = "384"
117 * /local/domain/1/device/vsnd/0/0/1/event-channel = "13" 126 * /local/domain/1/device/vsnd/0/0/1/event-channel = "13"
127 * /local/domain/1/device/vsnd/0/0/1/evt-ring-ref = "1384"
128 * /local/domain/1/device/vsnd/0/0/1/evt-event-channel = "213"
118 * 129 *
119 *------------------------------- PCM device 1 -------------------------------- 130 *------------------------------- PCM device 1 --------------------------------
120 * 131 *
@@ -128,6 +139,8 @@
128 * 139 *
129 * /local/domain/1/device/vsnd/0/1/0/ring-ref = "387" 140 * /local/domain/1/device/vsnd/0/1/0/ring-ref = "387"
130 * /local/domain/1/device/vsnd/0/1/0/event-channel = "151" 141 * /local/domain/1/device/vsnd/0/1/0/event-channel = "151"
142 * /local/domain/1/device/vsnd/0/1/0/evt-ring-ref = "1387"
143 * /local/domain/1/device/vsnd/0/1/0/evt-event-channel = "351"
131 * 144 *
132 *------------------------------- PCM device 2 -------------------------------- 145 *------------------------------- PCM device 2 --------------------------------
133 * 146 *
@@ -140,6 +153,8 @@
140 * 153 *
141 * /local/domain/1/device/vsnd/0/2/0/ring-ref = "389" 154 * /local/domain/1/device/vsnd/0/2/0/ring-ref = "389"
142 * /local/domain/1/device/vsnd/0/2/0/event-channel = "152" 155 * /local/domain/1/device/vsnd/0/2/0/event-channel = "152"
156 * /local/domain/1/device/vsnd/0/2/0/evt-ring-ref = "1389"
157 * /local/domain/1/device/vsnd/0/2/0/evt-event-channel = "452"
143 * 158 *
144 ****************************************************************************** 159 ******************************************************************************
145 * Backend XenBus Nodes 160 * Backend XenBus Nodes
@@ -285,6 +300,23 @@
285 * The Xen grant reference granting permission for the backend to map 300 * The Xen grant reference granting permission for the backend to map
286 * a sole page in a single page sized ring buffer. 301 * a sole page in a single page sized ring buffer.
287 * 302 *
303 *--------------------- Stream Event Transport Parameters ---------------------
304 *
305 * This communication path is used to deliver asynchronous events from backend
306 * to frontend, set up per stream.
307 *
308 * evt-event-channel
309 * Values: <uint32_t>
310 *
311 * The identifier of the Xen event channel used to signal activity
312 * in the ring buffer.
313 *
314 * evt-ring-ref
315 * Values: <uint32_t>
316 *
317 * The Xen grant reference granting permission for the backend to map
318 * a sole page in a single page sized ring buffer.
319 *
288 ****************************************************************************** 320 ******************************************************************************
289 * STATE DIAGRAMS 321 * STATE DIAGRAMS
290 ****************************************************************************** 322 ******************************************************************************
@@ -432,6 +464,20 @@
432#define XENSND_OP_GET_VOLUME 5 464#define XENSND_OP_GET_VOLUME 5
433#define XENSND_OP_MUTE 6 465#define XENSND_OP_MUTE 6
434#define XENSND_OP_UNMUTE 7 466#define XENSND_OP_UNMUTE 7
467#define XENSND_OP_TRIGGER 8
468#define XENSND_OP_HW_PARAM_QUERY 9
469
470#define XENSND_OP_TRIGGER_START 0
471#define XENSND_OP_TRIGGER_PAUSE 1
472#define XENSND_OP_TRIGGER_STOP 2
473#define XENSND_OP_TRIGGER_RESUME 3
474
475/*
476 ******************************************************************************
477 * EVENT CODES
478 ******************************************************************************
479 */
480#define XENSND_EVT_CUR_POS 0
435 481
436/* 482/*
437 ****************************************************************************** 483 ******************************************************************************
@@ -448,6 +494,8 @@
448#define XENSND_FIELD_VCARD_LONG_NAME "long-name" 494#define XENSND_FIELD_VCARD_LONG_NAME "long-name"
449#define XENSND_FIELD_RING_REF "ring-ref" 495#define XENSND_FIELD_RING_REF "ring-ref"
450#define XENSND_FIELD_EVT_CHNL "event-channel" 496#define XENSND_FIELD_EVT_CHNL "event-channel"
497#define XENSND_FIELD_EVT_RING_REF "evt-ring-ref"
498#define XENSND_FIELD_EVT_EVT_CHNL "evt-event-channel"
451#define XENSND_FIELD_DEVICE_NAME "name" 499#define XENSND_FIELD_DEVICE_NAME "name"
452#define XENSND_FIELD_TYPE "type" 500#define XENSND_FIELD_TYPE "type"
453#define XENSND_FIELD_STREAM_UNIQUE_ID "unique-id" 501#define XENSND_FIELD_STREAM_UNIQUE_ID "unique-id"
@@ -526,7 +574,7 @@
526 * 574 *
527 *---------------------------------- Requests --------------------------------- 575 *---------------------------------- Requests ---------------------------------
528 * 576 *
529 * All request packets have the same length (32 octets) 577 * All request packets have the same length (64 octets)
530 * All request packets have common header: 578 * All request packets have common header:
531 * 0 1 2 3 octet 579 * 0 1 2 3 octet
532 * +----------------+----------------+----------------+----------------+ 580 * +----------------+----------------+----------------+----------------+
@@ -559,11 +607,13 @@
559 * +----------------+----------------+----------------+----------------+ 607 * +----------------+----------------+----------------+----------------+
560 * | gref_directory | 24 608 * | gref_directory | 24
561 * +----------------+----------------+----------------+----------------+ 609 * +----------------+----------------+----------------+----------------+
562 * | reserved | 28 610 * | period_sz | 28
611 * +----------------+----------------+----------------+----------------+
612 * | reserved | 32
563 * +----------------+----------------+----------------+----------------+ 613 * +----------------+----------------+----------------+----------------+
564 * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| 614 * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
565 * +----------------+----------------+----------------+----------------+ 615 * +----------------+----------------+----------------+----------------+
566 * | reserved | 32 616 * | reserved | 64
567 * +----------------+----------------+----------------+----------------+ 617 * +----------------+----------------+----------------+----------------+
568 * 618 *
569 * pcm_rate - uint32_t, stream data rate, Hz 619 * pcm_rate - uint32_t, stream data rate, Hz
@@ -571,6 +621,14 @@
571 * pcm_channels - uint8_t, number of channels of this stream, 621 * pcm_channels - uint8_t, number of channels of this stream,
572 * [channels-min; channels-max] 622 * [channels-min; channels-max]
573 * buffer_sz - uint32_t, buffer size to be allocated, octets 623 * buffer_sz - uint32_t, buffer size to be allocated, octets
624 * period_sz - uint32_t, event period size, octets
625 * This is the requested value of the period at which frontend would
626 * like to receive XENSND_EVT_CUR_POS notifications from the backend when
627 * stream position advances during playback/capture.
628 * It shows how many octets are expected to be played/captured before
629 * sending such an event.
630 * If set to 0 no XENSND_EVT_CUR_POS events are sent by the backend.
631 *
574 * gref_directory - grant_ref_t, a reference to the first shared page 632 * gref_directory - grant_ref_t, a reference to the first shared page
575 * describing shared buffer references. At least one page exists. If shared 633 * describing shared buffer references. At least one page exists. If shared
576 * buffer size (buffer_sz) exceeds what can be addressed by this single page, 634 * buffer size (buffer_sz) exceeds what can be addressed by this single page,
@@ -585,6 +643,7 @@ struct xensnd_open_req {
585 uint16_t reserved; 643 uint16_t reserved;
586 uint32_t buffer_sz; 644 uint32_t buffer_sz;
587 grant_ref_t gref_directory; 645 grant_ref_t gref_directory;
646 uint32_t period_sz;
588}; 647};
589 648
590/* 649/*
@@ -632,7 +691,7 @@ struct xensnd_page_directory {
632 * +----------------+----------------+----------------+----------------+ 691 * +----------------+----------------+----------------+----------------+
633 * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| 692 * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
634 * +----------------+----------------+----------------+----------------+ 693 * +----------------+----------------+----------------+----------------+
635 * | reserved | 32 694 * | reserved | 64
636 * +----------------+----------------+----------------+----------------+ 695 * +----------------+----------------+----------------+----------------+
637 * 696 *
638 * Request read/write - used for read (for capture) or write (for playback): 697 * Request read/write - used for read (for capture) or write (for playback):
@@ -650,7 +709,7 @@ struct xensnd_page_directory {
650 * +----------------+----------------+----------------+----------------+ 709 * +----------------+----------------+----------------+----------------+
651 * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| 710 * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
652 * +----------------+----------------+----------------+----------------+ 711 * +----------------+----------------+----------------+----------------+
653 * | reserved | 32 712 * | reserved | 64
654 * +----------------+----------------+----------------+----------------+ 713 * +----------------+----------------+----------------+----------------+
655 * 714 *
656 * operation - XENSND_OP_READ for read or XENSND_OP_WRITE for write 715 * operation - XENSND_OP_READ for read or XENSND_OP_WRITE for write
@@ -673,9 +732,11 @@ struct xensnd_rw_req {
673 * +----------------+----------------+----------------+----------------+ 732 * +----------------+----------------+----------------+----------------+
674 * | length | 16 733 * | length | 16
675 * +----------------+----------------+----------------+----------------+ 734 * +----------------+----------------+----------------+----------------+
735 * | reserved | 20
736 * +----------------+----------------+----------------+----------------+
676 * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| 737 * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
677 * +----------------+----------------+----------------+----------------+ 738 * +----------------+----------------+----------------+----------------+
678 * | reserved | 32 739 * | reserved | 64
679 * +----------------+----------------+----------------+----------------+ 740 * +----------------+----------------+----------------+----------------+
680 * 741 *
681 * operation - XENSND_OP_SET_VOLUME for volume set 742 * operation - XENSND_OP_SET_VOLUME for volume set
@@ -713,9 +774,11 @@ struct xensnd_rw_req {
713 * +----------------+----------------+----------------+----------------+ 774 * +----------------+----------------+----------------+----------------+
714 * | length | 16 775 * | length | 16
715 * +----------------+----------------+----------------+----------------+ 776 * +----------------+----------------+----------------+----------------+
777 * | reserved | 20
778 * +----------------+----------------+----------------+----------------+
716 * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| 779 * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
717 * +----------------+----------------+----------------+----------------+ 780 * +----------------+----------------+----------------+----------------+
718 * | reserved | 32 781 * | reserved | 64
719 * +----------------+----------------+----------------+----------------+ 782 * +----------------+----------------+----------------+----------------+
720 * 783 *
721 * operation - XENSND_OP_MUTE for mute or XENSND_OP_UNMUTE for unmute 784 * operation - XENSND_OP_MUTE for mute or XENSND_OP_UNMUTE for unmute
@@ -743,32 +806,213 @@ struct xensnd_rw_req {
743 * 806 *
744 * The 'struct xensnd_rw_req' is also used for XENSND_OP_SET_VOLUME, 807 * The 'struct xensnd_rw_req' is also used for XENSND_OP_SET_VOLUME,
745 * XENSND_OP_GET_VOLUME, XENSND_OP_MUTE, XENSND_OP_UNMUTE. 808 * XENSND_OP_GET_VOLUME, XENSND_OP_MUTE, XENSND_OP_UNMUTE.
809 *
810 * Request stream running state change - trigger PCM stream running state
811 * to start, stop, pause or resume:
812 *
813 * 0 1 2 3 octet
814 * +----------------+----------------+----------------+----------------+
815 * | id | _OP_TRIGGER | reserved | 4
816 * +----------------+----------------+----------------+----------------+
817 * | reserved | 8
818 * +----------------+----------------+----------------+----------------+
819 * | type | reserved | 12
820 * +----------------+----------------+----------------+----------------+
821 * | reserved | 16
822 * +----------------+----------------+----------------+----------------+
823 * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
824 * +----------------+----------------+----------------+----------------+
825 * | reserved | 64
826 * +----------------+----------------+----------------+----------------+
827 *
828 * type - uint8_t, XENSND_OP_TRIGGER_XXX value
746 */ 829 */
747 830
831struct xensnd_trigger_req {
832 uint8_t type;
833};
834
748/* 835/*
749 *---------------------------------- Responses -------------------------------- 836 * Request stream parameter ranges: request intervals and
837 * masks of supported ranges for stream configuration values.
750 * 838 *
751 * All response packets have the same length (32 octets) 839 * Sound device configuration for a particular stream is a limited subset
840 * of the multidimensional configuration available on XenStore, e.g.
841 * once the frame rate has been selected there is a limited supported range
842 * for sample rates becomes available (which might be the same set configured
843 * on XenStore or less). For example, selecting 96kHz sample rate may limit
844 * number of channels available for such configuration from 4 to 2, etc.
845 * Thus, each call to XENSND_OP_HW_PARAM_QUERY may reduce configuration
846 * space making it possible to iteratively get the final stream configuration,
847 * used in XENSND_OP_OPEN request.
848 *
849 * See response format for this request.
752 * 850 *
753 * Response for all requests:
754 * 0 1 2 3 octet 851 * 0 1 2 3 octet
755 * +----------------+----------------+----------------+----------------+ 852 * +----------------+----------------+----------------+----------------+
756 * | id | operation | reserved | 4 853 * | id | _HW_PARAM_QUERY| reserved | 4
757 * +----------------+----------------+----------------+----------------+ 854 * +----------------+----------------+----------------+----------------+
758 * | status | 8 855 * | reserved | 8
856 * +----------------+----------------+----------------+----------------+
857 * | formats mask low 32-bit | 12
858 * +----------------+----------------+----------------+----------------+
859 * | formats mask high 32-bit | 16
759 * +----------------+----------------+----------------+----------------+ 860 * +----------------+----------------+----------------+----------------+
760 * | reserved | 12 861 * | min rate | 20
862 * +----------------+----------------+----------------+----------------+
863 * | max rate | 24
864 * +----------------+----------------+----------------+----------------+
865 * | min channels | 28
866 * +----------------+----------------+----------------+----------------+
867 * | max channels | 32
868 * +----------------+----------------+----------------+----------------+
869 * | min buffer frames | 36
870 * +----------------+----------------+----------------+----------------+
871 * | max buffer frames | 40
872 * +----------------+----------------+----------------+----------------+
873 * | min period frames | 44
874 * +----------------+----------------+----------------+----------------+
875 * | max period frames | 48
876 * +----------------+----------------+----------------+----------------+
877 * | reserved | 52
761 * +----------------+----------------+----------------+----------------+ 878 * +----------------+----------------+----------------+----------------+
762 * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| 879 * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
763 * +----------------+----------------+----------------+----------------+ 880 * +----------------+----------------+----------------+----------------+
764 * | reserved | 32 881 * | reserved | 64
882 * +----------------+----------------+----------------+----------------+
883 *
884 * formats - uint64_t, bit mask representing values of the parameter
885 * made as bitwise OR of (1 << XENSND_PCM_FORMAT_XXX) values
886 *
887 * For interval parameters:
888 * min - uint32_t, minimum value of the parameter
889 * max - uint32_t, maximum value of the parameter
890 *
891 * Frame is defined as a product of the number of channels by the
892 * number of octets per one sample.
893 */
894
895struct xensnd_query_hw_param {
896 uint64_t formats;
897 struct {
898 uint32_t min;
899 uint32_t max;
900 } rates;
901 struct {
902 uint32_t min;
903 uint32_t max;
904 } channels;
905 struct {
906 uint32_t min;
907 uint32_t max;
908 } buffer;
909 struct {
910 uint32_t min;
911 uint32_t max;
912 } period;
913};
914
915/*
916 *---------------------------------- Responses --------------------------------
917 *
918 * All response packets have the same length (64 octets)
919 *
920 * All response packets have common header:
921 * 0 1 2 3 octet
922 * +----------------+----------------+----------------+----------------+
923 * | id | operation | reserved | 4
924 * +----------------+----------------+----------------+----------------+
925 * | status | 8
765 * +----------------+----------------+----------------+----------------+ 926 * +----------------+----------------+----------------+----------------+
766 * 927 *
767 * id - uint16_t, copied from the request 928 * id - uint16_t, copied from the request
768 * operation - uint8_t, XENSND_OP_* - copied from request 929 * operation - uint8_t, XENSND_OP_* - copied from request
769 * status - int32_t, response status, zero on success and -XEN_EXX on failure 930 * status - int32_t, response status, zero on success and -XEN_EXX on failure
931 *
932 *
933 * HW parameter query response - response for XENSND_OP_HW_PARAM_QUERY:
934 * 0 1 2 3 octet
935 * +----------------+----------------+----------------+----------------+
936 * | id | operation | reserved | 4
937 * +----------------+----------------+----------------+----------------+
938 * | status | 8
939 * +----------------+----------------+----------------+----------------+
940 * | formats mask low 32-bit | 12
941 * +----------------+----------------+----------------+----------------+
942 * | formats mask high 32-bit | 16
943 * +----------------+----------------+----------------+----------------+
944 * | min rate | 20
945 * +----------------+----------------+----------------+----------------+
946 * | max rate | 24
947 * +----------------+----------------+----------------+----------------+
948 * | min channels | 28
949 * +----------------+----------------+----------------+----------------+
950 * | max channels | 32
951 * +----------------+----------------+----------------+----------------+
952 * | min buffer frames | 36
953 * +----------------+----------------+----------------+----------------+
954 * | max buffer frames | 40
955 * +----------------+----------------+----------------+----------------+
956 * | min period frames | 44
957 * +----------------+----------------+----------------+----------------+
958 * | max period frames | 48
959 * +----------------+----------------+----------------+----------------+
960 * | reserved | 52
961 * +----------------+----------------+----------------+----------------+
962 * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
963 * +----------------+----------------+----------------+----------------+
964 * | reserved | 64
965 * +----------------+----------------+----------------+----------------+
966 *
967 * Meaning of the values in this response is the same as for
968 * XENSND_OP_HW_PARAM_QUERY request.
969 */
970
971/*
972 *----------------------------------- Events ----------------------------------
973 *
974 * Events are sent via shared page allocated by the front and propagated by
975 * evt-event-channel/evt-ring-ref XenStore entries
976 * All event packets have the same length (64 octets)
977 * All event packets have common header:
978 * 0 1 2 3 octet
979 * +----------------+----------------+----------------+----------------+
980 * | id | type | reserved | 4
981 * +----------------+----------------+----------------+----------------+
982 * | reserved | 8
983 * +----------------+----------------+----------------+----------------+
984 *
985 * id - uint16_t, event id, may be used by front
986 * type - uint8_t, type of the event
987 *
988 *
989 * Current stream position - event from back to front when stream's
990 * playback/capture position has advanced:
991 * 0 1 2 3 octet
992 * +----------------+----------------+----------------+----------------+
993 * | id | _EVT_CUR_POS | reserved | 4
994 * +----------------+----------------+----------------+----------------+
995 * | reserved | 8
996 * +----------------+----------------+----------------+----------------+
997 * | position low 32-bit | 12
998 * +----------------+----------------+----------------+----------------+
999 * | position high 32-bit | 16
1000 * +----------------+----------------+----------------+----------------+
1001 * | reserved | 20
1002 * +----------------+----------------+----------------+----------------+
1003 * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
1004 * +----------------+----------------+----------------+----------------+
1005 * | reserved | 64
1006 * +----------------+----------------+----------------+----------------+
1007 *
1008 * position - current value of stream's playback/capture position, octets
1009 *
770 */ 1010 */
771 1011
1012struct xensnd_cur_pos_evt {
1013 uint64_t position;
1014};
1015
772struct xensnd_req { 1016struct xensnd_req {
773 uint16_t id; 1017 uint16_t id;
774 uint8_t operation; 1018 uint8_t operation;
@@ -776,7 +1020,9 @@ struct xensnd_req {
776 union { 1020 union {
777 struct xensnd_open_req open; 1021 struct xensnd_open_req open;
778 struct xensnd_rw_req rw; 1022 struct xensnd_rw_req rw;
779 uint8_t reserved[24]; 1023 struct xensnd_trigger_req trigger;
1024 struct xensnd_query_hw_param hw_param;
1025 uint8_t reserved[56];
780 } op; 1026 } op;
781}; 1027};
782 1028
@@ -785,9 +1031,53 @@ struct xensnd_resp {
785 uint8_t operation; 1031 uint8_t operation;
786 uint8_t reserved; 1032 uint8_t reserved;
787 int32_t status; 1033 int32_t status;
788 uint8_t reserved1[24]; 1034 union {
1035 struct xensnd_query_hw_param hw_param;
1036 uint8_t reserved1[56];
1037 } resp;
1038};
1039
1040struct xensnd_evt {
1041 uint16_t id;
1042 uint8_t type;
1043 uint8_t reserved[5];
1044 union {
1045 struct xensnd_cur_pos_evt cur_pos;
1046 uint8_t reserved[56];
1047 } op;
789}; 1048};
790 1049
791DEFINE_RING_TYPES(xen_sndif, struct xensnd_req, struct xensnd_resp); 1050DEFINE_RING_TYPES(xen_sndif, struct xensnd_req, struct xensnd_resp);
792 1051
1052/*
1053 ******************************************************************************
1054 * Back to front events delivery
1055 ******************************************************************************
1056 * In order to deliver asynchronous events from back to front a shared page is
1057 * allocated by front and its granted reference propagated to back via
1058 * XenStore entries (evt-ring-ref/evt-event-channel).
1059 * This page has a common header used by both front and back to synchronize
1060 * access and control event's ring buffer, while back being a producer of the
1061 * events and front being a consumer. The rest of the page after the header
1062 * is used for event packets.
1063 *
1064 * Upon reception of an event(s) front may confirm its reception
1065 * for either each event, group of events or none.
1066 */
1067
1068struct xensnd_event_page {
1069 uint32_t in_cons;
1070 uint32_t in_prod;
1071 uint8_t reserved[56];
1072};
1073
1074#define XENSND_EVENT_PAGE_SIZE XEN_PAGE_SIZE
1075#define XENSND_IN_RING_OFFS (sizeof(struct xensnd_event_page))
1076#define XENSND_IN_RING_SIZE (XENSND_EVENT_PAGE_SIZE - XENSND_IN_RING_OFFS)
1077#define XENSND_IN_RING_LEN (XENSND_IN_RING_SIZE / sizeof(struct xensnd_evt))
1078#define XENSND_IN_RING(page) \
1079 ((struct xensnd_evt *)((char *)(page) + XENSND_IN_RING_OFFS))
1080#define XENSND_IN_RING_REF(page, idx) \
1081 (XENSND_IN_RING((page))[(idx) % XENSND_IN_RING_LEN])
1082
793#endif /* __XEN_PUBLIC_IO_SNDIF_H__ */ 1083#endif /* __XEN_PUBLIC_IO_SNDIF_H__ */
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index d315b393abdd..ba03ec39efb3 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1572,13 +1572,32 @@ int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
1572 return cnt; 1572 return cnt;
1573} 1573}
1574 1574
1575static bool bpf_prog_array_copy_core(struct bpf_prog **prog,
1576 u32 *prog_ids,
1577 u32 request_cnt)
1578{
1579 int i = 0;
1580
1581 for (; *prog; prog++) {
1582 if (*prog == &dummy_bpf_prog.prog)
1583 continue;
1584 prog_ids[i] = (*prog)->aux->id;
1585 if (++i == request_cnt) {
1586 prog++;
1587 break;
1588 }
1589 }
1590
1591 return !!(*prog);
1592}
1593
1575int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs, 1594int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
1576 __u32 __user *prog_ids, u32 cnt) 1595 __u32 __user *prog_ids, u32 cnt)
1577{ 1596{
1578 struct bpf_prog **prog; 1597 struct bpf_prog **prog;
1579 unsigned long err = 0; 1598 unsigned long err = 0;
1580 u32 i = 0, *ids;
1581 bool nospc; 1599 bool nospc;
1600 u32 *ids;
1582 1601
1583 /* users of this function are doing: 1602 /* users of this function are doing:
1584 * cnt = bpf_prog_array_length(); 1603 * cnt = bpf_prog_array_length();
@@ -1595,16 +1614,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
1595 return -ENOMEM; 1614 return -ENOMEM;
1596 rcu_read_lock(); 1615 rcu_read_lock();
1597 prog = rcu_dereference(progs)->progs; 1616 prog = rcu_dereference(progs)->progs;
1598 for (; *prog; prog++) { 1617 nospc = bpf_prog_array_copy_core(prog, ids, cnt);
1599 if (*prog == &dummy_bpf_prog.prog)
1600 continue;
1601 ids[i] = (*prog)->aux->id;
1602 if (++i == cnt) {
1603 prog++;
1604 break;
1605 }
1606 }
1607 nospc = !!(*prog);
1608 rcu_read_unlock(); 1618 rcu_read_unlock();
1609 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32)); 1619 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
1610 kfree(ids); 1620 kfree(ids);
@@ -1683,22 +1693,25 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
1683} 1693}
1684 1694
1685int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array, 1695int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
1686 __u32 __user *prog_ids, u32 request_cnt, 1696 u32 *prog_ids, u32 request_cnt,
1687 __u32 __user *prog_cnt) 1697 u32 *prog_cnt)
1688{ 1698{
1699 struct bpf_prog **prog;
1689 u32 cnt = 0; 1700 u32 cnt = 0;
1690 1701
1691 if (array) 1702 if (array)
1692 cnt = bpf_prog_array_length(array); 1703 cnt = bpf_prog_array_length(array);
1693 1704
1694 if (copy_to_user(prog_cnt, &cnt, sizeof(cnt))) 1705 *prog_cnt = cnt;
1695 return -EFAULT;
1696 1706
1697 /* return early if user requested only program count or nothing to copy */ 1707 /* return early if user requested only program count or nothing to copy */
1698 if (!request_cnt || !cnt) 1708 if (!request_cnt || !cnt)
1699 return 0; 1709 return 0;
1700 1710
1701 return bpf_prog_array_copy_to_user(array, prog_ids, request_cnt); 1711 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
1712 prog = rcu_dereference_check(array, 1)->progs;
1713 return bpf_prog_array_copy_core(prog, prog_ids, request_cnt) ? -ENOSPC
1714 : 0;
1702} 1715}
1703 1716
1704static void bpf_prog_free_deferred(struct work_struct *work) 1717static void bpf_prog_free_deferred(struct work_struct *work)
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 8dd9210d7db7..a3b21385e947 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -1442,9 +1442,6 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
1442 attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK) 1442 attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
1443 return ERR_PTR(-EINVAL); 1443 return ERR_PTR(-EINVAL);
1444 1444
1445 if (attr->value_size > KMALLOC_MAX_SIZE)
1446 return ERR_PTR(-E2BIG);
1447
1448 err = bpf_tcp_ulp_register(); 1445 err = bpf_tcp_ulp_register();
1449 if (err && err != -EEXIST) 1446 if (err && err != -EEXIST)
1450 return ERR_PTR(err); 1447 return ERR_PTR(err);
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 772a43fea825..c187aa3df3c8 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -119,23 +119,20 @@ int get_callchain_buffers(int event_max_stack)
119 goto exit; 119 goto exit;
120 } 120 }
121 121
122 if (count > 1) { 122 /*
123 /* If the allocation failed, give up */ 123 * If requesting per event more than the global cap,
124 if (!callchain_cpus_entries) 124 * return a different error to help userspace figure
125 err = -ENOMEM; 125 * this out.
126 /* 126 *
127 * If requesting per event more than the global cap, 127 * And also do it here so that we have &callchain_mutex held.
128 * return a different error to help userspace figure 128 */
129 * this out. 129 if (event_max_stack > sysctl_perf_event_max_stack) {
130 * 130 err = -EOVERFLOW;
131 * And also do it here so that we have &callchain_mutex held.
132 */
133 if (event_max_stack > sysctl_perf_event_max_stack)
134 err = -EOVERFLOW;
135 goto exit; 131 goto exit;
136 } 132 }
137 133
138 err = alloc_callchain_buffers(); 134 if (count == 1)
135 err = alloc_callchain_buffers();
139exit: 136exit:
140 if (err) 137 if (err)
141 atomic_dec(&nr_callchain_events); 138 atomic_dec(&nr_callchain_events);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2d5fe26551f8..67612ce359ad 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7587,6 +7587,10 @@ static void perf_event_switch(struct task_struct *task,
7587 }, 7587 },
7588 }; 7588 };
7589 7589
7590 if (!sched_in && task->state == TASK_RUNNING)
7591 switch_event.event_id.header.misc |=
7592 PERF_RECORD_MISC_SWITCH_OUT_PREEMPT;
7593
7590 perf_iterate_sb(perf_event_switch_output, 7594 perf_iterate_sb(perf_event_switch_output,
7591 &switch_event, 7595 &switch_event,
7592 NULL); 7596 NULL);
@@ -10205,9 +10209,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
10205 * __u16 sample size limit. 10209 * __u16 sample size limit.
10206 */ 10210 */
10207 if (attr->sample_stack_user >= USHRT_MAX) 10211 if (attr->sample_stack_user >= USHRT_MAX)
10208 ret = -EINVAL; 10212 return -EINVAL;
10209 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64))) 10213 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
10210 ret = -EINVAL; 10214 return -EINVAL;
10211 } 10215 }
10212 10216
10213 if (!attr->sample_max_stack) 10217 if (!attr->sample_max_stack)
diff --git a/kernel/fork.c b/kernel/fork.c
index 242c8c93d285..a5d21c42acfc 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -216,10 +216,9 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
216 if (!s) 216 if (!s)
217 continue; 217 continue;
218 218
219#ifdef CONFIG_DEBUG_KMEMLEAK
220 /* Clear stale pointers from reused stack. */ 219 /* Clear stale pointers from reused stack. */
221 memset(s->addr, 0, THREAD_SIZE); 220 memset(s->addr, 0, THREAD_SIZE);
222#endif 221
223 tsk->stack_vm_area = s; 222 tsk->stack_vm_area = s;
224 return s->addr; 223 return s->addr;
225 } 224 }
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 102160ff5c66..ea619021d901 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2428,7 +2428,7 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
2428 struct kprobe_blacklist_entry *ent = 2428 struct kprobe_blacklist_entry *ent =
2429 list_entry(v, struct kprobe_blacklist_entry, list); 2429 list_entry(v, struct kprobe_blacklist_entry, list);
2430 2430
2431 seq_printf(m, "0x%p-0x%p\t%ps\n", (void *)ent->start_addr, 2431 seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
2432 (void *)ent->end_addr, (void *)ent->start_addr); 2432 (void *)ent->end_addr, (void *)ent->start_addr);
2433 return 0; 2433 return 0;
2434} 2434}
diff --git a/kernel/livepatch/shadow.c b/kernel/livepatch/shadow.c
index fdac27588d60..83958c814439 100644
--- a/kernel/livepatch/shadow.c
+++ b/kernel/livepatch/shadow.c
@@ -113,8 +113,10 @@ void *klp_shadow_get(void *obj, unsigned long id)
113} 113}
114EXPORT_SYMBOL_GPL(klp_shadow_get); 114EXPORT_SYMBOL_GPL(klp_shadow_get);
115 115
116static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, 116static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id,
117 size_t size, gfp_t gfp_flags, bool warn_on_exist) 117 size_t size, gfp_t gfp_flags,
118 klp_shadow_ctor_t ctor, void *ctor_data,
119 bool warn_on_exist)
118{ 120{
119 struct klp_shadow *new_shadow; 121 struct klp_shadow *new_shadow;
120 void *shadow_data; 122 void *shadow_data;
@@ -125,18 +127,15 @@ static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data,
125 if (shadow_data) 127 if (shadow_data)
126 goto exists; 128 goto exists;
127 129
128 /* Allocate a new shadow variable for use inside the lock below */ 130 /*
131 * Allocate a new shadow variable. Fill it with zeroes by default.
132 * More complex setting can be done by @ctor function. But it is
133 * called only when the buffer is really used (under klp_shadow_lock).
134 */
129 new_shadow = kzalloc(size + sizeof(*new_shadow), gfp_flags); 135 new_shadow = kzalloc(size + sizeof(*new_shadow), gfp_flags);
130 if (!new_shadow) 136 if (!new_shadow)
131 return NULL; 137 return NULL;
132 138
133 new_shadow->obj = obj;
134 new_shadow->id = id;
135
136 /* Initialize the shadow variable if data provided */
137 if (data)
138 memcpy(new_shadow->data, data, size);
139
140 /* Look for <obj, id> again under the lock */ 139 /* Look for <obj, id> again under the lock */
141 spin_lock_irqsave(&klp_shadow_lock, flags); 140 spin_lock_irqsave(&klp_shadow_lock, flags);
142 shadow_data = klp_shadow_get(obj, id); 141 shadow_data = klp_shadow_get(obj, id);
@@ -150,6 +149,22 @@ static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data,
150 goto exists; 149 goto exists;
151 } 150 }
152 151
152 new_shadow->obj = obj;
153 new_shadow->id = id;
154
155 if (ctor) {
156 int err;
157
158 err = ctor(obj, new_shadow->data, ctor_data);
159 if (err) {
160 spin_unlock_irqrestore(&klp_shadow_lock, flags);
161 kfree(new_shadow);
162 pr_err("Failed to construct shadow variable <%p, %lx> (%d)\n",
163 obj, id, err);
164 return NULL;
165 }
166 }
167
153 /* No <obj, id> found, so attach the newly allocated one */ 168 /* No <obj, id> found, so attach the newly allocated one */
154 hash_add_rcu(klp_shadow_hash, &new_shadow->node, 169 hash_add_rcu(klp_shadow_hash, &new_shadow->node,
155 (unsigned long)new_shadow->obj); 170 (unsigned long)new_shadow->obj);
@@ -170,26 +185,32 @@ exists:
170 * klp_shadow_alloc() - allocate and add a new shadow variable 185 * klp_shadow_alloc() - allocate and add a new shadow variable
171 * @obj: pointer to parent object 186 * @obj: pointer to parent object
172 * @id: data identifier 187 * @id: data identifier
173 * @data: pointer to data to attach to parent
174 * @size: size of attached data 188 * @size: size of attached data
175 * @gfp_flags: GFP mask for allocation 189 * @gfp_flags: GFP mask for allocation
190 * @ctor: custom constructor to initialize the shadow data (optional)
191 * @ctor_data: pointer to any data needed by @ctor (optional)
192 *
193 * Allocates @size bytes for new shadow variable data using @gfp_flags.
194 * The data are zeroed by default. They are further initialized by @ctor
195 * function if it is not NULL. The new shadow variable is then added
196 * to the global hashtable.
176 * 197 *
177 * Allocates @size bytes for new shadow variable data using @gfp_flags 198 * If an existing <obj, id> shadow variable can be found, this routine will
178 * and copies @size bytes from @data into the new shadow variable's own 199 * issue a WARN, exit early and return NULL.
179 * data space. If @data is NULL, @size bytes are still allocated, but
180 * no copy is performed. The new shadow variable is then added to the
181 * global hashtable.
182 * 200 *
183 * If an existing <obj, id> shadow variable can be found, this routine 201 * This function guarantees that the constructor function is called only when
184 * will issue a WARN, exit early and return NULL. 202 * the variable did not exist before. The cost is that @ctor is called
203 * in atomic context under a spin lock.
185 * 204 *
186 * Return: the shadow variable data element, NULL on duplicate or 205 * Return: the shadow variable data element, NULL on duplicate or
187 * failure. 206 * failure.
188 */ 207 */
189void *klp_shadow_alloc(void *obj, unsigned long id, void *data, 208void *klp_shadow_alloc(void *obj, unsigned long id,
190 size_t size, gfp_t gfp_flags) 209 size_t size, gfp_t gfp_flags,
210 klp_shadow_ctor_t ctor, void *ctor_data)
191{ 211{
192 return __klp_shadow_get_or_alloc(obj, id, data, size, gfp_flags, true); 212 return __klp_shadow_get_or_alloc(obj, id, size, gfp_flags,
213 ctor, ctor_data, true);
193} 214}
194EXPORT_SYMBOL_GPL(klp_shadow_alloc); 215EXPORT_SYMBOL_GPL(klp_shadow_alloc);
195 216
@@ -197,37 +218,51 @@ EXPORT_SYMBOL_GPL(klp_shadow_alloc);
197 * klp_shadow_get_or_alloc() - get existing or allocate a new shadow variable 218 * klp_shadow_get_or_alloc() - get existing or allocate a new shadow variable
198 * @obj: pointer to parent object 219 * @obj: pointer to parent object
199 * @id: data identifier 220 * @id: data identifier
200 * @data: pointer to data to attach to parent
201 * @size: size of attached data 221 * @size: size of attached data
202 * @gfp_flags: GFP mask for allocation 222 * @gfp_flags: GFP mask for allocation
223 * @ctor: custom constructor to initialize the shadow data (optional)
224 * @ctor_data: pointer to any data needed by @ctor (optional)
203 * 225 *
204 * Returns a pointer to existing shadow data if an <obj, id> shadow 226 * Returns a pointer to existing shadow data if an <obj, id> shadow
205 * variable is already present. Otherwise, it creates a new shadow 227 * variable is already present. Otherwise, it creates a new shadow
206 * variable like klp_shadow_alloc(). 228 * variable like klp_shadow_alloc().
207 * 229 *
208 * This function guarantees that only one shadow variable exists with 230 * This function guarantees that only one shadow variable exists with the given
209 * the given @id for the given @obj. It also guarantees that the shadow 231 * @id for the given @obj. It also guarantees that the constructor function
210 * variable will be initialized by the given @data only when it did not 232 * will be called only when the variable did not exist before. The cost is
211 * exist before. 233 * that @ctor is called in atomic context under a spin lock.
212 * 234 *
213 * Return: the shadow variable data element, NULL on failure. 235 * Return: the shadow variable data element, NULL on failure.
214 */ 236 */
215void *klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, 237void *klp_shadow_get_or_alloc(void *obj, unsigned long id,
216 size_t size, gfp_t gfp_flags) 238 size_t size, gfp_t gfp_flags,
239 klp_shadow_ctor_t ctor, void *ctor_data)
217{ 240{
218 return __klp_shadow_get_or_alloc(obj, id, data, size, gfp_flags, false); 241 return __klp_shadow_get_or_alloc(obj, id, size, gfp_flags,
242 ctor, ctor_data, false);
219} 243}
220EXPORT_SYMBOL_GPL(klp_shadow_get_or_alloc); 244EXPORT_SYMBOL_GPL(klp_shadow_get_or_alloc);
221 245
246static void klp_shadow_free_struct(struct klp_shadow *shadow,
247 klp_shadow_dtor_t dtor)
248{
249 hash_del_rcu(&shadow->node);
250 if (dtor)
251 dtor(shadow->obj, shadow->data);
252 kfree_rcu(shadow, rcu_head);
253}
254
222/** 255/**
223 * klp_shadow_free() - detach and free a <obj, id> shadow variable 256 * klp_shadow_free() - detach and free a <obj, id> shadow variable
224 * @obj: pointer to parent object 257 * @obj: pointer to parent object
225 * @id: data identifier 258 * @id: data identifier
259 * @dtor: custom callback that can be used to unregister the variable
260 * and/or free data that the shadow variable points to (optional)
226 * 261 *
227 * This function releases the memory for this <obj, id> shadow variable 262 * This function releases the memory for this <obj, id> shadow variable
228 * instance, callers should stop referencing it accordingly. 263 * instance, callers should stop referencing it accordingly.
229 */ 264 */
230void klp_shadow_free(void *obj, unsigned long id) 265void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor)
231{ 266{
232 struct klp_shadow *shadow; 267 struct klp_shadow *shadow;
233 unsigned long flags; 268 unsigned long flags;
@@ -239,8 +274,7 @@ void klp_shadow_free(void *obj, unsigned long id)
239 (unsigned long)obj) { 274 (unsigned long)obj) {
240 275
241 if (klp_shadow_match(shadow, obj, id)) { 276 if (klp_shadow_match(shadow, obj, id)) {
242 hash_del_rcu(&shadow->node); 277 klp_shadow_free_struct(shadow, dtor);
243 kfree_rcu(shadow, rcu_head);
244 break; 278 break;
245 } 279 }
246 } 280 }
@@ -252,11 +286,13 @@ EXPORT_SYMBOL_GPL(klp_shadow_free);
252/** 286/**
253 * klp_shadow_free_all() - detach and free all <*, id> shadow variables 287 * klp_shadow_free_all() - detach and free all <*, id> shadow variables
254 * @id: data identifier 288 * @id: data identifier
289 * @dtor: custom callback that can be used to unregister the variable
290 * and/or free data that the shadow variable points to (optional)
255 * 291 *
256 * This function releases the memory for all <*, id> shadow variable 292 * This function releases the memory for all <*, id> shadow variable
257 * instances, callers should stop referencing them accordingly. 293 * instances, callers should stop referencing them accordingly.
258 */ 294 */
259void klp_shadow_free_all(unsigned long id) 295void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor)
260{ 296{
261 struct klp_shadow *shadow; 297 struct klp_shadow *shadow;
262 unsigned long flags; 298 unsigned long flags;
@@ -266,10 +302,8 @@ void klp_shadow_free_all(unsigned long id)
266 302
267 /* Delete all <*, id> from hash */ 303 /* Delete all <*, id> from hash */
268 hash_for_each(klp_shadow_hash, i, shadow, node) { 304 hash_for_each(klp_shadow_hash, i, shadow, node) {
269 if (klp_shadow_match(shadow, shadow->obj, id)) { 305 if (klp_shadow_match(shadow, shadow->obj, id))
270 hash_del_rcu(&shadow->node); 306 klp_shadow_free_struct(shadow, dtor);
271 kfree_rcu(shadow, rcu_head);
272 }
273 } 307 }
274 308
275 spin_unlock_irqrestore(&klp_shadow_lock, flags); 309 spin_unlock_irqrestore(&klp_shadow_lock, flags);
diff --git a/kernel/module.c b/kernel/module.c
index a6e43a5806a1..ce8066b88178 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1472,7 +1472,8 @@ static ssize_t module_sect_show(struct module_attribute *mattr,
1472{ 1472{
1473 struct module_sect_attr *sattr = 1473 struct module_sect_attr *sattr =
1474 container_of(mattr, struct module_sect_attr, mattr); 1474 container_of(mattr, struct module_sect_attr, mattr);
1475 return sprintf(buf, "0x%pK\n", (void *)sattr->address); 1475 return sprintf(buf, "0x%px\n", kptr_restrict < 2 ?
1476 (void *)sattr->address : NULL);
1476} 1477}
1477 1478
1478static void free_sect_attrs(struct module_sect_attrs *sect_attrs) 1479static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index e8c0dab4fd65..07148b497451 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -704,24 +704,6 @@ static const struct bin_table bin_net_netfilter_table[] = {
704 {} 704 {}
705}; 705};
706 706
707static const struct bin_table bin_net_irda_table[] = {
708 { CTL_INT, NET_IRDA_DISCOVERY, "discovery" },
709 { CTL_STR, NET_IRDA_DEVNAME, "devname" },
710 { CTL_INT, NET_IRDA_DEBUG, "debug" },
711 { CTL_INT, NET_IRDA_FAST_POLL, "fast_poll_increase" },
712 { CTL_INT, NET_IRDA_DISCOVERY_SLOTS, "discovery_slots" },
713 { CTL_INT, NET_IRDA_DISCOVERY_TIMEOUT, "discovery_timeout" },
714 { CTL_INT, NET_IRDA_SLOT_TIMEOUT, "slot_timeout" },
715 { CTL_INT, NET_IRDA_MAX_BAUD_RATE, "max_baud_rate" },
716 { CTL_INT, NET_IRDA_MIN_TX_TURN_TIME, "min_tx_turn_time" },
717 { CTL_INT, NET_IRDA_MAX_TX_DATA_SIZE, "max_tx_data_size" },
718 { CTL_INT, NET_IRDA_MAX_TX_WINDOW, "max_tx_window" },
719 { CTL_INT, NET_IRDA_MAX_NOREPLY_TIME, "max_noreply_time" },
720 { CTL_INT, NET_IRDA_WARN_NOREPLY_TIME, "warn_noreply_time" },
721 { CTL_INT, NET_IRDA_LAP_KEEPALIVE_TIME, "lap_keepalive_time" },
722 {}
723};
724
725static const struct bin_table bin_net_table[] = { 707static const struct bin_table bin_net_table[] = {
726 { CTL_DIR, NET_CORE, "core", bin_net_core_table }, 708 { CTL_DIR, NET_CORE, "core", bin_net_core_table },
727 /* NET_ETHER not used */ 709 /* NET_ETHER not used */
@@ -743,7 +725,7 @@ static const struct bin_table bin_net_table[] = {
743 { CTL_DIR, NET_LLC, "llc", bin_net_llc_table }, 725 { CTL_DIR, NET_LLC, "llc", bin_net_llc_table },
744 { CTL_DIR, NET_NETFILTER, "netfilter", bin_net_netfilter_table }, 726 { CTL_DIR, NET_NETFILTER, "netfilter", bin_net_netfilter_table },
745 /* NET_DCCP "dccp" no longer used */ 727 /* NET_DCCP "dccp" no longer used */
746 { CTL_DIR, NET_IRDA, "irda", bin_net_irda_table }, 728 /* NET_IRDA "irda" no longer used */
747 { CTL_INT, 2089, "nf_conntrack_max" }, 729 { CTL_INT, 2089, "nf_conntrack_max" },
748 {} 730 {}
749}; 731};
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index eda1210ce50f..14e858753d76 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -91,6 +91,11 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
91 .get_time = &ktime_get_real, 91 .get_time = &ktime_get_real,
92 }, 92 },
93 { 93 {
94 .index = HRTIMER_BASE_BOOTTIME,
95 .clockid = CLOCK_BOOTTIME,
96 .get_time = &ktime_get_boottime,
97 },
98 {
94 .index = HRTIMER_BASE_TAI, 99 .index = HRTIMER_BASE_TAI,
95 .clockid = CLOCK_TAI, 100 .clockid = CLOCK_TAI,
96 .get_time = &ktime_get_clocktai, 101 .get_time = &ktime_get_clocktai,
@@ -106,6 +111,11 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
106 .get_time = &ktime_get_real, 111 .get_time = &ktime_get_real,
107 }, 112 },
108 { 113 {
114 .index = HRTIMER_BASE_BOOTTIME_SOFT,
115 .clockid = CLOCK_BOOTTIME,
116 .get_time = &ktime_get_boottime,
117 },
118 {
109 .index = HRTIMER_BASE_TAI_SOFT, 119 .index = HRTIMER_BASE_TAI_SOFT,
110 .clockid = CLOCK_TAI, 120 .clockid = CLOCK_TAI,
111 .get_time = &ktime_get_clocktai, 121 .get_time = &ktime_get_clocktai,
@@ -119,7 +129,7 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
119 129
120 [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME, 130 [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
121 [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC, 131 [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
122 [CLOCK_BOOTTIME] = HRTIMER_BASE_MONOTONIC, 132 [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
123 [CLOCK_TAI] = HRTIMER_BASE_TAI, 133 [CLOCK_TAI] = HRTIMER_BASE_TAI,
124}; 134};
125 135
@@ -571,12 +581,14 @@ __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_
571static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base) 581static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
572{ 582{
573 ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset; 583 ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
584 ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
574 ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset; 585 ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
575 586
576 ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq, 587 ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq,
577 offs_real, offs_tai); 588 offs_real, offs_boot, offs_tai);
578 589
579 base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real; 590 base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real;
591 base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot;
580 base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai; 592 base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai;
581 593
582 return now; 594 return now;
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 2541bd89f20e..5a6251ac6f7a 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -1205,10 +1205,12 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
1205 u64 *newval, u64 *oldval) 1205 u64 *newval, u64 *oldval)
1206{ 1206{
1207 u64 now; 1207 u64 now;
1208 int ret;
1208 1209
1209 WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED); 1210 WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
1211 ret = cpu_timer_sample_group(clock_idx, tsk, &now);
1210 1212
1211 if (oldval && cpu_timer_sample_group(clock_idx, tsk, &now) != -EINVAL) { 1213 if (oldval && ret != -EINVAL) {
1212 /* 1214 /*
1213 * We are setting itimer. The *oldval is absolute and we update 1215 * We are setting itimer. The *oldval is absolute and we update
1214 * it to be relative, *newval argument is relative and we update 1216 * it to be relative, *newval argument is relative and we update
diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c
index e0dbae98db9d..69a937c3cd81 100644
--- a/kernel/time/posix-stubs.c
+++ b/kernel/time/posix-stubs.c
@@ -83,8 +83,6 @@ int do_clock_gettime(clockid_t which_clock, struct timespec64 *tp)
83 case CLOCK_BOOTTIME: 83 case CLOCK_BOOTTIME:
84 get_monotonic_boottime64(tp); 84 get_monotonic_boottime64(tp);
85 break; 85 break;
86 case CLOCK_MONOTONIC_ACTIVE:
87 ktime_get_active_ts64(tp);
88 default: 86 default:
89 return -EINVAL; 87 return -EINVAL;
90 } 88 }
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index b6899b5060bd..10b7186d0638 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -252,16 +252,15 @@ static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *
252 return 0; 252 return 0;
253} 253}
254 254
255static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp) 255static int posix_get_boottime(const clockid_t which_clock, struct timespec64 *tp)
256{ 256{
257 timekeeping_clocktai64(tp); 257 get_monotonic_boottime64(tp);
258 return 0; 258 return 0;
259} 259}
260 260
261static int posix_get_monotonic_active(clockid_t which_clock, 261static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp)
262 struct timespec64 *tp)
263{ 262{
264 ktime_get_active_ts64(tp); 263 timekeeping_clocktai64(tp);
265 return 0; 264 return 0;
266} 265}
267 266
@@ -1317,9 +1316,19 @@ static const struct k_clock clock_tai = {
1317 .timer_arm = common_hrtimer_arm, 1316 .timer_arm = common_hrtimer_arm,
1318}; 1317};
1319 1318
1320static const struct k_clock clock_monotonic_active = { 1319static const struct k_clock clock_boottime = {
1321 .clock_getres = posix_get_hrtimer_res, 1320 .clock_getres = posix_get_hrtimer_res,
1322 .clock_get = posix_get_monotonic_active, 1321 .clock_get = posix_get_boottime,
1322 .nsleep = common_nsleep,
1323 .timer_create = common_timer_create,
1324 .timer_set = common_timer_set,
1325 .timer_get = common_timer_get,
1326 .timer_del = common_timer_del,
1327 .timer_rearm = common_hrtimer_rearm,
1328 .timer_forward = common_hrtimer_forward,
1329 .timer_remaining = common_hrtimer_remaining,
1330 .timer_try_to_cancel = common_hrtimer_try_to_cancel,
1331 .timer_arm = common_hrtimer_arm,
1323}; 1332};
1324 1333
1325static const struct k_clock * const posix_clocks[] = { 1334static const struct k_clock * const posix_clocks[] = {
@@ -1330,11 +1339,10 @@ static const struct k_clock * const posix_clocks[] = {
1330 [CLOCK_MONOTONIC_RAW] = &clock_monotonic_raw, 1339 [CLOCK_MONOTONIC_RAW] = &clock_monotonic_raw,
1331 [CLOCK_REALTIME_COARSE] = &clock_realtime_coarse, 1340 [CLOCK_REALTIME_COARSE] = &clock_realtime_coarse,
1332 [CLOCK_MONOTONIC_COARSE] = &clock_monotonic_coarse, 1341 [CLOCK_MONOTONIC_COARSE] = &clock_monotonic_coarse,
1333 [CLOCK_BOOTTIME] = &clock_monotonic, 1342 [CLOCK_BOOTTIME] = &clock_boottime,
1334 [CLOCK_REALTIME_ALARM] = &alarm_clock, 1343 [CLOCK_REALTIME_ALARM] = &alarm_clock,
1335 [CLOCK_BOOTTIME_ALARM] = &alarm_clock, 1344 [CLOCK_BOOTTIME_ALARM] = &alarm_clock,
1336 [CLOCK_TAI] = &clock_tai, 1345 [CLOCK_TAI] = &clock_tai,
1337 [CLOCK_MONOTONIC_ACTIVE] = &clock_monotonic_active,
1338}; 1346};
1339 1347
1340static const struct k_clock *clockid_to_kclock(const clockid_t id) 1348static const struct k_clock *clockid_to_kclock(const clockid_t id)
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 099572ca4a8f..49edc1c4f3e6 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -419,19 +419,6 @@ void tick_suspend_local(void)
419 clockevents_shutdown(td->evtdev); 419 clockevents_shutdown(td->evtdev);
420} 420}
421 421
422static void tick_forward_next_period(void)
423{
424 ktime_t delta, now = ktime_get();
425 u64 n;
426
427 delta = ktime_sub(now, tick_next_period);
428 n = ktime_divns(delta, tick_period);
429 tick_next_period += n * tick_period;
430 if (tick_next_period < now)
431 tick_next_period += tick_period;
432 tick_sched_forward_next_period();
433}
434
435/** 422/**
436 * tick_resume_local - Resume the local tick device 423 * tick_resume_local - Resume the local tick device
437 * 424 *
@@ -444,8 +431,6 @@ void tick_resume_local(void)
444 struct tick_device *td = this_cpu_ptr(&tick_cpu_device); 431 struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
445 bool broadcast = tick_resume_check_broadcast(); 432 bool broadcast = tick_resume_check_broadcast();
446 433
447 tick_forward_next_period();
448
449 clockevents_tick_resume(td->evtdev); 434 clockevents_tick_resume(td->evtdev);
450 if (!broadcast) { 435 if (!broadcast) {
451 if (td->mode == TICKDEV_MODE_PERIODIC) 436 if (td->mode == TICKDEV_MODE_PERIODIC)
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 21efab7485ca..e277284c2831 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -141,12 +141,6 @@ static inline void tick_check_oneshot_broadcast_this_cpu(void) { }
141static inline bool tick_broadcast_oneshot_available(void) { return tick_oneshot_possible(); } 141static inline bool tick_broadcast_oneshot_available(void) { return tick_oneshot_possible(); }
142#endif /* !(BROADCAST && ONESHOT) */ 142#endif /* !(BROADCAST && ONESHOT) */
143 143
144#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
145extern void tick_sched_forward_next_period(void);
146#else
147static inline void tick_sched_forward_next_period(void) { }
148#endif
149
150/* NO_HZ_FULL internal */ 144/* NO_HZ_FULL internal */
151#ifdef CONFIG_NO_HZ_FULL 145#ifdef CONFIG_NO_HZ_FULL
152extern void tick_nohz_init(void); 146extern void tick_nohz_init(void);
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index c1f518e7aa80..6fe615d57ebb 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -82,16 +82,15 @@ int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *))
82 if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) || 82 if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) ||
83 !tick_device_is_functional(dev)) { 83 !tick_device_is_functional(dev)) {
84 84
85 printk(KERN_INFO "Clockevents: " 85 pr_info("Clockevents: could not switch to one-shot mode:");
86 "could not switch to one-shot mode:");
87 if (!dev) { 86 if (!dev) {
88 printk(" no tick device\n"); 87 pr_cont(" no tick device\n");
89 } else { 88 } else {
90 if (!tick_device_is_functional(dev)) 89 if (!tick_device_is_functional(dev))
91 printk(" %s is not functional.\n", dev->name); 90 pr_cont(" %s is not functional.\n", dev->name);
92 else 91 else
93 printk(" %s does not support one-shot mode.\n", 92 pr_cont(" %s does not support one-shot mode.\n",
94 dev->name); 93 dev->name);
95 } 94 }
96 return -EINVAL; 95 return -EINVAL;
97 } 96 }
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 646645e981f9..da9455a6b42b 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -52,15 +52,6 @@ struct tick_sched *tick_get_tick_sched(int cpu)
52static ktime_t last_jiffies_update; 52static ktime_t last_jiffies_update;
53 53
54/* 54/*
55 * Called after resume. Make sure that jiffies are not fast forwarded due to
56 * clock monotonic being forwarded by the suspended time.
57 */
58void tick_sched_forward_next_period(void)
59{
60 last_jiffies_update = tick_next_period;
61}
62
63/*
64 * Must be called with interrupts disabled ! 55 * Must be called with interrupts disabled !
65 */ 56 */
66static void tick_do_update_jiffies64(ktime_t now) 57static void tick_do_update_jiffies64(ktime_t now)
@@ -804,12 +795,12 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
804 return; 795 return;
805 } 796 }
806 797
807 hrtimer_set_expires(&ts->sched_timer, tick); 798 if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
808 799 hrtimer_start(&ts->sched_timer, tick, HRTIMER_MODE_ABS_PINNED);
809 if (ts->nohz_mode == NOHZ_MODE_HIGHRES) 800 } else {
810 hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED); 801 hrtimer_set_expires(&ts->sched_timer, tick);
811 else
812 tick_program_event(tick, 1); 802 tick_program_event(tick, 1);
803 }
813} 804}
814 805
815static void tick_nohz_retain_tick(struct tick_sched *ts) 806static void tick_nohz_retain_tick(struct tick_sched *ts)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index ca90219a1e73..49cbceef5deb 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -138,12 +138,7 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
138 138
139static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta) 139static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
140{ 140{
141 /* Update both bases so mono and raw stay coupled. */ 141 tk->offs_boot = ktime_add(tk->offs_boot, delta);
142 tk->tkr_mono.base += delta;
143 tk->tkr_raw.base += delta;
144
145 /* Accumulate time spent in suspend */
146 tk->time_suspended += delta;
147} 142}
148 143
149/* 144/*
@@ -473,6 +468,36 @@ u64 ktime_get_raw_fast_ns(void)
473} 468}
474EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns); 469EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
475 470
471/**
472 * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
473 *
474 * To keep it NMI safe since we're accessing from tracing, we're not using a
475 * separate timekeeper with updates to monotonic clock and boot offset
476 * protected with seqlocks. This has the following minor side effects:
477 *
478 * (1) Its possible that a timestamp be taken after the boot offset is updated
479 * but before the timekeeper is updated. If this happens, the new boot offset
480 * is added to the old timekeeping making the clock appear to update slightly
481 * earlier:
482 * CPU 0 CPU 1
483 * timekeeping_inject_sleeptime64()
484 * __timekeeping_inject_sleeptime(tk, delta);
485 * timestamp();
486 * timekeeping_update(tk, TK_CLEAR_NTP...);
487 *
488 * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
489 * partially updated. Since the tk->offs_boot update is a rare event, this
490 * should be a rare occurrence which postprocessing should be able to handle.
491 */
492u64 notrace ktime_get_boot_fast_ns(void)
493{
494 struct timekeeper *tk = &tk_core.timekeeper;
495
496 return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
497}
498EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
499
500
476/* 501/*
477 * See comment for __ktime_get_fast_ns() vs. timestamp ordering 502 * See comment for __ktime_get_fast_ns() vs. timestamp ordering
478 */ 503 */
@@ -764,6 +789,7 @@ EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);
764 789
765static ktime_t *offsets[TK_OFFS_MAX] = { 790static ktime_t *offsets[TK_OFFS_MAX] = {
766 [TK_OFFS_REAL] = &tk_core.timekeeper.offs_real, 791 [TK_OFFS_REAL] = &tk_core.timekeeper.offs_real,
792 [TK_OFFS_BOOT] = &tk_core.timekeeper.offs_boot,
767 [TK_OFFS_TAI] = &tk_core.timekeeper.offs_tai, 793 [TK_OFFS_TAI] = &tk_core.timekeeper.offs_tai,
768}; 794};
769 795
@@ -861,39 +887,6 @@ void ktime_get_ts64(struct timespec64 *ts)
861EXPORT_SYMBOL_GPL(ktime_get_ts64); 887EXPORT_SYMBOL_GPL(ktime_get_ts64);
862 888
863/** 889/**
864 * ktime_get_active_ts64 - Get the active non-suspended monotonic clock
865 * @ts: pointer to timespec variable
866 *
867 * The function calculates the monotonic clock from the realtime clock and
868 * the wall_to_monotonic offset, subtracts the accumulated suspend time and
869 * stores the result in normalized timespec64 format in the variable
870 * pointed to by @ts.
871 */
872void ktime_get_active_ts64(struct timespec64 *ts)
873{
874 struct timekeeper *tk = &tk_core.timekeeper;
875 struct timespec64 tomono, tsusp;
876 u64 nsec, nssusp;
877 unsigned int seq;
878
879 WARN_ON(timekeeping_suspended);
880
881 do {
882 seq = read_seqcount_begin(&tk_core.seq);
883 ts->tv_sec = tk->xtime_sec;
884 nsec = timekeeping_get_ns(&tk->tkr_mono);
885 tomono = tk->wall_to_monotonic;
886 nssusp = tk->time_suspended;
887 } while (read_seqcount_retry(&tk_core.seq, seq));
888
889 ts->tv_sec += tomono.tv_sec;
890 ts->tv_nsec = 0;
891 timespec64_add_ns(ts, nsec + tomono.tv_nsec);
892 tsusp = ns_to_timespec64(nssusp);
893 *ts = timespec64_sub(*ts, tsusp);
894}
895
896/**
897 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC 890 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
898 * 891 *
899 * Returns the seconds portion of CLOCK_MONOTONIC with a single non 892 * Returns the seconds portion of CLOCK_MONOTONIC with a single non
@@ -1593,6 +1586,7 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
1593 return; 1586 return;
1594 } 1587 }
1595 tk_xtime_add(tk, delta); 1588 tk_xtime_add(tk, delta);
1589 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
1596 tk_update_sleep_time(tk, timespec64_to_ktime(*delta)); 1590 tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
1597 tk_debug_account_sleep_time(delta); 1591 tk_debug_account_sleep_time(delta);
1598} 1592}
@@ -2125,7 +2119,7 @@ out:
2125void getboottime64(struct timespec64 *ts) 2119void getboottime64(struct timespec64 *ts)
2126{ 2120{
2127 struct timekeeper *tk = &tk_core.timekeeper; 2121 struct timekeeper *tk = &tk_core.timekeeper;
2128 ktime_t t = ktime_sub(tk->offs_real, tk->time_suspended); 2122 ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
2129 2123
2130 *ts = ktime_to_timespec64(t); 2124 *ts = ktime_to_timespec64(t);
2131} 2125}
@@ -2139,13 +2133,6 @@ unsigned long get_seconds(void)
2139} 2133}
2140EXPORT_SYMBOL(get_seconds); 2134EXPORT_SYMBOL(get_seconds);
2141 2135
2142struct timespec __current_kernel_time(void)
2143{
2144 struct timekeeper *tk = &tk_core.timekeeper;
2145
2146 return timespec64_to_timespec(tk_xtime(tk));
2147}
2148
2149struct timespec64 current_kernel_time64(void) 2136struct timespec64 current_kernel_time64(void)
2150{ 2137{
2151 struct timekeeper *tk = &tk_core.timekeeper; 2138 struct timekeeper *tk = &tk_core.timekeeper;
@@ -2195,6 +2182,7 @@ void do_timer(unsigned long ticks)
2195 * ktime_get_update_offsets_now - hrtimer helper 2182 * ktime_get_update_offsets_now - hrtimer helper
2196 * @cwsseq: pointer to check and store the clock was set sequence number 2183 * @cwsseq: pointer to check and store the clock was set sequence number
2197 * @offs_real: pointer to storage for monotonic -> realtime offset 2184 * @offs_real: pointer to storage for monotonic -> realtime offset
2185 * @offs_boot: pointer to storage for monotonic -> boottime offset
2198 * @offs_tai: pointer to storage for monotonic -> clock tai offset 2186 * @offs_tai: pointer to storage for monotonic -> clock tai offset
2199 * 2187 *
2200 * Returns current monotonic time and updates the offsets if the 2188 * Returns current monotonic time and updates the offsets if the
@@ -2204,7 +2192,7 @@ void do_timer(unsigned long ticks)
2204 * Called from hrtimer_interrupt() or retrigger_next_event() 2192 * Called from hrtimer_interrupt() or retrigger_next_event()
2205 */ 2193 */
2206ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real, 2194ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
2207 ktime_t *offs_tai) 2195 ktime_t *offs_boot, ktime_t *offs_tai)
2208{ 2196{
2209 struct timekeeper *tk = &tk_core.timekeeper; 2197 struct timekeeper *tk = &tk_core.timekeeper;
2210 unsigned int seq; 2198 unsigned int seq;
@@ -2221,6 +2209,7 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
2221 if (*cwsseq != tk->clock_was_set_seq) { 2209 if (*cwsseq != tk->clock_was_set_seq) {
2222 *cwsseq = tk->clock_was_set_seq; 2210 *cwsseq = tk->clock_was_set_seq;
2223 *offs_real = tk->offs_real; 2211 *offs_real = tk->offs_real;
2212 *offs_boot = tk->offs_boot;
2224 *offs_tai = tk->offs_tai; 2213 *offs_tai = tk->offs_tai;
2225 } 2214 }
2226 2215
diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h
index 79b67f5e0343..7a9b4eb7a1d5 100644
--- a/kernel/time/timekeeping.h
+++ b/kernel/time/timekeeping.h
@@ -6,6 +6,7 @@
6 */ 6 */
7extern ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, 7extern ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq,
8 ktime_t *offs_real, 8 ktime_t *offs_real,
9 ktime_t *offs_boot,
9 ktime_t *offs_tai); 10 ktime_t *offs_tai);
10 11
11extern int timekeeping_valid_for_hres(void); 12extern int timekeeping_valid_for_hres(void);
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index d88e96d4e12c..56ba0f2a01db 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -977,6 +977,7 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
977{ 977{
978 struct perf_event_query_bpf __user *uquery = info; 978 struct perf_event_query_bpf __user *uquery = info;
979 struct perf_event_query_bpf query = {}; 979 struct perf_event_query_bpf query = {};
980 u32 *ids, prog_cnt, ids_len;
980 int ret; 981 int ret;
981 982
982 if (!capable(CAP_SYS_ADMIN)) 983 if (!capable(CAP_SYS_ADMIN))
@@ -985,16 +986,32 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
985 return -EINVAL; 986 return -EINVAL;
986 if (copy_from_user(&query, uquery, sizeof(query))) 987 if (copy_from_user(&query, uquery, sizeof(query)))
987 return -EFAULT; 988 return -EFAULT;
988 if (query.ids_len > BPF_TRACE_MAX_PROGS) 989
990 ids_len = query.ids_len;
991 if (ids_len > BPF_TRACE_MAX_PROGS)
989 return -E2BIG; 992 return -E2BIG;
993 ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
994 if (!ids)
995 return -ENOMEM;
996 /*
997 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
998 * is required when user only wants to check for uquery->prog_cnt.
999 * There is no need to check for it since the case is handled
1000 * gracefully in bpf_prog_array_copy_info.
1001 */
990 1002
991 mutex_lock(&bpf_event_mutex); 1003 mutex_lock(&bpf_event_mutex);
992 ret = bpf_prog_array_copy_info(event->tp_event->prog_array, 1004 ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
993 uquery->ids, 1005 ids,
994 query.ids_len, 1006 ids_len,
995 &uquery->prog_cnt); 1007 &prog_cnt);
996 mutex_unlock(&bpf_event_mutex); 1008 mutex_unlock(&bpf_event_mutex);
997 1009
1010 if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
1011 copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
1012 ret = -EFAULT;
1013
1014 kfree(ids);
998 return ret; 1015 return ret;
999} 1016}
1000 1017
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index dfbcf9ee1447..414d7210b2ec 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1165,7 +1165,7 @@ static struct {
1165 { trace_clock, "perf", 1 }, 1165 { trace_clock, "perf", 1 },
1166 { ktime_get_mono_fast_ns, "mono", 1 }, 1166 { ktime_get_mono_fast_ns, "mono", 1 },
1167 { ktime_get_raw_fast_ns, "mono_raw", 1 }, 1167 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1168 { ktime_get_mono_fast_ns, "boot", 1 }, 1168 { ktime_get_boot_fast_ns, "boot", 1 },
1169 ARCH_TRACE_CLOCKS 1169 ARCH_TRACE_CLOCKS
1170}; 1170};
1171 1171
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index e954ae3d82c0..e3a658bac10f 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -356,7 +356,7 @@ FTRACE_ENTRY(hwlat, hwlat_entry,
356 __field( unsigned int, seqnum ) 356 __field( unsigned int, seqnum )
357 ), 357 ),
358 358
359 F_printk("cnt:%u\tts:%010llu.%010lu\tinner:%llu\touter:%llunmi-ts:%llu\tnmi-count:%u\n", 359 F_printk("cnt:%u\tts:%010llu.%010lu\tinner:%llu\touter:%llu\tnmi-ts:%llu\tnmi-count:%u\n",
360 __entry->seqnum, 360 __entry->seqnum,
361 __entry->tv_sec, 361 __entry->tv_sec,
362 __entry->tv_nsec, 362 __entry->tv_nsec,
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 9b4716bb8bb0..1f951b3df60c 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1499,14 +1499,14 @@ static int process_preds(struct trace_event_call *call,
1499 return ret; 1499 return ret;
1500 } 1500 }
1501 1501
1502 if (!nr_preds) { 1502 if (!nr_preds)
1503 prog = NULL; 1503 return -EINVAL;
1504 } else { 1504
1505 prog = predicate_parse(filter_string, nr_parens, nr_preds, 1505 prog = predicate_parse(filter_string, nr_parens, nr_preds,
1506 parse_pred, call, pe); 1506 parse_pred, call, pe);
1507 if (IS_ERR(prog)) 1507 if (IS_ERR(prog))
1508 return PTR_ERR(prog); 1508 return PTR_ERR(prog);
1509 } 1509
1510 rcu_assign_pointer(filter->prog, prog); 1510 rcu_assign_pointer(filter->prog, prog);
1511 return 0; 1511 return 0;
1512} 1512}
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 1cd3fb4d70f8..02aed76e0978 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -512,8 +512,6 @@ static int __register_trace_kprobe(struct trace_kprobe *tk)
512 if (ret == 0) 512 if (ret == 0)
513 tk->tp.flags |= TP_FLAG_REGISTERED; 513 tk->tp.flags |= TP_FLAG_REGISTERED;
514 else { 514 else {
515 pr_warn("Could not insert probe at %s+%lu: %d\n",
516 trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
517 if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) { 515 if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
518 pr_warn("This probe might be able to register after target module is loaded. Continue.\n"); 516 pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
519 ret = 0; 517 ret = 0;
diff --git a/lib/dma-direct.c b/lib/dma-direct.c
index c0bba30fef0a..bbfb229aa067 100644
--- a/lib/dma-direct.c
+++ b/lib/dma-direct.c
@@ -84,7 +84,8 @@ again:
84 __free_pages(page, page_order); 84 __free_pages(page, page_order);
85 page = NULL; 85 page = NULL;
86 86
87 if (dev->coherent_dma_mask < DMA_BIT_MASK(32) && 87 if (IS_ENABLED(CONFIG_ZONE_DMA) &&
88 dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
88 !(gfp & GFP_DMA)) { 89 !(gfp & GFP_DMA)) {
89 gfp = (gfp & ~GFP_DMA32) | GFP_DMA; 90 gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
90 goto again; 91 goto again;
diff --git a/lib/errseq.c b/lib/errseq.c
index df782418b333..81f9e33aa7e7 100644
--- a/lib/errseq.c
+++ b/lib/errseq.c
@@ -111,27 +111,22 @@ EXPORT_SYMBOL(errseq_set);
111 * errseq_sample() - Grab current errseq_t value. 111 * errseq_sample() - Grab current errseq_t value.
112 * @eseq: Pointer to errseq_t to be sampled. 112 * @eseq: Pointer to errseq_t to be sampled.
113 * 113 *
114 * This function allows callers to sample an errseq_t value, marking it as 114 * This function allows callers to initialise their errseq_t variable.
115 * "seen" if required. 115 * If the error has been "seen", new callers will not see an old error.
116 * If there is an unseen error in @eseq, the caller of this function will
117 * see it the next time it checks for an error.
116 * 118 *
119 * Context: Any context.
117 * Return: The current errseq value. 120 * Return: The current errseq value.
118 */ 121 */
119errseq_t errseq_sample(errseq_t *eseq) 122errseq_t errseq_sample(errseq_t *eseq)
120{ 123{
121 errseq_t old = READ_ONCE(*eseq); 124 errseq_t old = READ_ONCE(*eseq);
122 errseq_t new = old;
123 125
124 /* 126 /* If nobody has seen this error yet, then we can be the first. */
125 * For the common case of no errors ever having been set, we can skip 127 if (!(old & ERRSEQ_SEEN))
126 * marking the SEEN bit. Once an error has been set, the value will 128 old = 0;
127 * never go back to zero. 129 return old;
128 */
129 if (old != 0) {
130 new |= ERRSEQ_SEEN;
131 if (old != new)
132 cmpxchg(eseq, old, new);
133 }
134 return new;
135} 130}
136EXPORT_SYMBOL(errseq_sample); 131EXPORT_SYMBOL(errseq_sample);
137 132
diff --git a/lib/kobject.c b/lib/kobject.c
index e1d1f290bf35..18989b5b3b56 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -233,13 +233,12 @@ static int kobject_add_internal(struct kobject *kobj)
233 233
234 /* be noisy on error issues */ 234 /* be noisy on error issues */
235 if (error == -EEXIST) 235 if (error == -EEXIST)
236 WARN(1, 236 pr_err("%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n",
237 "%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n", 237 __func__, kobject_name(kobj));
238 __func__, kobject_name(kobj));
239 else 238 else
240 WARN(1, "%s failed for %s (error: %d parent: %s)\n", 239 pr_err("%s failed for %s (error: %d parent: %s)\n",
241 __func__, kobject_name(kobj), error, 240 __func__, kobject_name(kobj), error,
242 parent ? kobject_name(parent) : "'none'"); 241 parent ? kobject_name(parent) : "'none'");
243 } else 242 } else
244 kobj->state_in_sysfs = 1; 243 kobj->state_in_sysfs = 1;
245 244
diff --git a/lib/textsearch.c b/lib/textsearch.c
index 0b79908dfe89..5939549c0e7b 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -10,7 +10,10 @@
10 * Pablo Neira Ayuso <pablo@netfilter.org> 10 * Pablo Neira Ayuso <pablo@netfilter.org>
11 * 11 *
12 * ========================================================================== 12 * ==========================================================================
13 * 13 */
14
15/**
16 * DOC: ts_intro
14 * INTRODUCTION 17 * INTRODUCTION
15 * 18 *
16 * The textsearch infrastructure provides text searching facilities for 19 * The textsearch infrastructure provides text searching facilities for
@@ -19,7 +22,9 @@
19 * 22 *
20 * ARCHITECTURE 23 * ARCHITECTURE
21 * 24 *
22 * User 25 * .. code-block:: none
26 *
27 * User
23 * +----------------+ 28 * +----------------+
24 * | finish()|<--------------(6)-----------------+ 29 * | finish()|<--------------(6)-----------------+
25 * |get_next_block()|<--------------(5)---------------+ | 30 * |get_next_block()|<--------------(5)---------------+ |
@@ -33,21 +38,21 @@
33 * | (3)|----->| find()/next() |-----------+ | 38 * | (3)|----->| find()/next() |-----------+ |
34 * | (7)|----->| destroy() |----------------------+ 39 * | (7)|----->| destroy() |----------------------+
35 * +----------------+ +---------------+ 40 * +----------------+ +---------------+
36 * 41 *
37 * (1) User configures a search by calling _prepare() specifying the 42 * (1) User configures a search by calling textsearch_prepare() specifying
38 * search parameters such as the pattern and algorithm name. 43 * the search parameters such as the pattern and algorithm name.
39 * (2) Core requests the algorithm to allocate and initialize a search 44 * (2) Core requests the algorithm to allocate and initialize a search
40 * configuration according to the specified parameters. 45 * configuration according to the specified parameters.
41 * (3) User starts the search(es) by calling _find() or _next() to 46 * (3) User starts the search(es) by calling textsearch_find() or
42 * fetch subsequent occurrences. A state variable is provided 47 * textsearch_next() to fetch subsequent occurrences. A state variable
43 * to the algorithm to store persistent variables. 48 * is provided to the algorithm to store persistent variables.
44 * (4) Core eventually resets the search offset and forwards the find() 49 * (4) Core eventually resets the search offset and forwards the find()
45 * request to the algorithm. 50 * request to the algorithm.
46 * (5) Algorithm calls get_next_block() provided by the user continuously 51 * (5) Algorithm calls get_next_block() provided by the user continuously
47 * to fetch the data to be searched in block by block. 52 * to fetch the data to be searched in block by block.
48 * (6) Algorithm invokes finish() after the last call to get_next_block 53 * (6) Algorithm invokes finish() after the last call to get_next_block
49 * to clean up any leftovers from get_next_block. (Optional) 54 * to clean up any leftovers from get_next_block. (Optional)
50 * (7) User destroys the configuration by calling _destroy(). 55 * (7) User destroys the configuration by calling textsearch_destroy().
51 * (8) Core notifies the algorithm to destroy algorithm specific 56 * (8) Core notifies the algorithm to destroy algorithm specific
52 * allocations. (Optional) 57 * allocations. (Optional)
53 * 58 *
@@ -62,9 +67,10 @@
62 * amount of times and even in parallel as long as a separate struct 67 * amount of times and even in parallel as long as a separate struct
63 * ts_state variable is provided to every instance. 68 * ts_state variable is provided to every instance.
64 * 69 *
65 * The actual search is performed by either calling textsearch_find_- 70 * The actual search is performed by either calling
66 * continuous() for linear data or by providing an own get_next_block() 71 * textsearch_find_continuous() for linear data or by providing
67 * implementation and calling textsearch_find(). Both functions return 72 * an own get_next_block() implementation and
73 * calling textsearch_find(). Both functions return
68 * the position of the first occurrence of the pattern or UINT_MAX if 74 * the position of the first occurrence of the pattern or UINT_MAX if
69 * no match was found. Subsequent occurrences can be found by calling 75 * no match was found. Subsequent occurrences can be found by calling
70 * textsearch_next() regardless of the linearity of the data. 76 * textsearch_next() regardless of the linearity of the data.
@@ -72,7 +78,7 @@
72 * Once you're done using a configuration it must be given back via 78 * Once you're done using a configuration it must be given back via
73 * textsearch_destroy. 79 * textsearch_destroy.
74 * 80 *
75 * EXAMPLE 81 * EXAMPLE::
76 * 82 *
77 * int pos; 83 * int pos;
78 * struct ts_config *conf; 84 * struct ts_config *conf;
@@ -87,13 +93,13 @@
87 * goto errout; 93 * goto errout;
88 * } 94 * }
89 * 95 *
90 * pos = textsearch_find_continuous(conf, &state, example, strlen(example)); 96 * pos = textsearch_find_continuous(conf, \&state, example, strlen(example));
91 * if (pos != UINT_MAX) 97 * if (pos != UINT_MAX)
92 * panic("Oh my god, dancing chickens at %d\n", pos); 98 * panic("Oh my god, dancing chickens at \%d\n", pos);
93 * 99 *
94 * textsearch_destroy(conf); 100 * textsearch_destroy(conf);
95 * ==========================================================================
96 */ 101 */
102/* ========================================================================== */
97 103
98#include <linux/module.h> 104#include <linux/module.h>
99#include <linux/types.h> 105#include <linux/types.h>
@@ -225,7 +231,7 @@ static unsigned int get_linear_data(unsigned int consumed, const u8 **dst,
225 * 231 *
226 * Returns the position of first occurrence of the pattern or 232 * Returns the position of first occurrence of the pattern or
227 * %UINT_MAX if no occurrence was found. 233 * %UINT_MAX if no occurrence was found.
228 */ 234 */
229unsigned int textsearch_find_continuous(struct ts_config *conf, 235unsigned int textsearch_find_continuous(struct ts_config *conf,
230 struct ts_state *state, 236 struct ts_state *state,
231 const void *data, unsigned int len) 237 const void *data, unsigned int len)
diff --git a/mm/filemap.c b/mm/filemap.c
index 9276bdb2343c..0604cb02e6f3 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -786,7 +786,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
786 VM_BUG_ON_PAGE(!PageLocked(new), new); 786 VM_BUG_ON_PAGE(!PageLocked(new), new);
787 VM_BUG_ON_PAGE(new->mapping, new); 787 VM_BUG_ON_PAGE(new->mapping, new);
788 788
789 error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); 789 error = radix_tree_preload(gfp_mask & GFP_RECLAIM_MASK);
790 if (!error) { 790 if (!error) {
791 struct address_space *mapping = old->mapping; 791 struct address_space *mapping = old->mapping;
792 void (*freepage)(struct page *); 792 void (*freepage)(struct page *);
@@ -842,7 +842,7 @@ static int __add_to_page_cache_locked(struct page *page,
842 return error; 842 return error;
843 } 843 }
844 844
845 error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM); 845 error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);
846 if (error) { 846 if (error) {
847 if (!huge) 847 if (!huge)
848 mem_cgroup_cancel_charge(page, memcg, false); 848 mem_cgroup_cancel_charge(page, memcg, false);
@@ -1585,8 +1585,7 @@ no_page:
1585 if (fgp_flags & FGP_ACCESSED) 1585 if (fgp_flags & FGP_ACCESSED)
1586 __SetPageReferenced(page); 1586 __SetPageReferenced(page);
1587 1587
1588 err = add_to_page_cache_lru(page, mapping, offset, 1588 err = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
1589 gfp_mask & GFP_RECLAIM_MASK);
1590 if (unlikely(err)) { 1589 if (unlikely(err)) {
1591 put_page(page); 1590 put_page(page);
1592 page = NULL; 1591 page = NULL;
@@ -2387,7 +2386,7 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
2387 if (!page) 2386 if (!page)
2388 return -ENOMEM; 2387 return -ENOMEM;
2389 2388
2390 ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL); 2389 ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
2391 if (ret == 0) 2390 if (ret == 0)
2392 ret = mapping->a_ops->readpage(file, page); 2391 ret = mapping->a_ops->readpage(file, page);
2393 else if (ret == -EEXIST) 2392 else if (ret == -EEXIST)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 14ed6ee5e02f..a3a1815f8e11 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2925,7 +2925,10 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
2925 pmde = maybe_pmd_mkwrite(pmde, vma); 2925 pmde = maybe_pmd_mkwrite(pmde, vma);
2926 2926
2927 flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE); 2927 flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
2928 page_add_anon_rmap(new, vma, mmun_start, true); 2928 if (PageAnon(new))
2929 page_add_anon_rmap(new, vma, mmun_start, true);
2930 else
2931 page_add_file_rmap(new, true);
2929 set_pmd_at(mm, mmun_start, pvmw->pmd, pmde); 2932 set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
2930 if (vma->vm_flags & VM_LOCKED) 2933 if (vma->vm_flags & VM_LOCKED)
2931 mlock_vma_page(new); 2934 mlock_vma_page(new);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e074f7c637aa..2bd3df3d101a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2192,7 +2192,7 @@ static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2192{ 2192{
2193 struct memcg_kmem_cache_create_work *cw; 2193 struct memcg_kmem_cache_create_work *cw;
2194 2194
2195 cw = kmalloc(sizeof(*cw), GFP_NOWAIT); 2195 cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
2196 if (!cw) 2196 if (!cw)
2197 return; 2197 return;
2198 2198
diff --git a/mm/migrate.c b/mm/migrate.c
index f65dd69e1fd1..568433023831 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -472,7 +472,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
472 pslot = radix_tree_lookup_slot(&mapping->i_pages, 472 pslot = radix_tree_lookup_slot(&mapping->i_pages,
473 page_index(page)); 473 page_index(page));
474 474
475 expected_count += 1 + page_has_private(page); 475 expected_count += hpage_nr_pages(page) + page_has_private(page);
476 if (page_count(page) != expected_count || 476 if (page_count(page) != expected_count ||
477 radix_tree_deref_slot_protected(pslot, 477 radix_tree_deref_slot_protected(pslot,
478 &mapping->i_pages.xa_lock) != page) { 478 &mapping->i_pages.xa_lock) != page) {
@@ -505,7 +505,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
505 */ 505 */
506 newpage->index = page->index; 506 newpage->index = page->index;
507 newpage->mapping = page->mapping; 507 newpage->mapping = page->mapping;
508 get_page(newpage); /* add cache reference */ 508 page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
509 if (PageSwapBacked(page)) { 509 if (PageSwapBacked(page)) {
510 __SetPageSwapBacked(newpage); 510 __SetPageSwapBacked(newpage);
511 if (PageSwapCache(page)) { 511 if (PageSwapCache(page)) {
@@ -524,13 +524,26 @@ int migrate_page_move_mapping(struct address_space *mapping,
524 } 524 }
525 525
526 radix_tree_replace_slot(&mapping->i_pages, pslot, newpage); 526 radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);
527 if (PageTransHuge(page)) {
528 int i;
529 int index = page_index(page);
530
531 for (i = 0; i < HPAGE_PMD_NR; i++) {
532 pslot = radix_tree_lookup_slot(&mapping->i_pages,
533 index + i);
534 radix_tree_replace_slot(&mapping->i_pages, pslot,
535 newpage + i);
536 }
537 } else {
538 radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);
539 }
527 540
528 /* 541 /*
529 * Drop cache reference from old page by unfreezing 542 * Drop cache reference from old page by unfreezing
530 * to one less reference. 543 * to one less reference.
531 * We know this isn't the last reference. 544 * We know this isn't the last reference.
532 */ 545 */
533 page_ref_unfreeze(page, expected_count - 1); 546 page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));
534 547
535 xa_unlock(&mapping->i_pages); 548 xa_unlock(&mapping->i_pages);
536 /* Leave irq disabled to prevent preemption while updating stats */ 549 /* Leave irq disabled to prevent preemption while updating stats */
@@ -1622,6 +1635,9 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1622 current_node = NUMA_NO_NODE; 1635 current_node = NUMA_NO_NODE;
1623 } 1636 }
1624out_flush: 1637out_flush:
1638 if (list_empty(&pagelist))
1639 return err;
1640
1625 /* Make sure we do not overwrite the existing error */ 1641 /* Make sure we do not overwrite the existing error */
1626 err1 = do_move_pages_to_node(mm, &pagelist, current_node); 1642 err1 = do_move_pages_to_node(mm, &pagelist, current_node);
1627 if (!err1) 1643 if (!err1)
diff --git a/mm/mmap.c b/mm/mmap.c
index 188f195883b9..9d5968d1e8e3 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -100,11 +100,20 @@ pgprot_t protection_map[16] __ro_after_init = {
100 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 100 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
101}; 101};
102 102
103#ifndef CONFIG_ARCH_HAS_FILTER_PGPROT
104static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
105{
106 return prot;
107}
108#endif
109
103pgprot_t vm_get_page_prot(unsigned long vm_flags) 110pgprot_t vm_get_page_prot(unsigned long vm_flags)
104{ 111{
105 return __pgprot(pgprot_val(protection_map[vm_flags & 112 pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
106 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) | 113 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
107 pgprot_val(arch_vm_get_page_prot(vm_flags))); 114 pgprot_val(arch_vm_get_page_prot(vm_flags)));
115
116 return arch_filter_pgprot(ret);
108} 117}
109EXPORT_SYMBOL(vm_get_page_prot); 118EXPORT_SYMBOL(vm_get_page_prot);
110 119
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 5c1a3279e63f..337c6afb3345 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2502,13 +2502,13 @@ void account_page_redirty(struct page *page)
2502 if (mapping && mapping_cap_account_dirty(mapping)) { 2502 if (mapping && mapping_cap_account_dirty(mapping)) {
2503 struct inode *inode = mapping->host; 2503 struct inode *inode = mapping->host;
2504 struct bdi_writeback *wb; 2504 struct bdi_writeback *wb;
2505 bool locked; 2505 struct wb_lock_cookie cookie = {};
2506 2506
2507 wb = unlocked_inode_to_wb_begin(inode, &locked); 2507 wb = unlocked_inode_to_wb_begin(inode, &cookie);
2508 current->nr_dirtied--; 2508 current->nr_dirtied--;
2509 dec_node_page_state(page, NR_DIRTIED); 2509 dec_node_page_state(page, NR_DIRTIED);
2510 dec_wb_stat(wb, WB_DIRTIED); 2510 dec_wb_stat(wb, WB_DIRTIED);
2511 unlocked_inode_to_wb_end(inode, locked); 2511 unlocked_inode_to_wb_end(inode, &cookie);
2512 } 2512 }
2513} 2513}
2514EXPORT_SYMBOL(account_page_redirty); 2514EXPORT_SYMBOL(account_page_redirty);
@@ -2614,15 +2614,15 @@ void __cancel_dirty_page(struct page *page)
2614 if (mapping_cap_account_dirty(mapping)) { 2614 if (mapping_cap_account_dirty(mapping)) {
2615 struct inode *inode = mapping->host; 2615 struct inode *inode = mapping->host;
2616 struct bdi_writeback *wb; 2616 struct bdi_writeback *wb;
2617 bool locked; 2617 struct wb_lock_cookie cookie = {};
2618 2618
2619 lock_page_memcg(page); 2619 lock_page_memcg(page);
2620 wb = unlocked_inode_to_wb_begin(inode, &locked); 2620 wb = unlocked_inode_to_wb_begin(inode, &cookie);
2621 2621
2622 if (TestClearPageDirty(page)) 2622 if (TestClearPageDirty(page))
2623 account_page_cleaned(page, mapping, wb); 2623 account_page_cleaned(page, mapping, wb);
2624 2624
2625 unlocked_inode_to_wb_end(inode, locked); 2625 unlocked_inode_to_wb_end(inode, &cookie);
2626 unlock_page_memcg(page); 2626 unlock_page_memcg(page);
2627 } else { 2627 } else {
2628 ClearPageDirty(page); 2628 ClearPageDirty(page);
@@ -2654,7 +2654,7 @@ int clear_page_dirty_for_io(struct page *page)
2654 if (mapping && mapping_cap_account_dirty(mapping)) { 2654 if (mapping && mapping_cap_account_dirty(mapping)) {
2655 struct inode *inode = mapping->host; 2655 struct inode *inode = mapping->host;
2656 struct bdi_writeback *wb; 2656 struct bdi_writeback *wb;
2657 bool locked; 2657 struct wb_lock_cookie cookie = {};
2658 2658
2659 /* 2659 /*
2660 * Yes, Virginia, this is indeed insane. 2660 * Yes, Virginia, this is indeed insane.
@@ -2691,14 +2691,14 @@ int clear_page_dirty_for_io(struct page *page)
2691 * always locked coming in here, so we get the desired 2691 * always locked coming in here, so we get the desired
2692 * exclusion. 2692 * exclusion.
2693 */ 2693 */
2694 wb = unlocked_inode_to_wb_begin(inode, &locked); 2694 wb = unlocked_inode_to_wb_begin(inode, &cookie);
2695 if (TestClearPageDirty(page)) { 2695 if (TestClearPageDirty(page)) {
2696 dec_lruvec_page_state(page, NR_FILE_DIRTY); 2696 dec_lruvec_page_state(page, NR_FILE_DIRTY);
2697 dec_zone_page_state(page, NR_ZONE_WRITE_PENDING); 2697 dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
2698 dec_wb_stat(wb, WB_RECLAIMABLE); 2698 dec_wb_stat(wb, WB_RECLAIMABLE);
2699 ret = 1; 2699 ret = 1;
2700 } 2700 }
2701 unlocked_inode_to_wb_end(inode, locked); 2701 unlocked_inode_to_wb_end(inode, &cookie);
2702 return ret; 2702 return ret;
2703 } 2703 }
2704 return TestClearPageDirty(page); 2704 return TestClearPageDirty(page);
diff --git a/mm/rmap.c b/mm/rmap.c
index f0dd4e4565bc..8d5337fed37b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1374,9 +1374,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1374 if (!pvmw.pte && (flags & TTU_MIGRATION)) { 1374 if (!pvmw.pte && (flags & TTU_MIGRATION)) {
1375 VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page); 1375 VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
1376 1376
1377 if (!PageAnon(page))
1378 continue;
1379
1380 set_pmd_migration_entry(&pvmw, page); 1377 set_pmd_migration_entry(&pvmw, page);
1381 continue; 1378 continue;
1382 } 1379 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8b920ce3ae02..9b697323a88c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -303,7 +303,7 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone
303/* 303/*
304 * Add a shrinker callback to be called from the vm. 304 * Add a shrinker callback to be called from the vm.
305 */ 305 */
306int register_shrinker(struct shrinker *shrinker) 306int prealloc_shrinker(struct shrinker *shrinker)
307{ 307{
308 size_t size = sizeof(*shrinker->nr_deferred); 308 size_t size = sizeof(*shrinker->nr_deferred);
309 309
@@ -313,10 +313,29 @@ int register_shrinker(struct shrinker *shrinker)
313 shrinker->nr_deferred = kzalloc(size, GFP_KERNEL); 313 shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
314 if (!shrinker->nr_deferred) 314 if (!shrinker->nr_deferred)
315 return -ENOMEM; 315 return -ENOMEM;
316 return 0;
317}
318
319void free_prealloced_shrinker(struct shrinker *shrinker)
320{
321 kfree(shrinker->nr_deferred);
322 shrinker->nr_deferred = NULL;
323}
316 324
325void register_shrinker_prepared(struct shrinker *shrinker)
326{
317 down_write(&shrinker_rwsem); 327 down_write(&shrinker_rwsem);
318 list_add_tail(&shrinker->list, &shrinker_list); 328 list_add_tail(&shrinker->list, &shrinker_list);
319 up_write(&shrinker_rwsem); 329 up_write(&shrinker_rwsem);
330}
331
332int register_shrinker(struct shrinker *shrinker)
333{
334 int err = prealloc_shrinker(shrinker);
335
336 if (err)
337 return err;
338 register_shrinker_prepared(shrinker);
320 return 0; 339 return 0;
321} 340}
322EXPORT_SYMBOL(register_shrinker); 341EXPORT_SYMBOL(register_shrinker);
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 032e0fe45940..28a4c3490359 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1825,13 +1825,14 @@ static int compat_table_info(const struct ebt_table_info *info,
1825{ 1825{
1826 unsigned int size = info->entries_size; 1826 unsigned int size = info->entries_size;
1827 const void *entries = info->entries; 1827 const void *entries = info->entries;
1828 int ret;
1829 1828
1830 newinfo->entries_size = size; 1829 newinfo->entries_size = size;
1831 1830 if (info->nentries) {
1832 ret = xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries); 1831 int ret = xt_compat_init_offsets(NFPROTO_BRIDGE,
1833 if (ret) 1832 info->nentries);
1834 return ret; 1833 if (ret)
1834 return ret;
1835 }
1835 1836
1836 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info, 1837 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1837 entries, newinfo); 1838 entries, newinfo);
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 53ecda10b790..13e2ae6be620 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -174,7 +174,7 @@ static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow,
174 flow == CAIF_CTRLCMD_DEINIT_RSP ? "CLOSE/DEINIT" : 174 flow == CAIF_CTRLCMD_DEINIT_RSP ? "CLOSE/DEINIT" :
175 flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "OPEN_FAIL" : 175 flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "OPEN_FAIL" :
176 flow == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ? 176 flow == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ?
177 "REMOTE_SHUTDOWN" : "UKNOWN CTRL COMMAND"); 177 "REMOTE_SHUTDOWN" : "UNKNOWN CTRL COMMAND");
178 178
179 179
180 180
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index fcb40c12b1f8..3b3d33ea9ed8 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -2569,6 +2569,11 @@ static int try_write(struct ceph_connection *con)
2569 int ret = 1; 2569 int ret = 1;
2570 2570
2571 dout("try_write start %p state %lu\n", con, con->state); 2571 dout("try_write start %p state %lu\n", con, con->state);
2572 if (con->state != CON_STATE_PREOPEN &&
2573 con->state != CON_STATE_CONNECTING &&
2574 con->state != CON_STATE_NEGOTIATING &&
2575 con->state != CON_STATE_OPEN)
2576 return 0;
2572 2577
2573more: 2578more:
2574 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes); 2579 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
@@ -2594,6 +2599,8 @@ more:
2594 } 2599 }
2595 2600
2596more_kvec: 2601more_kvec:
2602 BUG_ON(!con->sock);
2603
2597 /* kvec data queued? */ 2604 /* kvec data queued? */
2598 if (con->out_kvec_left) { 2605 if (con->out_kvec_left) {
2599 ret = write_partial_kvec(con); 2606 ret = write_partial_kvec(con);
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index b3dac24412d3..21ac6e3b96bb 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -209,6 +209,14 @@ static void reopen_session(struct ceph_mon_client *monc)
209 __open_session(monc); 209 __open_session(monc);
210} 210}
211 211
212static void un_backoff(struct ceph_mon_client *monc)
213{
214 monc->hunt_mult /= 2; /* reduce by 50% */
215 if (monc->hunt_mult < 1)
216 monc->hunt_mult = 1;
217 dout("%s hunt_mult now %d\n", __func__, monc->hunt_mult);
218}
219
212/* 220/*
213 * Reschedule delayed work timer. 221 * Reschedule delayed work timer.
214 */ 222 */
@@ -963,6 +971,7 @@ static void delayed_work(struct work_struct *work)
963 if (!monc->hunting) { 971 if (!monc->hunting) {
964 ceph_con_keepalive(&monc->con); 972 ceph_con_keepalive(&monc->con);
965 __validate_auth(monc); 973 __validate_auth(monc);
974 un_backoff(monc);
966 } 975 }
967 976
968 if (is_auth && 977 if (is_auth &&
@@ -1123,9 +1132,8 @@ static void finish_hunting(struct ceph_mon_client *monc)
1123 dout("%s found mon%d\n", __func__, monc->cur_mon); 1132 dout("%s found mon%d\n", __func__, monc->cur_mon);
1124 monc->hunting = false; 1133 monc->hunting = false;
1125 monc->had_a_connection = true; 1134 monc->had_a_connection = true;
1126 monc->hunt_mult /= 2; /* reduce by 50% */ 1135 un_backoff(monc);
1127 if (monc->hunt_mult < 1) 1136 __schedule_delayed(monc);
1128 monc->hunt_mult = 1;
1129 } 1137 }
1130} 1138}
1131 1139
diff --git a/net/core/dev.c b/net/core/dev.c
index 969462ebb296..af0558b00c6c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2969,7 +2969,7 @@ netdev_features_t passthru_features_check(struct sk_buff *skb,
2969} 2969}
2970EXPORT_SYMBOL(passthru_features_check); 2970EXPORT_SYMBOL(passthru_features_check);
2971 2971
2972static netdev_features_t dflt_features_check(const struct sk_buff *skb, 2972static netdev_features_t dflt_features_check(struct sk_buff *skb,
2973 struct net_device *dev, 2973 struct net_device *dev,
2974 netdev_features_t features) 2974 netdev_features_t features)
2975{ 2975{
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index e3e6a3e2ca22..d884d8f5f0e5 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -839,7 +839,7 @@ void dev_mc_flush(struct net_device *dev)
839EXPORT_SYMBOL(dev_mc_flush); 839EXPORT_SYMBOL(dev_mc_flush);
840 840
841/** 841/**
842 * dev_mc_flush - Init multicast address list 842 * dev_mc_init - Init multicast address list
843 * @dev: device 843 * @dev: device
844 * 844 *
845 * Init multicast address list. 845 * Init multicast address list.
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 7b7a14abba28..ce519861be59 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -55,7 +55,8 @@ static void neigh_timer_handler(struct timer_list *t);
55static void __neigh_notify(struct neighbour *n, int type, int flags, 55static void __neigh_notify(struct neighbour *n, int type, int flags,
56 u32 pid); 56 u32 pid);
57static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid); 57static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
58static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev); 58static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
59 struct net_device *dev);
59 60
60#ifdef CONFIG_PROC_FS 61#ifdef CONFIG_PROC_FS
61static const struct file_operations neigh_stat_seq_fops; 62static const struct file_operations neigh_stat_seq_fops;
@@ -291,8 +292,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
291{ 292{
292 write_lock_bh(&tbl->lock); 293 write_lock_bh(&tbl->lock);
293 neigh_flush_dev(tbl, dev); 294 neigh_flush_dev(tbl, dev);
294 pneigh_ifdown(tbl, dev); 295 pneigh_ifdown_and_unlock(tbl, dev);
295 write_unlock_bh(&tbl->lock);
296 296
297 del_timer_sync(&tbl->proxy_timer); 297 del_timer_sync(&tbl->proxy_timer);
298 pneigh_queue_purge(&tbl->proxy_queue); 298 pneigh_queue_purge(&tbl->proxy_queue);
@@ -681,9 +681,10 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
681 return -ENOENT; 681 return -ENOENT;
682} 682}
683 683
684static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev) 684static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
685 struct net_device *dev)
685{ 686{
686 struct pneigh_entry *n, **np; 687 struct pneigh_entry *n, **np, *freelist = NULL;
687 u32 h; 688 u32 h;
688 689
689 for (h = 0; h <= PNEIGH_HASHMASK; h++) { 690 for (h = 0; h <= PNEIGH_HASHMASK; h++) {
@@ -691,16 +692,23 @@ static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
691 while ((n = *np) != NULL) { 692 while ((n = *np) != NULL) {
692 if (!dev || n->dev == dev) { 693 if (!dev || n->dev == dev) {
693 *np = n->next; 694 *np = n->next;
694 if (tbl->pdestructor) 695 n->next = freelist;
695 tbl->pdestructor(n); 696 freelist = n;
696 if (n->dev)
697 dev_put(n->dev);
698 kfree(n);
699 continue; 697 continue;
700 } 698 }
701 np = &n->next; 699 np = &n->next;
702 } 700 }
703 } 701 }
702 write_unlock_bh(&tbl->lock);
703 while ((n = freelist)) {
704 freelist = n->next;
705 n->next = NULL;
706 if (tbl->pdestructor)
707 tbl->pdestructor(n);
708 if (n->dev)
709 dev_put(n->dev);
710 kfree(n);
711 }
704 return -ENOENT; 712 return -ENOENT;
705} 713}
706 714
@@ -2323,12 +2331,16 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2323 2331
2324 err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL, NULL); 2332 err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL, NULL);
2325 if (!err) { 2333 if (!err) {
2326 if (tb[NDA_IFINDEX]) 2334 if (tb[NDA_IFINDEX]) {
2335 if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
2336 return -EINVAL;
2327 filter_idx = nla_get_u32(tb[NDA_IFINDEX]); 2337 filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
2328 2338 }
2329 if (tb[NDA_MASTER]) 2339 if (tb[NDA_MASTER]) {
2340 if (nla_len(tb[NDA_MASTER]) != sizeof(u32))
2341 return -EINVAL;
2330 filter_master_idx = nla_get_u32(tb[NDA_MASTER]); 2342 filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
2331 2343 }
2332 if (filter_idx || filter_master_idx) 2344 if (filter_idx || filter_master_idx)
2333 flags |= NLM_F_DUMP_FILTERED; 2345 flags |= NLM_F_DUMP_FILTERED;
2334 } 2346 }
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 8396705deffc..40c851693f77 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -91,9 +91,9 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
91 91
92 next_opt = memchr(opt, '#', end - opt) ?: end; 92 next_opt = memchr(opt, '#', end - opt) ?: end;
93 opt_len = next_opt - opt; 93 opt_len = next_opt - opt;
94 if (!opt_len) { 94 if (opt_len <= 0 || opt_len > 128) {
95 printk(KERN_WARNING 95 pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
96 "Empty option to dns_resolver key\n"); 96 opt_len);
97 return -EINVAL; 97 return -EINVAL;
98 } 98 }
99 99
@@ -127,10 +127,8 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
127 } 127 }
128 128
129 bad_option_value: 129 bad_option_value:
130 printk(KERN_WARNING 130 pr_warn_ratelimited("Option '%*.*s' to dns_resolver key: bad/missing value\n",
131 "Option '%*.*s' to dns_resolver key:" 131 opt_nlen, opt_nlen, opt);
132 " bad/missing value\n",
133 opt_nlen, opt_nlen, opt);
134 return -EINVAL; 132 return -EINVAL;
135 } while (opt = next_opt + 1, opt < end); 133 } while (opt = next_opt + 1, opt < end);
136 } 134 }
diff --git a/net/ife/ife.c b/net/ife/ife.c
index 7d1ec76e7f43..13bbf8cb6a39 100644
--- a/net/ife/ife.c
+++ b/net/ife/ife.c
@@ -69,6 +69,9 @@ void *ife_decode(struct sk_buff *skb, u16 *metalen)
69 int total_pull; 69 int total_pull;
70 u16 ifehdrln; 70 u16 ifehdrln;
71 71
72 if (!pskb_may_pull(skb, skb->dev->hard_header_len + IFE_METAHDRLEN))
73 return NULL;
74
72 ifehdr = (struct ifeheadr *) (skb->data + skb->dev->hard_header_len); 75 ifehdr = (struct ifeheadr *) (skb->data + skb->dev->hard_header_len);
73 ifehdrln = ntohs(ifehdr->metalen); 76 ifehdrln = ntohs(ifehdr->metalen);
74 total_pull = skb->dev->hard_header_len + ifehdrln; 77 total_pull = skb->dev->hard_header_len + ifehdrln;
@@ -92,12 +95,43 @@ struct meta_tlvhdr {
92 __be16 len; 95 __be16 len;
93}; 96};
94 97
98static bool __ife_tlv_meta_valid(const unsigned char *skbdata,
99 const unsigned char *ifehdr_end)
100{
101 const struct meta_tlvhdr *tlv;
102 u16 tlvlen;
103
104 if (unlikely(skbdata + sizeof(*tlv) > ifehdr_end))
105 return false;
106
107 tlv = (const struct meta_tlvhdr *)skbdata;
108 tlvlen = ntohs(tlv->len);
109
110 /* tlv length field is inc header, check on minimum */
111 if (tlvlen < NLA_HDRLEN)
112 return false;
113
114 /* overflow by NLA_ALIGN check */
115 if (NLA_ALIGN(tlvlen) < tlvlen)
116 return false;
117
118 if (unlikely(skbdata + NLA_ALIGN(tlvlen) > ifehdr_end))
119 return false;
120
121 return true;
122}
123
95/* Caller takes care of presenting data in network order 124/* Caller takes care of presenting data in network order
96 */ 125 */
97void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, u16 *totlen) 126void *ife_tlv_meta_decode(void *skbdata, const void *ifehdr_end, u16 *attrtype,
127 u16 *dlen, u16 *totlen)
98{ 128{
99 struct meta_tlvhdr *tlv = (struct meta_tlvhdr *) skbdata; 129 struct meta_tlvhdr *tlv;
130
131 if (!__ife_tlv_meta_valid(skbdata, ifehdr_end))
132 return NULL;
100 133
134 tlv = (struct meta_tlvhdr *)skbdata;
101 *dlen = ntohs(tlv->len) - NLA_HDRLEN; 135 *dlen = ntohs(tlv->len) - NLA_HDRLEN;
102 *attrtype = ntohs(tlv->type); 136 *attrtype = ntohs(tlv->type);
103 137
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 4c11b810a447..83c73bab2c3d 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1109,6 +1109,10 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
1109 struct ip_options_rcu *opt; 1109 struct ip_options_rcu *opt;
1110 struct rtable *rt; 1110 struct rtable *rt;
1111 1111
1112 rt = *rtp;
1113 if (unlikely(!rt))
1114 return -EFAULT;
1115
1112 /* 1116 /*
1113 * setup for corking. 1117 * setup for corking.
1114 */ 1118 */
@@ -1124,9 +1128,7 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
1124 cork->flags |= IPCORK_OPT; 1128 cork->flags |= IPCORK_OPT;
1125 cork->addr = ipc->addr; 1129 cork->addr = ipc->addr;
1126 } 1130 }
1127 rt = *rtp; 1131
1128 if (unlikely(!rt))
1129 return -EFAULT;
1130 /* 1132 /*
1131 * We steal reference to this route, caller should not release it 1133 * We steal reference to this route, caller should not release it
1132 */ 1134 */
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index bccc4c270087..9ce1c726185e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2368,6 +2368,7 @@ void tcp_write_queue_purge(struct sock *sk)
2368 INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue); 2368 INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
2369 sk_mem_reclaim(sk); 2369 sk_mem_reclaim(sk);
2370 tcp_clear_all_retrans_hints(tcp_sk(sk)); 2370 tcp_clear_all_retrans_hints(tcp_sk(sk));
2371 tcp_sk(sk)->packets_out = 0;
2371} 2372}
2372 2373
2373int tcp_disconnect(struct sock *sk, int flags) 2374int tcp_disconnect(struct sock *sk, int flags)
@@ -2417,7 +2418,6 @@ int tcp_disconnect(struct sock *sk, int flags)
2417 icsk->icsk_backoff = 0; 2418 icsk->icsk_backoff = 0;
2418 tp->snd_cwnd = 2; 2419 tp->snd_cwnd = 2;
2419 icsk->icsk_probes_out = 0; 2420 icsk->icsk_probes_out = 0;
2420 tp->packets_out = 0;
2421 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 2421 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
2422 tp->snd_cwnd_cnt = 0; 2422 tp->snd_cwnd_cnt = 0;
2423 tp->window_clamp = 0; 2423 tp->window_clamp = 0;
@@ -2813,8 +2813,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2813#ifdef CONFIG_TCP_MD5SIG 2813#ifdef CONFIG_TCP_MD5SIG
2814 case TCP_MD5SIG: 2814 case TCP_MD5SIG:
2815 case TCP_MD5SIG_EXT: 2815 case TCP_MD5SIG_EXT:
2816 /* Read the IP->Key mappings from userspace */ 2816 if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
2817 err = tp->af_specific->md5_parse(sk, optname, optval, optlen); 2817 err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
2818 else
2819 err = -EINVAL;
2818 break; 2820 break;
2819#endif 2821#endif
2820 case TCP_USER_TIMEOUT: 2822 case TCP_USER_TIMEOUT:
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 367def6ddeda..e51c644484dc 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3868,11 +3868,8 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
3868 int length = (th->doff << 2) - sizeof(*th); 3868 int length = (th->doff << 2) - sizeof(*th);
3869 const u8 *ptr = (const u8 *)(th + 1); 3869 const u8 *ptr = (const u8 *)(th + 1);
3870 3870
3871 /* If the TCP option is too short, we can short cut */ 3871 /* If not enough data remaining, we can short cut */
3872 if (length < TCPOLEN_MD5SIG) 3872 while (length >= TCPOLEN_MD5SIG) {
3873 return NULL;
3874
3875 while (length > 0) {
3876 int opcode = *ptr++; 3873 int opcode = *ptr++;
3877 int opsize; 3874 int opsize;
3878 3875
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index ccbfa83e4bb0..ce77bcc2490c 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -48,6 +48,34 @@ config NFT_CHAIN_ROUTE_IPV6
48 fields such as the source, destination, flowlabel, hop-limit and 48 fields such as the source, destination, flowlabel, hop-limit and
49 the packet mark. 49 the packet mark.
50 50
51if NF_NAT_IPV6
52
53config NFT_CHAIN_NAT_IPV6
54 tristate "IPv6 nf_tables nat chain support"
55 help
56 This option enables the "nat" chain for IPv6 in nf_tables. This
57 chain type is used to perform Network Address Translation (NAT)
58 packet transformations such as the source, destination address and
59 source and destination ports.
60
61config NFT_MASQ_IPV6
62 tristate "IPv6 masquerade support for nf_tables"
63 depends on NFT_MASQ
64 select NF_NAT_MASQUERADE_IPV6
65 help
66 This is the expression that provides IPv4 masquerading support for
67 nf_tables.
68
69config NFT_REDIR_IPV6
70 tristate "IPv6 redirect support for nf_tables"
71 depends on NFT_REDIR
72 select NF_NAT_REDIRECT
73 help
74 This is the expression that provides IPv4 redirect support for
75 nf_tables.
76
77endif # NF_NAT_IPV6
78
51config NFT_REJECT_IPV6 79config NFT_REJECT_IPV6
52 select NF_REJECT_IPV6 80 select NF_REJECT_IPV6
53 default NFT_REJECT 81 default NFT_REJECT
@@ -107,39 +135,12 @@ config NF_NAT_IPV6
107 135
108if NF_NAT_IPV6 136if NF_NAT_IPV6
109 137
110config NFT_CHAIN_NAT_IPV6
111 depends on NF_TABLES_IPV6
112 tristate "IPv6 nf_tables nat chain support"
113 help
114 This option enables the "nat" chain for IPv6 in nf_tables. This
115 chain type is used to perform Network Address Translation (NAT)
116 packet transformations such as the source, destination address and
117 source and destination ports.
118
119config NF_NAT_MASQUERADE_IPV6 138config NF_NAT_MASQUERADE_IPV6
120 tristate "IPv6 masquerade support" 139 tristate "IPv6 masquerade support"
121 help 140 help
122 This is the kernel functionality to provide NAT in the masquerade 141 This is the kernel functionality to provide NAT in the masquerade
123 flavour (automatic source address selection) for IPv6. 142 flavour (automatic source address selection) for IPv6.
124 143
125config NFT_MASQ_IPV6
126 tristate "IPv6 masquerade support for nf_tables"
127 depends on NF_TABLES_IPV6
128 depends on NFT_MASQ
129 select NF_NAT_MASQUERADE_IPV6
130 help
131 This is the expression that provides IPv4 masquerading support for
132 nf_tables.
133
134config NFT_REDIR_IPV6
135 tristate "IPv6 redirect support for nf_tables"
136 depends on NF_TABLES_IPV6
137 depends on NFT_REDIR
138 select NF_NAT_REDIRECT
139 help
140 This is the expression that provides IPv4 redirect support for
141 nf_tables.
142
143endif # NF_NAT_IPV6 144endif # NF_NAT_IPV6
144 145
145config IP6_NF_IPTABLES 146config IP6_NF_IPTABLES
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 49b954d6d0fa..cde7d8251377 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3975,6 +3975,7 @@ void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
3975 3975
3976static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = { 3976static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
3977 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) }, 3977 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
3978 [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
3978 [RTA_OIF] = { .type = NLA_U32 }, 3979 [RTA_OIF] = { .type = NLA_U32 },
3979 [RTA_IIF] = { .type = NLA_U32 }, 3980 [RTA_IIF] = { .type = NLA_U32 },
3980 [RTA_PRIORITY] = { .type = NLA_U32 }, 3981 [RTA_PRIORITY] = { .type = NLA_U32 },
@@ -3986,6 +3987,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
3986 [RTA_EXPIRES] = { .type = NLA_U32 }, 3987 [RTA_EXPIRES] = { .type = NLA_U32 },
3987 [RTA_UID] = { .type = NLA_U32 }, 3988 [RTA_UID] = { .type = NLA_U32 },
3988 [RTA_MARK] = { .type = NLA_U32 }, 3989 [RTA_MARK] = { .type = NLA_U32 },
3990 [RTA_TABLE] = { .type = NLA_U32 },
3989}; 3991};
3990 3992
3991static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, 3993static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index f343e6f0fc95..5fe139484919 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -136,7 +136,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
136 isrh->nexthdr = proto; 136 isrh->nexthdr = proto;
137 137
138 hdr->daddr = isrh->segments[isrh->first_segment]; 138 hdr->daddr = isrh->segments[isrh->first_segment];
139 set_tun_src(net, ip6_dst_idev(dst)->dev, &hdr->daddr, &hdr->saddr); 139 set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr);
140 140
141#ifdef CONFIG_IPV6_SEG6_HMAC 141#ifdef CONFIG_IPV6_SEG6_HMAC
142 if (sr_has_hmac(isrh)) { 142 if (sr_has_hmac(isrh)) {
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 0fbd3ee26165..40261cb68e83 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -183,6 +183,26 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
183} 183}
184EXPORT_SYMBOL_GPL(l2tp_tunnel_get); 184EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
185 185
186struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
187{
188 const struct l2tp_net *pn = l2tp_pernet(net);
189 struct l2tp_tunnel *tunnel;
190 int count = 0;
191
192 rcu_read_lock_bh();
193 list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
194 if (++count > nth) {
195 l2tp_tunnel_inc_refcount(tunnel);
196 rcu_read_unlock_bh();
197 return tunnel;
198 }
199 }
200 rcu_read_unlock_bh();
201
202 return NULL;
203}
204EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth);
205
186/* Lookup a session. A new reference is held on the returned session. */ 206/* Lookup a session. A new reference is held on the returned session. */
187struct l2tp_session *l2tp_session_get(const struct net *net, 207struct l2tp_session *l2tp_session_get(const struct net *net,
188 struct l2tp_tunnel *tunnel, 208 struct l2tp_tunnel *tunnel,
@@ -335,26 +355,6 @@ err_tlock:
335} 355}
336EXPORT_SYMBOL_GPL(l2tp_session_register); 356EXPORT_SYMBOL_GPL(l2tp_session_register);
337 357
338struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth)
339{
340 struct l2tp_net *pn = l2tp_pernet(net);
341 struct l2tp_tunnel *tunnel;
342 int count = 0;
343
344 rcu_read_lock_bh();
345 list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
346 if (++count > nth) {
347 rcu_read_unlock_bh();
348 return tunnel;
349 }
350 }
351
352 rcu_read_unlock_bh();
353
354 return NULL;
355}
356EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth);
357
358/***************************************************************************** 358/*****************************************************************************
359 * Receive data handling 359 * Receive data handling
360 *****************************************************************************/ 360 *****************************************************************************/
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index ba33cbec71eb..c199020f8a8a 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -212,6 +212,8 @@ static inline void *l2tp_session_priv(struct l2tp_session *session)
212} 212}
213 213
214struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id); 214struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id);
215struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth);
216
215void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); 217void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
216 218
217struct l2tp_session *l2tp_session_get(const struct net *net, 219struct l2tp_session *l2tp_session_get(const struct net *net,
@@ -220,7 +222,6 @@ struct l2tp_session *l2tp_session_get(const struct net *net,
220struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth); 222struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth);
221struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net, 223struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
222 const char *ifname); 224 const char *ifname);
223struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth);
224 225
225int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, 226int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
226 u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, 227 u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 72e713da4733..7f1e842ef05a 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -47,7 +47,11 @@ struct l2tp_dfs_seq_data {
47 47
48static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd) 48static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
49{ 49{
50 pd->tunnel = l2tp_tunnel_find_nth(pd->net, pd->tunnel_idx); 50 /* Drop reference taken during previous invocation */
51 if (pd->tunnel)
52 l2tp_tunnel_dec_refcount(pd->tunnel);
53
54 pd->tunnel = l2tp_tunnel_get_nth(pd->net, pd->tunnel_idx);
51 pd->tunnel_idx++; 55 pd->tunnel_idx++;
52} 56}
53 57
@@ -96,7 +100,17 @@ static void *l2tp_dfs_seq_next(struct seq_file *m, void *v, loff_t *pos)
96 100
97static void l2tp_dfs_seq_stop(struct seq_file *p, void *v) 101static void l2tp_dfs_seq_stop(struct seq_file *p, void *v)
98{ 102{
99 /* nothing to do */ 103 struct l2tp_dfs_seq_data *pd = v;
104
105 if (!pd || pd == SEQ_START_TOKEN)
106 return;
107
108 /* Drop reference taken by last invocation of l2tp_dfs_next_tunnel() */
109 if (pd->tunnel) {
110 l2tp_tunnel_dec_refcount(pd->tunnel);
111 pd->tunnel = NULL;
112 pd->session = NULL;
113 }
100} 114}
101 115
102static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v) 116static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index b05dbd9ffcb2..6616c9fd292f 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -487,14 +487,17 @@ static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback
487 struct net *net = sock_net(skb->sk); 487 struct net *net = sock_net(skb->sk);
488 488
489 for (;;) { 489 for (;;) {
490 tunnel = l2tp_tunnel_find_nth(net, ti); 490 tunnel = l2tp_tunnel_get_nth(net, ti);
491 if (tunnel == NULL) 491 if (tunnel == NULL)
492 goto out; 492 goto out;
493 493
494 if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid, 494 if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid,
495 cb->nlh->nlmsg_seq, NLM_F_MULTI, 495 cb->nlh->nlmsg_seq, NLM_F_MULTI,
496 tunnel, L2TP_CMD_TUNNEL_GET) < 0) 496 tunnel, L2TP_CMD_TUNNEL_GET) < 0) {
497 l2tp_tunnel_dec_refcount(tunnel);
497 goto out; 498 goto out;
499 }
500 l2tp_tunnel_dec_refcount(tunnel);
498 501
499 ti++; 502 ti++;
500 } 503 }
@@ -848,7 +851,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
848 851
849 for (;;) { 852 for (;;) {
850 if (tunnel == NULL) { 853 if (tunnel == NULL) {
851 tunnel = l2tp_tunnel_find_nth(net, ti); 854 tunnel = l2tp_tunnel_get_nth(net, ti);
852 if (tunnel == NULL) 855 if (tunnel == NULL)
853 goto out; 856 goto out;
854 } 857 }
@@ -856,6 +859,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
856 session = l2tp_session_get_nth(tunnel, si); 859 session = l2tp_session_get_nth(tunnel, si);
857 if (session == NULL) { 860 if (session == NULL) {
858 ti++; 861 ti++;
862 l2tp_tunnel_dec_refcount(tunnel);
859 tunnel = NULL; 863 tunnel = NULL;
860 si = 0; 864 si = 0;
861 continue; 865 continue;
@@ -865,6 +869,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
865 cb->nlh->nlmsg_seq, NLM_F_MULTI, 869 cb->nlh->nlmsg_seq, NLM_F_MULTI,
866 session, L2TP_CMD_SESSION_GET) < 0) { 870 session, L2TP_CMD_SESSION_GET) < 0) {
867 l2tp_session_dec_refcount(session); 871 l2tp_session_dec_refcount(session);
872 l2tp_tunnel_dec_refcount(tunnel);
868 break; 873 break;
869 } 874 }
870 l2tp_session_dec_refcount(session); 875 l2tp_session_dec_refcount(session);
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 896bbca9bdaa..1fd9e145076a 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -619,6 +619,13 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
619 lock_sock(sk); 619 lock_sock(sk);
620 620
621 error = -EINVAL; 621 error = -EINVAL;
622
623 if (sockaddr_len != sizeof(struct sockaddr_pppol2tp) &&
624 sockaddr_len != sizeof(struct sockaddr_pppol2tpv3) &&
625 sockaddr_len != sizeof(struct sockaddr_pppol2tpin6) &&
626 sockaddr_len != sizeof(struct sockaddr_pppol2tpv3in6))
627 goto end;
628
622 if (sp->sa_protocol != PX_PROTO_OL2TP) 629 if (sp->sa_protocol != PX_PROTO_OL2TP)
623 goto end; 630 goto end;
624 631
@@ -1551,16 +1558,19 @@ struct pppol2tp_seq_data {
1551 1558
1552static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd) 1559static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
1553{ 1560{
1561 /* Drop reference taken during previous invocation */
1562 if (pd->tunnel)
1563 l2tp_tunnel_dec_refcount(pd->tunnel);
1564
1554 for (;;) { 1565 for (;;) {
1555 pd->tunnel = l2tp_tunnel_find_nth(net, pd->tunnel_idx); 1566 pd->tunnel = l2tp_tunnel_get_nth(net, pd->tunnel_idx);
1556 pd->tunnel_idx++; 1567 pd->tunnel_idx++;
1557 1568
1558 if (pd->tunnel == NULL) 1569 /* Only accept L2TPv2 tunnels */
1559 break; 1570 if (!pd->tunnel || pd->tunnel->version == 2)
1571 return;
1560 1572
1561 /* Ignore L2TPv3 tunnels */ 1573 l2tp_tunnel_dec_refcount(pd->tunnel);
1562 if (pd->tunnel->version < 3)
1563 break;
1564 } 1574 }
1565} 1575}
1566 1576
@@ -1609,7 +1619,17 @@ static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos)
1609 1619
1610static void pppol2tp_seq_stop(struct seq_file *p, void *v) 1620static void pppol2tp_seq_stop(struct seq_file *p, void *v)
1611{ 1621{
1612 /* nothing to do */ 1622 struct pppol2tp_seq_data *pd = v;
1623
1624 if (!pd || pd == SEQ_START_TOKEN)
1625 return;
1626
1627 /* Drop reference taken by last invocation of pppol2tp_next_tunnel() */
1628 if (pd->tunnel) {
1629 l2tp_tunnel_dec_refcount(pd->tunnel);
1630 pd->tunnel = NULL;
1631 pd->session = NULL;
1632 }
1613} 1633}
1614 1634
1615static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v) 1635static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 01dcc0823d1f..cb80ebb38311 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -199,9 +199,19 @@ static int llc_ui_release(struct socket *sock)
199 llc->laddr.lsap, llc->daddr.lsap); 199 llc->laddr.lsap, llc->daddr.lsap);
200 if (!llc_send_disc(sk)) 200 if (!llc_send_disc(sk))
201 llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo); 201 llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
202 if (!sock_flag(sk, SOCK_ZAPPED)) 202 if (!sock_flag(sk, SOCK_ZAPPED)) {
203 struct llc_sap *sap = llc->sap;
204
205 /* Hold this for release_sock(), so that llc_backlog_rcv()
206 * could still use it.
207 */
208 llc_sap_hold(sap);
203 llc_sap_remove_socket(llc->sap, sk); 209 llc_sap_remove_socket(llc->sap, sk);
204 release_sock(sk); 210 release_sock(sk);
211 llc_sap_put(sap);
212 } else {
213 release_sock(sk);
214 }
205 if (llc->dev) 215 if (llc->dev)
206 dev_put(llc->dev); 216 dev_put(llc->dev);
207 sock_put(sk); 217 sock_put(sk);
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index 163121192aca..4d78375f9872 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -1099,14 +1099,7 @@ int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb)
1099 1099
1100int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb) 1100int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb)
1101{ 1101{
1102 struct llc_sock *llc = llc_sk(sk); 1102 llc_sk_stop_all_timers(sk, false);
1103
1104 del_timer(&llc->pf_cycle_timer.timer);
1105 del_timer(&llc->ack_timer.timer);
1106 del_timer(&llc->rej_sent_timer.timer);
1107 del_timer(&llc->busy_state_timer.timer);
1108 llc->ack_must_be_send = 0;
1109 llc->ack_pf = 0;
1110 return 0; 1103 return 0;
1111} 1104}
1112 1105
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 110e32bcb399..c0ac522b48a1 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -961,6 +961,26 @@ out:
961 return sk; 961 return sk;
962} 962}
963 963
964void llc_sk_stop_all_timers(struct sock *sk, bool sync)
965{
966 struct llc_sock *llc = llc_sk(sk);
967
968 if (sync) {
969 del_timer_sync(&llc->pf_cycle_timer.timer);
970 del_timer_sync(&llc->ack_timer.timer);
971 del_timer_sync(&llc->rej_sent_timer.timer);
972 del_timer_sync(&llc->busy_state_timer.timer);
973 } else {
974 del_timer(&llc->pf_cycle_timer.timer);
975 del_timer(&llc->ack_timer.timer);
976 del_timer(&llc->rej_sent_timer.timer);
977 del_timer(&llc->busy_state_timer.timer);
978 }
979
980 llc->ack_must_be_send = 0;
981 llc->ack_pf = 0;
982}
983
964/** 984/**
965 * llc_sk_free - Frees a LLC socket 985 * llc_sk_free - Frees a LLC socket
966 * @sk - socket to free 986 * @sk - socket to free
@@ -973,7 +993,7 @@ void llc_sk_free(struct sock *sk)
973 993
974 llc->state = LLC_CONN_OUT_OF_SVC; 994 llc->state = LLC_CONN_OUT_OF_SVC;
975 /* Stop all (possibly) running timers */ 995 /* Stop all (possibly) running timers */
976 llc_conn_ac_stop_all_timers(sk, NULL); 996 llc_sk_stop_all_timers(sk, true);
977#ifdef DEBUG_LLC_CONN_ALLOC 997#ifdef DEBUG_LLC_CONN_ALLOC
978 printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__, 998 printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__,
979 skb_queue_len(&llc->pdu_unack_q), 999 skb_queue_len(&llc->pdu_unack_q),
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 704b3832dbad..44d8a55e9721 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -594,6 +594,7 @@ config NFT_QUOTA
594config NFT_REJECT 594config NFT_REJECT
595 default m if NETFILTER_ADVANCED=n 595 default m if NETFILTER_ADVANCED=n
596 tristate "Netfilter nf_tables reject support" 596 tristate "Netfilter nf_tables reject support"
597 depends on !NF_TABLES_INET || (IPV6!=m || m)
597 help 598 help
598 This option adds the "reject" expression that you can use to 599 This option adds the "reject" expression that you can use to
599 explicitly deny and notify via TCP reset/ICMP informational errors 600 explicitly deny and notify via TCP reset/ICMP informational errors
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 5ebde4b15810..f36098887ad0 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2384,11 +2384,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2384 strlcpy(cfg.mcast_ifn, dm->mcast_ifn, 2384 strlcpy(cfg.mcast_ifn, dm->mcast_ifn,
2385 sizeof(cfg.mcast_ifn)); 2385 sizeof(cfg.mcast_ifn));
2386 cfg.syncid = dm->syncid; 2386 cfg.syncid = dm->syncid;
2387 rtnl_lock();
2388 mutex_lock(&ipvs->sync_mutex);
2389 ret = start_sync_thread(ipvs, &cfg, dm->state); 2387 ret = start_sync_thread(ipvs, &cfg, dm->state);
2390 mutex_unlock(&ipvs->sync_mutex);
2391 rtnl_unlock();
2392 } else { 2388 } else {
2393 mutex_lock(&ipvs->sync_mutex); 2389 mutex_lock(&ipvs->sync_mutex);
2394 ret = stop_sync_thread(ipvs, dm->state); 2390 ret = stop_sync_thread(ipvs, dm->state);
@@ -3481,12 +3477,8 @@ static int ip_vs_genl_new_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
3481 if (ipvs->mixed_address_family_dests > 0) 3477 if (ipvs->mixed_address_family_dests > 0)
3482 return -EINVAL; 3478 return -EINVAL;
3483 3479
3484 rtnl_lock();
3485 mutex_lock(&ipvs->sync_mutex);
3486 ret = start_sync_thread(ipvs, &c, 3480 ret = start_sync_thread(ipvs, &c,
3487 nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE])); 3481 nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
3488 mutex_unlock(&ipvs->sync_mutex);
3489 rtnl_unlock();
3490 return ret; 3482 return ret;
3491} 3483}
3492 3484
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index fbaf3bd05b2e..001501e25625 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -49,6 +49,7 @@
49#include <linux/kthread.h> 49#include <linux/kthread.h>
50#include <linux/wait.h> 50#include <linux/wait.h>
51#include <linux/kernel.h> 51#include <linux/kernel.h>
52#include <linux/sched/signal.h>
52 53
53#include <asm/unaligned.h> /* Used for ntoh_seq and hton_seq */ 54#include <asm/unaligned.h> /* Used for ntoh_seq and hton_seq */
54 55
@@ -1360,15 +1361,9 @@ static void set_mcast_pmtudisc(struct sock *sk, int val)
1360/* 1361/*
1361 * Specifiy default interface for outgoing multicasts 1362 * Specifiy default interface for outgoing multicasts
1362 */ 1363 */
1363static int set_mcast_if(struct sock *sk, char *ifname) 1364static int set_mcast_if(struct sock *sk, struct net_device *dev)
1364{ 1365{
1365 struct net_device *dev;
1366 struct inet_sock *inet = inet_sk(sk); 1366 struct inet_sock *inet = inet_sk(sk);
1367 struct net *net = sock_net(sk);
1368
1369 dev = __dev_get_by_name(net, ifname);
1370 if (!dev)
1371 return -ENODEV;
1372 1367
1373 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) 1368 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
1374 return -EINVAL; 1369 return -EINVAL;
@@ -1396,19 +1391,14 @@ static int set_mcast_if(struct sock *sk, char *ifname)
1396 * in the in_addr structure passed in as a parameter. 1391 * in the in_addr structure passed in as a parameter.
1397 */ 1392 */
1398static int 1393static int
1399join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname) 1394join_mcast_group(struct sock *sk, struct in_addr *addr, struct net_device *dev)
1400{ 1395{
1401 struct net *net = sock_net(sk);
1402 struct ip_mreqn mreq; 1396 struct ip_mreqn mreq;
1403 struct net_device *dev;
1404 int ret; 1397 int ret;
1405 1398
1406 memset(&mreq, 0, sizeof(mreq)); 1399 memset(&mreq, 0, sizeof(mreq));
1407 memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr)); 1400 memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));
1408 1401
1409 dev = __dev_get_by_name(net, ifname);
1410 if (!dev)
1411 return -ENODEV;
1412 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) 1402 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
1413 return -EINVAL; 1403 return -EINVAL;
1414 1404
@@ -1423,15 +1413,10 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
1423 1413
1424#ifdef CONFIG_IP_VS_IPV6 1414#ifdef CONFIG_IP_VS_IPV6
1425static int join_mcast_group6(struct sock *sk, struct in6_addr *addr, 1415static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
1426 char *ifname) 1416 struct net_device *dev)
1427{ 1417{
1428 struct net *net = sock_net(sk);
1429 struct net_device *dev;
1430 int ret; 1418 int ret;
1431 1419
1432 dev = __dev_get_by_name(net, ifname);
1433 if (!dev)
1434 return -ENODEV;
1435 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) 1420 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
1436 return -EINVAL; 1421 return -EINVAL;
1437 1422
@@ -1443,24 +1428,18 @@ static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
1443} 1428}
1444#endif 1429#endif
1445 1430
1446static int bind_mcastif_addr(struct socket *sock, char *ifname) 1431static int bind_mcastif_addr(struct socket *sock, struct net_device *dev)
1447{ 1432{
1448 struct net *net = sock_net(sock->sk);
1449 struct net_device *dev;
1450 __be32 addr; 1433 __be32 addr;
1451 struct sockaddr_in sin; 1434 struct sockaddr_in sin;
1452 1435
1453 dev = __dev_get_by_name(net, ifname);
1454 if (!dev)
1455 return -ENODEV;
1456
1457 addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE); 1436 addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
1458 if (!addr) 1437 if (!addr)
1459 pr_err("You probably need to specify IP address on " 1438 pr_err("You probably need to specify IP address on "
1460 "multicast interface.\n"); 1439 "multicast interface.\n");
1461 1440
1462 IP_VS_DBG(7, "binding socket with (%s) %pI4\n", 1441 IP_VS_DBG(7, "binding socket with (%s) %pI4\n",
1463 ifname, &addr); 1442 dev->name, &addr);
1464 1443
1465 /* Now bind the socket with the address of multicast interface */ 1444 /* Now bind the socket with the address of multicast interface */
1466 sin.sin_family = AF_INET; 1445 sin.sin_family = AF_INET;
@@ -1493,7 +1472,8 @@ static void get_mcast_sockaddr(union ipvs_sockaddr *sa, int *salen,
1493/* 1472/*
1494 * Set up sending multicast socket over UDP 1473 * Set up sending multicast socket over UDP
1495 */ 1474 */
1496static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id) 1475static int make_send_sock(struct netns_ipvs *ipvs, int id,
1476 struct net_device *dev, struct socket **sock_ret)
1497{ 1477{
1498 /* multicast addr */ 1478 /* multicast addr */
1499 union ipvs_sockaddr mcast_addr; 1479 union ipvs_sockaddr mcast_addr;
@@ -1505,9 +1485,10 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
1505 IPPROTO_UDP, &sock); 1485 IPPROTO_UDP, &sock);
1506 if (result < 0) { 1486 if (result < 0) {
1507 pr_err("Error during creation of socket; terminating\n"); 1487 pr_err("Error during creation of socket; terminating\n");
1508 return ERR_PTR(result); 1488 goto error;
1509 } 1489 }
1510 result = set_mcast_if(sock->sk, ipvs->mcfg.mcast_ifn); 1490 *sock_ret = sock;
1491 result = set_mcast_if(sock->sk, dev);
1511 if (result < 0) { 1492 if (result < 0) {
1512 pr_err("Error setting outbound mcast interface\n"); 1493 pr_err("Error setting outbound mcast interface\n");
1513 goto error; 1494 goto error;
@@ -1522,7 +1503,7 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
1522 set_sock_size(sock->sk, 1, result); 1503 set_sock_size(sock->sk, 1, result);
1523 1504
1524 if (AF_INET == ipvs->mcfg.mcast_af) 1505 if (AF_INET == ipvs->mcfg.mcast_af)
1525 result = bind_mcastif_addr(sock, ipvs->mcfg.mcast_ifn); 1506 result = bind_mcastif_addr(sock, dev);
1526 else 1507 else
1527 result = 0; 1508 result = 0;
1528 if (result < 0) { 1509 if (result < 0) {
@@ -1538,19 +1519,18 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
1538 goto error; 1519 goto error;
1539 } 1520 }
1540 1521
1541 return sock; 1522 return 0;
1542 1523
1543error: 1524error:
1544 sock_release(sock); 1525 return result;
1545 return ERR_PTR(result);
1546} 1526}
1547 1527
1548 1528
1549/* 1529/*
1550 * Set up receiving multicast socket over UDP 1530 * Set up receiving multicast socket over UDP
1551 */ 1531 */
1552static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id, 1532static int make_receive_sock(struct netns_ipvs *ipvs, int id,
1553 int ifindex) 1533 struct net_device *dev, struct socket **sock_ret)
1554{ 1534{
1555 /* multicast addr */ 1535 /* multicast addr */
1556 union ipvs_sockaddr mcast_addr; 1536 union ipvs_sockaddr mcast_addr;
@@ -1562,8 +1542,9 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
1562 IPPROTO_UDP, &sock); 1542 IPPROTO_UDP, &sock);
1563 if (result < 0) { 1543 if (result < 0) {
1564 pr_err("Error during creation of socket; terminating\n"); 1544 pr_err("Error during creation of socket; terminating\n");
1565 return ERR_PTR(result); 1545 goto error;
1566 } 1546 }
1547 *sock_ret = sock;
1567 /* it is equivalent to the REUSEADDR option in user-space */ 1548 /* it is equivalent to the REUSEADDR option in user-space */
1568 sock->sk->sk_reuse = SK_CAN_REUSE; 1549 sock->sk->sk_reuse = SK_CAN_REUSE;
1569 result = sysctl_sync_sock_size(ipvs); 1550 result = sysctl_sync_sock_size(ipvs);
@@ -1571,7 +1552,7 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
1571 set_sock_size(sock->sk, 0, result); 1552 set_sock_size(sock->sk, 0, result);
1572 1553
1573 get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id); 1554 get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
1574 sock->sk->sk_bound_dev_if = ifindex; 1555 sock->sk->sk_bound_dev_if = dev->ifindex;
1575 result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen); 1556 result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
1576 if (result < 0) { 1557 if (result < 0) {
1577 pr_err("Error binding to the multicast addr\n"); 1558 pr_err("Error binding to the multicast addr\n");
@@ -1582,21 +1563,20 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
1582#ifdef CONFIG_IP_VS_IPV6 1563#ifdef CONFIG_IP_VS_IPV6
1583 if (ipvs->bcfg.mcast_af == AF_INET6) 1564 if (ipvs->bcfg.mcast_af == AF_INET6)
1584 result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr, 1565 result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr,
1585 ipvs->bcfg.mcast_ifn); 1566 dev);
1586 else 1567 else
1587#endif 1568#endif
1588 result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr, 1569 result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr,
1589 ipvs->bcfg.mcast_ifn); 1570 dev);
1590 if (result < 0) { 1571 if (result < 0) {
1591 pr_err("Error joining to the multicast group\n"); 1572 pr_err("Error joining to the multicast group\n");
1592 goto error; 1573 goto error;
1593 } 1574 }
1594 1575
1595 return sock; 1576 return 0;
1596 1577
1597error: 1578error:
1598 sock_release(sock); 1579 return result;
1599 return ERR_PTR(result);
1600} 1580}
1601 1581
1602 1582
@@ -1778,13 +1758,12 @@ static int sync_thread_backup(void *data)
1778int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, 1758int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
1779 int state) 1759 int state)
1780{ 1760{
1781 struct ip_vs_sync_thread_data *tinfo; 1761 struct ip_vs_sync_thread_data *tinfo = NULL;
1782 struct task_struct **array = NULL, *task; 1762 struct task_struct **array = NULL, *task;
1783 struct socket *sock;
1784 struct net_device *dev; 1763 struct net_device *dev;
1785 char *name; 1764 char *name;
1786 int (*threadfn)(void *data); 1765 int (*threadfn)(void *data);
1787 int id, count, hlen; 1766 int id = 0, count, hlen;
1788 int result = -ENOMEM; 1767 int result = -ENOMEM;
1789 u16 mtu, min_mtu; 1768 u16 mtu, min_mtu;
1790 1769
@@ -1792,6 +1771,18 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
1792 IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %zd bytes\n", 1771 IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %zd bytes\n",
1793 sizeof(struct ip_vs_sync_conn_v0)); 1772 sizeof(struct ip_vs_sync_conn_v0));
1794 1773
1774 /* Do not hold one mutex and then to block on another */
1775 for (;;) {
1776 rtnl_lock();
1777 if (mutex_trylock(&ipvs->sync_mutex))
1778 break;
1779 rtnl_unlock();
1780 mutex_lock(&ipvs->sync_mutex);
1781 if (rtnl_trylock())
1782 break;
1783 mutex_unlock(&ipvs->sync_mutex);
1784 }
1785
1795 if (!ipvs->sync_state) { 1786 if (!ipvs->sync_state) {
1796 count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX); 1787 count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX);
1797 ipvs->threads_mask = count - 1; 1788 ipvs->threads_mask = count - 1;
@@ -1810,7 +1801,8 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
1810 dev = __dev_get_by_name(ipvs->net, c->mcast_ifn); 1801 dev = __dev_get_by_name(ipvs->net, c->mcast_ifn);
1811 if (!dev) { 1802 if (!dev) {
1812 pr_err("Unknown mcast interface: %s\n", c->mcast_ifn); 1803 pr_err("Unknown mcast interface: %s\n", c->mcast_ifn);
1813 return -ENODEV; 1804 result = -ENODEV;
1805 goto out_early;
1814 } 1806 }
1815 hlen = (AF_INET6 == c->mcast_af) ? 1807 hlen = (AF_INET6 == c->mcast_af) ?
1816 sizeof(struct ipv6hdr) + sizeof(struct udphdr) : 1808 sizeof(struct ipv6hdr) + sizeof(struct udphdr) :
@@ -1827,26 +1819,30 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
1827 c->sync_maxlen = mtu - hlen; 1819 c->sync_maxlen = mtu - hlen;
1828 1820
1829 if (state == IP_VS_STATE_MASTER) { 1821 if (state == IP_VS_STATE_MASTER) {
1822 result = -EEXIST;
1830 if (ipvs->ms) 1823 if (ipvs->ms)
1831 return -EEXIST; 1824 goto out_early;
1832 1825
1833 ipvs->mcfg = *c; 1826 ipvs->mcfg = *c;
1834 name = "ipvs-m:%d:%d"; 1827 name = "ipvs-m:%d:%d";
1835 threadfn = sync_thread_master; 1828 threadfn = sync_thread_master;
1836 } else if (state == IP_VS_STATE_BACKUP) { 1829 } else if (state == IP_VS_STATE_BACKUP) {
1830 result = -EEXIST;
1837 if (ipvs->backup_threads) 1831 if (ipvs->backup_threads)
1838 return -EEXIST; 1832 goto out_early;
1839 1833
1840 ipvs->bcfg = *c; 1834 ipvs->bcfg = *c;
1841 name = "ipvs-b:%d:%d"; 1835 name = "ipvs-b:%d:%d";
1842 threadfn = sync_thread_backup; 1836 threadfn = sync_thread_backup;
1843 } else { 1837 } else {
1844 return -EINVAL; 1838 result = -EINVAL;
1839 goto out_early;
1845 } 1840 }
1846 1841
1847 if (state == IP_VS_STATE_MASTER) { 1842 if (state == IP_VS_STATE_MASTER) {
1848 struct ipvs_master_sync_state *ms; 1843 struct ipvs_master_sync_state *ms;
1849 1844
1845 result = -ENOMEM;
1850 ipvs->ms = kcalloc(count, sizeof(ipvs->ms[0]), GFP_KERNEL); 1846 ipvs->ms = kcalloc(count, sizeof(ipvs->ms[0]), GFP_KERNEL);
1851 if (!ipvs->ms) 1847 if (!ipvs->ms)
1852 goto out; 1848 goto out;
@@ -1862,39 +1858,38 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
1862 } else { 1858 } else {
1863 array = kcalloc(count, sizeof(struct task_struct *), 1859 array = kcalloc(count, sizeof(struct task_struct *),
1864 GFP_KERNEL); 1860 GFP_KERNEL);
1861 result = -ENOMEM;
1865 if (!array) 1862 if (!array)
1866 goto out; 1863 goto out;
1867 } 1864 }
1868 1865
1869 tinfo = NULL;
1870 for (id = 0; id < count; id++) { 1866 for (id = 0; id < count; id++) {
1871 if (state == IP_VS_STATE_MASTER) 1867 result = -ENOMEM;
1872 sock = make_send_sock(ipvs, id);
1873 else
1874 sock = make_receive_sock(ipvs, id, dev->ifindex);
1875 if (IS_ERR(sock)) {
1876 result = PTR_ERR(sock);
1877 goto outtinfo;
1878 }
1879 tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL); 1868 tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
1880 if (!tinfo) 1869 if (!tinfo)
1881 goto outsocket; 1870 goto out;
1882 tinfo->ipvs = ipvs; 1871 tinfo->ipvs = ipvs;
1883 tinfo->sock = sock; 1872 tinfo->sock = NULL;
1884 if (state == IP_VS_STATE_BACKUP) { 1873 if (state == IP_VS_STATE_BACKUP) {
1885 tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen, 1874 tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
1886 GFP_KERNEL); 1875 GFP_KERNEL);
1887 if (!tinfo->buf) 1876 if (!tinfo->buf)
1888 goto outtinfo; 1877 goto out;
1889 } else { 1878 } else {
1890 tinfo->buf = NULL; 1879 tinfo->buf = NULL;
1891 } 1880 }
1892 tinfo->id = id; 1881 tinfo->id = id;
1882 if (state == IP_VS_STATE_MASTER)
1883 result = make_send_sock(ipvs, id, dev, &tinfo->sock);
1884 else
1885 result = make_receive_sock(ipvs, id, dev, &tinfo->sock);
1886 if (result < 0)
1887 goto out;
1893 1888
1894 task = kthread_run(threadfn, tinfo, name, ipvs->gen, id); 1889 task = kthread_run(threadfn, tinfo, name, ipvs->gen, id);
1895 if (IS_ERR(task)) { 1890 if (IS_ERR(task)) {
1896 result = PTR_ERR(task); 1891 result = PTR_ERR(task);
1897 goto outtinfo; 1892 goto out;
1898 } 1893 }
1899 tinfo = NULL; 1894 tinfo = NULL;
1900 if (state == IP_VS_STATE_MASTER) 1895 if (state == IP_VS_STATE_MASTER)
@@ -1911,20 +1906,20 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
1911 ipvs->sync_state |= state; 1906 ipvs->sync_state |= state;
1912 spin_unlock_bh(&ipvs->sync_buff_lock); 1907 spin_unlock_bh(&ipvs->sync_buff_lock);
1913 1908
1909 mutex_unlock(&ipvs->sync_mutex);
1910 rtnl_unlock();
1911
1914 /* increase the module use count */ 1912 /* increase the module use count */
1915 ip_vs_use_count_inc(); 1913 ip_vs_use_count_inc();
1916 1914
1917 return 0; 1915 return 0;
1918 1916
1919outsocket: 1917out:
1920 sock_release(sock); 1918 /* We do not need RTNL lock anymore, release it here so that
1921 1919 * sock_release below and in the kthreads can use rtnl_lock
1922outtinfo: 1920 * to leave the mcast group.
1923 if (tinfo) { 1921 */
1924 sock_release(tinfo->sock); 1922 rtnl_unlock();
1925 kfree(tinfo->buf);
1926 kfree(tinfo);
1927 }
1928 count = id; 1923 count = id;
1929 while (count-- > 0) { 1924 while (count-- > 0) {
1930 if (state == IP_VS_STATE_MASTER) 1925 if (state == IP_VS_STATE_MASTER)
@@ -1932,13 +1927,23 @@ outtinfo:
1932 else 1927 else
1933 kthread_stop(array[count]); 1928 kthread_stop(array[count]);
1934 } 1929 }
1935 kfree(array);
1936
1937out:
1938 if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { 1930 if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
1939 kfree(ipvs->ms); 1931 kfree(ipvs->ms);
1940 ipvs->ms = NULL; 1932 ipvs->ms = NULL;
1941 } 1933 }
1934 mutex_unlock(&ipvs->sync_mutex);
1935 if (tinfo) {
1936 if (tinfo->sock)
1937 sock_release(tinfo->sock);
1938 kfree(tinfo->buf);
1939 kfree(tinfo);
1940 }
1941 kfree(array);
1942 return result;
1943
1944out_early:
1945 mutex_unlock(&ipvs->sync_mutex);
1946 rtnl_unlock();
1942 return result; 1947 return result;
1943} 1948}
1944 1949
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 8ef21d9f9a00..4b2b3d53acfc 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -252,7 +252,7 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
252static inline int expect_matches(const struct nf_conntrack_expect *a, 252static inline int expect_matches(const struct nf_conntrack_expect *a,
253 const struct nf_conntrack_expect *b) 253 const struct nf_conntrack_expect *b)
254{ 254{
255 return a->master == b->master && a->class == b->class && 255 return a->master == b->master &&
256 nf_ct_tuple_equal(&a->tuple, &b->tuple) && 256 nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
257 nf_ct_tuple_mask_equal(&a->mask, &b->mask) && 257 nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
258 net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) && 258 net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
@@ -421,6 +421,9 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
421 h = nf_ct_expect_dst_hash(net, &expect->tuple); 421 h = nf_ct_expect_dst_hash(net, &expect->tuple);
422 hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) { 422 hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
423 if (expect_matches(i, expect)) { 423 if (expect_matches(i, expect)) {
424 if (i->class != expect->class)
425 return -EALREADY;
426
424 if (nf_ct_remove_expect(i)) 427 if (nf_ct_remove_expect(i))
425 break; 428 break;
426 } else if (expect_clash(i, expect)) { 429 } else if (expect_clash(i, expect)) {
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 9fe0ddc333fb..277bbfe26478 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -9,6 +9,7 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/kmemleak.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/mutex.h> 14#include <linux/mutex.h>
14#include <linux/rcupdate.h> 15#include <linux/rcupdate.h>
@@ -71,6 +72,7 @@ void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
71 rcu_read_unlock(); 72 rcu_read_unlock();
72 73
73 alloc = max(newlen, NF_CT_EXT_PREALLOC); 74 alloc = max(newlen, NF_CT_EXT_PREALLOC);
75 kmemleak_not_leak(old);
74 new = __krealloc(old, alloc, gfp); 76 new = __krealloc(old, alloc, gfp);
75 if (!new) 77 if (!new)
76 return NULL; 78 return NULL;
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 4dbb5bad4363..908e51e2dc2b 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -938,11 +938,19 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
938 datalen, rtp_exp, rtcp_exp, 938 datalen, rtp_exp, rtcp_exp,
939 mediaoff, medialen, daddr); 939 mediaoff, medialen, daddr);
940 else { 940 else {
941 if (nf_ct_expect_related(rtp_exp) == 0) { 941 /* -EALREADY handling works around end-points that send
942 if (nf_ct_expect_related(rtcp_exp) != 0) 942 * SDP messages with identical port but different media type,
943 nf_ct_unexpect_related(rtp_exp); 943 * we pretend expectation was set up.
944 else 944 */
945 int errp = nf_ct_expect_related(rtp_exp);
946
947 if (errp == 0 || errp == -EALREADY) {
948 int errcp = nf_ct_expect_related(rtcp_exp);
949
950 if (errcp == 0 || errcp == -EALREADY)
945 ret = NF_ACCEPT; 951 ret = NF_ACCEPT;
952 else if (errp == 0)
953 nf_ct_unexpect_related(rtp_exp);
946 } 954 }
947 } 955 }
948 nf_ct_expect_put(rtcp_exp); 956 nf_ct_expect_put(rtcp_exp);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 9134cc429ad4..04d4e3772584 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2361,41 +2361,46 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
2361 } 2361 }
2362 2362
2363 if (nlh->nlmsg_flags & NLM_F_REPLACE) { 2363 if (nlh->nlmsg_flags & NLM_F_REPLACE) {
2364 if (nft_is_active_next(net, old_rule)) { 2364 if (!nft_is_active_next(net, old_rule)) {
2365 trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
2366 old_rule);
2367 if (trans == NULL) {
2368 err = -ENOMEM;
2369 goto err2;
2370 }
2371 nft_deactivate_next(net, old_rule);
2372 chain->use--;
2373 list_add_tail_rcu(&rule->list, &old_rule->list);
2374 } else {
2375 err = -ENOENT; 2365 err = -ENOENT;
2376 goto err2; 2366 goto err2;
2377 } 2367 }
2378 } else if (nlh->nlmsg_flags & NLM_F_APPEND) 2368 trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
2379 if (old_rule) 2369 old_rule);
2380 list_add_rcu(&rule->list, &old_rule->list); 2370 if (trans == NULL) {
2381 else 2371 err = -ENOMEM;
2382 list_add_tail_rcu(&rule->list, &chain->rules); 2372 goto err2;
2383 else { 2373 }
2384 if (old_rule) 2374 nft_deactivate_next(net, old_rule);
2385 list_add_tail_rcu(&rule->list, &old_rule->list); 2375 chain->use--;
2386 else
2387 list_add_rcu(&rule->list, &chain->rules);
2388 }
2389 2376
2390 if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) { 2377 if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
2391 err = -ENOMEM; 2378 err = -ENOMEM;
2392 goto err3; 2379 goto err2;
2380 }
2381
2382 list_add_tail_rcu(&rule->list, &old_rule->list);
2383 } else {
2384 if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
2385 err = -ENOMEM;
2386 goto err2;
2387 }
2388
2389 if (nlh->nlmsg_flags & NLM_F_APPEND) {
2390 if (old_rule)
2391 list_add_rcu(&rule->list, &old_rule->list);
2392 else
2393 list_add_tail_rcu(&rule->list, &chain->rules);
2394 } else {
2395 if (old_rule)
2396 list_add_tail_rcu(&rule->list, &old_rule->list);
2397 else
2398 list_add_rcu(&rule->list, &chain->rules);
2399 }
2393 } 2400 }
2394 chain->use++; 2401 chain->use++;
2395 return 0; 2402 return 0;
2396 2403
2397err3:
2398 list_del_rcu(&rule->list);
2399err2: 2404err2:
2400 nf_tables_rule_destroy(&ctx, rule); 2405 nf_tables_rule_destroy(&ctx, rule);
2401err1: 2406err1:
@@ -3207,18 +3212,20 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
3207 3212
3208 err = ops->init(set, &desc, nla); 3213 err = ops->init(set, &desc, nla);
3209 if (err < 0) 3214 if (err < 0)
3210 goto err2; 3215 goto err3;
3211 3216
3212 err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set); 3217 err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
3213 if (err < 0) 3218 if (err < 0)
3214 goto err3; 3219 goto err4;
3215 3220
3216 list_add_tail_rcu(&set->list, &table->sets); 3221 list_add_tail_rcu(&set->list, &table->sets);
3217 table->use++; 3222 table->use++;
3218 return 0; 3223 return 0;
3219 3224
3220err3: 3225err4:
3221 ops->destroy(set); 3226 ops->destroy(set);
3227err3:
3228 kfree(set->name);
3222err2: 3229err2:
3223 kvfree(set); 3230 kvfree(set);
3224err1: 3231err1:
@@ -5738,7 +5745,7 @@ static void nft_chain_commit_update(struct nft_trans *trans)
5738 struct nft_base_chain *basechain; 5745 struct nft_base_chain *basechain;
5739 5746
5740 if (nft_trans_chain_name(trans)) 5747 if (nft_trans_chain_name(trans))
5741 strcpy(trans->ctx.chain->name, nft_trans_chain_name(trans)); 5748 swap(trans->ctx.chain->name, nft_trans_chain_name(trans));
5742 5749
5743 if (!nft_is_base_chain(trans->ctx.chain)) 5750 if (!nft_is_base_chain(trans->ctx.chain))
5744 return; 5751 return;
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c
index 773da82190dc..94df000abb92 100644
--- a/net/netfilter/xt_connmark.c
+++ b/net/netfilter/xt_connmark.c
@@ -36,11 +36,10 @@ MODULE_ALIAS("ipt_connmark");
36MODULE_ALIAS("ip6t_connmark"); 36MODULE_ALIAS("ip6t_connmark");
37 37
38static unsigned int 38static unsigned int
39connmark_tg_shift(struct sk_buff *skb, 39connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info)
40 const struct xt_connmark_tginfo1 *info,
41 u8 shift_bits, u8 shift_dir)
42{ 40{
43 enum ip_conntrack_info ctinfo; 41 enum ip_conntrack_info ctinfo;
42 u_int32_t new_targetmark;
44 struct nf_conn *ct; 43 struct nf_conn *ct;
45 u_int32_t newmark; 44 u_int32_t newmark;
46 45
@@ -51,34 +50,39 @@ connmark_tg_shift(struct sk_buff *skb,
51 switch (info->mode) { 50 switch (info->mode) {
52 case XT_CONNMARK_SET: 51 case XT_CONNMARK_SET:
53 newmark = (ct->mark & ~info->ctmask) ^ info->ctmark; 52 newmark = (ct->mark & ~info->ctmask) ^ info->ctmark;
54 if (shift_dir == D_SHIFT_RIGHT) 53 if (info->shift_dir == D_SHIFT_RIGHT)
55 newmark >>= shift_bits; 54 newmark >>= info->shift_bits;
56 else 55 else
57 newmark <<= shift_bits; 56 newmark <<= info->shift_bits;
57
58 if (ct->mark != newmark) { 58 if (ct->mark != newmark) {
59 ct->mark = newmark; 59 ct->mark = newmark;
60 nf_conntrack_event_cache(IPCT_MARK, ct); 60 nf_conntrack_event_cache(IPCT_MARK, ct);
61 } 61 }
62 break; 62 break;
63 case XT_CONNMARK_SAVE: 63 case XT_CONNMARK_SAVE:
64 newmark = (ct->mark & ~info->ctmask) ^ 64 new_targetmark = (skb->mark & info->nfmask);
65 (skb->mark & info->nfmask); 65 if (info->shift_dir == D_SHIFT_RIGHT)
66 if (shift_dir == D_SHIFT_RIGHT) 66 new_targetmark >>= info->shift_bits;
67 newmark >>= shift_bits;
68 else 67 else
69 newmark <<= shift_bits; 68 new_targetmark <<= info->shift_bits;
69
70 newmark = (ct->mark & ~info->ctmask) ^
71 new_targetmark;
70 if (ct->mark != newmark) { 72 if (ct->mark != newmark) {
71 ct->mark = newmark; 73 ct->mark = newmark;
72 nf_conntrack_event_cache(IPCT_MARK, ct); 74 nf_conntrack_event_cache(IPCT_MARK, ct);
73 } 75 }
74 break; 76 break;
75 case XT_CONNMARK_RESTORE: 77 case XT_CONNMARK_RESTORE:
76 newmark = (skb->mark & ~info->nfmask) ^ 78 new_targetmark = (ct->mark & info->ctmask);
77 (ct->mark & info->ctmask); 79 if (info->shift_dir == D_SHIFT_RIGHT)
78 if (shift_dir == D_SHIFT_RIGHT) 80 new_targetmark >>= info->shift_bits;
79 newmark >>= shift_bits;
80 else 81 else
81 newmark <<= shift_bits; 82 new_targetmark <<= info->shift_bits;
83
84 newmark = (skb->mark & ~info->nfmask) ^
85 new_targetmark;
82 skb->mark = newmark; 86 skb->mark = newmark;
83 break; 87 break;
84 } 88 }
@@ -89,8 +93,14 @@ static unsigned int
89connmark_tg(struct sk_buff *skb, const struct xt_action_param *par) 93connmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
90{ 94{
91 const struct xt_connmark_tginfo1 *info = par->targinfo; 95 const struct xt_connmark_tginfo1 *info = par->targinfo;
92 96 const struct xt_connmark_tginfo2 info2 = {
93 return connmark_tg_shift(skb, info, 0, 0); 97 .ctmark = info->ctmark,
98 .ctmask = info->ctmask,
99 .nfmask = info->nfmask,
100 .mode = info->mode,
101 };
102
103 return connmark_tg_shift(skb, &info2);
94} 104}
95 105
96static unsigned int 106static unsigned int
@@ -98,8 +108,7 @@ connmark_tg_v2(struct sk_buff *skb, const struct xt_action_param *par)
98{ 108{
99 const struct xt_connmark_tginfo2 *info = par->targinfo; 109 const struct xt_connmark_tginfo2 *info = par->targinfo;
100 110
101 return connmark_tg_shift(skb, (const struct xt_connmark_tginfo1 *)info, 111 return connmark_tg_shift(skb, info);
102 info->shift_bits, info->shift_dir);
103} 112}
104 113
105static int connmark_tg_check(const struct xt_tgchk_param *par) 114static int connmark_tg_check(const struct xt_tgchk_param *par)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 616cb9c18f88..01f3515cada0 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -329,11 +329,11 @@ static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
329 skb_set_queue_mapping(skb, queue_index); 329 skb_set_queue_mapping(skb, queue_index);
330} 330}
331 331
332/* register_prot_hook must be invoked with the po->bind_lock held, 332/* __register_prot_hook must be invoked through register_prot_hook
333 * or from a context in which asynchronous accesses to the packet 333 * or from a context in which asynchronous accesses to the packet
334 * socket is not possible (packet_create()). 334 * socket is not possible (packet_create()).
335 */ 335 */
336static void register_prot_hook(struct sock *sk) 336static void __register_prot_hook(struct sock *sk)
337{ 337{
338 struct packet_sock *po = pkt_sk(sk); 338 struct packet_sock *po = pkt_sk(sk);
339 339
@@ -348,8 +348,13 @@ static void register_prot_hook(struct sock *sk)
348 } 348 }
349} 349}
350 350
351/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock 351static void register_prot_hook(struct sock *sk)
352 * held. If the sync parameter is true, we will temporarily drop 352{
353 lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
354 __register_prot_hook(sk);
355}
356
357/* If the sync parameter is true, we will temporarily drop
353 * the po->bind_lock and do a synchronize_net to make sure no 358 * the po->bind_lock and do a synchronize_net to make sure no
354 * asynchronous packet processing paths still refer to the elements 359 * asynchronous packet processing paths still refer to the elements
355 * of po->prot_hook. If the sync parameter is false, it is the 360 * of po->prot_hook. If the sync parameter is false, it is the
@@ -359,6 +364,8 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
359{ 364{
360 struct packet_sock *po = pkt_sk(sk); 365 struct packet_sock *po = pkt_sk(sk);
361 366
367 lockdep_assert_held_once(&po->bind_lock);
368
362 po->running = 0; 369 po->running = 0;
363 370
364 if (po->fanout) 371 if (po->fanout)
@@ -3008,6 +3015,7 @@ static int packet_release(struct socket *sock)
3008 3015
3009 packet_flush_mclist(sk); 3016 packet_flush_mclist(sk);
3010 3017
3018 lock_sock(sk);
3011 if (po->rx_ring.pg_vec) { 3019 if (po->rx_ring.pg_vec) {
3012 memset(&req_u, 0, sizeof(req_u)); 3020 memset(&req_u, 0, sizeof(req_u));
3013 packet_set_ring(sk, &req_u, 1, 0); 3021 packet_set_ring(sk, &req_u, 1, 0);
@@ -3017,6 +3025,7 @@ static int packet_release(struct socket *sock)
3017 memset(&req_u, 0, sizeof(req_u)); 3025 memset(&req_u, 0, sizeof(req_u));
3018 packet_set_ring(sk, &req_u, 1, 1); 3026 packet_set_ring(sk, &req_u, 1, 1);
3019 } 3027 }
3028 release_sock(sk);
3020 3029
3021 f = fanout_release(sk); 3030 f = fanout_release(sk);
3022 3031
@@ -3250,7 +3259,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
3250 3259
3251 if (proto) { 3260 if (proto) {
3252 po->prot_hook.type = proto; 3261 po->prot_hook.type = proto;
3253 register_prot_hook(sk); 3262 __register_prot_hook(sk);
3254 } 3263 }
3255 3264
3256 mutex_lock(&net->packet.sklist_lock); 3265 mutex_lock(&net->packet.sklist_lock);
@@ -3643,6 +3652,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3643 union tpacket_req_u req_u; 3652 union tpacket_req_u req_u;
3644 int len; 3653 int len;
3645 3654
3655 lock_sock(sk);
3646 switch (po->tp_version) { 3656 switch (po->tp_version) {
3647 case TPACKET_V1: 3657 case TPACKET_V1:
3648 case TPACKET_V2: 3658 case TPACKET_V2:
@@ -3653,12 +3663,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3653 len = sizeof(req_u.req3); 3663 len = sizeof(req_u.req3);
3654 break; 3664 break;
3655 } 3665 }
3656 if (optlen < len) 3666 if (optlen < len) {
3657 return -EINVAL; 3667 ret = -EINVAL;
3658 if (copy_from_user(&req_u.req, optval, len)) 3668 } else {
3659 return -EFAULT; 3669 if (copy_from_user(&req_u.req, optval, len))
3660 return packet_set_ring(sk, &req_u, 0, 3670 ret = -EFAULT;
3661 optname == PACKET_TX_RING); 3671 else
3672 ret = packet_set_ring(sk, &req_u, 0,
3673 optname == PACKET_TX_RING);
3674 }
3675 release_sock(sk);
3676 return ret;
3662 } 3677 }
3663 case PACKET_COPY_THRESH: 3678 case PACKET_COPY_THRESH:
3664 { 3679 {
@@ -3724,12 +3739,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3724 3739
3725 if (optlen != sizeof(val)) 3740 if (optlen != sizeof(val))
3726 return -EINVAL; 3741 return -EINVAL;
3727 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3728 return -EBUSY;
3729 if (copy_from_user(&val, optval, sizeof(val))) 3742 if (copy_from_user(&val, optval, sizeof(val)))
3730 return -EFAULT; 3743 return -EFAULT;
3731 po->tp_loss = !!val; 3744
3732 return 0; 3745 lock_sock(sk);
3746 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3747 ret = -EBUSY;
3748 } else {
3749 po->tp_loss = !!val;
3750 ret = 0;
3751 }
3752 release_sock(sk);
3753 return ret;
3733 } 3754 }
3734 case PACKET_AUXDATA: 3755 case PACKET_AUXDATA:
3735 { 3756 {
@@ -3740,7 +3761,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3740 if (copy_from_user(&val, optval, sizeof(val))) 3761 if (copy_from_user(&val, optval, sizeof(val)))
3741 return -EFAULT; 3762 return -EFAULT;
3742 3763
3764 lock_sock(sk);
3743 po->auxdata = !!val; 3765 po->auxdata = !!val;
3766 release_sock(sk);
3744 return 0; 3767 return 0;
3745 } 3768 }
3746 case PACKET_ORIGDEV: 3769 case PACKET_ORIGDEV:
@@ -3752,7 +3775,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3752 if (copy_from_user(&val, optval, sizeof(val))) 3775 if (copy_from_user(&val, optval, sizeof(val)))
3753 return -EFAULT; 3776 return -EFAULT;
3754 3777
3778 lock_sock(sk);
3755 po->origdev = !!val; 3779 po->origdev = !!val;
3780 release_sock(sk);
3756 return 0; 3781 return 0;
3757 } 3782 }
3758 case PACKET_VNET_HDR: 3783 case PACKET_VNET_HDR:
@@ -3761,15 +3786,20 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3761 3786
3762 if (sock->type != SOCK_RAW) 3787 if (sock->type != SOCK_RAW)
3763 return -EINVAL; 3788 return -EINVAL;
3764 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3765 return -EBUSY;
3766 if (optlen < sizeof(val)) 3789 if (optlen < sizeof(val))
3767 return -EINVAL; 3790 return -EINVAL;
3768 if (copy_from_user(&val, optval, sizeof(val))) 3791 if (copy_from_user(&val, optval, sizeof(val)))
3769 return -EFAULT; 3792 return -EFAULT;
3770 3793
3771 po->has_vnet_hdr = !!val; 3794 lock_sock(sk);
3772 return 0; 3795 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3796 ret = -EBUSY;
3797 } else {
3798 po->has_vnet_hdr = !!val;
3799 ret = 0;
3800 }
3801 release_sock(sk);
3802 return ret;
3773 } 3803 }
3774 case PACKET_TIMESTAMP: 3804 case PACKET_TIMESTAMP:
3775 { 3805 {
@@ -3807,11 +3837,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3807 3837
3808 if (optlen != sizeof(val)) 3838 if (optlen != sizeof(val))
3809 return -EINVAL; 3839 return -EINVAL;
3810 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3811 return -EBUSY;
3812 if (copy_from_user(&val, optval, sizeof(val))) 3840 if (copy_from_user(&val, optval, sizeof(val)))
3813 return -EFAULT; 3841 return -EFAULT;
3814 po->tp_tx_has_off = !!val; 3842
3843 lock_sock(sk);
3844 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3845 ret = -EBUSY;
3846 } else {
3847 po->tp_tx_has_off = !!val;
3848 ret = 0;
3849 }
3850 release_sock(sk);
3815 return 0; 3851 return 0;
3816 } 3852 }
3817 case PACKET_QDISC_BYPASS: 3853 case PACKET_QDISC_BYPASS:
@@ -4208,8 +4244,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4208 /* Added to avoid minimal code churn */ 4244 /* Added to avoid minimal code churn */
4209 struct tpacket_req *req = &req_u->req; 4245 struct tpacket_req *req = &req_u->req;
4210 4246
4211 lock_sock(sk);
4212
4213 rb = tx_ring ? &po->tx_ring : &po->rx_ring; 4247 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4214 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; 4248 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4215 4249
@@ -4347,7 +4381,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4347 if (pg_vec) 4381 if (pg_vec)
4348 free_pg_vec(pg_vec, order, req->tp_block_nr); 4382 free_pg_vec(pg_vec, order, req->tp_block_nr);
4349out: 4383out:
4350 release_sock(sk);
4351 return err; 4384 return err;
4352} 4385}
4353 4386
diff --git a/net/packet/internal.h b/net/packet/internal.h
index a1d2b2319ae9..3bb7c5fb3bff 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -112,10 +112,12 @@ struct packet_sock {
112 int copy_thresh; 112 int copy_thresh;
113 spinlock_t bind_lock; 113 spinlock_t bind_lock;
114 struct mutex pg_vec_lock; 114 struct mutex pg_vec_lock;
115 unsigned int running:1, /* prot_hook is attached*/ 115 unsigned int running; /* bind_lock must be held */
116 auxdata:1, 116 unsigned int auxdata:1, /* writer must hold sock lock */
117 origdev:1, 117 origdev:1,
118 has_vnet_hdr:1; 118 has_vnet_hdr:1,
119 tp_loss:1,
120 tp_tx_has_off:1;
119 int pressure; 121 int pressure;
120 int ifindex; /* bound device */ 122 int ifindex; /* bound device */
121 __be16 num; 123 __be16 num;
@@ -125,8 +127,6 @@ struct packet_sock {
125 enum tpacket_versions tp_version; 127 enum tpacket_versions tp_version;
126 unsigned int tp_hdrlen; 128 unsigned int tp_hdrlen;
127 unsigned int tp_reserve; 129 unsigned int tp_reserve;
128 unsigned int tp_loss:1;
129 unsigned int tp_tx_has_off:1;
130 unsigned int tp_tstamp; 130 unsigned int tp_tstamp;
131 struct net_device __rcu *cached_dev; 131 struct net_device __rcu *cached_dev;
132 int (*xmit)(struct sk_buff *skb); 132 int (*xmit)(struct sk_buff *skb);
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index b33e5aeb4c06..2aa07b547b16 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -1135,3 +1135,4 @@ module_exit(qrtr_proto_fini);
1135 1135
1136MODULE_DESCRIPTION("Qualcomm IPC-router driver"); 1136MODULE_DESCRIPTION("Qualcomm IPC-router driver");
1137MODULE_LICENSE("GPL v2"); 1137MODULE_LICENSE("GPL v2");
1138MODULE_ALIAS_NETPROTO(PF_QIPCRTR);
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index a5994cf0512b..8527cfdc446d 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -652,7 +652,7 @@ static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
652 } 652 }
653 } 653 }
654 654
655 return 0; 655 return -ENOENT;
656} 656}
657 657
658static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a, 658static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
@@ -682,7 +682,12 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
682 u16 mtype; 682 u16 mtype;
683 u16 dlen; 683 u16 dlen;
684 684
685 curr_data = ife_tlv_meta_decode(tlv_data, &mtype, &dlen, NULL); 685 curr_data = ife_tlv_meta_decode(tlv_data, ifehdr_end, &mtype,
686 &dlen, NULL);
687 if (!curr_data) {
688 qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
689 return TC_ACT_SHOT;
690 }
686 691
687 if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) { 692 if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) {
688 /* abuse overlimits to count when we receive metadata 693 /* abuse overlimits to count when we receive metadata
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 31083b5035ec..2e3f7b75a8ec 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -556,46 +556,49 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
556 addr->v6.sin6_scope_id = 0; 556 addr->v6.sin6_scope_id = 0;
557} 557}
558 558
559/* Compare addresses exactly. 559static int __sctp_v6_cmp_addr(const union sctp_addr *addr1,
560 * v4-mapped-v6 is also in consideration. 560 const union sctp_addr *addr2)
561 */
562static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
563 const union sctp_addr *addr2)
564{ 561{
565 if (addr1->sa.sa_family != addr2->sa.sa_family) { 562 if (addr1->sa.sa_family != addr2->sa.sa_family) {
566 if (addr1->sa.sa_family == AF_INET && 563 if (addr1->sa.sa_family == AF_INET &&
567 addr2->sa.sa_family == AF_INET6 && 564 addr2->sa.sa_family == AF_INET6 &&
568 ipv6_addr_v4mapped(&addr2->v6.sin6_addr)) { 565 ipv6_addr_v4mapped(&addr2->v6.sin6_addr) &&
569 if (addr2->v6.sin6_port == addr1->v4.sin_port && 566 addr2->v6.sin6_addr.s6_addr32[3] ==
570 addr2->v6.sin6_addr.s6_addr32[3] == 567 addr1->v4.sin_addr.s_addr)
571 addr1->v4.sin_addr.s_addr) 568 return 1;
572 return 1; 569
573 }
574 if (addr2->sa.sa_family == AF_INET && 570 if (addr2->sa.sa_family == AF_INET &&
575 addr1->sa.sa_family == AF_INET6 && 571 addr1->sa.sa_family == AF_INET6 &&
576 ipv6_addr_v4mapped(&addr1->v6.sin6_addr)) { 572 ipv6_addr_v4mapped(&addr1->v6.sin6_addr) &&
577 if (addr1->v6.sin6_port == addr2->v4.sin_port && 573 addr1->v6.sin6_addr.s6_addr32[3] ==
578 addr1->v6.sin6_addr.s6_addr32[3] == 574 addr2->v4.sin_addr.s_addr)
579 addr2->v4.sin_addr.s_addr) 575 return 1;
580 return 1; 576
581 }
582 return 0; 577 return 0;
583 } 578 }
584 if (addr1->v6.sin6_port != addr2->v6.sin6_port) 579
585 return 0;
586 if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr)) 580 if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
587 return 0; 581 return 0;
582
588 /* If this is a linklocal address, compare the scope_id. */ 583 /* If this is a linklocal address, compare the scope_id. */
589 if (ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) { 584 if ((ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
590 if (addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id && 585 addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
591 (addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)) { 586 addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)
592 return 0; 587 return 0;
593 }
594 }
595 588
596 return 1; 589 return 1;
597} 590}
598 591
592/* Compare addresses exactly.
593 * v4-mapped-v6 is also in consideration.
594 */
595static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
596 const union sctp_addr *addr2)
597{
598 return __sctp_v6_cmp_addr(addr1, addr2) &&
599 addr1->v6.sin6_port == addr2->v6.sin6_port;
600}
601
599/* Initialize addr struct to INADDR_ANY. */ 602/* Initialize addr struct to INADDR_ANY. */
600static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port) 603static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port)
601{ 604{
@@ -875,8 +878,8 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
875 const union sctp_addr *addr2, 878 const union sctp_addr *addr2,
876 struct sctp_sock *opt) 879 struct sctp_sock *opt)
877{ 880{
878 struct sctp_af *af1, *af2;
879 struct sock *sk = sctp_opt2sk(opt); 881 struct sock *sk = sctp_opt2sk(opt);
882 struct sctp_af *af1, *af2;
880 883
881 af1 = sctp_get_af_specific(addr1->sa.sa_family); 884 af1 = sctp_get_af_specific(addr1->sa.sa_family);
882 af2 = sctp_get_af_specific(addr2->sa.sa_family); 885 af2 = sctp_get_af_specific(addr2->sa.sa_family);
@@ -892,10 +895,7 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
892 if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2)) 895 if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
893 return 1; 896 return 1;
894 897
895 if (addr1->sa.sa_family != addr2->sa.sa_family) 898 return __sctp_v6_cmp_addr(addr1, addr2);
896 return 0;
897
898 return af1->cmp_addr(addr1, addr2);
899} 899}
900 900
901/* Verify that the provided sockaddr looks bindable. Common verification, 901/* Verify that the provided sockaddr looks bindable. Common verification,
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 5f8046c62d90..f5d4b69dbabc 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1259,14 +1259,12 @@ static int smc_shutdown(struct socket *sock, int how)
1259 rc = smc_close_shutdown_write(smc); 1259 rc = smc_close_shutdown_write(smc);
1260 break; 1260 break;
1261 case SHUT_RD: 1261 case SHUT_RD:
1262 if (sk->sk_state == SMC_LISTEN) 1262 rc = 0;
1263 rc = smc_close_active(smc); 1263 /* nothing more to do because peer is not involved */
1264 else
1265 rc = 0;
1266 /* nothing more to do because peer is not involved */
1267 break; 1264 break;
1268 } 1265 }
1269 rc1 = kernel_sock_shutdown(smc->clcsock, how); 1266 if (smc->clcsock)
1267 rc1 = kernel_sock_shutdown(smc->clcsock, how);
1270 /* map sock_shutdown_cmd constants to sk_shutdown value range */ 1268 /* map sock_shutdown_cmd constants to sk_shutdown value range */
1271 sk->sk_shutdown |= how + 1; 1269 sk->sk_shutdown |= how + 1;
1272 1270
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index b9283ce5cd85..092bebc70048 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -67,7 +67,7 @@ static void strp_abort_strp(struct strparser *strp, int err)
67 67
68static void strp_start_timer(struct strparser *strp, long timeo) 68static void strp_start_timer(struct strparser *strp, long timeo)
69{ 69{
70 if (timeo) 70 if (timeo && timeo != LONG_MAX)
71 mod_delayed_work(strp_wq, &strp->msg_timer_work, timeo); 71 mod_delayed_work(strp_wq, &strp->msg_timer_work, timeo);
72} 72}
73 73
@@ -296,9 +296,9 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
296 strp_start_timer(strp, timeo); 296 strp_start_timer(strp, timeo);
297 } 297 }
298 298
299 stm->accum_len += cand_len;
299 strp->need_bytes = stm->strp.full_len - 300 strp->need_bytes = stm->strp.full_len -
300 stm->accum_len; 301 stm->accum_len;
301 stm->accum_len += cand_len;
302 stm->early_eaten = cand_len; 302 stm->early_eaten = cand_len;
303 STRP_STATS_ADD(strp->stats.bytes, cand_len); 303 STRP_STATS_ADD(strp->stats.bytes, cand_len);
304 desc->count = 0; /* Stop reading socket */ 304 desc->count = 0; /* Stop reading socket */
@@ -321,6 +321,7 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
321 /* Hurray, we have a new message! */ 321 /* Hurray, we have a new message! */
322 cancel_delayed_work(&strp->msg_timer_work); 322 cancel_delayed_work(&strp->msg_timer_work);
323 strp->skb_head = NULL; 323 strp->skb_head = NULL;
324 strp->need_bytes = 0;
324 STRP_STATS_INCR(strp->stats.msgs); 325 STRP_STATS_INCR(strp->stats.msgs);
325 326
326 /* Give skb to upper layer */ 327 /* Give skb to upper layer */
@@ -410,9 +411,7 @@ void strp_data_ready(struct strparser *strp)
410 return; 411 return;
411 412
412 if (strp->need_bytes) { 413 if (strp->need_bytes) {
413 if (strp_peek_len(strp) >= strp->need_bytes) 414 if (strp_peek_len(strp) < strp->need_bytes)
414 strp->need_bytes = 0;
415 else
416 return; 415 return;
417 } 416 }
418 417
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 0f08934b2cea..c81ef5e6c981 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1375,6 +1375,7 @@ rpc_gssd_dummy_depopulate(struct dentry *pipe_dentry)
1375 struct dentry *clnt_dir = pipe_dentry->d_parent; 1375 struct dentry *clnt_dir = pipe_dentry->d_parent;
1376 struct dentry *gssd_dir = clnt_dir->d_parent; 1376 struct dentry *gssd_dir = clnt_dir->d_parent;
1377 1377
1378 dget(pipe_dentry);
1378 __rpc_rmpipe(d_inode(clnt_dir), pipe_dentry); 1379 __rpc_rmpipe(d_inode(clnt_dir), pipe_dentry);
1379 __rpc_depopulate(clnt_dir, gssd_dummy_info_file, 0, 1); 1380 __rpc_depopulate(clnt_dir, gssd_dummy_info_file, 0, 1);
1380 __rpc_depopulate(gssd_dir, gssd_dummy_clnt_dir, 0, 1); 1381 __rpc_depopulate(gssd_dir, gssd_dummy_clnt_dir, 0, 1);
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index 32dc33a94bc7..5453e564da82 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -777,7 +777,7 @@ int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg,
777 777
778 ret = tipc_bearer_get_name(net, bearer_name, bearer_id); 778 ret = tipc_bearer_get_name(net, bearer_name, bearer_id);
779 if (ret || !mon) 779 if (ret || !mon)
780 return -EINVAL; 780 return 0;
781 781
782 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, 782 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
783 NLM_F_MULTI, TIPC_NL_MON_GET); 783 NLM_F_MULTI, TIPC_NL_MON_GET);
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index b1fe20972aa9..dd1c4fa2eb78 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -241,7 +241,8 @@ err:
241static struct publication *tipc_service_remove_publ(struct net *net, 241static struct publication *tipc_service_remove_publ(struct net *net,
242 struct tipc_service *sc, 242 struct tipc_service *sc,
243 u32 lower, u32 upper, 243 u32 lower, u32 upper,
244 u32 node, u32 key) 244 u32 node, u32 key,
245 struct service_range **rng)
245{ 246{
246 struct tipc_subscription *sub, *tmp; 247 struct tipc_subscription *sub, *tmp;
247 struct service_range *sr; 248 struct service_range *sr;
@@ -275,19 +276,15 @@ static struct publication *tipc_service_remove_publ(struct net *net,
275 276
276 list_del(&p->all_publ); 277 list_del(&p->all_publ);
277 list_del(&p->local_publ); 278 list_del(&p->local_publ);
278 279 if (list_empty(&sr->all_publ))
279 /* Remove service range item if this was its last publication */
280 if (list_empty(&sr->all_publ)) {
281 last = true; 280 last = true;
282 rb_erase(&sr->tree_node, &sc->ranges);
283 kfree(sr);
284 }
285 281
286 /* Notify any waiting subscriptions */ 282 /* Notify any waiting subscriptions */
287 list_for_each_entry_safe(sub, tmp, &sc->subscriptions, service_list) { 283 list_for_each_entry_safe(sub, tmp, &sc->subscriptions, service_list) {
288 tipc_sub_report_overlap(sub, p->lower, p->upper, TIPC_WITHDRAWN, 284 tipc_sub_report_overlap(sub, p->lower, p->upper, TIPC_WITHDRAWN,
289 p->port, p->node, p->scope, last); 285 p->port, p->node, p->scope, last);
290 } 286 }
287 *rng = sr;
291 return p; 288 return p;
292} 289}
293 290
@@ -379,13 +376,20 @@ struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
379 u32 node, u32 key) 376 u32 node, u32 key)
380{ 377{
381 struct tipc_service *sc = tipc_service_find(net, type); 378 struct tipc_service *sc = tipc_service_find(net, type);
379 struct service_range *sr = NULL;
382 struct publication *p = NULL; 380 struct publication *p = NULL;
383 381
384 if (!sc) 382 if (!sc)
385 return NULL; 383 return NULL;
386 384
387 spin_lock_bh(&sc->lock); 385 spin_lock_bh(&sc->lock);
388 p = tipc_service_remove_publ(net, sc, lower, upper, node, key); 386 p = tipc_service_remove_publ(net, sc, lower, upper, node, key, &sr);
387
388 /* Remove service range item if this was its last publication */
389 if (sr && list_empty(&sr->all_publ)) {
390 rb_erase(&sr->tree_node, &sc->ranges);
391 kfree(sr);
392 }
389 393
390 /* Delete service item if this no more publications and subscriptions */ 394 /* Delete service item if this no more publications and subscriptions */
391 if (RB_EMPTY_ROOT(&sc->ranges) && list_empty(&sc->subscriptions)) { 395 if (RB_EMPTY_ROOT(&sc->ranges) && list_empty(&sc->subscriptions)) {
@@ -665,13 +669,14 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower,
665/** 669/**
666 * tipc_nametbl_subscribe - add a subscription object to the name table 670 * tipc_nametbl_subscribe - add a subscription object to the name table
667 */ 671 */
668void tipc_nametbl_subscribe(struct tipc_subscription *sub) 672bool tipc_nametbl_subscribe(struct tipc_subscription *sub)
669{ 673{
670 struct name_table *nt = tipc_name_table(sub->net); 674 struct name_table *nt = tipc_name_table(sub->net);
671 struct tipc_net *tn = tipc_net(sub->net); 675 struct tipc_net *tn = tipc_net(sub->net);
672 struct tipc_subscr *s = &sub->evt.s; 676 struct tipc_subscr *s = &sub->evt.s;
673 u32 type = tipc_sub_read(s, seq.type); 677 u32 type = tipc_sub_read(s, seq.type);
674 struct tipc_service *sc; 678 struct tipc_service *sc;
679 bool res = true;
675 680
676 spin_lock_bh(&tn->nametbl_lock); 681 spin_lock_bh(&tn->nametbl_lock);
677 sc = tipc_service_find(sub->net, type); 682 sc = tipc_service_find(sub->net, type);
@@ -685,8 +690,10 @@ void tipc_nametbl_subscribe(struct tipc_subscription *sub)
685 pr_warn("Failed to subscribe for {%u,%u,%u}\n", type, 690 pr_warn("Failed to subscribe for {%u,%u,%u}\n", type,
686 tipc_sub_read(s, seq.lower), 691 tipc_sub_read(s, seq.lower),
687 tipc_sub_read(s, seq.upper)); 692 tipc_sub_read(s, seq.upper));
693 res = false;
688 } 694 }
689 spin_unlock_bh(&tn->nametbl_lock); 695 spin_unlock_bh(&tn->nametbl_lock);
696 return res;
690} 697}
691 698
692/** 699/**
@@ -744,16 +751,17 @@ int tipc_nametbl_init(struct net *net)
744static void tipc_service_delete(struct net *net, struct tipc_service *sc) 751static void tipc_service_delete(struct net *net, struct tipc_service *sc)
745{ 752{
746 struct service_range *sr, *tmpr; 753 struct service_range *sr, *tmpr;
747 struct publication *p, *tmpb; 754 struct publication *p, *tmp;
748 755
749 spin_lock_bh(&sc->lock); 756 spin_lock_bh(&sc->lock);
750 rbtree_postorder_for_each_entry_safe(sr, tmpr, &sc->ranges, tree_node) { 757 rbtree_postorder_for_each_entry_safe(sr, tmpr, &sc->ranges, tree_node) {
751 list_for_each_entry_safe(p, tmpb, 758 list_for_each_entry_safe(p, tmp, &sr->all_publ, all_publ) {
752 &sr->all_publ, all_publ) {
753 tipc_service_remove_publ(net, sc, p->lower, p->upper, 759 tipc_service_remove_publ(net, sc, p->lower, p->upper,
754 p->node, p->key); 760 p->node, p->key, &sr);
755 kfree_rcu(p, rcu); 761 kfree_rcu(p, rcu);
756 } 762 }
763 rb_erase(&sr->tree_node, &sc->ranges);
764 kfree(sr);
757 } 765 }
758 hlist_del_init_rcu(&sc->service_list); 766 hlist_del_init_rcu(&sc->service_list);
759 spin_unlock_bh(&sc->lock); 767 spin_unlock_bh(&sc->lock);
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index 4b14fc28d9e2..0febba41da86 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -126,7 +126,7 @@ struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type,
126struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type, 126struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
127 u32 lower, u32 upper, 127 u32 lower, u32 upper,
128 u32 node, u32 key); 128 u32 node, u32 key);
129void tipc_nametbl_subscribe(struct tipc_subscription *s); 129bool tipc_nametbl_subscribe(struct tipc_subscription *s);
130void tipc_nametbl_unsubscribe(struct tipc_subscription *s); 130void tipc_nametbl_unsubscribe(struct tipc_subscription *s);
131int tipc_nametbl_init(struct net *net); 131int tipc_nametbl_init(struct net *net);
132void tipc_nametbl_stop(struct net *net); 132void tipc_nametbl_stop(struct net *net);
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 856f9e97ea29..4fbaa0464405 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -252,6 +252,8 @@ int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
252 u64 *w0 = (u64 *)&node_id[0]; 252 u64 *w0 = (u64 *)&node_id[0];
253 u64 *w1 = (u64 *)&node_id[8]; 253 u64 *w1 = (u64 *)&node_id[8];
254 254
255 if (!attrs[TIPC_NLA_NET_NODEID_W1])
256 return -EINVAL;
255 *w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]); 257 *w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]);
256 *w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]); 258 *w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]);
257 tipc_net_init(net, node_id, 0); 259 tipc_net_init(net, node_id, 0);
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index b76f13f6fea1..6ff2254088f6 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -79,7 +79,10 @@ const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
79 79
80const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = { 80const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
81 [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC }, 81 [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC },
82 [TIPC_NLA_NET_ID] = { .type = NLA_U32 } 82 [TIPC_NLA_NET_ID] = { .type = NLA_U32 },
83 [TIPC_NLA_NET_ADDR] = { .type = NLA_U32 },
84 [TIPC_NLA_NET_NODEID] = { .type = NLA_U64 },
85 [TIPC_NLA_NET_NODEID_W1] = { .type = NLA_U64 },
83}; 86};
84 87
85const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = { 88const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
diff --git a/net/tipc/node.c b/net/tipc/node.c
index c77dd2f3c589..6f98b56dd48e 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -2232,8 +2232,8 @@ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
2232 struct net *net = sock_net(skb->sk); 2232 struct net *net = sock_net(skb->sk);
2233 u32 prev_bearer = cb->args[0]; 2233 u32 prev_bearer = cb->args[0];
2234 struct tipc_nl_msg msg; 2234 struct tipc_nl_msg msg;
2235 int bearer_id;
2235 int err; 2236 int err;
2236 int i;
2237 2237
2238 if (prev_bearer == MAX_BEARERS) 2238 if (prev_bearer == MAX_BEARERS)
2239 return 0; 2239 return 0;
@@ -2243,16 +2243,13 @@ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
2243 msg.seq = cb->nlh->nlmsg_seq; 2243 msg.seq = cb->nlh->nlmsg_seq;
2244 2244
2245 rtnl_lock(); 2245 rtnl_lock();
2246 for (i = prev_bearer; i < MAX_BEARERS; i++) { 2246 for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
2247 prev_bearer = i;
2248 err = __tipc_nl_add_monitor(net, &msg, prev_bearer); 2247 err = __tipc_nl_add_monitor(net, &msg, prev_bearer);
2249 if (err) 2248 if (err)
2250 goto out; 2249 break;
2251 } 2250 }
2252
2253out:
2254 rtnl_unlock(); 2251 rtnl_unlock();
2255 cb->args[0] = prev_bearer; 2252 cb->args[0] = bearer_id;
2256 2253
2257 return skb->len; 2254 return skb->len;
2258} 2255}
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 1fd1c8b5ce03..252a52ae0893 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1278,7 +1278,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
1278 struct tipc_msg *hdr = &tsk->phdr; 1278 struct tipc_msg *hdr = &tsk->phdr;
1279 struct tipc_name_seq *seq; 1279 struct tipc_name_seq *seq;
1280 struct sk_buff_head pkts; 1280 struct sk_buff_head pkts;
1281 u32 dnode, dport; 1281 u32 dport, dnode = 0;
1282 u32 type, inst; 1282 u32 type, inst;
1283 int mtu, rc; 1283 int mtu, rc;
1284 1284
@@ -1348,6 +1348,8 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
1348 msg_set_destnode(hdr, dnode); 1348 msg_set_destnode(hdr, dnode);
1349 msg_set_destport(hdr, dest->addr.id.ref); 1349 msg_set_destport(hdr, dest->addr.id.ref);
1350 msg_set_hdr_sz(hdr, BASIC_H_SIZE); 1350 msg_set_hdr_sz(hdr, BASIC_H_SIZE);
1351 } else {
1352 return -EINVAL;
1351 } 1353 }
1352 1354
1353 /* Block or return if destination link is congested */ 1355 /* Block or return if destination link is congested */
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index b7d80bc5f4ab..f340e53da625 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -153,7 +153,10 @@ struct tipc_subscription *tipc_sub_subscribe(struct net *net,
153 memcpy(&sub->evt.s, s, sizeof(*s)); 153 memcpy(&sub->evt.s, s, sizeof(*s));
154 spin_lock_init(&sub->lock); 154 spin_lock_init(&sub->lock);
155 kref_init(&sub->kref); 155 kref_init(&sub->kref);
156 tipc_nametbl_subscribe(sub); 156 if (!tipc_nametbl_subscribe(sub)) {
157 kfree(sub);
158 return NULL;
159 }
157 timer_setup(&sub->timer, tipc_sub_timeout, 0); 160 timer_setup(&sub->timer, tipc_sub_timeout, 0);
158 timeout = tipc_sub_read(&sub->evt.s, timeout); 161 timeout = tipc_sub_read(&sub->evt.s, timeout);
159 if (timeout != TIPC_WAIT_FOREVER) 162 if (timeout != TIPC_WAIT_FOREVER)
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 4dc766b03f00..71e79597f940 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -41,6 +41,8 @@
41#include <net/strparser.h> 41#include <net/strparser.h>
42#include <net/tls.h> 42#include <net/tls.h>
43 43
44#define MAX_IV_SIZE TLS_CIPHER_AES_GCM_128_IV_SIZE
45
44static int tls_do_decryption(struct sock *sk, 46static int tls_do_decryption(struct sock *sk,
45 struct scatterlist *sgin, 47 struct scatterlist *sgin,
46 struct scatterlist *sgout, 48 struct scatterlist *sgout,
@@ -673,7 +675,7 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
673{ 675{
674 struct tls_context *tls_ctx = tls_get_ctx(sk); 676 struct tls_context *tls_ctx = tls_get_ctx(sk);
675 struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); 677 struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
676 char iv[TLS_CIPHER_AES_GCM_128_SALT_SIZE + tls_ctx->rx.iv_size]; 678 char iv[TLS_CIPHER_AES_GCM_128_SALT_SIZE + MAX_IV_SIZE];
677 struct scatterlist sgin_arr[MAX_SKB_FRAGS + 2]; 679 struct scatterlist sgin_arr[MAX_SKB_FRAGS + 2];
678 struct scatterlist *sgin = &sgin_arr[0]; 680 struct scatterlist *sgin = &sgin_arr[0];
679 struct strp_msg *rxm = strp_msg(skb); 681 struct strp_msg *rxm = strp_msg(skb);
@@ -1094,6 +1096,12 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
1094 goto free_priv; 1096 goto free_priv;
1095 } 1097 }
1096 1098
1099 /* Sanity-check the IV size for stack allocations. */
1100 if (iv_size > MAX_IV_SIZE) {
1101 rc = -EINVAL;
1102 goto free_priv;
1103 }
1104
1097 cctx->prepend_size = TLS_HEADER_SIZE + nonce_size; 1105 cctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
1098 cctx->tag_size = tag_size; 1106 cctx->tag_size = tag_size;
1099 cctx->overhead_size = cctx->prepend_size + cctx->tag_size; 1107 cctx->overhead_size = cctx->prepend_size + cctx->tag_size;
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index aac9b8f6552e..c1076c19b858 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -2018,7 +2018,13 @@ const struct vsock_transport *vsock_core_get_transport(void)
2018} 2018}
2019EXPORT_SYMBOL_GPL(vsock_core_get_transport); 2019EXPORT_SYMBOL_GPL(vsock_core_get_transport);
2020 2020
2021static void __exit vsock_exit(void)
2022{
2023 /* Do nothing. This function makes this module removable. */
2024}
2025
2021module_init(vsock_init_tables); 2026module_init(vsock_init_tables);
2027module_exit(vsock_exit);
2022 2028
2023MODULE_AUTHOR("VMware, Inc."); 2029MODULE_AUTHOR("VMware, Inc.");
2024MODULE_DESCRIPTION("VMware Virtual Socket Family"); 2030MODULE_DESCRIPTION("VMware Virtual Socket Family");
diff --git a/samples/livepatch/livepatch-shadow-fix1.c b/samples/livepatch/livepatch-shadow-fix1.c
index 830c55514f9f..49b13553eaae 100644
--- a/samples/livepatch/livepatch-shadow-fix1.c
+++ b/samples/livepatch/livepatch-shadow-fix1.c
@@ -56,6 +56,21 @@ struct dummy {
56 unsigned long jiffies_expire; 56 unsigned long jiffies_expire;
57}; 57};
58 58
59/*
60 * The constructor makes more sense together with klp_shadow_get_or_alloc().
61 * In this example, it would be safe to assign the pointer also to the shadow
62 * variable returned by klp_shadow_alloc(). But we wanted to show the more
63 * complicated use of the API.
64 */
65static int shadow_leak_ctor(void *obj, void *shadow_data, void *ctor_data)
66{
67 void **shadow_leak = shadow_data;
68 void *leak = ctor_data;
69
70 *shadow_leak = leak;
71 return 0;
72}
73
59struct dummy *livepatch_fix1_dummy_alloc(void) 74struct dummy *livepatch_fix1_dummy_alloc(void)
60{ 75{
61 struct dummy *d; 76 struct dummy *d;
@@ -74,7 +89,8 @@ struct dummy *livepatch_fix1_dummy_alloc(void)
74 * pointer to handle resource release. 89 * pointer to handle resource release.
75 */ 90 */
76 leak = kzalloc(sizeof(int), GFP_KERNEL); 91 leak = kzalloc(sizeof(int), GFP_KERNEL);
77 klp_shadow_alloc(d, SV_LEAK, &leak, sizeof(leak), GFP_KERNEL); 92 klp_shadow_alloc(d, SV_LEAK, sizeof(leak), GFP_KERNEL,
93 shadow_leak_ctor, leak);
78 94
79 pr_info("%s: dummy @ %p, expires @ %lx\n", 95 pr_info("%s: dummy @ %p, expires @ %lx\n",
80 __func__, d, d->jiffies_expire); 96 __func__, d, d->jiffies_expire);
@@ -82,9 +98,19 @@ struct dummy *livepatch_fix1_dummy_alloc(void)
82 return d; 98 return d;
83} 99}
84 100
101static void livepatch_fix1_dummy_leak_dtor(void *obj, void *shadow_data)
102{
103 void *d = obj;
104 void **shadow_leak = shadow_data;
105
106 kfree(*shadow_leak);
107 pr_info("%s: dummy @ %p, prevented leak @ %p\n",
108 __func__, d, *shadow_leak);
109}
110
85void livepatch_fix1_dummy_free(struct dummy *d) 111void livepatch_fix1_dummy_free(struct dummy *d)
86{ 112{
87 void **shadow_leak, *leak; 113 void **shadow_leak;
88 114
89 /* 115 /*
90 * Patch: fetch the saved SV_LEAK shadow variable, detach and 116 * Patch: fetch the saved SV_LEAK shadow variable, detach and
@@ -93,15 +119,10 @@ void livepatch_fix1_dummy_free(struct dummy *d)
93 * was loaded.) 119 * was loaded.)
94 */ 120 */
95 shadow_leak = klp_shadow_get(d, SV_LEAK); 121 shadow_leak = klp_shadow_get(d, SV_LEAK);
96 if (shadow_leak) { 122 if (shadow_leak)
97 leak = *shadow_leak; 123 klp_shadow_free(d, SV_LEAK, livepatch_fix1_dummy_leak_dtor);
98 klp_shadow_free(d, SV_LEAK); 124 else
99 kfree(leak);
100 pr_info("%s: dummy @ %p, prevented leak @ %p\n",
101 __func__, d, leak);
102 } else {
103 pr_info("%s: dummy @ %p leaked!\n", __func__, d); 125 pr_info("%s: dummy @ %p leaked!\n", __func__, d);
104 }
105 126
106 kfree(d); 127 kfree(d);
107} 128}
@@ -147,7 +168,7 @@ static int livepatch_shadow_fix1_init(void)
147static void livepatch_shadow_fix1_exit(void) 168static void livepatch_shadow_fix1_exit(void)
148{ 169{
149 /* Cleanup any existing SV_LEAK shadow variables */ 170 /* Cleanup any existing SV_LEAK shadow variables */
150 klp_shadow_free_all(SV_LEAK); 171 klp_shadow_free_all(SV_LEAK, livepatch_fix1_dummy_leak_dtor);
151 172
152 WARN_ON(klp_unregister_patch(&patch)); 173 WARN_ON(klp_unregister_patch(&patch));
153} 174}
diff --git a/samples/livepatch/livepatch-shadow-fix2.c b/samples/livepatch/livepatch-shadow-fix2.c
index ff9948f0ec00..b34c7bf83356 100644
--- a/samples/livepatch/livepatch-shadow-fix2.c
+++ b/samples/livepatch/livepatch-shadow-fix2.c
@@ -53,39 +53,42 @@ struct dummy {
53bool livepatch_fix2_dummy_check(struct dummy *d, unsigned long jiffies) 53bool livepatch_fix2_dummy_check(struct dummy *d, unsigned long jiffies)
54{ 54{
55 int *shadow_count; 55 int *shadow_count;
56 int count;
57 56
58 /* 57 /*
59 * Patch: handle in-flight dummy structures, if they do not 58 * Patch: handle in-flight dummy structures, if they do not
60 * already have a SV_COUNTER shadow variable, then attach a 59 * already have a SV_COUNTER shadow variable, then attach a
61 * new one. 60 * new one.
62 */ 61 */
63 count = 0;
64 shadow_count = klp_shadow_get_or_alloc(d, SV_COUNTER, 62 shadow_count = klp_shadow_get_or_alloc(d, SV_COUNTER,
65 &count, sizeof(count), 63 sizeof(*shadow_count), GFP_NOWAIT,
66 GFP_NOWAIT); 64 NULL, NULL);
67 if (shadow_count) 65 if (shadow_count)
68 *shadow_count += 1; 66 *shadow_count += 1;
69 67
70 return time_after(jiffies, d->jiffies_expire); 68 return time_after(jiffies, d->jiffies_expire);
71} 69}
72 70
71static void livepatch_fix2_dummy_leak_dtor(void *obj, void *shadow_data)
72{
73 void *d = obj;
74 void **shadow_leak = shadow_data;
75
76 kfree(*shadow_leak);
77 pr_info("%s: dummy @ %p, prevented leak @ %p\n",
78 __func__, d, *shadow_leak);
79}
80
73void livepatch_fix2_dummy_free(struct dummy *d) 81void livepatch_fix2_dummy_free(struct dummy *d)
74{ 82{
75 void **shadow_leak, *leak; 83 void **shadow_leak;
76 int *shadow_count; 84 int *shadow_count;
77 85
78 /* Patch: copy the memory leak patch from the fix1 module. */ 86 /* Patch: copy the memory leak patch from the fix1 module. */
79 shadow_leak = klp_shadow_get(d, SV_LEAK); 87 shadow_leak = klp_shadow_get(d, SV_LEAK);
80 if (shadow_leak) { 88 if (shadow_leak)
81 leak = *shadow_leak; 89 klp_shadow_free(d, SV_LEAK, livepatch_fix2_dummy_leak_dtor);
82 klp_shadow_free(d, SV_LEAK); 90 else
83 kfree(leak);
84 pr_info("%s: dummy @ %p, prevented leak @ %p\n",
85 __func__, d, leak);
86 } else {
87 pr_info("%s: dummy @ %p leaked!\n", __func__, d); 91 pr_info("%s: dummy @ %p leaked!\n", __func__, d);
88 }
89 92
90 /* 93 /*
91 * Patch: fetch the SV_COUNTER shadow variable and display 94 * Patch: fetch the SV_COUNTER shadow variable and display
@@ -95,7 +98,7 @@ void livepatch_fix2_dummy_free(struct dummy *d)
95 if (shadow_count) { 98 if (shadow_count) {
96 pr_info("%s: dummy @ %p, check counter = %d\n", 99 pr_info("%s: dummy @ %p, check counter = %d\n",
97 __func__, d, *shadow_count); 100 __func__, d, *shadow_count);
98 klp_shadow_free(d, SV_COUNTER); 101 klp_shadow_free(d, SV_COUNTER, NULL);
99 } 102 }
100 103
101 kfree(d); 104 kfree(d);
@@ -142,7 +145,7 @@ static int livepatch_shadow_fix2_init(void)
142static void livepatch_shadow_fix2_exit(void) 145static void livepatch_shadow_fix2_exit(void)
143{ 146{
144 /* Cleanup any existing SV_COUNTER shadow variables */ 147 /* Cleanup any existing SV_COUNTER shadow variables */
145 klp_shadow_free_all(SV_COUNTER); 148 klp_shadow_free_all(SV_COUNTER, NULL);
146 149
147 WARN_ON(klp_unregister_patch(&patch)); 150 WARN_ON(klp_unregister_patch(&patch));
148} 151}
diff --git a/security/commoncap.c b/security/commoncap.c
index 48620c93d697..1ce701fcb3f3 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -449,6 +449,8 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
449 magic |= VFS_CAP_FLAGS_EFFECTIVE; 449 magic |= VFS_CAP_FLAGS_EFFECTIVE;
450 memcpy(&cap->data, &nscap->data, sizeof(__le32) * 2 * VFS_CAP_U32); 450 memcpy(&cap->data, &nscap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
451 cap->magic_etc = cpu_to_le32(magic); 451 cap->magic_etc = cpu_to_le32(magic);
452 } else {
453 size = -ENOMEM;
452 } 454 }
453 } 455 }
454 kfree(tmpbuf); 456 kfree(tmpbuf);
diff --git a/sound/core/control.c b/sound/core/control.c
index 69734b0eafd0..9aa15bfc7936 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -1492,7 +1492,7 @@ static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
1492 int op_flag) 1492 int op_flag)
1493{ 1493{
1494 struct snd_ctl_tlv header; 1494 struct snd_ctl_tlv header;
1495 unsigned int *container; 1495 unsigned int __user *container;
1496 unsigned int container_size; 1496 unsigned int container_size;
1497 struct snd_kcontrol *kctl; 1497 struct snd_kcontrol *kctl;
1498 struct snd_ctl_elem_id id; 1498 struct snd_ctl_elem_id id;
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index b719d0bd833e..06d7c40af570 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -27,10 +27,11 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
27 s32 __user *src) 27 s32 __user *src)
28{ 28{
29 snd_pcm_sframes_t delay; 29 snd_pcm_sframes_t delay;
30 int err;
30 31
31 delay = snd_pcm_delay(substream); 32 err = snd_pcm_delay(substream, &delay);
32 if (delay < 0) 33 if (err)
33 return delay; 34 return err;
34 if (put_user(delay, src)) 35 if (put_user(delay, src))
35 return -EFAULT; 36 return -EFAULT;
36 return 0; 37 return 0;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 35ffccea94c3..0e875d5a9e86 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2692,7 +2692,8 @@ static int snd_pcm_hwsync(struct snd_pcm_substream *substream)
2692 return err; 2692 return err;
2693} 2693}
2694 2694
2695static snd_pcm_sframes_t snd_pcm_delay(struct snd_pcm_substream *substream) 2695static int snd_pcm_delay(struct snd_pcm_substream *substream,
2696 snd_pcm_sframes_t *delay)
2696{ 2697{
2697 struct snd_pcm_runtime *runtime = substream->runtime; 2698 struct snd_pcm_runtime *runtime = substream->runtime;
2698 int err; 2699 int err;
@@ -2708,7 +2709,9 @@ static snd_pcm_sframes_t snd_pcm_delay(struct snd_pcm_substream *substream)
2708 n += runtime->delay; 2709 n += runtime->delay;
2709 } 2710 }
2710 snd_pcm_stream_unlock_irq(substream); 2711 snd_pcm_stream_unlock_irq(substream);
2711 return err < 0 ? err : n; 2712 if (!err)
2713 *delay = n;
2714 return err;
2712} 2715}
2713 2716
2714static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream, 2717static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
@@ -2751,6 +2754,7 @@ static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
2751 sync_ptr.s.status.hw_ptr = status->hw_ptr; 2754 sync_ptr.s.status.hw_ptr = status->hw_ptr;
2752 sync_ptr.s.status.tstamp = status->tstamp; 2755 sync_ptr.s.status.tstamp = status->tstamp;
2753 sync_ptr.s.status.suspended_state = status->suspended_state; 2756 sync_ptr.s.status.suspended_state = status->suspended_state;
2757 sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
2754 snd_pcm_stream_unlock_irq(substream); 2758 snd_pcm_stream_unlock_irq(substream);
2755 if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr))) 2759 if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
2756 return -EFAULT; 2760 return -EFAULT;
@@ -2916,11 +2920,13 @@ static int snd_pcm_common_ioctl(struct file *file,
2916 return snd_pcm_hwsync(substream); 2920 return snd_pcm_hwsync(substream);
2917 case SNDRV_PCM_IOCTL_DELAY: 2921 case SNDRV_PCM_IOCTL_DELAY:
2918 { 2922 {
2919 snd_pcm_sframes_t delay = snd_pcm_delay(substream); 2923 snd_pcm_sframes_t delay;
2920 snd_pcm_sframes_t __user *res = arg; 2924 snd_pcm_sframes_t __user *res = arg;
2925 int err;
2921 2926
2922 if (delay < 0) 2927 err = snd_pcm_delay(substream, &delay);
2923 return delay; 2928 if (err)
2929 return err;
2924 if (put_user(delay, res)) 2930 if (put_user(delay, res))
2925 return -EFAULT; 2931 return -EFAULT;
2926 return 0; 2932 return 0;
@@ -3008,13 +3014,7 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
3008 case SNDRV_PCM_IOCTL_DROP: 3014 case SNDRV_PCM_IOCTL_DROP:
3009 return snd_pcm_drop(substream); 3015 return snd_pcm_drop(substream);
3010 case SNDRV_PCM_IOCTL_DELAY: 3016 case SNDRV_PCM_IOCTL_DELAY:
3011 { 3017 return snd_pcm_delay(substream, frames);
3012 result = snd_pcm_delay(substream);
3013 if (result < 0)
3014 return result;
3015 *frames = result;
3016 return 0;
3017 }
3018 default: 3018 default:
3019 return -EINVAL; 3019 return -EINVAL;
3020 } 3020 }
@@ -3234,7 +3234,7 @@ static __poll_t snd_pcm_capture_poll(struct file *file, poll_table * wait)
3234/* 3234/*
3235 * mmap status record 3235 * mmap status record
3236 */ 3236 */
3237static int snd_pcm_mmap_status_fault(struct vm_fault *vmf) 3237static vm_fault_t snd_pcm_mmap_status_fault(struct vm_fault *vmf)
3238{ 3238{
3239 struct snd_pcm_substream *substream = vmf->vma->vm_private_data; 3239 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3240 struct snd_pcm_runtime *runtime; 3240 struct snd_pcm_runtime *runtime;
@@ -3270,7 +3270,7 @@ static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file
3270/* 3270/*
3271 * mmap control record 3271 * mmap control record
3272 */ 3272 */
3273static int snd_pcm_mmap_control_fault(struct vm_fault *vmf) 3273static vm_fault_t snd_pcm_mmap_control_fault(struct vm_fault *vmf)
3274{ 3274{
3275 struct snd_pcm_substream *substream = vmf->vma->vm_private_data; 3275 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3276 struct snd_pcm_runtime *runtime; 3276 struct snd_pcm_runtime *runtime;
@@ -3359,7 +3359,7 @@ snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs)
3359/* 3359/*
3360 * fault callback for mmapping a RAM page 3360 * fault callback for mmapping a RAM page
3361 */ 3361 */
3362static int snd_pcm_mmap_data_fault(struct vm_fault *vmf) 3362static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf)
3363{ 3363{
3364 struct snd_pcm_substream *substream = vmf->vma->vm_private_data; 3364 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3365 struct snd_pcm_runtime *runtime; 3365 struct snd_pcm_runtime *runtime;
diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c
index f69764d7cdd7..e30e30ba6e39 100644
--- a/sound/core/rawmidi_compat.c
+++ b/sound/core/rawmidi_compat.c
@@ -36,8 +36,6 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile,
36 struct snd_rawmidi_params params; 36 struct snd_rawmidi_params params;
37 unsigned int val; 37 unsigned int val;
38 38
39 if (rfile->output == NULL)
40 return -EINVAL;
41 if (get_user(params.stream, &src->stream) || 39 if (get_user(params.stream, &src->stream) ||
42 get_user(params.buffer_size, &src->buffer_size) || 40 get_user(params.buffer_size, &src->buffer_size) ||
43 get_user(params.avail_min, &src->avail_min) || 41 get_user(params.avail_min, &src->avail_min) ||
@@ -46,8 +44,12 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile,
46 params.no_active_sensing = val; 44 params.no_active_sensing = val;
47 switch (params.stream) { 45 switch (params.stream) {
48 case SNDRV_RAWMIDI_STREAM_OUTPUT: 46 case SNDRV_RAWMIDI_STREAM_OUTPUT:
47 if (!rfile->output)
48 return -EINVAL;
49 return snd_rawmidi_output_params(rfile->output, &params); 49 return snd_rawmidi_output_params(rfile->output, &params);
50 case SNDRV_RAWMIDI_STREAM_INPUT: 50 case SNDRV_RAWMIDI_STREAM_INPUT:
51 if (!rfile->input)
52 return -EINVAL;
51 return snd_rawmidi_input_params(rfile->input, &params); 53 return snd_rawmidi_input_params(rfile->input, &params);
52 } 54 }
53 return -EINVAL; 55 return -EINVAL;
@@ -67,16 +69,18 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile,
67 int err; 69 int err;
68 struct snd_rawmidi_status status; 70 struct snd_rawmidi_status status;
69 71
70 if (rfile->output == NULL)
71 return -EINVAL;
72 if (get_user(status.stream, &src->stream)) 72 if (get_user(status.stream, &src->stream))
73 return -EFAULT; 73 return -EFAULT;
74 74
75 switch (status.stream) { 75 switch (status.stream) {
76 case SNDRV_RAWMIDI_STREAM_OUTPUT: 76 case SNDRV_RAWMIDI_STREAM_OUTPUT:
77 if (!rfile->output)
78 return -EINVAL;
77 err = snd_rawmidi_output_status(rfile->output, &status); 79 err = snd_rawmidi_output_status(rfile->output, &status);
78 break; 80 break;
79 case SNDRV_RAWMIDI_STREAM_INPUT: 81 case SNDRV_RAWMIDI_STREAM_INPUT:
82 if (!rfile->input)
83 return -EINVAL;
80 err = snd_rawmidi_input_status(rfile->input, &status); 84 err = snd_rawmidi_input_status(rfile->input, &status);
81 break; 85 break;
82 default: 86 default:
@@ -112,16 +116,18 @@ static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile,
112 int err; 116 int err;
113 struct snd_rawmidi_status status; 117 struct snd_rawmidi_status status;
114 118
115 if (rfile->output == NULL)
116 return -EINVAL;
117 if (get_user(status.stream, &src->stream)) 119 if (get_user(status.stream, &src->stream))
118 return -EFAULT; 120 return -EFAULT;
119 121
120 switch (status.stream) { 122 switch (status.stream) {
121 case SNDRV_RAWMIDI_STREAM_OUTPUT: 123 case SNDRV_RAWMIDI_STREAM_OUTPUT:
124 if (!rfile->output)
125 return -EINVAL;
122 err = snd_rawmidi_output_status(rfile->output, &status); 126 err = snd_rawmidi_output_status(rfile->output, &status);
123 break; 127 break;
124 case SNDRV_RAWMIDI_STREAM_INPUT: 128 case SNDRV_RAWMIDI_STREAM_INPUT:
129 if (!rfile->input)
130 return -EINVAL;
125 err = snd_rawmidi_input_status(rfile->input, &status); 131 err = snd_rawmidi_input_status(rfile->input, &status);
126 break; 132 break;
127 default: 133 default:
diff --git a/sound/core/seq/oss/seq_oss_event.c b/sound/core/seq/oss/seq_oss_event.c
index c3908862bc8b..86ca584c27b2 100644
--- a/sound/core/seq/oss/seq_oss_event.c
+++ b/sound/core/seq/oss/seq_oss_event.c
@@ -26,6 +26,7 @@
26#include <sound/seq_oss_legacy.h> 26#include <sound/seq_oss_legacy.h>
27#include "seq_oss_readq.h" 27#include "seq_oss_readq.h"
28#include "seq_oss_writeq.h" 28#include "seq_oss_writeq.h"
29#include <linux/nospec.h>
29 30
30 31
31/* 32/*
@@ -287,10 +288,10 @@ note_on_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, st
287{ 288{
288 struct seq_oss_synthinfo *info; 289 struct seq_oss_synthinfo *info;
289 290
290 if (!snd_seq_oss_synth_is_valid(dp, dev)) 291 info = snd_seq_oss_synth_info(dp, dev);
292 if (!info)
291 return -ENXIO; 293 return -ENXIO;
292 294
293 info = &dp->synths[dev];
294 switch (info->arg.event_passing) { 295 switch (info->arg.event_passing) {
295 case SNDRV_SEQ_OSS_PROCESS_EVENTS: 296 case SNDRV_SEQ_OSS_PROCESS_EVENTS:
296 if (! info->ch || ch < 0 || ch >= info->nr_voices) { 297 if (! info->ch || ch < 0 || ch >= info->nr_voices) {
@@ -298,6 +299,7 @@ note_on_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, st
298 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev); 299 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev);
299 } 300 }
300 301
302 ch = array_index_nospec(ch, info->nr_voices);
301 if (note == 255 && info->ch[ch].note >= 0) { 303 if (note == 255 && info->ch[ch].note >= 0) {
302 /* volume control */ 304 /* volume control */
303 int type; 305 int type;
@@ -347,10 +349,10 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
347{ 349{
348 struct seq_oss_synthinfo *info; 350 struct seq_oss_synthinfo *info;
349 351
350 if (!snd_seq_oss_synth_is_valid(dp, dev)) 352 info = snd_seq_oss_synth_info(dp, dev);
353 if (!info)
351 return -ENXIO; 354 return -ENXIO;
352 355
353 info = &dp->synths[dev];
354 switch (info->arg.event_passing) { 356 switch (info->arg.event_passing) {
355 case SNDRV_SEQ_OSS_PROCESS_EVENTS: 357 case SNDRV_SEQ_OSS_PROCESS_EVENTS:
356 if (! info->ch || ch < 0 || ch >= info->nr_voices) { 358 if (! info->ch || ch < 0 || ch >= info->nr_voices) {
@@ -358,6 +360,7 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
358 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev); 360 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev);
359 } 361 }
360 362
363 ch = array_index_nospec(ch, info->nr_voices);
361 if (info->ch[ch].note >= 0) { 364 if (info->ch[ch].note >= 0) {
362 note = info->ch[ch].note; 365 note = info->ch[ch].note;
363 info->ch[ch].vel = 0; 366 info->ch[ch].vel = 0;
@@ -381,7 +384,7 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
381static int 384static int
382set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note, int vel, struct snd_seq_event *ev) 385set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note, int vel, struct snd_seq_event *ev)
383{ 386{
384 if (! snd_seq_oss_synth_is_valid(dp, dev)) 387 if (!snd_seq_oss_synth_info(dp, dev))
385 return -ENXIO; 388 return -ENXIO;
386 389
387 ev->type = type; 390 ev->type = type;
@@ -399,7 +402,7 @@ set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note,
399static int 402static int
400set_control_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int param, int val, struct snd_seq_event *ev) 403set_control_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int param, int val, struct snd_seq_event *ev)
401{ 404{
402 if (! snd_seq_oss_synth_is_valid(dp, dev)) 405 if (!snd_seq_oss_synth_info(dp, dev))
403 return -ENXIO; 406 return -ENXIO;
404 407
405 ev->type = type; 408 ev->type = type;
diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
index b30b2139e3f0..9debd1b8fd28 100644
--- a/sound/core/seq/oss/seq_oss_midi.c
+++ b/sound/core/seq/oss/seq_oss_midi.c
@@ -29,6 +29,7 @@
29#include "../seq_lock.h" 29#include "../seq_lock.h"
30#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/nospec.h>
32 33
33 34
34/* 35/*
@@ -315,6 +316,7 @@ get_mididev(struct seq_oss_devinfo *dp, int dev)
315{ 316{
316 if (dev < 0 || dev >= dp->max_mididev) 317 if (dev < 0 || dev >= dp->max_mididev)
317 return NULL; 318 return NULL;
319 dev = array_index_nospec(dev, dp->max_mididev);
318 return get_mdev(dev); 320 return get_mdev(dev);
319} 321}
320 322
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
index cd0e0ebbfdb1..278ebb993122 100644
--- a/sound/core/seq/oss/seq_oss_synth.c
+++ b/sound/core/seq/oss/seq_oss_synth.c
@@ -26,6 +26,7 @@
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/nospec.h>
29 30
30/* 31/*
31 * constants 32 * constants
@@ -339,17 +340,13 @@ snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp)
339 dp->max_synthdev = 0; 340 dp->max_synthdev = 0;
340} 341}
341 342
342/* 343static struct seq_oss_synthinfo *
343 * check if the specified device is MIDI mapped device 344get_synthinfo_nospec(struct seq_oss_devinfo *dp, int dev)
344 */
345static int
346is_midi_dev(struct seq_oss_devinfo *dp, int dev)
347{ 345{
348 if (dev < 0 || dev >= dp->max_synthdev) 346 if (dev < 0 || dev >= dp->max_synthdev)
349 return 0; 347 return NULL;
350 if (dp->synths[dev].is_midi) 348 dev = array_index_nospec(dev, SNDRV_SEQ_OSS_MAX_SYNTH_DEVS);
351 return 1; 349 return &dp->synths[dev];
352 return 0;
353} 350}
354 351
355/* 352/*
@@ -359,14 +356,20 @@ static struct seq_oss_synth *
359get_synthdev(struct seq_oss_devinfo *dp, int dev) 356get_synthdev(struct seq_oss_devinfo *dp, int dev)
360{ 357{
361 struct seq_oss_synth *rec; 358 struct seq_oss_synth *rec;
362 if (dev < 0 || dev >= dp->max_synthdev) 359 struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
363 return NULL; 360
364 if (! dp->synths[dev].opened) 361 if (!info)
365 return NULL; 362 return NULL;
366 if (dp->synths[dev].is_midi) 363 if (!info->opened)
367 return &midi_synth_dev;
368 if ((rec = get_sdev(dev)) == NULL)
369 return NULL; 364 return NULL;
365 if (info->is_midi) {
366 rec = &midi_synth_dev;
367 snd_use_lock_use(&rec->use_lock);
368 } else {
369 rec = get_sdev(dev);
370 if (!rec)
371 return NULL;
372 }
370 if (! rec->opened) { 373 if (! rec->opened) {
371 snd_use_lock_free(&rec->use_lock); 374 snd_use_lock_free(&rec->use_lock);
372 return NULL; 375 return NULL;
@@ -402,10 +405,8 @@ snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev)
402 struct seq_oss_synth *rec; 405 struct seq_oss_synth *rec;
403 struct seq_oss_synthinfo *info; 406 struct seq_oss_synthinfo *info;
404 407
405 if (snd_BUG_ON(dev < 0 || dev >= dp->max_synthdev)) 408 info = get_synthinfo_nospec(dp, dev);
406 return; 409 if (!info || !info->opened)
407 info = &dp->synths[dev];
408 if (! info->opened)
409 return; 410 return;
410 if (info->sysex) 411 if (info->sysex)
411 info->sysex->len = 0; /* reset sysex */ 412 info->sysex->len = 0; /* reset sysex */
@@ -454,12 +455,14 @@ snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
454 const char __user *buf, int p, int c) 455 const char __user *buf, int p, int c)
455{ 456{
456 struct seq_oss_synth *rec; 457 struct seq_oss_synth *rec;
458 struct seq_oss_synthinfo *info;
457 int rc; 459 int rc;
458 460
459 if (dev < 0 || dev >= dp->max_synthdev) 461 info = get_synthinfo_nospec(dp, dev);
462 if (!info)
460 return -ENXIO; 463 return -ENXIO;
461 464
462 if (is_midi_dev(dp, dev)) 465 if (info->is_midi)
463 return 0; 466 return 0;
464 if ((rec = get_synthdev(dp, dev)) == NULL) 467 if ((rec = get_synthdev(dp, dev)) == NULL)
465 return -ENXIO; 468 return -ENXIO;
@@ -467,24 +470,25 @@ snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
467 if (rec->oper.load_patch == NULL) 470 if (rec->oper.load_patch == NULL)
468 rc = -ENXIO; 471 rc = -ENXIO;
469 else 472 else
470 rc = rec->oper.load_patch(&dp->synths[dev].arg, fmt, buf, p, c); 473 rc = rec->oper.load_patch(&info->arg, fmt, buf, p, c);
471 snd_use_lock_free(&rec->use_lock); 474 snd_use_lock_free(&rec->use_lock);
472 return rc; 475 return rc;
473} 476}
474 477
475/* 478/*
476 * check if the device is valid synth device 479 * check if the device is valid synth device and return the synth info
477 */ 480 */
478int 481struct seq_oss_synthinfo *
479snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev) 482snd_seq_oss_synth_info(struct seq_oss_devinfo *dp, int dev)
480{ 483{
481 struct seq_oss_synth *rec; 484 struct seq_oss_synth *rec;
485
482 rec = get_synthdev(dp, dev); 486 rec = get_synthdev(dp, dev);
483 if (rec) { 487 if (rec) {
484 snd_use_lock_free(&rec->use_lock); 488 snd_use_lock_free(&rec->use_lock);
485 return 1; 489 return get_synthinfo_nospec(dp, dev);
486 } 490 }
487 return 0; 491 return NULL;
488} 492}
489 493
490 494
@@ -499,16 +503,18 @@ snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
499 int i, send; 503 int i, send;
500 unsigned char *dest; 504 unsigned char *dest;
501 struct seq_oss_synth_sysex *sysex; 505 struct seq_oss_synth_sysex *sysex;
506 struct seq_oss_synthinfo *info;
502 507
503 if (! snd_seq_oss_synth_is_valid(dp, dev)) 508 info = snd_seq_oss_synth_info(dp, dev);
509 if (!info)
504 return -ENXIO; 510 return -ENXIO;
505 511
506 sysex = dp->synths[dev].sysex; 512 sysex = info->sysex;
507 if (sysex == NULL) { 513 if (sysex == NULL) {
508 sysex = kzalloc(sizeof(*sysex), GFP_KERNEL); 514 sysex = kzalloc(sizeof(*sysex), GFP_KERNEL);
509 if (sysex == NULL) 515 if (sysex == NULL)
510 return -ENOMEM; 516 return -ENOMEM;
511 dp->synths[dev].sysex = sysex; 517 info->sysex = sysex;
512 } 518 }
513 519
514 send = 0; 520 send = 0;
@@ -553,10 +559,12 @@ snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
553int 559int
554snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev) 560snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev)
555{ 561{
556 if (! snd_seq_oss_synth_is_valid(dp, dev)) 562 struct seq_oss_synthinfo *info = snd_seq_oss_synth_info(dp, dev);
563
564 if (!info)
557 return -EINVAL; 565 return -EINVAL;
558 snd_seq_oss_fill_addr(dp, ev, dp->synths[dev].arg.addr.client, 566 snd_seq_oss_fill_addr(dp, ev, info->arg.addr.client,
559 dp->synths[dev].arg.addr.port); 567 info->arg.addr.port);
560 return 0; 568 return 0;
561} 569}
562 570
@@ -568,16 +576,18 @@ int
568snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, unsigned long addr) 576snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, unsigned long addr)
569{ 577{
570 struct seq_oss_synth *rec; 578 struct seq_oss_synth *rec;
579 struct seq_oss_synthinfo *info;
571 int rc; 580 int rc;
572 581
573 if (is_midi_dev(dp, dev)) 582 info = get_synthinfo_nospec(dp, dev);
583 if (!info || info->is_midi)
574 return -ENXIO; 584 return -ENXIO;
575 if ((rec = get_synthdev(dp, dev)) == NULL) 585 if ((rec = get_synthdev(dp, dev)) == NULL)
576 return -ENXIO; 586 return -ENXIO;
577 if (rec->oper.ioctl == NULL) 587 if (rec->oper.ioctl == NULL)
578 rc = -ENXIO; 588 rc = -ENXIO;
579 else 589 else
580 rc = rec->oper.ioctl(&dp->synths[dev].arg, cmd, addr); 590 rc = rec->oper.ioctl(&info->arg, cmd, addr);
581 snd_use_lock_free(&rec->use_lock); 591 snd_use_lock_free(&rec->use_lock);
582 return rc; 592 return rc;
583} 593}
@@ -589,7 +599,10 @@ snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, u
589int 599int
590snd_seq_oss_synth_raw_event(struct seq_oss_devinfo *dp, int dev, unsigned char *data, struct snd_seq_event *ev) 600snd_seq_oss_synth_raw_event(struct seq_oss_devinfo *dp, int dev, unsigned char *data, struct snd_seq_event *ev)
591{ 601{
592 if (! snd_seq_oss_synth_is_valid(dp, dev) || is_midi_dev(dp, dev)) 602 struct seq_oss_synthinfo *info;
603
604 info = snd_seq_oss_synth_info(dp, dev);
605 if (!info || info->is_midi)
593 return -ENXIO; 606 return -ENXIO;
594 ev->type = SNDRV_SEQ_EVENT_OSS; 607 ev->type = SNDRV_SEQ_EVENT_OSS;
595 memcpy(ev->data.raw8.d, data, 8); 608 memcpy(ev->data.raw8.d, data, 8);
diff --git a/sound/core/seq/oss/seq_oss_synth.h b/sound/core/seq/oss/seq_oss_synth.h
index 74ac55f166b6..a63f9e22974d 100644
--- a/sound/core/seq/oss/seq_oss_synth.h
+++ b/sound/core/seq/oss/seq_oss_synth.h
@@ -37,7 +37,8 @@ void snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp);
37void snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev); 37void snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev);
38int snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt, 38int snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
39 const char __user *buf, int p, int c); 39 const char __user *buf, int p, int c);
40int snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev); 40struct seq_oss_synthinfo *snd_seq_oss_synth_info(struct seq_oss_devinfo *dp,
41 int dev);
41int snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf, 42int snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
42 struct snd_seq_event *ev); 43 struct snd_seq_event *ev);
43int snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev); 44int snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev);
diff --git a/sound/drivers/opl3/opl3_synth.c b/sound/drivers/opl3/opl3_synth.c
index ddcc1a325a61..42920a243328 100644
--- a/sound/drivers/opl3/opl3_synth.c
+++ b/sound/drivers/opl3/opl3_synth.c
@@ -21,6 +21,7 @@
21 21
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/export.h> 23#include <linux/export.h>
24#include <linux/nospec.h>
24#include <sound/opl3.h> 25#include <sound/opl3.h>
25#include <sound/asound_fm.h> 26#include <sound/asound_fm.h>
26 27
@@ -448,7 +449,7 @@ static int snd_opl3_set_voice(struct snd_opl3 * opl3, struct snd_dm_fm_voice * v
448{ 449{
449 unsigned short reg_side; 450 unsigned short reg_side;
450 unsigned char op_offset; 451 unsigned char op_offset;
451 unsigned char voice_offset; 452 unsigned char voice_offset, voice_op;
452 453
453 unsigned short opl3_reg; 454 unsigned short opl3_reg;
454 unsigned char reg_val; 455 unsigned char reg_val;
@@ -473,7 +474,9 @@ static int snd_opl3_set_voice(struct snd_opl3 * opl3, struct snd_dm_fm_voice * v
473 voice_offset = voice->voice - MAX_OPL2_VOICES; 474 voice_offset = voice->voice - MAX_OPL2_VOICES;
474 } 475 }
475 /* Get register offset of operator */ 476 /* Get register offset of operator */
476 op_offset = snd_opl3_regmap[voice_offset][voice->op]; 477 voice_offset = array_index_nospec(voice_offset, MAX_OPL2_VOICES);
478 voice_op = array_index_nospec(voice->op, 4);
479 op_offset = snd_opl3_regmap[voice_offset][voice_op];
477 480
478 reg_val = 0x00; 481 reg_val = 0x00;
479 /* Set amplitude modulation (tremolo) effect */ 482 /* Set amplitude modulation (tremolo) effect */
diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c
index 8573289c381e..928a255bfc35 100644
--- a/sound/firewire/dice/dice-stream.c
+++ b/sound/firewire/dice/dice-stream.c
@@ -435,7 +435,7 @@ int snd_dice_stream_init_duplex(struct snd_dice *dice)
435 err = init_stream(dice, AMDTP_IN_STREAM, i); 435 err = init_stream(dice, AMDTP_IN_STREAM, i);
436 if (err < 0) { 436 if (err < 0) {
437 for (; i >= 0; i--) 437 for (; i >= 0; i--)
438 destroy_stream(dice, AMDTP_OUT_STREAM, i); 438 destroy_stream(dice, AMDTP_IN_STREAM, i);
439 goto end; 439 goto end;
440 } 440 }
441 } 441 }
diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
index 4ddb4cdd054b..96bb01b6b751 100644
--- a/sound/firewire/dice/dice.c
+++ b/sound/firewire/dice/dice.c
@@ -14,7 +14,7 @@ MODULE_LICENSE("GPL v2");
14#define OUI_WEISS 0x001c6a 14#define OUI_WEISS 0x001c6a
15#define OUI_LOUD 0x000ff2 15#define OUI_LOUD 0x000ff2
16#define OUI_FOCUSRITE 0x00130e 16#define OUI_FOCUSRITE 0x00130e
17#define OUI_TCELECTRONIC 0x001486 17#define OUI_TCELECTRONIC 0x000166
18 18
19#define DICE_CATEGORY_ID 0x04 19#define DICE_CATEGORY_ID 0x04
20#define WEISS_CATEGORY_ID 0x00 20#define WEISS_CATEGORY_ID 0x00
diff --git a/sound/pci/asihpi/hpimsginit.c b/sound/pci/asihpi/hpimsginit.c
index 7eb617175fde..a31a70dccecf 100644
--- a/sound/pci/asihpi/hpimsginit.c
+++ b/sound/pci/asihpi/hpimsginit.c
@@ -23,6 +23,7 @@
23 23
24#include "hpi_internal.h" 24#include "hpi_internal.h"
25#include "hpimsginit.h" 25#include "hpimsginit.h"
26#include <linux/nospec.h>
26 27
27/* The actual message size for each object type */ 28/* The actual message size for each object type */
28static u16 msg_size[HPI_OBJ_MAXINDEX + 1] = HPI_MESSAGE_SIZE_BY_OBJECT; 29static u16 msg_size[HPI_OBJ_MAXINDEX + 1] = HPI_MESSAGE_SIZE_BY_OBJECT;
@@ -39,10 +40,12 @@ static void hpi_init_message(struct hpi_message *phm, u16 object,
39{ 40{
40 u16 size; 41 u16 size;
41 42
42 if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) 43 if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) {
44 object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1);
43 size = msg_size[object]; 45 size = msg_size[object];
44 else 46 } else {
45 size = sizeof(*phm); 47 size = sizeof(*phm);
48 }
46 49
47 memset(phm, 0, size); 50 memset(phm, 0, size);
48 phm->size = size; 51 phm->size = size;
@@ -66,10 +69,12 @@ void hpi_init_response(struct hpi_response *phr, u16 object, u16 function,
66{ 69{
67 u16 size; 70 u16 size;
68 71
69 if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) 72 if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) {
73 object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1);
70 size = res_size[object]; 74 size = res_size[object];
71 else 75 } else {
72 size = sizeof(*phr); 76 size = sizeof(*phr);
77 }
73 78
74 memset(phr, 0, sizeof(*phr)); 79 memset(phr, 0, sizeof(*phr));
75 phr->size = size; 80 phr->size = size;
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
index 5badd08e1d69..b1a2a7ea4172 100644
--- a/sound/pci/asihpi/hpioctl.c
+++ b/sound/pci/asihpi/hpioctl.c
@@ -33,6 +33,7 @@
33#include <linux/stringify.h> 33#include <linux/stringify.h>
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/vmalloc.h> 35#include <linux/vmalloc.h>
36#include <linux/nospec.h>
36 37
37#ifdef MODULE_FIRMWARE 38#ifdef MODULE_FIRMWARE
38MODULE_FIRMWARE("asihpi/dsp5000.bin"); 39MODULE_FIRMWARE("asihpi/dsp5000.bin");
@@ -186,7 +187,8 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
186 struct hpi_adapter *pa = NULL; 187 struct hpi_adapter *pa = NULL;
187 188
188 if (hm->h.adapter_index < ARRAY_SIZE(adapters)) 189 if (hm->h.adapter_index < ARRAY_SIZE(adapters))
189 pa = &adapters[hm->h.adapter_index]; 190 pa = &adapters[array_index_nospec(hm->h.adapter_index,
191 ARRAY_SIZE(adapters))];
190 192
191 if (!pa || !pa->adapter || !pa->adapter->type) { 193 if (!pa || !pa->adapter || !pa->adapter->type) {
192 hpi_init_response(&hr->r0, hm->h.object, 194 hpi_init_response(&hr->r0, hm->h.object,
diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
index 57df06e76968..cc009a4a3d1d 100644
--- a/sound/pci/hda/hda_hwdep.c
+++ b/sound/pci/hda/hda_hwdep.c
@@ -21,6 +21,7 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/compat.h> 23#include <linux/compat.h>
24#include <linux/nospec.h>
24#include <sound/core.h> 25#include <sound/core.h>
25#include "hda_codec.h" 26#include "hda_codec.h"
26#include "hda_local.h" 27#include "hda_local.h"
@@ -51,7 +52,16 @@ static int get_wcap_ioctl(struct hda_codec *codec,
51 52
52 if (get_user(verb, &arg->verb)) 53 if (get_user(verb, &arg->verb))
53 return -EFAULT; 54 return -EFAULT;
54 res = get_wcaps(codec, verb >> 24); 55 /* open-code get_wcaps(verb>>24) with nospec */
56 verb >>= 24;
57 if (verb < codec->core.start_nid ||
58 verb >= codec->core.start_nid + codec->core.num_nodes) {
59 res = 0;
60 } else {
61 verb -= codec->core.start_nid;
62 verb = array_index_nospec(verb, codec->core.num_nodes);
63 res = codec->wcaps[verb];
64 }
55 if (put_user(res, &arg->res)) 65 if (put_user(res, &arg->res))
56 return -EFAULT; 66 return -EFAULT;
57 return 0; 67 return 0;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 7a111a1b5836..b0c8c79848a9 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1647,7 +1647,8 @@ static void azx_check_snoop_available(struct azx *chip)
1647 */ 1647 */
1648 u8 val; 1648 u8 val;
1649 pci_read_config_byte(chip->pci, 0x42, &val); 1649 pci_read_config_byte(chip->pci, 0x42, &val);
1650 if (!(val & 0x80) && chip->pci->revision == 0x30) 1650 if (!(val & 0x80) && (chip->pci->revision == 0x30 ||
1651 chip->pci->revision == 0x20))
1651 snoop = false; 1652 snoop = false;
1652 } 1653 }
1653 1654
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index b4f1b6e88305..7d7eb1354eee 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1383,6 +1383,8 @@ static void hdmi_pcm_setup_pin(struct hdmi_spec *spec,
1383 pcm = get_pcm_rec(spec, per_pin->pcm_idx); 1383 pcm = get_pcm_rec(spec, per_pin->pcm_idx);
1384 else 1384 else
1385 return; 1385 return;
1386 if (!pcm->pcm)
1387 return;
1386 if (!test_bit(per_pin->pcm_idx, &spec->pcm_in_use)) 1388 if (!test_bit(per_pin->pcm_idx, &spec->pcm_in_use))
1387 return; 1389 return;
1388 1390
@@ -2151,8 +2153,13 @@ static int generic_hdmi_build_controls(struct hda_codec *codec)
2151 int dev, err; 2153 int dev, err;
2152 int pin_idx, pcm_idx; 2154 int pin_idx, pcm_idx;
2153 2155
2154
2155 for (pcm_idx = 0; pcm_idx < spec->pcm_used; pcm_idx++) { 2156 for (pcm_idx = 0; pcm_idx < spec->pcm_used; pcm_idx++) {
2157 if (!get_pcm_rec(spec, pcm_idx)->pcm) {
2158 /* no PCM: mark this for skipping permanently */
2159 set_bit(pcm_idx, &spec->pcm_bitmap);
2160 continue;
2161 }
2162
2156 err = generic_hdmi_build_jack(codec, pcm_idx); 2163 err = generic_hdmi_build_jack(codec, pcm_idx);
2157 if (err < 0) 2164 if (err < 0)
2158 return err; 2165 return err;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index aef1f52db7d9..8c238e51bb5a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -331,6 +331,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
331 /* fallthrough */ 331 /* fallthrough */
332 case 0x10ec0215: 332 case 0x10ec0215:
333 case 0x10ec0233: 333 case 0x10ec0233:
334 case 0x10ec0235:
334 case 0x10ec0236: 335 case 0x10ec0236:
335 case 0x10ec0255: 336 case 0x10ec0255:
336 case 0x10ec0256: 337 case 0x10ec0256:
@@ -6370,6 +6371,8 @@ static const struct hda_fixup alc269_fixups[] = {
6370 { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ 6371 { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
6371 { } 6372 { }
6372 }, 6373 },
6374 .chained = true,
6375 .chain_id = ALC269_FIXUP_HEADSET_MIC
6373 }, 6376 },
6374}; 6377};
6375 6378
@@ -6573,6 +6576,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6573 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 6576 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6574 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 6577 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6575 SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 6578 SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
6579 SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
6580 SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
6576 SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 6581 SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
6577 SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 6582 SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6578 SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), 6583 SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -7157,8 +7162,11 @@ static int patch_alc269(struct hda_codec *codec)
7157 case 0x10ec0298: 7162 case 0x10ec0298:
7158 spec->codec_variant = ALC269_TYPE_ALC298; 7163 spec->codec_variant = ALC269_TYPE_ALC298;
7159 break; 7164 break;
7165 case 0x10ec0235:
7160 case 0x10ec0255: 7166 case 0x10ec0255:
7161 spec->codec_variant = ALC269_TYPE_ALC255; 7167 spec->codec_variant = ALC269_TYPE_ALC255;
7168 spec->shutup = alc256_shutup;
7169 spec->init_hook = alc256_init;
7162 break; 7170 break;
7163 case 0x10ec0236: 7171 case 0x10ec0236:
7164 case 0x10ec0256: 7172 case 0x10ec0256:
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 4c59983158e0..11b5b5e0e058 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -137,6 +137,7 @@
137#include <linux/pci.h> 137#include <linux/pci.h>
138#include <linux/math64.h> 138#include <linux/math64.h>
139#include <linux/io.h> 139#include <linux/io.h>
140#include <linux/nospec.h>
140 141
141#include <sound/core.h> 142#include <sound/core.h>
142#include <sound/control.h> 143#include <sound/control.h>
@@ -5698,40 +5699,43 @@ static int snd_hdspm_channel_info(struct snd_pcm_substream *substream,
5698 struct snd_pcm_channel_info *info) 5699 struct snd_pcm_channel_info *info)
5699{ 5700{
5700 struct hdspm *hdspm = snd_pcm_substream_chip(substream); 5701 struct hdspm *hdspm = snd_pcm_substream_chip(substream);
5702 unsigned int channel = info->channel;
5701 5703
5702 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 5704 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
5703 if (snd_BUG_ON(info->channel >= hdspm->max_channels_out)) { 5705 if (snd_BUG_ON(channel >= hdspm->max_channels_out)) {
5704 dev_info(hdspm->card->dev, 5706 dev_info(hdspm->card->dev,
5705 "snd_hdspm_channel_info: output channel out of range (%d)\n", 5707 "snd_hdspm_channel_info: output channel out of range (%d)\n",
5706 info->channel); 5708 channel);
5707 return -EINVAL; 5709 return -EINVAL;
5708 } 5710 }
5709 5711
5710 if (hdspm->channel_map_out[info->channel] < 0) { 5712 channel = array_index_nospec(channel, hdspm->max_channels_out);
5713 if (hdspm->channel_map_out[channel] < 0) {
5711 dev_info(hdspm->card->dev, 5714 dev_info(hdspm->card->dev,
5712 "snd_hdspm_channel_info: output channel %d mapped out\n", 5715 "snd_hdspm_channel_info: output channel %d mapped out\n",
5713 info->channel); 5716 channel);
5714 return -EINVAL; 5717 return -EINVAL;
5715 } 5718 }
5716 5719
5717 info->offset = hdspm->channel_map_out[info->channel] * 5720 info->offset = hdspm->channel_map_out[channel] *
5718 HDSPM_CHANNEL_BUFFER_BYTES; 5721 HDSPM_CHANNEL_BUFFER_BYTES;
5719 } else { 5722 } else {
5720 if (snd_BUG_ON(info->channel >= hdspm->max_channels_in)) { 5723 if (snd_BUG_ON(channel >= hdspm->max_channels_in)) {
5721 dev_info(hdspm->card->dev, 5724 dev_info(hdspm->card->dev,
5722 "snd_hdspm_channel_info: input channel out of range (%d)\n", 5725 "snd_hdspm_channel_info: input channel out of range (%d)\n",
5723 info->channel); 5726 channel);
5724 return -EINVAL; 5727 return -EINVAL;
5725 } 5728 }
5726 5729
5727 if (hdspm->channel_map_in[info->channel] < 0) { 5730 channel = array_index_nospec(channel, hdspm->max_channels_in);
5731 if (hdspm->channel_map_in[channel] < 0) {
5728 dev_info(hdspm->card->dev, 5732 dev_info(hdspm->card->dev,
5729 "snd_hdspm_channel_info: input channel %d mapped out\n", 5733 "snd_hdspm_channel_info: input channel %d mapped out\n",
5730 info->channel); 5734 channel);
5731 return -EINVAL; 5735 return -EINVAL;
5732 } 5736 }
5733 5737
5734 info->offset = hdspm->channel_map_in[info->channel] * 5738 info->offset = hdspm->channel_map_in[channel] *
5735 HDSPM_CHANNEL_BUFFER_BYTES; 5739 HDSPM_CHANNEL_BUFFER_BYTES;
5736 } 5740 }
5737 5741
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index df648b1d9217..edd765e22377 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -26,6 +26,7 @@
26#include <linux/pci.h> 26#include <linux/pci.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/io.h> 28#include <linux/io.h>
29#include <linux/nospec.h>
29 30
30#include <sound/core.h> 31#include <sound/core.h>
31#include <sound/control.h> 32#include <sound/control.h>
@@ -2071,9 +2072,10 @@ static int snd_rme9652_channel_info(struct snd_pcm_substream *substream,
2071 if (snd_BUG_ON(info->channel >= RME9652_NCHANNELS)) 2072 if (snd_BUG_ON(info->channel >= RME9652_NCHANNELS))
2072 return -EINVAL; 2073 return -EINVAL;
2073 2074
2074 if ((chn = rme9652->channel_map[info->channel]) < 0) { 2075 chn = rme9652->channel_map[array_index_nospec(info->channel,
2076 RME9652_NCHANNELS)];
2077 if (chn < 0)
2075 return -EINVAL; 2078 return -EINVAL;
2076 }
2077 2079
2078 info->offset = chn * RME9652_CHANNEL_BUFFER_BYTES; 2080 info->offset = chn * RME9652_CHANNEL_BUFFER_BYTES;
2079 info->first = 0; 2081 info->first = 0;
diff --git a/sound/soc/amd/acp-da7219-max98357a.c b/sound/soc/amd/acp-da7219-max98357a.c
index b205c782e494..f41560ecbcd1 100644
--- a/sound/soc/amd/acp-da7219-max98357a.c
+++ b/sound/soc/amd/acp-da7219-max98357a.c
@@ -43,7 +43,7 @@
43#define DUAL_CHANNEL 2 43#define DUAL_CHANNEL 2
44 44
45static struct snd_soc_jack cz_jack; 45static struct snd_soc_jack cz_jack;
46struct clk *da7219_dai_clk; 46static struct clk *da7219_dai_clk;
47 47
48static int cz_da7219_init(struct snd_soc_pcm_runtime *rtd) 48static int cz_da7219_init(struct snd_soc_pcm_runtime *rtd)
49{ 49{
diff --git a/sound/soc/codecs/adau17x1.c b/sound/soc/codecs/adau17x1.c
index 80c2a06285bb..12bf24c26818 100644
--- a/sound/soc/codecs/adau17x1.c
+++ b/sound/soc/codecs/adau17x1.c
@@ -502,7 +502,7 @@ static int adau17x1_hw_params(struct snd_pcm_substream *substream,
502 } 502 }
503 503
504 if (adau->sigmadsp) { 504 if (adau->sigmadsp) {
505 ret = adau17x1_setup_firmware(adau, params_rate(params)); 505 ret = adau17x1_setup_firmware(component, params_rate(params));
506 if (ret < 0) 506 if (ret < 0)
507 return ret; 507 return ret;
508 } 508 }
@@ -835,26 +835,40 @@ bool adau17x1_volatile_register(struct device *dev, unsigned int reg)
835} 835}
836EXPORT_SYMBOL_GPL(adau17x1_volatile_register); 836EXPORT_SYMBOL_GPL(adau17x1_volatile_register);
837 837
838int adau17x1_setup_firmware(struct adau *adau, unsigned int rate) 838int adau17x1_setup_firmware(struct snd_soc_component *component,
839 unsigned int rate)
839{ 840{
840 int ret; 841 int ret;
841 int dspsr; 842 int dspsr, dsp_run;
843 struct adau *adau = snd_soc_component_get_drvdata(component);
844 struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
845
846 snd_soc_dapm_mutex_lock(dapm);
842 847
843 ret = regmap_read(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, &dspsr); 848 ret = regmap_read(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, &dspsr);
844 if (ret) 849 if (ret)
845 return ret; 850 goto err;
851
852 ret = regmap_read(adau->regmap, ADAU17X1_DSP_RUN, &dsp_run);
853 if (ret)
854 goto err;
846 855
847 regmap_write(adau->regmap, ADAU17X1_DSP_ENABLE, 1); 856 regmap_write(adau->regmap, ADAU17X1_DSP_ENABLE, 1);
848 regmap_write(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, 0xf); 857 regmap_write(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, 0xf);
858 regmap_write(adau->regmap, ADAU17X1_DSP_RUN, 0);
849 859
850 ret = sigmadsp_setup(adau->sigmadsp, rate); 860 ret = sigmadsp_setup(adau->sigmadsp, rate);
851 if (ret) { 861 if (ret) {
852 regmap_write(adau->regmap, ADAU17X1_DSP_ENABLE, 0); 862 regmap_write(adau->regmap, ADAU17X1_DSP_ENABLE, 0);
853 return ret; 863 goto err;
854 } 864 }
855 regmap_write(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, dspsr); 865 regmap_write(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, dspsr);
866 regmap_write(adau->regmap, ADAU17X1_DSP_RUN, dsp_run);
856 867
857 return 0; 868err:
869 snd_soc_dapm_mutex_unlock(dapm);
870
871 return ret;
858} 872}
859EXPORT_SYMBOL_GPL(adau17x1_setup_firmware); 873EXPORT_SYMBOL_GPL(adau17x1_setup_firmware);
860 874
diff --git a/sound/soc/codecs/adau17x1.h b/sound/soc/codecs/adau17x1.h
index a7b1cb770814..e6fe87beec07 100644
--- a/sound/soc/codecs/adau17x1.h
+++ b/sound/soc/codecs/adau17x1.h
@@ -68,7 +68,8 @@ int adau17x1_resume(struct snd_soc_component *component);
68 68
69extern const struct snd_soc_dai_ops adau17x1_dai_ops; 69extern const struct snd_soc_dai_ops adau17x1_dai_ops;
70 70
71int adau17x1_setup_firmware(struct adau *adau, unsigned int rate); 71int adau17x1_setup_firmware(struct snd_soc_component *component,
72 unsigned int rate);
72bool adau17x1_has_dsp(struct adau *adau); 73bool adau17x1_has_dsp(struct adau *adau);
73 74
74#define ADAU17X1_CLOCK_CONTROL 0x4000 75#define ADAU17X1_CLOCK_CONTROL 0x4000
diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
index 12ee83d52405..b7cf7cce95fe 100644
--- a/sound/soc/codecs/msm8916-wcd-analog.c
+++ b/sound/soc/codecs/msm8916-wcd-analog.c
@@ -1187,7 +1187,8 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev)
1187 return irq; 1187 return irq;
1188 } 1188 }
1189 1189
1190 ret = devm_request_irq(dev, irq, pm8916_mbhc_switch_irq_handler, 1190 ret = devm_request_threaded_irq(dev, irq, NULL,
1191 pm8916_mbhc_switch_irq_handler,
1191 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | 1192 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
1192 IRQF_ONESHOT, 1193 IRQF_ONESHOT,
1193 "mbhc switch irq", priv); 1194 "mbhc switch irq", priv);
@@ -1201,7 +1202,8 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev)
1201 return irq; 1202 return irq;
1202 } 1203 }
1203 1204
1204 ret = devm_request_irq(dev, irq, mbhc_btn_press_irq_handler, 1205 ret = devm_request_threaded_irq(dev, irq, NULL,
1206 mbhc_btn_press_irq_handler,
1205 IRQF_TRIGGER_RISING | 1207 IRQF_TRIGGER_RISING |
1206 IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 1208 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
1207 "mbhc btn press irq", priv); 1209 "mbhc btn press irq", priv);
@@ -1214,7 +1216,8 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev)
1214 return irq; 1216 return irq;
1215 } 1217 }
1216 1218
1217 ret = devm_request_irq(dev, irq, mbhc_btn_release_irq_handler, 1219 ret = devm_request_threaded_irq(dev, irq, NULL,
1220 mbhc_btn_release_irq_handler,
1218 IRQF_TRIGGER_RISING | 1221 IRQF_TRIGGER_RISING |
1219 IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 1222 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
1220 "mbhc btn release irq", priv); 1223 "mbhc btn release irq", priv);
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
index e8a66b03faab..1570b91bf018 100644
--- a/sound/soc/codecs/rt5514.c
+++ b/sound/soc/codecs/rt5514.c
@@ -89,6 +89,7 @@ static const struct reg_default rt5514_reg[] = {
89 {RT5514_PLL3_CALIB_CTRL5, 0x40220012}, 89 {RT5514_PLL3_CALIB_CTRL5, 0x40220012},
90 {RT5514_DELAY_BUF_CTRL1, 0x7fff006a}, 90 {RT5514_DELAY_BUF_CTRL1, 0x7fff006a},
91 {RT5514_DELAY_BUF_CTRL3, 0x00000000}, 91 {RT5514_DELAY_BUF_CTRL3, 0x00000000},
92 {RT5514_ASRC_IN_CTRL1, 0x00000003},
92 {RT5514_DOWNFILTER0_CTRL1, 0x00020c2f}, 93 {RT5514_DOWNFILTER0_CTRL1, 0x00020c2f},
93 {RT5514_DOWNFILTER0_CTRL2, 0x00020c2f}, 94 {RT5514_DOWNFILTER0_CTRL2, 0x00020c2f},
94 {RT5514_DOWNFILTER0_CTRL3, 0x10000362}, 95 {RT5514_DOWNFILTER0_CTRL3, 0x10000362},
@@ -181,6 +182,7 @@ static bool rt5514_readable_register(struct device *dev, unsigned int reg)
181 case RT5514_PLL3_CALIB_CTRL5: 182 case RT5514_PLL3_CALIB_CTRL5:
182 case RT5514_DELAY_BUF_CTRL1: 183 case RT5514_DELAY_BUF_CTRL1:
183 case RT5514_DELAY_BUF_CTRL3: 184 case RT5514_DELAY_BUF_CTRL3:
185 case RT5514_ASRC_IN_CTRL1:
184 case RT5514_DOWNFILTER0_CTRL1: 186 case RT5514_DOWNFILTER0_CTRL1:
185 case RT5514_DOWNFILTER0_CTRL2: 187 case RT5514_DOWNFILTER0_CTRL2:
186 case RT5514_DOWNFILTER0_CTRL3: 188 case RT5514_DOWNFILTER0_CTRL3:
@@ -238,6 +240,7 @@ static bool rt5514_i2c_readable_register(struct device *dev,
238 case RT5514_DSP_MAPPING | RT5514_PLL3_CALIB_CTRL5: 240 case RT5514_DSP_MAPPING | RT5514_PLL3_CALIB_CTRL5:
239 case RT5514_DSP_MAPPING | RT5514_DELAY_BUF_CTRL1: 241 case RT5514_DSP_MAPPING | RT5514_DELAY_BUF_CTRL1:
240 case RT5514_DSP_MAPPING | RT5514_DELAY_BUF_CTRL3: 242 case RT5514_DSP_MAPPING | RT5514_DELAY_BUF_CTRL3:
243 case RT5514_DSP_MAPPING | RT5514_ASRC_IN_CTRL1:
241 case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL1: 244 case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL1:
242 case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL2: 245 case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL2:
243 case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL3: 246 case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL3:
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 40a700493f4c..da8fd98c7f51 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -144,6 +144,13 @@ static int fsl_esai_divisor_cal(struct snd_soc_dai *dai, bool tx, u32 ratio,
144 144
145 psr = ratio <= 256 * maxfp ? ESAI_xCCR_xPSR_BYPASS : ESAI_xCCR_xPSR_DIV8; 145 psr = ratio <= 256 * maxfp ? ESAI_xCCR_xPSR_BYPASS : ESAI_xCCR_xPSR_DIV8;
146 146
147 /* Do not loop-search if PM (1 ~ 256) alone can serve the ratio */
148 if (ratio <= 256) {
149 pm = ratio;
150 fp = 1;
151 goto out;
152 }
153
147 /* Set the max fluctuation -- 0.1% of the max devisor */ 154 /* Set the max fluctuation -- 0.1% of the max devisor */
148 savesub = (psr ? 1 : 8) * 256 * maxfp / 1000; 155 savesub = (psr ? 1 : 8) * 256 * maxfp / 1000;
149 156
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 0823b08923b5..89df2d9f63d7 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -217,6 +217,7 @@ struct fsl_ssi_soc_data {
217 * @dai_fmt: DAI configuration this device is currently used with 217 * @dai_fmt: DAI configuration this device is currently used with
218 * @streams: Mask of current active streams: BIT(TX) and BIT(RX) 218 * @streams: Mask of current active streams: BIT(TX) and BIT(RX)
219 * @i2s_net: I2S and Network mode configurations of SCR register 219 * @i2s_net: I2S and Network mode configurations of SCR register
220 * (this is the initial settings based on the DAI format)
220 * @synchronous: Use synchronous mode - both of TX and RX use STCK and SFCK 221 * @synchronous: Use synchronous mode - both of TX and RX use STCK and SFCK
221 * @use_dma: DMA is used or FIQ with stream filter 222 * @use_dma: DMA is used or FIQ with stream filter
222 * @use_dual_fifo: DMA with support for dual FIFO mode 223 * @use_dual_fifo: DMA with support for dual FIFO mode
@@ -829,16 +830,23 @@ static int fsl_ssi_hw_params(struct snd_pcm_substream *substream,
829 } 830 }
830 831
831 if (!fsl_ssi_is_ac97(ssi)) { 832 if (!fsl_ssi_is_ac97(ssi)) {
833 /*
834 * Keep the ssi->i2s_net intact while having a local variable
835 * to override settings for special use cases. Otherwise, the
836 * ssi->i2s_net will lose the settings for regular use cases.
837 */
838 u8 i2s_net = ssi->i2s_net;
839
832 /* Normal + Network mode to send 16-bit data in 32-bit frames */ 840 /* Normal + Network mode to send 16-bit data in 32-bit frames */
833 if (fsl_ssi_is_i2s_cbm_cfs(ssi) && sample_size == 16) 841 if (fsl_ssi_is_i2s_cbm_cfs(ssi) && sample_size == 16)
834 ssi->i2s_net = SSI_SCR_I2S_MODE_NORMAL | SSI_SCR_NET; 842 i2s_net = SSI_SCR_I2S_MODE_NORMAL | SSI_SCR_NET;
835 843
836 /* Use Normal mode to send mono data at 1st slot of 2 slots */ 844 /* Use Normal mode to send mono data at 1st slot of 2 slots */
837 if (channels == 1) 845 if (channels == 1)
838 ssi->i2s_net = SSI_SCR_I2S_MODE_NORMAL; 846 i2s_net = SSI_SCR_I2S_MODE_NORMAL;
839 847
840 regmap_update_bits(regs, REG_SSI_SCR, 848 regmap_update_bits(regs, REG_SSI_SCR,
841 SSI_SCR_I2S_NET_MASK, ssi->i2s_net); 849 SSI_SCR_I2S_NET_MASK, i2s_net);
842 } 850 }
843 851
844 /* In synchronous mode, the SSI uses STCCR for capture */ 852 /* In synchronous mode, the SSI uses STCCR for capture */
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index ceb105cbd461..addac2a8e52a 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -72,24 +72,28 @@ config SND_SOC_INTEL_BAYTRAIL
72 for Baytrail Chromebooks but this option is now deprecated and is 72 for Baytrail Chromebooks but this option is now deprecated and is
73 not recommended, use SND_SST_ATOM_HIFI2_PLATFORM instead. 73 not recommended, use SND_SST_ATOM_HIFI2_PLATFORM instead.
74 74
75config SND_SST_ATOM_HIFI2_PLATFORM
76 tristate
77 select SND_SOC_COMPRESS
78
75config SND_SST_ATOM_HIFI2_PLATFORM_PCI 79config SND_SST_ATOM_HIFI2_PLATFORM_PCI
76 tristate "PCI HiFi2 (Medfield, Merrifield) Platforms" 80 tristate "PCI HiFi2 (Merrifield) Platforms"
77 depends on X86 && PCI 81 depends on X86 && PCI
78 select SND_SST_IPC_PCI 82 select SND_SST_IPC_PCI
79 select SND_SOC_COMPRESS 83 select SND_SST_ATOM_HIFI2_PLATFORM
80 help 84 help
81 If you have a Intel Medfield or Merrifield/Edison platform, then 85 If you have a Intel Merrifield/Edison platform, then
82 enable this option by saying Y or m. Distros will typically not 86 enable this option by saying Y or m. Distros will typically not
83 enable this option: Medfield devices are not available to 87 enable this option: while Merrifield/Edison can run a mainline
84 developers and while Merrifield/Edison can run a mainline kernel with 88 kernel with limited functionality it will require a firmware file
85 limited functionality it will require a firmware file which 89 which is not in the standard firmware tree
86 is not in the standard firmware tree
87 90
88config SND_SST_ATOM_HIFI2_PLATFORM 91config SND_SST_ATOM_HIFI2_PLATFORM_ACPI
89 tristate "ACPI HiFi2 (Baytrail, Cherrytrail) Platforms" 92 tristate "ACPI HiFi2 (Baytrail, Cherrytrail) Platforms"
93 default ACPI
90 depends on X86 && ACPI 94 depends on X86 && ACPI
91 select SND_SST_IPC_ACPI 95 select SND_SST_IPC_ACPI
92 select SND_SOC_COMPRESS 96 select SND_SST_ATOM_HIFI2_PLATFORM
93 select SND_SOC_ACPI_INTEL_MATCH 97 select SND_SOC_ACPI_INTEL_MATCH
94 select IOSF_MBI 98 select IOSF_MBI
95 help 99 help
diff --git a/sound/soc/omap/omap-dmic.c b/sound/soc/omap/omap-dmic.c
index 09db2aec12a3..b2f5d2fa354d 100644
--- a/sound/soc/omap/omap-dmic.c
+++ b/sound/soc/omap/omap-dmic.c
@@ -281,7 +281,7 @@ static int omap_dmic_dai_trigger(struct snd_pcm_substream *substream,
281static int omap_dmic_select_fclk(struct omap_dmic *dmic, int clk_id, 281static int omap_dmic_select_fclk(struct omap_dmic *dmic, int clk_id,
282 unsigned int freq) 282 unsigned int freq)
283{ 283{
284 struct clk *parent_clk; 284 struct clk *parent_clk, *mux;
285 char *parent_clk_name; 285 char *parent_clk_name;
286 int ret = 0; 286 int ret = 0;
287 287
@@ -329,14 +329,21 @@ static int omap_dmic_select_fclk(struct omap_dmic *dmic, int clk_id,
329 return -ENODEV; 329 return -ENODEV;
330 } 330 }
331 331
332 mux = clk_get_parent(dmic->fclk);
333 if (IS_ERR(mux)) {
334 dev_err(dmic->dev, "can't get fck mux parent\n");
335 clk_put(parent_clk);
336 return -ENODEV;
337 }
338
332 mutex_lock(&dmic->mutex); 339 mutex_lock(&dmic->mutex);
333 if (dmic->active) { 340 if (dmic->active) {
334 /* disable clock while reparenting */ 341 /* disable clock while reparenting */
335 pm_runtime_put_sync(dmic->dev); 342 pm_runtime_put_sync(dmic->dev);
336 ret = clk_set_parent(dmic->fclk, parent_clk); 343 ret = clk_set_parent(mux, parent_clk);
337 pm_runtime_get_sync(dmic->dev); 344 pm_runtime_get_sync(dmic->dev);
338 } else { 345 } else {
339 ret = clk_set_parent(dmic->fclk, parent_clk); 346 ret = clk_set_parent(mux, parent_clk);
340 } 347 }
341 mutex_unlock(&dmic->mutex); 348 mutex_unlock(&dmic->mutex);
342 349
@@ -349,6 +356,7 @@ static int omap_dmic_select_fclk(struct omap_dmic *dmic, int clk_id,
349 dmic->fclk_freq = freq; 356 dmic->fclk_freq = freq;
350 357
351err_busy: 358err_busy:
359 clk_put(mux);
352 clk_put(parent_clk); 360 clk_put(parent_clk);
353 361
354 return ret; 362 return ret;
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 6a76688a8ba9..94f081b93258 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -1536,7 +1536,7 @@ static int rsnd_remove(struct platform_device *pdev)
1536 return ret; 1536 return ret;
1537} 1537}
1538 1538
1539static int rsnd_suspend(struct device *dev) 1539static int __maybe_unused rsnd_suspend(struct device *dev)
1540{ 1540{
1541 struct rsnd_priv *priv = dev_get_drvdata(dev); 1541 struct rsnd_priv *priv = dev_get_drvdata(dev);
1542 1542
@@ -1545,7 +1545,7 @@ static int rsnd_suspend(struct device *dev)
1545 return 0; 1545 return 0;
1546} 1546}
1547 1547
1548static int rsnd_resume(struct device *dev) 1548static int __maybe_unused rsnd_resume(struct device *dev)
1549{ 1549{
1550 struct rsnd_priv *priv = dev_get_drvdata(dev); 1550 struct rsnd_priv *priv = dev_get_drvdata(dev);
1551 1551
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index fa27d0fca6dc..986b8b2f90fb 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -513,7 +513,7 @@ static void remove_widget(struct snd_soc_component *comp,
513 */ 513 */
514 if (dobj->widget.kcontrol_type == SND_SOC_TPLG_TYPE_ENUM) { 514 if (dobj->widget.kcontrol_type == SND_SOC_TPLG_TYPE_ENUM) {
515 /* enumerated widget mixer */ 515 /* enumerated widget mixer */
516 for (i = 0; i < w->num_kcontrols; i++) { 516 for (i = 0; w->kcontrols != NULL && i < w->num_kcontrols; i++) {
517 struct snd_kcontrol *kcontrol = w->kcontrols[i]; 517 struct snd_kcontrol *kcontrol = w->kcontrols[i];
518 struct soc_enum *se = 518 struct soc_enum *se =
519 (struct soc_enum *)kcontrol->private_value; 519 (struct soc_enum *)kcontrol->private_value;
@@ -530,7 +530,7 @@ static void remove_widget(struct snd_soc_component *comp,
530 } 530 }
531 } else { 531 } else {
532 /* volume mixer or bytes controls */ 532 /* volume mixer or bytes controls */
533 for (i = 0; i < w->num_kcontrols; i++) { 533 for (i = 0; w->kcontrols != NULL && i < w->num_kcontrols; i++) {
534 struct snd_kcontrol *kcontrol = w->kcontrols[i]; 534 struct snd_kcontrol *kcontrol = w->kcontrols[i];
535 535
536 if (dobj->widget.kcontrol_type 536 if (dobj->widget.kcontrol_type
@@ -1325,8 +1325,10 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create(
1325 ec->hdr.name); 1325 ec->hdr.name);
1326 1326
1327 kc[i].name = kstrdup(ec->hdr.name, GFP_KERNEL); 1327 kc[i].name = kstrdup(ec->hdr.name, GFP_KERNEL);
1328 if (kc[i].name == NULL) 1328 if (kc[i].name == NULL) {
1329 kfree(se);
1329 goto err_se; 1330 goto err_se;
1331 }
1330 kc[i].private_value = (long)se; 1332 kc[i].private_value = (long)se;
1331 kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER; 1333 kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
1332 kc[i].access = ec->hdr.access; 1334 kc[i].access = ec->hdr.access;
@@ -1442,8 +1444,10 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dbytes_create(
1442 be->hdr.name, be->hdr.access); 1444 be->hdr.name, be->hdr.access);
1443 1445
1444 kc[i].name = kstrdup(be->hdr.name, GFP_KERNEL); 1446 kc[i].name = kstrdup(be->hdr.name, GFP_KERNEL);
1445 if (kc[i].name == NULL) 1447 if (kc[i].name == NULL) {
1448 kfree(sbe);
1446 goto err; 1449 goto err;
1450 }
1447 kc[i].private_value = (long)sbe; 1451 kc[i].private_value = (long)sbe;
1448 kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER; 1452 kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
1449 kc[i].access = be->hdr.access; 1453 kc[i].access = be->hdr.access;
@@ -2576,7 +2580,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
2576 2580
2577 /* match index */ 2581 /* match index */
2578 if (dobj->index != index && 2582 if (dobj->index != index &&
2579 dobj->index != SND_SOC_TPLG_INDEX_ALL) 2583 index != SND_SOC_TPLG_INDEX_ALL)
2580 continue; 2584 continue;
2581 2585
2582 switch (dobj->type) { 2586 switch (dobj->type) {
diff --git a/sound/usb/line6/midi.c b/sound/usb/line6/midi.c
index 6d7cde56a355..e2cf55c53ea8 100644
--- a/sound/usb/line6/midi.c
+++ b/sound/usb/line6/midi.c
@@ -125,7 +125,7 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
125 } 125 }
126 126
127 usb_fill_int_urb(urb, line6->usbdev, 127 usb_fill_int_urb(urb, line6->usbdev,
128 usb_sndbulkpipe(line6->usbdev, 128 usb_sndintpipe(line6->usbdev,
129 line6->properties->ep_ctrl_w), 129 line6->properties->ep_ctrl_w),
130 transfer_buffer, length, midi_sent, line6, 130 transfer_buffer, length, midi_sent, line6,
131 line6->interval); 131 line6->interval);
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 301ad61ed426..344d7b069d59 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -1776,7 +1776,8 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
1776 build_feature_ctl(state, _ftr, ch_bits, control, 1776 build_feature_ctl(state, _ftr, ch_bits, control,
1777 &iterm, unitid, ch_read_only); 1777 &iterm, unitid, ch_read_only);
1778 if (uac_v2v3_control_is_readable(master_bits, control)) 1778 if (uac_v2v3_control_is_readable(master_bits, control))
1779 build_feature_ctl(state, _ftr, 0, i, &iterm, unitid, 1779 build_feature_ctl(state, _ftr, 0, control,
1780 &iterm, unitid,
1780 !uac_v2v3_control_is_writeable(master_bits, 1781 !uac_v2v3_control_is_writeable(master_bits,
1781 control)); 1782 control));
1782 } 1783 }
@@ -1859,7 +1860,7 @@ static int parse_audio_input_terminal(struct mixer_build *state, int unitid,
1859 check_input_term(state, d->bTerminalID, &iterm); 1860 check_input_term(state, d->bTerminalID, &iterm);
1860 if (state->mixer->protocol == UAC_VERSION_2) { 1861 if (state->mixer->protocol == UAC_VERSION_2) {
1861 /* Check for jack detection. */ 1862 /* Check for jack detection. */
1862 if (uac_v2v3_control_is_readable(d->bmControls, 1863 if (uac_v2v3_control_is_readable(le16_to_cpu(d->bmControls),
1863 UAC2_TE_CONNECTOR)) { 1864 UAC2_TE_CONNECTOR)) {
1864 build_connector_control(state, &iterm, true); 1865 build_connector_control(state, &iterm, true);
1865 } 1866 }
@@ -2561,7 +2562,7 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
2561 if (err < 0 && err != -EINVAL) 2562 if (err < 0 && err != -EINVAL)
2562 return err; 2563 return err;
2563 2564
2564 if (uac_v2v3_control_is_readable(desc->bmControls, 2565 if (uac_v2v3_control_is_readable(le16_to_cpu(desc->bmControls),
2565 UAC2_TE_CONNECTOR)) { 2566 UAC2_TE_CONNECTOR)) {
2566 build_connector_control(&state, &state.oterm, 2567 build_connector_control(&state, &state.oterm,
2567 false); 2568 false);
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index 9038b2e7df73..eaa03acd4686 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -353,8 +353,11 @@ static struct usbmix_name_map bose_companion5_map[] = {
353/* 353/*
354 * Dell usb dock with ALC4020 codec had a firmware problem where it got 354 * Dell usb dock with ALC4020 codec had a firmware problem where it got
355 * screwed up when zero volume is passed; just skip it as a workaround 355 * screwed up when zero volume is passed; just skip it as a workaround
356 *
357 * Also the extension unit gives an access error, so skip it as well.
356 */ 358 */
357static const struct usbmix_name_map dell_alc4020_map[] = { 359static const struct usbmix_name_map dell_alc4020_map[] = {
360 { 4, NULL }, /* extension unit */
358 { 16, NULL }, 361 { 16, NULL },
359 { 19, NULL }, 362 { 19, NULL },
360 { 0 } 363 { 0 }
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 6a8f5843334e..956be9f7c72a 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -349,7 +349,7 @@ snd_pcm_chmap_elem *convert_chmap_v3(struct uac3_cluster_header_descriptor
349 * TODO: this conversion is not complete, update it 349 * TODO: this conversion is not complete, update it
350 * after adding UAC3 values to asound.h 350 * after adding UAC3 values to asound.h
351 */ 351 */
352 switch (is->bChPurpose) { 352 switch (is->bChRelationship) {
353 case UAC3_CH_MONO: 353 case UAC3_CH_MONO:
354 map = SNDRV_CHMAP_MONO; 354 map = SNDRV_CHMAP_MONO;
355 break; 355 break;
diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
index ebcab5c5465d..8082f7b077f1 100644
--- a/sound/usb/usx2y/us122l.c
+++ b/sound/usb/usx2y/us122l.c
@@ -139,7 +139,7 @@ static void usb_stream_hwdep_vm_open(struct vm_area_struct *area)
139 snd_printdd(KERN_DEBUG "%i\n", atomic_read(&us122l->mmap_count)); 139 snd_printdd(KERN_DEBUG "%i\n", atomic_read(&us122l->mmap_count));
140} 140}
141 141
142static int usb_stream_hwdep_vm_fault(struct vm_fault *vmf) 142static vm_fault_t usb_stream_hwdep_vm_fault(struct vm_fault *vmf)
143{ 143{
144 unsigned long offset; 144 unsigned long offset;
145 struct page *page; 145 struct page *page;
diff --git a/sound/usb/usx2y/usX2Yhwdep.c b/sound/usb/usx2y/usX2Yhwdep.c
index d8bd7c99b48c..c1dd9a7b48df 100644
--- a/sound/usb/usx2y/usX2Yhwdep.c
+++ b/sound/usb/usx2y/usX2Yhwdep.c
@@ -31,7 +31,7 @@
31#include "usbusx2y.h" 31#include "usbusx2y.h"
32#include "usX2Yhwdep.h" 32#include "usX2Yhwdep.h"
33 33
34static int snd_us428ctls_vm_fault(struct vm_fault *vmf) 34static vm_fault_t snd_us428ctls_vm_fault(struct vm_fault *vmf)
35{ 35{
36 unsigned long offset; 36 unsigned long offset;
37 struct page * page; 37 struct page * page;
diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
index 0d050528a4e1..4fd9276b8e50 100644
--- a/sound/usb/usx2y/usx2yhwdeppcm.c
+++ b/sound/usb/usx2y/usx2yhwdeppcm.c
@@ -652,7 +652,7 @@ static void snd_usX2Y_hwdep_pcm_vm_close(struct vm_area_struct *area)
652} 652}
653 653
654 654
655static int snd_usX2Y_hwdep_pcm_vm_fault(struct vm_fault *vmf) 655static vm_fault_t snd_usX2Y_hwdep_pcm_vm_fault(struct vm_fault *vmf)
656{ 656{
657 unsigned long offset; 657 unsigned long offset;
658 void *vaddr; 658 void *vaddr;
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h
index 6edd177bb1c7..2ba95d6fe852 100644
--- a/tools/arch/arm/include/uapi/asm/kvm.h
+++ b/tools/arch/arm/include/uapi/asm/kvm.h
@@ -135,6 +135,15 @@ struct kvm_arch_memory_slot {
135#define KVM_REG_ARM_CRM_SHIFT 7 135#define KVM_REG_ARM_CRM_SHIFT 7
136#define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800 136#define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800
137#define KVM_REG_ARM_32_CRN_SHIFT 11 137#define KVM_REG_ARM_32_CRN_SHIFT 11
138/*
139 * For KVM currently all guest registers are nonsecure, but we reserve a bit
140 * in the encoding to distinguish secure from nonsecure for AArch32 system
141 * registers that are banked by security. This is 1 for the secure banked
142 * register, and 0 for the nonsecure banked register or if the register is
143 * not banked by security.
144 */
145#define KVM_REG_ARM_SECURE_MASK 0x0000000010000000
146#define KVM_REG_ARM_SECURE_SHIFT 28
138 147
139#define ARM_CP15_REG_SHIFT_MASK(x,n) \ 148#define ARM_CP15_REG_SHIFT_MASK(x,n) \
140 (((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK) 149 (((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK)
diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h
index fb3a6de7440b..6847d85400a8 100644
--- a/tools/arch/x86/include/asm/required-features.h
+++ b/tools/arch/x86/include/asm/required-features.h
@@ -53,12 +53,6 @@
53# define NEED_MOVBE 0 53# define NEED_MOVBE 0
54#endif 54#endif
55 55
56#ifdef CONFIG_X86_5LEVEL
57# define NEED_LA57 (1<<(X86_FEATURE_LA57 & 31))
58#else
59# define NEED_LA57 0
60#endif
61
62#ifdef CONFIG_X86_64 56#ifdef CONFIG_X86_64
63#ifdef CONFIG_PARAVIRT 57#ifdef CONFIG_PARAVIRT
64/* Paravirtualized systems may not have PSE or PGE available */ 58/* Paravirtualized systems may not have PSE or PGE available */
@@ -104,7 +98,7 @@
104#define REQUIRED_MASK13 0 98#define REQUIRED_MASK13 0
105#define REQUIRED_MASK14 0 99#define REQUIRED_MASK14 0
106#define REQUIRED_MASK15 0 100#define REQUIRED_MASK15 0
107#define REQUIRED_MASK16 (NEED_LA57) 101#define REQUIRED_MASK16 0
108#define REQUIRED_MASK17 0 102#define REQUIRED_MASK17 0
109#define REQUIRED_MASK18 0 103#define REQUIRED_MASK18 0
110#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19) 104#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index f3a960488eae..c535c2fdea13 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -354,8 +354,25 @@ struct kvm_xcrs {
354 __u64 padding[16]; 354 __u64 padding[16];
355}; 355};
356 356
357/* definition of registers in kvm_run */ 357#define KVM_SYNC_X86_REGS (1UL << 0)
358#define KVM_SYNC_X86_SREGS (1UL << 1)
359#define KVM_SYNC_X86_EVENTS (1UL << 2)
360
361#define KVM_SYNC_X86_VALID_FIELDS \
362 (KVM_SYNC_X86_REGS| \
363 KVM_SYNC_X86_SREGS| \
364 KVM_SYNC_X86_EVENTS)
365
366/* kvm_sync_regs struct included by kvm_run struct */
358struct kvm_sync_regs { 367struct kvm_sync_regs {
368 /* Members of this structure are potentially malicious.
369 * Care must be taken by code reading, esp. interpreting,
370 * data fields from them inside KVM to prevent TOCTOU and
371 * double-fetch types of vulnerabilities.
372 */
373 struct kvm_regs regs;
374 struct kvm_sregs sregs;
375 struct kvm_vcpu_events events;
359}; 376};
360 377
361#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) 378#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index 04e32f965ad7..1827c2f973f9 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -151,11 +151,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
151 * required ordering. 151 * required ordering.
152 */ 152 */
153 153
154#define READ_ONCE(x) \ 154#define READ_ONCE(x) \
155 ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) 155({ \
156 156 union { typeof(x) __val; char __c[1]; } __u = \
157#define WRITE_ONCE(x, val) \ 157 { .__c = { 0 } }; \
158 ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) 158 __read_once_size(&(x), __u.__c, sizeof(x)); \
159 __u.__val; \
160})
161
162#define WRITE_ONCE(x, val) \
163({ \
164 union { typeof(x) __val; char __c[1]; } __u = \
165 { .__val = (val) }; \
166 __write_once_size(&(x), __u.__c, sizeof(x)); \
167 __u.__val; \
168})
159 169
160 170
161#ifndef __fallthrough 171#ifndef __fallthrough
diff --git a/tools/include/linux/coresight-pmu.h b/tools/include/linux/coresight-pmu.h
index edfeaba95429..a1a959ba24ff 100644
--- a/tools/include/linux/coresight-pmu.h
+++ b/tools/include/linux/coresight-pmu.h
@@ -1,18 +1,7 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * Copyright(C) 2015 Linaro Limited. All rights reserved. 3 * Copyright(C) 2015 Linaro Limited. All rights reserved.
3 * Author: Mathieu Poirier <mathieu.poirier@linaro.org> 4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 5 */
17 6
18#ifndef _LINUX_CORESIGHT_PMU_H 7#ifndef _LINUX_CORESIGHT_PMU_H
diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h
index f8b134f5608f..e7ee32861d51 100644
--- a/tools/include/uapi/asm-generic/mman-common.h
+++ b/tools/include/uapi/asm-generic/mman-common.h
@@ -27,6 +27,9 @@
27# define MAP_UNINITIALIZED 0x0 /* Don't support this flag */ 27# define MAP_UNINITIALIZED 0x0 /* Don't support this flag */
28#endif 28#endif
29 29
30/* 0x0100 - 0x80000 flags are defined in asm-generic/mman.h */
31#define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */
32
30/* 33/*
31 * Flags for mlock 34 * Flags for mlock
32 */ 35 */
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 9d07465023a2..c5ec89732a8d 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -864,6 +864,7 @@ enum bpf_func_id {
864/* BPF_FUNC_skb_set_tunnel_key flags. */ 864/* BPF_FUNC_skb_set_tunnel_key flags. */
865#define BPF_F_ZERO_CSUM_TX (1ULL << 1) 865#define BPF_F_ZERO_CSUM_TX (1ULL << 1)
866#define BPF_F_DONT_FRAGMENT (1ULL << 2) 866#define BPF_F_DONT_FRAGMENT (1ULL << 2)
867#define BPF_F_SEQ_NUMBER (1ULL << 3)
867 868
868/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and 869/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
869 * BPF_FUNC_perf_event_read_value flags. 870 * BPF_FUNC_perf_event_read_value flags.
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index 6d9447700e18..68699f654118 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -941,4 +941,43 @@ enum {
941 IFLA_EVENT_BONDING_OPTIONS, /* change in bonding options */ 941 IFLA_EVENT_BONDING_OPTIONS, /* change in bonding options */
942}; 942};
943 943
944/* tun section */
945
946enum {
947 IFLA_TUN_UNSPEC,
948 IFLA_TUN_OWNER,
949 IFLA_TUN_GROUP,
950 IFLA_TUN_TYPE,
951 IFLA_TUN_PI,
952 IFLA_TUN_VNET_HDR,
953 IFLA_TUN_PERSIST,
954 IFLA_TUN_MULTI_QUEUE,
955 IFLA_TUN_NUM_QUEUES,
956 IFLA_TUN_NUM_DISABLED_QUEUES,
957 __IFLA_TUN_MAX,
958};
959
960#define IFLA_TUN_MAX (__IFLA_TUN_MAX - 1)
961
962/* rmnet section */
963
964#define RMNET_FLAGS_INGRESS_DEAGGREGATION (1U << 0)
965#define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1)
966#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2)
967#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3)
968
969enum {
970 IFLA_RMNET_UNSPEC,
971 IFLA_RMNET_MUX_ID,
972 IFLA_RMNET_FLAGS,
973 __IFLA_RMNET_MAX,
974};
975
976#define IFLA_RMNET_MAX (__IFLA_RMNET_MAX - 1)
977
978struct ifla_rmnet_flags {
979 __u32 flags;
980 __u32 mask;
981};
982
944#endif /* _UAPI_LINUX_IF_LINK_H */ 983#endif /* _UAPI_LINUX_IF_LINK_H */
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 6b89f87db200..1065006c9bf5 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -396,6 +396,10 @@ struct kvm_run {
396 char padding[256]; 396 char padding[256];
397 }; 397 };
398 398
399 /* 2048 is the size of the char array used to bound/pad the size
400 * of the union that holds sync regs.
401 */
402 #define SYNC_REGS_SIZE_BYTES 2048
399 /* 403 /*
400 * shared registers between kvm and userspace. 404 * shared registers between kvm and userspace.
401 * kvm_valid_regs specifies the register classes set by the host 405 * kvm_valid_regs specifies the register classes set by the host
@@ -407,7 +411,7 @@ struct kvm_run {
407 __u64 kvm_dirty_regs; 411 __u64 kvm_dirty_regs;
408 union { 412 union {
409 struct kvm_sync_regs regs; 413 struct kvm_sync_regs regs;
410 char padding[2048]; 414 char padding[SYNC_REGS_SIZE_BYTES];
411 } s; 415 } s;
412}; 416};
413 417
@@ -936,6 +940,7 @@ struct kvm_ppc_resize_hpt {
936#define KVM_CAP_PPC_GET_CPU_CHAR 151 940#define KVM_CAP_PPC_GET_CPU_CHAR 151
937#define KVM_CAP_S390_BPB 152 941#define KVM_CAP_S390_BPB 152
938#define KVM_CAP_GET_MSR_FEATURES 153 942#define KVM_CAP_GET_MSR_FEATURES 153
943#define KVM_CAP_HYPERV_EVENTFD 154
939 944
940#ifdef KVM_CAP_IRQ_ROUTING 945#ifdef KVM_CAP_IRQ_ROUTING
941 946
@@ -1375,6 +1380,10 @@ struct kvm_enc_region {
1375#define KVM_MEMORY_ENCRYPT_REG_REGION _IOR(KVMIO, 0xbb, struct kvm_enc_region) 1380#define KVM_MEMORY_ENCRYPT_REG_REGION _IOR(KVMIO, 0xbb, struct kvm_enc_region)
1376#define KVM_MEMORY_ENCRYPT_UNREG_REGION _IOR(KVMIO, 0xbc, struct kvm_enc_region) 1381#define KVM_MEMORY_ENCRYPT_UNREG_REGION _IOR(KVMIO, 0xbc, struct kvm_enc_region)
1377 1382
1383/* Available with KVM_CAP_HYPERV_EVENTFD */
1384#define KVM_HYPERV_EVENTFD _IOW(KVMIO, 0xbd, struct kvm_hyperv_eventfd)
1385
1386
1378/* Secure Encrypted Virtualization command */ 1387/* Secure Encrypted Virtualization command */
1379enum sev_cmd_id { 1388enum sev_cmd_id {
1380 /* Guest initialization commands */ 1389 /* Guest initialization commands */
@@ -1515,4 +1524,14 @@ struct kvm_assigned_msix_entry {
1515#define KVM_ARM_DEV_EL1_PTIMER (1 << 1) 1524#define KVM_ARM_DEV_EL1_PTIMER (1 << 1)
1516#define KVM_ARM_DEV_PMU (1 << 2) 1525#define KVM_ARM_DEV_PMU (1 << 2)
1517 1526
1527struct kvm_hyperv_eventfd {
1528 __u32 conn_id;
1529 __s32 fd;
1530 __u32 flags;
1531 __u32 padding[3];
1532};
1533
1534#define KVM_HYPERV_CONN_ID_MASK 0x00ffffff
1535#define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0)
1536
1518#endif /* __LINUX_KVM_H */ 1537#endif /* __LINUX_KVM_H */
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index 912b85b52344..b8e288a1f740 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -650,11 +650,23 @@ struct perf_event_mmap_page {
650#define PERF_RECORD_MISC_COMM_EXEC (1 << 13) 650#define PERF_RECORD_MISC_COMM_EXEC (1 << 13)
651#define PERF_RECORD_MISC_SWITCH_OUT (1 << 13) 651#define PERF_RECORD_MISC_SWITCH_OUT (1 << 13)
652/* 652/*
653 * Indicates that the content of PERF_SAMPLE_IP points to 653 * These PERF_RECORD_MISC_* flags below are safely reused
654 * the actual instruction that triggered the event. See also 654 * for the following events:
655 * perf_event_attr::precise_ip. 655 *
656 * PERF_RECORD_MISC_EXACT_IP - PERF_RECORD_SAMPLE of precise events
657 * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events
658 *
659 *
660 * PERF_RECORD_MISC_EXACT_IP:
661 * Indicates that the content of PERF_SAMPLE_IP points to
662 * the actual instruction that triggered the event. See also
663 * perf_event_attr::precise_ip.
664 *
665 * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT:
666 * Indicates that thread was preempted in TASK_RUNNING state.
656 */ 667 */
657#define PERF_RECORD_MISC_EXACT_IP (1 << 14) 668#define PERF_RECORD_MISC_EXACT_IP (1 << 14)
669#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14)
658/* 670/*
659 * Reserve the last bit to indicate some extended misc field 671 * Reserve the last bit to indicate some extended misc field
660 */ 672 */
diff --git a/tools/include/uapi/sound/asound.h b/tools/include/uapi/sound/asound.h
index 07d61583fd02..ed0a120d4f08 100644
--- a/tools/include/uapi/sound/asound.h
+++ b/tools/include/uapi/sound/asound.h
@@ -242,6 +242,7 @@ typedef int __bitwise snd_pcm_format_t;
242#define SNDRV_PCM_FORMAT_DSD_U16_BE ((__force snd_pcm_format_t) 51) /* DSD, 2-byte samples DSD (x16), big endian */ 242#define SNDRV_PCM_FORMAT_DSD_U16_BE ((__force snd_pcm_format_t) 51) /* DSD, 2-byte samples DSD (x16), big endian */
243#define SNDRV_PCM_FORMAT_DSD_U32_BE ((__force snd_pcm_format_t) 52) /* DSD, 4-byte samples DSD (x32), big endian */ 243#define SNDRV_PCM_FORMAT_DSD_U32_BE ((__force snd_pcm_format_t) 52) /* DSD, 4-byte samples DSD (x32), big endian */
244#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U32_BE 244#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U32_BE
245#define SNDRV_PCM_FORMAT_FIRST SNDRV_PCM_FORMAT_S8
245 246
246#ifdef SNDRV_LITTLE_ENDIAN 247#ifdef SNDRV_LITTLE_ENDIAN
247#define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE 248#define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE
diff --git a/tools/lib/subcmd/parse-options.c b/tools/lib/subcmd/parse-options.c
index f6a1babcbac4..cb7154eccbdc 100644
--- a/tools/lib/subcmd/parse-options.c
+++ b/tools/lib/subcmd/parse-options.c
@@ -433,7 +433,7 @@ match:
433 433
434 if (ambiguous_option) { 434 if (ambiguous_option) {
435 fprintf(stderr, 435 fprintf(stderr,
436 " Error: Ambiguous option: %s (could be --%s%s or --%s%s)", 436 " Error: Ambiguous option: %s (could be --%s%s or --%s%s)\n",
437 arg, 437 arg,
438 (ambiguous_flags & OPT_UNSET) ? "no-" : "", 438 (ambiguous_flags & OPT_UNSET) ? "no-" : "",
439 ambiguous_option->long_name, 439 ambiguous_option->long_name,
@@ -458,7 +458,7 @@ static void check_typos(const char *arg, const struct option *options)
458 return; 458 return;
459 459
460 if (strstarts(arg, "no-")) { 460 if (strstarts(arg, "no-")) {
461 fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)", arg); 461 fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)\n", arg);
462 exit(129); 462 exit(129);
463 } 463 }
464 464
@@ -466,7 +466,7 @@ static void check_typos(const char *arg, const struct option *options)
466 if (!options->long_name) 466 if (!options->long_name)
467 continue; 467 continue;
468 if (strstarts(options->long_name, arg)) { 468 if (strstarts(options->long_name, arg)) {
469 fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)", arg); 469 fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)\n", arg);
470 exit(129); 470 exit(129);
471 } 471 }
472 } 472 }
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 8ae824dbfca3..f76d9914686a 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -31,8 +31,8 @@ INCLUDES := -I$(srctree)/tools/include \
31 -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \ 31 -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
32 -I$(srctree)/tools/objtool/arch/$(ARCH)/include 32 -I$(srctree)/tools/objtool/arch/$(ARCH)/include
33WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed 33WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
34CFLAGS += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES) 34CFLAGS += -Werror $(WARNINGS) $(HOSTCFLAGS) -g $(INCLUDES)
35LDFLAGS += -lelf $(LIBSUBCMD) 35LDFLAGS += -lelf $(LIBSUBCMD) $(HOSTLDFLAGS)
36 36
37# Allow old libelf to be used: 37# Allow old libelf to be used:
38elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr) 38elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr)
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index 5b4fff3adc4b..32f4a898e3f2 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -334,6 +334,11 @@ annotate.*::
334 334
335 99.93 │ mov %eax,%eax 335 99.93 │ mov %eax,%eax
336 336
337 annotate.offset_level::
338 Default is '1', meaning just jump targets will have offsets show right beside
339 the instruction. When set to '2' 'call' instructions will also have its offsets
340 shown, 3 or higher will show offsets for all instructions.
341
337hist.*:: 342hist.*::
338 hist.percentage:: 343 hist.percentage::
339 This option control the way to calculate overhead of filtered entries - 344 This option control the way to calculate overhead of filtered entries -
diff --git a/tools/perf/Documentation/perf-mem.txt b/tools/perf/Documentation/perf-mem.txt
index b0211410969b..f8d2167cf3e7 100644
--- a/tools/perf/Documentation/perf-mem.txt
+++ b/tools/perf/Documentation/perf-mem.txt
@@ -28,29 +28,46 @@ OPTIONS
28<command>...:: 28<command>...::
29 Any command you can specify in a shell. 29 Any command you can specify in a shell.
30 30
31-i::
32--input=<file>::
33 Input file name.
34
31-f:: 35-f::
32--force:: 36--force::
33 Don't do ownership validation 37 Don't do ownership validation
34 38
35-t:: 39-t::
36--type=:: 40--type=<type>::
37 Select the memory operation type: load or store (default: load,store) 41 Select the memory operation type: load or store (default: load,store)
38 42
39-D:: 43-D::
40--dump-raw-samples=:: 44--dump-raw-samples::
41 Dump the raw decoded samples on the screen in a format that is easy to parse with 45 Dump the raw decoded samples on the screen in a format that is easy to parse with
42 one sample per line. 46 one sample per line.
43 47
44-x:: 48-x::
45--field-separator:: 49--field-separator=<separator>::
46 Specify the field separator used when dump raw samples (-D option). By default, 50 Specify the field separator used when dump raw samples (-D option). By default,
47 The separator is the space character. 51 The separator is the space character.
48 52
49-C:: 53-C::
50--cpu-list:: 54--cpu=<cpu>::
51 Restrict dump of raw samples to those provided via this option. Note that the same 55 Monitor only on the list of CPUs provided. Multiple CPUs can be provided as a
52 option can be passed in record mode. It will be interpreted the same way as perf 56 comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-2. Default
53 record. 57 is to monitor all CPUS.
58-U::
59--hide-unresolved::
60 Only display entries resolved to a symbol.
61
62-p::
63--phys-data::
64 Record/Report sample physical addresses
65
66RECORD OPTIONS
67--------------
68-e::
69--event <event>::
70 Event selector. Use 'perf mem record -e list' to list available events.
54 71
55-K:: 72-K::
56--all-kernel:: 73--all-kernel::
@@ -60,12 +77,15 @@ OPTIONS
60--all-user:: 77--all-user::
61 Configure all used events to run in user space. 78 Configure all used events to run in user space.
62 79
63--ldload:: 80-v::
81--verbose::
82 Be more verbose (show counter open errors, etc)
83
84--ldlat <n>::
64 Specify desired latency for loads event. 85 Specify desired latency for loads event.
65 86
66-p:: 87In addition, for report all perf report options are valid, and for record
67--phys-data:: 88all perf record options.
68 Record/Report sample physical addresses
69 89
70SEE ALSO 90SEE ALSO
71-------- 91--------
diff --git a/tools/perf/Documentation/perf-sched.txt b/tools/perf/Documentation/perf-sched.txt
index bb33601a823b..63f938b887dd 100644
--- a/tools/perf/Documentation/perf-sched.txt
+++ b/tools/perf/Documentation/perf-sched.txt
@@ -104,8 +104,8 @@ OPTIONS for 'perf sched timehist'
104 kallsyms pathname 104 kallsyms pathname
105 105
106-g:: 106-g::
107--no-call-graph:: 107--call-graph::
108 Do not display call chains if present. 108 Display call chains if present (default on).
109 109
110--max-stack:: 110--max-stack::
111 Maximum number of functions to display in backtrace, default 5. 111 Maximum number of functions to display in backtrace, default 5.
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 36ec0257f8d3..afdafe2110a1 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -228,14 +228,15 @@ OPTIONS
228 For sample events it's possible to display misc field with -F +misc option, 228 For sample events it's possible to display misc field with -F +misc option,
229 following letters are displayed for each bit: 229 following letters are displayed for each bit:
230 230
231 PERF_RECORD_MISC_KERNEL K 231 PERF_RECORD_MISC_KERNEL K
232 PERF_RECORD_MISC_USER U 232 PERF_RECORD_MISC_USER U
233 PERF_RECORD_MISC_HYPERVISOR H 233 PERF_RECORD_MISC_HYPERVISOR H
234 PERF_RECORD_MISC_GUEST_KERNEL G 234 PERF_RECORD_MISC_GUEST_KERNEL G
235 PERF_RECORD_MISC_GUEST_USER g 235 PERF_RECORD_MISC_GUEST_USER g
236 PERF_RECORD_MISC_MMAP_DATA* M 236 PERF_RECORD_MISC_MMAP_DATA* M
237 PERF_RECORD_MISC_COMM_EXEC E 237 PERF_RECORD_MISC_COMM_EXEC E
238 PERF_RECORD_MISC_SWITCH_OUT S 238 PERF_RECORD_MISC_SWITCH_OUT S
239 PERF_RECORD_MISC_SWITCH_OUT_PREEMPT Sp
239 240
240 $ perf script -F +misc ... 241 $ perf script -F +misc ...
241 sched-messaging 1414 K 28690.636582: 4590 cycles ... 242 sched-messaging 1414 K 28690.636582: 4590 cycles ...
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index f15b306be183..e6c3b4e555c2 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -153,7 +153,7 @@ perf stat --repeat 10 --null --sync --pre 'make -s O=defconfig-build/clean' -- m
153 153
154-I msecs:: 154-I msecs::
155--interval-print msecs:: 155--interval-print msecs::
156Print count deltas every N milliseconds (minimum: 10ms) 156Print count deltas every N milliseconds (minimum: 1ms)
157The overhead percentage could be high in some cases, for instance with small, sub 100ms intervals. Use with caution. 157The overhead percentage could be high in some cases, for instance with small, sub 100ms intervals. Use with caution.
158 example: 'perf stat -I 1000 -e cycles -a sleep 5' 158 example: 'perf stat -I 1000 -e cycles -a sleep 5'
159 159
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index c7abd83a8e19..ae7dc46e8f8a 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -68,7 +68,7 @@ ifeq ($(NO_PERF_REGS),0)
68endif 68endif
69 69
70ifneq ($(NO_SYSCALL_TABLE),1) 70ifneq ($(NO_SYSCALL_TABLE),1)
71 CFLAGS += -DHAVE_SYSCALL_TABLE 71 CFLAGS += -DHAVE_SYSCALL_TABLE_SUPPORT
72endif 72endif
73 73
74# So far there's only x86 and arm libdw unwind support merged in perf. 74# So far there's only x86 and arm libdw unwind support merged in perf.
@@ -847,7 +847,7 @@ ifndef NO_JVMTI
847 ifeq ($(feature-jvmti), 1) 847 ifeq ($(feature-jvmti), 1)
848 $(call detected_var,JDIR) 848 $(call detected_var,JDIR)
849 else 849 else
850 $(warning No openjdk development package found, please install JDK package) 850 $(warning No openjdk development package found, please install JDK package, e.g. openjdk-8-jdk, java-1.8.0-openjdk-devel)
851 NO_JVMTI := 1 851 NO_JVMTI := 1
852 endif 852 endif
853endif 853endif
diff --git a/tools/perf/arch/arm/include/arch-tests.h b/tools/perf/arch/arm/include/arch-tests.h
new file mode 100644
index 000000000000..90ec4c8cb880
--- /dev/null
+++ b/tools/perf/arch/arm/include/arch-tests.h
@@ -0,0 +1,12 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef ARCH_TESTS_H
3#define ARCH_TESTS_H
4
5#ifdef HAVE_DWARF_UNWIND_SUPPORT
6struct thread;
7struct perf_sample;
8#endif
9
10extern struct test arch_tests[];
11
12#endif
diff --git a/tools/perf/arch/arm/tests/Build b/tools/perf/arch/arm/tests/Build
index b30eff9bcc83..883c57ff0c08 100644
--- a/tools/perf/arch/arm/tests/Build
+++ b/tools/perf/arch/arm/tests/Build
@@ -1,2 +1,4 @@
1libperf-y += regs_load.o 1libperf-y += regs_load.o
2libperf-y += dwarf-unwind.o 2libperf-y += dwarf-unwind.o
3
4libperf-y += arch-tests.o
diff --git a/tools/perf/arch/arm/tests/arch-tests.c b/tools/perf/arch/arm/tests/arch-tests.c
new file mode 100644
index 000000000000..5b1543c98022
--- /dev/null
+++ b/tools/perf/arch/arm/tests/arch-tests.c
@@ -0,0 +1,16 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <string.h>
3#include "tests/tests.h"
4#include "arch-tests.h"
5
6struct test arch_tests[] = {
7#ifdef HAVE_DWARF_UNWIND_SUPPORT
8 {
9 .desc = "DWARF unwind",
10 .func = test__dwarf_unwind,
11 },
12#endif
13 {
14 .func = NULL,
15 },
16};
diff --git a/tools/perf/arch/arm/util/auxtrace.c b/tools/perf/arch/arm/util/auxtrace.c
index fa639e3e52ac..1ce6bdbda561 100644
--- a/tools/perf/arch/arm/util/auxtrace.c
+++ b/tools/perf/arch/arm/util/auxtrace.c
@@ -1,18 +1,7 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Copyright(C) 2015 Linaro Limited. All rights reserved. 3 * Copyright(C) 2015 Linaro Limited. All rights reserved.
3 * Author: Mathieu Poirier <mathieu.poirier@linaro.org> 4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 5 */
17 6
18#include <stdbool.h> 7#include <stdbool.h>
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index 5c655ad4621e..2f595cd73da6 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -1,18 +1,7 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Copyright(C) 2015 Linaro Limited. All rights reserved. 3 * Copyright(C) 2015 Linaro Limited. All rights reserved.
3 * Author: Mathieu Poirier <mathieu.poirier@linaro.org> 4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 5 */
17 6
18#include <api/fs/fs.h> 7#include <api/fs/fs.h>
diff --git a/tools/perf/arch/arm/util/cs-etm.h b/tools/perf/arch/arm/util/cs-etm.h
index 5256741be549..1a12e64f5127 100644
--- a/tools/perf/arch/arm/util/cs-etm.h
+++ b/tools/perf/arch/arm/util/cs-etm.h
@@ -1,18 +1,7 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * Copyright(C) 2015 Linaro Limited. All rights reserved. 3 * Copyright(C) 2015 Linaro Limited. All rights reserved.
3 * Author: Mathieu Poirier <mathieu.poirier@linaro.org> 4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 5 */
17 6
18#ifndef INCLUDE__PERF_CS_ETM_H__ 7#ifndef INCLUDE__PERF_CS_ETM_H__
diff --git a/tools/perf/arch/arm/util/pmu.c b/tools/perf/arch/arm/util/pmu.c
index ac4dffc807b8..e047571e6080 100644
--- a/tools/perf/arch/arm/util/pmu.c
+++ b/tools/perf/arch/arm/util/pmu.c
@@ -1,18 +1,7 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Copyright(C) 2015 Linaro Limited. All rights reserved. 3 * Copyright(C) 2015 Linaro Limited. All rights reserved.
3 * Author: Mathieu Poirier <mathieu.poirier@linaro.org> 4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 5 */
17 6
18#include <string.h> 7#include <string.h>
diff --git a/tools/perf/arch/s390/util/auxtrace.c b/tools/perf/arch/s390/util/auxtrace.c
index 6cb48e4cffd9..3afe8256eff2 100644
--- a/tools/perf/arch/s390/util/auxtrace.c
+++ b/tools/perf/arch/s390/util/auxtrace.c
@@ -87,6 +87,7 @@ struct auxtrace_record *auxtrace_record__init(struct perf_evlist *evlist,
87 struct perf_evsel *pos; 87 struct perf_evsel *pos;
88 int diagnose = 0; 88 int diagnose = 0;
89 89
90 *err = 0;
90 if (evlist->nr_entries == 0) 91 if (evlist->nr_entries == 0)
91 return NULL; 92 return NULL;
92 93
diff --git a/tools/perf/arch/s390/util/header.c b/tools/perf/arch/s390/util/header.c
index a4c30f1c70be..163b92f33998 100644
--- a/tools/perf/arch/s390/util/header.c
+++ b/tools/perf/arch/s390/util/header.c
@@ -146,21 +146,3 @@ char *get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
146 zfree(&buf); 146 zfree(&buf);
147 return buf; 147 return buf;
148} 148}
149
150/*
151 * Compare the cpuid string returned by get_cpuid() function
152 * with the name generated by the jevents file read from
153 * pmu-events/arch/s390/mapfile.csv.
154 *
155 * Parameter mapcpuid is the cpuid as stored in the
156 * pmu-events/arch/s390/mapfile.csv. This is just the type number.
157 * Parameter cpuid is the cpuid returned by function get_cpuid().
158 */
159int strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
160{
161 char *cp = strchr(cpuid, ',');
162
163 if (cp == NULL)
164 return -1;
165 return strncmp(cp + 1, mapcpuid, strlen(mapcpuid));
166}
diff --git a/tools/perf/arch/x86/Makefile b/tools/perf/arch/x86/Makefile
index d74eaa7aa927..1a38e78117ce 100644
--- a/tools/perf/arch/x86/Makefile
+++ b/tools/perf/arch/x86/Makefile
@@ -21,7 +21,7 @@ _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
21$(header): $(sys)/syscall_64.tbl $(systbl) 21$(header): $(sys)/syscall_64.tbl $(systbl)
22 @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \ 22 @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \
23 (diff -B arch/x86/entry/syscalls/syscall_64.tbl ../../arch/x86/entry/syscalls/syscall_64.tbl >/dev/null) \ 23 (diff -B arch/x86/entry/syscalls/syscall_64.tbl ../../arch/x86/entry/syscalls/syscall_64.tbl >/dev/null) \
24 || echo "Warning: Kernel ABI header at 'tools/arch/x86/entry/syscalls/syscall_64.tbl' differs from latest version at 'arch/x86/entry/syscalls/syscall_64.tbl'" >&2 )) || true 24 || echo "Warning: Kernel ABI header at 'tools/perf/arch/x86/entry/syscalls/syscall_64.tbl' differs from latest version at 'arch/x86/entry/syscalls/syscall_64.tbl'" >&2 )) || true
25 $(Q)$(SHELL) '$(systbl)' $(sys)/syscall_64.tbl 'x86_64' > $@ 25 $(Q)$(SHELL) '$(systbl)' $(sys)/syscall_64.tbl 'x86_64' > $@
26 26
27clean:: 27clean::
diff --git a/tools/perf/arch/x86/annotate/instructions.c b/tools/perf/arch/x86/annotate/instructions.c
index 5bd1ba8c0282..44f5aba78210 100644
--- a/tools/perf/arch/x86/annotate/instructions.c
+++ b/tools/perf/arch/x86/annotate/instructions.c
@@ -1,21 +1,43 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2static struct ins x86__instructions[] = { 2static struct ins x86__instructions[] = {
3 { .name = "adc", .ops = &mov_ops, },
4 { .name = "adcb", .ops = &mov_ops, },
5 { .name = "adcl", .ops = &mov_ops, },
3 { .name = "add", .ops = &mov_ops, }, 6 { .name = "add", .ops = &mov_ops, },
4 { .name = "addl", .ops = &mov_ops, }, 7 { .name = "addl", .ops = &mov_ops, },
5 { .name = "addq", .ops = &mov_ops, }, 8 { .name = "addq", .ops = &mov_ops, },
9 { .name = "addsd", .ops = &mov_ops, },
6 { .name = "addw", .ops = &mov_ops, }, 10 { .name = "addw", .ops = &mov_ops, },
7 { .name = "and", .ops = &mov_ops, }, 11 { .name = "and", .ops = &mov_ops, },
12 { .name = "andb", .ops = &mov_ops, },
13 { .name = "andl", .ops = &mov_ops, },
14 { .name = "andpd", .ops = &mov_ops, },
15 { .name = "andps", .ops = &mov_ops, },
16 { .name = "andq", .ops = &mov_ops, },
17 { .name = "andw", .ops = &mov_ops, },
18 { .name = "bsr", .ops = &mov_ops, },
19 { .name = "bt", .ops = &mov_ops, },
20 { .name = "btr", .ops = &mov_ops, },
8 { .name = "bts", .ops = &mov_ops, }, 21 { .name = "bts", .ops = &mov_ops, },
22 { .name = "btsq", .ops = &mov_ops, },
9 { .name = "call", .ops = &call_ops, }, 23 { .name = "call", .ops = &call_ops, },
10 { .name = "callq", .ops = &call_ops, }, 24 { .name = "callq", .ops = &call_ops, },
25 { .name = "cmovbe", .ops = &mov_ops, },
26 { .name = "cmove", .ops = &mov_ops, },
27 { .name = "cmovae", .ops = &mov_ops, },
11 { .name = "cmp", .ops = &mov_ops, }, 28 { .name = "cmp", .ops = &mov_ops, },
12 { .name = "cmpb", .ops = &mov_ops, }, 29 { .name = "cmpb", .ops = &mov_ops, },
13 { .name = "cmpl", .ops = &mov_ops, }, 30 { .name = "cmpl", .ops = &mov_ops, },
14 { .name = "cmpq", .ops = &mov_ops, }, 31 { .name = "cmpq", .ops = &mov_ops, },
15 { .name = "cmpw", .ops = &mov_ops, }, 32 { .name = "cmpw", .ops = &mov_ops, },
16 { .name = "cmpxch", .ops = &mov_ops, }, 33 { .name = "cmpxch", .ops = &mov_ops, },
34 { .name = "cmpxchg", .ops = &mov_ops, },
35 { .name = "cs", .ops = &mov_ops, },
17 { .name = "dec", .ops = &dec_ops, }, 36 { .name = "dec", .ops = &dec_ops, },
18 { .name = "decl", .ops = &dec_ops, }, 37 { .name = "decl", .ops = &dec_ops, },
38 { .name = "divsd", .ops = &mov_ops, },
39 { .name = "divss", .ops = &mov_ops, },
40 { .name = "gs", .ops = &mov_ops, },
19 { .name = "imul", .ops = &mov_ops, }, 41 { .name = "imul", .ops = &mov_ops, },
20 { .name = "inc", .ops = &dec_ops, }, 42 { .name = "inc", .ops = &dec_ops, },
21 { .name = "incl", .ops = &dec_ops, }, 43 { .name = "incl", .ops = &dec_ops, },
@@ -57,25 +79,68 @@ static struct ins x86__instructions[] = {
57 { .name = "lea", .ops = &mov_ops, }, 79 { .name = "lea", .ops = &mov_ops, },
58 { .name = "lock", .ops = &lock_ops, }, 80 { .name = "lock", .ops = &lock_ops, },
59 { .name = "mov", .ops = &mov_ops, }, 81 { .name = "mov", .ops = &mov_ops, },
82 { .name = "movapd", .ops = &mov_ops, },
83 { .name = "movaps", .ops = &mov_ops, },
60 { .name = "movb", .ops = &mov_ops, }, 84 { .name = "movb", .ops = &mov_ops, },
61 { .name = "movdqa", .ops = &mov_ops, }, 85 { .name = "movdqa", .ops = &mov_ops, },
86 { .name = "movdqu", .ops = &mov_ops, },
62 { .name = "movl", .ops = &mov_ops, }, 87 { .name = "movl", .ops = &mov_ops, },
63 { .name = "movq", .ops = &mov_ops, }, 88 { .name = "movq", .ops = &mov_ops, },
89 { .name = "movsd", .ops = &mov_ops, },
64 { .name = "movslq", .ops = &mov_ops, }, 90 { .name = "movslq", .ops = &mov_ops, },
91 { .name = "movss", .ops = &mov_ops, },
92 { .name = "movupd", .ops = &mov_ops, },
93 { .name = "movups", .ops = &mov_ops, },
94 { .name = "movw", .ops = &mov_ops, },
65 { .name = "movzbl", .ops = &mov_ops, }, 95 { .name = "movzbl", .ops = &mov_ops, },
66 { .name = "movzwl", .ops = &mov_ops, }, 96 { .name = "movzwl", .ops = &mov_ops, },
97 { .name = "mulsd", .ops = &mov_ops, },
98 { .name = "mulss", .ops = &mov_ops, },
67 { .name = "nop", .ops = &nop_ops, }, 99 { .name = "nop", .ops = &nop_ops, },
68 { .name = "nopl", .ops = &nop_ops, }, 100 { .name = "nopl", .ops = &nop_ops, },
69 { .name = "nopw", .ops = &nop_ops, }, 101 { .name = "nopw", .ops = &nop_ops, },
70 { .name = "or", .ops = &mov_ops, }, 102 { .name = "or", .ops = &mov_ops, },
103 { .name = "orb", .ops = &mov_ops, },
71 { .name = "orl", .ops = &mov_ops, }, 104 { .name = "orl", .ops = &mov_ops, },
105 { .name = "orps", .ops = &mov_ops, },
106 { .name = "orq", .ops = &mov_ops, },
107 { .name = "pand", .ops = &mov_ops, },
108 { .name = "paddq", .ops = &mov_ops, },
109 { .name = "pcmpeqb", .ops = &mov_ops, },
110 { .name = "por", .ops = &mov_ops, },
111 { .name = "rclb", .ops = &mov_ops, },
112 { .name = "rcll", .ops = &mov_ops, },
113 { .name = "retq", .ops = &ret_ops, },
114 { .name = "sbb", .ops = &mov_ops, },
115 { .name = "sbbl", .ops = &mov_ops, },
116 { .name = "sete", .ops = &mov_ops, },
117 { .name = "sub", .ops = &mov_ops, },
118 { .name = "subl", .ops = &mov_ops, },
119 { .name = "subq", .ops = &mov_ops, },
120 { .name = "subsd", .ops = &mov_ops, },
121 { .name = "subw", .ops = &mov_ops, },
72 { .name = "test", .ops = &mov_ops, }, 122 { .name = "test", .ops = &mov_ops, },
73 { .name = "testb", .ops = &mov_ops, }, 123 { .name = "testb", .ops = &mov_ops, },
74 { .name = "testl", .ops = &mov_ops, }, 124 { .name = "testl", .ops = &mov_ops, },
125 { .name = "ucomisd", .ops = &mov_ops, },
126 { .name = "ucomiss", .ops = &mov_ops, },
127 { .name = "vaddsd", .ops = &mov_ops, },
128 { .name = "vandpd", .ops = &mov_ops, },
129 { .name = "vmovdqa", .ops = &mov_ops, },
130 { .name = "vmovq", .ops = &mov_ops, },
131 { .name = "vmovsd", .ops = &mov_ops, },
132 { .name = "vmulsd", .ops = &mov_ops, },
133 { .name = "vorpd", .ops = &mov_ops, },
134 { .name = "vsubsd", .ops = &mov_ops, },
135 { .name = "vucomisd", .ops = &mov_ops, },
75 { .name = "xadd", .ops = &mov_ops, }, 136 { .name = "xadd", .ops = &mov_ops, },
76 { .name = "xbeginl", .ops = &jump_ops, }, 137 { .name = "xbeginl", .ops = &jump_ops, },
77 { .name = "xbeginq", .ops = &jump_ops, }, 138 { .name = "xbeginq", .ops = &jump_ops, },
78 { .name = "retq", .ops = &ret_ops, }, 139 { .name = "xchg", .ops = &mov_ops, },
140 { .name = "xor", .ops = &mov_ops, },
141 { .name = "xorb", .ops = &mov_ops, },
142 { .name = "xorpd", .ops = &mov_ops, },
143 { .name = "xorps", .ops = &mov_ops, },
79}; 144};
80 145
81static bool x86__ins_is_fused(struct arch *arch, const char *ins1, 146static bool x86__ins_is_fused(struct arch *arch, const char *ins1,
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index 5aef183e2f85..4dfe42666d0c 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -4,379 +4,383 @@
4# The format is: 4# The format is:
5# <number> <abi> <name> <entry point> 5# <number> <abi> <name> <entry point>
6# 6#
7# The __x64_sys_*() stubs are created on-the-fly for sys_*() system calls
8#
7# The abi is "common", "64" or "x32" for this file. 9# The abi is "common", "64" or "x32" for this file.
8# 10#
90 common read sys_read 110 common read __x64_sys_read
101 common write sys_write 121 common write __x64_sys_write
112 common open sys_open 132 common open __x64_sys_open
123 common close sys_close 143 common close __x64_sys_close
134 common stat sys_newstat 154 common stat __x64_sys_newstat
145 common fstat sys_newfstat 165 common fstat __x64_sys_newfstat
156 common lstat sys_newlstat 176 common lstat __x64_sys_newlstat
167 common poll sys_poll 187 common poll __x64_sys_poll
178 common lseek sys_lseek 198 common lseek __x64_sys_lseek
189 common mmap sys_mmap 209 common mmap __x64_sys_mmap
1910 common mprotect sys_mprotect 2110 common mprotect __x64_sys_mprotect
2011 common munmap sys_munmap 2211 common munmap __x64_sys_munmap
2112 common brk sys_brk 2312 common brk __x64_sys_brk
2213 64 rt_sigaction sys_rt_sigaction 2413 64 rt_sigaction __x64_sys_rt_sigaction
2314 common rt_sigprocmask sys_rt_sigprocmask 2514 common rt_sigprocmask __x64_sys_rt_sigprocmask
2415 64 rt_sigreturn sys_rt_sigreturn/ptregs 2615 64 rt_sigreturn __x64_sys_rt_sigreturn/ptregs
2516 64 ioctl sys_ioctl 2716 64 ioctl __x64_sys_ioctl
2617 common pread64 sys_pread64 2817 common pread64 __x64_sys_pread64
2718 common pwrite64 sys_pwrite64 2918 common pwrite64 __x64_sys_pwrite64
2819 64 readv sys_readv 3019 64 readv __x64_sys_readv
2920 64 writev sys_writev 3120 64 writev __x64_sys_writev
3021 common access sys_access 3221 common access __x64_sys_access
3122 common pipe sys_pipe 3322 common pipe __x64_sys_pipe
3223 common select sys_select 3423 common select __x64_sys_select
3324 common sched_yield sys_sched_yield 3524 common sched_yield __x64_sys_sched_yield
3425 common mremap sys_mremap 3625 common mremap __x64_sys_mremap
3526 common msync sys_msync 3726 common msync __x64_sys_msync
3627 common mincore sys_mincore 3827 common mincore __x64_sys_mincore
3728 common madvise sys_madvise 3928 common madvise __x64_sys_madvise
3829 common shmget sys_shmget 4029 common shmget __x64_sys_shmget
3930 common shmat sys_shmat 4130 common shmat __x64_sys_shmat
4031 common shmctl sys_shmctl 4231 common shmctl __x64_sys_shmctl
4132 common dup sys_dup 4332 common dup __x64_sys_dup
4233 common dup2 sys_dup2 4433 common dup2 __x64_sys_dup2
4334 common pause sys_pause 4534 common pause __x64_sys_pause
4435 common nanosleep sys_nanosleep 4635 common nanosleep __x64_sys_nanosleep
4536 common getitimer sys_getitimer 4736 common getitimer __x64_sys_getitimer
4637 common alarm sys_alarm 4837 common alarm __x64_sys_alarm
4738 common setitimer sys_setitimer 4938 common setitimer __x64_sys_setitimer
4839 common getpid sys_getpid 5039 common getpid __x64_sys_getpid
4940 common sendfile sys_sendfile64 5140 common sendfile __x64_sys_sendfile64
5041 common socket sys_socket 5241 common socket __x64_sys_socket
5142 common connect sys_connect 5342 common connect __x64_sys_connect
5243 common accept sys_accept 5443 common accept __x64_sys_accept
5344 common sendto sys_sendto 5544 common sendto __x64_sys_sendto
5445 64 recvfrom sys_recvfrom 5645 64 recvfrom __x64_sys_recvfrom
5546 64 sendmsg sys_sendmsg 5746 64 sendmsg __x64_sys_sendmsg
5647 64 recvmsg sys_recvmsg 5847 64 recvmsg __x64_sys_recvmsg
5748 common shutdown sys_shutdown 5948 common shutdown __x64_sys_shutdown
5849 common bind sys_bind 6049 common bind __x64_sys_bind
5950 common listen sys_listen 6150 common listen __x64_sys_listen
6051 common getsockname sys_getsockname 6251 common getsockname __x64_sys_getsockname
6152 common getpeername sys_getpeername 6352 common getpeername __x64_sys_getpeername
6253 common socketpair sys_socketpair 6453 common socketpair __x64_sys_socketpair
6354 64 setsockopt sys_setsockopt 6554 64 setsockopt __x64_sys_setsockopt
6455 64 getsockopt sys_getsockopt 6655 64 getsockopt __x64_sys_getsockopt
6556 common clone sys_clone/ptregs 6756 common clone __x64_sys_clone/ptregs
6657 common fork sys_fork/ptregs 6857 common fork __x64_sys_fork/ptregs
6758 common vfork sys_vfork/ptregs 6958 common vfork __x64_sys_vfork/ptregs
6859 64 execve sys_execve/ptregs 7059 64 execve __x64_sys_execve/ptregs
6960 common exit sys_exit 7160 common exit __x64_sys_exit
7061 common wait4 sys_wait4 7261 common wait4 __x64_sys_wait4
7162 common kill sys_kill 7362 common kill __x64_sys_kill
7263 common uname sys_newuname 7463 common uname __x64_sys_newuname
7364 common semget sys_semget 7564 common semget __x64_sys_semget
7465 common semop sys_semop 7665 common semop __x64_sys_semop
7566 common semctl sys_semctl 7766 common semctl __x64_sys_semctl
7667 common shmdt sys_shmdt 7867 common shmdt __x64_sys_shmdt
7768 common msgget sys_msgget 7968 common msgget __x64_sys_msgget
7869 common msgsnd sys_msgsnd 8069 common msgsnd __x64_sys_msgsnd
7970 common msgrcv sys_msgrcv 8170 common msgrcv __x64_sys_msgrcv
8071 common msgctl sys_msgctl 8271 common msgctl __x64_sys_msgctl
8172 common fcntl sys_fcntl 8372 common fcntl __x64_sys_fcntl
8273 common flock sys_flock 8473 common flock __x64_sys_flock
8374 common fsync sys_fsync 8574 common fsync __x64_sys_fsync
8475 common fdatasync sys_fdatasync 8675 common fdatasync __x64_sys_fdatasync
8576 common truncate sys_truncate 8776 common truncate __x64_sys_truncate
8677 common ftruncate sys_ftruncate 8877 common ftruncate __x64_sys_ftruncate
8778 common getdents sys_getdents 8978 common getdents __x64_sys_getdents
8879 common getcwd sys_getcwd 9079 common getcwd __x64_sys_getcwd
8980 common chdir sys_chdir 9180 common chdir __x64_sys_chdir
9081 common fchdir sys_fchdir 9281 common fchdir __x64_sys_fchdir
9182 common rename sys_rename 9382 common rename __x64_sys_rename
9283 common mkdir sys_mkdir 9483 common mkdir __x64_sys_mkdir
9384 common rmdir sys_rmdir 9584 common rmdir __x64_sys_rmdir
9485 common creat sys_creat 9685 common creat __x64_sys_creat
9586 common link sys_link 9786 common link __x64_sys_link
9687 common unlink sys_unlink 9887 common unlink __x64_sys_unlink
9788 common symlink sys_symlink 9988 common symlink __x64_sys_symlink
9889 common readlink sys_readlink 10089 common readlink __x64_sys_readlink
9990 common chmod sys_chmod 10190 common chmod __x64_sys_chmod
10091 common fchmod sys_fchmod 10291 common fchmod __x64_sys_fchmod
10192 common chown sys_chown 10392 common chown __x64_sys_chown
10293 common fchown sys_fchown 10493 common fchown __x64_sys_fchown
10394 common lchown sys_lchown 10594 common lchown __x64_sys_lchown
10495 common umask sys_umask 10695 common umask __x64_sys_umask
10596 common gettimeofday sys_gettimeofday 10796 common gettimeofday __x64_sys_gettimeofday
10697 common getrlimit sys_getrlimit 10897 common getrlimit __x64_sys_getrlimit
10798 common getrusage sys_getrusage 10998 common getrusage __x64_sys_getrusage
10899 common sysinfo sys_sysinfo 11099 common sysinfo __x64_sys_sysinfo
109100 common times sys_times 111100 common times __x64_sys_times
110101 64 ptrace sys_ptrace 112101 64 ptrace __x64_sys_ptrace
111102 common getuid sys_getuid 113102 common getuid __x64_sys_getuid
112103 common syslog sys_syslog 114103 common syslog __x64_sys_syslog
113104 common getgid sys_getgid 115104 common getgid __x64_sys_getgid
114105 common setuid sys_setuid 116105 common setuid __x64_sys_setuid
115106 common setgid sys_setgid 117106 common setgid __x64_sys_setgid
116107 common geteuid sys_geteuid 118107 common geteuid __x64_sys_geteuid
117108 common getegid sys_getegid 119108 common getegid __x64_sys_getegid
118109 common setpgid sys_setpgid 120109 common setpgid __x64_sys_setpgid
119110 common getppid sys_getppid 121110 common getppid __x64_sys_getppid
120111 common getpgrp sys_getpgrp 122111 common getpgrp __x64_sys_getpgrp
121112 common setsid sys_setsid 123112 common setsid __x64_sys_setsid
122113 common setreuid sys_setreuid 124113 common setreuid __x64_sys_setreuid
123114 common setregid sys_setregid 125114 common setregid __x64_sys_setregid
124115 common getgroups sys_getgroups 126115 common getgroups __x64_sys_getgroups
125116 common setgroups sys_setgroups 127116 common setgroups __x64_sys_setgroups
126117 common setresuid sys_setresuid 128117 common setresuid __x64_sys_setresuid
127118 common getresuid sys_getresuid 129118 common getresuid __x64_sys_getresuid
128119 common setresgid sys_setresgid 130119 common setresgid __x64_sys_setresgid
129120 common getresgid sys_getresgid 131120 common getresgid __x64_sys_getresgid
130121 common getpgid sys_getpgid 132121 common getpgid __x64_sys_getpgid
131122 common setfsuid sys_setfsuid 133122 common setfsuid __x64_sys_setfsuid
132123 common setfsgid sys_setfsgid 134123 common setfsgid __x64_sys_setfsgid
133124 common getsid sys_getsid 135124 common getsid __x64_sys_getsid
134125 common capget sys_capget 136125 common capget __x64_sys_capget
135126 common capset sys_capset 137126 common capset __x64_sys_capset
136127 64 rt_sigpending sys_rt_sigpending 138127 64 rt_sigpending __x64_sys_rt_sigpending
137128 64 rt_sigtimedwait sys_rt_sigtimedwait 139128 64 rt_sigtimedwait __x64_sys_rt_sigtimedwait
138129 64 rt_sigqueueinfo sys_rt_sigqueueinfo 140129 64 rt_sigqueueinfo __x64_sys_rt_sigqueueinfo
139130 common rt_sigsuspend sys_rt_sigsuspend 141130 common rt_sigsuspend __x64_sys_rt_sigsuspend
140131 64 sigaltstack sys_sigaltstack 142131 64 sigaltstack __x64_sys_sigaltstack
141132 common utime sys_utime 143132 common utime __x64_sys_utime
142133 common mknod sys_mknod 144133 common mknod __x64_sys_mknod
143134 64 uselib 145134 64 uselib
144135 common personality sys_personality 146135 common personality __x64_sys_personality
145136 common ustat sys_ustat 147136 common ustat __x64_sys_ustat
146137 common statfs sys_statfs 148137 common statfs __x64_sys_statfs
147138 common fstatfs sys_fstatfs 149138 common fstatfs __x64_sys_fstatfs
148139 common sysfs sys_sysfs 150139 common sysfs __x64_sys_sysfs
149140 common getpriority sys_getpriority 151140 common getpriority __x64_sys_getpriority
150141 common setpriority sys_setpriority 152141 common setpriority __x64_sys_setpriority
151142 common sched_setparam sys_sched_setparam 153142 common sched_setparam __x64_sys_sched_setparam
152143 common sched_getparam sys_sched_getparam 154143 common sched_getparam __x64_sys_sched_getparam
153144 common sched_setscheduler sys_sched_setscheduler 155144 common sched_setscheduler __x64_sys_sched_setscheduler
154145 common sched_getscheduler sys_sched_getscheduler 156145 common sched_getscheduler __x64_sys_sched_getscheduler
155146 common sched_get_priority_max sys_sched_get_priority_max 157146 common sched_get_priority_max __x64_sys_sched_get_priority_max
156147 common sched_get_priority_min sys_sched_get_priority_min 158147 common sched_get_priority_min __x64_sys_sched_get_priority_min
157148 common sched_rr_get_interval sys_sched_rr_get_interval 159148 common sched_rr_get_interval __x64_sys_sched_rr_get_interval
158149 common mlock sys_mlock 160149 common mlock __x64_sys_mlock
159150 common munlock sys_munlock 161150 common munlock __x64_sys_munlock
160151 common mlockall sys_mlockall 162151 common mlockall __x64_sys_mlockall
161152 common munlockall sys_munlockall 163152 common munlockall __x64_sys_munlockall
162153 common vhangup sys_vhangup 164153 common vhangup __x64_sys_vhangup
163154 common modify_ldt sys_modify_ldt 165154 common modify_ldt __x64_sys_modify_ldt
164155 common pivot_root sys_pivot_root 166155 common pivot_root __x64_sys_pivot_root
165156 64 _sysctl sys_sysctl 167156 64 _sysctl __x64_sys_sysctl
166157 common prctl sys_prctl 168157 common prctl __x64_sys_prctl
167158 common arch_prctl sys_arch_prctl 169158 common arch_prctl __x64_sys_arch_prctl
168159 common adjtimex sys_adjtimex 170159 common adjtimex __x64_sys_adjtimex
169160 common setrlimit sys_setrlimit 171160 common setrlimit __x64_sys_setrlimit
170161 common chroot sys_chroot 172161 common chroot __x64_sys_chroot
171162 common sync sys_sync 173162 common sync __x64_sys_sync
172163 common acct sys_acct 174163 common acct __x64_sys_acct
173164 common settimeofday sys_settimeofday 175164 common settimeofday __x64_sys_settimeofday
174165 common mount sys_mount 176165 common mount __x64_sys_mount
175166 common umount2 sys_umount 177166 common umount2 __x64_sys_umount
176167 common swapon sys_swapon 178167 common swapon __x64_sys_swapon
177168 common swapoff sys_swapoff 179168 common swapoff __x64_sys_swapoff
178169 common reboot sys_reboot 180169 common reboot __x64_sys_reboot
179170 common sethostname sys_sethostname 181170 common sethostname __x64_sys_sethostname
180171 common setdomainname sys_setdomainname 182171 common setdomainname __x64_sys_setdomainname
181172 common iopl sys_iopl/ptregs 183172 common iopl __x64_sys_iopl/ptregs
182173 common ioperm sys_ioperm 184173 common ioperm __x64_sys_ioperm
183174 64 create_module 185174 64 create_module
184175 common init_module sys_init_module 186175 common init_module __x64_sys_init_module
185176 common delete_module sys_delete_module 187176 common delete_module __x64_sys_delete_module
186177 64 get_kernel_syms 188177 64 get_kernel_syms
187178 64 query_module 189178 64 query_module
188179 common quotactl sys_quotactl 190179 common quotactl __x64_sys_quotactl
189180 64 nfsservctl 191180 64 nfsservctl
190181 common getpmsg 192181 common getpmsg
191182 common putpmsg 193182 common putpmsg
192183 common afs_syscall 194183 common afs_syscall
193184 common tuxcall 195184 common tuxcall
194185 common security 196185 common security
195186 common gettid sys_gettid 197186 common gettid __x64_sys_gettid
196187 common readahead sys_readahead 198187 common readahead __x64_sys_readahead
197188 common setxattr sys_setxattr 199188 common setxattr __x64_sys_setxattr
198189 common lsetxattr sys_lsetxattr 200189 common lsetxattr __x64_sys_lsetxattr
199190 common fsetxattr sys_fsetxattr 201190 common fsetxattr __x64_sys_fsetxattr
200191 common getxattr sys_getxattr 202191 common getxattr __x64_sys_getxattr
201192 common lgetxattr sys_lgetxattr 203192 common lgetxattr __x64_sys_lgetxattr
202193 common fgetxattr sys_fgetxattr 204193 common fgetxattr __x64_sys_fgetxattr
203194 common listxattr sys_listxattr 205194 common listxattr __x64_sys_listxattr
204195 common llistxattr sys_llistxattr 206195 common llistxattr __x64_sys_llistxattr
205196 common flistxattr sys_flistxattr 207196 common flistxattr __x64_sys_flistxattr
206197 common removexattr sys_removexattr 208197 common removexattr __x64_sys_removexattr
207198 common lremovexattr sys_lremovexattr 209198 common lremovexattr __x64_sys_lremovexattr
208199 common fremovexattr sys_fremovexattr 210199 common fremovexattr __x64_sys_fremovexattr
209200 common tkill sys_tkill 211200 common tkill __x64_sys_tkill
210201 common time sys_time 212201 common time __x64_sys_time
211202 common futex sys_futex 213202 common futex __x64_sys_futex
212203 common sched_setaffinity sys_sched_setaffinity 214203 common sched_setaffinity __x64_sys_sched_setaffinity
213204 common sched_getaffinity sys_sched_getaffinity 215204 common sched_getaffinity __x64_sys_sched_getaffinity
214205 64 set_thread_area 216205 64 set_thread_area
215206 64 io_setup sys_io_setup 217206 64 io_setup __x64_sys_io_setup
216207 common io_destroy sys_io_destroy 218207 common io_destroy __x64_sys_io_destroy
217208 common io_getevents sys_io_getevents 219208 common io_getevents __x64_sys_io_getevents
218209 64 io_submit sys_io_submit 220209 64 io_submit __x64_sys_io_submit
219210 common io_cancel sys_io_cancel 221210 common io_cancel __x64_sys_io_cancel
220211 64 get_thread_area 222211 64 get_thread_area
221212 common lookup_dcookie sys_lookup_dcookie 223212 common lookup_dcookie __x64_sys_lookup_dcookie
222213 common epoll_create sys_epoll_create 224213 common epoll_create __x64_sys_epoll_create
223214 64 epoll_ctl_old 225214 64 epoll_ctl_old
224215 64 epoll_wait_old 226215 64 epoll_wait_old
225216 common remap_file_pages sys_remap_file_pages 227216 common remap_file_pages __x64_sys_remap_file_pages
226217 common getdents64 sys_getdents64 228217 common getdents64 __x64_sys_getdents64
227218 common set_tid_address sys_set_tid_address 229218 common set_tid_address __x64_sys_set_tid_address
228219 common restart_syscall sys_restart_syscall 230219 common restart_syscall __x64_sys_restart_syscall
229220 common semtimedop sys_semtimedop 231220 common semtimedop __x64_sys_semtimedop
230221 common fadvise64 sys_fadvise64 232221 common fadvise64 __x64_sys_fadvise64
231222 64 timer_create sys_timer_create 233222 64 timer_create __x64_sys_timer_create
232223 common timer_settime sys_timer_settime 234223 common timer_settime __x64_sys_timer_settime
233224 common timer_gettime sys_timer_gettime 235224 common timer_gettime __x64_sys_timer_gettime
234225 common timer_getoverrun sys_timer_getoverrun 236225 common timer_getoverrun __x64_sys_timer_getoverrun
235226 common timer_delete sys_timer_delete 237226 common timer_delete __x64_sys_timer_delete
236227 common clock_settime sys_clock_settime 238227 common clock_settime __x64_sys_clock_settime
237228 common clock_gettime sys_clock_gettime 239228 common clock_gettime __x64_sys_clock_gettime
238229 common clock_getres sys_clock_getres 240229 common clock_getres __x64_sys_clock_getres
239230 common clock_nanosleep sys_clock_nanosleep 241230 common clock_nanosleep __x64_sys_clock_nanosleep
240231 common exit_group sys_exit_group 242231 common exit_group __x64_sys_exit_group
241232 common epoll_wait sys_epoll_wait 243232 common epoll_wait __x64_sys_epoll_wait
242233 common epoll_ctl sys_epoll_ctl 244233 common epoll_ctl __x64_sys_epoll_ctl
243234 common tgkill sys_tgkill 245234 common tgkill __x64_sys_tgkill
244235 common utimes sys_utimes 246235 common utimes __x64_sys_utimes
245236 64 vserver 247236 64 vserver
246237 common mbind sys_mbind 248237 common mbind __x64_sys_mbind
247238 common set_mempolicy sys_set_mempolicy 249238 common set_mempolicy __x64_sys_set_mempolicy
248239 common get_mempolicy sys_get_mempolicy 250239 common get_mempolicy __x64_sys_get_mempolicy
249240 common mq_open sys_mq_open 251240 common mq_open __x64_sys_mq_open
250241 common mq_unlink sys_mq_unlink 252241 common mq_unlink __x64_sys_mq_unlink
251242 common mq_timedsend sys_mq_timedsend 253242 common mq_timedsend __x64_sys_mq_timedsend
252243 common mq_timedreceive sys_mq_timedreceive 254243 common mq_timedreceive __x64_sys_mq_timedreceive
253244 64 mq_notify sys_mq_notify 255244 64 mq_notify __x64_sys_mq_notify
254245 common mq_getsetattr sys_mq_getsetattr 256245 common mq_getsetattr __x64_sys_mq_getsetattr
255246 64 kexec_load sys_kexec_load 257246 64 kexec_load __x64_sys_kexec_load
256247 64 waitid sys_waitid 258247 64 waitid __x64_sys_waitid
257248 common add_key sys_add_key 259248 common add_key __x64_sys_add_key
258249 common request_key sys_request_key 260249 common request_key __x64_sys_request_key
259250 common keyctl sys_keyctl 261250 common keyctl __x64_sys_keyctl
260251 common ioprio_set sys_ioprio_set 262251 common ioprio_set __x64_sys_ioprio_set
261252 common ioprio_get sys_ioprio_get 263252 common ioprio_get __x64_sys_ioprio_get
262253 common inotify_init sys_inotify_init 264253 common inotify_init __x64_sys_inotify_init
263254 common inotify_add_watch sys_inotify_add_watch 265254 common inotify_add_watch __x64_sys_inotify_add_watch
264255 common inotify_rm_watch sys_inotify_rm_watch 266255 common inotify_rm_watch __x64_sys_inotify_rm_watch
265256 common migrate_pages sys_migrate_pages 267256 common migrate_pages __x64_sys_migrate_pages
266257 common openat sys_openat 268257 common openat __x64_sys_openat
267258 common mkdirat sys_mkdirat 269258 common mkdirat __x64_sys_mkdirat
268259 common mknodat sys_mknodat 270259 common mknodat __x64_sys_mknodat
269260 common fchownat sys_fchownat 271260 common fchownat __x64_sys_fchownat
270261 common futimesat sys_futimesat 272261 common futimesat __x64_sys_futimesat
271262 common newfstatat sys_newfstatat 273262 common newfstatat __x64_sys_newfstatat
272263 common unlinkat sys_unlinkat 274263 common unlinkat __x64_sys_unlinkat
273264 common renameat sys_renameat 275264 common renameat __x64_sys_renameat
274265 common linkat sys_linkat 276265 common linkat __x64_sys_linkat
275266 common symlinkat sys_symlinkat 277266 common symlinkat __x64_sys_symlinkat
276267 common readlinkat sys_readlinkat 278267 common readlinkat __x64_sys_readlinkat
277268 common fchmodat sys_fchmodat 279268 common fchmodat __x64_sys_fchmodat
278269 common faccessat sys_faccessat 280269 common faccessat __x64_sys_faccessat
279270 common pselect6 sys_pselect6 281270 common pselect6 __x64_sys_pselect6
280271 common ppoll sys_ppoll 282271 common ppoll __x64_sys_ppoll
281272 common unshare sys_unshare 283272 common unshare __x64_sys_unshare
282273 64 set_robust_list sys_set_robust_list 284273 64 set_robust_list __x64_sys_set_robust_list
283274 64 get_robust_list sys_get_robust_list 285274 64 get_robust_list __x64_sys_get_robust_list
284275 common splice sys_splice 286275 common splice __x64_sys_splice
285276 common tee sys_tee 287276 common tee __x64_sys_tee
286277 common sync_file_range sys_sync_file_range 288277 common sync_file_range __x64_sys_sync_file_range
287278 64 vmsplice sys_vmsplice 289278 64 vmsplice __x64_sys_vmsplice
288279 64 move_pages sys_move_pages 290279 64 move_pages __x64_sys_move_pages
289280 common utimensat sys_utimensat 291280 common utimensat __x64_sys_utimensat
290281 common epoll_pwait sys_epoll_pwait 292281 common epoll_pwait __x64_sys_epoll_pwait
291282 common signalfd sys_signalfd 293282 common signalfd __x64_sys_signalfd
292283 common timerfd_create sys_timerfd_create 294283 common timerfd_create __x64_sys_timerfd_create
293284 common eventfd sys_eventfd 295284 common eventfd __x64_sys_eventfd
294285 common fallocate sys_fallocate 296285 common fallocate __x64_sys_fallocate
295286 common timerfd_settime sys_timerfd_settime 297286 common timerfd_settime __x64_sys_timerfd_settime
296287 common timerfd_gettime sys_timerfd_gettime 298287 common timerfd_gettime __x64_sys_timerfd_gettime
297288 common accept4 sys_accept4 299288 common accept4 __x64_sys_accept4
298289 common signalfd4 sys_signalfd4 300289 common signalfd4 __x64_sys_signalfd4
299290 common eventfd2 sys_eventfd2 301290 common eventfd2 __x64_sys_eventfd2
300291 common epoll_create1 sys_epoll_create1 302291 common epoll_create1 __x64_sys_epoll_create1
301292 common dup3 sys_dup3 303292 common dup3 __x64_sys_dup3
302293 common pipe2 sys_pipe2 304293 common pipe2 __x64_sys_pipe2
303294 common inotify_init1 sys_inotify_init1 305294 common inotify_init1 __x64_sys_inotify_init1
304295 64 preadv sys_preadv 306295 64 preadv __x64_sys_preadv
305296 64 pwritev sys_pwritev 307296 64 pwritev __x64_sys_pwritev
306297 64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo 308297 64 rt_tgsigqueueinfo __x64_sys_rt_tgsigqueueinfo
307298 common perf_event_open sys_perf_event_open 309298 common perf_event_open __x64_sys_perf_event_open
308299 64 recvmmsg sys_recvmmsg 310299 64 recvmmsg __x64_sys_recvmmsg
309300 common fanotify_init sys_fanotify_init 311300 common fanotify_init __x64_sys_fanotify_init
310301 common fanotify_mark sys_fanotify_mark 312301 common fanotify_mark __x64_sys_fanotify_mark
311302 common prlimit64 sys_prlimit64 313302 common prlimit64 __x64_sys_prlimit64
312303 common name_to_handle_at sys_name_to_handle_at 314303 common name_to_handle_at __x64_sys_name_to_handle_at
313304 common open_by_handle_at sys_open_by_handle_at 315304 common open_by_handle_at __x64_sys_open_by_handle_at
314305 common clock_adjtime sys_clock_adjtime 316305 common clock_adjtime __x64_sys_clock_adjtime
315306 common syncfs sys_syncfs 317306 common syncfs __x64_sys_syncfs
316307 64 sendmmsg sys_sendmmsg 318307 64 sendmmsg __x64_sys_sendmmsg
317308 common setns sys_setns 319308 common setns __x64_sys_setns
318309 common getcpu sys_getcpu 320309 common getcpu __x64_sys_getcpu
319310 64 process_vm_readv sys_process_vm_readv 321310 64 process_vm_readv __x64_sys_process_vm_readv
320311 64 process_vm_writev sys_process_vm_writev 322311 64 process_vm_writev __x64_sys_process_vm_writev
321312 common kcmp sys_kcmp 323312 common kcmp __x64_sys_kcmp
322313 common finit_module sys_finit_module 324313 common finit_module __x64_sys_finit_module
323314 common sched_setattr sys_sched_setattr 325314 common sched_setattr __x64_sys_sched_setattr
324315 common sched_getattr sys_sched_getattr 326315 common sched_getattr __x64_sys_sched_getattr
325316 common renameat2 sys_renameat2 327316 common renameat2 __x64_sys_renameat2
326317 common seccomp sys_seccomp 328317 common seccomp __x64_sys_seccomp
327318 common getrandom sys_getrandom 329318 common getrandom __x64_sys_getrandom
328319 common memfd_create sys_memfd_create 330319 common memfd_create __x64_sys_memfd_create
329320 common kexec_file_load sys_kexec_file_load 331320 common kexec_file_load __x64_sys_kexec_file_load
330321 common bpf sys_bpf 332321 common bpf __x64_sys_bpf
331322 64 execveat sys_execveat/ptregs 333322 64 execveat __x64_sys_execveat/ptregs
332323 common userfaultfd sys_userfaultfd 334323 common userfaultfd __x64_sys_userfaultfd
333324 common membarrier sys_membarrier 335324 common membarrier __x64_sys_membarrier
334325 common mlock2 sys_mlock2 336325 common mlock2 __x64_sys_mlock2
335326 common copy_file_range sys_copy_file_range 337326 common copy_file_range __x64_sys_copy_file_range
336327 64 preadv2 sys_preadv2 338327 64 preadv2 __x64_sys_preadv2
337328 64 pwritev2 sys_pwritev2 339328 64 pwritev2 __x64_sys_pwritev2
338329 common pkey_mprotect sys_pkey_mprotect 340329 common pkey_mprotect __x64_sys_pkey_mprotect
339330 common pkey_alloc sys_pkey_alloc 341330 common pkey_alloc __x64_sys_pkey_alloc
340331 common pkey_free sys_pkey_free 342331 common pkey_free __x64_sys_pkey_free
341332 common statx sys_statx 343332 common statx __x64_sys_statx
342 344
343# 345#
344# x32-specific system call numbers start at 512 to avoid cache impact 346# x32-specific system call numbers start at 512 to avoid cache impact
345# for native 64-bit operation. 347# for native 64-bit operation. The __x32_compat_sys stubs are created
348# on-the-fly for compat_sys_*() compatibility system calls if X86_X32
349# is defined.
346# 350#
347512 x32 rt_sigaction compat_sys_rt_sigaction 351512 x32 rt_sigaction __x32_compat_sys_rt_sigaction
348513 x32 rt_sigreturn sys32_x32_rt_sigreturn 352513 x32 rt_sigreturn sys32_x32_rt_sigreturn
349514 x32 ioctl compat_sys_ioctl 353514 x32 ioctl __x32_compat_sys_ioctl
350515 x32 readv compat_sys_readv 354515 x32 readv __x32_compat_sys_readv
351516 x32 writev compat_sys_writev 355516 x32 writev __x32_compat_sys_writev
352517 x32 recvfrom compat_sys_recvfrom 356517 x32 recvfrom __x32_compat_sys_recvfrom
353518 x32 sendmsg compat_sys_sendmsg 357518 x32 sendmsg __x32_compat_sys_sendmsg
354519 x32 recvmsg compat_sys_recvmsg 358519 x32 recvmsg __x32_compat_sys_recvmsg
355520 x32 execve compat_sys_execve/ptregs 359520 x32 execve __x32_compat_sys_execve/ptregs
356521 x32 ptrace compat_sys_ptrace 360521 x32 ptrace __x32_compat_sys_ptrace
357522 x32 rt_sigpending compat_sys_rt_sigpending 361522 x32 rt_sigpending __x32_compat_sys_rt_sigpending
358523 x32 rt_sigtimedwait compat_sys_rt_sigtimedwait 362523 x32 rt_sigtimedwait __x32_compat_sys_rt_sigtimedwait
359524 x32 rt_sigqueueinfo compat_sys_rt_sigqueueinfo 363524 x32 rt_sigqueueinfo __x32_compat_sys_rt_sigqueueinfo
360525 x32 sigaltstack compat_sys_sigaltstack 364525 x32 sigaltstack __x32_compat_sys_sigaltstack
361526 x32 timer_create compat_sys_timer_create 365526 x32 timer_create __x32_compat_sys_timer_create
362527 x32 mq_notify compat_sys_mq_notify 366527 x32 mq_notify __x32_compat_sys_mq_notify
363528 x32 kexec_load compat_sys_kexec_load 367528 x32 kexec_load __x32_compat_sys_kexec_load
364529 x32 waitid compat_sys_waitid 368529 x32 waitid __x32_compat_sys_waitid
365530 x32 set_robust_list compat_sys_set_robust_list 369530 x32 set_robust_list __x32_compat_sys_set_robust_list
366531 x32 get_robust_list compat_sys_get_robust_list 370531 x32 get_robust_list __x32_compat_sys_get_robust_list
367532 x32 vmsplice compat_sys_vmsplice 371532 x32 vmsplice __x32_compat_sys_vmsplice
368533 x32 move_pages compat_sys_move_pages 372533 x32 move_pages __x32_compat_sys_move_pages
369534 x32 preadv compat_sys_preadv64 373534 x32 preadv __x32_compat_sys_preadv64
370535 x32 pwritev compat_sys_pwritev64 374535 x32 pwritev __x32_compat_sys_pwritev64
371536 x32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo 375536 x32 rt_tgsigqueueinfo __x32_compat_sys_rt_tgsigqueueinfo
372537 x32 recvmmsg compat_sys_recvmmsg 376537 x32 recvmmsg __x32_compat_sys_recvmmsg
373538 x32 sendmmsg compat_sys_sendmmsg 377538 x32 sendmmsg __x32_compat_sys_sendmmsg
374539 x32 process_vm_readv compat_sys_process_vm_readv 378539 x32 process_vm_readv __x32_compat_sys_process_vm_readv
375540 x32 process_vm_writev compat_sys_process_vm_writev 379540 x32 process_vm_writev __x32_compat_sys_process_vm_writev
376541 x32 setsockopt compat_sys_setsockopt 380541 x32 setsockopt __x32_compat_sys_setsockopt
377542 x32 getsockopt compat_sys_getsockopt 381542 x32 getsockopt __x32_compat_sys_getsockopt
378543 x32 io_setup compat_sys_io_setup 382543 x32 io_setup __x32_compat_sys_io_setup
379544 x32 io_submit compat_sys_io_submit 383544 x32 io_submit __x32_compat_sys_io_submit
380545 x32 execveat compat_sys_execveat/ptregs 384545 x32 execveat __x32_compat_sys_execveat/ptregs
381546 x32 preadv2 compat_sys_preadv64v2 385546 x32 preadv2 __x32_compat_sys_preadv64v2
382547 x32 pwritev2 compat_sys_pwritev64v2 386547 x32 pwritev2 __x32_compat_sys_pwritev64v2
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 4aca13f23b9d..1c41b4eaf73c 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -439,7 +439,7 @@ int cmd_help(int argc, const char **argv)
439#ifdef HAVE_LIBELF_SUPPORT 439#ifdef HAVE_LIBELF_SUPPORT
440 "probe", 440 "probe",
441#endif 441#endif
442#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE) 442#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT)
443 "trace", 443 "trace",
444#endif 444#endif
445 NULL }; 445 NULL };
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 506564651cda..57393e94d156 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -83,7 +83,7 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
83 }; 83 };
84 84
85 argc = parse_options(argc, argv, options, record_mem_usage, 85 argc = parse_options(argc, argv, options, record_mem_usage,
86 PARSE_OPT_STOP_AT_NON_OPTION); 86 PARSE_OPT_KEEP_UNKNOWN);
87 87
88 rec_argc = argc + 9; /* max number of arguments */ 88 rec_argc = argc + 9; /* max number of arguments */
89 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 89 rec_argv = calloc(rec_argc + 1, sizeof(char *));
@@ -436,7 +436,7 @@ int cmd_mem(int argc, const char **argv)
436 } 436 }
437 437
438 argc = parse_options_subcommand(argc, argv, mem_options, mem_subcommands, 438 argc = parse_options_subcommand(argc, argv, mem_options, mem_subcommands,
439 mem_usage, PARSE_OPT_STOP_AT_NON_OPTION); 439 mem_usage, PARSE_OPT_KEEP_UNKNOWN);
440 440
441 if (!argc || !(strncmp(argv[0], "rec", 3) || mem.operation)) 441 if (!argc || !(strncmp(argv[0], "rec", 3) || mem.operation))
442 usage_with_options(mem_usage, mem_options); 442 usage_with_options(mem_usage, mem_options);
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 313c42423393..e0a9845b6cbc 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -657,8 +657,11 @@ static int perf_sample__fprintf_start(struct perf_sample *sample,
657 break; 657 break;
658 case PERF_RECORD_SWITCH: 658 case PERF_RECORD_SWITCH:
659 case PERF_RECORD_SWITCH_CPU_WIDE: 659 case PERF_RECORD_SWITCH_CPU_WIDE:
660 if (has(SWITCH_OUT)) 660 if (has(SWITCH_OUT)) {
661 ret += fprintf(fp, "S"); 661 ret += fprintf(fp, "S");
662 if (sample->misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT)
663 ret += fprintf(fp, "p");
664 }
662 default: 665 default:
663 break; 666 break;
664 } 667 }
@@ -2801,11 +2804,11 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
2801 for_each_lang(scripts_path, scripts_dir, lang_dirent) { 2804 for_each_lang(scripts_path, scripts_dir, lang_dirent) {
2802 scnprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path, 2805 scnprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path,
2803 lang_dirent->d_name); 2806 lang_dirent->d_name);
2804#ifdef NO_LIBPERL 2807#ifndef HAVE_LIBPERL_SUPPORT
2805 if (strstr(lang_path, "perl")) 2808 if (strstr(lang_path, "perl"))
2806 continue; 2809 continue;
2807#endif 2810#endif
2808#ifdef NO_LIBPYTHON 2811#ifndef HAVE_LIBPYTHON_SUPPORT
2809 if (strstr(lang_path, "python")) 2812 if (strstr(lang_path, "python"))
2810 continue; 2813 continue;
2811#endif 2814#endif
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index f5c454855908..f17dc601b0f3 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -172,6 +172,7 @@ static bool interval_count;
172static const char *output_name; 172static const char *output_name;
173static int output_fd; 173static int output_fd;
174static int print_free_counters_hint; 174static int print_free_counters_hint;
175static int print_mixed_hw_group_error;
175 176
176struct perf_stat { 177struct perf_stat {
177 bool record; 178 bool record;
@@ -1126,6 +1127,30 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
1126 fprintf(output, "%s%s", csv_sep, evsel->cgrp->name); 1127 fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
1127} 1128}
1128 1129
1130static bool is_mixed_hw_group(struct perf_evsel *counter)
1131{
1132 struct perf_evlist *evlist = counter->evlist;
1133 u32 pmu_type = counter->attr.type;
1134 struct perf_evsel *pos;
1135
1136 if (counter->nr_members < 2)
1137 return false;
1138
1139 evlist__for_each_entry(evlist, pos) {
1140 /* software events can be part of any hardware group */
1141 if (pos->attr.type == PERF_TYPE_SOFTWARE)
1142 continue;
1143 if (pmu_type == PERF_TYPE_SOFTWARE) {
1144 pmu_type = pos->attr.type;
1145 continue;
1146 }
1147 if (pmu_type != pos->attr.type)
1148 return true;
1149 }
1150
1151 return false;
1152}
1153
1129static void printout(int id, int nr, struct perf_evsel *counter, double uval, 1154static void printout(int id, int nr, struct perf_evsel *counter, double uval,
1130 char *prefix, u64 run, u64 ena, double noise, 1155 char *prefix, u64 run, u64 ena, double noise,
1131 struct runtime_stat *st) 1156 struct runtime_stat *st)
@@ -1178,8 +1203,11 @@ static void printout(int id, int nr, struct perf_evsel *counter, double uval,
1178 counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED, 1203 counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
1179 csv_sep); 1204 csv_sep);
1180 1205
1181 if (counter->supported) 1206 if (counter->supported) {
1182 print_free_counters_hint = 1; 1207 print_free_counters_hint = 1;
1208 if (is_mixed_hw_group(counter))
1209 print_mixed_hw_group_error = 1;
1210 }
1183 1211
1184 fprintf(stat_config.output, "%-*s%s", 1212 fprintf(stat_config.output, "%-*s%s",
1185 csv_output ? 0 : unit_width, 1213 csv_output ? 0 : unit_width,
@@ -1256,7 +1284,8 @@ static void uniquify_event_name(struct perf_evsel *counter)
1256 char *new_name; 1284 char *new_name;
1257 char *config; 1285 char *config;
1258 1286
1259 if (!counter->pmu_name || !strncmp(counter->name, counter->pmu_name, 1287 if (counter->uniquified_name ||
1288 !counter->pmu_name || !strncmp(counter->name, counter->pmu_name,
1260 strlen(counter->pmu_name))) 1289 strlen(counter->pmu_name)))
1261 return; 1290 return;
1262 1291
@@ -1274,6 +1303,8 @@ static void uniquify_event_name(struct perf_evsel *counter)
1274 counter->name = new_name; 1303 counter->name = new_name;
1275 } 1304 }
1276 } 1305 }
1306
1307 counter->uniquified_name = true;
1277} 1308}
1278 1309
1279static void collect_all_aliases(struct perf_evsel *counter, 1310static void collect_all_aliases(struct perf_evsel *counter,
@@ -1757,6 +1788,11 @@ static void print_footer(void)
1757" echo 0 > /proc/sys/kernel/nmi_watchdog\n" 1788" echo 0 > /proc/sys/kernel/nmi_watchdog\n"
1758" perf stat ...\n" 1789" perf stat ...\n"
1759" echo 1 > /proc/sys/kernel/nmi_watchdog\n"); 1790" echo 1 > /proc/sys/kernel/nmi_watchdog\n");
1791
1792 if (print_mixed_hw_group_error)
1793 fprintf(output,
1794 "The events in group usually have to be from "
1795 "the same PMU. Try reorganizing the group.\n");
1760} 1796}
1761 1797
1762static void print_counters(struct timespec *ts, int argc, const char **argv) 1798static void print_counters(struct timespec *ts, int argc, const char **argv)
@@ -1943,7 +1979,8 @@ static const struct option stat_options[] = {
1943 OPT_STRING(0, "post", &post_cmd, "command", 1979 OPT_STRING(0, "post", &post_cmd, "command",
1944 "command to run after to the measured command"), 1980 "command to run after to the measured command"),
1945 OPT_UINTEGER('I', "interval-print", &stat_config.interval, 1981 OPT_UINTEGER('I', "interval-print", &stat_config.interval,
1946 "print counts at regular interval in ms (>= 10)"), 1982 "print counts at regular interval in ms "
1983 "(overhead is possible for values <= 100ms)"),
1947 OPT_INTEGER(0, "interval-count", &stat_config.times, 1984 OPT_INTEGER(0, "interval-count", &stat_config.times,
1948 "print counts for fixed number of times"), 1985 "print counts for fixed number of times"),
1949 OPT_UINTEGER(0, "timeout", &stat_config.timeout, 1986 OPT_UINTEGER(0, "timeout", &stat_config.timeout,
@@ -2923,17 +2960,6 @@ int cmd_stat(int argc, const char **argv)
2923 } 2960 }
2924 } 2961 }
2925 2962
2926 if (interval && interval < 100) {
2927 if (interval < 10) {
2928 pr_err("print interval must be >= 10ms\n");
2929 parse_options_usage(stat_usage, stat_options, "I", 1);
2930 goto out;
2931 } else
2932 pr_warning("print interval < 100ms. "
2933 "The overhead percentage could be high in some cases. "
2934 "Please proceed with caution.\n");
2935 }
2936
2937 if (stat_config.times && interval) 2963 if (stat_config.times && interval)
2938 interval_count = true; 2964 interval_count = true;
2939 else if (stat_config.times && !interval) { 2965 else if (stat_config.times && !interval) {
diff --git a/tools/perf/builtin-version.c b/tools/perf/builtin-version.c
index 2abe3910d6b6..50df168be326 100644
--- a/tools/perf/builtin-version.c
+++ b/tools/perf/builtin-version.c
@@ -60,7 +60,10 @@ static void library_status(void)
60 STATUS(HAVE_DWARF_GETLOCATIONS_SUPPORT, dwarf_getlocations); 60 STATUS(HAVE_DWARF_GETLOCATIONS_SUPPORT, dwarf_getlocations);
61 STATUS(HAVE_GLIBC_SUPPORT, glibc); 61 STATUS(HAVE_GLIBC_SUPPORT, glibc);
62 STATUS(HAVE_GTK2_SUPPORT, gtk2); 62 STATUS(HAVE_GTK2_SUPPORT, gtk2);
63#ifndef HAVE_SYSCALL_TABLE_SUPPORT
63 STATUS(HAVE_LIBAUDIT_SUPPORT, libaudit); 64 STATUS(HAVE_LIBAUDIT_SUPPORT, libaudit);
65#endif
66 STATUS(HAVE_SYSCALL_TABLE_SUPPORT, syscall_table);
64 STATUS(HAVE_LIBBFD_SUPPORT, libbfd); 67 STATUS(HAVE_LIBBFD_SUPPORT, libbfd);
65 STATUS(HAVE_LIBELF_SUPPORT, libelf); 68 STATUS(HAVE_LIBELF_SUPPORT, libelf);
66 STATUS(HAVE_LIBNUMA_SUPPORT, libnuma); 69 STATUS(HAVE_LIBNUMA_SUPPORT, libnuma);
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 1659029d03fc..20a08cb32332 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -73,7 +73,7 @@ static struct cmd_struct commands[] = {
73 { "lock", cmd_lock, 0 }, 73 { "lock", cmd_lock, 0 },
74 { "kvm", cmd_kvm, 0 }, 74 { "kvm", cmd_kvm, 0 },
75 { "test", cmd_test, 0 }, 75 { "test", cmd_test, 0 },
76#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE) 76#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT)
77 { "trace", cmd_trace, 0 }, 77 { "trace", cmd_trace, 0 },
78#endif 78#endif
79 { "inject", cmd_inject, 0 }, 79 { "inject", cmd_inject, 0 },
@@ -491,7 +491,7 @@ int main(int argc, const char **argv)
491 argv[0] = cmd; 491 argv[0] = cmd;
492 } 492 }
493 if (strstarts(cmd, "trace")) { 493 if (strstarts(cmd, "trace")) {
494#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE) 494#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT)
495 setup_path(); 495 setup_path();
496 argv[0] = "trace"; 496 argv[0] = "trace";
497 return cmd_trace(argc, argv); 497 return cmd_trace(argc, argv);
diff --git a/tools/perf/pmu-events/arch/s390/mapfile.csv b/tools/perf/pmu-events/arch/s390/mapfile.csv
index ca7682748a4b..78bcf7f8e206 100644
--- a/tools/perf/pmu-events/arch/s390/mapfile.csv
+++ b/tools/perf/pmu-events/arch/s390/mapfile.csv
@@ -1,6 +1,6 @@
1Family-model,Version,Filename,EventType 1Family-model,Version,Filename,EventType
2209[78],1,cf_z10,core 2^IBM.209[78].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z10,core
3281[78],1,cf_z196,core 3^IBM.281[78].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z196,core
4282[78],1,cf_zec12,core 4^IBM.282[78].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_zec12,core
5296[45],1,cf_z13,core 5^IBM.296[45].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z13,core
63906,3,cf_z14,core 6^IBM.390[67].*[13]\.[1-5].[[:xdigit:]]+$,3,cf_z14,core
diff --git a/tools/perf/tests/attr/test-record-group-sampling b/tools/perf/tests/attr/test-record-group-sampling
index f906b793196f..8a33ca4f9e1f 100644
--- a/tools/perf/tests/attr/test-record-group-sampling
+++ b/tools/perf/tests/attr/test-record-group-sampling
@@ -35,3 +35,6 @@ inherit=0
35# sampling disabled 35# sampling disabled
36sample_freq=0 36sample_freq=0
37sample_period=0 37sample_period=0
38freq=0
39write_backward=0
40sample_id_all=0
diff --git a/tools/perf/tests/bpf-script-example.c b/tools/perf/tests/bpf-script-example.c
index e4123c1b0e88..1ca5106df5f1 100644
--- a/tools/perf/tests/bpf-script-example.c
+++ b/tools/perf/tests/bpf-script-example.c
@@ -31,7 +31,7 @@ struct bpf_map_def SEC("maps") flip_table = {
31 .max_entries = 1, 31 .max_entries = 1,
32}; 32};
33 33
34SEC("func=SyS_epoll_pwait") 34SEC("func=do_epoll_wait")
35int bpf_func__SyS_epoll_pwait(void *ctx) 35int bpf_func__SyS_epoll_pwait(void *ctx)
36{ 36{
37 int ind =0; 37 int ind =0;
diff --git a/tools/perf/tests/bpf-script-test-kbuild.c b/tools/perf/tests/bpf-script-test-kbuild.c
index 3626924740d8..ff3ec8337f0a 100644
--- a/tools/perf/tests/bpf-script-test-kbuild.c
+++ b/tools/perf/tests/bpf-script-test-kbuild.c
@@ -9,7 +9,6 @@
9#define SEC(NAME) __attribute__((section(NAME), used)) 9#define SEC(NAME) __attribute__((section(NAME), used))
10 10
11#include <uapi/linux/fs.h> 11#include <uapi/linux/fs.h>
12#include <uapi/asm/ptrace.h>
13 12
14SEC("func=vfs_llseek") 13SEC("func=vfs_llseek")
15int bpf_func__vfs_llseek(void *ctx) 14int bpf_func__vfs_llseek(void *ctx)
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 625f5a6772af..cac8f8889bc3 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -118,6 +118,7 @@ static struct test generic_tests[] = {
118 { 118 {
119 .desc = "Breakpoint accounting", 119 .desc = "Breakpoint accounting",
120 .func = test__bp_accounting, 120 .func = test__bp_accounting,
121 .is_supported = test__bp_signal_is_supported,
121 }, 122 },
122 { 123 {
123 .desc = "Number of exit events of a simple workload", 124 .desc = "Number of exit events of a simple workload",
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index bb8e6bcb0d96..0919b0793e5b 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -75,7 +75,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
75 snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]); 75 snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
76 evsels[i] = perf_evsel__newtp("syscalls", name); 76 evsels[i] = perf_evsel__newtp("syscalls", name);
77 if (IS_ERR(evsels[i])) { 77 if (IS_ERR(evsels[i])) {
78 pr_debug("perf_evsel__new\n"); 78 pr_debug("perf_evsel__new(%s)\n", name);
79 goto out_delete_evlist; 79 goto out_delete_evlist;
80 } 80 }
81 81
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
index 1ecc1f0ff84a..016882dbbc16 100755
--- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
@@ -19,12 +19,10 @@ trace_libc_inet_pton_backtrace() {
19 expected[1]=".*inet_pton[[:space:]]\($libc\)$" 19 expected[1]=".*inet_pton[[:space:]]\($libc\)$"
20 case "$(uname -m)" in 20 case "$(uname -m)" in
21 s390x) 21 s390x)
22 eventattr='call-graph=dwarf' 22 eventattr='call-graph=dwarf,max-stack=4'
23 expected[2]="gaih_inet.*[[:space:]]\($libc|inlined\)$" 23 expected[2]="gaih_inet.*[[:space:]]\($libc|inlined\)$"
24 expected[3]="__GI_getaddrinfo[[:space:]]\($libc|inlined\)$" 24 expected[3]="(__GI_)?getaddrinfo[[:space:]]\($libc|inlined\)$"
25 expected[4]="main[[:space:]]\(.*/bin/ping.*\)$" 25 expected[4]="main[[:space:]]\(.*/bin/ping.*\)$"
26 expected[5]="__libc_start_main[[:space:]]\($libc\)$"
27 expected[6]="_start[[:space:]]\(.*/bin/ping.*\)$"
28 ;; 26 ;;
29 *) 27 *)
30 eventattr='max-stack=3' 28 eventattr='max-stack=3'
diff --git a/tools/perf/trace/beauty/mmap.c b/tools/perf/trace/beauty/mmap.c
index 417e3ecfe9d7..9f68077b241b 100644
--- a/tools/perf/trace/beauty/mmap.c
+++ b/tools/perf/trace/beauty/mmap.c
@@ -54,6 +54,9 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
54 P_MMAP_FLAG(EXECUTABLE); 54 P_MMAP_FLAG(EXECUTABLE);
55 P_MMAP_FLAG(FILE); 55 P_MMAP_FLAG(FILE);
56 P_MMAP_FLAG(FIXED); 56 P_MMAP_FLAG(FIXED);
57#ifdef MAP_FIXED_NOREPLACE
58 P_MMAP_FLAG(FIXED_NOREPLACE);
59#endif
57 P_MMAP_FLAG(GROWSDOWN); 60 P_MMAP_FLAG(GROWSDOWN);
58 P_MMAP_FLAG(HUGETLB); 61 P_MMAP_FLAG(HUGETLB);
59 P_MMAP_FLAG(LOCKED); 62 P_MMAP_FLAG(LOCKED);
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 12c099a87f8b..3781d74088a7 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -692,6 +692,7 @@ static int annotate_browser__run(struct annotate_browser *browser,
692 "J Toggle showing number of jump sources on targets\n" 692 "J Toggle showing number of jump sources on targets\n"
693 "n Search next string\n" 693 "n Search next string\n"
694 "o Toggle disassembler output/simplified view\n" 694 "o Toggle disassembler output/simplified view\n"
695 "O Bump offset level (jump targets -> +call -> all -> cycle thru)\n"
695 "s Toggle source code view\n" 696 "s Toggle source code view\n"
696 "t Circulate percent, total period, samples view\n" 697 "t Circulate percent, total period, samples view\n"
697 "/ Search string\n" 698 "/ Search string\n"
@@ -719,6 +720,10 @@ static int annotate_browser__run(struct annotate_browser *browser,
719 notes->options->use_offset = !notes->options->use_offset; 720 notes->options->use_offset = !notes->options->use_offset;
720 annotation__update_column_widths(notes); 721 annotation__update_column_widths(notes);
721 continue; 722 continue;
723 case 'O':
724 if (++notes->options->offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
725 notes->options->offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
726 continue;
722 case 'j': 727 case 'j':
723 notes->options->jump_arrows = !notes->options->jump_arrows; 728 notes->options->jump_arrows = !notes->options->jump_arrows;
724 continue; 729 continue;
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 0eec06c105c6..e5f247247daa 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -2714,7 +2714,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
2714 "h/?/F1 Show this window\n" \ 2714 "h/?/F1 Show this window\n" \
2715 "UP/DOWN/PGUP\n" \ 2715 "UP/DOWN/PGUP\n" \
2716 "PGDN/SPACE Navigate\n" \ 2716 "PGDN/SPACE Navigate\n" \
2717 "q/ESC/CTRL+C Exit browser\n\n" \ 2717 "q/ESC/CTRL+C Exit browser or go back to previous screen\n\n" \
2718 "For multiple event sessions:\n\n" \ 2718 "For multiple event sessions:\n\n" \
2719 "TAB/UNTAB Switch events\n\n" \ 2719 "TAB/UNTAB Switch events\n\n" \
2720 "For symbolic views (--sort has sym):\n\n" \ 2720 "For symbolic views (--sort has sym):\n\n" \
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index fbad8dfbb186..536ee148bff8 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -46,6 +46,7 @@
46struct annotation_options annotation__default_options = { 46struct annotation_options annotation__default_options = {
47 .use_offset = true, 47 .use_offset = true,
48 .jump_arrows = true, 48 .jump_arrows = true,
49 .offset_level = ANNOTATION__OFFSET_JUMP_TARGETS,
49}; 50};
50 51
51const char *disassembler_style; 52const char *disassembler_style;
@@ -2512,7 +2513,8 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
2512 if (!notes->options->use_offset) { 2513 if (!notes->options->use_offset) {
2513 printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr); 2514 printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
2514 } else { 2515 } else {
2515 if (al->jump_sources) { 2516 if (al->jump_sources &&
2517 notes->options->offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) {
2516 if (notes->options->show_nr_jumps) { 2518 if (notes->options->show_nr_jumps) {
2517 int prev; 2519 int prev;
2518 printed = scnprintf(bf, sizeof(bf), "%*d ", 2520 printed = scnprintf(bf, sizeof(bf), "%*d ",
@@ -2523,9 +2525,14 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
2523 obj__printf(obj, bf); 2525 obj__printf(obj, bf);
2524 obj__set_color(obj, prev); 2526 obj__set_color(obj, prev);
2525 } 2527 }
2526 2528print_addr:
2527 printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ", 2529 printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
2528 notes->widths.target, addr); 2530 notes->widths.target, addr);
2531 } else if (ins__is_call(&disasm_line(al)->ins) &&
2532 notes->options->offset_level >= ANNOTATION__OFFSET_CALL) {
2533 goto print_addr;
2534 } else if (notes->options->offset_level == ANNOTATION__MAX_OFFSET_LEVEL) {
2535 goto print_addr;
2529 } else { 2536 } else {
2530 printed = scnprintf(bf, sizeof(bf), "%-*s ", 2537 printed = scnprintf(bf, sizeof(bf), "%-*s ",
2531 notes->widths.addr, " "); 2538 notes->widths.addr, " ");
@@ -2642,10 +2649,11 @@ int __annotation__scnprintf_samples_period(struct annotation *notes,
2642 */ 2649 */
2643static struct annotation_config { 2650static struct annotation_config {
2644 const char *name; 2651 const char *name;
2645 bool *value; 2652 void *value;
2646} annotation__configs[] = { 2653} annotation__configs[] = {
2647 ANNOTATION__CFG(hide_src_code), 2654 ANNOTATION__CFG(hide_src_code),
2648 ANNOTATION__CFG(jump_arrows), 2655 ANNOTATION__CFG(jump_arrows),
2656 ANNOTATION__CFG(offset_level),
2649 ANNOTATION__CFG(show_linenr), 2657 ANNOTATION__CFG(show_linenr),
2650 ANNOTATION__CFG(show_nr_jumps), 2658 ANNOTATION__CFG(show_nr_jumps),
2651 ANNOTATION__CFG(show_nr_samples), 2659 ANNOTATION__CFG(show_nr_samples),
@@ -2677,8 +2685,16 @@ static int annotation__config(const char *var, const char *value,
2677 2685
2678 if (cfg == NULL) 2686 if (cfg == NULL)
2679 pr_debug("%s variable unknown, ignoring...", var); 2687 pr_debug("%s variable unknown, ignoring...", var);
2680 else 2688 else if (strcmp(var, "annotate.offset_level") == 0) {
2681 *cfg->value = perf_config_bool(name, value); 2689 perf_config_int(cfg->value, name, value);
2690
2691 if (*(int *)cfg->value > ANNOTATION__MAX_OFFSET_LEVEL)
2692 *(int *)cfg->value = ANNOTATION__MAX_OFFSET_LEVEL;
2693 else if (*(int *)cfg->value < ANNOTATION__MIN_OFFSET_LEVEL)
2694 *(int *)cfg->value = ANNOTATION__MIN_OFFSET_LEVEL;
2695 } else {
2696 *(bool *)cfg->value = perf_config_bool(name, value);
2697 }
2682 return 0; 2698 return 0;
2683} 2699}
2684 2700
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index db8d09bea07e..f28a9e43421d 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -70,8 +70,17 @@ struct annotation_options {
70 show_nr_jumps, 70 show_nr_jumps,
71 show_nr_samples, 71 show_nr_samples,
72 show_total_period; 72 show_total_period;
73 u8 offset_level;
73}; 74};
74 75
76enum {
77 ANNOTATION__OFFSET_JUMP_TARGETS = 1,
78 ANNOTATION__OFFSET_CALL,
79 ANNOTATION__MAX_OFFSET_LEVEL,
80};
81
82#define ANNOTATION__MIN_OFFSET_LEVEL ANNOTATION__OFFSET_JUMP_TARGETS
83
75extern struct annotation_options annotation__default_options; 84extern struct annotation_options annotation__default_options;
76 85
77struct annotation; 86struct annotation;
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index 640af88331b4..c8b98fa22997 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -1,6 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * SPDX-License-Identifier: GPL-2.0
3 *
4 * Copyright(C) 2015-2018 Linaro Limited. 3 * Copyright(C) 2015-2018 Linaro Limited.
5 * 4 *
6 * Author: Tor Jeremiassen <tor@ti.com> 5 * Author: Tor Jeremiassen <tor@ti.com>
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 1b0d422373be..40020b1ca54f 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -1,6 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * SPDX-License-Identifier: GPL-2.0
3 *
4 * Copyright(C) 2015-2018 Linaro Limited. 3 * Copyright(C) 2015-2018 Linaro Limited.
5 * 4 *
6 * Author: Tor Jeremiassen <tor@ti.com> 5 * Author: Tor Jeremiassen <tor@ti.com>
diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h
index 5864d5dca616..37f8d48179ca 100644
--- a/tools/perf/util/cs-etm.h
+++ b/tools/perf/util/cs-etm.h
@@ -1,18 +1,7 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * Copyright(C) 2015 Linaro Limited. All rights reserved. 3 * Copyright(C) 2015 Linaro Limited. All rights reserved.
3 * Author: Mathieu Poirier <mathieu.poirier@linaro.org> 4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 5 */
17 6
18#ifndef INCLUDE__UTIL_PERF_CS_ETM_H__ 7#ifndef INCLUDE__UTIL_PERF_CS_ETM_H__
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index f0a6cbd033cc..98ff3a6a3d50 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -1421,7 +1421,9 @@ size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
1421size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp) 1421size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
1422{ 1422{
1423 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; 1423 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
1424 const char *in_out = out ? "OUT" : "IN "; 1424 const char *in_out = !out ? "IN " :
1425 !(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) ?
1426 "OUT " : "OUT preempt";
1425 1427
1426 if (event->header.type == PERF_RECORD_SWITCH) 1428 if (event->header.type == PERF_RECORD_SWITCH)
1427 return fprintf(fp, " %s\n", in_out); 1429 return fprintf(fp, " %s\n", in_out);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 1ac8d9236efd..4cd2cf93f726 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -930,8 +930,11 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
930 * than leader in case leader 'leads' the sampling. 930 * than leader in case leader 'leads' the sampling.
931 */ 931 */
932 if ((leader != evsel) && leader->sample_read) { 932 if ((leader != evsel) && leader->sample_read) {
933 attr->sample_freq = 0; 933 attr->freq = 0;
934 attr->sample_period = 0; 934 attr->sample_freq = 0;
935 attr->sample_period = 0;
936 attr->write_backward = 0;
937 attr->sample_id_all = 0;
935 } 938 }
936 939
937 if (opts->no_samples) 940 if (opts->no_samples)
@@ -1922,7 +1925,8 @@ try_fallback:
1922 goto fallback_missing_features; 1925 goto fallback_missing_features;
1923 } else if (!perf_missing_features.group_read && 1926 } else if (!perf_missing_features.group_read &&
1924 evsel->attr.inherit && 1927 evsel->attr.inherit &&
1925 (evsel->attr.read_format & PERF_FORMAT_GROUP)) { 1928 (evsel->attr.read_format & PERF_FORMAT_GROUP) &&
1929 perf_evsel__is_group_leader(evsel)) {
1926 perf_missing_features.group_read = true; 1930 perf_missing_features.group_read = true;
1927 pr_debug2("switching off group read\n"); 1931 pr_debug2("switching off group read\n");
1928 goto fallback_missing_features; 1932 goto fallback_missing_features;
@@ -2754,8 +2758,14 @@ bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
2754 (paranoid = perf_event_paranoid()) > 1) { 2758 (paranoid = perf_event_paranoid()) > 1) {
2755 const char *name = perf_evsel__name(evsel); 2759 const char *name = perf_evsel__name(evsel);
2756 char *new_name; 2760 char *new_name;
2761 const char *sep = ":";
2757 2762
2758 if (asprintf(&new_name, "%s%su", name, strchr(name, ':') ? "" : ":") < 0) 2763 /* Is there already the separator in the name. */
2764 if (strchr(name, '/') ||
2765 strchr(name, ':'))
2766 sep = "";
2767
2768 if (asprintf(&new_name, "%s%su", name, sep) < 0)
2759 return false; 2769 return false;
2760 2770
2761 if (evsel->name) 2771 if (evsel->name)
@@ -2870,8 +2880,7 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
2870#if defined(__i386__) || defined(__x86_64__) 2880#if defined(__i386__) || defined(__x86_64__)
2871 if (evsel->attr.type == PERF_TYPE_HARDWARE) 2881 if (evsel->attr.type == PERF_TYPE_HARDWARE)
2872 return scnprintf(msg, size, "%s", 2882 return scnprintf(msg, size, "%s",
2873 "No hardware sampling interrupt available.\n" 2883 "No hardware sampling interrupt available.\n");
2874 "No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
2875#endif 2884#endif
2876 break; 2885 break;
2877 case EBUSY: 2886 case EBUSY:
@@ -2894,8 +2903,7 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
2894 2903
2895 return scnprintf(msg, size, 2904 return scnprintf(msg, size,
2896 "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n" 2905 "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
2897 "/bin/dmesg may provide additional information.\n" 2906 "/bin/dmesg | grep -i perf may provide additional information.\n",
2898 "No CONFIG_PERF_EVENTS=y kernel support configured?",
2899 err, str_error_r(err, sbuf, sizeof(sbuf)), 2907 err, str_error_r(err, sbuf, sizeof(sbuf)),
2900 perf_evsel__name(evsel)); 2908 perf_evsel__name(evsel));
2901} 2909}
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index d3ee3af618ef..92ec009a292d 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -115,6 +115,7 @@ struct perf_evsel {
115 unsigned int sample_size; 115 unsigned int sample_size;
116 int id_pos; 116 int id_pos;
117 int is_pos; 117 int is_pos;
118 bool uniquified_name;
118 bool snapshot; 119 bool snapshot;
119 bool supported; 120 bool supported;
120 bool needs_swap; 121 bool needs_swap;
diff --git a/tools/perf/util/generate-cmdlist.sh b/tools/perf/util/generate-cmdlist.sh
index ff17920a5ebc..c3cef36d4176 100755
--- a/tools/perf/util/generate-cmdlist.sh
+++ b/tools/perf/util/generate-cmdlist.sh
@@ -38,7 +38,7 @@ do
38done 38done
39echo "#endif /* HAVE_LIBELF_SUPPORT */" 39echo "#endif /* HAVE_LIBELF_SUPPORT */"
40 40
41echo "#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE)" 41echo "#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT)"
42sed -n -e 's/^perf-\([^ ]*\)[ ].* audit*/\1/p' command-list.txt | 42sed -n -e 's/^perf-\([^ ]*\)[ ].* audit*/\1/p' command-list.txt |
43sort | 43sort |
44while read cmd 44while read cmd
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 121df1683c36..a8bff2178fbc 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1320,7 +1320,8 @@ static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
1320 1320
1321 dir = opendir(path); 1321 dir = opendir(path);
1322 if (!dir) { 1322 if (!dir) {
1323 pr_warning("failed: can't open node sysfs data\n"); 1323 pr_debug2("%s: could't read %s, does this arch have topology information?\n",
1324 __func__, path);
1324 return -1; 1325 return -1;
1325 } 1326 }
1326 1327
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 2eca8478e24f..32d50492505d 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1019,13 +1019,6 @@ int machine__load_vmlinux_path(struct machine *machine, enum map_type type)
1019 return ret; 1019 return ret;
1020} 1020}
1021 1021
1022static void map_groups__fixup_end(struct map_groups *mg)
1023{
1024 int i;
1025 for (i = 0; i < MAP__NR_TYPES; ++i)
1026 __map_groups__fixup_end(mg, i);
1027}
1028
1029static char *get_kernel_version(const char *root_dir) 1022static char *get_kernel_version(const char *root_dir)
1030{ 1023{
1031 char version[PATH_MAX]; 1024 char version[PATH_MAX];
@@ -1233,6 +1226,7 @@ int machine__create_kernel_maps(struct machine *machine)
1233{ 1226{
1234 struct dso *kernel = machine__get_kernel(machine); 1227 struct dso *kernel = machine__get_kernel(machine);
1235 const char *name = NULL; 1228 const char *name = NULL;
1229 struct map *map;
1236 u64 addr = 0; 1230 u64 addr = 0;
1237 int ret; 1231 int ret;
1238 1232
@@ -1259,13 +1253,25 @@ int machine__create_kernel_maps(struct machine *machine)
1259 machine__destroy_kernel_maps(machine); 1253 machine__destroy_kernel_maps(machine);
1260 return -1; 1254 return -1;
1261 } 1255 }
1262 machine__set_kernel_mmap(machine, addr, 0); 1256
1257 /* we have a real start address now, so re-order the kmaps */
1258 map = machine__kernel_map(machine);
1259
1260 map__get(map);
1261 map_groups__remove(&machine->kmaps, map);
1262
1263 /* assume it's the last in the kmaps */
1264 machine__set_kernel_mmap(machine, addr, ~0ULL);
1265
1266 map_groups__insert(&machine->kmaps, map);
1267 map__put(map);
1263 } 1268 }
1264 1269
1265 /* 1270 /* update end address of the kernel map using adjacent module address */
1266 * Now that we have all the maps created, just set the ->end of them: 1271 map = map__next(machine__kernel_map(machine));
1267 */ 1272 if (map)
1268 map_groups__fixup_end(&machine->kmaps); 1273 machine__set_kernel_mmap(machine, addr, map->start);
1274
1269 return 0; 1275 return 0;
1270} 1276}
1271 1277
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 7afeb80cc39e..d14464c42714 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -224,15 +224,15 @@ event_def: event_pmu |
224 event_bpf_file 224 event_bpf_file
225 225
226event_pmu: 226event_pmu:
227PE_NAME opt_event_config 227PE_NAME '/' event_config '/'
228{ 228{
229 struct list_head *list, *orig_terms, *terms; 229 struct list_head *list, *orig_terms, *terms;
230 230
231 if (parse_events_copy_term_list($2, &orig_terms)) 231 if (parse_events_copy_term_list($3, &orig_terms))
232 YYABORT; 232 YYABORT;
233 233
234 ALLOC_LIST(list); 234 ALLOC_LIST(list);
235 if (parse_events_add_pmu(_parse_state, list, $1, $2, false)) { 235 if (parse_events_add_pmu(_parse_state, list, $1, $3, false)) {
236 struct perf_pmu *pmu = NULL; 236 struct perf_pmu *pmu = NULL;
237 int ok = 0; 237 int ok = 0;
238 char *pattern; 238 char *pattern;
@@ -262,7 +262,7 @@ PE_NAME opt_event_config
262 if (!ok) 262 if (!ok)
263 YYABORT; 263 YYABORT;
264 } 264 }
265 parse_events_terms__delete($2); 265 parse_events_terms__delete($3);
266 parse_events_terms__delete(orig_terms); 266 parse_events_terms__delete(orig_terms);
267 $$ = list; 267 $$ = list;
268} 268}
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 064bdcb7bd78..d2fb597c9a8c 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -539,9 +539,10 @@ static bool pmu_is_uncore(const char *name)
539 539
540/* 540/*
541 * PMU CORE devices have different name other than cpu in sysfs on some 541 * PMU CORE devices have different name other than cpu in sysfs on some
542 * platforms. looking for possible sysfs files to identify as core device. 542 * platforms.
543 * Looking for possible sysfs files to identify the arm core device.
543 */ 544 */
544static int is_pmu_core(const char *name) 545static int is_arm_pmu_core(const char *name)
545{ 546{
546 struct stat st; 547 struct stat st;
547 char path[PATH_MAX]; 548 char path[PATH_MAX];
@@ -550,18 +551,18 @@ static int is_pmu_core(const char *name)
550 if (!sysfs) 551 if (!sysfs)
551 return 0; 552 return 0;
552 553
553 /* Look for cpu sysfs (x86 and others) */
554 scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/cpu", sysfs);
555 if ((stat(path, &st) == 0) &&
556 (strncmp(name, "cpu", strlen("cpu")) == 0))
557 return 1;
558
559 /* Look for cpu sysfs (specific to arm) */ 554 /* Look for cpu sysfs (specific to arm) */
560 scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s/cpus", 555 scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s/cpus",
561 sysfs, name); 556 sysfs, name);
562 if (stat(path, &st) == 0) 557 if (stat(path, &st) == 0)
563 return 1; 558 return 1;
564 559
560 /* Look for cpu sysfs (specific to s390) */
561 scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s",
562 sysfs, name);
563 if (stat(path, &st) == 0 && !strncmp(name, "cpum_", 5))
564 return 1;
565
565 return 0; 566 return 0;
566} 567}
567 568
@@ -580,7 +581,7 @@ char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
580 * cpuid string generated on this platform. 581 * cpuid string generated on this platform.
581 * Otherwise return non-zero. 582 * Otherwise return non-zero.
582 */ 583 */
583int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid) 584int strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
584{ 585{
585 regex_t re; 586 regex_t re;
586 regmatch_t pmatch[1]; 587 regmatch_t pmatch[1];
@@ -662,6 +663,7 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
662 struct pmu_events_map *map; 663 struct pmu_events_map *map;
663 struct pmu_event *pe; 664 struct pmu_event *pe;
664 const char *name = pmu->name; 665 const char *name = pmu->name;
666 const char *pname;
665 667
666 map = perf_pmu__find_map(pmu); 668 map = perf_pmu__find_map(pmu);
667 if (!map) 669 if (!map)
@@ -680,11 +682,9 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
680 break; 682 break;
681 } 683 }
682 684
683 if (!is_pmu_core(name)) { 685 if (!is_arm_pmu_core(name)) {
684 /* check for uncore devices */ 686 pname = pe->pmu ? pe->pmu : "cpu";
685 if (pe->pmu == NULL) 687 if (strncmp(pname, name, strlen(pname)))
686 continue;
687 if (strncmp(pe->pmu, name, strlen(pe->pmu)))
688 continue; 688 continue;
689 } 689 }
690 690
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 62b2dd2253eb..1466814ebada 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -2091,16 +2091,14 @@ static bool symbol__read_kptr_restrict(void)
2091 2091
2092int symbol__annotation_init(void) 2092int symbol__annotation_init(void)
2093{ 2093{
2094 if (symbol_conf.init_annotation)
2095 return 0;
2096
2094 if (symbol_conf.initialized) { 2097 if (symbol_conf.initialized) {
2095 pr_err("Annotation needs to be init before symbol__init()\n"); 2098 pr_err("Annotation needs to be init before symbol__init()\n");
2096 return -1; 2099 return -1;
2097 } 2100 }
2098 2101
2099 if (symbol_conf.init_annotation) {
2100 pr_warning("Annotation being initialized multiple times\n");
2101 return 0;
2102 }
2103
2104 symbol_conf.priv_size += sizeof(struct annotation); 2102 symbol_conf.priv_size += sizeof(struct annotation);
2105 symbol_conf.init_annotation = true; 2103 symbol_conf.init_annotation = true;
2106 return 0; 2104 return 0;
diff --git a/tools/perf/util/syscalltbl.c b/tools/perf/util/syscalltbl.c
index 895122d638dd..0ee7f568d60c 100644
--- a/tools/perf/util/syscalltbl.c
+++ b/tools/perf/util/syscalltbl.c
@@ -17,7 +17,7 @@
17#include <stdlib.h> 17#include <stdlib.h>
18#include <linux/compiler.h> 18#include <linux/compiler.h>
19 19
20#ifdef HAVE_SYSCALL_TABLE 20#ifdef HAVE_SYSCALL_TABLE_SUPPORT
21#include <string.h> 21#include <string.h>
22#include "string2.h" 22#include "string2.h"
23#include "util.h" 23#include "util.h"
@@ -139,7 +139,7 @@ int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_g
139 return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx); 139 return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx);
140} 140}
141 141
142#else /* HAVE_SYSCALL_TABLE */ 142#else /* HAVE_SYSCALL_TABLE_SUPPORT */
143 143
144#include <libaudit.h> 144#include <libaudit.h>
145 145
@@ -176,4 +176,4 @@ int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_g
176{ 176{
177 return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx); 177 return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx);
178} 178}
179#endif /* HAVE_SYSCALL_TABLE */ 179#endif /* HAVE_SYSCALL_TABLE_SUPPORT */
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
index 0ac9077f62a2..b1e5c3a2b8e3 100644
--- a/tools/perf/util/trace-event-scripting.c
+++ b/tools/perf/util/trace-event-scripting.c
@@ -98,7 +98,7 @@ static void register_python_scripting(struct scripting_ops *scripting_ops)
98 } 98 }
99} 99}
100 100
101#ifdef NO_LIBPYTHON 101#ifndef HAVE_LIBPYTHON_SUPPORT
102void setup_python_scripting(void) 102void setup_python_scripting(void)
103{ 103{
104 register_python_scripting(&python_scripting_unsupported_ops); 104 register_python_scripting(&python_scripting_unsupported_ops);
@@ -161,7 +161,7 @@ static void register_perl_scripting(struct scripting_ops *scripting_ops)
161 } 161 }
162} 162}
163 163
164#ifdef NO_LIBPERL 164#ifndef HAVE_LIBPERL_SUPPORT
165void setup_perl_scripting(void) 165void setup_perl_scripting(void)
166{ 166{
167 register_perl_scripting(&perl_scripting_unsupported_ops); 167 register_perl_scripting(&perl_scripting_unsupported_ops);
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index cb166be4918d..4ea385be528f 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -138,6 +138,7 @@ static u32 handle[] = {
138}; 138};
139 139
140static unsigned long dimm_fail_cmd_flags[NUM_DCR]; 140static unsigned long dimm_fail_cmd_flags[NUM_DCR];
141static int dimm_fail_cmd_code[NUM_DCR];
141 142
142struct nfit_test_fw { 143struct nfit_test_fw {
143 enum intel_fw_update_state state; 144 enum intel_fw_update_state state;
@@ -892,8 +893,11 @@ static int get_dimm(struct nfit_mem *nfit_mem, unsigned int func)
892 if (i >= ARRAY_SIZE(handle)) 893 if (i >= ARRAY_SIZE(handle))
893 return -ENXIO; 894 return -ENXIO;
894 895
895 if ((1 << func) & dimm_fail_cmd_flags[i]) 896 if ((1 << func) & dimm_fail_cmd_flags[i]) {
897 if (dimm_fail_cmd_code[i])
898 return dimm_fail_cmd_code[i];
896 return -EIO; 899 return -EIO;
900 }
897 901
898 return i; 902 return i;
899} 903}
@@ -1162,12 +1166,12 @@ static int ars_state_init(struct device *dev, struct ars_state *ars_state)
1162 1166
1163static void put_dimms(void *data) 1167static void put_dimms(void *data)
1164{ 1168{
1165 struct device **dimm_dev = data; 1169 struct nfit_test *t = data;
1166 int i; 1170 int i;
1167 1171
1168 for (i = 0; i < NUM_DCR; i++) 1172 for (i = 0; i < t->num_dcr; i++)
1169 if (dimm_dev[i]) 1173 if (t->dimm_dev[i])
1170 device_unregister(dimm_dev[i]); 1174 device_unregister(t->dimm_dev[i]);
1171} 1175}
1172 1176
1173static struct class *nfit_test_dimm; 1177static struct class *nfit_test_dimm;
@@ -1176,13 +1180,11 @@ static int dimm_name_to_id(struct device *dev)
1176{ 1180{
1177 int dimm; 1181 int dimm;
1178 1182
1179 if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1 1183 if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1)
1180 || dimm >= NUM_DCR || dimm < 0)
1181 return -ENXIO; 1184 return -ENXIO;
1182 return dimm; 1185 return dimm;
1183} 1186}
1184 1187
1185
1186static ssize_t handle_show(struct device *dev, struct device_attribute *attr, 1188static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
1187 char *buf) 1189 char *buf)
1188{ 1190{
@@ -1191,7 +1193,7 @@ static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
1191 if (dimm < 0) 1193 if (dimm < 0)
1192 return dimm; 1194 return dimm;
1193 1195
1194 return sprintf(buf, "%#x", handle[dimm]); 1196 return sprintf(buf, "%#x\n", handle[dimm]);
1195} 1197}
1196DEVICE_ATTR_RO(handle); 1198DEVICE_ATTR_RO(handle);
1197 1199
@@ -1225,8 +1227,39 @@ static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
1225} 1227}
1226static DEVICE_ATTR_RW(fail_cmd); 1228static DEVICE_ATTR_RW(fail_cmd);
1227 1229
1230static ssize_t fail_cmd_code_show(struct device *dev, struct device_attribute *attr,
1231 char *buf)
1232{
1233 int dimm = dimm_name_to_id(dev);
1234
1235 if (dimm < 0)
1236 return dimm;
1237
1238 return sprintf(buf, "%d\n", dimm_fail_cmd_code[dimm]);
1239}
1240
1241static ssize_t fail_cmd_code_store(struct device *dev, struct device_attribute *attr,
1242 const char *buf, size_t size)
1243{
1244 int dimm = dimm_name_to_id(dev);
1245 unsigned long val;
1246 ssize_t rc;
1247
1248 if (dimm < 0)
1249 return dimm;
1250
1251 rc = kstrtol(buf, 0, &val);
1252 if (rc)
1253 return rc;
1254
1255 dimm_fail_cmd_code[dimm] = val;
1256 return size;
1257}
1258static DEVICE_ATTR_RW(fail_cmd_code);
1259
1228static struct attribute *nfit_test_dimm_attributes[] = { 1260static struct attribute *nfit_test_dimm_attributes[] = {
1229 &dev_attr_fail_cmd.attr, 1261 &dev_attr_fail_cmd.attr,
1262 &dev_attr_fail_cmd_code.attr,
1230 &dev_attr_handle.attr, 1263 &dev_attr_handle.attr,
1231 NULL, 1264 NULL,
1232}; 1265};
@@ -1240,6 +1273,23 @@ static const struct attribute_group *nfit_test_dimm_attribute_groups[] = {
1240 NULL, 1273 NULL,
1241}; 1274};
1242 1275
1276static int nfit_test_dimm_init(struct nfit_test *t)
1277{
1278 int i;
1279
1280 if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t))
1281 return -ENOMEM;
1282 for (i = 0; i < t->num_dcr; i++) {
1283 t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm,
1284 &t->pdev.dev, 0, NULL,
1285 nfit_test_dimm_attribute_groups,
1286 "test_dimm%d", i + t->dcr_idx);
1287 if (!t->dimm_dev[i])
1288 return -ENOMEM;
1289 }
1290 return 0;
1291}
1292
1243static void smart_init(struct nfit_test *t) 1293static void smart_init(struct nfit_test *t)
1244{ 1294{
1245 int i; 1295 int i;
@@ -1335,17 +1385,8 @@ static int nfit_test0_alloc(struct nfit_test *t)
1335 if (!t->_fit) 1385 if (!t->_fit)
1336 return -ENOMEM; 1386 return -ENOMEM;
1337 1387
1338 if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t->dimm_dev)) 1388 if (nfit_test_dimm_init(t))
1339 return -ENOMEM; 1389 return -ENOMEM;
1340 for (i = 0; i < NUM_DCR; i++) {
1341 t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm,
1342 &t->pdev.dev, 0, NULL,
1343 nfit_test_dimm_attribute_groups,
1344 "test_dimm%d", i);
1345 if (!t->dimm_dev[i])
1346 return -ENOMEM;
1347 }
1348
1349 smart_init(t); 1390 smart_init(t);
1350 return ars_state_init(&t->pdev.dev, &t->ars_state); 1391 return ars_state_init(&t->pdev.dev, &t->ars_state);
1351} 1392}
@@ -1377,6 +1418,8 @@ static int nfit_test1_alloc(struct nfit_test *t)
1377 if (!t->spa_set[1]) 1418 if (!t->spa_set[1])
1378 return -ENOMEM; 1419 return -ENOMEM;
1379 1420
1421 if (nfit_test_dimm_init(t))
1422 return -ENOMEM;
1380 smart_init(t); 1423 smart_init(t);
1381 return ars_state_init(&t->pdev.dev, &t->ars_state); 1424 return ars_state_init(&t->pdev.dev, &t->ars_state);
1382} 1425}
@@ -2222,6 +2265,9 @@ static void nfit_test1_setup(struct nfit_test *t)
2222 set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en); 2265 set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
2223 set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en); 2266 set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
2224 set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en); 2267 set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en);
2268 set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
2269 set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
2270 set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
2225} 2271}
2226 2272
2227static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa, 2273static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 9cf83f895d98..5e1ab2f0eb79 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -12,3 +12,6 @@ test_tcpbpf_user
12test_verifier_log 12test_verifier_log
13feature 13feature
14test_libbpf_open 14test_libbpf_open
15test_sock
16test_sock_addr
17urandom_read
diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c
index 73bb20cfb9b7..f4d99fabc56d 100644
--- a/tools/testing/selftests/bpf/test_sock.c
+++ b/tools/testing/selftests/bpf/test_sock.c
@@ -13,6 +13,7 @@
13#include <bpf/bpf.h> 13#include <bpf/bpf.h>
14 14
15#include "cgroup_helpers.h" 15#include "cgroup_helpers.h"
16#include "bpf_rlimit.h"
16 17
17#ifndef ARRAY_SIZE 18#ifndef ARRAY_SIZE
18# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) 19# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
index d488f20926e8..2950f80ba7fb 100644
--- a/tools/testing/selftests/bpf/test_sock_addr.c
+++ b/tools/testing/selftests/bpf/test_sock_addr.c
@@ -15,6 +15,7 @@
15#include <bpf/libbpf.h> 15#include <bpf/libbpf.h>
16 16
17#include "cgroup_helpers.h" 17#include "cgroup_helpers.h"
18#include "bpf_rlimit.h"
18 19
19#define CG_PATH "/foo" 20#define CG_PATH "/foo"
20#define CONNECT4_PROG_PATH "./connect4_prog.o" 21#define CONNECT4_PROG_PATH "./connect4_prog.o"
diff --git a/tools/testing/selftests/bpf/test_sock_addr.sh b/tools/testing/selftests/bpf/test_sock_addr.sh
index c6e1dcf992c4..9832a875a828 100755
--- a/tools/testing/selftests/bpf/test_sock_addr.sh
+++ b/tools/testing/selftests/bpf/test_sock_addr.sh
@@ -4,7 +4,7 @@ set -eu
4 4
5ping_once() 5ping_once()
6{ 6{
7 ping -q -c 1 -W 1 ${1%%/*} >/dev/null 2>&1 7 ping -${1} -q -c 1 -W 1 ${2%%/*} >/dev/null 2>&1
8} 8}
9 9
10wait_for_ip() 10wait_for_ip()
@@ -13,7 +13,7 @@ wait_for_ip()
13 echo -n "Wait for testing IPv4/IPv6 to become available " 13 echo -n "Wait for testing IPv4/IPv6 to become available "
14 for _i in $(seq ${MAX_PING_TRIES}); do 14 for _i in $(seq ${MAX_PING_TRIES}); do
15 echo -n "." 15 echo -n "."
16 if ping_once ${TEST_IPv4} && ping_once ${TEST_IPv6}; then 16 if ping_once 4 ${TEST_IPv4} && ping_once 6 ${TEST_IPv6}; then
17 echo " OK" 17 echo " OK"
18 return 18 return
19 fi 19 fi
diff --git a/tools/testing/selftests/filesystems/Makefile b/tools/testing/selftests/filesystems/Makefile
index 4e6d09fb166f..5c7d7001ad37 100644
--- a/tools/testing/selftests/filesystems/Makefile
+++ b/tools/testing/selftests/filesystems/Makefile
@@ -1,8 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2TEST_PROGS := dnotify_test devpts_pts
3all: $(TEST_PROGS)
4 2
5include ../lib.mk 3TEST_GEN_PROGS := devpts_pts
4TEST_GEN_PROGS_EXTENDED := dnotify_test
6 5
7clean: 6include ../lib.mk
8 rm -fr $(TEST_PROGS)
diff --git a/tools/testing/selftests/firmware/Makefile b/tools/testing/selftests/firmware/Makefile
index 826f38d5dd19..261c81f08606 100644
--- a/tools/testing/selftests/firmware/Makefile
+++ b/tools/testing/selftests/firmware/Makefile
@@ -4,6 +4,7 @@
4all: 4all:
5 5
6TEST_PROGS := fw_run_tests.sh 6TEST_PROGS := fw_run_tests.sh
7TEST_FILES := fw_fallback.sh fw_filesystem.sh fw_lib.sh
7 8
8include ../lib.mk 9include ../lib.mk
9 10
diff --git a/tools/testing/selftests/firmware/fw_lib.sh b/tools/testing/selftests/firmware/fw_lib.sh
index 9ea31b57d71a..962d7f4ac627 100755
--- a/tools/testing/selftests/firmware/fw_lib.sh
+++ b/tools/testing/selftests/firmware/fw_lib.sh
@@ -154,11 +154,13 @@ test_finish()
154 if [ "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then 154 if [ "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
155 echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout 155 echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout
156 fi 156 fi
157 if [ "$OLD_FWPATH" = "" ]; then
158 OLD_FWPATH=" "
159 fi
160 if [ "$TEST_REQS_FW_SET_CUSTOM_PATH" = "yes" ]; then 157 if [ "$TEST_REQS_FW_SET_CUSTOM_PATH" = "yes" ]; then
161 echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path 158 if [ "$OLD_FWPATH" = "" ]; then
159 # A zero-length write won't work; write a null byte
160 printf '\000' >/sys/module/firmware_class/parameters/path
161 else
162 echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path
163 fi
162 fi 164 fi
163 if [ -f $FW ]; then 165 if [ -f $FW ]; then
164 rm -f "$FW" 166 rm -f "$FW"
diff --git a/tools/testing/selftests/firmware/fw_run_tests.sh b/tools/testing/selftests/firmware/fw_run_tests.sh
index 06d638e9dc62..cffdd4eb0a57 100755
--- a/tools/testing/selftests/firmware/fw_run_tests.sh
+++ b/tools/testing/selftests/firmware/fw_run_tests.sh
@@ -66,5 +66,5 @@ if [ -f $FW_FORCE_SYSFS_FALLBACK ]; then
66 run_test_config_0003 66 run_test_config_0003
67else 67else
68 echo "Running basic kernel configuration, working with your config" 68 echo "Running basic kernel configuration, working with your config"
69 run_test 69 run_tests
70fi 70fi
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc
index 786dce7e48be..2aabab363cfb 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc
@@ -29,7 +29,7 @@ do_reset
29 29
30echo "Test extended error support" 30echo "Test extended error support"
31echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' > events/sched/sched_wakeup/trigger 31echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' > events/sched/sched_wakeup/trigger
32echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' >> events/sched/sched_wakeup/trigger &>/dev/null 32! echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' >> events/sched/sched_wakeup/trigger 2> /dev/null
33if ! grep -q "ERROR:" events/sched/sched_wakeup/hist; then 33if ! grep -q "ERROR:" events/sched/sched_wakeup/hist; then
34 fail "Failed to generate extended error in histogram" 34 fail "Failed to generate extended error in histogram"
35fi 35fi
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc
new file mode 100644
index 000000000000..c193dce611a2
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc
@@ -0,0 +1,44 @@
1#!/bin/sh
2# description: event trigger - test multiple actions on hist trigger
3
4
5do_reset() {
6 reset_trigger
7 echo > set_event
8 clear_trace
9}
10
11fail() { #msg
12 do_reset
13 echo $1
14 exit_fail
15}
16
17if [ ! -f set_event ]; then
18 echo "event tracing is not supported"
19 exit_unsupported
20fi
21
22if [ ! -f synthetic_events ]; then
23 echo "synthetic event is not supported"
24 exit_unsupported
25fi
26
27clear_synthetic_events
28reset_tracer
29do_reset
30
31echo "Test multiple actions on hist trigger"
32echo 'wakeup_latency u64 lat; pid_t pid' >> synthetic_events
33TRIGGER1=events/sched/sched_wakeup/trigger
34TRIGGER2=events/sched/sched_switch/trigger
35
36echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="cyclictest"' > $TRIGGER1
37echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0 if next_comm=="cyclictest"' >> $TRIGGER2
38echo 'hist:keys=next_pid:onmatch(sched.sched_wakeup).wakeup_latency(sched.sched_switch.$wakeup_lat,next_pid) if next_comm=="cyclictest"' >> $TRIGGER2
39echo 'hist:keys=next_pid:onmatch(sched.sched_wakeup).wakeup_latency(sched.sched_switch.$wakeup_lat,prev_pid) if next_comm=="cyclictest"' >> $TRIGGER2
40echo 'hist:keys=next_pid if next_comm=="cyclictest"' >> $TRIGGER2
41
42do_reset
43
44exit 0
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index dc44de904797..2ddcc96ae456 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -4,17 +4,18 @@ top_srcdir = ../../../../
4UNAME_M := $(shell uname -m) 4UNAME_M := $(shell uname -m)
5 5
6LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c 6LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c
7LIBKVM_x86_64 = lib/x86.c 7LIBKVM_x86_64 = lib/x86.c lib/vmx.c
8 8
9TEST_GEN_PROGS_x86_64 = set_sregs_test 9TEST_GEN_PROGS_x86_64 = set_sregs_test
10TEST_GEN_PROGS_x86_64 += sync_regs_test 10TEST_GEN_PROGS_x86_64 += sync_regs_test
11TEST_GEN_PROGS_x86_64 += vmx_tsc_adjust_test
11 12
12TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M)) 13TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))
13LIBKVM += $(LIBKVM_$(UNAME_M)) 14LIBKVM += $(LIBKVM_$(UNAME_M))
14 15
15INSTALL_HDR_PATH = $(top_srcdir)/usr 16INSTALL_HDR_PATH = $(top_srcdir)/usr
16LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ 17LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
17CFLAGS += -O2 -g -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) 18CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_HDR_PATH) -Iinclude -I$(<D)
18 19
19# After inclusion, $(OUTPUT) is defined and 20# After inclusion, $(OUTPUT) is defined and
20# $(TEST_GEN_PROGS) starts with $(OUTPUT)/ 21# $(TEST_GEN_PROGS) starts with $(OUTPUT)/
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 57974ad46373..637b7017b6ee 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -112,24 +112,27 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
112vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, 112vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm,
113 vm_paddr_t paddr_min, uint32_t memslot); 113 vm_paddr_t paddr_min, uint32_t memslot);
114 114
115void kvm_get_supported_cpuid(struct kvm_cpuid2 *cpuid); 115struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
116void vcpu_set_cpuid( 116void vcpu_set_cpuid(
117 struct kvm_vm *vm, uint32_t vcpuid, struct kvm_cpuid2 *cpuid); 117 struct kvm_vm *vm, uint32_t vcpuid, struct kvm_cpuid2 *cpuid);
118 118
119struct kvm_cpuid2 *allocate_kvm_cpuid2(void);
120struct kvm_cpuid_entry2 * 119struct kvm_cpuid_entry2 *
121find_cpuid_index_entry(struct kvm_cpuid2 *cpuid, uint32_t function, 120kvm_get_supported_cpuid_index(uint32_t function, uint32_t index);
122 uint32_t index);
123 121
124static inline struct kvm_cpuid_entry2 * 122static inline struct kvm_cpuid_entry2 *
125find_cpuid_entry(struct kvm_cpuid2 *cpuid, uint32_t function) 123kvm_get_supported_cpuid_entry(uint32_t function)
126{ 124{
127 return find_cpuid_index_entry(cpuid, function, 0); 125 return kvm_get_supported_cpuid_index(function, 0);
128} 126}
129 127
130struct kvm_vm *vm_create_default(uint32_t vcpuid, void *guest_code); 128struct kvm_vm *vm_create_default(uint32_t vcpuid, void *guest_code);
131void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code); 129void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code);
132 130
131typedef void (*vmx_guest_code_t)(vm_vaddr_t vmxon_vaddr,
132 vm_paddr_t vmxon_paddr,
133 vm_vaddr_t vmcs_vaddr,
134 vm_paddr_t vmcs_paddr);
135
133struct kvm_userspace_memory_region * 136struct kvm_userspace_memory_region *
134kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start, 137kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
135 uint64_t end); 138 uint64_t end);
diff --git a/tools/testing/selftests/kvm/include/vmx.h b/tools/testing/selftests/kvm/include/vmx.h
new file mode 100644
index 000000000000..6ed8499807fd
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/vmx.h
@@ -0,0 +1,494 @@
1/*
2 * tools/testing/selftests/kvm/include/vmx.h
3 *
4 * Copyright (C) 2018, Google LLC.
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2.
7 *
8 */
9
10#ifndef SELFTEST_KVM_VMX_H
11#define SELFTEST_KVM_VMX_H
12
13#include <stdint.h>
14#include "x86.h"
15
16#define CPUID_VMX_BIT 5
17
18#define CPUID_VMX (1 << 5)
19
20/*
21 * Definitions of Primary Processor-Based VM-Execution Controls.
22 */
23#define CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004
24#define CPU_BASED_USE_TSC_OFFSETING 0x00000008
25#define CPU_BASED_HLT_EXITING 0x00000080
26#define CPU_BASED_INVLPG_EXITING 0x00000200
27#define CPU_BASED_MWAIT_EXITING 0x00000400
28#define CPU_BASED_RDPMC_EXITING 0x00000800
29#define CPU_BASED_RDTSC_EXITING 0x00001000
30#define CPU_BASED_CR3_LOAD_EXITING 0x00008000
31#define CPU_BASED_CR3_STORE_EXITING 0x00010000
32#define CPU_BASED_CR8_LOAD_EXITING 0x00080000
33#define CPU_BASED_CR8_STORE_EXITING 0x00100000
34#define CPU_BASED_TPR_SHADOW 0x00200000
35#define CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000
36#define CPU_BASED_MOV_DR_EXITING 0x00800000
37#define CPU_BASED_UNCOND_IO_EXITING 0x01000000
38#define CPU_BASED_USE_IO_BITMAPS 0x02000000
39#define CPU_BASED_MONITOR_TRAP 0x08000000
40#define CPU_BASED_USE_MSR_BITMAPS 0x10000000
41#define CPU_BASED_MONITOR_EXITING 0x20000000
42#define CPU_BASED_PAUSE_EXITING 0x40000000
43#define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000
44
45#define CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x0401e172
46
47/*
48 * Definitions of Secondary Processor-Based VM-Execution Controls.
49 */
50#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
51#define SECONDARY_EXEC_ENABLE_EPT 0x00000002
52#define SECONDARY_EXEC_DESC 0x00000004
53#define SECONDARY_EXEC_RDTSCP 0x00000008
54#define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010
55#define SECONDARY_EXEC_ENABLE_VPID 0x00000020
56#define SECONDARY_EXEC_WBINVD_EXITING 0x00000040
57#define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080
58#define SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100
59#define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200
60#define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400
61#define SECONDARY_EXEC_RDRAND_EXITING 0x00000800
62#define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000
63#define SECONDARY_EXEC_ENABLE_VMFUNC 0x00002000
64#define SECONDARY_EXEC_SHADOW_VMCS 0x00004000
65#define SECONDARY_EXEC_RDSEED_EXITING 0x00010000
66#define SECONDARY_EXEC_ENABLE_PML 0x00020000
67#define SECONDARY_EPT_VE 0x00040000
68#define SECONDARY_ENABLE_XSAV_RESTORE 0x00100000
69#define SECONDARY_EXEC_TSC_SCALING 0x02000000
70
71#define PIN_BASED_EXT_INTR_MASK 0x00000001
72#define PIN_BASED_NMI_EXITING 0x00000008
73#define PIN_BASED_VIRTUAL_NMIS 0x00000020
74#define PIN_BASED_VMX_PREEMPTION_TIMER 0x00000040
75#define PIN_BASED_POSTED_INTR 0x00000080
76
77#define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x00000016
78
79#define VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004
80#define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200
81#define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000
82#define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000
83#define VM_EXIT_SAVE_IA32_PAT 0x00040000
84#define VM_EXIT_LOAD_IA32_PAT 0x00080000
85#define VM_EXIT_SAVE_IA32_EFER 0x00100000
86#define VM_EXIT_LOAD_IA32_EFER 0x00200000
87#define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000
88
89#define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR 0x00036dff
90
91#define VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004
92#define VM_ENTRY_IA32E_MODE 0x00000200
93#define VM_ENTRY_SMM 0x00000400
94#define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800
95#define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000
96#define VM_ENTRY_LOAD_IA32_PAT 0x00004000
97#define VM_ENTRY_LOAD_IA32_EFER 0x00008000
98
99#define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR 0x000011ff
100
101#define VMX_MISC_PREEMPTION_TIMER_RATE_MASK 0x0000001f
102#define VMX_MISC_SAVE_EFER_LMA 0x00000020
103
104#define EXIT_REASON_FAILED_VMENTRY 0x80000000
105#define EXIT_REASON_EXCEPTION_NMI 0
106#define EXIT_REASON_EXTERNAL_INTERRUPT 1
107#define EXIT_REASON_TRIPLE_FAULT 2
108#define EXIT_REASON_PENDING_INTERRUPT 7
109#define EXIT_REASON_NMI_WINDOW 8
110#define EXIT_REASON_TASK_SWITCH 9
111#define EXIT_REASON_CPUID 10
112#define EXIT_REASON_HLT 12
113#define EXIT_REASON_INVD 13
114#define EXIT_REASON_INVLPG 14
115#define EXIT_REASON_RDPMC 15
116#define EXIT_REASON_RDTSC 16
117#define EXIT_REASON_VMCALL 18
118#define EXIT_REASON_VMCLEAR 19
119#define EXIT_REASON_VMLAUNCH 20
120#define EXIT_REASON_VMPTRLD 21
121#define EXIT_REASON_VMPTRST 22
122#define EXIT_REASON_VMREAD 23
123#define EXIT_REASON_VMRESUME 24
124#define EXIT_REASON_VMWRITE 25
125#define EXIT_REASON_VMOFF 26
126#define EXIT_REASON_VMON 27
127#define EXIT_REASON_CR_ACCESS 28
128#define EXIT_REASON_DR_ACCESS 29
129#define EXIT_REASON_IO_INSTRUCTION 30
130#define EXIT_REASON_MSR_READ 31
131#define EXIT_REASON_MSR_WRITE 32
132#define EXIT_REASON_INVALID_STATE 33
133#define EXIT_REASON_MWAIT_INSTRUCTION 36
134#define EXIT_REASON_MONITOR_INSTRUCTION 39
135#define EXIT_REASON_PAUSE_INSTRUCTION 40
136#define EXIT_REASON_MCE_DURING_VMENTRY 41
137#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
138#define EXIT_REASON_APIC_ACCESS 44
139#define EXIT_REASON_EOI_INDUCED 45
140#define EXIT_REASON_EPT_VIOLATION 48
141#define EXIT_REASON_EPT_MISCONFIG 49
142#define EXIT_REASON_INVEPT 50
143#define EXIT_REASON_RDTSCP 51
144#define EXIT_REASON_PREEMPTION_TIMER 52
145#define EXIT_REASON_INVVPID 53
146#define EXIT_REASON_WBINVD 54
147#define EXIT_REASON_XSETBV 55
148#define EXIT_REASON_APIC_WRITE 56
149#define EXIT_REASON_INVPCID 58
150#define EXIT_REASON_PML_FULL 62
151#define EXIT_REASON_XSAVES 63
152#define EXIT_REASON_XRSTORS 64
153#define LAST_EXIT_REASON 64
154
/*
 * VMCS field encodings (SDM Vol. 3, Appendix B).  Fields are grouped by
 * width; each 64-bit field has a companion "_HIGH" encoding (encoding+1)
 * for accessing its upper half from 32-bit hosts.
 */
enum vmcs_field {
	/* 16-bit control fields */
	VIRTUAL_PROCESSOR_ID = 0x00000000,
	POSTED_INTR_NV = 0x00000002,
	/* 16-bit guest-state fields */
	GUEST_ES_SELECTOR = 0x00000800,
	GUEST_CS_SELECTOR = 0x00000802,
	GUEST_SS_SELECTOR = 0x00000804,
	GUEST_DS_SELECTOR = 0x00000806,
	GUEST_FS_SELECTOR = 0x00000808,
	GUEST_GS_SELECTOR = 0x0000080a,
	GUEST_LDTR_SELECTOR = 0x0000080c,
	GUEST_TR_SELECTOR = 0x0000080e,
	GUEST_INTR_STATUS = 0x00000810,
	GUEST_PML_INDEX = 0x00000812,
	/* 16-bit host-state fields */
	HOST_ES_SELECTOR = 0x00000c00,
	HOST_CS_SELECTOR = 0x00000c02,
	HOST_SS_SELECTOR = 0x00000c04,
	HOST_DS_SELECTOR = 0x00000c06,
	HOST_FS_SELECTOR = 0x00000c08,
	HOST_GS_SELECTOR = 0x00000c0a,
	HOST_TR_SELECTOR = 0x00000c0c,
	/* 64-bit control fields */
	IO_BITMAP_A = 0x00002000,
	IO_BITMAP_A_HIGH = 0x00002001,
	IO_BITMAP_B = 0x00002002,
	IO_BITMAP_B_HIGH = 0x00002003,
	MSR_BITMAP = 0x00002004,
	MSR_BITMAP_HIGH = 0x00002005,
	VM_EXIT_MSR_STORE_ADDR = 0x00002006,
	VM_EXIT_MSR_STORE_ADDR_HIGH = 0x00002007,
	VM_EXIT_MSR_LOAD_ADDR = 0x00002008,
	VM_EXIT_MSR_LOAD_ADDR_HIGH = 0x00002009,
	VM_ENTRY_MSR_LOAD_ADDR = 0x0000200a,
	VM_ENTRY_MSR_LOAD_ADDR_HIGH = 0x0000200b,
	PML_ADDRESS = 0x0000200e,
	PML_ADDRESS_HIGH = 0x0000200f,
	TSC_OFFSET = 0x00002010,
	TSC_OFFSET_HIGH = 0x00002011,
	VIRTUAL_APIC_PAGE_ADDR = 0x00002012,
	VIRTUAL_APIC_PAGE_ADDR_HIGH = 0x00002013,
	APIC_ACCESS_ADDR = 0x00002014,
	APIC_ACCESS_ADDR_HIGH = 0x00002015,
	POSTED_INTR_DESC_ADDR = 0x00002016,
	POSTED_INTR_DESC_ADDR_HIGH = 0x00002017,
	EPT_POINTER = 0x0000201a,
	EPT_POINTER_HIGH = 0x0000201b,
	EOI_EXIT_BITMAP0 = 0x0000201c,
	EOI_EXIT_BITMAP0_HIGH = 0x0000201d,
	EOI_EXIT_BITMAP1 = 0x0000201e,
	EOI_EXIT_BITMAP1_HIGH = 0x0000201f,
	EOI_EXIT_BITMAP2 = 0x00002020,
	EOI_EXIT_BITMAP2_HIGH = 0x00002021,
	EOI_EXIT_BITMAP3 = 0x00002022,
	EOI_EXIT_BITMAP3_HIGH = 0x00002023,
	VMREAD_BITMAP = 0x00002026,
	VMREAD_BITMAP_HIGH = 0x00002027,
	VMWRITE_BITMAP = 0x00002028,
	VMWRITE_BITMAP_HIGH = 0x00002029,
	XSS_EXIT_BITMAP = 0x0000202C,
	XSS_EXIT_BITMAP_HIGH = 0x0000202D,
	TSC_MULTIPLIER = 0x00002032,
	TSC_MULTIPLIER_HIGH = 0x00002033,
	/* 64-bit read-only data field */
	GUEST_PHYSICAL_ADDRESS = 0x00002400,
	GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401,
	/* 64-bit guest-state fields */
	VMCS_LINK_POINTER = 0x00002800,
	VMCS_LINK_POINTER_HIGH = 0x00002801,
	GUEST_IA32_DEBUGCTL = 0x00002802,
	GUEST_IA32_DEBUGCTL_HIGH = 0x00002803,
	GUEST_IA32_PAT = 0x00002804,
	GUEST_IA32_PAT_HIGH = 0x00002805,
	GUEST_IA32_EFER = 0x00002806,
	GUEST_IA32_EFER_HIGH = 0x00002807,
	GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808,
	GUEST_IA32_PERF_GLOBAL_CTRL_HIGH= 0x00002809,
	GUEST_PDPTR0 = 0x0000280a,
	GUEST_PDPTR0_HIGH = 0x0000280b,
	GUEST_PDPTR1 = 0x0000280c,
	GUEST_PDPTR1_HIGH = 0x0000280d,
	GUEST_PDPTR2 = 0x0000280e,
	GUEST_PDPTR2_HIGH = 0x0000280f,
	GUEST_PDPTR3 = 0x00002810,
	GUEST_PDPTR3_HIGH = 0x00002811,
	GUEST_BNDCFGS = 0x00002812,
	GUEST_BNDCFGS_HIGH = 0x00002813,
	/* 64-bit host-state fields */
	HOST_IA32_PAT = 0x00002c00,
	HOST_IA32_PAT_HIGH = 0x00002c01,
	HOST_IA32_EFER = 0x00002c02,
	HOST_IA32_EFER_HIGH = 0x00002c03,
	HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04,
	HOST_IA32_PERF_GLOBAL_CTRL_HIGH = 0x00002c05,
	/* 32-bit control fields */
	PIN_BASED_VM_EXEC_CONTROL = 0x00004000,
	CPU_BASED_VM_EXEC_CONTROL = 0x00004002,
	EXCEPTION_BITMAP = 0x00004004,
	PAGE_FAULT_ERROR_CODE_MASK = 0x00004006,
	PAGE_FAULT_ERROR_CODE_MATCH = 0x00004008,
	CR3_TARGET_COUNT = 0x0000400a,
	VM_EXIT_CONTROLS = 0x0000400c,
	VM_EXIT_MSR_STORE_COUNT = 0x0000400e,
	VM_EXIT_MSR_LOAD_COUNT = 0x00004010,
	VM_ENTRY_CONTROLS = 0x00004012,
	VM_ENTRY_MSR_LOAD_COUNT = 0x00004014,
	VM_ENTRY_INTR_INFO_FIELD = 0x00004016,
	VM_ENTRY_EXCEPTION_ERROR_CODE = 0x00004018,
	VM_ENTRY_INSTRUCTION_LEN = 0x0000401a,
	TPR_THRESHOLD = 0x0000401c,
	SECONDARY_VM_EXEC_CONTROL = 0x0000401e,
	PLE_GAP = 0x00004020,
	PLE_WINDOW = 0x00004022,
	/* 32-bit read-only data fields */
	VM_INSTRUCTION_ERROR = 0x00004400,
	VM_EXIT_REASON = 0x00004402,
	VM_EXIT_INTR_INFO = 0x00004404,
	VM_EXIT_INTR_ERROR_CODE = 0x00004406,
	IDT_VECTORING_INFO_FIELD = 0x00004408,
	IDT_VECTORING_ERROR_CODE = 0x0000440a,
	VM_EXIT_INSTRUCTION_LEN = 0x0000440c,
	VMX_INSTRUCTION_INFO = 0x0000440e,
	/* 32-bit guest-state fields */
	GUEST_ES_LIMIT = 0x00004800,
	GUEST_CS_LIMIT = 0x00004802,
	GUEST_SS_LIMIT = 0x00004804,
	GUEST_DS_LIMIT = 0x00004806,
	GUEST_FS_LIMIT = 0x00004808,
	GUEST_GS_LIMIT = 0x0000480a,
	GUEST_LDTR_LIMIT = 0x0000480c,
	GUEST_TR_LIMIT = 0x0000480e,
	GUEST_GDTR_LIMIT = 0x00004810,
	GUEST_IDTR_LIMIT = 0x00004812,
	GUEST_ES_AR_BYTES = 0x00004814,
	GUEST_CS_AR_BYTES = 0x00004816,
	GUEST_SS_AR_BYTES = 0x00004818,
	GUEST_DS_AR_BYTES = 0x0000481a,
	GUEST_FS_AR_BYTES = 0x0000481c,
	GUEST_GS_AR_BYTES = 0x0000481e,
	GUEST_LDTR_AR_BYTES = 0x00004820,
	GUEST_TR_AR_BYTES = 0x00004822,
	GUEST_INTERRUPTIBILITY_INFO = 0x00004824,
	GUEST_ACTIVITY_STATE = 0X00004826,
	GUEST_SYSENTER_CS = 0x0000482A,
	VMX_PREEMPTION_TIMER_VALUE = 0x0000482E,
	/* 32-bit host-state field */
	HOST_IA32_SYSENTER_CS = 0x00004c00,
	/* Natural-width control fields */
	CR0_GUEST_HOST_MASK = 0x00006000,
	CR4_GUEST_HOST_MASK = 0x00006002,
	CR0_READ_SHADOW = 0x00006004,
	CR4_READ_SHADOW = 0x00006006,
	CR3_TARGET_VALUE0 = 0x00006008,
	CR3_TARGET_VALUE1 = 0x0000600a,
	CR3_TARGET_VALUE2 = 0x0000600c,
	CR3_TARGET_VALUE3 = 0x0000600e,
	/* Natural-width read-only data fields */
	EXIT_QUALIFICATION = 0x00006400,
	GUEST_LINEAR_ADDRESS = 0x0000640a,
	/* Natural-width guest-state fields */
	GUEST_CR0 = 0x00006800,
	GUEST_CR3 = 0x00006802,
	GUEST_CR4 = 0x00006804,
	GUEST_ES_BASE = 0x00006806,
	GUEST_CS_BASE = 0x00006808,
	GUEST_SS_BASE = 0x0000680a,
	GUEST_DS_BASE = 0x0000680c,
	GUEST_FS_BASE = 0x0000680e,
	GUEST_GS_BASE = 0x00006810,
	GUEST_LDTR_BASE = 0x00006812,
	GUEST_TR_BASE = 0x00006814,
	GUEST_GDTR_BASE = 0x00006816,
	GUEST_IDTR_BASE = 0x00006818,
	GUEST_DR7 = 0x0000681a,
	GUEST_RSP = 0x0000681c,
	GUEST_RIP = 0x0000681e,
	GUEST_RFLAGS = 0x00006820,
	GUEST_PENDING_DBG_EXCEPTIONS = 0x00006822,
	GUEST_SYSENTER_ESP = 0x00006824,
	GUEST_SYSENTER_EIP = 0x00006826,
	/* Natural-width host-state fields */
	HOST_CR0 = 0x00006c00,
	HOST_CR3 = 0x00006c02,
	HOST_CR4 = 0x00006c04,
	HOST_FS_BASE = 0x00006c06,
	HOST_GS_BASE = 0x00006c08,
	HOST_TR_BASE = 0x00006c0a,
	HOST_GDTR_BASE = 0x00006c0c,
	HOST_IDTR_BASE = 0x00006c0e,
	HOST_IA32_SYSENTER_ESP = 0x00006c10,
	HOST_IA32_SYSENTER_EIP = 0x00006c12,
	HOST_RSP = 0x00006c14,
	HOST_RIP = 0x00006c16,
};
335
/*
 * One entry of a VM-entry/VM-exit MSR load/store list.  The 16-byte
 * alignment matches the layout the hardware consumes (SDM 24.7.2).
 */
struct vmx_msr_entry {
	uint32_t index;		/* MSR number */
	uint32_t reserved;	/* must be zero */
	uint64_t value;		/* MSR value to load/store */
} __attribute__ ((aligned(16)));
341
/*
 * Enter VMX root operation.  @phys is the physical address of the VMXON
 * region (whose first dword the caller must set to the VMCS revision id).
 * Returns 0 on success, non-zero if the instruction failed (setna
 * captures CF or ZF set).
 */
static inline int vmxon(uint64_t phys)
{
	uint8_t ret;

	__asm__ __volatile__ ("vmxon %[pa]; setna %[ret]"
		: [ret]"=rm"(ret)
		: [pa]"m"(phys)
		: "cc", "memory");

	return ret;
}
353
/* Leave VMX root operation.  Failure (CF/ZF) is not checked here. */
static inline void vmxoff(void)
{
	__asm__ __volatile__("vmxoff");
}
358
/*
 * Clear the VMCS at physical address @vmcs_pa (launch state -> "clear",
 * cached state flushed to memory).  Returns 0 on success, non-zero on
 * failure (setna captures CF or ZF set).
 */
static inline int vmclear(uint64_t vmcs_pa)
{
	uint8_t ret;

	__asm__ __volatile__ ("vmclear %[pa]; setna %[ret]"
		: [ret]"=rm"(ret)
		: [pa]"m"(vmcs_pa)
		: "cc", "memory");

	return ret;
}
370
/*
 * Make the VMCS at physical address @vmcs_pa the current VMCS.
 * Returns 0 on success, non-zero on failure (setna captures CF or ZF).
 */
static inline int vmptrld(uint64_t vmcs_pa)
{
	uint8_t ret;

	__asm__ __volatile__ ("vmptrld %[pa]; setna %[ret]"
		: [ret]"=rm"(ret)
		: [pa]"m"(vmcs_pa)
		: "cc", "memory");

	return ret;
}
382
/*
 * No guest state (e.g. GPRs) is established by this vmlaunch; the guest
 * runs with whatever the VMCS guest-state fields contain.
 *
 * The asm saves rbp/rcx/rdx/rsi/rdi on the stack (the remaining GPRs
 * are declared as clobbers), pushes a zero "result" slot, points
 * HOST_RSP at the current stack and HOST_RIP at label 1 so a VM-exit
 * resumes right here, then executes vmlaunch.  The "incq (%rsp)" only
 * runs if vmlaunch falls through, i.e. fails; label 1 pops the result
 * slot into rax.  Returns 0 if a VM-exit occurred, non-zero if vmlaunch
 * itself failed.
 */
static inline int vmlaunch(void)
{
	int ret;

	__asm__ __volatile__("push %%rbp;"
			     "push %%rcx;"
			     "push %%rdx;"
			     "push %%rsi;"
			     "push %%rdi;"
			     "push $0;"
			     "vmwrite %%rsp, %[host_rsp];"
			     "lea 1f(%%rip), %%rax;"
			     "vmwrite %%rax, %[host_rip];"
			     "vmlaunch;"
			     "incq (%%rsp);"
			     "1: pop %%rax;"
			     "pop %%rdi;"
			     "pop %%rsi;"
			     "pop %%rdx;"
			     "pop %%rcx;"
			     "pop %%rbp;"
			     : [ret]"=&a"(ret)
			     : [host_rsp]"r"((uint64_t)HOST_RSP),
			       [host_rip]"r"((uint64_t)HOST_RIP)
			     : "memory", "cc", "rbx", "r8", "r9", "r10",
			       "r11", "r12", "r13", "r14", "r15");
	return ret;
}
414
/*
 * No guest state (e.g. GPRs) is established by this vmresume; the guest
 * continues with whatever the VMCS guest-state fields contain.
 *
 * Same save/HOST_RSP/HOST_RIP/result-slot dance as vmlaunch() above,
 * but resumes a launched VMCS.  Returns 0 if a VM-exit occurred,
 * non-zero if vmresume itself failed.
 */
static inline int vmresume(void)
{
	int ret;

	__asm__ __volatile__("push %%rbp;"
			     "push %%rcx;"
			     "push %%rdx;"
			     "push %%rsi;"
			     "push %%rdi;"
			     "push $0;"
			     "vmwrite %%rsp, %[host_rsp];"
			     "lea 1f(%%rip), %%rax;"
			     "vmwrite %%rax, %[host_rip];"
			     "vmresume;"
			     "incq (%%rsp);"
			     "1: pop %%rax;"
			     "pop %%rdi;"
			     "pop %%rsi;"
			     "pop %%rdx;"
			     "pop %%rcx;"
			     "pop %%rbp;"
			     : [ret]"=&a"(ret)
			     : [host_rsp]"r"((uint64_t)HOST_RSP),
			       [host_rip]"r"((uint64_t)HOST_RIP)
			     : "memory", "cc", "rbx", "r8", "r9", "r10",
			       "r11", "r12", "r13", "r14", "r15");
	return ret;
}
446
/*
 * Read the current VMCS field identified by @encoding into *@value.
 * Returns 0 on success, non-zero on failure (setna captures CF or ZF).
 *
 * A failed vmread leaves its destination operand unmodified, so tmp is
 * pre-zeroed and passed as a read-write ("+rm") operand; with a pure
 * output ("=rm") operand the compiler could delete the zero-initializer
 * as a dead store.  This guarantees *value is 0 — not stack garbage —
 * on failure, which vmreadz() relies on for its "returns zero if the
 * vmread instruction fails" contract.
 */
static inline int vmread(uint64_t encoding, uint64_t *value)
{
	uint64_t tmp = 0;
	uint8_t ret;

	__asm__ __volatile__("vmread %[encoding], %[value]; setna %[ret]"
		: [value]"+rm"(tmp), [ret]"=rm"(ret)
		: [encoding]"r"(encoding)
		: "cc", "memory");

	*value = tmp;
	return ret;
}
460
/*
 * Convenience wrapper around vmread(): returns the value read, or zero
 * when the vmread instruction reports failure (the error code itself is
 * deliberately discarded).
 */
static inline uint64_t vmreadz(uint64_t encoding)
{
	uint64_t result = 0;

	(void)vmread(encoding, &result);
	return result;
}
471
/*
 * Write @value to the current VMCS field identified by @encoding.
 * Returns 0 on success, non-zero on failure (setna captures CF or ZF).
 */
static inline int vmwrite(uint64_t encoding, uint64_t value)
{
	uint8_t ret;

	__asm__ __volatile__ ("vmwrite %[value], %[encoding]; setna %[ret]"
		: [ret]"=rm"(ret)
		: [value]"rm"(value), [encoding]"r"(encoding)
		: "cc", "memory");

	return ret;
}
483
/*
 * VMCS revision identifier: the low 32 bits of IA32_VMX_BASIC (the
 * return type truncates the 64-bit MSR value).
 */
static inline uint32_t vmcs_revision(void)
{
	return rdmsr(MSR_IA32_VMX_BASIC);
}
488
/* One-time per-CPU setup (CR0/CR4 fixed bits, IA32_FEATURE_CONTROL) before VMXON. */
void prepare_for_vmx_operation(void);
/* Populate the current VMCS's control, host-state and guest-state fields. */
void prepare_vmcs(void *guest_rip, void *guest_rsp);
/* Create a one-vCPU VM prepared for VMX tests (see lib/vmx.c). */
struct kvm_vm *vm_create_default_vmx(uint32_t vcpuid,
				     vmx_guest_code_t guest_code);
493
494#endif /* !SELFTEST_KVM_VMX_H */
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 7ca1bb40c498..2cedfda181d4 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -378,7 +378,7 @@ int kvm_memcmp_hva_gva(void *hva,
378 * complicated. This function uses a reasonable default length for 378 * complicated. This function uses a reasonable default length for
379 * the array and performs the appropriate allocation. 379 * the array and performs the appropriate allocation.
380 */ 380 */
381struct kvm_cpuid2 *allocate_kvm_cpuid2(void) 381static struct kvm_cpuid2 *allocate_kvm_cpuid2(void)
382{ 382{
383 struct kvm_cpuid2 *cpuid; 383 struct kvm_cpuid2 *cpuid;
384 int nent = 100; 384 int nent = 100;
@@ -402,17 +402,21 @@ struct kvm_cpuid2 *allocate_kvm_cpuid2(void)
402 * Input Args: None 402 * Input Args: None
403 * 403 *
404 * Output Args: 404 * Output Args:
405 * cpuid - The supported KVM CPUID
406 * 405 *
407 * Return: void 406 * Return: The supported KVM CPUID
408 * 407 *
409 * Get the guest CPUID supported by KVM. 408 * Get the guest CPUID supported by KVM.
410 */ 409 */
411void kvm_get_supported_cpuid(struct kvm_cpuid2 *cpuid) 410struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
412{ 411{
412 static struct kvm_cpuid2 *cpuid;
413 int ret; 413 int ret;
414 int kvm_fd; 414 int kvm_fd;
415 415
416 if (cpuid)
417 return cpuid;
418
419 cpuid = allocate_kvm_cpuid2();
416 kvm_fd = open(KVM_DEV_PATH, O_RDONLY); 420 kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
417 TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i", 421 TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i",
418 KVM_DEV_PATH, kvm_fd, errno); 422 KVM_DEV_PATH, kvm_fd, errno);
@@ -422,6 +426,7 @@ void kvm_get_supported_cpuid(struct kvm_cpuid2 *cpuid)
422 ret, errno); 426 ret, errno);
423 427
424 close(kvm_fd); 428 close(kvm_fd);
429 return cpuid;
425} 430}
426 431
427/* Locate a cpuid entry. 432/* Locate a cpuid entry.
@@ -435,12 +440,13 @@ void kvm_get_supported_cpuid(struct kvm_cpuid2 *cpuid)
435 * Return: A pointer to the cpuid entry. Never returns NULL. 440 * Return: A pointer to the cpuid entry. Never returns NULL.
436 */ 441 */
437struct kvm_cpuid_entry2 * 442struct kvm_cpuid_entry2 *
438find_cpuid_index_entry(struct kvm_cpuid2 *cpuid, uint32_t function, 443kvm_get_supported_cpuid_index(uint32_t function, uint32_t index)
439 uint32_t index)
440{ 444{
445 struct kvm_cpuid2 *cpuid;
441 struct kvm_cpuid_entry2 *entry = NULL; 446 struct kvm_cpuid_entry2 *entry = NULL;
442 int i; 447 int i;
443 448
449 cpuid = kvm_get_supported_cpuid();
444 for (i = 0; i < cpuid->nent; i++) { 450 for (i = 0; i < cpuid->nent; i++) {
445 if (cpuid->entries[i].function == function && 451 if (cpuid->entries[i].function == function &&
446 cpuid->entries[i].index == index) { 452 cpuid->entries[i].index == index) {
@@ -1435,7 +1441,7 @@ vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm,
1435 sparsebit_idx_t pg; 1441 sparsebit_idx_t pg;
1436 1442
1437 TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " 1443 TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
1438 "not divisable by page size.\n" 1444 "not divisible by page size.\n"
1439 " paddr_min: 0x%lx page_size: 0x%x", 1445 " paddr_min: 0x%lx page_size: 0x%x",
1440 paddr_min, vm->page_size); 1446 paddr_min, vm->page_size);
1441 1447
diff --git a/tools/testing/selftests/kvm/lib/sparsebit.c b/tools/testing/selftests/kvm/lib/sparsebit.c
index 0c5cf3e0cb6f..b132bc95d183 100644
--- a/tools/testing/selftests/kvm/lib/sparsebit.c
+++ b/tools/testing/selftests/kvm/lib/sparsebit.c
@@ -121,7 +121,7 @@
121 * avoided by moving the setting of the nodes mask bits into 121 * avoided by moving the setting of the nodes mask bits into
122 * the previous nodes num_after setting. 122 * the previous nodes num_after setting.
123 * 123 *
124 * + Node starting index is evenly divisable by the number of bits 124 * + Node starting index is evenly divisible by the number of bits
125 * within a nodes mask member. 125 * within a nodes mask member.
126 * 126 *
127 * + Nodes never represent a range of bits that wrap around the 127 * + Nodes never represent a range of bits that wrap around the
@@ -1741,7 +1741,7 @@ void sparsebit_validate_internal(struct sparsebit *s)
1741 1741
1742 /* Validate node index is divisible by the mask size */ 1742 /* Validate node index is divisible by the mask size */
1743 if (nodep->idx % MASK_BITS) { 1743 if (nodep->idx % MASK_BITS) {
1744 fprintf(stderr, "Node index not divisable by " 1744 fprintf(stderr, "Node index not divisible by "
1745 "mask size,\n" 1745 "mask size,\n"
1746 " nodep: %p nodep->idx: 0x%lx " 1746 " nodep: %p nodep->idx: 0x%lx "
1747 "MASK_BITS: %lu\n", 1747 "MASK_BITS: %lu\n",
diff --git a/tools/testing/selftests/kvm/lib/vmx.c b/tools/testing/selftests/kvm/lib/vmx.c
new file mode 100644
index 000000000000..0231bc0aae7b
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/vmx.c
@@ -0,0 +1,243 @@
1/*
2 * tools/testing/selftests/kvm/lib/x86.c
3 *
4 * Copyright (C) 2018, Google LLC.
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2.
7 */
8
9#define _GNU_SOURCE /* for program_invocation_name */
10
11#include "test_util.h"
12#include "kvm_util.h"
13#include "x86.h"
14#include "vmx.h"
15
16/* Create a default VM for VMX tests.
17 *
18 * Input Args:
19 * vcpuid - The id of the single VCPU to add to the VM.
20 * guest_code - The vCPU's entry point
21 *
22 * Output Args: None
23 *
24 * Return:
25 * Pointer to opaque structure that describes the created VM.
26 */
27struct kvm_vm *
28vm_create_default_vmx(uint32_t vcpuid, vmx_guest_code_t guest_code)
29{
30 struct kvm_cpuid2 *cpuid;
31 struct kvm_vm *vm;
32 vm_vaddr_t vmxon_vaddr;
33 vm_paddr_t vmxon_paddr;
34 vm_vaddr_t vmcs_vaddr;
35 vm_paddr_t vmcs_paddr;
36
37 vm = vm_create_default(vcpuid, (void *) guest_code);
38
39 /* Enable nesting in CPUID */
40 vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid());
41
42 /* Setup of a region of guest memory for the vmxon region. */
43 vmxon_vaddr = vm_vaddr_alloc(vm, getpagesize(), 0, 0, 0);
44 vmxon_paddr = addr_gva2gpa(vm, vmxon_vaddr);
45
46 /* Setup of a region of guest memory for a vmcs. */
47 vmcs_vaddr = vm_vaddr_alloc(vm, getpagesize(), 0, 0, 0);
48 vmcs_paddr = addr_gva2gpa(vm, vmcs_vaddr);
49
50 vcpu_args_set(vm, vcpuid, 4, vmxon_vaddr, vmxon_paddr, vmcs_vaddr,
51 vmcs_paddr);
52
53 return vm;
54}
55
/*
 * Prepare the current CPU for VMXON: force the CR0/CR4 bits that VMX
 * requires and make sure IA32_FEATURE_CONTROL permits VMXON.
 * NOTE(review): if firmware already locked IA32_FEATURE_CONTROL with
 * VMX disabled, the WRMSR below would fault; that case is not handled.
 */
void prepare_for_vmx_operation(void)
{
	uint64_t feature_control;
	uint64_t required;
	unsigned long cr0;
	unsigned long cr4;

	/*
	 * Ensure bits in CR0 and CR4 are valid in VMX operation:
	 * - Bit X is 1 in _FIXED0: bit X is fixed to 1 in CRx.
	 * - Bit X is 0 in _FIXED1: bit X is fixed to 0 in CRx.
	 */
	__asm__ __volatile__("mov %%cr0, %0" : "=r"(cr0) : : "memory");
	cr0 &= rdmsr(MSR_IA32_VMX_CR0_FIXED1);
	cr0 |= rdmsr(MSR_IA32_VMX_CR0_FIXED0);
	__asm__ __volatile__("mov %0, %%cr0" : : "r"(cr0) : "memory");

	__asm__ __volatile__("mov %%cr4, %0" : "=r"(cr4) : : "memory");
	cr4 &= rdmsr(MSR_IA32_VMX_CR4_FIXED1);
	cr4 |= rdmsr(MSR_IA32_VMX_CR4_FIXED0);
	/* Enable VMX operation */
	cr4 |= X86_CR4_VMXE;
	__asm__ __volatile__("mov %0, %%cr4" : : "r"(cr4) : "memory");

	/*
	 * Configure IA32_FEATURE_CONTROL MSR to allow VMXON:
	 *  Bit 0: Lock bit. If clear, VMXON causes a #GP.
	 *  Bit 2: Enables VMXON outside of SMX operation. If clear, VMXON
	 *    outside of SMX causes a #GP.
	 */
	required = FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
	required |= FEATURE_CONTROL_LOCKED;
	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
	if ((feature_control & required) != required)
		wrmsr(MSR_IA32_FEATURE_CONTROL, feature_control | required);
}
92
/*
 * Initialize the control fields to the most basic settings possible.
 * The pin-based, processor-based, exit and entry controls are seeded
 * straight from their IA32_VMX_*_CTLS MSRs, then exit/entry are
 * augmented for a 64-bit host and guest respectively.
 */
static inline void init_vmcs_control_fields(void)
{
	vmwrite(VIRTUAL_PROCESSOR_ID, 0);
	vmwrite(POSTED_INTR_NV, 0);

	vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_PINBASED_CTLS));
	vmwrite(CPU_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_PROCBASED_CTLS));
	vmwrite(EXCEPTION_BITMAP, 0);
	vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
	vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, -1); /* Never match */
	vmwrite(CR3_TARGET_COUNT, 0);
	vmwrite(VM_EXIT_CONTROLS, rdmsr(MSR_IA32_VMX_EXIT_CTLS) |
		VM_EXIT_HOST_ADDR_SPACE_SIZE);	  /* 64-bit host */
	vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
	vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
	vmwrite(VM_ENTRY_CONTROLS, rdmsr(MSR_IA32_VMX_ENTRY_CTLS) |
		VM_ENTRY_IA32E_MODE);		  /* 64-bit guest */
	vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
	vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
	vmwrite(TPR_THRESHOLD, 0);
	vmwrite(SECONDARY_VM_EXEC_CONTROL, 0);

	/* No CR bits owned by the host; shadows mirror current values. */
	vmwrite(CR0_GUEST_HOST_MASK, 0);
	vmwrite(CR4_GUEST_HOST_MASK, 0);
	vmwrite(CR0_READ_SHADOW, get_cr0());
	vmwrite(CR4_READ_SHADOW, get_cr4());
}
123
/*
 * Initialize the host state fields based on the current host state, with
 * the exception of HOST_RSP and HOST_RIP, which should be set by vmlaunch
 * or vmresume.  The PAT/EFER/PERF_GLOBAL_CTRL fields are written only
 * when the corresponding VM-exit load control is set.
 */
static inline void init_vmcs_host_state(void)
{
	uint32_t exit_controls = vmreadz(VM_EXIT_CONTROLS);

	vmwrite(HOST_ES_SELECTOR, get_es());
	vmwrite(HOST_CS_SELECTOR, get_cs());
	vmwrite(HOST_SS_SELECTOR, get_ss());
	vmwrite(HOST_DS_SELECTOR, get_ds());
	vmwrite(HOST_FS_SELECTOR, get_fs());
	vmwrite(HOST_GS_SELECTOR, get_gs());
	vmwrite(HOST_TR_SELECTOR, get_tr());

	if (exit_controls & VM_EXIT_LOAD_IA32_PAT)
		vmwrite(HOST_IA32_PAT, rdmsr(MSR_IA32_CR_PAT));
	if (exit_controls & VM_EXIT_LOAD_IA32_EFER)
		vmwrite(HOST_IA32_EFER, rdmsr(MSR_EFER));
	if (exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
		vmwrite(HOST_IA32_PERF_GLOBAL_CTRL,
			rdmsr(MSR_CORE_PERF_GLOBAL_CTRL));

	vmwrite(HOST_IA32_SYSENTER_CS, rdmsr(MSR_IA32_SYSENTER_CS));

	vmwrite(HOST_CR0, get_cr0());
	vmwrite(HOST_CR3, get_cr3());
	vmwrite(HOST_CR4, get_cr4());
	vmwrite(HOST_FS_BASE, rdmsr(MSR_FS_BASE));
	vmwrite(HOST_GS_BASE, rdmsr(MSR_GS_BASE));
	/* TR base comes from the TSS descriptor in the current GDT. */
	vmwrite(HOST_TR_BASE,
		get_desc64_base((struct desc64 *)(get_gdt_base() + get_tr())));
	vmwrite(HOST_GDTR_BASE, get_gdt_base());
	vmwrite(HOST_IDTR_BASE, get_idt_base());
	vmwrite(HOST_IA32_SYSENTER_ESP, rdmsr(MSR_IA32_SYSENTER_ESP));
	vmwrite(HOST_IA32_SYSENTER_EIP, rdmsr(MSR_IA32_SYSENTER_EIP));
}
163
/*
 * Initialize the guest state fields essentially as a clone of
 * the host state fields (read back via vmreadz, so this must run after
 * init_vmcs_host_state).  Some host state fields have fixed values, and
 * we set the corresponding guest state fields accordingly.
 */
static inline void init_vmcs_guest_state(void *rip, void *rsp)
{
	vmwrite(GUEST_ES_SELECTOR, vmreadz(HOST_ES_SELECTOR));
	vmwrite(GUEST_CS_SELECTOR, vmreadz(HOST_CS_SELECTOR));
	vmwrite(GUEST_SS_SELECTOR, vmreadz(HOST_SS_SELECTOR));
	vmwrite(GUEST_DS_SELECTOR, vmreadz(HOST_DS_SELECTOR));
	vmwrite(GUEST_FS_SELECTOR, vmreadz(HOST_FS_SELECTOR));
	vmwrite(GUEST_GS_SELECTOR, vmreadz(HOST_GS_SELECTOR));
	vmwrite(GUEST_LDTR_SELECTOR, 0);
	vmwrite(GUEST_TR_SELECTOR, vmreadz(HOST_TR_SELECTOR));
	vmwrite(GUEST_INTR_STATUS, 0);
	vmwrite(GUEST_PML_INDEX, 0);

	/* -1 = no shadow VMCS linked. */
	vmwrite(VMCS_LINK_POINTER, -1ll);
	vmwrite(GUEST_IA32_DEBUGCTL, 0);
	vmwrite(GUEST_IA32_PAT, vmreadz(HOST_IA32_PAT));
	vmwrite(GUEST_IA32_EFER, vmreadz(HOST_IA32_EFER));
	vmwrite(GUEST_IA32_PERF_GLOBAL_CTRL,
		vmreadz(HOST_IA32_PERF_GLOBAL_CTRL));

	vmwrite(GUEST_ES_LIMIT, -1);
	vmwrite(GUEST_CS_LIMIT, -1);
	vmwrite(GUEST_SS_LIMIT, -1);
	vmwrite(GUEST_DS_LIMIT, -1);
	vmwrite(GUEST_FS_LIMIT, -1);
	vmwrite(GUEST_GS_LIMIT, -1);
	vmwrite(GUEST_LDTR_LIMIT, -1);
	vmwrite(GUEST_TR_LIMIT, 0x67);
	vmwrite(GUEST_GDTR_LIMIT, 0xffff);
	vmwrite(GUEST_IDTR_LIMIT, 0xffff);
	/* AR bytes: 0x10000 marks the segment unusable (null selector). */
	vmwrite(GUEST_ES_AR_BYTES,
		vmreadz(GUEST_ES_SELECTOR) == 0 ? 0x10000 : 0xc093);
	vmwrite(GUEST_CS_AR_BYTES, 0xa09b);
	vmwrite(GUEST_SS_AR_BYTES, 0xc093);
	vmwrite(GUEST_DS_AR_BYTES,
		vmreadz(GUEST_DS_SELECTOR) == 0 ? 0x10000 : 0xc093);
	vmwrite(GUEST_FS_AR_BYTES,
		vmreadz(GUEST_FS_SELECTOR) == 0 ? 0x10000 : 0xc093);
	vmwrite(GUEST_GS_AR_BYTES,
		vmreadz(GUEST_GS_SELECTOR) == 0 ? 0x10000 : 0xc093);
	vmwrite(GUEST_LDTR_AR_BYTES, 0x10000);
	vmwrite(GUEST_TR_AR_BYTES, 0x8b);	/* busy 64-bit TSS */
	vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
	vmwrite(GUEST_ACTIVITY_STATE, 0);
	vmwrite(GUEST_SYSENTER_CS, vmreadz(HOST_IA32_SYSENTER_CS));
	vmwrite(VMX_PREEMPTION_TIMER_VALUE, 0);

	vmwrite(GUEST_CR0, vmreadz(HOST_CR0));
	vmwrite(GUEST_CR3, vmreadz(HOST_CR3));
	vmwrite(GUEST_CR4, vmreadz(HOST_CR4));
	vmwrite(GUEST_ES_BASE, 0);
	vmwrite(GUEST_CS_BASE, 0);
	vmwrite(GUEST_SS_BASE, 0);
	vmwrite(GUEST_DS_BASE, 0);
	vmwrite(GUEST_FS_BASE, vmreadz(HOST_FS_BASE));
	vmwrite(GUEST_GS_BASE, vmreadz(HOST_GS_BASE));
	vmwrite(GUEST_LDTR_BASE, 0);
	vmwrite(GUEST_TR_BASE, vmreadz(HOST_TR_BASE));
	vmwrite(GUEST_GDTR_BASE, vmreadz(HOST_GDTR_BASE));
	vmwrite(GUEST_IDTR_BASE, vmreadz(HOST_IDTR_BASE));
	vmwrite(GUEST_DR7, 0x400);	/* DR7 reset value */
	vmwrite(GUEST_RSP, (uint64_t)rsp);
	vmwrite(GUEST_RIP, (uint64_t)rip);
	vmwrite(GUEST_RFLAGS, 2);	/* RFLAGS reset value (bit 1 fixed) */
	vmwrite(GUEST_PENDING_DBG_EXCEPTIONS, 0);
	vmwrite(GUEST_SYSENTER_ESP, vmreadz(HOST_IA32_SYSENTER_ESP));
	vmwrite(GUEST_SYSENTER_EIP, vmreadz(HOST_IA32_SYSENTER_EIP));
}
237
/*
 * Populate the current VMCS: controls first, then host state, then
 * guest state.  Ordering matters — the guest-state init vmread-clones
 * several fields written by the host-state init.
 */
void prepare_vmcs(void *guest_rip, void *guest_rsp)
{
	init_vmcs_control_fields();
	init_vmcs_host_state();
	init_vmcs_guest_state(guest_rip, guest_rsp);
}
diff --git a/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c
new file mode 100644
index 000000000000..8f7f62093add
--- /dev/null
+++ b/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c
@@ -0,0 +1,231 @@
1/*
2 * gtests/tests/vmx_tsc_adjust_test.c
3 *
4 * Copyright (C) 2018, Google LLC.
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2.
7 *
8 *
9 * IA32_TSC_ADJUST test
10 *
11 * According to the SDM, "if an execution of WRMSR to the
12 * IA32_TIME_STAMP_COUNTER MSR adds (or subtracts) value X from the TSC,
13 * the logical processor also adds (or subtracts) value X from the
14 * IA32_TSC_ADJUST MSR.
15 *
16 * Note that when L1 doesn't intercept writes to IA32_TSC, a
17 * WRMSR(IA32_TSC) from L2 sets L1's TSC value, not L2's perceived TSC
18 * value.
19 *
20 * This test verifies that this unusual case is handled correctly.
21 */
22
#include "test_util.h"
#include "kvm_util.h"
#include "x86.h"
#include "vmx.h"

#include <inttypes.h>
#include <string.h>
#include <sys/ioctl.h>
30
/* Fallback for older UAPI headers that lack this MSR definition. */
#ifndef MSR_IA32_TSC_ADJUST
#define MSR_IA32_TSC_ADJUST 0x3b
#endif

#define PAGE_SIZE 4096
#define VCPU_ID 5

/* Amount subtracted from the TSC via WRMSR by L1 and again by L2. */
#define TSC_ADJUST_VALUE (1ll << 32)
/* TSC offset L1 programs into the VMCS for L2. */
#define TSC_OFFSET_VALUE -(1ll << 48)
40
/* I/O ports the guest uses to signal L0 userspace (see do_exit_to_l0). */
enum {
	PORT_ABORT = 0x1000,	/* arg = guest address of a message string */
	PORT_REPORT,		/* arg = current IA32_TSC_ADJUST value */
	PORT_DONE,		/* test finished */
};
46
/* Guest-virtual/guest-physical address pair describing one VMX page. */
struct vmx_page {
	vm_vaddr_t virt;
	vm_paddr_t phys;
};
51
/* Indices into the vmx_pages array. */
enum {
	VMXON_PAGE = 0,
	VMCS_PAGE,
	MSR_BITMAP_PAGE,

	NUM_VMX_PAGES,
};
59
/*
 * kvm_msrs header immediately followed by exactly one entry, packed so
 * the layout matches what a single-MSR KVM_SET_MSRS/KVM_GET_MSRS ioctl
 * expects.  NOTE(review): not referenced in the visible code — confirm
 * a user exists or drop it.
 */
struct kvm_single_msr {
	struct kvm_msrs header;
	struct kvm_msr_entry entry;
} __attribute__((packed));

/* The virtual machine object. */
static struct kvm_vm *vm;

/* Array of vmx_page descriptors that is shared with the guest. */
struct vmx_page *vmx_pages;
70
/*
 * Trigger a KVM_EXIT_IO to L0 userspace: the port selects the action
 * (see the PORT_* enum) and the argument is passed in rdi, where the
 * host reads it back via vcpu_regs_get().
 */
#define exit_to_l0(_port, _arg) do_exit_to_l0(_port, (unsigned long) (_arg))
static void do_exit_to_l0(uint16_t port, unsigned long arg)
{
	__asm__ __volatile__("in %[port], %%al"
		:
		: [port]"d"(port), "D"(arg)
		: "rax");
}
79
80
/* Guest-side assert: on failure, ship the condition text to L0 via PORT_ABORT. */
#define GUEST_ASSERT(_condition) do { \
	if (!(_condition)) \
		exit_to_l0(PORT_ABORT, "Failed guest assert: " #_condition); \
} while (0)
85
/*
 * Read IA32_TSC_ADJUST, report it to L0, and assert it has not risen
 * above @max (the adjustments in this test are all negative).
 */
static void check_ia32_tsc_adjust(int64_t max)
{
	int64_t adjust;

	adjust = rdmsr(MSR_IA32_TSC_ADJUST);
	exit_to_l0(PORT_REPORT, adjust);
	GUEST_ASSERT(adjust <= max);
}
94
/*
 * L2 guest: recover L1's TSC (our rdtsc is shifted by TSC_OFFSET_VALUE)
 * and write IA32_TSC one TSC_ADJUST_VALUE below it.  Since L1 doesn't
 * intercept the WRMSR, this sets L1's TSC and so IA32_TSC_ADJUST should
 * now be down by 2 * TSC_ADJUST_VALUE in total.
 */
static void l2_guest_code(void)
{
	uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;

	wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);

	/* Exit to L1 */
	__asm__ __volatile__("vmcall");
}
105
/*
 * L1 guest: adjust the TSC down by TSC_ADJUST_VALUE, then set up and
 * launch L2 with an additional TSC_OFFSET_VALUE.  Also verifies that a
 * failed VM-entry (bad GUEST_CR3) does not perturb IA32_TSC_ADJUST.
 */
static void l1_guest_code(struct vmx_page *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint32_t control;
	uintptr_t save_cr3;

	GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE);
	wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE);
	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);

	prepare_for_vmx_operation();

	/* Enter VMX root operation. */
	*(uint32_t *)vmx_pages[VMXON_PAGE].virt = vmcs_revision();
	GUEST_ASSERT(!vmxon(vmx_pages[VMXON_PAGE].phys));

	/* Load a VMCS. */
	*(uint32_t *)vmx_pages[VMCS_PAGE].virt = vmcs_revision();
	GUEST_ASSERT(!vmclear(vmx_pages[VMCS_PAGE].phys));
	GUEST_ASSERT(!vmptrld(vmx_pages[VMCS_PAGE].phys));

	/* Prepare the VMCS for L2 execution. */
	prepare_vmcs(l2_guest_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
	control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETING;
	vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
	/* MSR bitmap page is zeroed, so no MSR accesses are intercepted. */
	vmwrite(MSR_BITMAP, vmx_pages[MSR_BITMAP_PAGE].phys);
	vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);

	/* Jump into L2.  First, test failure to load guest CR3. */
	save_cr3 = vmreadz(GUEST_CR3);
	vmwrite(GUEST_CR3, -1ull);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) ==
		     (EXIT_REASON_FAILED_VMENTRY | EXIT_REASON_INVALID_STATE));
	/* A failed VM-entry must not have changed IA32_TSC_ADJUST. */
	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
	vmwrite(GUEST_CR3, save_cr3);

	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	/* L2's uninstructed WRMSR must be reflected in IA32_TSC_ADJUST. */
	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);

	exit_to_l0(PORT_DONE, 0);
}
152
/* Allocate and zero one guest page; record its virt/phys addresses. */
static void allocate_vmx_page(struct vmx_page *page)
{
	vm_vaddr_t virt;

	virt = vm_vaddr_alloc(vm, PAGE_SIZE, 0, 0, 0);
	memset(addr_gva2hva(vm, virt), 0, PAGE_SIZE);

	page->virt = virt;
	page->phys = addr_gva2gpa(vm, virt);
}
163
/*
 * Allocate the descriptor array in guest memory plus one zeroed page
 * per descriptor.  Sets the host-side global vmx_pages to the array's
 * host mapping and returns its guest-virtual address.
 */
static vm_vaddr_t allocate_vmx_pages(void)
{
	vm_vaddr_t vmx_pages_vaddr;
	int i;

	vmx_pages_vaddr = vm_vaddr_alloc(
		vm, sizeof(struct vmx_page) * NUM_VMX_PAGES, 0, 0, 0);

	vmx_pages = (void *) addr_gva2hva(vm, vmx_pages_vaddr);

	for (i = 0; i < NUM_VMX_PAGES; i++)
		allocate_vmx_page(&vmx_pages[i]);

	return vmx_pages_vaddr;
}
179
180void report(int64_t val)
181{
182 printf("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n",
183 val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE);
184}
185
186int main(int argc, char *argv[])
187{
188 vm_vaddr_t vmx_pages_vaddr;
189 struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
190
191 if (!(entry->ecx & CPUID_VMX)) {
192 printf("nested VMX not enabled, skipping test");
193 return 0;
194 }
195
196 vm = vm_create_default_vmx(VCPU_ID, (void *) l1_guest_code);
197
198 /* Allocate VMX pages and shared descriptors (vmx_pages). */
199 vmx_pages_vaddr = allocate_vmx_pages();
200 vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_vaddr);
201
202 for (;;) {
203 volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
204 struct kvm_regs regs;
205
206 vcpu_run(vm, VCPU_ID);
207 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
208 "Got exit_reason other than KVM_EXIT_IO: %u (%s),\n",
209 run->exit_reason,
210 exit_reason_str(run->exit_reason));
211
212 vcpu_regs_get(vm, VCPU_ID, &regs);
213
214 switch (run->io.port) {
215 case PORT_ABORT:
216 TEST_ASSERT(false, "%s", (const char *) regs.rdi);
217 /* NOT REACHED */
218 case PORT_REPORT:
219 report(regs.rdi);
220 break;
221 case PORT_DONE:
222 goto done;
223 default:
224 TEST_ASSERT(false, "Unknown port 0x%x.", run->io.port);
225 }
226 }
227
228 kvm_vm_free(vm);
229done:
230 return 0;
231}
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 785fc18a16b4..8f1e13d2e547 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -5,7 +5,7 @@ CFLAGS = -Wall -Wl,--no-as-needed -O2 -g
5CFLAGS += -I../../../../usr/include/ 5CFLAGS += -I../../../../usr/include/
6 6
7TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh 7TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh
8TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh 8TEST_PROGS += fib_tests.sh fib-onlink-tests.sh in_netns.sh pmtu.sh
9TEST_GEN_FILES = socket 9TEST_GEN_FILES = socket
10TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy 10TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy
11TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa 11TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
diff --git a/tools/testing/selftests/x86/test_syscall_vdso.c b/tools/testing/selftests/x86/test_syscall_vdso.c
index 40370354d4c1..c9c3281077bc 100644
--- a/tools/testing/selftests/x86/test_syscall_vdso.c
+++ b/tools/testing/selftests/x86/test_syscall_vdso.c
@@ -100,12 +100,19 @@ asm (
100 " shl $32, %r8\n" 100 " shl $32, %r8\n"
101 " orq $0x7f7f7f7f, %r8\n" 101 " orq $0x7f7f7f7f, %r8\n"
102 " movq %r8, %r9\n" 102 " movq %r8, %r9\n"
103 " movq %r8, %r10\n" 103 " incq %r9\n"
104 " movq %r8, %r11\n" 104 " movq %r9, %r10\n"
105 " movq %r8, %r12\n" 105 " incq %r10\n"
106 " movq %r8, %r13\n" 106 " movq %r10, %r11\n"
107 " movq %r8, %r14\n" 107 " incq %r11\n"
108 " movq %r8, %r15\n" 108 " movq %r11, %r12\n"
109 " incq %r12\n"
110 " movq %r12, %r13\n"
111 " incq %r13\n"
112 " movq %r13, %r14\n"
113 " incq %r14\n"
114 " movq %r14, %r15\n"
115 " incq %r15\n"
109 " ret\n" 116 " ret\n"
110 " .code32\n" 117 " .code32\n"
111 " .popsection\n" 118 " .popsection\n"
@@ -128,12 +135,13 @@ int check_regs64(void)
128 int err = 0; 135 int err = 0;
129 int num = 8; 136 int num = 8;
130 uint64_t *r64 = &regs64.r8; 137 uint64_t *r64 = &regs64.r8;
138 uint64_t expected = 0x7f7f7f7f7f7f7f7fULL;
131 139
132 if (!kernel_is_64bit) 140 if (!kernel_is_64bit)
133 return 0; 141 return 0;
134 142
135 do { 143 do {
136 if (*r64 == 0x7f7f7f7f7f7f7f7fULL) 144 if (*r64 == expected++)
137 continue; /* register did not change */ 145 continue; /* register did not change */
138 if (syscall_addr != (long)&int80) { 146 if (syscall_addr != (long)&int80) {
139 /* 147 /*
@@ -147,18 +155,17 @@ int check_regs64(void)
147 continue; 155 continue;
148 } 156 }
149 } else { 157 } else {
150 /* INT80 syscall entrypoint can be used by 158 /*
159 * INT80 syscall entrypoint can be used by
151 * 64-bit programs too, unlike SYSCALL/SYSENTER. 160 * 64-bit programs too, unlike SYSCALL/SYSENTER.
152 * Therefore it must preserve R12+ 161 * Therefore it must preserve R12+
153 * (they are callee-saved registers in 64-bit C ABI). 162 * (they are callee-saved registers in 64-bit C ABI).
154 * 163 *
155 * This was probably historically not intended, 164 * Starting in Linux 4.17 (and any kernel that
156 * but R8..11 are clobbered (cleared to 0). 165 * backports the change), R8..11 are preserved.
157 * IOW: they are the only registers which aren't 166 * Historically (and probably unintentionally), they
158 * preserved across INT80 syscall. 167 * were clobbered or zeroed.
159 */ 168 */
160 if (*r64 == 0 && num <= 11)
161 continue;
162 } 169 }
163 printf("[FAIL]\tR%d has changed:%016llx\n", num, *r64); 170 printf("[FAIL]\tR%d has changed:%016llx\n", num, *r64);
164 err++; 171 err++;
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index dba629c5f8ac..a4c1b76240df 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
63static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); 63static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
64static u32 kvm_next_vmid; 64static u32 kvm_next_vmid;
65static unsigned int kvm_vmid_bits __read_mostly; 65static unsigned int kvm_vmid_bits __read_mostly;
66static DEFINE_SPINLOCK(kvm_vmid_lock); 66static DEFINE_RWLOCK(kvm_vmid_lock);
67 67
68static bool vgic_present; 68static bool vgic_present;
69 69
@@ -473,11 +473,16 @@ static void update_vttbr(struct kvm *kvm)
473{ 473{
474 phys_addr_t pgd_phys; 474 phys_addr_t pgd_phys;
475 u64 vmid; 475 u64 vmid;
476 bool new_gen;
476 477
477 if (!need_new_vmid_gen(kvm)) 478 read_lock(&kvm_vmid_lock);
479 new_gen = need_new_vmid_gen(kvm);
480 read_unlock(&kvm_vmid_lock);
481
482 if (!new_gen)
478 return; 483 return;
479 484
480 spin_lock(&kvm_vmid_lock); 485 write_lock(&kvm_vmid_lock);
481 486
482 /* 487 /*
483 * We need to re-check the vmid_gen here to ensure that if another vcpu 488 * We need to re-check the vmid_gen here to ensure that if another vcpu
@@ -485,7 +490,7 @@ static void update_vttbr(struct kvm *kvm)
485 * use the same vmid. 490 * use the same vmid.
486 */ 491 */
487 if (!need_new_vmid_gen(kvm)) { 492 if (!need_new_vmid_gen(kvm)) {
488 spin_unlock(&kvm_vmid_lock); 493 write_unlock(&kvm_vmid_lock);
489 return; 494 return;
490 } 495 }
491 496
@@ -519,7 +524,7 @@ static void update_vttbr(struct kvm *kvm)
519 vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits); 524 vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
520 kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid; 525 kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
521 526
522 spin_unlock(&kvm_vmid_lock); 527 write_unlock(&kvm_vmid_lock);
523} 528}
524 529
525static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) 530static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index 6919352cbf15..c4762bef13c6 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -18,6 +18,7 @@
18#include <linux/arm-smccc.h> 18#include <linux/arm-smccc.h>
19#include <linux/preempt.h> 19#include <linux/preempt.h>
20#include <linux/kvm_host.h> 20#include <linux/kvm_host.h>
21#include <linux/uaccess.h>
21#include <linux/wait.h> 22#include <linux/wait.h>
22 23
23#include <asm/cputype.h> 24#include <asm/cputype.h>
@@ -427,3 +428,62 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
427 smccc_set_retval(vcpu, val, 0, 0, 0); 428 smccc_set_retval(vcpu, val, 0, 0, 0);
428 return 1; 429 return 1;
429} 430}
431
432int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
433{
434 return 1; /* PSCI version */
435}
436
437int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
438{
439 if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices))
440 return -EFAULT;
441
442 return 0;
443}
444
445int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
446{
447 if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
448 void __user *uaddr = (void __user *)(long)reg->addr;
449 u64 val;
450
451 val = kvm_psci_version(vcpu, vcpu->kvm);
452 if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
453 return -EFAULT;
454
455 return 0;
456 }
457
458 return -EINVAL;
459}
460
461int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
462{
463 if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
464 void __user *uaddr = (void __user *)(long)reg->addr;
465 bool wants_02;
466 u64 val;
467
468 if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
469 return -EFAULT;
470
471 wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);
472
473 switch (val) {
474 case KVM_ARM_PSCI_0_1:
475 if (wants_02)
476 return -EINVAL;
477 vcpu->kvm->arch.psci_version = val;
478 return 0;
479 case KVM_ARM_PSCI_0_2:
480 case KVM_ARM_PSCI_1_0:
481 if (!wants_02)
482 return -EINVAL;
483 vcpu->kvm->arch.psci_version = val;
484 return 0;
485 }
486 }
487
488 return -EINVAL;
489}
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index e21e2f49b005..ffc587bf4742 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -14,6 +14,8 @@
14#include <linux/irqchip/arm-gic.h> 14#include <linux/irqchip/arm-gic.h>
15#include <linux/kvm.h> 15#include <linux/kvm.h>
16#include <linux/kvm_host.h> 16#include <linux/kvm_host.h>
17#include <linux/nospec.h>
18
17#include <kvm/iodev.h> 19#include <kvm/iodev.h>
18#include <kvm/arm_vgic.h> 20#include <kvm/arm_vgic.h>
19 21
@@ -324,6 +326,9 @@ static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
324 326
325 if (n > vgic_v3_max_apr_idx(vcpu)) 327 if (n > vgic_v3_max_apr_idx(vcpu))
326 return 0; 328 return 0;
329
330 n = array_index_nospec(n, 4);
331
327 /* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */ 332 /* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
328 return vgicv3->vgic_ap1r[n]; 333 return vgicv3->vgic_ap1r[n];
329 } 334 }
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index e74baec76361..702936cbe173 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -14,11 +14,13 @@
14 * along with this program. If not, see <http://www.gnu.org/licenses/>. 14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */ 15 */
16 16
17#include <linux/interrupt.h>
18#include <linux/irq.h>
17#include <linux/kvm.h> 19#include <linux/kvm.h>
18#include <linux/kvm_host.h> 20#include <linux/kvm_host.h>
19#include <linux/list_sort.h> 21#include <linux/list_sort.h>
20#include <linux/interrupt.h> 22#include <linux/nospec.h>
21#include <linux/irq.h> 23
22#include <asm/kvm_hyp.h> 24#include <asm/kvm_hyp.h>
23 25
24#include "vgic.h" 26#include "vgic.h"
@@ -101,12 +103,16 @@ struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
101 u32 intid) 103 u32 intid)
102{ 104{
103 /* SGIs and PPIs */ 105 /* SGIs and PPIs */
104 if (intid <= VGIC_MAX_PRIVATE) 106 if (intid <= VGIC_MAX_PRIVATE) {
107 intid = array_index_nospec(intid, VGIC_MAX_PRIVATE);
105 return &vcpu->arch.vgic_cpu.private_irqs[intid]; 108 return &vcpu->arch.vgic_cpu.private_irqs[intid];
109 }
106 110
107 /* SPIs */ 111 /* SPIs */
108 if (intid <= VGIC_MAX_SPI) 112 if (intid <= VGIC_MAX_SPI) {
113 intid = array_index_nospec(intid, VGIC_MAX_SPI);
109 return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS]; 114 return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
115 }
110 116
111 /* LPIs */ 117 /* LPIs */
112 if (intid >= VGIC_MIN_LPI) 118 if (intid >= VGIC_MIN_LPI)
@@ -594,6 +600,7 @@ retry:
594 600
595 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { 601 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
596 struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB; 602 struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
603 bool target_vcpu_needs_kick = false;
597 604
598 spin_lock(&irq->irq_lock); 605 spin_lock(&irq->irq_lock);
599 606
@@ -664,11 +671,18 @@ retry:
664 list_del(&irq->ap_list); 671 list_del(&irq->ap_list);
665 irq->vcpu = target_vcpu; 672 irq->vcpu = target_vcpu;
666 list_add_tail(&irq->ap_list, &new_cpu->ap_list_head); 673 list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
674 target_vcpu_needs_kick = true;
667 } 675 }
668 676
669 spin_unlock(&irq->irq_lock); 677 spin_unlock(&irq->irq_lock);
670 spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock); 678 spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
671 spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags); 679 spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
680
681 if (target_vcpu_needs_kick) {
682 kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
683 kvm_vcpu_kick(target_vcpu);
684 }
685
672 goto retry; 686 goto retry;
673 } 687 }
674 688