author		Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2018-02-26 09:32:00 -0500
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2018-02-26 09:32:00 -0500
commit		36e9f7203e05090031a5356be043a9be342e383c (patch)
tree		27877737c3bb3c00ee9caeece7d671f65cf91252
parent		721dfe4133a9a41e4b4a74e5b41089b7dac8f539 (diff)
parent		4a3928c6f8a53fa1aed28ccba227742486e8ddcb (diff)
Merge 4.16-rc3 into staging-next
We want the IIO/Staging fixes in here, and to resolve a merge problem
with the move of the fsl-mc code.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--	.gitignore	4
-rw-r--r--	Documentation/ABI/testing/sysfs-devices-platform-dock	39
-rw-r--r--	Documentation/ABI/testing/sysfs-devices-system-cpu	77
-rw-r--r--	Documentation/ABI/testing/sysfs-platform-dptf	40
-rw-r--r--	Documentation/atomic_bitops.txt	7
-rw-r--r--	Documentation/devicetree/bindings/power/mti,mips-cpc.txt	8
-rw-r--r--	Documentation/features/sched/membarrier-sync-core/arch-support.txt	62
-rw-r--r--	Documentation/gpu/tve200.rst	2
-rw-r--r--	Documentation/i2c/busses/i2c-i801	2
-rw-r--r--	Documentation/locking/mutex-design.txt	49
-rw-r--r--	Documentation/networking/segmentation-offloads.txt	38
-rw-r--r--	Documentation/x86/topology.txt	2
-rw-r--r--	MAINTAINERS	3
-rw-r--r--	Makefile	2
-rw-r--r--	arch/alpha/include/asm/cmpxchg.h	6
-rw-r--r--	arch/alpha/include/asm/xchg.h	38
-rw-r--r--	arch/arc/include/asm/bug.h	3
-rw-r--r--	arch/arc/kernel/setup.c	2
-rw-r--r--	arch/arc/kernel/unwind.c	2
-rw-r--r--	arch/arm/kernel/time.c	2
-rw-r--r--	arch/arm/mach-ux500/cpu-db8500.c	35
-rw-r--r--	arch/arm64/include/asm/cputype.h	2
-rw-r--r--	arch/arm64/include/asm/hugetlb.h	2
-rw-r--r--	arch/arm64/include/asm/kvm_mmu.h	26
-rw-r--r--	arch/arm64/include/asm/mmu_context.h	4
-rw-r--r--	arch/arm64/include/asm/pgalloc.h	44
-rw-r--r--	arch/arm64/include/asm/pgtable.h	23
-rw-r--r--	arch/arm64/include/asm/stacktrace.h	2
-rw-r--r--	arch/arm64/include/asm/uaccess.h	12
-rw-r--r--	arch/arm64/kernel/armv8_deprecated.c	4
-rw-r--r--	arch/arm64/kernel/cpu_errata.c	9
-rw-r--r--	arch/arm64/kernel/cpufeature.c	6
-rw-r--r--	arch/arm64/kernel/efi.c	2
-rw-r--r--	arch/arm64/kernel/hibernate.c	148
-rw-r--r--	arch/arm64/kernel/perf_event.c	4
-rw-r--r--	arch/arm64/kernel/process.c	11
-rw-r--r--	arch/arm64/kernel/ptrace.c	2
-rw-r--r--	arch/arm64/kernel/stacktrace.c	5
-rw-r--r--	arch/arm64/kernel/sys_compat.c	2
-rw-r--r--	arch/arm64/kernel/time.c	2
-rw-r--r--	arch/arm64/kernel/traps.c	10
-rw-r--r--	arch/arm64/kvm/hyp/switch.c	4
-rw-r--r--	arch/arm64/mm/dump.c	54
-rw-r--r--	arch/arm64/mm/fault.c	44
-rw-r--r--	arch/arm64/mm/hugetlbpage.c	94
-rw-r--r--	arch/arm64/mm/kasan_init.c	70
-rw-r--r--	arch/arm64/mm/mmu.c	292
-rw-r--r--	arch/arm64/mm/pageattr.c	32
-rw-r--r--	arch/arm64/mm/proc.S	14
-rw-r--r--	arch/arm64/net/bpf_jit_comp.c	5
-rw-r--r--	arch/cris/include/arch-v10/arch/bug.h	11
-rw-r--r--	arch/ia64/include/asm/bug.h	6
-rw-r--r--	arch/ia64/kernel/Makefile	1
-rw-r--r--	arch/m68k/include/asm/bug.h	3
-rw-r--r--	arch/mips/boot/Makefile	1
-rw-r--r--	arch/mips/include/asm/compat.h	1
-rw-r--r--	arch/mips/kernel/mips-cpc.c	13
-rw-r--r--	arch/mips/kernel/setup.c	16
-rw-r--r--	arch/mips/kernel/smp-bmips.c	2
-rw-r--r--	arch/powerpc/include/asm/book3s/32/pgtable.h	1
-rw-r--r--	arch/powerpc/include/asm/book3s/64/hash-4k.h	3
-rw-r--r--	arch/powerpc/include/asm/book3s/64/hash-64k.h	16
-rw-r--r--	arch/powerpc/include/asm/book3s/64/hash.h	13
-rw-r--r--	arch/powerpc/include/asm/book3s/64/pgalloc.h	16
-rw-r--r--	arch/powerpc/include/asm/book3s/64/pgtable.h	4
-rw-r--r--	arch/powerpc/include/asm/exception-64s.h	2
-rw-r--r--	arch/powerpc/include/asm/firmware.h	2
-rw-r--r--	arch/powerpc/include/asm/hw_irq.h	12
-rw-r--r--	arch/powerpc/include/asm/kexec.h	6
-rw-r--r--	arch/powerpc/include/asm/nohash/32/pgtable.h	1
-rw-r--r--	arch/powerpc/include/asm/nohash/64/pgtable.h	1
-rw-r--r--	arch/powerpc/include/asm/topology.h	13
-rw-r--r--	arch/powerpc/kernel/eeh_driver.c	3
-rw-r--r--	arch/powerpc/kernel/exceptions-64e.S	2
-rw-r--r--	arch/powerpc/kernel/exceptions-64s.S	6
-rw-r--r--	arch/powerpc/kernel/prom_init.c	2
-rw-r--r--	arch/powerpc/kernel/sysfs.c	6
-rw-r--r--	arch/powerpc/kvm/book3s_xive.c	2
-rw-r--r--	arch/powerpc/mm/drmem.c	14
-rw-r--r--	arch/powerpc/mm/hash64_4k.c	4
-rw-r--r--	arch/powerpc/mm/hash64_64k.c	8
-rw-r--r--	arch/powerpc/mm/hash_utils_64.c	1
-rw-r--r--	arch/powerpc/mm/hugetlbpage-hash64.c	10
-rw-r--r--	arch/powerpc/mm/init-common.c	4
-rw-r--r--	arch/powerpc/mm/numa.c	5
-rw-r--r--	arch/powerpc/mm/pgtable-radix.c	117
-rw-r--r--	arch/powerpc/mm/pgtable_64.c	4
-rw-r--r--	arch/powerpc/mm/tlb_hash64.c	9
-rw-r--r--	arch/powerpc/net/bpf_jit_comp.c	3
-rw-r--r--	arch/powerpc/platforms/powernv/opal-imc.c	6
-rw-r--r--	arch/powerpc/platforms/powernv/pci-ioda.c	2
-rw-r--r--	arch/powerpc/platforms/powernv/setup.c	4
-rw-r--r--	arch/powerpc/platforms/powernv/vas-window.c	16
-rw-r--r--	arch/powerpc/platforms/pseries/hotplug-cpu.c	4
-rw-r--r--	arch/powerpc/platforms/pseries/ras.c	31
-rw-r--r--	arch/powerpc/platforms/pseries/setup.c	3
-rw-r--r--	arch/powerpc/sysdev/xive/spapr.c	16
-rw-r--r--	arch/riscv/Kconfig	3
-rw-r--r--	arch/riscv/kernel/entry.S	5
-rw-r--r--	arch/riscv/kernel/head.S	2
-rw-r--r--	arch/riscv/kernel/setup.c	2
-rw-r--r--	arch/sparc/Kconfig	2
-rw-r--r--	arch/sparc/include/asm/bug.h	6
-rw-r--r--	arch/x86/.gitignore	1
-rw-r--r--	arch/x86/Kconfig	77
-rw-r--r--	arch/x86/Kconfig.cpu	4
-rw-r--r--	arch/x86/boot/compressed/eboot.c	4
-rw-r--r--	arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c	10
-rw-r--r--	arch/x86/entry/calling.h	107
-rw-r--r--	arch/x86/entry/entry_64.S	92
-rw-r--r--	arch/x86/entry/entry_64_compat.S	30
-rw-r--r--	arch/x86/events/intel/core.c	2
-rw-r--r--	arch/x86/events/intel/lbr.c	2
-rw-r--r--	arch/x86/events/intel/p6.c	2
-rw-r--r--	arch/x86/include/asm/acpi.h	2
-rw-r--r--	arch/x86/include/asm/barrier.h	2
-rw-r--r--	arch/x86/include/asm/bug.h	19
-rw-r--r--	arch/x86/include/asm/cpufeature.h	79
-rw-r--r--	arch/x86/include/asm/nospec-branch.h	51
-rw-r--r--	arch/x86/include/asm/page_64.h	4
-rw-r--r--	arch/x86/include/asm/paravirt.h	4
-rw-r--r--	arch/x86/include/asm/paravirt_types.h	2
-rw-r--r--	arch/x86/include/asm/pgtable_32.h	2
-rw-r--r--	arch/x86/include/asm/processor.h	7
-rw-r--r--	arch/x86/include/asm/smp.h	1
-rw-r--r--	arch/x86/include/asm/tlbflush.h	27
-rw-r--r--	arch/x86/include/uapi/asm/hyperv.h	18
-rw-r--r--	arch/x86/kernel/amd_nb.c	2
-rw-r--r--	arch/x86/kernel/apic/apic.c	6
-rw-r--r--	arch/x86/kernel/apic/vector.c	25
-rw-r--r--	arch/x86/kernel/apic/x2apic_uv_x.c	15
-rw-r--r--	arch/x86/kernel/asm-offsets_32.c	2
-rw-r--r--	arch/x86/kernel/cpu/amd.c	28
-rw-r--r--	arch/x86/kernel/cpu/bugs.c	34
-rw-r--r--	arch/x86/kernel/cpu/centaur.c	4
-rw-r--r--	arch/x86/kernel/cpu/common.c	10
-rw-r--r--	arch/x86/kernel/cpu/cyrix.c	2
-rw-r--r--	arch/x86/kernel/cpu/intel.c	31
-rw-r--r--	arch/x86/kernel/cpu/intel_rdt.c	2
-rw-r--r--	arch/x86/kernel/cpu/intel_rdt_rdtgroup.c	1
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce-internal.h	15
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce.c	19
-rw-r--r--	arch/x86/kernel/cpu/microcode/intel.c	6
-rw-r--r--	arch/x86/kernel/cpu/mtrr/generic.c	2
-rw-r--r--	arch/x86/kernel/cpu/mtrr/main.c	4
-rw-r--r--	arch/x86/kernel/cpu/proc.c	8
-rw-r--r--	arch/x86/kernel/head_32.S	4
-rw-r--r--	arch/x86/kernel/machine_kexec_64.c	1
-rw-r--r--	arch/x86/kernel/module.c	1
-rw-r--r--	arch/x86/kernel/mpparse.c	2
-rw-r--r--	arch/x86/kernel/paravirt.c	6
-rw-r--r--	arch/x86/kernel/smpboot.c	12
-rw-r--r--	arch/x86/kernel/traps.c	2
-rw-r--r--	arch/x86/kvm/mmu.c	10
-rw-r--r--	arch/x86/kvm/vmx.c	9
-rw-r--r--	arch/x86/lib/cpu.c	2
-rw-r--r--	arch/x86/lib/error-inject.c	1
-rw-r--r--	arch/x86/mm/init_64.c	6
-rw-r--r--	arch/x86/mm/ioremap.c	2
-rw-r--r--	arch/x86/mm/kmmio.c	2
-rw-r--r--	arch/x86/mm/pgtable_32.c	2
-rw-r--r--	arch/x86/mm/tlb.c	6
-rw-r--r--	arch/x86/net/bpf_jit_comp.c	9
-rw-r--r--	arch/x86/oprofile/nmi_int.c	2
-rw-r--r--	arch/x86/platform/uv/tlb_uv.c	2
-rw-r--r--	arch/x86/tools/relocs.c	3
-rw-r--r--	arch/x86/xen/mmu_pv.c	6
-rw-r--r--	arch/x86/xen/smp.c	2
-rw-r--r--	arch/xtensa/kernel/pci-dma.c	40
-rw-r--r--	arch/xtensa/mm/init.c	70
-rw-r--r--	block/blk-mq.c	1
-rw-r--r--	block/sed-opal.c	2
-rw-r--r--	certs/blacklist_nohashes.c	2
-rw-r--r--	crypto/asymmetric_keys/pkcs7_trust.c	1
-rw-r--r--	crypto/asymmetric_keys/pkcs7_verify.c	12
-rw-r--r--	crypto/asymmetric_keys/public_key.c	4
-rw-r--r--	crypto/asymmetric_keys/restrict.c	21
-rw-r--r--	crypto/sha3_generic.c	218
-rw-r--r--	drivers/acpi/bus.c	75
-rw-r--r--	drivers/acpi/ec.c	6
-rw-r--r--	drivers/acpi/property.c	4
-rw-r--r--	drivers/acpi/spcr.c	1
-rw-r--r--	drivers/android/binder.c	29
-rw-r--r--	drivers/base/core.c	3
-rw-r--r--	drivers/base/power/wakeirq.c	6
-rw-r--r--	drivers/base/property.c	5
-rw-r--r--	drivers/char/hw_random/via-rng.c	2
-rw-r--r--	drivers/clocksource/mips-gic-timer.c	4
-rw-r--r--	drivers/clocksource/timer-sun5i.c	2
-rw-r--r--	drivers/cpufreq/acpi-cpufreq.c	2
-rw-r--r--	drivers/cpufreq/longhaul.c	6
-rw-r--r--	drivers/cpufreq/p4-clockmod.c	2
-rw-r--r--	drivers/cpufreq/powernow-k7.c	2
-rw-r--r--	drivers/cpufreq/speedstep-centrino.c	4
-rw-r--r--	drivers/cpufreq/speedstep-lib.c	6
-rw-r--r--	drivers/crypto/caam/ctrl.c	8
-rw-r--r--	drivers/crypto/padlock-aes.c	2
-rw-r--r--	drivers/crypto/s5p-sss.c	12
-rw-r--r--	drivers/crypto/sunxi-ss/sun4i-ss-prng.c	6
-rw-r--r--	drivers/crypto/talitos.c	4
-rw-r--r--	drivers/edac/amd64_edac.c	2
-rw-r--r--	drivers/extcon/extcon-axp288.c	36
-rw-r--r--	drivers/extcon/extcon-intel-int3496.c	3
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c	1
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c	58
-rw-r--r--	drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c	2
-rw-r--r--	drivers/gpu/drm/amd/powerplay/amd_powerplay.c	2
-rw-r--r--	drivers/gpu/drm/cirrus/cirrus_mode.c	40
-rw-r--r--	drivers/gpu/drm/drm_atomic_helper.c	15
-rw-r--r--	drivers/gpu/drm/drm_edid.c	21
-rw-r--r--	drivers/gpu/drm/drm_mm.c	21
-rw-r--r--	drivers/gpu/drm/drm_probe_helper.c	20
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_g2d.c	12
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_rotator.h	19
-rw-r--r--	drivers/gpu/drm/exynos/exynos_hdmi.c	7
-rw-r--r--	drivers/gpu/drm/exynos/regs-fimc.h	2
-rw-r--r--	drivers/gpu/drm/exynos/regs-hdmi.h	2
-rw-r--r--	drivers/gpu/drm/i915/gvt/kvmgt.c	51
-rw-r--r--	drivers/gpu/drm/i915/gvt/mmio_context.c	1
-rw-r--r--	drivers/gpu/drm/i915/gvt/trace.h	2
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.c	14
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	2
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_context.c	2
-rw-r--r--	drivers/gpu/drm/i915/i915_oa_cflgt3.c	4
-rw-r--r--	drivers/gpu/drm/i915/i915_oa_cnl.c	4
-rw-r--r--	drivers/gpu/drm/i915/i915_pmu.c	231
-rw-r--r--	drivers/gpu/drm/i915/i915_pmu.h	6
-rw-r--r--	drivers/gpu/drm/i915/intel_bios.c	105
-rw-r--r--	drivers/gpu/drm/i915/intel_breadcrumbs.c	29
-rw-r--r--	drivers/gpu/drm/i915/intel_cdclk.c	8
-rw-r--r--	drivers/gpu/drm/i915/intel_engine_cs.c	24
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.h	14
-rw-r--r--	drivers/gpu/drm/meson/meson_crtc.c	6
-rw-r--r--	drivers/gpu/drm/meson/meson_drv.h	3
-rw-r--r--	drivers/gpu/drm/meson/meson_plane.c	7
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c	2
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_connector.c	18
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c	6
-rw-r--r--	drivers/gpu/drm/radeon/radeon_connectors.c	74
-rw-r--r--	drivers/gpu/drm/scheduler/gpu_scheduler.c	2
-rw-r--r--	drivers/gpu/ipu-v3/ipu-common.c	4
-rw-r--r--	drivers/gpu/ipu-v3/ipu-cpmem.c	2
-rw-r--r--	drivers/gpu/ipu-v3/ipu-csi.c	2
-rw-r--r--	drivers/gpu/ipu-v3/ipu-pre.c	3
-rw-r--r--	drivers/gpu/ipu-v3/ipu-prg.c	3
-rw-r--r--	drivers/hid/hid-ids.h	3
-rw-r--r--	drivers/hid/hid-quirks.c	3
-rw-r--r--	drivers/hwmon/coretemp.c	6
-rw-r--r--	drivers/hwmon/hwmon-vid.c	2
-rw-r--r--	drivers/hwmon/k10temp.c	7
-rw-r--r--	drivers/hwmon/k8temp.c	2
-rw-r--r--	drivers/i2c/busses/Kconfig	2
-rw-r--r--	drivers/i2c/busses/i2c-bcm2835.c	21
-rw-r--r--	drivers/i2c/busses/i2c-designware-master.c	4
-rw-r--r--	drivers/i2c/busses/i2c-i801.c	1
-rw-r--r--	drivers/i2c/busses/i2c-sirf.c	4
-rw-r--r--	drivers/iio/adc/aspeed_adc.c	7
-rw-r--r--	drivers/iio/adc/stm32-adc.c	7
-rw-r--r--	drivers/iio/imu/adis_trigger.c	7
-rw-r--r--	drivers/iio/industrialio-buffer.c	2
-rw-r--r--	drivers/iio/proximity/Kconfig	2
-rw-r--r--	drivers/infiniband/core/core_priv.h	7
-rw-r--r--	drivers/infiniband/core/rdma_core.c	38
-rw-r--r--	drivers/infiniband/core/restrack.c	23
-rw-r--r--	drivers/infiniband/core/uverbs_cmd.c	50
-rw-r--r--	drivers/infiniband/core/uverbs_ioctl.c	3
-rw-r--r--	drivers/infiniband/core/uverbs_ioctl_merge.c	18
-rw-r--r--	drivers/infiniband/core/uverbs_main.c	29
-rw-r--r--	drivers/infiniband/core/uverbs_std_types.c	12
-rw-r--r--	drivers/infiniband/core/verbs.c	3
-rw-r--r--	drivers/infiniband/hw/bnxt_re/bnxt_re.h	2
-rw-r--r--	drivers/infiniband/hw/bnxt_re/ib_verbs.c	54
-rw-r--r--	drivers/infiniband/hw/bnxt_re/ib_verbs.h	2
-rw-r--r--	drivers/infiniband/hw/bnxt_re/main.c	12
-rw-r--r--	drivers/infiniband/hw/bnxt_re/qplib_fp.c	21
-rw-r--r--	drivers/infiniband/hw/bnxt_re/qplib_fp.h	4
-rw-r--r--	drivers/infiniband/hw/bnxt_re/qplib_sp.c	14
-rw-r--r--	drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c	4
-rw-r--r--	drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c	4
-rw-r--r--	drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c	4
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_fs.c	2
-rw-r--r--	drivers/iommu/intel-svm.c	2
-rw-r--r--	drivers/irqchip/irq-bcm7038-l1.c	3
-rw-r--r--	drivers/irqchip/irq-bcm7120-l2.c	3
-rw-r--r--	drivers/irqchip/irq-brcmstb-l2.c	3
-rw-r--r--	drivers/irqchip/irq-gic-v2m.c	46
-rw-r--r--	drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c	2
-rw-r--r--	drivers/irqchip/irq-gic-v3-its-pci-msi.c	2
-rw-r--r--	drivers/irqchip/irq-gic-v3-its-platform-msi.c	2
-rw-r--r--	drivers/irqchip/irq-gic-v3-its.c	2
-rw-r--r--	drivers/irqchip/irq-gic-v3.c	4
-rw-r--r--	drivers/irqchip/irq-mips-gic.c	2
-rw-r--r--	drivers/macintosh/macio_asic.c	1
-rw-r--r--	drivers/md/dm.c	3
-rw-r--r--	drivers/md/raid1.c	2
-rw-r--r--	drivers/message/fusion/mptctl.c	2
-rw-r--r--	drivers/misc/mei/bus.c	6
-rw-r--r--	drivers/misc/mei/client.c	6
-rw-r--r--	drivers/misc/mei/hw-me-regs.h	5
-rw-r--r--	drivers/misc/mei/pci-me.c	5
-rw-r--r--	drivers/misc/ocxl/file.c	8
-rw-r--r--	drivers/mmc/host/bcm2835.c	3
-rw-r--r--	drivers/mmc/host/meson-gx-mmc.c	19
-rw-r--r--	drivers/mtd/nand/Kconfig	2
-rw-r--r--	drivers/mtd/nand/vf610_nfc.c	6
-rw-r--r--	drivers/net/ethernet/amd/xgbe/xgbe-pci.c	2
-rw-r--r--	drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c	14
-rw-r--r--	drivers/net/ethernet/broadcom/tg3.c	35
-rw-r--r--	drivers/net/ethernet/broadcom/tg3.h	5
-rw-r--r--	drivers/net/ethernet/cavium/common/cavium_ptp.c	2
-rw-r--r--	drivers/net/ethernet/cavium/thunder/nicvf_main.c	110
-rw-r--r--	drivers/net/ethernet/cavium/thunder/nicvf_queues.c	11
-rw-r--r--	drivers/net/ethernet/cavium/thunder/nicvf_queues.h	4
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c	2
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c	2
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c	25
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/t4_hw.c	10
-rw-r--r--	drivers/net/ethernet/freescale/gianfar.c	23
-rw-r--r--	drivers/net/ethernet/ibm/ibmvnic.c	59
-rw-r--r--	drivers/net/ethernet/marvell/mvpp2.c	11
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c	8
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_main.c	14
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_rx.c	49
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c	3
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_tc.c	3
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_tx.c	2
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/eswitch.c	8
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/fs_core.c	13
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c	1
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/main.c	2
-rw-r--r--	drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c	35
-rw-r--r--	drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c	68
-rw-r--r--	drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c	5
-rw-r--r--	drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c	2
-rw-r--r--	drivers/net/ethernet/renesas/ravb_main.c	6
-rw-r--r--	drivers/net/ethernet/renesas/sh_eth.c	18
-rw-r--r--	drivers/net/ethernet/smsc/Kconfig	2
-rw-r--r--	drivers/net/macvlan.c	2
-rw-r--r--	drivers/net/phy/phy_device.c	2
-rw-r--r--	drivers/net/thunderbolt.c	19
-rw-r--r--	drivers/net/tun.c	16
-rw-r--r--	drivers/net/usb/smsc75xx.c	7
-rw-r--r--	drivers/net/virtio_net.c	58
-rw-r--r--	drivers/net/wireless/mac80211_hwsim.c	2
-rw-r--r--	drivers/nvme/host/core.c	45
-rw-r--r--	drivers/nvme/host/fabrics.h	9
-rw-r--r--	drivers/nvme/host/fc.c	157
-rw-r--r--	drivers/nvme/host/nvme.h	3
-rw-r--r--	drivers/nvme/host/pci.c	39
-rw-r--r--	drivers/nvme/host/rdma.c	16
-rw-r--r--	drivers/nvme/target/io-cmd.c	7
-rw-r--r--	drivers/of/property.c	4
-rw-r--r--	drivers/opp/cpu.c	2
-rw-r--r--	drivers/pci/quirks.c	39
-rw-r--r--	drivers/perf/arm_pmu.c	138
-rw-r--r--	drivers/perf/arm_pmu_acpi.c	61
-rw-r--r--	drivers/perf/arm_pmu_platform.c	37
-rw-r--r--	drivers/platform/x86/dell-laptop.c	20
-rw-r--r--	drivers/platform/x86/ideapad-laptop.c	2
-rw-r--r--	drivers/platform/x86/wmi.c	2
-rw-r--r--	drivers/s390/virtio/virtio_ccw.c	29
-rw-r--r--	drivers/scsi/Makefile	1
-rw-r--r--	drivers/scsi/aacraid/linit.c	4
-rw-r--r--	drivers/scsi/aic7xxx/aiclib.c	34
-rw-r--r--	drivers/scsi/bnx2fc/bnx2fc_io.c	1
-rw-r--r--	drivers/scsi/csiostor/csio_lnode.c	2
-rw-r--r--	drivers/scsi/device_handler/scsi_dh_alua.c	5
-rw-r--r--	drivers/scsi/ibmvscsi/ibmvfc.h	2
-rw-r--r--	drivers/scsi/mpt3sas/mpt3sas_base.c	5
-rw-r--r--	drivers/scsi/qedi/qedi_main.c	55
-rw-r--r--	drivers/scsi/qla2xxx/qla_init.c	23
-rw-r--r--	drivers/scsi/qla2xxx/qla_iocb.c	7
-rw-r--r--	drivers/scsi/qla2xxx/qla_isr.c	6
-rw-r--r--	drivers/scsi/qla2xxx/qla_os.c	2
-rw-r--r--	drivers/scsi/qla2xxx/qla_target.c	2
-rw-r--r--	drivers/scsi/qla4xxx/ql4_def.h	2
-rw-r--r--	drivers/scsi/qla4xxx/ql4_os.c	46
-rw-r--r--	drivers/scsi/storvsc_drv.c	2
-rw-r--r--	drivers/scsi/sym53c8xx_2/sym_hipd.c	2
-rw-r--r--	drivers/scsi/ufs/ufshcd.c	2
-rw-r--r--	drivers/soc/imx/gpc.c	2
-rw-r--r--	drivers/staging/android/ashmem.c	19
-rw-r--r--	drivers/staging/android/ion/ion_cma_heap.c	17
-rw-r--r--	drivers/staging/iio/adc/ad7192.c	27
-rw-r--r--	drivers/staging/iio/impedance-analyzer/ad5933.c	4
-rw-r--r--	drivers/usb/Kconfig	6
-rw-r--r--	drivers/usb/class/cdc-acm.c	9
-rw-r--r--	drivers/usb/core/quirks.c	3
-rw-r--r--	drivers/usb/dwc2/gadget.c	26
-rw-r--r--	drivers/usb/dwc3/core.c	86
-rw-r--r--	drivers/usb/dwc3/core.h	21
-rw-r--r--	drivers/usb/dwc3/dwc3-of-simple.c	1
-rw-r--r--	drivers/usb/dwc3/dwc3-omap.c	16
-rw-r--r--	drivers/usb/dwc3/ep0.c	7
-rw-r--r--	drivers/usb/dwc3/gadget.c	2
-rw-r--r--	drivers/usb/gadget/function/f_fs.c	44
-rw-r--r--	drivers/usb/gadget/function/f_uac2.c	2
-rw-r--r--	drivers/usb/gadget/udc/Kconfig	1
-rw-r--r--	drivers/usb/gadget/udc/bdc/bdc_pci.c	1
-rw-r--r--	drivers/usb/gadget/udc/core.c	2
-rw-r--r--	drivers/usb/gadget/udc/fsl_udc_core.c	4
-rw-r--r--	drivers/usb/gadget/udc/renesas_usb3.c	2
-rw-r--r--	drivers/usb/host/Kconfig	8
-rw-r--r--	drivers/usb/host/ehci-hub.c	4
-rw-r--r--	drivers/usb/host/ehci-q.c	12
-rw-r--r--	drivers/usb/host/ohci-hcd.c	10
-rw-r--r--	drivers/usb/host/ohci-hub.c	4
-rw-r--r--	drivers/usb/host/ohci-q.c	17
-rw-r--r--	drivers/usb/host/pci-quirks.c	109
-rw-r--r--	drivers/usb/host/pci-quirks.h	5
-rw-r--r--	drivers/usb/host/xhci-debugfs.c	4
-rw-r--r--	drivers/usb/host/xhci-hub.c	25
-rw-r--r--	drivers/usb/host/xhci-pci.c	11
-rw-r--r--	drivers/usb/host/xhci.c	10
-rw-r--r--	drivers/usb/host/xhci.h	2
-rw-r--r--	drivers/usb/misc/ldusb.c	6
-rw-r--r--	drivers/usb/musb/musb_core.c	3
-rw-r--r--	drivers/usb/musb/musb_host.c	8
-rw-r--r--	drivers/usb/phy/phy-mxs-usb.c	3
-rw-r--r--	drivers/usb/renesas_usbhs/fifo.c	5
-rw-r--r--	drivers/usb/serial/option.c	7
-rw-r--r--	drivers/usb/usbip/stub_dev.c	3
-rw-r--r--	drivers/usb/usbip/vhci_hcd.c	2
-rw-r--r--	drivers/video/fbdev/geode/video_gx.c	2
-rw-r--r--	drivers/xen/pvcalls-front.c	197
-rw-r--r--	drivers/xen/tmem.c	4
-rw-r--r--	drivers/xen/xenbus/xenbus.h	1
-rw-r--r--	drivers/xen/xenbus/xenbus_comms.c	1
-rw-r--r--	drivers/xen/xenbus/xenbus_xs.c	3
-rw-r--r--	fs/btrfs/backref.c	11
-rw-r--r--	fs/btrfs/delayed-ref.c	3
-rw-r--r--	fs/btrfs/extent-tree.c	4
-rw-r--r--	fs/btrfs/inode.c	41
-rw-r--r--	fs/btrfs/qgroup.c	9
-rw-r--r--	fs/btrfs/tree-log.c	32
-rw-r--r--	fs/btrfs/volumes.c	1
-rw-r--r--	fs/efivarfs/file.c	6
-rw-r--r--	fs/gfs2/bmap.c	43
-rw-r--r--	fs/nfs/callback_proc.c	14
-rw-r--r--	fs/nfs/nfs3proc.c	2
-rw-r--r--	fs/nfs/nfs4client.c	6
-rw-r--r--	fs/proc/kcore.c	4
-rw-r--r--	fs/signalfd.c	15
-rw-r--r--	include/asm-generic/bitops/lock.h	3
-rw-r--r--	include/asm-generic/bug.h	1
-rw-r--r--	include/drm/drm_atomic.h	9
-rw-r--r--	include/drm/drm_crtc_helper.h	1
-rw-r--r--	include/linux/acpi.h	4
-rw-r--r--	include/linux/blkdev.h	2
-rw-r--r--	include/linux/compiler-gcc.h	22
-rw-r--r--	include/linux/compiler.h	9
-rw-r--r--	include/linux/cpuidle.h	2
-rw-r--r--	include/linux/cpumask.h	2
-rw-r--r--	include/linux/dma-mapping.h	2
-rw-r--r--	include/linux/fwnode.h	4
-rw-r--r--	include/linux/kconfig.h	9
-rw-r--r--	include/linux/kcore.h	1
-rw-r--r--	include/linux/memcontrol.h	24
-rw-r--r--	include/linux/mm_inline.h	6
-rw-r--r--	include/linux/mutex.h	5
-rw-r--r--	include/linux/nospec.h	36
-rw-r--r--	include/linux/perf/arm_pmu.h	26
-rw-r--r--	include/linux/property.h	2
-rw-r--r--	include/linux/ptr_ring.h	2
-rw-r--r--	include/linux/sched/mm.h	13
-rw-r--r--	include/linux/sched/user.h	4
-rw-r--r--	include/linux/semaphore.h	2
-rw-r--r--	include/linux/skbuff.h	2
-rw-r--r--	include/linux/swap.h	2
-rw-r--r--	include/linux/workqueue.h	1
-rw-r--r--	include/net/mac80211.h	2
-rw-r--r--	include/net/regulatory.h	2
-rw-r--r--	include/net/udplite.h	1
-rw-r--r--	include/rdma/restrack.h	4
-rw-r--r--	include/rdma/uverbs_ioctl.h	43
-rw-r--r--	include/sound/ac97/regs.h	2
-rw-r--r--	include/trace/events/xen.h	2
-rw-r--r--	include/uapi/linux/if_ether.h	6
-rw-r--r--	include/uapi/linux/libc-compat.h	6
-rw-r--r--	include/uapi/linux/ptrace.h	4
-rw-r--r--	include/uapi/rdma/rdma_user_ioctl.h	4
-rw-r--r--	kernel/bpf/arraymap.c	33
-rw-r--r--	kernel/bpf/core.c	2
-rw-r--r--	kernel/bpf/cpumap.c	2
-rw-r--r--	kernel/bpf/lpm_trie.c	14
-rw-r--r--	kernel/bpf/sockmap.c	3
-rw-r--r--	kernel/fork.c	15
-rw-r--r--	kernel/irq/irqdomain.c	18
-rw-r--r--	kernel/irq/matrix.c	23
-rw-r--r--	kernel/kprobes.c	178
-rw-r--r--	kernel/locking/qspinlock.c	21
-rw-r--r--	kernel/relay.c	2
-rw-r--r--	kernel/sched/core.c	27
-rw-r--r--	kernel/sched/cpufreq_schedutil.c	2
-rw-r--r--	kernel/sched/deadline.c	6
-rw-r--r--	kernel/sched/rt.c	3
-rw-r--r--	kernel/seccomp.c	6
-rw-r--r--	kernel/trace/bpf_trace.c	2
-rw-r--r--	kernel/user.c	3
-rw-r--r--	kernel/workqueue.c	16
-rw-r--r--	lib/Kconfig.debug	1
-rw-r--r--	lib/dma-direct.c	5
-rw-r--r--	lib/idr.c	2
-rw-r--r--	lib/radix-tree.c	2
-rw-r--r--	lib/vsprintf.c	2
-rw-r--r--	mm/memory-failure.c	2
-rw-r--r--	mm/memory.c	2
-rw-r--r--	mm/mlock.c	6
-rw-r--r--	mm/page_alloc.c	4
-rw-r--r--	mm/swap.c	84
-rw-r--r--	mm/vmalloc.c	10
-rw-r--r--	mm/vmscan.c	59
-rw-r--r--	mm/zpool.c	2
-rw-r--r--	mm/zswap.c	6
-rw-r--r--	net/9p/trans_virtio.c	3
-rw-r--r--	net/bridge/br_sysfs_if.c	3
-rw-r--r--	net/bridge/netfilter/ebt_among.c	10
-rw-r--r--	net/bridge/netfilter/ebt_limit.c	4
-rw-r--r--	net/core/dev.c	11
-rw-r--r--	net/core/filter.c	6
-rw-r--r--	net/core/gen_estimator.c	1
-rw-r--r--	net/decnet/af_decnet.c	62
-rw-r--r--	net/ipv4/fib_semantics.c	5
-rw-r--r--	net/ipv4/ip_sockglue.c	7
-rw-r--r--	net/ipv4/netfilter/arp_tables.c	4
-rw-r--r--	net/ipv4/netfilter/ip_tables.c	7
-rw-r--r--	net/ipv4/netfilter/ipt_CLUSTERIP.c	20
-rw-r--r--	net/ipv4/netfilter/ipt_ECN.c	12
-rw-r--r--	net/ipv4/netfilter/ipt_REJECT.c	4
-rw-r--r--	net/ipv4/netfilter/ipt_rpfilter.c	6
-rw-r--r--	net/ipv4/route.c	2
-rw-r--r--	net/ipv4/tcp_output.c	34
-rw-r--r--	net/ipv4/udp.c	5
-rw-r--r--	net/ipv6/ip6_checksum.c	5
-rw-r--r--	net/ipv6/ipv6_sockglue.c	10
-rw-r--r--	net/ipv6/netfilter/ip6_tables.c	4
-rw-r--r--	net/ipv6/netfilter/ip6t_REJECT.c	4
-rw-r--r--	net/ipv6/netfilter/ip6t_rpfilter.c	6
-rw-r--r--	net/ipv6/netfilter/ip6t_srh.c	6
-rw-r--r--	net/ipv6/sit.c	2
-rw-r--r--	net/mac80211/agg-rx.c	4
-rw-r--r--	net/mac80211/cfg.c	2
-rw-r--r--	net/mac80211/ieee80211_i.h	2
-rw-r--r--	net/mac80211/mesh.c	17
-rw-r--r--	net/mac80211/spectmgmt.c	7
-rw-r--r--	net/mac80211/sta_info.c	3
-rw-r--r--	net/netfilter/nf_nat_proto_common.c	7
-rw-r--r--	net/netfilter/x_tables.c	74
-rw-r--r--	net/netfilter/xt_AUDIT.c	4
-rw-r--r--	net/netfilter/xt_CHECKSUM.c	8
-rw-r--r--	net/netfilter/xt_CONNSECMARK.c	10
-rw-r--r--	net/netfilter/xt_CT.c	25
-rw-r--r--	net/netfilter/xt_DSCP.c	4
-rw-r--r--	net/netfilter/xt_HL.c	13
-rw-r--r--	net/netfilter/xt_HMARK.c	27
-rw-r--r--	net/netfilter/xt_IDLETIMER.c	9
-rw-r--r--	net/netfilter/xt_LED.c	16
-rw-r--r--	net/netfilter/xt_NFQUEUE.c	8
-rw-r--r--	net/netfilter/xt_SECMARK.c	18
-rw-r--r--	net/netfilter/xt_TCPMSS.c	10
-rw-r--r--	net/netfilter/xt_TPROXY.c	6
-rw-r--r--	net/netfilter/xt_addrtype.c	33
-rw-r--r--	net/netfilter/xt_bpf.c	4
-rw-r--r--	net/netfilter/xt_cgroup.c	8
-rw-r--r--	net/netfilter/xt_cluster.c	8
-rw-r--r--	net/netfilter/xt_connbytes.c	4
-rw-r--r--	net/netfilter/xt_connlabel.c	7
-rw-r--r--	net/netfilter/xt_connmark.c	8
-rw-r--r--	net/netfilter/xt_conntrack.c	4
-rw-r--r--	net/netfilter/xt_dscp.c	4
-rw-r--r--	net/netfilter/xt_ecn.c	4
-rw-r--r--	net/netfilter/xt_hashlimit.c	26
-rw-r--r--	net/netfilter/xt_helper.c	4
-rw-r--r--	net/netfilter/xt_ipcomp.c	2
-rw-r--r--	net/netfilter/xt_ipvs.c	3
-rw-r--r--	net/netfilter/xt_l2tp.c	22
-rw-r--r--	net/netfilter/xt_limit.c	4
-rw-r--r--	net/netfilter/xt_nat.c	5
-rw-r--r--	net/netfilter/xt_nfacct.c	6
-rw-r--r--	net/netfilter/xt_physdev.c	4
-rw-r--r--	net/netfilter/xt_policy.c	23
-rw-r--r--	net/netfilter/xt_recent.c	14
-rw-r--r--	net/netfilter/xt_set.c	50
-rw-r--r--	net/netfilter/xt_socket.c	10
-rw-r--r--	net/netfilter/xt_state.c	4
-rw-r--r--	net/netfilter/xt_time.c	6
-rw-r--r--	net/netlink/af_netlink.c	4
-rw-r--r--	net/nfc/llcp_commands.c	4
-rw-r--r--	net/nfc/netlink.c	3
-rw-r--r--	net/rds/connection.c	2
-rw-r--r--	net/rxrpc/output.c	2
-rw-r--r--	net/rxrpc/recvmsg.c	5
-rw-r--r--	net/sched/cls_api.c	33
-rw-r--r--	net/sched/cls_u32.c	24
-rw-r--r--	net/sctp/debug.c	6
-rw-r--r--	net/sctp/input.c	5
-rw-r--r--	net/sctp/stream.c	2
-rw-r--r--	net/sctp/stream_interleave.c	16
-rw-r--r--	net/tipc/bearer.c	82
-rw-r--r--	net/tipc/bearer.h	4
-rw-r--r--	net/tipc/net.c	15
-rw-r--r--	net/tipc/net.h	1
-rw-r--r--	net/tipc/netlink_compat.c	43
-rw-r--r--	net/tls/tls_main.c	7
-rw-r--r--	net/unix/af_unix.c	2
-rw-r--r--	net/wireless/mesh.c	25
-rw-r--r--	net/wireless/sme.c	2
-rw-r--r--	security/integrity/digsig.c	1
-rw-r--r--	security/keys/big_key.c	110
-rw-r--r--	sound/ac97/Kconfig	1
-rw-r--r--	sound/core/seq/seq_clientmgr.c	8
-rw-r--r--	sound/pci/hda/patch_realtek.c	72
-rw-r--r--	sound/usb/mixer.c	18
-rw-r--r--	sound/usb/pcm.c	9
-rw-r--r--	sound/usb/quirks.c	7
-rw-r--r--	tools/arch/powerpc/include/uapi/asm/kvm.h	2
-rw-r--r--	tools/arch/s390/include/uapi/asm/unistd.h	412
-rw-r--r--	tools/arch/x86/include/asm/cpufeatures.h	1
-rw-r--r--	tools/bpf/bpftool/main.c	2
-rw-r--r--	tools/bpf/bpftool/prog.c	3
-rw-r--r--	tools/cgroup/Makefile	1
-rw-r--r--	tools/gpio/Makefile	2
-rw-r--r--	tools/hv/Makefile	1
-rw-r--r--	tools/iio/Makefile	2
-rw-r--r--	tools/include/uapi/drm/i915_drm.h	77
-rw-r--r--	tools/include/uapi/linux/if_link.h	1
-rw-r--r--	tools/include/uapi/linux/kvm.h	90
-rw-r--r--	tools/laptop/freefall/Makefile	1
-rw-r--r--	tools/leds/Makefile	1
-rw-r--r--	tools/lib/bpf/libbpf.c	5
-rw-r--r--	tools/objtool/check.c	53
-rw-r--r--	tools/objtool/check.h	1
-rw-r--r--	tools/perf/Documentation/perf-data.txt	4
-rw-r--r--	tools/perf/Makefile.perf	6
-rw-r--r--	tools/perf/arch/s390/Makefile	10
-rwxr-xr-x	tools/perf/arch/s390/entry/syscalls/mksyscalltbl	18
-rw-r--r--	tools/perf/arch/s390/entry/syscalls/syscall.tbl	390
-rw-r--r--	tools/perf/builtin-c2c.c	4
-rw-r--r--	tools/perf/builtin-report.c	3
-rw-r--r--	tools/perf/builtin-top.c	150
-rwxr-xr-x	tools/perf/check-headers.sh	1
-rw-r--r--	tools/perf/pmu-events/arch/arm64/cortex-a53/branch.json	27
-rw-r--r--	tools/perf/pmu-events/arch/arm64/cortex-a53/bus.json	22
-rw-r--r--	tools/perf/pmu-events/arch/arm64/cortex-a53/cache.json	27
-rw-r--r--	tools/perf/pmu-events/arch/arm64/cortex-a53/memory.json	22
-rw-r--r--	tools/perf/pmu-events/arch/arm64/cortex-a53/other.json	32
-rw-r--r--	tools/perf/pmu-events/arch/arm64/cortex-a53/pipeline.json	52
-rw-r--r--	tools/perf/pmu-events/arch/arm64/mapfile.csv	1
-rw-r--r--	tools/perf/tests/backward-ring-buffer.c	7
-rwxr-xr-x	tools/perf/tests/shell/trace+probe_libc_inet_pton.sh	23
-rw-r--r--	tools/perf/ui/browsers/hists.c	38
-rw-r--r--	tools/perf/ui/browsers/hists.h	3
-rw-r--r--	tools/perf/util/evlist.c	17
-rw-r--r--	tools/perf/util/evlist.h	4
-rw-r--r--	tools/perf/util/evsel.c	12
-rw-r--r--	tools/perf/util/evsel.h	14
-rw-r--r--	tools/perf/util/hist.h	6
-rw-r--r--	tools/perf/util/mmap.c	141
-rw-r--r--	tools/perf/util/mmap.h	10
-rw-r--r--	tools/perf/util/util.c	24
-rw-r--r--	tools/power/acpi/Makefile.config	3
-rw-r--r--	tools/scripts/Makefile.include	18
-rw-r--r--	tools/spi/Makefile	2
-rw-r--r--	tools/testing/selftests/bpf/.gitignore	1
-rw-r--r--	tools/testing/selftests/bpf/test_maps.c	2
-rw-r--r--	tools/testing/selftests/bpf/test_tcpbpf_kern.c	1
-rw-r--r--	tools/testing/selftests/bpf/test_verifier.c	26
-rw-r--r--	tools/testing/selftests/memfd/Makefile	1
-rw-r--r--	tools/testing/selftests/powerpc/alignment/alignment_handler.c	2
-rw-r--r--	tools/testing/selftests/seccomp/seccomp_bpf.c	61
-rw-r--r--	tools/testing/selftests/x86/Makefile	24
-rw-r--r--	tools/testing/selftests/x86/mpx-mini-test.c	32
-rw-r--r--	tools/testing/selftests/x86/protection_keys.c	28
-rw-r--r--	tools/testing/selftests/x86/single_step_syscall.c	5
-rw-r--r--	tools/testing/selftests/x86/test_mremap_vdso.c	4
-rw-r--r--	tools/testing/selftests/x86/test_vdso.c	55
-rw-r--r--	tools/testing/selftests/x86/test_vsyscall.c	11
-rw-r--r--	tools/usb/Makefile	1
-rw-r--r--	tools/vm/Makefile	1
-rw-r--r--	tools/wmi/Makefile	1
680 files changed, 6929 insertions, 4271 deletions
diff --git a/.gitignore b/.gitignore
index 705e09913dc2..1be78fd8163b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -127,3 +127,7 @@ all.config
 
 # Kdevelop4
 *.kdev4
+
+#Automatically generated by ASN.1 compiler
+net/ipv4/netfilter/nf_nat_snmp_basic-asn1.c
+net/ipv4/netfilter/nf_nat_snmp_basic-asn1.h
diff --git a/Documentation/ABI/testing/sysfs-devices-platform-dock b/Documentation/ABI/testing/sysfs-devices-platform-dock
new file mode 100644
index 000000000000..1d8c18f905c7
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-platform-dock
@@ -0,0 +1,39 @@
+What:		/sys/devices/platform/dock.N/docked
+Date:		Dec, 2006
+KernelVersion:	2.6.19
+Contact:	linux-acpi@vger.kernel.org
+Description:
+		(RO) Value 1 or 0 indicates whether the software believes the
+		laptop is docked in a docking station.
+
+What:		/sys/devices/platform/dock.N/undock
+Date:		Dec, 2006
+KernelVersion:	2.6.19
+Contact:	linux-acpi@vger.kernel.org
+Description:
+		(WO) Writing to this file causes the software to initiate an
+		undock request to the firmware.
+
+What:		/sys/devices/platform/dock.N/uid
+Date:		Feb, 2007
+KernelVersion:	v2.6.21
+Contact:	linux-acpi@vger.kernel.org
+Description:
+		(RO) Displays the docking station the laptop is docked to.
+
+What:		/sys/devices/platform/dock.N/flags
+Date:		May, 2007
+KernelVersion:	v2.6.21
+Contact:	linux-acpi@vger.kernel.org
+Description:
+		(RO) Show dock station flags, useful for checking if undock
+		request has been made by the user (from the immediate_undock
+		option).
+
+What:		/sys/devices/platform/dock.N/type
+Date:		Aug, 2008
+KernelVersion:	v2.6.27
+Contact:	linux-acpi@vger.kernel.org
+Description:
+		(RO) Display the dock station type- dock_station, ata_bay or
+		battery_bay.
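
The dock attributes above are single-value text files, so user space can poll them with ordinary file I/O. A minimal sketch in C, assuming a dock device at index 0 (the index and exact path are illustrative, not mandated by the ABI):

    #include <stdio.h>

    int main(void)
    {
            int docked = 0;
            FILE *f = fopen("/sys/devices/platform/dock.0/docked", "r");

            if (!f)
                    return 1;       /* no dock.0 device on this system */
            if (fscanf(f, "%d", &docked) != 1)
                    docked = 0;
            fclose(f);
            printf("laptop %s docked\n", docked ? "is" : "is not");
            return 0;
    }
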
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index bfd29bc8d37a..4ed63b6cfb15 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -108,6 +108,8 @@ Description: CPU topology files that describe a logical CPU's relationship
 
 What:		/sys/devices/system/cpu/cpuidle/current_driver
 		/sys/devices/system/cpu/cpuidle/current_governer_ro
+		/sys/devices/system/cpu/cpuidle/available_governors
+		/sys/devices/system/cpu/cpuidle/current_governor
 Date:		September 2007
 Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:	Discover cpuidle policy and mechanism
@@ -119,13 +121,84 @@ Description:	Discover cpuidle policy and mechanism
 		Idle policy (governor) is differentiated from idle mechanism
 		(driver)
 
-		current_driver: displays current idle mechanism
+		current_driver: (RO) displays current idle mechanism
 
-		current_governor_ro: displays current idle policy
+		current_governor_ro: (RO) displays current idle policy
+
+		With the cpuidle_sysfs_switch boot option enabled (meant for
+		developer testing), the following three attributes are visible
+		instead:
+
+		current_driver: same as described above
+
+		available_governors: (RO) displays a space separated list of
+		available governors
+
+		current_governor: (RW) displays current idle policy. Users can
+		switch the governor at runtime by writing to this file.
 
 		See files in Documentation/cpuidle/ for more information.
 
 
+What:		/sys/devices/system/cpu/cpuX/cpuidle/stateN/name
+		/sys/devices/system/cpu/cpuX/cpuidle/stateN/latency
+		/sys/devices/system/cpu/cpuX/cpuidle/stateN/power
+		/sys/devices/system/cpu/cpuX/cpuidle/stateN/time
+		/sys/devices/system/cpu/cpuX/cpuidle/stateN/usage
+Date:		September 2007
+KernelVersion:	v2.6.24
+Contact:	Linux power management list <linux-pm@vger.kernel.org>
+Description:
+		The directory /sys/devices/system/cpu/cpuX/cpuidle contains per
+		logical CPU specific cpuidle information for each online cpu X.
+		The processor idle states which are available for use have the
+		following attributes:
+
+		name: (RO) Name of the idle state (string).
+
+		latency: (RO) The latency to exit out of this idle state (in
+		microseconds).
+
+		power: (RO) The power consumed while in this idle state (in
+		milliwatts).
+
+		time: (RO) The total time spent in this idle state (in microseconds).
+
+		usage: (RO) Number of times this state was entered (a count).
+
+
+What:		/sys/devices/system/cpu/cpuX/cpuidle/stateN/desc
+Date:		February 2008
+KernelVersion:	v2.6.25
+Contact:	Linux power management list <linux-pm@vger.kernel.org>
+Description:
+		(RO) A small description about the idle state (string).
+
+
+What:		/sys/devices/system/cpu/cpuX/cpuidle/stateN/disable
+Date:		March 2012
+KernelVersion:	v3.10
+Contact:	Linux power management list <linux-pm@vger.kernel.org>
+Description:
+		(RW) Option to disable this idle state (bool). The behavior and
+		the effect of the disable variable depends on the implementation
+		of a particular governor. In the ladder governor, for example,
+		it is not coherent, i.e. if one is disabling a light state, then
+		all deeper states are disabled as well, but the disable variable
+		does not reflect it. Likewise, if one enables a deep state but a
+		lighter state still is disabled, then this has no effect.
+
+
+What:		/sys/devices/system/cpu/cpuX/cpuidle/stateN/residency
+Date:		March 2014
+KernelVersion:	v3.15
+Contact:	Linux power management list <linux-pm@vger.kernel.org>
+Description:
+		(RO) Display the target residency i.e. the minimum amount of
+		time (in microseconds) this cpu should spend in this idle state
+		to make the transition worth the effort.
+
+
 What:		/sys/devices/system/cpu/cpu#/cpufreq/*
 Date:		pre-git history
 Contact:	linux-pm@vger.kernel.org
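
Because every stateN attribute above is a single-value text file, sampling them needs nothing beyond file reads. A hedged user-space sketch, assuming cpu0/state0 exists and using the layout documented above:

    #include <stdio.h>

    static long read_attr(const char *path)
    {
            long v = -1;
            FILE *f = fopen(path, "r");

            if (f) {
                    if (fscanf(f, "%ld", &v) != 1)
                            v = -1;
                    fclose(f);
            }
            return v;
    }

    int main(void)
    {
            const char *base = "/sys/devices/system/cpu/cpu0/cpuidle/state0";
            char path[256];

            snprintf(path, sizeof(path), "%s/latency", base);
            printf("exit latency: %ld us\n", read_attr(path));
            snprintf(path, sizeof(path), "%s/usage", base);
            printf("times entered: %ld\n", read_attr(path));
            return 0;
    }
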
diff --git a/Documentation/ABI/testing/sysfs-platform-dptf b/Documentation/ABI/testing/sysfs-platform-dptf
new file mode 100644
index 000000000000..325dc0667dbb
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-dptf
@@ -0,0 +1,40 @@
+What:		/sys/bus/platform/devices/INT3407:00/dptf_power/charger_type
+Date:		Jul, 2016
+KernelVersion:	v4.10
+Contact:	linux-acpi@vger.kernel.org
+Description:
+		(RO) The charger type - Traditional, Hybrid or NVDC.
+
+What:		/sys/bus/platform/devices/INT3407:00/dptf_power/adapter_rating_mw
+Date:		Jul, 2016
+KernelVersion:	v4.10
+Contact:	linux-acpi@vger.kernel.org
+Description:
+		(RO) Adapter rating in milliwatts (the maximum Adapter power).
+		Must be 0 if no AC Adaptor is plugged in.
+
+What:		/sys/bus/platform/devices/INT3407:00/dptf_power/max_platform_power_mw
+Date:		Jul, 2016
+KernelVersion:	v4.10
+Contact:	linux-acpi@vger.kernel.org
+Description:
+		(RO) Maximum platform power that can be supported by the battery
+		in milliwatts.
+
+What:		/sys/bus/platform/devices/INT3407:00/dptf_power/platform_power_source
+Date:		Jul, 2016
+KernelVersion:	v4.10
+Contact:	linux-acpi@vger.kernel.org
+Description:
+		(RO) Display the platform power source
+		0x00 = DC
+		0x01 = AC
+		0x02 = USB
+		0x03 = Wireless Charger
+
+What:		/sys/bus/platform/devices/INT3407:00/dptf_power/battery_steady_power
+Date:		Jul, 2016
+KernelVersion:	v4.10
+Contact:	linux-acpi@vger.kernel.org
+Description:
+		(RO) The maximum sustained power for battery in milliwatts.
diff --git a/Documentation/atomic_bitops.txt b/Documentation/atomic_bitops.txt
index 5550bfdcce5f..be70b32c95d9 100644
--- a/Documentation/atomic_bitops.txt
+++ b/Documentation/atomic_bitops.txt
@@ -58,7 +58,12 @@ Like with atomic_t, the rule of thumb is:
 
  - RMW operations that have a return value are fully ordered.
 
-Except for test_and_set_bit_lock() which has ACQUIRE semantics and
+ - RMW operations that are conditional are unordered on FAILURE,
+   otherwise the above rules apply. In the case of test_and_{}_bit() operations,
+   if the bit in memory is unchanged by the operation then it is deemed to have
+   failed.
+
+Except for a successful test_and_set_bit_lock() which has ACQUIRE semantics and
 clear_bit_unlock() which has RELEASE semantics.
 
 Since a platform only has a single means of achieving atomic operations
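
The ACQUIRE/RELEASE pairing spelled out above is exactly what makes bit spinlocks work. A minimal sketch of the usual pattern, assuming kernel context (an illustration of the documented semantics, not code from the patch):

    static inline void example_bit_lock(unsigned long *word)
    {
            /* Success implies ACQUIRE; a failed attempt is unordered. */
            while (test_and_set_bit_lock(0, word))
                    cpu_relax();
    }

    static inline void example_bit_unlock(unsigned long *word)
    {
            /* RELEASE: critical-section stores are visible before the bit clears. */
            clear_bit_unlock(0, word);
    }
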
diff --git a/Documentation/devicetree/bindings/power/mti,mips-cpc.txt b/Documentation/devicetree/bindings/power/mti,mips-cpc.txt
new file mode 100644
index 000000000000..c6b82511ae8a
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/mti,mips-cpc.txt
@@ -0,0 +1,8 @@
+Binding for MIPS Cluster Power Controller (CPC).
+
+This binding allows a system to specify where the CPC registers are
+located.
+
+Required properties:
+compatible : Should be "mti,mips-cpc".
+regs: Should describe the address & size of the CPC register region.
diff --git a/Documentation/features/sched/membarrier-sync-core/arch-support.txt b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
new file mode 100644
index 000000000000..2c815a7f1ba7
--- /dev/null
+++ b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
@@ -0,0 +1,62 @@
+#
+# Feature name:          membarrier-sync-core
+#         Kconfig:       ARCH_HAS_MEMBARRIER_SYNC_CORE
+#         description:   arch supports core serializing membarrier
+#
+# Architecture requirements
+#
+# * arm64
+#
+# Rely on eret context synchronization when returning from IPI handler, and
+# when returning to user-space.
+#
+# * x86
+#
+# x86-32 uses IRET as return from interrupt, which takes care of the IPI.
+# However, it uses both IRET and SYSEXIT to go back to user-space. The IRET
+# instruction is core serializing, but not SYSEXIT.
+#
+# x86-64 uses IRET as return from interrupt, which takes care of the IPI.
+# However, it can return to user-space through either SYSRETL (compat code),
+# SYSRETQ, or IRET.
+#
+# Given that neither SYSRET{L,Q}, nor SYSEXIT, are core serializing, we rely
+# instead on write_cr3() performed by switch_mm() to provide core serialization
+# after changing the current mm, and deal with the special case of kthread ->
+# uthread (temporarily keeping current mm into active_mm) by issuing a
+# sync_core_before_usermode() in that specific case.
+#
+    -----------------------
+    |         arch |status|
+    -----------------------
+    |       alpha: | TODO |
+    |         arc: | TODO |
+    |         arm: | TODO |
+    |       arm64: |  ok  |
+    |    blackfin: | TODO |
+    |         c6x: | TODO |
+    |        cris: | TODO |
+    |         frv: | TODO |
+    |       h8300: | TODO |
+    |     hexagon: | TODO |
+    |        ia64: | TODO |
+    |        m32r: | TODO |
+    |        m68k: | TODO |
+    |       metag: | TODO |
+    |  microblaze: | TODO |
+    |        mips: | TODO |
+    |     mn10300: | TODO |
+    |       nios2: | TODO |
+    |    openrisc: | TODO |
+    |      parisc: | TODO |
+    |     powerpc: | TODO |
+    |        s390: | TODO |
+    |       score: | TODO |
+    |          sh: | TODO |
+    |       sparc: | TODO |
+    |        tile: | TODO |
+    |          um: | TODO |
+    |   unicore32: | TODO |
+    |         x86: |  ok  |
+    |      xtensa: | TODO |
+    -----------------------
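
For the arches marked "ok" above, user space requests a core-serializing barrier through the membarrier() syscall. A hedged sketch of the expected usage with the 4.16-era uapi constants (registration must precede use; error handling trimmed):

    #include <linux/membarrier.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int membarrier(int cmd, int flags)
    {
            return syscall(__NR_membarrier, cmd, flags);
    }

    int main(void)
    {
            if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE, 0))
                    return 1;       /* unsupported on this arch/kernel */
            /* ... later, e.g. after rewriting JIT'd code in place: */
            membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0);
            return 0;
    }
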
diff --git a/Documentation/gpu/tve200.rst b/Documentation/gpu/tve200.rst
index 69b17b324e12..152ea9398f7e 100644
--- a/Documentation/gpu/tve200.rst
+++ b/Documentation/gpu/tve200.rst
@@ -3,4 +3,4 @@
 ==================================
 
 .. kernel-doc:: drivers/gpu/drm/tve200/tve200_drv.c
-   :doc: Faraday TV Encoder 200
+   :doc: Faraday TV Encoder TVE200 DRM Driver
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index d47702456926..65514c251318 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -28,8 +28,10 @@ Supported adapters:
   * Intel Wildcat Point (PCH)
   * Intel Wildcat Point-LP (PCH)
   * Intel BayTrail (SOC)
+  * Intel Braswell (SOC)
   * Intel Sunrise Point-H (PCH)
   * Intel Sunrise Point-LP (PCH)
+  * Intel Kaby Lake-H (PCH)
   * Intel DNV (SOC)
   * Intel Broxton (SOC)
   * Intel Lewisburg (PCH)
diff --git a/Documentation/locking/mutex-design.txt b/Documentation/locking/mutex-design.txt
index 60c482df1a38..818aca19612f 100644
--- a/Documentation/locking/mutex-design.txt
+++ b/Documentation/locking/mutex-design.txt
@@ -21,37 +21,23 @@ Implementation
 --------------
 
 Mutexes are represented by 'struct mutex', defined in include/linux/mutex.h
-and implemented in kernel/locking/mutex.c. These locks use a three
-state atomic counter (->count) to represent the different possible
-transitions that can occur during the lifetime of a lock:
-
- 1: unlocked
- 0: locked, no waiters
- negative: locked, with potential waiters
-
-In its most basic form it also includes a wait-queue and a spinlock
-that serializes access to it. CONFIG_SMP systems can also include
-a pointer to the lock task owner (->owner) as well as a spinner MCS
-lock (->osq), both described below in (ii).
+and implemented in kernel/locking/mutex.c. These locks use an atomic variable
+(->owner) to keep track of the lock state during its lifetime. Field owner
+actually contains 'struct task_struct *' to the current lock owner and it is
+therefore NULL if not currently owned. Since task_struct pointers are aligned
+at at least L1_CACHE_BYTES, low bits (3) are used to store extra state (e.g.,
+if waiter list is non-empty). In its most basic form it also includes a
+wait-queue and a spinlock that serializes access to it. Furthermore,
+CONFIG_MUTEX_SPIN_ON_OWNER=y systems use a spinner MCS lock (->osq), described
+below in (ii).
 
 When acquiring a mutex, there are three possible paths that can be
 taken, depending on the state of the lock:
 
-(i) fastpath: tries to atomically acquire the lock by decrementing the
-    counter. If it was already taken by another task it goes to the next
-    possible path. This logic is architecture specific. On x86-64, the
-    locking fastpath is 2 instructions:
-
-    0000000000000e10 <mutex_lock>:
-    e21:   f0 ff 0b                lock decl (%rbx)
-    e24:   79 08                   jns    e2e <mutex_lock+0x1e>
-
-    the unlocking fastpath is equally tight:
-
-    0000000000000bc0 <mutex_unlock>:
-    bc8:   f0 ff 07                lock incl (%rdi)
-    bcb:   7f 0a                   jg     bd7 <mutex_unlock+0x17>
-
+(i) fastpath: tries to atomically acquire the lock by cmpxchg()ing the owner with
+    the current task. This only works in the uncontended case (cmpxchg() checks
+    against 0UL, so all 3 state bits above have to be 0). If the lock is
+    contended it goes to the next possible path.
 
 (ii) midpath: aka optimistic spinning, tries to spin for acquisition
      while the lock owner is running and there are no other tasks ready
@@ -143,11 +129,10 @@ Test if the mutex is taken:
 Disadvantages
 -------------
 
-Unlike its original design and purpose, 'struct mutex' is larger than
-most locks in the kernel. E.g: on x86-64 it is 40 bytes, almost twice
-as large as 'struct semaphore' (24 bytes) and tied, along with rwsems,
-for the largest lock in the kernel. Larger structure sizes mean more
-CPU cache and memory footprint.
+Unlike its original design and purpose, 'struct mutex' is among the largest
+locks in the kernel. E.g: on x86-64 it is 32 bytes, where 'struct semaphore'
+is 24 bytes and rw_semaphore is 40 bytes. Larger structure sizes mean more CPU
+cache and memory footprint.
 
 When to use mutexes
 -------------------
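
A simplified sketch of the cmpxchg() fastpath the rewritten text describes, approximating __mutex_trylock_fast() in kernel/locking/mutex.c (the NULL owner doubles as "unlocked with all state bits clear"):

    static inline bool example_mutex_trylock_fast(struct mutex *lock)
    {
            unsigned long curr = (unsigned long)current;

            /* Succeeds only if owner was 0UL: unowned and all flag bits clear. */
            return atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr) == 0UL;
    }
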
diff --git a/Documentation/networking/segmentation-offloads.txt b/Documentation/networking/segmentation-offloads.txt
index 2f09455a993a..d47480b61ac6 100644
--- a/Documentation/networking/segmentation-offloads.txt
+++ b/Documentation/networking/segmentation-offloads.txt
@@ -13,6 +13,7 @@ The following technologies are described:
  * Generic Segmentation Offload - GSO
  * Generic Receive Offload - GRO
  * Partial Generic Segmentation Offload - GSO_PARTIAL
+ * SCTP accelleration with GSO - GSO_BY_FRAGS
 
 TCP Segmentation Offload
 ========================
@@ -49,6 +50,10 @@ datagram into multiple IPv4 fragments. Many of the requirements for UDP
 fragmentation offload are the same as TSO. However the IPv4 ID for
 fragments should not increment as a single IPv4 datagram is fragmented.
 
+UFO is deprecated: modern kernels will no longer generate UFO skbs, but can
+still receive them from tuntap and similar devices. Offload of UDP-based
+tunnel protocols is still supported.
+
 IPIP, SIT, GRE, UDP Tunnel, and Remote Checksum Offloads
 ========================================================
 
@@ -83,10 +88,10 @@ SKB_GSO_UDP_TUNNEL_CSUM. These two additional tunnel types reflect the
 fact that the outer header also requests to have a non-zero checksum
 included in the outer header.
 
-Finally there is SKB_GSO_REMCSUM which indicates that a given tunnel header
-has requested a remote checksum offload. In this case the inner headers
-will be left with a partial checksum and only the outer header checksum
-will be computed.
+Finally there is SKB_GSO_TUNNEL_REMCSUM which indicates that a given tunnel
+header has requested a remote checksum offload. In this case the inner
+headers will be left with a partial checksum and only the outer header
+checksum will be computed.
 
 Generic Segmentation Offload
 ============================
@@ -128,3 +133,28 @@ values for if the header was simply duplicated. The one exception to this
 is the outer IPv4 ID field. It is up to the device drivers to guarantee
 that the IPv4 ID field is incremented in the case that a given header does
 not have the DF bit set.
+
+SCTP accelleration with GSO
+===========================
+
+SCTP - despite the lack of hardware support - can still take advantage of
+GSO to pass one large packet through the network stack, rather than
+multiple small packets.
+
+This requires a different approach to other offloads, as SCTP packets
+cannot be just segmented to (P)MTU. Rather, the chunks must be contained in
+IP segments, padding respected. So unlike regular GSO, SCTP can't just
+generate a big skb, set gso_size to the fragmentation point and deliver it
+to IP layer.
+
+Instead, the SCTP protocol layer builds an skb with the segments correctly
+padded and stored as chained skbs, and skb_segment() splits based on those.
+To signal this, gso_size is set to the special value GSO_BY_FRAGS.
+
+Therefore, any code in the core networking stack must be aware of the
+possibility that gso_size will be GSO_BY_FRAGS and handle that case
+appropriately. (For size checks, the skb_gso_validate_*_len family of
+helpers do this automatically.)
+
+This also affects drivers with the NETIF_F_FRAGLIST & NETIF_F_GSO_SCTP bits
+set. Note also that NETIF_F_GSO_SCTP is included in NETIF_F_GSO_SOFTWARE.
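
A hedged sketch of the kind of size check the new text asks for in core-stack code (names simplified; the real helpers are the skb_gso_validate_*_len family mentioned above):

    static bool example_gso_fits(struct sk_buff *skb, unsigned int mtu)
    {
            struct skb_shared_info *si = skb_shinfo(skb);
            struct sk_buff *frag;

            if (si->gso_size != GSO_BY_FRAGS)
                    return si->gso_size <= mtu;

            /* SCTP-style skb: segment sizes live in the chained frag list. */
            skb_walk_frags(skb, frag)
                    if (frag->len > mtu)
                            return false;
            return true;
    }
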
diff --git a/Documentation/x86/topology.txt b/Documentation/x86/topology.txt
index f3e9d7e9ed6c..2953e3ec9a02 100644
--- a/Documentation/x86/topology.txt
+++ b/Documentation/x86/topology.txt
@@ -108,7 +108,7 @@ The topology of a system is described in the units of:
 
   The number of online threads is also printed in /proc/cpuinfo "siblings."
 
-  - topology_sibling_mask():
+  - topology_sibling_cpumask():
 
     The cpumask contains all online threads in the core to which a thread
     belongs.
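
Usage of the renamed helper is unchanged; only the name gains the _cpumask suffix. A small sketch, assuming kernel context:

    #include <linux/cpumask.h>
    #include <linux/topology.h>

    static unsigned int example_count_smt_siblings(unsigned int cpu)
    {
            unsigned int sibling, n = 0;

            /* Walk all online hardware threads sharing this CPU's core. */
            for_each_cpu(sibling, topology_sibling_cpumask(cpu))
                    n++;
            return n;
    }
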
diff --git a/MAINTAINERS b/MAINTAINERS
index 885d20072d97..c3c2b7595cba 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7909,7 +7909,6 @@ S: Maintained
 F:	scripts/leaking_addresses.pl
 
 LED SUBSYSTEM
-M:	Richard Purdie <rpurdie@rpsys.net>
 M:	Jacek Anaszewski <jacek.anaszewski@gmail.com>
 M:	Pavel Machek <pavel@ucw.cz>
 L:	linux-leds@vger.kernel.org
@@ -9213,6 +9212,7 @@ MIPS GENERIC PLATFORM
 M:	Paul Burton <paul.burton@mips.com>
 L:	linux-mips@linux-mips.org
 S:	Supported
+F:	Documentation/devicetree/bindings/power/mti,mips-cpc.txt
 F:	arch/mips/generic/
 F:	arch/mips/tools/generic-board-config.sh
 
@@ -9952,6 +9952,7 @@ F:	drivers/nfc/nxp-nci
 
 OBJTOOL
 M:	Josh Poimboeuf <jpoimboe@redhat.com>
+M:	Peter Zijlstra <peterz@infradead.org>
 S:	Supported
 F:	tools/objtool/
 
diff --git a/Makefile b/Makefile
index 79ad2bfa24b6..659a7780aeb3 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc3
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
index 46ebf14aed4e..8a2b331e43fe 100644
--- a/arch/alpha/include/asm/cmpxchg.h
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -6,7 +6,6 @@
  * Atomic exchange routines.
  */
 
-#define __ASM__MB
 #define ____xchg(type, args...)		__xchg ## type ## _local(args)
 #define ____cmpxchg(type, args...)	__cmpxchg ## type ## _local(args)
 #include <asm/xchg.h>
@@ -33,10 +32,6 @@
 	cmpxchg_local((ptr), (o), (n));					\
 })
 
-#ifdef CONFIG_SMP
-#undef __ASM__MB
-#define __ASM__MB	"\tmb\n"
-#endif
 #undef ____xchg
 #undef ____cmpxchg
 #define ____xchg(type, args...)		__xchg ##type(args)
@@ -64,7 +59,6 @@
 	cmpxchg((ptr), (o), (n));					\
 })
 
-#undef __ASM__MB
 #undef ____cmpxchg
 
 #endif /* _ALPHA_CMPXCHG_H */
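
The practical contract after this change (together with the xchg.h hunks below): Alpha's xchg()/cmpxchg() are fully ordered whether or not the compare succeeds, so callers need no extra barriers around them. An illustrative sketch, not code from the patch:

    static int example_try_claim(atomic_t *owner, int me)
    {
            /*
             * Fully ordered on success *and* failure; in particular a
             * dependency headed by a failed cmpxchg stays ordered.
             */
            return atomic_cmpxchg(owner, 0, me) == 0;
    }
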
diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
index 68dfb3cb7145..e2b59fac5257 100644
--- a/arch/alpha/include/asm/xchg.h
+++ b/arch/alpha/include/asm/xchg.h
@@ -12,6 +12,10 @@
  * Atomic exchange.
  * Since it can be used to implement critical sections
  * it must clobber "memory" (also for interrupts in UP).
+ *
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
  */
 
 static inline unsigned long
@@ -19,6 +23,7 @@ ____xchg(_u8, volatile char *m, unsigned long val)
 {
 	unsigned long ret, tmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%4,7,%3\n"
 	"	insbl	%1,%4,%1\n"
@@ -28,12 +33,12 @@ ____xchg(_u8, volatile char *m, unsigned long val)
 	"	or	%1,%2,%2\n"
 	"	stq_c	%2,0(%3)\n"
 	"	beq	%2,2f\n"
-	__ASM__MB
 	".subsection 2\n"
 	"2:	br	1b\n"
 	".previous"
 	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
 	: "r" ((long)m), "1" (val) : "memory");
+	smp_mb();
 
 	return ret;
 }
@@ -43,6 +48,7 @@ ____xchg(_u16, volatile short *m, unsigned long val)
 {
 	unsigned long ret, tmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%4,7,%3\n"
 	"	inswl	%1,%4,%1\n"
@@ -52,12 +58,12 @@ ____xchg(_u16, volatile short *m, unsigned long val)
 	"	or	%1,%2,%2\n"
 	"	stq_c	%2,0(%3)\n"
 	"	beq	%2,2f\n"
-	__ASM__MB
 	".subsection 2\n"
 	"2:	br	1b\n"
 	".previous"
 	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
 	: "r" ((long)m), "1" (val) : "memory");
+	smp_mb();
 
 	return ret;
 }
@@ -67,17 +73,18 @@ ____xchg(_u32, volatile int *m, unsigned long val)
 {
 	unsigned long dummy;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%4\n"
 	"	bis $31,%3,%1\n"
 	"	stl_c %1,%2\n"
 	"	beq %1,2f\n"
-	__ASM__MB
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	: "=&r" (val), "=&r" (dummy), "=m" (*m)
 	: "rI" (val), "m" (*m) : "memory");
+	smp_mb();
 
 	return val;
 }
@@ -87,17 +94,18 @@ ____xchg(_u64, volatile long *m, unsigned long val)
 {
 	unsigned long dummy;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%4\n"
 	"	bis $31,%3,%1\n"
 	"	stq_c %1,%2\n"
 	"	beq %1,2f\n"
-	__ASM__MB
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	: "=&r" (val), "=&r" (dummy), "=m" (*m)
 	: "rI" (val), "m" (*m) : "memory");
+	smp_mb();
 
 	return val;
 }
@@ -128,10 +136,12 @@ ____xchg(, volatile void *ptr, unsigned long x, int size)
  * store NEW in MEM.  Return the initial value in MEM.  Success is
  * indicated by comparing RETURN with OLD.
  *
- * The memory barrier should be placed in SMP only when we actually
- * make the change. If we don't change anything (so if the returned
- * prev is equal to old) then we aren't acquiring anything new and
- * we don't need any memory barrier as far I can tell.
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
+ * The trailing memory barrier is placed in SMP unconditionally, in
+ * order to guarantee that dependency ordering is preserved when a
+ * dependency is headed by an unsuccessful operation.
  */
 
 static inline unsigned long
@@ -139,6 +149,7 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
 {
 	unsigned long prev, tmp, cmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%5,7,%4\n"
 	"	insbl	%1,%5,%1\n"
@@ -150,13 +161,13 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
 	"	or	%1,%2,%2\n"
 	"	stq_c	%2,0(%4)\n"
 	"	beq	%2,3f\n"
-	__ASM__MB
 	"2:\n"
 	".subsection 2\n"
 	"3:	br	1b\n"
 	".previous"
 	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
 	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+	smp_mb();
 
 	return prev;
 }
@@ -166,6 +177,7 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
 {
 	unsigned long prev, tmp, cmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%5,7,%4\n"
 	"	inswl	%1,%5,%1\n"
@@ -177,13 +189,13 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
 	"	or	%1,%2,%2\n"
 	"	stq_c	%2,0(%4)\n"
 	"	beq	%2,3f\n"
-	__ASM__MB
 	"2:\n"
 	".subsection 2\n"
 	"3:	br	1b\n"
 	".previous"
 	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
 	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+	smp_mb();
 
 	return prev;
 }
@@ -193,6 +205,7 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
 {
 	unsigned long prev, cmp;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%5\n"
 	"	cmpeq %0,%3,%1\n"
@@ -200,13 +213,13 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
200 " mov %4,%1\n" 213 " mov %4,%1\n"
201 " stl_c %1,%2\n" 214 " stl_c %1,%2\n"
202 " beq %1,3f\n" 215 " beq %1,3f\n"
203 __ASM__MB
204 "2:\n" 216 "2:\n"
205 ".subsection 2\n" 217 ".subsection 2\n"
206 "3: br 1b\n" 218 "3: br 1b\n"
207 ".previous" 219 ".previous"
208 : "=&r"(prev), "=&r"(cmp), "=m"(*m) 220 : "=&r"(prev), "=&r"(cmp), "=m"(*m)
209 : "r"((long) old), "r"(new), "m"(*m) : "memory"); 221 : "r"((long) old), "r"(new), "m"(*m) : "memory");
222 smp_mb();
210 223
211 return prev; 224 return prev;
212} 225}
@@ -216,6 +229,7 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
216{ 229{
217 unsigned long prev, cmp; 230 unsigned long prev, cmp;
218 231
232 smp_mb();
219 __asm__ __volatile__( 233 __asm__ __volatile__(
220 "1: ldq_l %0,%5\n" 234 "1: ldq_l %0,%5\n"
221 " cmpeq %0,%3,%1\n" 235 " cmpeq %0,%3,%1\n"
@@ -223,13 +237,13 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
223 " mov %4,%1\n" 237 " mov %4,%1\n"
224 " stq_c %1,%2\n" 238 " stq_c %1,%2\n"
225 " beq %1,3f\n" 239 " beq %1,3f\n"
226 __ASM__MB
227 "2:\n" 240 "2:\n"
228 ".subsection 2\n" 241 ".subsection 2\n"
229 "3: br 1b\n" 242 "3: br 1b\n"
230 ".previous" 243 ".previous"
231 : "=&r"(prev), "=&r"(cmp), "=m"(*m) 244 : "=&r"(prev), "=&r"(cmp), "=m"(*m)
232 : "r"((long) old), "r"(new), "m"(*m) : "memory"); 245 : "r"((long) old), "r"(new), "m"(*m) : "memory");
246 smp_mb();
233 247
234 return prev; 248 return prev;
235} 249}
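
A note on the pattern above: the old code placed __ASM__MB after the
store-conditional, so the operation was ordered against later accesses
only once it had succeeded. The unconditional leading and trailing
smp_mb() make Alpha's xchg() and cmpxchg() fully ordered even when the
operation fails. A minimal sketch of the same shape in C11 atomics (an
illustration only, not the kernel's code, which must stay in inline
assembly):

#include <stdatomic.h>

/* Fully ordered exchange built from a relaxed primitive plus explicit
 * fences, mirroring the smp_mb() placement in the hunks above. */
static unsigned long full_xchg(_Atomic unsigned long *m, unsigned long val)
{
	unsigned long ret;

	atomic_thread_fence(memory_order_seq_cst);	/* leading barrier */
	ret = atomic_exchange_explicit(m, val, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* trailing barrier */
	return ret;
}

The trailing fence matters most for cmpxchg(): per the updated comment,
it runs even when the compare fails, so a dependency headed by an
unsuccessful operation is still ordered.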
diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h
index ea022d47896c..21ec82466d62 100644
--- a/arch/arc/include/asm/bug.h
+++ b/arch/arc/include/asm/bug.h
@@ -23,7 +23,8 @@ void die(const char *str, struct pt_regs *regs, unsigned long address);
 
 #define BUG() do { \
 	pr_warn("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
-	dump_stack(); \
+	barrier_before_unreachable(); \
+	__builtin_trap(); \
 } while (0)
 
 #define HAVE_ARCH_BUG
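
The rewritten BUG() replaces dump_stack() with a compiler trap:
__builtin_trap() emits a trapping instruction that GCC knows cannot
return, and barrier_before_unreachable() is a kernel-internal helper
that works around compiler issues at such unreachable points. A
hypothetical userspace analogue (my_bug() is an illustration, not the
kernel macro):

#include <stdio.h>

#define my_bug()							\
	do {								\
		fprintf(stderr, "BUG: failure at %s:%d/%s()!\n",	\
			__FILE__, __LINE__, __func__);			\
		__builtin_trap();	/* does not return */		\
	} while (0)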
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 9d27331fe69a..ec12fe1c2f07 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -373,7 +373,7 @@ static void arc_chk_core_config(void)
 {
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
 	int saved = 0, present = 0;
-	char *opt_nm = NULL;;
+	char *opt_nm = NULL;
 
 	if (!cpu->extn.timer0)
 		panic("Timer0 is not present!\n");
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index 333daab7def0..183391d4d33a 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -366,7 +366,7 @@ static void init_unwind_hdr(struct unwind_table *table,
 		return;
 
 ret_err:
-	panic("Attention !!! Dwarf FDE parsing errors\n");;
+	panic("Attention !!! Dwarf FDE parsing errors\n");
 }
 
 #ifdef CONFIG_MODULES
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 629f8e9981f1..cf2701cb0de8 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -83,7 +83,7 @@ static void dummy_clock_access(struct timespec64 *ts)
 }
 
 static clock_access_fn __read_persistent_clock = dummy_clock_access;
-static clock_access_fn __read_boot_clock = dummy_clock_access;;
+static clock_access_fn __read_boot_clock = dummy_clock_access;
 
 void read_persistent_clock64(struct timespec64 *ts)
 {
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 57058ac46f49..7e5d7a083707 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -23,7 +23,6 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
-#include <linux/perf/arm_pmu.h>
 #include <linux/regulator/machine.h>
 
 #include <asm/outercache.h>
@@ -112,37 +111,6 @@ static void ux500_restart(enum reboot_mode mode, const char *cmd)
 	prcmu_system_reset(0);
 }
 
-/*
- * The PMU IRQ lines of two cores are wired together into a single interrupt.
- * Bounce the interrupt to the other core if it's not ours.
- */
-static irqreturn_t db8500_pmu_handler(int irq, void *dev, irq_handler_t handler)
-{
-	irqreturn_t ret = handler(irq, dev);
-	int other = !smp_processor_id();
-
-	if (ret == IRQ_NONE && cpu_online(other))
-		irq_set_affinity(irq, cpumask_of(other));
-
-	/*
-	 * We should be able to get away with the amount of IRQ_NONEs we give,
-	 * while still having the spurious IRQ detection code kick in if the
-	 * interrupt really starts hitting spuriously.
-	 */
-	return ret;
-}
-
-static struct arm_pmu_platdata db8500_pmu_platdata = {
-	.handle_irq = db8500_pmu_handler,
-	.irq_flags = IRQF_NOBALANCING | IRQF_NO_THREAD,
-};
-
-static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
-	/* Requires call-back bindings. */
-	OF_DEV_AUXDATA("arm,cortex-a9-pmu", 0, "arm-pmu", &db8500_pmu_platdata),
-	{},
-};
-
 static struct of_dev_auxdata u8540_auxdata_lookup[] __initdata = {
 	OF_DEV_AUXDATA("stericsson,db8500-prcmu", 0x80157000, "db8500-prcmu", NULL),
 	{},
@@ -165,9 +133,6 @@ static void __init u8500_init_machine(void)
 	if (of_machine_is_compatible("st-ericsson,u8540"))
 		of_platform_populate(NULL, u8500_local_bus_nodes,
 				     u8540_auxdata_lookup, NULL);
-	else
-		of_platform_populate(NULL, u8500_local_bus_nodes,
-				     u8500_auxdata_lookup, NULL);
 }
 
 static const char * stericsson_dt_platform_compat[] = {
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index be7bd19c87ec..350c76a1d15b 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -20,7 +20,7 @@
 
 #define MPIDR_UP_BITMASK	(0x1 << 30)
 #define MPIDR_MT_BITMASK	(0x1 << 24)
-#define MPIDR_HWID_BITMASK	0xff00ffffff
+#define MPIDR_HWID_BITMASK	UL(0xff00ffffff)
 
 #define MPIDR_LEVEL_BITS_SHIFT	3
 #define MPIDR_LEVEL_BITS	(1 << MPIDR_LEVEL_BITS_SHIFT)
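
Presumably the point of the UL() wrapper here: it gives the mask an
explicit unsigned long type (and, via the kernel's const.h machinery,
still expands to a plain constant when the header is included from
assembly), so expressions such as ~MPIDR_HWID_BITMASK and masking of
64-bit MPIDR values stay well-defined. A small standalone illustration
with assumed example values:

#include <stdio.h>

int main(void)
{
	unsigned long mask  = 0xff00ffffffUL;		/* MPIDR_HWID_BITMASK */
	unsigned long mpidr = 0x0000000080000003UL;	/* assumed value */

	printf("hwid  = %#lx\n", mpidr & mask);	/* affinity fields only */
	printf("~mask = %#lx\n", ~mask);	/* full 64-bit complement */
	return 0;
}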
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 1dca41bea16a..e73f68569624 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -22,7 +22,7 @@
 
 static inline pte_t huge_ptep_get(pte_t *ptep)
 {
-	return *ptep;
+	return READ_ONCE(*ptep);
 }
 
 
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 9679067a1574..7faed6e48b46 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -185,42 +185,42 @@ static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
 	return pmd;
 }
 
-static inline void kvm_set_s2pte_readonly(pte_t *pte)
+static inline void kvm_set_s2pte_readonly(pte_t *ptep)
 {
 	pteval_t old_pteval, pteval;
 
-	pteval = READ_ONCE(pte_val(*pte));
+	pteval = READ_ONCE(pte_val(*ptep));
 	do {
 		old_pteval = pteval;
 		pteval &= ~PTE_S2_RDWR;
 		pteval |= PTE_S2_RDONLY;
-		pteval = cmpxchg_relaxed(&pte_val(*pte), old_pteval, pteval);
+		pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
 	} while (pteval != old_pteval);
 }
 
-static inline bool kvm_s2pte_readonly(pte_t *pte)
+static inline bool kvm_s2pte_readonly(pte_t *ptep)
 {
-	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
+	return (READ_ONCE(pte_val(*ptep)) & PTE_S2_RDWR) == PTE_S2_RDONLY;
 }
 
-static inline bool kvm_s2pte_exec(pte_t *pte)
+static inline bool kvm_s2pte_exec(pte_t *ptep)
 {
-	return !(pte_val(*pte) & PTE_S2_XN);
+	return !(READ_ONCE(pte_val(*ptep)) & PTE_S2_XN);
 }
 
-static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
+static inline void kvm_set_s2pmd_readonly(pmd_t *pmdp)
 {
-	kvm_set_s2pte_readonly((pte_t *)pmd);
+	kvm_set_s2pte_readonly((pte_t *)pmdp);
 }
 
-static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
+static inline bool kvm_s2pmd_readonly(pmd_t *pmdp)
 {
-	return kvm_s2pte_readonly((pte_t *)pmd);
+	return kvm_s2pte_readonly((pte_t *)pmdp);
 }
 
-static inline bool kvm_s2pmd_exec(pmd_t *pmd)
+static inline bool kvm_s2pmd_exec(pmd_t *pmdp)
 {
-	return !(pmd_val(*pmd) & PMD_S2_XN);
+	return !(READ_ONCE(pmd_val(*pmdp)) & PMD_S2_XN);
 }
 
 static inline bool kvm_page_empty(void *ptr)
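
kvm_set_s2pte_readonly() above is the standard compare-and-swap retry
loop: snapshot the entry, compute the new value, and retry if another
updater raced in between. The same loop in C11 atomics, as a sketch
(set_bits_sketch() is a hypothetical name; the kernel's
cmpxchg_relaxed() corresponds roughly to a relaxed compare-exchange):

#include <stdatomic.h>

static void set_bits_sketch(_Atomic unsigned long *word,
			    unsigned long clear, unsigned long set)
{
	unsigned long old = atomic_load_explicit(word, memory_order_relaxed);
	unsigned long new;

	do {
		new = (old & ~clear) | set;
		/* on failure, 'old' is refreshed with the current value */
	} while (!atomic_compare_exchange_weak_explicit(word, &old, new,
							memory_order_relaxed,
							memory_order_relaxed));
}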
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 8d3331985d2e..39ec0b8a689e 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -141,13 +141,13 @@ static inline void cpu_install_idmap(void)
  * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
  * avoiding the possibility of conflicting TLB entries being allocated.
  */
-static inline void cpu_replace_ttbr1(pgd_t *pgd)
+static inline void cpu_replace_ttbr1(pgd_t *pgdp)
 {
 	typedef void (ttbr_replace_func)(phys_addr_t);
 	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
 	ttbr_replace_func *replace_phys;
 
-	phys_addr_t pgd_phys = virt_to_phys(pgd);
+	phys_addr_t pgd_phys = virt_to_phys(pgdp);
 
 	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
 
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index e9d9f1b006ef..2e05bcd944c8 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -36,23 +36,23 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 	return (pmd_t *)__get_free_page(PGALLOC_GFP);
 }
 
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
 {
-	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
-	free_page((unsigned long)pmd);
+	BUG_ON((unsigned long)pmdp & (PAGE_SIZE-1));
+	free_page((unsigned long)pmdp);
 }
 
-static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
+static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
 {
-	set_pud(pud, __pud(__phys_to_pud_val(pmd) | prot));
+	set_pud(pudp, __pud(__phys_to_pud_val(pmdp) | prot));
 }
 
-static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmdp)
 {
-	__pud_populate(pud, __pa(pmd), PMD_TYPE_TABLE);
+	__pud_populate(pudp, __pa(pmdp), PMD_TYPE_TABLE);
 }
 #else
-static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
+static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
 {
 	BUILD_BUG();
 }
@@ -65,30 +65,30 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 	return (pud_t *)__get_free_page(PGALLOC_GFP);
 }
 
-static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+static inline void pud_free(struct mm_struct *mm, pud_t *pudp)
 {
-	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
-	free_page((unsigned long)pud);
+	BUG_ON((unsigned long)pudp & (PAGE_SIZE-1));
+	free_page((unsigned long)pudp);
 }
 
-static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
+static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pudp, pgdval_t prot)
 {
-	set_pgd(pgdp, __pgd(__phys_to_pgd_val(pud) | prot));
+	set_pgd(pgdp, __pgd(__phys_to_pgd_val(pudp) | prot));
 }
 
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgdp, pud_t *pudp)
 {
-	__pgd_populate(pgd, __pa(pud), PUD_TYPE_TABLE);
+	__pgd_populate(pgdp, __pa(pudp), PUD_TYPE_TABLE);
 }
 #else
-static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
+static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pudp, pgdval_t prot)
 {
 	BUILD_BUG();
 }
 #endif /* CONFIG_PGTABLE_LEVELS > 3 */
 
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
-extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgdp);
 
 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
@@ -114,10 +114,10 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
 /*
  * Free a PTE table.
  */
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *ptep)
 {
-	if (pte)
-		free_page((unsigned long)pte);
+	if (ptep)
+		free_page((unsigned long)ptep);
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
@@ -126,10 +126,10 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
 	__free_page(pte);
 }
 
-static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
+static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
 				  pmdval_t prot)
 {
-	set_pmd(pmdp, __pmd(__phys_to_pmd_val(pte) | prot));
+	set_pmd(pmdp, __pmd(__phys_to_pmd_val(ptep) | prot));
 }
 
 /*
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 094374c82db0..7e2c27e63cd8 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -218,7 +218,7 @@ static inline pmd_t pmd_mkcont(pmd_t pmd)
 
 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
-	*ptep = pte;
+	WRITE_ONCE(*ptep, pte);
 
 	/*
 	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
@@ -250,6 +250,8 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pte)
 {
+	pte_t old_pte;
+
 	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
 		__sync_icache_dcache(pte, addr);
 
@@ -258,14 +260,15 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 	 * hardware updates of the pte (ptep_set_access_flags safely changes
 	 * valid ptes without going through an invalid entry).
 	 */
-	if (IS_ENABLED(CONFIG_DEBUG_VM) && pte_valid(*ptep) && pte_valid(pte) &&
+	old_pte = READ_ONCE(*ptep);
+	if (IS_ENABLED(CONFIG_DEBUG_VM) && pte_valid(old_pte) && pte_valid(pte) &&
 	    (mm == current->active_mm || atomic_read(&mm->mm_users) > 1)) {
 		VM_WARN_ONCE(!pte_young(pte),
 			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
-			     __func__, pte_val(*ptep), pte_val(pte));
-		VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
+			     __func__, pte_val(old_pte), pte_val(pte));
+		VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
 			     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
-			     __func__, pte_val(*ptep), pte_val(pte));
+			     __func__, pte_val(old_pte), pte_val(pte));
 	}
 
 	set_pte(ptep, pte);
@@ -431,7 +434,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
-	*pmdp = pmd;
+	WRITE_ONCE(*pmdp, pmd);
 	dsb(ishst);
 	isb();
 }
@@ -482,7 +485,7 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
 
 static inline void set_pud(pud_t *pudp, pud_t pud)
 {
-	*pudp = pud;
+	WRITE_ONCE(*pudp, pud);
 	dsb(ishst);
 	isb();
 }
@@ -500,7 +503,7 @@ static inline phys_addr_t pud_page_paddr(pud_t pud)
 /* Find an entry in the second-level page table. */
 #define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
 
-#define pmd_offset_phys(dir, addr)	(pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t))
+#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))
 #define pmd_offset(dir, addr)		((pmd_t *)__va(pmd_offset_phys((dir), (addr))))
 
 #define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
@@ -535,7 +538,7 @@ static inline phys_addr_t pud_page_paddr(pud_t pud)
 
 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
-	*pgdp = pgd;
+	WRITE_ONCE(*pgdp, pgd);
 	dsb(ishst);
 }
 
@@ -552,7 +555,7 @@ static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
 /* Find an entry in the frst-level page table. */
 #define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
 
-#define pud_offset_phys(dir, addr)	(pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t))
+#define pud_offset_phys(dir, addr)	(pgd_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
 #define pud_offset(dir, addr)		((pud_t *)__va(pud_offset_phys((dir), (addr))))
 
 #define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
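
The common thread in this file (and in the hibernate.c, mm/dump.c,
mm/fault.c and mm/hugetlbpage.c hunks below) is that page-table entries
are read concurrently by the hardware walker and by other CPUs, so the
compiler must not tear, repeat or invent accesses: stores go through
WRITE_ONCE(), loads through READ_ONCE(), and walkers take one snapshot
per entry and reason only about that snapshot. A simplified stand-in for
the idiom (the kernel's READ_ONCE()/WRITE_ONCE() do considerably more
checking than these sketch macros):

#define ONCE_READ(x)	(*(const volatile __typeof__(x) *)&(x))
#define ONCE_WRITE(x, v)	(*(volatile __typeof__(x) *)&(x) = (v))

typedef struct { unsigned long val; } entry_t;

static inline entry_t entry_get(entry_t *p)
{
	/* exactly one load; cannot be split or re-issued */
	return (entry_t){ ONCE_READ(p->val) };
}

static inline void entry_set(entry_t *p, entry_t e)
{
	/* exactly one store, seen whole by a concurrent walker */
	ONCE_WRITE(p->val, e.val);
}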
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 472ef944e932..902f9edacbea 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -28,7 +28,7 @@ struct stackframe {
 	unsigned long fp;
 	unsigned long pc;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	unsigned int graph;
+	int graph;
 #endif
 };
 
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 543e11f0f657..e66b0fca99c2 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -72,15 +72,15 @@ static inline void set_fs(mm_segment_t fs)
  * This is equivalent to the following test:
  * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
  */
-static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
+static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
 {
-	unsigned long limit = current_thread_info()->addr_limit;
+	unsigned long ret, limit = current_thread_info()->addr_limit;
 
 	__chk_user_ptr(addr);
 	asm volatile(
 	// A + B <= C + 1 for all A,B,C, in four easy steps:
 	// 1: X = A + B; X' = X % 2^64
-	" adds %0, %0, %2\n"
+	" adds %0, %3, %2\n"
 	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
 	" csel %1, xzr, %1, hi\n"
 	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
@@ -92,9 +92,9 @@ static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
 	// testing X' - C == 0, subject to the previous adjustments.
 	" sbcs xzr, %0, %1\n"
 	" cset %0, ls\n"
-	: "+r" (addr), "+r" (limit) : "Ir" (size) : "cc");
+	: "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");
 
-	return addr;
+	return ret;
 }
 
 /*
@@ -104,7 +104,7 @@ static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
  */
 #define untagged_addr(addr)		sign_extend64(addr, 55)
 
-#define access_ok(type, addr, size)	__range_ok((unsigned long)(addr), size)
+#define access_ok(type, addr, size)	__range_ok(addr, size)
 #define user_addr_max			get_fs
 
 #define _ASM_EXTABLE(from, to)					\
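
As the comment in the hunk says, __range_ok() evaluates
(u65)addr + (u65)size <= (u65)current->addr_limit + 1, i.e. a 65-bit
comparison, so a sum that wraps past 2^64 can never pass the check. The
change also types the argument as a user pointer and returns the result
through a separate output operand instead of clobbering addr in place,
which is what made the (unsigned long) cast in access_ok() unnecessary.
The same predicate written with a 128-bit integer, purely for
illustration (range_ok_sketch() is a hypothetical helper):

#include <stdbool.h>
#include <stdint.h>

static bool range_ok_sketch(uint64_t addr, uint64_t size, uint64_t limit)
{
	/* plenty of headroom: the 65-bit sum cannot wrap */
	return (unsigned __int128)addr + size <= (unsigned __int128)limit + 1;
}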
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index c33b5e4010ab..68450e954d47 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -370,6 +370,7 @@ static unsigned int __kprobes aarch32_check_condition(u32 opcode, u32 psr)
 static int swp_handler(struct pt_regs *regs, u32 instr)
 {
 	u32 destreg, data, type, address = 0;
+	const void __user *user_ptr;
 	int rn, rt2, res = 0;
 
 	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
@@ -401,7 +402,8 @@ static int swp_handler(struct pt_regs *regs, u32 instr)
 		aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET), data);
 
 	/* Check access in reasonable access range for both SWP and SWPB */
-	if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
+	user_ptr = (const void __user *)(unsigned long)(address & ~3);
+	if (!access_ok(VERIFY_WRITE, user_ptr, 4)) {
 		pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n",
 			address);
 		goto fault;
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 07823595b7f0..52f15cd896e1 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -408,6 +408,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 	},
 	{
 		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
+		.enable = qcom_enable_link_stack_sanitization,
+	},
+	{
+		.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
+		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 		MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
 		.enable = enable_smccc_arch_workaround_1,
 	},
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 29b1f873e337..2985a067fc13 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -199,9 +199,11 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
 };
 
 static const struct arm64_ftr_bits ftr_ctr[] = {
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1),	/* RAO */
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1),		/* RES1 */
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 29, 1, 1),	/* DIC */
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1),	/* IDC */
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),	/* CWG */
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* ERG */
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 20, 4, 0),	/* ERG */
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),	/* DminLine */
 	/*
 	 * Linux can handle differing I-cache policies.  Userspace JITs will
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index f85ac58d08a3..a8bf1c892b90 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -90,7 +90,7 @@ static int __init set_permissions(pte_t *ptep, pgtable_t token,
 			     unsigned long addr, void *data)
 {
 	efi_memory_desc_t *md = data;
-	pte_t pte = *ptep;
+	pte_t pte = READ_ONCE(*ptep);
 
 	if (md->attribute & EFI_MEMORY_RO)
 		pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index f20cf7e99249..1ec5f28c39fc 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -202,10 +202,10 @@ static int create_safe_exec_page(void *src_start, size_t length,
 			 gfp_t mask)
 {
 	int rc = 0;
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
+	pgd_t *pgdp;
+	pud_t *pudp;
+	pmd_t *pmdp;
+	pte_t *ptep;
 	unsigned long dst = (unsigned long)allocator(mask);
 
 	if (!dst) {
@@ -216,38 +216,38 @@ static int create_safe_exec_page(void *src_start, size_t length,
 	memcpy((void *)dst, src_start, length);
 	flush_icache_range(dst, dst + length);
 
-	pgd = pgd_offset_raw(allocator(mask), dst_addr);
-	if (pgd_none(*pgd)) {
-		pud = allocator(mask);
-		if (!pud) {
+	pgdp = pgd_offset_raw(allocator(mask), dst_addr);
+	if (pgd_none(READ_ONCE(*pgdp))) {
+		pudp = allocator(mask);
+		if (!pudp) {
 			rc = -ENOMEM;
 			goto out;
 		}
-		pgd_populate(&init_mm, pgd, pud);
+		pgd_populate(&init_mm, pgdp, pudp);
 	}
 
-	pud = pud_offset(pgd, dst_addr);
-	if (pud_none(*pud)) {
-		pmd = allocator(mask);
-		if (!pmd) {
+	pudp = pud_offset(pgdp, dst_addr);
+	if (pud_none(READ_ONCE(*pudp))) {
+		pmdp = allocator(mask);
+		if (!pmdp) {
 			rc = -ENOMEM;
 			goto out;
 		}
-		pud_populate(&init_mm, pud, pmd);
+		pud_populate(&init_mm, pudp, pmdp);
 	}
 
-	pmd = pmd_offset(pud, dst_addr);
-	if (pmd_none(*pmd)) {
-		pte = allocator(mask);
-		if (!pte) {
+	pmdp = pmd_offset(pudp, dst_addr);
+	if (pmd_none(READ_ONCE(*pmdp))) {
+		ptep = allocator(mask);
+		if (!ptep) {
 			rc = -ENOMEM;
 			goto out;
 		}
-		pmd_populate_kernel(&init_mm, pmd, pte);
+		pmd_populate_kernel(&init_mm, pmdp, ptep);
 	}
 
-	pte = pte_offset_kernel(pmd, dst_addr);
-	set_pte(pte, pfn_pte(virt_to_pfn(dst), PAGE_KERNEL_EXEC));
+	ptep = pte_offset_kernel(pmdp, dst_addr);
+	set_pte(ptep, pfn_pte(virt_to_pfn(dst), PAGE_KERNEL_EXEC));
 
 	/*
 	 * Load our new page tables. A strict BBM approach requires that we
@@ -263,7 +263,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
 	 */
 	cpu_set_reserved_ttbr0();
 	local_flush_tlb_all();
-	write_sysreg(phys_to_ttbr(virt_to_phys(pgd)), ttbr0_el1);
+	write_sysreg(phys_to_ttbr(virt_to_phys(pgdp)), ttbr0_el1);
 	isb();
 
 	*phys_dst_addr = virt_to_phys((void *)dst);
@@ -320,9 +320,9 @@ int swsusp_arch_suspend(void)
 	return ret;
 }
 
-static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr)
+static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
 {
-	pte_t pte = *src_pte;
+	pte_t pte = READ_ONCE(*src_ptep);
 
 	if (pte_valid(pte)) {
 		/*
@@ -330,7 +330,7 @@ static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr)
 		 * read only (code, rodata). Clear the RDONLY bit from
 		 * the temporary mappings we use during restore.
 		 */
-		set_pte(dst_pte, pte_mkwrite(pte));
+		set_pte(dst_ptep, pte_mkwrite(pte));
 	} else if (debug_pagealloc_enabled() && !pte_none(pte)) {
 		/*
 		 * debug_pagealloc will removed the PTE_VALID bit if
@@ -343,112 +343,116 @@ static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr)
 		 */
 		BUG_ON(!pfn_valid(pte_pfn(pte)));
 
-		set_pte(dst_pte, pte_mkpresent(pte_mkwrite(pte)));
+		set_pte(dst_ptep, pte_mkpresent(pte_mkwrite(pte)));
 	}
 }
 
-static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start,
+static int copy_pte(pmd_t *dst_pmdp, pmd_t *src_pmdp, unsigned long start,
 		    unsigned long end)
 {
-	pte_t *src_pte;
-	pte_t *dst_pte;
+	pte_t *src_ptep;
+	pte_t *dst_ptep;
 	unsigned long addr = start;
 
-	dst_pte = (pte_t *)get_safe_page(GFP_ATOMIC);
-	if (!dst_pte)
+	dst_ptep = (pte_t *)get_safe_page(GFP_ATOMIC);
+	if (!dst_ptep)
 		return -ENOMEM;
-	pmd_populate_kernel(&init_mm, dst_pmd, dst_pte);
-	dst_pte = pte_offset_kernel(dst_pmd, start);
+	pmd_populate_kernel(&init_mm, dst_pmdp, dst_ptep);
+	dst_ptep = pte_offset_kernel(dst_pmdp, start);
 
-	src_pte = pte_offset_kernel(src_pmd, start);
+	src_ptep = pte_offset_kernel(src_pmdp, start);
 	do {
-		_copy_pte(dst_pte, src_pte, addr);
-	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
+		_copy_pte(dst_ptep, src_ptep, addr);
+	} while (dst_ptep++, src_ptep++, addr += PAGE_SIZE, addr != end);
 
 	return 0;
 }
 
-static int copy_pmd(pud_t *dst_pud, pud_t *src_pud, unsigned long start,
+static int copy_pmd(pud_t *dst_pudp, pud_t *src_pudp, unsigned long start,
 		    unsigned long end)
 {
-	pmd_t *src_pmd;
-	pmd_t *dst_pmd;
+	pmd_t *src_pmdp;
+	pmd_t *dst_pmdp;
 	unsigned long next;
 	unsigned long addr = start;
 
-	if (pud_none(*dst_pud)) {
-		dst_pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
-		if (!dst_pmd)
+	if (pud_none(READ_ONCE(*dst_pudp))) {
+		dst_pmdp = (pmd_t *)get_safe_page(GFP_ATOMIC);
+		if (!dst_pmdp)
 			return -ENOMEM;
-		pud_populate(&init_mm, dst_pud, dst_pmd);
+		pud_populate(&init_mm, dst_pudp, dst_pmdp);
 	}
-	dst_pmd = pmd_offset(dst_pud, start);
+	dst_pmdp = pmd_offset(dst_pudp, start);
 
-	src_pmd = pmd_offset(src_pud, start);
+	src_pmdp = pmd_offset(src_pudp, start);
 	do {
+		pmd_t pmd = READ_ONCE(*src_pmdp);
+
 		next = pmd_addr_end(addr, end);
-		if (pmd_none(*src_pmd))
+		if (pmd_none(pmd))
 			continue;
-		if (pmd_table(*src_pmd)) {
-			if (copy_pte(dst_pmd, src_pmd, addr, next))
+		if (pmd_table(pmd)) {
+			if (copy_pte(dst_pmdp, src_pmdp, addr, next))
 				return -ENOMEM;
 		} else {
-			set_pmd(dst_pmd,
-				__pmd(pmd_val(*src_pmd) & ~PMD_SECT_RDONLY));
+			set_pmd(dst_pmdp,
+				__pmd(pmd_val(pmd) & ~PMD_SECT_RDONLY));
 		}
-	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
+	} while (dst_pmdp++, src_pmdp++, addr = next, addr != end);
 
 	return 0;
 }
 
-static int copy_pud(pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long start,
+static int copy_pud(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
 		    unsigned long end)
 {
-	pud_t *dst_pud;
-	pud_t *src_pud;
+	pud_t *dst_pudp;
+	pud_t *src_pudp;
 	unsigned long next;
 	unsigned long addr = start;
 
-	if (pgd_none(*dst_pgd)) {
-		dst_pud = (pud_t *)get_safe_page(GFP_ATOMIC);
-		if (!dst_pud)
+	if (pgd_none(READ_ONCE(*dst_pgdp))) {
+		dst_pudp = (pud_t *)get_safe_page(GFP_ATOMIC);
+		if (!dst_pudp)
 			return -ENOMEM;
-		pgd_populate(&init_mm, dst_pgd, dst_pud);
+		pgd_populate(&init_mm, dst_pgdp, dst_pudp);
 	}
-	dst_pud = pud_offset(dst_pgd, start);
+	dst_pudp = pud_offset(dst_pgdp, start);
 
-	src_pud = pud_offset(src_pgd, start);
+	src_pudp = pud_offset(src_pgdp, start);
 	do {
+		pud_t pud = READ_ONCE(*src_pudp);
+
 		next = pud_addr_end(addr, end);
-		if (pud_none(*src_pud))
+		if (pud_none(pud))
 			continue;
-		if (pud_table(*(src_pud))) {
-			if (copy_pmd(dst_pud, src_pud, addr, next))
+		if (pud_table(pud)) {
+			if (copy_pmd(dst_pudp, src_pudp, addr, next))
 				return -ENOMEM;
 		} else {
-			set_pud(dst_pud,
-				__pud(pud_val(*src_pud) & ~PMD_SECT_RDONLY));
+			set_pud(dst_pudp,
+				__pud(pud_val(pud) & ~PMD_SECT_RDONLY));
 		}
-	} while (dst_pud++, src_pud++, addr = next, addr != end);
+	} while (dst_pudp++, src_pudp++, addr = next, addr != end);
 
 	return 0;
 }
 
-static int copy_page_tables(pgd_t *dst_pgd, unsigned long start,
+static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
 			    unsigned long end)
 {
 	unsigned long next;
 	unsigned long addr = start;
-	pgd_t *src_pgd = pgd_offset_k(start);
+	pgd_t *src_pgdp = pgd_offset_k(start);
 
-	dst_pgd = pgd_offset_raw(dst_pgd, start);
+	dst_pgdp = pgd_offset_raw(dst_pgdp, start);
 	do {
 		next = pgd_addr_end(addr, end);
-		if (pgd_none(*src_pgd))
+		if (pgd_none(READ_ONCE(*src_pgdp)))
 			continue;
-		if (copy_pud(dst_pgd, src_pgd, addr, next))
+		if (copy_pud(dst_pgdp, src_pgdp, addr, next))
 			return -ENOMEM;
-	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
+	} while (dst_pgdp++, src_pgdp++, addr = next, addr != end);
 
 	return 0;
 }
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 75b220ba73a3..85a251b6dfa8 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -908,9 +908,9 @@ static void __armv8pmu_probe_pmu(void *info)
 	int pmuver;
 
 	dfr0 = read_sysreg(id_aa64dfr0_el1);
-	pmuver = cpuid_feature_extract_signed_field(dfr0,
+	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
 			ID_AA64DFR0_PMUVER_SHIFT);
-	if (pmuver < 1)
+	if (pmuver == 0xf || pmuver == 0)
 		return;
 
 	probe->present = true;
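
ID-register fields on arm64 are 4 bits wide, and signedness matters when
extracting them: read as a signed field, PMUVer 0xf (the architecture's
IMPLEMENTATION DEFINED encoding) comes back as -1, and any value with
the top bit set goes negative. The fix extracts the field unsigned and
rejects 0 (no PMU) and 0xf explicitly. The extraction mechanics in a
standalone sketch:

#include <stdint.h>
#include <stdio.h>

static unsigned int field_u4(uint64_t reg, unsigned int shift)
{
	return (reg >> shift) & 0xf;	/* unsigned extract */
}

static int field_s4(uint64_t reg, unsigned int shift)
{
	/* move the field to the top bits, arithmetic-shift back down */
	return (int64_t)(reg << (60 - shift)) >> 60;
}

int main(void)
{
	uint64_t dfr0 = (uint64_t)0xf << 8;	/* PMUVer field == 0xf */

	printf("unsigned: %u, signed: %d\n",
	       field_u4(dfr0, 8), field_s4(dfr0, 8));	/* 15 vs -1 */
	return 0;
}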
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index ad8aeb098b31..c0da6efe5465 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -220,8 +220,15 @@ void __show_regs(struct pt_regs *regs)
 
 	show_regs_print_info(KERN_DEFAULT);
 	print_pstate(regs);
-	printk("pc : %pS\n", (void *)regs->pc);
-	printk("lr : %pS\n", (void *)lr);
+
+	if (!user_mode(regs)) {
+		printk("pc : %pS\n", (void *)regs->pc);
+		printk("lr : %pS\n", (void *)lr);
+	} else {
+		printk("pc : %016llx\n", regs->pc);
+		printk("lr : %016llx\n", lr);
+	}
+
 	printk("sp : %016llx\n", sp);
 
 	i = top_reg;
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 6618036ae6d4..9ae31f7e2243 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1419,7 +1419,7 @@ static int compat_ptrace_hbp_get(unsigned int note_type,
 	u64 addr = 0;
 	u32 ctrl = 0;
 
-	int err, idx = compat_ptrace_hbp_num_to_idx(num);;
+	int err, idx = compat_ptrace_hbp_num_to_idx(num);
 
 	if (num & 1) {
 		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 76809ccd309c..d5718a060672 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -59,6 +59,11 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	if (tsk->ret_stack &&
 			(frame->pc == (unsigned long)return_to_handler)) {
+		if (WARN_ON_ONCE(frame->graph == -1))
+			return -EINVAL;
+		if (frame->graph < -1)
+			frame->graph += FTRACE_NOTRACE_DEPTH;
+
 		/*
 		 * This is a case where function graph tracer has
 		 * modified a return address (LR) in a stack frame
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index 8b8bbd3eaa52..a382b2a1b84e 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -57,7 +57,7 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags)
 	if (end < start || flags)
 		return -EINVAL;
 
-	if (!access_ok(VERIFY_READ, start, end - start))
+	if (!access_ok(VERIFY_READ, (const void __user *)start, end - start))
 		return -EFAULT;
 
 	return __do_compat_cache_op(start, end);
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index a4391280fba9..f258636273c9 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -52,7 +52,7 @@ unsigned long profile_pc(struct pt_regs *regs)
 	frame.fp = regs->regs[29];
 	frame.pc = regs->pc;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = -1; /* no task info */
+	frame.graph = current->curr_ret_stack;
 #endif
 	do {
 		int ret = unwind_frame(NULL, &frame);
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index bbb0fde2780e..eb2d15147e8d 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -57,7 +57,7 @@ static const char *handler[]= {
 	"Error"
 };
 
-int show_unhandled_signals = 1;
+int show_unhandled_signals = 0;
 
 static void dump_backtrace_entry(unsigned long where)
 {
@@ -526,14 +526,6 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
 	}
 #endif
 
-	if (show_unhandled_signals_ratelimited()) {
-		pr_info("%s[%d]: syscall %d\n", current->comm,
-			task_pid_nr(current), regs->syscallno);
-		dump_instr("", regs);
-		if (user_mode(regs))
-			__show_regs(regs);
-	}
-
 	return sys_ni_syscall();
 }
 
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 116252a8d3a5..870f4b1587f9 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -407,8 +407,10 @@ again:
 		u32 midr = read_cpuid_id();
 
 		/* Apply BTAC predictors mitigation to all Falkor chips */
-		if ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)
+		if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
+		    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) {
 			__qcom_hyp_sanitize_btac_predictors();
+		}
 	}
 
 	fp_enabled = __fpsimd_enabled();
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 7b60d62ac593..65dfc8571bf8 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -286,48 +286,52 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
 
 }
 
-static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
+static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start)
 {
-	pte_t *pte = pte_offset_kernel(pmd, 0UL);
+	pte_t *ptep = pte_offset_kernel(pmdp, 0UL);
 	unsigned long addr;
 	unsigned i;
 
-	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
+	for (i = 0; i < PTRS_PER_PTE; i++, ptep++) {
 		addr = start + i * PAGE_SIZE;
-		note_page(st, addr, 4, pte_val(*pte));
+		note_page(st, addr, 4, READ_ONCE(pte_val(*ptep)));
 	}
 }
 
-static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
+static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start)
 {
-	pmd_t *pmd = pmd_offset(pud, 0UL);
+	pmd_t *pmdp = pmd_offset(pudp, 0UL);
 	unsigned long addr;
 	unsigned i;
 
-	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
+	for (i = 0; i < PTRS_PER_PMD; i++, pmdp++) {
+		pmd_t pmd = READ_ONCE(*pmdp);
+
 		addr = start + i * PMD_SIZE;
-		if (pmd_none(*pmd) || pmd_sect(*pmd)) {
-			note_page(st, addr, 3, pmd_val(*pmd));
+		if (pmd_none(pmd) || pmd_sect(pmd)) {
+			note_page(st, addr, 3, pmd_val(pmd));
 		} else {
-			BUG_ON(pmd_bad(*pmd));
-			walk_pte(st, pmd, addr);
+			BUG_ON(pmd_bad(pmd));
+			walk_pte(st, pmdp, addr);
 		}
 	}
 }
 
-static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
+static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start)
 {
-	pud_t *pud = pud_offset(pgd, 0UL);
+	pud_t *pudp = pud_offset(pgdp, 0UL);
 	unsigned long addr;
 	unsigned i;
 
-	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
+	for (i = 0; i < PTRS_PER_PUD; i++, pudp++) {
+		pud_t pud = READ_ONCE(*pudp);
+
 		addr = start + i * PUD_SIZE;
-		if (pud_none(*pud) || pud_sect(*pud)) {
-			note_page(st, addr, 2, pud_val(*pud));
+		if (pud_none(pud) || pud_sect(pud)) {
+			note_page(st, addr, 2, pud_val(pud));
 		} else {
-			BUG_ON(pud_bad(*pud));
-			walk_pmd(st, pud, addr);
+			BUG_ON(pud_bad(pud));
+			walk_pmd(st, pudp, addr);
 		}
 	}
 }
@@ -335,17 +339,19 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
 static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
 		     unsigned long start)
 {
-	pgd_t *pgd = pgd_offset(mm, 0UL);
+	pgd_t *pgdp = pgd_offset(mm, 0UL);
 	unsigned i;
 	unsigned long addr;
 
-	for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
+	for (i = 0; i < PTRS_PER_PGD; i++, pgdp++) {
+		pgd_t pgd = READ_ONCE(*pgdp);
+
 		addr = start + i * PGDIR_SIZE;
-		if (pgd_none(*pgd)) {
-			note_page(st, addr, 1, pgd_val(*pgd));
+		if (pgd_none(pgd)) {
+			note_page(st, addr, 1, pgd_val(pgd));
 		} else {
-			BUG_ON(pgd_bad(*pgd));
-			walk_pud(st, pgd, addr);
+			BUG_ON(pgd_bad(pgd));
+			walk_pud(st, pgdp, addr);
 		}
 	}
 }
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index f76bb2c3c943..bff11553eb05 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -130,7 +130,8 @@ static void mem_abort_decode(unsigned int esr)
 void show_pte(unsigned long addr)
 {
 	struct mm_struct *mm;
-	pgd_t *pgd;
+	pgd_t *pgdp;
+	pgd_t pgd;
 
 	if (addr < TASK_SIZE) {
 		/* TTBR0 */
@@ -149,33 +150,37 @@ void show_pte(unsigned long addr)
 		return;
 	}
 
-	pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgd = %p\n",
+	pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp = %p\n",
 		 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
 		 VA_BITS, mm->pgd);
-	pgd = pgd_offset(mm, addr);
-	pr_alert("[%016lx] *pgd=%016llx", addr, pgd_val(*pgd));
+	pgdp = pgd_offset(mm, addr);
+	pgd = READ_ONCE(*pgdp);
+	pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
 
 	do {
-		pud_t *pud;
-		pmd_t *pmd;
-		pte_t *pte;
+		pud_t *pudp, pud;
+		pmd_t *pmdp, pmd;
+		pte_t *ptep, pte;
 
-		if (pgd_none(*pgd) || pgd_bad(*pgd))
+		if (pgd_none(pgd) || pgd_bad(pgd))
 			break;
 
-		pud = pud_offset(pgd, addr);
-		pr_cont(", *pud=%016llx", pud_val(*pud));
-		if (pud_none(*pud) || pud_bad(*pud))
+		pudp = pud_offset(pgdp, addr);
+		pud = READ_ONCE(*pudp);
+		pr_cont(", pud=%016llx", pud_val(pud));
+		if (pud_none(pud) || pud_bad(pud))
 			break;
 
-		pmd = pmd_offset(pud, addr);
-		pr_cont(", *pmd=%016llx", pmd_val(*pmd));
-		if (pmd_none(*pmd) || pmd_bad(*pmd))
+		pmdp = pmd_offset(pudp, addr);
+		pmd = READ_ONCE(*pmdp);
+		pr_cont(", pmd=%016llx", pmd_val(pmd));
+		if (pmd_none(pmd) || pmd_bad(pmd))
 			break;
 
-		pte = pte_offset_map(pmd, addr);
-		pr_cont(", *pte=%016llx", pte_val(*pte));
-		pte_unmap(pte);
+		ptep = pte_offset_map(pmdp, addr);
+		pte = READ_ONCE(*ptep);
+		pr_cont(", pte=%016llx", pte_val(pte));
+		pte_unmap(ptep);
 	} while(0);
 
 	pr_cont("\n");
@@ -196,8 +201,9 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 			  pte_t entry, int dirty)
 {
 	pteval_t old_pteval, pteval;
+	pte_t pte = READ_ONCE(*ptep);
 
-	if (pte_same(*ptep, entry))
+	if (pte_same(pte, entry))
 		return 0;
 
 	/* only preserve the access flags and write permission */
@@ -210,7 +216,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 	 * (calculated as: a & b == ~(~a | ~b)).
 	 */
 	pte_val(entry) ^= PTE_RDONLY;
-	pteval = READ_ONCE(pte_val(*ptep));
+	pteval = pte_val(pte);
 	do {
 		old_pteval = pteval;
 		pteval ^= PTE_RDONLY;
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 6cb0fa92a651..ecc6818191df 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -54,14 +54,14 @@ static inline pgprot_t pte_pgprot(pte_t pte)
54static int find_num_contig(struct mm_struct *mm, unsigned long addr, 54static int find_num_contig(struct mm_struct *mm, unsigned long addr,
55 pte_t *ptep, size_t *pgsize) 55 pte_t *ptep, size_t *pgsize)
56{ 56{
57 pgd_t *pgd = pgd_offset(mm, addr); 57 pgd_t *pgdp = pgd_offset(mm, addr);
58 pud_t *pud; 58 pud_t *pudp;
59 pmd_t *pmd; 59 pmd_t *pmdp;
60 60
61 *pgsize = PAGE_SIZE; 61 *pgsize = PAGE_SIZE;
62 pud = pud_offset(pgd, addr); 62 pudp = pud_offset(pgdp, addr);
63 pmd = pmd_offset(pud, addr); 63 pmdp = pmd_offset(pudp, addr);
64 if ((pte_t *)pmd == ptep) { 64 if ((pte_t *)pmdp == ptep) {
65 *pgsize = PMD_SIZE; 65 *pgsize = PMD_SIZE;
66 return CONT_PMDS; 66 return CONT_PMDS;
67 } 67 }
@@ -181,11 +181,8 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
181 181
182 clear_flush(mm, addr, ptep, pgsize, ncontig); 182 clear_flush(mm, addr, ptep, pgsize, ncontig);
183 183
184 for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn) { 184 for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
185 pr_debug("%s: set pte %p to 0x%llx\n", __func__, ptep,
186 pte_val(pfn_pte(pfn, hugeprot)));
187 set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot)); 185 set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
188 }
189} 186}
190 187
191void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, 188void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
@@ -203,20 +200,20 @@ void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
203pte_t *huge_pte_alloc(struct mm_struct *mm, 200pte_t *huge_pte_alloc(struct mm_struct *mm,
204 unsigned long addr, unsigned long sz) 201 unsigned long addr, unsigned long sz)
205{ 202{
206 pgd_t *pgd; 203 pgd_t *pgdp;
207 pud_t *pud; 204 pud_t *pudp;
208 pte_t *pte = NULL; 205 pmd_t *pmdp;
209 206 pte_t *ptep = NULL;
210 pr_debug("%s: addr:0x%lx sz:0x%lx\n", __func__, addr, sz); 207
211 pgd = pgd_offset(mm, addr); 208 pgdp = pgd_offset(mm, addr);
212 pud = pud_alloc(mm, pgd, addr); 209 pudp = pud_alloc(mm, pgdp, addr);
213 if (!pud) 210 if (!pudp)
214 return NULL; 211 return NULL;
215 212
216 if (sz == PUD_SIZE) { 213 if (sz == PUD_SIZE) {
217 pte = (pte_t *)pud; 214 ptep = (pte_t *)pudp;
218 } else if (sz == (PAGE_SIZE * CONT_PTES)) { 215 } else if (sz == (PAGE_SIZE * CONT_PTES)) {
219 pmd_t *pmd = pmd_alloc(mm, pud, addr); 216 pmdp = pmd_alloc(mm, pudp, addr);
220 217
221 WARN_ON(addr & (sz - 1)); 218 WARN_ON(addr & (sz - 1));
222 /* 219 /*
@@ -226,60 +223,55 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
226 * will be no pte_unmap() to correspond with this 223 * will be no pte_unmap() to correspond with this
227 * pte_alloc_map(). 224 * pte_alloc_map().
228 */ 225 */
229 pte = pte_alloc_map(mm, pmd, addr); 226 ptep = pte_alloc_map(mm, pmdp, addr);
230 } else if (sz == PMD_SIZE) { 227 } else if (sz == PMD_SIZE) {
231 if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && 228 if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
232 pud_none(*pud)) 229 pud_none(READ_ONCE(*pudp)))
233 pte = huge_pmd_share(mm, addr, pud); 230 ptep = huge_pmd_share(mm, addr, pudp);
234 else 231 else
235 pte = (pte_t *)pmd_alloc(mm, pud, addr); 232 ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
236 } else if (sz == (PMD_SIZE * CONT_PMDS)) { 233 } else if (sz == (PMD_SIZE * CONT_PMDS)) {
237 pmd_t *pmd; 234 pmdp = pmd_alloc(mm, pudp, addr);
238
239 pmd = pmd_alloc(mm, pud, addr);
240 WARN_ON(addr & (sz - 1)); 235 WARN_ON(addr & (sz - 1));
241 return (pte_t *)pmd; 236 return (pte_t *)pmdp;
242 } 237 }
243 238
244 pr_debug("%s: addr:0x%lx sz:0x%lx ret pte=%p/0x%llx\n", __func__, addr, 239 return ptep;
245 sz, pte, pte_val(*pte));
246 return pte;
247} 240}
248 241
249pte_t *huge_pte_offset(struct mm_struct *mm, 242pte_t *huge_pte_offset(struct mm_struct *mm,
250 unsigned long addr, unsigned long sz) 243 unsigned long addr, unsigned long sz)
251{ 244{
252 pgd_t *pgd; 245 pgd_t *pgdp;
253 pud_t *pud; 246 pud_t *pudp, pud;
254 pmd_t *pmd; 247 pmd_t *pmdp, pmd;
255 248
256 pgd = pgd_offset(mm, addr); 249 pgdp = pgd_offset(mm, addr);
257 pr_debug("%s: addr:0x%lx pgd:%p\n", __func__, addr, pgd); 250 if (!pgd_present(READ_ONCE(*pgdp)))
258 if (!pgd_present(*pgd))
259 return NULL; 251 return NULL;
260 252
261 pud = pud_offset(pgd, addr); 253 pudp = pud_offset(pgdp, addr);
262 if (sz != PUD_SIZE && pud_none(*pud)) 254 pud = READ_ONCE(*pudp);
255 if (sz != PUD_SIZE && pud_none(pud))
263 return NULL; 256 return NULL;
264 /* hugepage or swap? */ 257 /* hugepage or swap? */
265 if (pud_huge(*pud) || !pud_present(*pud)) 258 if (pud_huge(pud) || !pud_present(pud))
266 return (pte_t *)pud; 259 return (pte_t *)pudp;
267 /* table; check the next level */ 260 /* table; check the next level */
268 261
269 if (sz == CONT_PMD_SIZE) 262 if (sz == CONT_PMD_SIZE)
270 addr &= CONT_PMD_MASK; 263 addr &= CONT_PMD_MASK;
271 264
272 pmd = pmd_offset(pud, addr); 265 pmdp = pmd_offset(pudp, addr);
266 pmd = READ_ONCE(*pmdp);
273 if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) && 267 if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
274 pmd_none(*pmd)) 268 pmd_none(pmd))
275 return NULL; 269 return NULL;
276 if (pmd_huge(*pmd) || !pmd_present(*pmd)) 270 if (pmd_huge(pmd) || !pmd_present(pmd))
277 return (pte_t *)pmd; 271 return (pte_t *)pmdp;
278 272
279 if (sz == CONT_PTE_SIZE) { 273 if (sz == CONT_PTE_SIZE)
280 pte_t *pte = pte_offset_kernel(pmd, (addr & CONT_PTE_MASK)); 274 return pte_offset_kernel(pmdp, (addr & CONT_PTE_MASK));
281 return pte;
282 }
283 275
284 return NULL; 276 return NULL;
285} 277}
@@ -367,7 +359,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
367 size_t pgsize; 359 size_t pgsize;
368 pte_t pte; 360 pte_t pte;
369 361
370 if (!pte_cont(*ptep)) { 362 if (!pte_cont(READ_ONCE(*ptep))) {
371 ptep_set_wrprotect(mm, addr, ptep); 363 ptep_set_wrprotect(mm, addr, ptep);
372 return; 364 return;
373 } 365 }
@@ -391,7 +383,7 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma,
391 size_t pgsize; 383 size_t pgsize;
392 int ncontig; 384 int ncontig;
393 385
394 if (!pte_cont(*ptep)) { 386 if (!pte_cont(READ_ONCE(*ptep))) {
395 ptep_clear_flush(vma, addr, ptep); 387 ptep_clear_flush(vma, addr, ptep);
396 return; 388 return;
397 } 389 }
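
The theme of this file, and of the series as a whole: dereference each page-table entry exactly once through READ_ONCE() and run every predicate on that snapshot. Without it, a concurrent updater (hardware access/dirty updates, or another CPU splitting or clearing the entry) can be observed in two different states within a single walk. The before/after shape, using the names from the hunks above:

    /* before: *pudp is read up to three times and may change in between */
    if (sz != PUD_SIZE && pud_none(*pudp))
            return NULL;
    if (pud_huge(*pudp) || !pud_present(*pudp))
            return (pte_t *)pudp;

    /* after: one snapshot, mutually consistent tests */
    pud_t pud = READ_ONCE(*pudp);

    if (sz != PUD_SIZE && pud_none(pud))
            return NULL;
    if (pud_huge(pud) || !pud_present(pud))
            return (pte_t *)pudp;
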
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 6e02e6fb4c7b..dabfc1ecda3d 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -44,92 +44,92 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
44 return __pa(p); 44 return __pa(p);
45} 45}
46 46
47static pte_t *__init kasan_pte_offset(pmd_t *pmd, unsigned long addr, int node, 47static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
48 bool early) 48 bool early)
49{ 49{
50 if (pmd_none(*pmd)) { 50 if (pmd_none(READ_ONCE(*pmdp))) {
51 phys_addr_t pte_phys = early ? __pa_symbol(kasan_zero_pte) 51 phys_addr_t pte_phys = early ? __pa_symbol(kasan_zero_pte)
52 : kasan_alloc_zeroed_page(node); 52 : kasan_alloc_zeroed_page(node);
53 __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE); 53 __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
54 } 54 }
55 55
56 return early ? pte_offset_kimg(pmd, addr) 56 return early ? pte_offset_kimg(pmdp, addr)
57 : pte_offset_kernel(pmd, addr); 57 : pte_offset_kernel(pmdp, addr);
58} 58}
59 59
60static pmd_t *__init kasan_pmd_offset(pud_t *pud, unsigned long addr, int node, 60static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
61 bool early) 61 bool early)
62{ 62{
63 if (pud_none(*pud)) { 63 if (pud_none(READ_ONCE(*pudp))) {
64 phys_addr_t pmd_phys = early ? __pa_symbol(kasan_zero_pmd) 64 phys_addr_t pmd_phys = early ? __pa_symbol(kasan_zero_pmd)
65 : kasan_alloc_zeroed_page(node); 65 : kasan_alloc_zeroed_page(node);
66 __pud_populate(pud, pmd_phys, PMD_TYPE_TABLE); 66 __pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE);
67 } 67 }
68 68
69 return early ? pmd_offset_kimg(pud, addr) : pmd_offset(pud, addr); 69 return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
70} 70}
71 71
72static pud_t *__init kasan_pud_offset(pgd_t *pgd, unsigned long addr, int node, 72static pud_t *__init kasan_pud_offset(pgd_t *pgdp, unsigned long addr, int node,
73 bool early) 73 bool early)
74{ 74{
75 if (pgd_none(*pgd)) { 75 if (pgd_none(READ_ONCE(*pgdp))) {
76 phys_addr_t pud_phys = early ? __pa_symbol(kasan_zero_pud) 76 phys_addr_t pud_phys = early ? __pa_symbol(kasan_zero_pud)
77 : kasan_alloc_zeroed_page(node); 77 : kasan_alloc_zeroed_page(node);
78 __pgd_populate(pgd, pud_phys, PMD_TYPE_TABLE); 78 __pgd_populate(pgdp, pud_phys, PMD_TYPE_TABLE);
79 } 79 }
80 80
81 return early ? pud_offset_kimg(pgd, addr) : pud_offset(pgd, addr); 81 return early ? pud_offset_kimg(pgdp, addr) : pud_offset(pgdp, addr);
82} 82}
83 83
84static void __init kasan_pte_populate(pmd_t *pmd, unsigned long addr, 84static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
85 unsigned long end, int node, bool early) 85 unsigned long end, int node, bool early)
86{ 86{
87 unsigned long next; 87 unsigned long next;
88 pte_t *pte = kasan_pte_offset(pmd, addr, node, early); 88 pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);
89 89
90 do { 90 do {
91 phys_addr_t page_phys = early ? __pa_symbol(kasan_zero_page) 91 phys_addr_t page_phys = early ? __pa_symbol(kasan_zero_page)
92 : kasan_alloc_zeroed_page(node); 92 : kasan_alloc_zeroed_page(node);
93 next = addr + PAGE_SIZE; 93 next = addr + PAGE_SIZE;
94 set_pte(pte, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL)); 94 set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
95 } while (pte++, addr = next, addr != end && pte_none(*pte)); 95 } while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
96} 96}
97 97
98static void __init kasan_pmd_populate(pud_t *pud, unsigned long addr, 98static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
99 unsigned long end, int node, bool early) 99 unsigned long end, int node, bool early)
100{ 100{
101 unsigned long next; 101 unsigned long next;
102 pmd_t *pmd = kasan_pmd_offset(pud, addr, node, early); 102 pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);
103 103
104 do { 104 do {
105 next = pmd_addr_end(addr, end); 105 next = pmd_addr_end(addr, end);
106 kasan_pte_populate(pmd, addr, next, node, early); 106 kasan_pte_populate(pmdp, addr, next, node, early);
107 } while (pmd++, addr = next, addr != end && pmd_none(*pmd)); 107 } while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
108} 108}
109 109
110static void __init kasan_pud_populate(pgd_t *pgd, unsigned long addr, 110static void __init kasan_pud_populate(pgd_t *pgdp, unsigned long addr,
111 unsigned long end, int node, bool early) 111 unsigned long end, int node, bool early)
112{ 112{
113 unsigned long next; 113 unsigned long next;
114 pud_t *pud = kasan_pud_offset(pgd, addr, node, early); 114 pud_t *pudp = kasan_pud_offset(pgdp, addr, node, early);
115 115
116 do { 116 do {
117 next = pud_addr_end(addr, end); 117 next = pud_addr_end(addr, end);
118 kasan_pmd_populate(pud, addr, next, node, early); 118 kasan_pmd_populate(pudp, addr, next, node, early);
119 } while (pud++, addr = next, addr != end && pud_none(*pud)); 119 } while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
120} 120}
121 121
122static void __init kasan_pgd_populate(unsigned long addr, unsigned long end, 122static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
123 int node, bool early) 123 int node, bool early)
124{ 124{
125 unsigned long next; 125 unsigned long next;
126 pgd_t *pgd; 126 pgd_t *pgdp;
127 127
128 pgd = pgd_offset_k(addr); 128 pgdp = pgd_offset_k(addr);
129 do { 129 do {
130 next = pgd_addr_end(addr, end); 130 next = pgd_addr_end(addr, end);
131 kasan_pud_populate(pgd, addr, next, node, early); 131 kasan_pud_populate(pgdp, addr, next, node, early);
132 } while (pgd++, addr = next, addr != end); 132 } while (pgdp++, addr = next, addr != end);
133} 133}
134 134
135/* The early shadow maps everything to a single page of zeroes */ 135/* The early shadow maps everything to a single page of zeroes */
@@ -155,14 +155,14 @@ static void __init kasan_map_populate(unsigned long start, unsigned long end,
155 */ 155 */
156void __init kasan_copy_shadow(pgd_t *pgdir) 156void __init kasan_copy_shadow(pgd_t *pgdir)
157{ 157{
158 pgd_t *pgd, *pgd_new, *pgd_end; 158 pgd_t *pgdp, *pgdp_new, *pgdp_end;
159 159
160 pgd = pgd_offset_k(KASAN_SHADOW_START); 160 pgdp = pgd_offset_k(KASAN_SHADOW_START);
161 pgd_end = pgd_offset_k(KASAN_SHADOW_END); 161 pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
162 pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START); 162 pgdp_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
163 do { 163 do {
164 set_pgd(pgd_new, *pgd); 164 set_pgd(pgdp_new, READ_ONCE(*pgdp));
165 } while (pgd++, pgd_new++, pgd != pgd_end); 165 } while (pgdp++, pgdp_new++, pgdp != pgdp_end);
166} 166}
167 167
168static void __init clear_pgds(unsigned long start, 168static void __init clear_pgds(unsigned long start,
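
Worth noting in the kasan walkers: the entry test sits in the loop condition, after the pointer has already been advanced, so the dereference there needs READ_ONCE() as well. The loop keeps populating while the next entry is still empty and stops early once it finds one that is already set up, as in kasan_pmd_populate() above:

    do {
            next = pmd_addr_end(addr, end);
            kasan_pte_populate(pmdp, addr, next, node, early);
            /* continue only while the (advanced) next entry is empty */
    } while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
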
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 4694cda823c9..84a019f55022 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -125,45 +125,48 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
125 return ((old ^ new) & ~mask) == 0; 125 return ((old ^ new) & ~mask) == 0;
126} 126}
127 127
128static void init_pte(pmd_t *pmd, unsigned long addr, unsigned long end, 128static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
129 phys_addr_t phys, pgprot_t prot) 129 phys_addr_t phys, pgprot_t prot)
130{ 130{
131 pte_t *pte; 131 pte_t *ptep;
132 132
133 pte = pte_set_fixmap_offset(pmd, addr); 133 ptep = pte_set_fixmap_offset(pmdp, addr);
134 do { 134 do {
135 pte_t old_pte = *pte; 135 pte_t old_pte = READ_ONCE(*ptep);
136 136
137 set_pte(pte, pfn_pte(__phys_to_pfn(phys), prot)); 137 set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));
138 138
139 /* 139 /*
140 * After the PTE entry has been populated once, we 140 * After the PTE entry has been populated once, we
141 * only allow updates to the permission attributes. 141 * only allow updates to the permission attributes.
142 */ 142 */
143 BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), pte_val(*pte))); 143 BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
144 READ_ONCE(pte_val(*ptep))));
144 145
145 phys += PAGE_SIZE; 146 phys += PAGE_SIZE;
146 } while (pte++, addr += PAGE_SIZE, addr != end); 147 } while (ptep++, addr += PAGE_SIZE, addr != end);
147 148
148 pte_clear_fixmap(); 149 pte_clear_fixmap();
149} 150}
150 151
151static void alloc_init_cont_pte(pmd_t *pmd, unsigned long addr, 152static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
152 unsigned long end, phys_addr_t phys, 153 unsigned long end, phys_addr_t phys,
153 pgprot_t prot, 154 pgprot_t prot,
154 phys_addr_t (*pgtable_alloc)(void), 155 phys_addr_t (*pgtable_alloc)(void),
155 int flags) 156 int flags)
156{ 157{
157 unsigned long next; 158 unsigned long next;
159 pmd_t pmd = READ_ONCE(*pmdp);
158 160
159 BUG_ON(pmd_sect(*pmd)); 161 BUG_ON(pmd_sect(pmd));
160 if (pmd_none(*pmd)) { 162 if (pmd_none(pmd)) {
161 phys_addr_t pte_phys; 163 phys_addr_t pte_phys;
162 BUG_ON(!pgtable_alloc); 164 BUG_ON(!pgtable_alloc);
163 pte_phys = pgtable_alloc(); 165 pte_phys = pgtable_alloc();
164 __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE); 166 __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
167 pmd = READ_ONCE(*pmdp);
165 } 168 }
166 BUG_ON(pmd_bad(*pmd)); 169 BUG_ON(pmd_bad(pmd));
167 170
168 do { 171 do {
169 pgprot_t __prot = prot; 172 pgprot_t __prot = prot;
@@ -175,67 +178,69 @@ static void alloc_init_cont_pte(pmd_t *pmd, unsigned long addr,
175 (flags & NO_CONT_MAPPINGS) == 0) 178 (flags & NO_CONT_MAPPINGS) == 0)
176 __prot = __pgprot(pgprot_val(prot) | PTE_CONT); 179 __prot = __pgprot(pgprot_val(prot) | PTE_CONT);
177 180
178 init_pte(pmd, addr, next, phys, __prot); 181 init_pte(pmdp, addr, next, phys, __prot);
179 182
180 phys += next - addr; 183 phys += next - addr;
181 } while (addr = next, addr != end); 184 } while (addr = next, addr != end);
182} 185}
183 186
184static void init_pmd(pud_t *pud, unsigned long addr, unsigned long end, 187static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
185 phys_addr_t phys, pgprot_t prot, 188 phys_addr_t phys, pgprot_t prot,
186 phys_addr_t (*pgtable_alloc)(void), int flags) 189 phys_addr_t (*pgtable_alloc)(void), int flags)
187{ 190{
188 unsigned long next; 191 unsigned long next;
189 pmd_t *pmd; 192 pmd_t *pmdp;
190 193
191 pmd = pmd_set_fixmap_offset(pud, addr); 194 pmdp = pmd_set_fixmap_offset(pudp, addr);
192 do { 195 do {
193 pmd_t old_pmd = *pmd; 196 pmd_t old_pmd = READ_ONCE(*pmdp);
194 197
195 next = pmd_addr_end(addr, end); 198 next = pmd_addr_end(addr, end);
196 199
197 /* try section mapping first */ 200 /* try section mapping first */
198 if (((addr | next | phys) & ~SECTION_MASK) == 0 && 201 if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
199 (flags & NO_BLOCK_MAPPINGS) == 0) { 202 (flags & NO_BLOCK_MAPPINGS) == 0) {
200 pmd_set_huge(pmd, phys, prot); 203 pmd_set_huge(pmdp, phys, prot);
201 204
202 /* 205 /*
203 * After the PMD entry has been populated once, we 206 * After the PMD entry has been populated once, we
204 * only allow updates to the permission attributes. 207 * only allow updates to the permission attributes.
205 */ 208 */
206 BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd), 209 BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
207 pmd_val(*pmd))); 210 READ_ONCE(pmd_val(*pmdp))));
208 } else { 211 } else {
209 alloc_init_cont_pte(pmd, addr, next, phys, prot, 212 alloc_init_cont_pte(pmdp, addr, next, phys, prot,
210 pgtable_alloc, flags); 213 pgtable_alloc, flags);
211 214
212 BUG_ON(pmd_val(old_pmd) != 0 && 215 BUG_ON(pmd_val(old_pmd) != 0 &&
213 pmd_val(old_pmd) != pmd_val(*pmd)); 216 pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
214 } 217 }
215 phys += next - addr; 218 phys += next - addr;
216 } while (pmd++, addr = next, addr != end); 219 } while (pmdp++, addr = next, addr != end);
217 220
218 pmd_clear_fixmap(); 221 pmd_clear_fixmap();
219} 222}
220 223
221static void alloc_init_cont_pmd(pud_t *pud, unsigned long addr, 224static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
222 unsigned long end, phys_addr_t phys, 225 unsigned long end, phys_addr_t phys,
223 pgprot_t prot, 226 pgprot_t prot,
224 phys_addr_t (*pgtable_alloc)(void), int flags) 227 phys_addr_t (*pgtable_alloc)(void), int flags)
225{ 228{
226 unsigned long next; 229 unsigned long next;
230 pud_t pud = READ_ONCE(*pudp);
227 231
228 /* 232 /*
229 * Check for initial section mappings in the pgd/pud. 233 * Check for initial section mappings in the pgd/pud.
230 */ 234 */
231 BUG_ON(pud_sect(*pud)); 235 BUG_ON(pud_sect(pud));
232 if (pud_none(*pud)) { 236 if (pud_none(pud)) {
233 phys_addr_t pmd_phys; 237 phys_addr_t pmd_phys;
234 BUG_ON(!pgtable_alloc); 238 BUG_ON(!pgtable_alloc);
235 pmd_phys = pgtable_alloc(); 239 pmd_phys = pgtable_alloc();
236 __pud_populate(pud, pmd_phys, PUD_TYPE_TABLE); 240 __pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
241 pud = READ_ONCE(*pudp);
237 } 242 }
238 BUG_ON(pud_bad(*pud)); 243 BUG_ON(pud_bad(pud));
239 244
240 do { 245 do {
241 pgprot_t __prot = prot; 246 pgprot_t __prot = prot;
@@ -247,7 +252,7 @@ static void alloc_init_cont_pmd(pud_t *pud, unsigned long addr,
247 (flags & NO_CONT_MAPPINGS) == 0) 252 (flags & NO_CONT_MAPPINGS) == 0)
248 __prot = __pgprot(pgprot_val(prot) | PTE_CONT); 253 __prot = __pgprot(pgprot_val(prot) | PTE_CONT);
249 254
250 init_pmd(pud, addr, next, phys, __prot, pgtable_alloc, flags); 255 init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);
251 256
252 phys += next - addr; 257 phys += next - addr;
253 } while (addr = next, addr != end); 258 } while (addr = next, addr != end);
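
A subtlety of the snapshot style shows up in the two alloc_init_cont_* hunks above: once the function itself populates the entry, its local snapshot is stale, so it must be re-read before the pud_bad()/pmd_bad() sanity check. The pattern, condensed from the PUD variant:

    pud_t pud = READ_ONCE(*pudp);

    BUG_ON(pud_sect(pud));
    if (pud_none(pud)) {
            phys_addr_t pmd_phys = pgtable_alloc();

            __pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
            pud = READ_ONCE(*pudp);    /* refresh: we just wrote *pudp */
    }
    BUG_ON(pud_bad(pud));
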
@@ -265,25 +270,27 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next,
265 return true; 270 return true;
266} 271}
267 272
268static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end, 273static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
269 phys_addr_t phys, pgprot_t prot, 274 phys_addr_t phys, pgprot_t prot,
270 phys_addr_t (*pgtable_alloc)(void), 275 phys_addr_t (*pgtable_alloc)(void),
271 int flags) 276 int flags)
272{ 277{
273 pud_t *pud;
274 unsigned long next; 278 unsigned long next;
279 pud_t *pudp;
280 pgd_t pgd = READ_ONCE(*pgdp);
275 281
276 if (pgd_none(*pgd)) { 282 if (pgd_none(pgd)) {
277 phys_addr_t pud_phys; 283 phys_addr_t pud_phys;
278 BUG_ON(!pgtable_alloc); 284 BUG_ON(!pgtable_alloc);
279 pud_phys = pgtable_alloc(); 285 pud_phys = pgtable_alloc();
280 __pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE); 286 __pgd_populate(pgdp, pud_phys, PUD_TYPE_TABLE);
287 pgd = READ_ONCE(*pgdp);
281 } 288 }
282 BUG_ON(pgd_bad(*pgd)); 289 BUG_ON(pgd_bad(pgd));
283 290
284 pud = pud_set_fixmap_offset(pgd, addr); 291 pudp = pud_set_fixmap_offset(pgdp, addr);
285 do { 292 do {
286 pud_t old_pud = *pud; 293 pud_t old_pud = READ_ONCE(*pudp);
287 294
288 next = pud_addr_end(addr, end); 295 next = pud_addr_end(addr, end);
289 296
@@ -292,23 +299,23 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
292 */ 299 */
293 if (use_1G_block(addr, next, phys) && 300 if (use_1G_block(addr, next, phys) &&
294 (flags & NO_BLOCK_MAPPINGS) == 0) { 301 (flags & NO_BLOCK_MAPPINGS) == 0) {
295 pud_set_huge(pud, phys, prot); 302 pud_set_huge(pudp, phys, prot);
296 303
297 /* 304 /*
298 * After the PUD entry has been populated once, we 305 * After the PUD entry has been populated once, we
299 * only allow updates to the permission attributes. 306 * only allow updates to the permission attributes.
300 */ 307 */
301 BUG_ON(!pgattr_change_is_safe(pud_val(old_pud), 308 BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
302 pud_val(*pud))); 309 READ_ONCE(pud_val(*pudp))));
303 } else { 310 } else {
304 alloc_init_cont_pmd(pud, addr, next, phys, prot, 311 alloc_init_cont_pmd(pudp, addr, next, phys, prot,
305 pgtable_alloc, flags); 312 pgtable_alloc, flags);
306 313
307 BUG_ON(pud_val(old_pud) != 0 && 314 BUG_ON(pud_val(old_pud) != 0 &&
308 pud_val(old_pud) != pud_val(*pud)); 315 pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
309 } 316 }
310 phys += next - addr; 317 phys += next - addr;
311 } while (pud++, addr = next, addr != end); 318 } while (pudp++, addr = next, addr != end);
312 319
313 pud_clear_fixmap(); 320 pud_clear_fixmap();
314} 321}
@@ -320,7 +327,7 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
320 int flags) 327 int flags)
321{ 328{
322 unsigned long addr, length, end, next; 329 unsigned long addr, length, end, next;
323 pgd_t *pgd = pgd_offset_raw(pgdir, virt); 330 pgd_t *pgdp = pgd_offset_raw(pgdir, virt);
324 331
325 /* 332 /*
326 * If the virtual and physical address don't have the same offset 333 * If the virtual and physical address don't have the same offset
@@ -336,10 +343,10 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
336 end = addr + length; 343 end = addr + length;
337 do { 344 do {
338 next = pgd_addr_end(addr, end); 345 next = pgd_addr_end(addr, end);
339 alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc, 346 alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
340 flags); 347 flags);
341 phys += next - addr; 348 phys += next - addr;
342 } while (pgd++, addr = next, addr != end); 349 } while (pgdp++, addr = next, addr != end);
343} 350}
344 351
345static phys_addr_t pgd_pgtable_alloc(void) 352static phys_addr_t pgd_pgtable_alloc(void)
@@ -401,10 +408,10 @@ static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
401 flush_tlb_kernel_range(virt, virt + size); 408 flush_tlb_kernel_range(virt, virt + size);
402} 409}
403 410
404static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, 411static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
405 phys_addr_t end, pgprot_t prot, int flags) 412 phys_addr_t end, pgprot_t prot, int flags)
406{ 413{
407 __create_pgd_mapping(pgd, start, __phys_to_virt(start), end - start, 414 __create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
408 prot, early_pgtable_alloc, flags); 415 prot, early_pgtable_alloc, flags);
409} 416}
410 417
@@ -418,7 +425,7 @@ void __init mark_linear_text_alias_ro(void)
418 PAGE_KERNEL_RO); 425 PAGE_KERNEL_RO);
419} 426}
420 427
421static void __init map_mem(pgd_t *pgd) 428static void __init map_mem(pgd_t *pgdp)
422{ 429{
423 phys_addr_t kernel_start = __pa_symbol(_text); 430 phys_addr_t kernel_start = __pa_symbol(_text);
424 phys_addr_t kernel_end = __pa_symbol(__init_begin); 431 phys_addr_t kernel_end = __pa_symbol(__init_begin);
@@ -451,7 +458,7 @@ static void __init map_mem(pgd_t *pgd)
451 if (memblock_is_nomap(reg)) 458 if (memblock_is_nomap(reg))
452 continue; 459 continue;
453 460
454 __map_memblock(pgd, start, end, PAGE_KERNEL, flags); 461 __map_memblock(pgdp, start, end, PAGE_KERNEL, flags);
455 } 462 }
456 463
457 /* 464 /*
@@ -464,7 +471,7 @@ static void __init map_mem(pgd_t *pgd)
464 * Note that contiguous mappings cannot be remapped in this way, 471 * Note that contiguous mappings cannot be remapped in this way,
465 * so we should avoid them here. 472 * so we should avoid them here.
466 */ 473 */
467 __map_memblock(pgd, kernel_start, kernel_end, 474 __map_memblock(pgdp, kernel_start, kernel_end,
468 PAGE_KERNEL, NO_CONT_MAPPINGS); 475 PAGE_KERNEL, NO_CONT_MAPPINGS);
469 memblock_clear_nomap(kernel_start, kernel_end - kernel_start); 476 memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
470 477
@@ -475,7 +482,7 @@ static void __init map_mem(pgd_t *pgd)
475 * through /sys/kernel/kexec_crash_size interface. 482 * through /sys/kernel/kexec_crash_size interface.
476 */ 483 */
477 if (crashk_res.end) { 484 if (crashk_res.end) {
478 __map_memblock(pgd, crashk_res.start, crashk_res.end + 1, 485 __map_memblock(pgdp, crashk_res.start, crashk_res.end + 1,
479 PAGE_KERNEL, 486 PAGE_KERNEL,
480 NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS); 487 NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
481 memblock_clear_nomap(crashk_res.start, 488 memblock_clear_nomap(crashk_res.start,
@@ -499,7 +506,7 @@ void mark_rodata_ro(void)
499 debug_checkwx(); 506 debug_checkwx();
500} 507}
501 508
502static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end, 509static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
503 pgprot_t prot, struct vm_struct *vma, 510 pgprot_t prot, struct vm_struct *vma,
504 int flags, unsigned long vm_flags) 511 int flags, unsigned long vm_flags)
505{ 512{
@@ -509,7 +516,7 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
509 BUG_ON(!PAGE_ALIGNED(pa_start)); 516 BUG_ON(!PAGE_ALIGNED(pa_start));
510 BUG_ON(!PAGE_ALIGNED(size)); 517 BUG_ON(!PAGE_ALIGNED(size));
511 518
512 __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot, 519 __create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
513 early_pgtable_alloc, flags); 520 early_pgtable_alloc, flags);
514 521
515 if (!(vm_flags & VM_NO_GUARD)) 522 if (!(vm_flags & VM_NO_GUARD))
@@ -562,7 +569,7 @@ core_initcall(map_entry_trampoline);
562/* 569/*
563 * Create fine-grained mappings for the kernel. 570 * Create fine-grained mappings for the kernel.
564 */ 571 */
565static void __init map_kernel(pgd_t *pgd) 572static void __init map_kernel(pgd_t *pgdp)
566{ 573{
567 static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext, 574 static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
568 vmlinux_initdata, vmlinux_data; 575 vmlinux_initdata, vmlinux_data;
@@ -578,24 +585,24 @@ static void __init map_kernel(pgd_t *pgd)
578 * Only rodata will be remapped with different permissions later on, 585 * Only rodata will be remapped with different permissions later on,
579 * all other segments are allowed to use contiguous mappings. 586 * all other segments are allowed to use contiguous mappings.
580 */ 587 */
581 map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0, 588 map_kernel_segment(pgdp, _text, _etext, text_prot, &vmlinux_text, 0,
582 VM_NO_GUARD); 589 VM_NO_GUARD);
583 map_kernel_segment(pgd, __start_rodata, __inittext_begin, PAGE_KERNEL, 590 map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
584 &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD); 591 &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
585 map_kernel_segment(pgd, __inittext_begin, __inittext_end, text_prot, 592 map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot,
586 &vmlinux_inittext, 0, VM_NO_GUARD); 593 &vmlinux_inittext, 0, VM_NO_GUARD);
587 map_kernel_segment(pgd, __initdata_begin, __initdata_end, PAGE_KERNEL, 594 map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL,
588 &vmlinux_initdata, 0, VM_NO_GUARD); 595 &vmlinux_initdata, 0, VM_NO_GUARD);
589 map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0); 596 map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
590 597
591 if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) { 598 if (!READ_ONCE(pgd_val(*pgd_offset_raw(pgdp, FIXADDR_START)))) {
592 /* 599 /*
593 * The fixmap falls in a separate pgd to the kernel, and doesn't 600 * The fixmap falls in a separate pgd to the kernel, and doesn't
594 * live in the carveout for the swapper_pg_dir. We can simply 601 * live in the carveout for the swapper_pg_dir. We can simply
595 * re-use the existing dir for the fixmap. 602 * re-use the existing dir for the fixmap.
596 */ 603 */
597 set_pgd(pgd_offset_raw(pgd, FIXADDR_START), 604 set_pgd(pgd_offset_raw(pgdp, FIXADDR_START),
598 *pgd_offset_k(FIXADDR_START)); 605 READ_ONCE(*pgd_offset_k(FIXADDR_START)));
599 } else if (CONFIG_PGTABLE_LEVELS > 3) { 606 } else if (CONFIG_PGTABLE_LEVELS > 3) {
600 /* 607 /*
601 * The fixmap shares its top level pgd entry with the kernel 608 * The fixmap shares its top level pgd entry with the kernel
@@ -604,14 +611,15 @@ static void __init map_kernel(pgd_t *pgd)
604 * entry instead. 611 * entry instead.
605 */ 612 */
606 BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES)); 613 BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
607 pud_populate(&init_mm, pud_set_fixmap_offset(pgd, FIXADDR_START), 614 pud_populate(&init_mm,
615 pud_set_fixmap_offset(pgdp, FIXADDR_START),
608 lm_alias(bm_pmd)); 616 lm_alias(bm_pmd));
609 pud_clear_fixmap(); 617 pud_clear_fixmap();
610 } else { 618 } else {
611 BUG(); 619 BUG();
612 } 620 }
613 621
614 kasan_copy_shadow(pgd); 622 kasan_copy_shadow(pgdp);
615} 623}
616 624
617/* 625/*
@@ -621,10 +629,10 @@ static void __init map_kernel(pgd_t *pgd)
621void __init paging_init(void) 629void __init paging_init(void)
622{ 630{
623 phys_addr_t pgd_phys = early_pgtable_alloc(); 631 phys_addr_t pgd_phys = early_pgtable_alloc();
624 pgd_t *pgd = pgd_set_fixmap(pgd_phys); 632 pgd_t *pgdp = pgd_set_fixmap(pgd_phys);
625 633
626 map_kernel(pgd); 634 map_kernel(pgdp);
627 map_mem(pgd); 635 map_mem(pgdp);
628 636
629 /* 637 /*
630 * We want to reuse the original swapper_pg_dir so we don't have to 638 * We want to reuse the original swapper_pg_dir so we don't have to
@@ -635,7 +643,7 @@ void __init paging_init(void)
635 * To do this we need to go via a temporary pgd. 643 * To do this we need to go via a temporary pgd.
636 */ 644 */
637 cpu_replace_ttbr1(__va(pgd_phys)); 645 cpu_replace_ttbr1(__va(pgd_phys));
638 memcpy(swapper_pg_dir, pgd, PGD_SIZE); 646 memcpy(swapper_pg_dir, pgdp, PGD_SIZE);
639 cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); 647 cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
640 648
641 pgd_clear_fixmap(); 649 pgd_clear_fixmap();
@@ -655,37 +663,40 @@ void __init paging_init(void)
655 */ 663 */
656int kern_addr_valid(unsigned long addr) 664int kern_addr_valid(unsigned long addr)
657{ 665{
658 pgd_t *pgd; 666 pgd_t *pgdp;
659 pud_t *pud; 667 pud_t *pudp, pud;
660 pmd_t *pmd; 668 pmd_t *pmdp, pmd;
661 pte_t *pte; 669 pte_t *ptep, pte;
662 670
663 if ((((long)addr) >> VA_BITS) != -1UL) 671 if ((((long)addr) >> VA_BITS) != -1UL)
664 return 0; 672 return 0;
665 673
666 pgd = pgd_offset_k(addr); 674 pgdp = pgd_offset_k(addr);
667 if (pgd_none(*pgd)) 675 if (pgd_none(READ_ONCE(*pgdp)))
668 return 0; 676 return 0;
669 677
670 pud = pud_offset(pgd, addr); 678 pudp = pud_offset(pgdp, addr);
671 if (pud_none(*pud)) 679 pud = READ_ONCE(*pudp);
680 if (pud_none(pud))
672 return 0; 681 return 0;
673 682
674 if (pud_sect(*pud)) 683 if (pud_sect(pud))
675 return pfn_valid(pud_pfn(*pud)); 684 return pfn_valid(pud_pfn(pud));
676 685
677 pmd = pmd_offset(pud, addr); 686 pmdp = pmd_offset(pudp, addr);
678 if (pmd_none(*pmd)) 687 pmd = READ_ONCE(*pmdp);
688 if (pmd_none(pmd))
679 return 0; 689 return 0;
680 690
681 if (pmd_sect(*pmd)) 691 if (pmd_sect(pmd))
682 return pfn_valid(pmd_pfn(*pmd)); 692 return pfn_valid(pmd_pfn(pmd));
683 693
684 pte = pte_offset_kernel(pmd, addr); 694 ptep = pte_offset_kernel(pmdp, addr);
685 if (pte_none(*pte)) 695 pte = READ_ONCE(*ptep);
696 if (pte_none(pte))
686 return 0; 697 return 0;
687 698
688 return pfn_valid(pte_pfn(*pte)); 699 return pfn_valid(pte_pfn(pte));
689} 700}
690#ifdef CONFIG_SPARSEMEM_VMEMMAP 701#ifdef CONFIG_SPARSEMEM_VMEMMAP
691#if !ARM64_SWAPPER_USES_SECTION_MAPS 702#if !ARM64_SWAPPER_USES_SECTION_MAPS
@@ -700,32 +711,32 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
700{ 711{
701 unsigned long addr = start; 712 unsigned long addr = start;
702 unsigned long next; 713 unsigned long next;
703 pgd_t *pgd; 714 pgd_t *pgdp;
704 pud_t *pud; 715 pud_t *pudp;
705 pmd_t *pmd; 716 pmd_t *pmdp;
706 717
707 do { 718 do {
708 next = pmd_addr_end(addr, end); 719 next = pmd_addr_end(addr, end);
709 720
710 pgd = vmemmap_pgd_populate(addr, node); 721 pgdp = vmemmap_pgd_populate(addr, node);
711 if (!pgd) 722 if (!pgdp)
712 return -ENOMEM; 723 return -ENOMEM;
713 724
714 pud = vmemmap_pud_populate(pgd, addr, node); 725 pudp = vmemmap_pud_populate(pgdp, addr, node);
715 if (!pud) 726 if (!pudp)
716 return -ENOMEM; 727 return -ENOMEM;
717 728
718 pmd = pmd_offset(pud, addr); 729 pmdp = pmd_offset(pudp, addr);
719 if (pmd_none(*pmd)) { 730 if (pmd_none(READ_ONCE(*pmdp))) {
720 void *p = NULL; 731 void *p = NULL;
721 732
722 p = vmemmap_alloc_block_buf(PMD_SIZE, node); 733 p = vmemmap_alloc_block_buf(PMD_SIZE, node);
723 if (!p) 734 if (!p)
724 return -ENOMEM; 735 return -ENOMEM;
725 736
726 pmd_set_huge(pmd, __pa(p), __pgprot(PROT_SECT_NORMAL)); 737 pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
727 } else 738 } else
728 vmemmap_verify((pte_t *)pmd, node, addr, next); 739 vmemmap_verify((pte_t *)pmdp, node, addr, next);
729 } while (addr = next, addr != end); 740 } while (addr = next, addr != end);
730 741
731 return 0; 742 return 0;
@@ -739,20 +750,22 @@ void vmemmap_free(unsigned long start, unsigned long end,
739 750
740static inline pud_t * fixmap_pud(unsigned long addr) 751static inline pud_t * fixmap_pud(unsigned long addr)
741{ 752{
742 pgd_t *pgd = pgd_offset_k(addr); 753 pgd_t *pgdp = pgd_offset_k(addr);
754 pgd_t pgd = READ_ONCE(*pgdp);
743 755
744 BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd)); 756 BUG_ON(pgd_none(pgd) || pgd_bad(pgd));
745 757
746 return pud_offset_kimg(pgd, addr); 758 return pud_offset_kimg(pgdp, addr);
747} 759}
748 760
749static inline pmd_t * fixmap_pmd(unsigned long addr) 761static inline pmd_t * fixmap_pmd(unsigned long addr)
750{ 762{
751 pud_t *pud = fixmap_pud(addr); 763 pud_t *pudp = fixmap_pud(addr);
764 pud_t pud = READ_ONCE(*pudp);
752 765
753 BUG_ON(pud_none(*pud) || pud_bad(*pud)); 766 BUG_ON(pud_none(pud) || pud_bad(pud));
754 767
755 return pmd_offset_kimg(pud, addr); 768 return pmd_offset_kimg(pudp, addr);
756} 769}
757 770
758static inline pte_t * fixmap_pte(unsigned long addr) 771static inline pte_t * fixmap_pte(unsigned long addr)
@@ -768,30 +781,31 @@ static inline pte_t * fixmap_pte(unsigned long addr)
768 */ 781 */
769void __init early_fixmap_init(void) 782void __init early_fixmap_init(void)
770{ 783{
771 pgd_t *pgd; 784 pgd_t *pgdp, pgd;
772 pud_t *pud; 785 pud_t *pudp;
773 pmd_t *pmd; 786 pmd_t *pmdp;
774 unsigned long addr = FIXADDR_START; 787 unsigned long addr = FIXADDR_START;
775 788
776 pgd = pgd_offset_k(addr); 789 pgdp = pgd_offset_k(addr);
790 pgd = READ_ONCE(*pgdp);
777 if (CONFIG_PGTABLE_LEVELS > 3 && 791 if (CONFIG_PGTABLE_LEVELS > 3 &&
778 !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa_symbol(bm_pud))) { 792 !(pgd_none(pgd) || pgd_page_paddr(pgd) == __pa_symbol(bm_pud))) {
779 /* 793 /*
780 * We only end up here if the kernel mapping and the fixmap 794 * We only end up here if the kernel mapping and the fixmap
781 * share the top level pgd entry, which should only happen on 795 * share the top level pgd entry, which should only happen on
782 * 16k/4 levels configurations. 796 * 16k/4 levels configurations.
783 */ 797 */
784 BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES)); 798 BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
785 pud = pud_offset_kimg(pgd, addr); 799 pudp = pud_offset_kimg(pgdp, addr);
786 } else { 800 } else {
787 if (pgd_none(*pgd)) 801 if (pgd_none(pgd))
788 __pgd_populate(pgd, __pa_symbol(bm_pud), PUD_TYPE_TABLE); 802 __pgd_populate(pgdp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
789 pud = fixmap_pud(addr); 803 pudp = fixmap_pud(addr);
790 } 804 }
791 if (pud_none(*pud)) 805 if (pud_none(READ_ONCE(*pudp)))
792 __pud_populate(pud, __pa_symbol(bm_pmd), PMD_TYPE_TABLE); 806 __pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
793 pmd = fixmap_pmd(addr); 807 pmdp = fixmap_pmd(addr);
794 __pmd_populate(pmd, __pa_symbol(bm_pte), PMD_TYPE_TABLE); 808 __pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
795 809
796 /* 810 /*
797 * The boot-ioremap range spans multiple pmds, for which 811 * The boot-ioremap range spans multiple pmds, for which
@@ -800,11 +814,11 @@ void __init early_fixmap_init(void)
800 BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT) 814 BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
801 != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT)); 815 != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
802 816
803 if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN))) 817 if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
804 || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) { 818 || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
805 WARN_ON(1); 819 WARN_ON(1);
806 pr_warn("pmd %p != %p, %p\n", 820 pr_warn("pmdp %p != %p, %p\n",
807 pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)), 821 pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
808 fixmap_pmd(fix_to_virt(FIX_BTMAP_END))); 822 fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
809 pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n", 823 pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
810 fix_to_virt(FIX_BTMAP_BEGIN)); 824 fix_to_virt(FIX_BTMAP_BEGIN));
@@ -824,16 +838,16 @@ void __set_fixmap(enum fixed_addresses idx,
824 phys_addr_t phys, pgprot_t flags) 838 phys_addr_t phys, pgprot_t flags)
825{ 839{
826 unsigned long addr = __fix_to_virt(idx); 840 unsigned long addr = __fix_to_virt(idx);
827 pte_t *pte; 841 pte_t *ptep;
828 842
829 BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses); 843 BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
830 844
831 pte = fixmap_pte(addr); 845 ptep = fixmap_pte(addr);
832 846
833 if (pgprot_val(flags)) { 847 if (pgprot_val(flags)) {
834 set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags)); 848 set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
835 } else { 849 } else {
836 pte_clear(&init_mm, addr, pte); 850 pte_clear(&init_mm, addr, ptep);
837 flush_tlb_kernel_range(addr, addr+PAGE_SIZE); 851 flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
838 } 852 }
839} 853}
@@ -915,36 +929,46 @@ int __init arch_ioremap_pmd_supported(void)
915 return 1; 929 return 1;
916} 930}
917 931
918int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot) 932int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
919{ 933{
920 pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT | 934 pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
921 pgprot_val(mk_sect_prot(prot))); 935 pgprot_val(mk_sect_prot(prot)));
936
937 /* ioremap_page_range doesn't honour BBM */
938 if (pud_present(READ_ONCE(*pudp)))
939 return 0;
940
922 BUG_ON(phys & ~PUD_MASK); 941 BUG_ON(phys & ~PUD_MASK);
923 set_pud(pud, pfn_pud(__phys_to_pfn(phys), sect_prot)); 942 set_pud(pudp, pfn_pud(__phys_to_pfn(phys), sect_prot));
924 return 1; 943 return 1;
925} 944}
926 945
927int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot) 946int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
928{ 947{
929 pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT | 948 pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
930 pgprot_val(mk_sect_prot(prot))); 949 pgprot_val(mk_sect_prot(prot)));
950
951 /* ioremap_page_range doesn't honour BBM */
952 if (pmd_present(READ_ONCE(*pmdp)))
953 return 0;
954
931 BUG_ON(phys & ~PMD_MASK); 955 BUG_ON(phys & ~PMD_MASK);
932 set_pmd(pmd, pfn_pmd(__phys_to_pfn(phys), sect_prot)); 956 set_pmd(pmdp, pfn_pmd(__phys_to_pfn(phys), sect_prot));
933 return 1; 957 return 1;
934} 958}
935 959
936int pud_clear_huge(pud_t *pud) 960int pud_clear_huge(pud_t *pudp)
937{ 961{
938 if (!pud_sect(*pud)) 962 if (!pud_sect(READ_ONCE(*pudp)))
939 return 0; 963 return 0;
940 pud_clear(pud); 964 pud_clear(pudp);
941 return 1; 965 return 1;
942} 966}
943 967
944int pmd_clear_huge(pmd_t *pmd) 968int pmd_clear_huge(pmd_t *pmdp)
945{ 969{
946 if (!pmd_sect(*pmd)) 970 if (!pmd_sect(READ_ONCE(*pmdp)))
947 return 0; 971 return 0;
948 pmd_clear(pmd); 972 pmd_clear(pmdp);
949 return 1; 973 return 1;
950} 974}
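
The pud_set_huge()/pmd_set_huge() hunks add more than the rename: ioremap_page_range() may call them for an entry that is already live, and the architecture requires break-before-make (BBM) when changing a live block mapping, that is, invalidate the entry and flush the TLB before writing the replacement, otherwise a TLB conflict abort is possible. Since BBM cannot be done here, the new guard refuses instead:

    /* ioremap_page_range() doesn't honour BBM: never overwrite a live
     * entry; returning 0 makes the caller fall back to next-smaller
     * mappings (PMDs, then PTEs) for this range.
     */
    if (pud_present(READ_ONCE(*pudp)))
            return 0;
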
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index a682a0a2a0fa..a56359373d8b 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -29,7 +29,7 @@ static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
29 void *data) 29 void *data)
30{ 30{
31 struct page_change_data *cdata = data; 31 struct page_change_data *cdata = data;
32 pte_t pte = *ptep; 32 pte_t pte = READ_ONCE(*ptep);
33 33
34 pte = clear_pte_bit(pte, cdata->clear_mask); 34 pte = clear_pte_bit(pte, cdata->clear_mask);
35 pte = set_pte_bit(pte, cdata->set_mask); 35 pte = set_pte_bit(pte, cdata->set_mask);
@@ -156,30 +156,32 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
156 */ 156 */
157bool kernel_page_present(struct page *page) 157bool kernel_page_present(struct page *page)
158{ 158{
159 pgd_t *pgd; 159 pgd_t *pgdp;
160 pud_t *pud; 160 pud_t *pudp, pud;
161 pmd_t *pmd; 161 pmd_t *pmdp, pmd;
162 pte_t *pte; 162 pte_t *ptep;
163 unsigned long addr = (unsigned long)page_address(page); 163 unsigned long addr = (unsigned long)page_address(page);
164 164
165 pgd = pgd_offset_k(addr); 165 pgdp = pgd_offset_k(addr);
166 if (pgd_none(*pgd)) 166 if (pgd_none(READ_ONCE(*pgdp)))
167 return false; 167 return false;
168 168
169 pud = pud_offset(pgd, addr); 169 pudp = pud_offset(pgdp, addr);
170 if (pud_none(*pud)) 170 pud = READ_ONCE(*pudp);
171 if (pud_none(pud))
171 return false; 172 return false;
172 if (pud_sect(*pud)) 173 if (pud_sect(pud))
173 return true; 174 return true;
174 175
175 pmd = pmd_offset(pud, addr); 176 pmdp = pmd_offset(pudp, addr);
176 if (pmd_none(*pmd)) 177 pmd = READ_ONCE(*pmdp);
178 if (pmd_none(pmd))
177 return false; 179 return false;
178 if (pmd_sect(*pmd)) 180 if (pmd_sect(pmd))
179 return true; 181 return true;
180 182
181 pte = pte_offset_kernel(pmd, addr); 183 ptep = pte_offset_kernel(pmdp, addr);
182 return pte_valid(*pte); 184 return pte_valid(READ_ONCE(*ptep));
183} 185}
184#endif /* CONFIG_HIBERNATION */ 186#endif /* CONFIG_HIBERNATION */
185#endif /* CONFIG_DEBUG_PAGEALLOC */ 187#endif /* CONFIG_DEBUG_PAGEALLOC */
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 71baed7e592a..c0af47617299 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -205,7 +205,8 @@ ENDPROC(idmap_cpu_replace_ttbr1)
205 dc cvac, cur_\()\type\()p // Ensure any existing dirty 205 dc cvac, cur_\()\type\()p // Ensure any existing dirty
206 dmb sy // lines are written back before 206 dmb sy // lines are written back before
207 ldr \type, [cur_\()\type\()p] // loading the entry 207 ldr \type, [cur_\()\type\()p] // loading the entry
208 tbz \type, #0, next_\()\type // Skip invalid entries 208 tbz \type, #0, skip_\()\type // Skip invalid and
209 tbnz \type, #11, skip_\()\type // non-global entries
209 .endm 210 .endm
210 211
211 .macro __idmap_kpti_put_pgtable_ent_ng, type 212 .macro __idmap_kpti_put_pgtable_ent_ng, type
@@ -265,8 +266,9 @@ ENTRY(idmap_kpti_install_ng_mappings)
265 add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8) 266 add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
266do_pgd: __idmap_kpti_get_pgtable_ent pgd 267do_pgd: __idmap_kpti_get_pgtable_ent pgd
267 tbnz pgd, #1, walk_puds 268 tbnz pgd, #1, walk_puds
268 __idmap_kpti_put_pgtable_ent_ng pgd
269next_pgd: 269next_pgd:
270 __idmap_kpti_put_pgtable_ent_ng pgd
271skip_pgd:
270 add cur_pgdp, cur_pgdp, #8 272 add cur_pgdp, cur_pgdp, #8
271 cmp cur_pgdp, end_pgdp 273 cmp cur_pgdp, end_pgdp
272 b.ne do_pgd 274 b.ne do_pgd
@@ -294,8 +296,9 @@ walk_puds:
294 add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8) 296 add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
295do_pud: __idmap_kpti_get_pgtable_ent pud 297do_pud: __idmap_kpti_get_pgtable_ent pud
296 tbnz pud, #1, walk_pmds 298 tbnz pud, #1, walk_pmds
297 __idmap_kpti_put_pgtable_ent_ng pud
298next_pud: 299next_pud:
300 __idmap_kpti_put_pgtable_ent_ng pud
301skip_pud:
299 add cur_pudp, cur_pudp, 8 302 add cur_pudp, cur_pudp, 8
300 cmp cur_pudp, end_pudp 303 cmp cur_pudp, end_pudp
301 b.ne do_pud 304 b.ne do_pud
@@ -314,8 +317,9 @@ walk_pmds:
314 add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8) 317 add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
315do_pmd: __idmap_kpti_get_pgtable_ent pmd 318do_pmd: __idmap_kpti_get_pgtable_ent pmd
316 tbnz pmd, #1, walk_ptes 319 tbnz pmd, #1, walk_ptes
317 __idmap_kpti_put_pgtable_ent_ng pmd
318next_pmd: 320next_pmd:
321 __idmap_kpti_put_pgtable_ent_ng pmd
322skip_pmd:
319 add cur_pmdp, cur_pmdp, #8 323 add cur_pmdp, cur_pmdp, #8
320 cmp cur_pmdp, end_pmdp 324 cmp cur_pmdp, end_pmdp
321 b.ne do_pmd 325 b.ne do_pmd
@@ -333,7 +337,7 @@ walk_ptes:
333 add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8) 337 add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
334do_pte: __idmap_kpti_get_pgtable_ent pte 338do_pte: __idmap_kpti_get_pgtable_ent pte
335 __idmap_kpti_put_pgtable_ent_ng pte 339 __idmap_kpti_put_pgtable_ent_ng pte
336next_pte: 340skip_pte:
337 add cur_ptep, cur_ptep, #8 341 add cur_ptep, cur_ptep, #8
338 cmp cur_ptep, end_ptep 342 cmp cur_ptep, end_ptep
339 b.ne do_pte 343 b.ne do_pte
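
The label shuffle in the kpti walker changes which entries get rewritten. Previously an invalid entry branched to next_* and was still pushed through __idmap_kpti_put_pgtable_ent_ng; now invalid entries, and entries that already have the nG bit (bit 11) set, branch to the new skip_* labels past the write-back. An assumed C rendition of the per-entry step, where walk_next_level() and set_ng() stand in for the table recursion and for the cache clean plus write-back done by the put macro:

    static void kpti_fixup_entry(u64 *entp)
    {
            u64 ent = READ_ONCE(*entp);

            if (!(ent & 1) || (ent & (1UL << 11)))
                    return;                          /* skip_*: invalid, or nG already set */
            if (ent & 2)
                    walk_next_level(entp);           /* table descriptor: recurse first */
            set_ng(entp, ent | (1UL << 11));         /* next_*: write back with nG set */
    }
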
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 1d4f1da7c58f..a93350451e8e 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -250,8 +250,9 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
250 off = offsetof(struct bpf_array, map.max_entries); 250 off = offsetof(struct bpf_array, map.max_entries);
251 emit_a64_mov_i64(tmp, off, ctx); 251 emit_a64_mov_i64(tmp, off, ctx);
252 emit(A64_LDR32(tmp, r2, tmp), ctx); 252 emit(A64_LDR32(tmp, r2, tmp), ctx);
253 emit(A64_MOV(0, r3, r3), ctx);
253 emit(A64_CMP(0, r3, tmp), ctx); 254 emit(A64_CMP(0, r3, tmp), ctx);
254 emit(A64_B_(A64_COND_GE, jmp_offset), ctx); 255 emit(A64_B_(A64_COND_CS, jmp_offset), ctx);
255 256
256 /* if (tail_call_cnt > MAX_TAIL_CALL_CNT) 257 /* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
257 * goto out; 258 * goto out;
@@ -259,7 +260,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
259 */ 260 */
260 emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx); 261 emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
261 emit(A64_CMP(1, tcc, tmp), ctx); 262 emit(A64_CMP(1, tcc, tmp), ctx);
262 emit(A64_B_(A64_COND_GT, jmp_offset), ctx); 263 emit(A64_B_(A64_COND_HI, jmp_offset), ctx);
263 emit(A64_ADD_I(1, tcc, tcc, 1), ctx); 264 emit(A64_ADD_I(1, tcc, tcc, 1), ctx);
264 265
265 /* prog = array->ptrs[index]; 266 /* prog = array->ptrs[index];
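
Two related fixes in the tail-call bounds check. The BPF index in r3 is a 32-bit value, so the new A64_MOV(0, r3, r3) (a 32-bit mov w3, w3) zero-extends it to 64 bits, and the branches move from signed GE/GT to unsigned CS/HI condition codes. With a signed compare, an index with bit 31 set reads as negative, fails the >= max_entries test, and falls through to an out-of-bounds array access. Equivalent C, with r3 and tcc standing for the JIT'ed registers:

    u32 index = lower_32_bits(r3);            /* mov w3, w3: zero-extend */

    if (index >= array->map.max_entries)      /* unsigned: b.cs, was b.ge */
            goto out;
    if (tail_call_cnt > MAX_TAIL_CALL_CNT)    /* unsigned: b.hi, was b.gt */
            goto out;
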
diff --git a/arch/cris/include/arch-v10/arch/bug.h b/arch/cris/include/arch-v10/arch/bug.h
index 905afeacfedf..06da9d49152a 100644
--- a/arch/cris/include/arch-v10/arch/bug.h
+++ b/arch/cris/include/arch-v10/arch/bug.h
@@ -44,18 +44,25 @@ struct bug_frame {
44 * not be used like this with newer versions of gcc. 44 * not be used like this with newer versions of gcc.
45 */ 45 */
46#define BUG() \ 46#define BUG() \
47do { \
47 __asm__ __volatile__ ("clear.d [" __stringify(BUG_MAGIC) "]\n\t"\ 48 __asm__ __volatile__ ("clear.d [" __stringify(BUG_MAGIC) "]\n\t"\
48 "movu.w " __stringify(__LINE__) ",$r0\n\t"\ 49 "movu.w " __stringify(__LINE__) ",$r0\n\t"\
49 "jump 0f\n\t" \ 50 "jump 0f\n\t" \
50 ".section .rodata\n" \ 51 ".section .rodata\n" \
51 "0:\t.string \"" __FILE__ "\"\n\t" \ 52 "0:\t.string \"" __FILE__ "\"\n\t" \
52 ".previous") 53 ".previous"); \
54 unreachable(); \
55} while (0)
53#endif 56#endif
54 57
55#else 58#else
56 59
57/* This just causes an oops. */ 60/* This just causes an oops. */
58#define BUG() (*(int *)0 = 0) 61#define BUG() \
62do { \
63 barrier_before_unreachable(); \
64 __builtin_trap(); \
65} while (0)
59 66
60#endif 67#endif
61 68
diff --git a/arch/ia64/include/asm/bug.h b/arch/ia64/include/asm/bug.h
index bd3eeb8d1cfa..66b37a532765 100644
--- a/arch/ia64/include/asm/bug.h
+++ b/arch/ia64/include/asm/bug.h
@@ -4,7 +4,11 @@
4 4
5#ifdef CONFIG_BUG 5#ifdef CONFIG_BUG
6#define ia64_abort() __builtin_trap() 6#define ia64_abort() __builtin_trap()
7#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); ia64_abort(); } while (0) 7#define BUG() do { \
8 printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
9 barrier_before_unreachable(); \
10 ia64_abort(); \
11} while (0)
8 12
9/* should this BUG be made generic? */ 13/* should this BUG be made generic? */
10#define HAVE_ARCH_BUG 14#define HAVE_ARCH_BUG
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 0b4c65a1af25..498f3da3f225 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -41,7 +41,6 @@ ifneq ($(CONFIG_IA64_ESI),)
41obj-y += esi_stub.o # must be in kernel proper 41obj-y += esi_stub.o # must be in kernel proper
42endif 42endif
43obj-$(CONFIG_INTEL_IOMMU) += pci-dma.o 43obj-$(CONFIG_INTEL_IOMMU) += pci-dma.o
44obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
45 44
46obj-$(CONFIG_BINFMT_ELF) += elfcore.o 45obj-$(CONFIG_BINFMT_ELF) += elfcore.o
47 46
diff --git a/arch/m68k/include/asm/bug.h b/arch/m68k/include/asm/bug.h
index b7e2bf1ba4a6..275dca1435bf 100644
--- a/arch/m68k/include/asm/bug.h
+++ b/arch/m68k/include/asm/bug.h
@@ -8,16 +8,19 @@
8#ifndef CONFIG_SUN3 8#ifndef CONFIG_SUN3
9#define BUG() do { \ 9#define BUG() do { \
10 pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ 10 pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
11 barrier_before_unreachable(); \
11 __builtin_trap(); \ 12 __builtin_trap(); \
12} while (0) 13} while (0)
13#else 14#else
14#define BUG() do { \ 15#define BUG() do { \
15 pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ 16 pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
17 barrier_before_unreachable(); \
16 panic("BUG!"); \ 18 panic("BUG!"); \
17} while (0) 19} while (0)
18#endif 20#endif
19#else 21#else
20#define BUG() do { \ 22#define BUG() do { \
23 barrier_before_unreachable(); \
21 __builtin_trap(); \ 24 __builtin_trap(); \
22} while (0) 25} while (0)
23#endif 26#endif
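
The cris, ia64 and m68k hunks share one idea: when BUG() ends in __builtin_trap(), gcc knows the trap never returns and, per gcc PR82365, may merge identical trap sites or reorder the preceding diagnostic, so the oops can point at the wrong file and line. barrier_before_unreachable() is an optimisation barrier between the message and the trap. The cris variant also gains the usual do { ... } while (0) wrapper, so BUG() parses as a single statement in if/else bodies, plus unreachable() to silence control-flow warnings. The common shape, as in the m68k hunk:

    #define BUG() do {                                              \
            pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__);  \
            barrier_before_unreachable();  /* keep message and trap paired */ \
            __builtin_trap();                                       \
    } while (0)
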
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
index 1bd5c4f00d19..c22da16d67b8 100644
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -126,6 +126,7 @@ $(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS
126 126
127quiet_cmd_cpp_its_S = ITS $@ 127quiet_cmd_cpp_its_S = ITS $@
128 cmd_cpp_its_S = $(CPP) $(cpp_flags) -P -C -o $@ $< \ 128 cmd_cpp_its_S = $(CPP) $(cpp_flags) -P -C -o $@ $< \
129 -D__ASSEMBLY__ \
129 -DKERNEL_NAME="\"Linux $(KERNELRELEASE)\"" \ 130 -DKERNEL_NAME="\"Linux $(KERNELRELEASE)\"" \
130 -DVMLINUX_BINARY="\"$(3)\"" \ 131 -DVMLINUX_BINARY="\"$(3)\"" \
131 -DVMLINUX_COMPRESSION="\"$(2)\"" \ 132 -DVMLINUX_COMPRESSION="\"$(2)\"" \
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index 946681db8dc3..9a0fa66b81ac 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -86,7 +86,6 @@ struct compat_flock {
86 compat_off_t l_len; 86 compat_off_t l_len;
87 s32 l_sysid; 87 s32 l_sysid;
88 compat_pid_t l_pid; 88 compat_pid_t l_pid;
89 short __unused;
90 s32 pad[4]; 89 s32 pad[4];
91}; 90};
92 91
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index 19c88d770054..fcf9af492d60 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -10,6 +10,8 @@
10 10
11#include <linux/errno.h> 11#include <linux/errno.h>
12#include <linux/percpu.h> 12#include <linux/percpu.h>
13#include <linux/of.h>
14#include <linux/of_address.h>
13#include <linux/spinlock.h> 15#include <linux/spinlock.h>
14 16
15#include <asm/mips-cps.h> 17#include <asm/mips-cps.h>
@@ -22,6 +24,17 @@ static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
22 24
23phys_addr_t __weak mips_cpc_default_phys_base(void) 25phys_addr_t __weak mips_cpc_default_phys_base(void)
24{ 26{
27 struct device_node *cpc_node;
28 struct resource res;
29 int err;
30
31 cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc");
32 if (cpc_node) {
33 err = of_address_to_resource(cpc_node, 0, &res);
34 if (!err)
35 return res.start;
36 }
37
25 return 0; 38 return 0;
26} 39}
27 40
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 85bc601e9a0d..5f8b0a9e30b3 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -375,6 +375,7 @@ static void __init bootmem_init(void)
375 unsigned long reserved_end; 375 unsigned long reserved_end;
376 unsigned long mapstart = ~0UL; 376 unsigned long mapstart = ~0UL;
377 unsigned long bootmap_size; 377 unsigned long bootmap_size;
378 phys_addr_t ramstart = (phys_addr_t)ULLONG_MAX;
378 bool bootmap_valid = false; 379 bool bootmap_valid = false;
379 int i; 380 int i;
380 381
@@ -395,7 +396,8 @@ static void __init bootmem_init(void)
395 max_low_pfn = 0; 396 max_low_pfn = 0;
396 397
397 /* 398 /*
398 * Find the highest page frame number we have available. 399 * Find the highest page frame number we have available
400 * and the lowest used RAM address
399 */ 401 */
400 for (i = 0; i < boot_mem_map.nr_map; i++) { 402 for (i = 0; i < boot_mem_map.nr_map; i++) {
401 unsigned long start, end; 403 unsigned long start, end;
@@ -407,6 +409,8 @@ static void __init bootmem_init(void)
407 end = PFN_DOWN(boot_mem_map.map[i].addr 409 end = PFN_DOWN(boot_mem_map.map[i].addr
408 + boot_mem_map.map[i].size); 410 + boot_mem_map.map[i].size);
409 411
412 ramstart = min(ramstart, boot_mem_map.map[i].addr);
413
410#ifndef CONFIG_HIGHMEM 414#ifndef CONFIG_HIGHMEM
411 /* 415 /*
412 * Skip highmem here so we get an accurate max_low_pfn if low 416 * Skip highmem here so we get an accurate max_low_pfn if low
@@ -436,6 +440,13 @@ static void __init bootmem_init(void)
436 mapstart = max(reserved_end, start); 440 mapstart = max(reserved_end, start);
437 } 441 }
438 442
443 /*
444 * Reserve any memory between the start of RAM and PHYS_OFFSET
445 */
446 if (ramstart > PHYS_OFFSET)
447 add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET,
448 BOOT_MEM_RESERVED);
449
439 if (min_low_pfn >= max_low_pfn) 450 if (min_low_pfn >= max_low_pfn)
440 panic("Incorrect memory mapping !!!"); 451 panic("Incorrect memory mapping !!!");
441 if (min_low_pfn > ARCH_PFN_OFFSET) { 452 if (min_low_pfn > ARCH_PFN_OFFSET) {
@@ -664,9 +675,6 @@ static int __init early_parse_mem(char *p)
664 675
665 add_memory_region(start, size, BOOT_MEM_RAM); 676 add_memory_region(start, size, BOOT_MEM_RAM);
666 677
667 if (start && start > PHYS_OFFSET)
668 add_memory_region(PHYS_OFFSET, start - PHYS_OFFSET,
669 BOOT_MEM_RESERVED);
670 return 0; 678 return 0;
671} 679}
672early_param("mem", early_parse_mem); 680early_param("mem", early_parse_mem);
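
The setup.c change moves the low-memory reservation out of early_parse_mem(): instead of reserving [PHYS_OFFSET, start) only when a mem= argument happened to be parsed, bootmem_init() now tracks the lowest address across the boot memory map and reserves the gap below it unconditionally. The shape, with assumed example numbers:

    phys_addr_t ramstart = (phys_addr_t)ULLONG_MAX;

    for (i = 0; i < boot_mem_map.nr_map; i++)
            ramstart = min(ramstart, boot_mem_map.map[i].addr);

    /* e.g. PHYS_OFFSET == 0x0 and RAM starting at 0x10000000:
     * reserve [0x0, 0x10000000) so it is never handed out.
     */
    if (ramstart > PHYS_OFFSET)
            add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET,
                              BOOT_MEM_RESERVED);
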
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 87dcac2447c8..9d41732a9146 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -572,7 +572,7 @@ asmlinkage void __weak plat_wired_tlb_setup(void)
572 */ 572 */
573} 573}
574 574
575void __init bmips_cpu_setup(void) 575void bmips_cpu_setup(void)
576{ 576{
577 void __iomem __maybe_unused *cbr = BMIPS_GET_CBR(); 577 void __iomem __maybe_unused *cbr = BMIPS_GET_CBR();
578 u32 __maybe_unused cfg; 578 u32 __maybe_unused cfg;
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 30a155c0a6b0..c615abdce119 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -16,6 +16,7 @@
16#define PGD_INDEX_SIZE (32 - PGDIR_SHIFT) 16#define PGD_INDEX_SIZE (32 - PGDIR_SHIFT)
17 17
18#define PMD_CACHE_INDEX PMD_INDEX_SIZE 18#define PMD_CACHE_INDEX PMD_INDEX_SIZE
19#define PUD_CACHE_INDEX PUD_INDEX_SIZE
19 20
20#ifndef __ASSEMBLY__ 21#ifndef __ASSEMBLY__
21#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE) 22#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index 949d691094a4..67c5475311ee 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -63,7 +63,8 @@ static inline int hash__hugepd_ok(hugepd_t hpd)
63 * keeping the prototype consistent across the two formats. 63 * keeping the prototype consistent across the two formats.
64 */ 64 */
65static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte, 65static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
66 unsigned int subpg_index, unsigned long hidx) 66 unsigned int subpg_index, unsigned long hidx,
67 int offset)
67{ 68{
68 return (hidx << H_PAGE_F_GIX_SHIFT) & 69 return (hidx << H_PAGE_F_GIX_SHIFT) &
69 (H_PAGE_F_SECOND | H_PAGE_F_GIX); 70 (H_PAGE_F_SECOND | H_PAGE_F_GIX);
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index 338b7da468ce..3bcf269f8f55 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -45,7 +45,7 @@
45 * generic accessors and iterators here 45 * generic accessors and iterators here
46 */ 46 */
47#define __real_pte __real_pte 47#define __real_pte __real_pte
48static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep) 48static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset)
49{ 49{
50 real_pte_t rpte; 50 real_pte_t rpte;
51 unsigned long *hidxp; 51 unsigned long *hidxp;
@@ -59,7 +59,7 @@ static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
59 */ 59 */
60 smp_rmb(); 60 smp_rmb();
61 61
62 hidxp = (unsigned long *)(ptep + PTRS_PER_PTE); 62 hidxp = (unsigned long *)(ptep + offset);
63 rpte.hidx = *hidxp; 63 rpte.hidx = *hidxp;
64 return rpte; 64 return rpte;
65} 65}
@@ -86,9 +86,10 @@ static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
  * expected to modify the PTE bits accordingly and commit the PTE to memory.
  */
 static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
-					 unsigned int subpg_index, unsigned long hidx)
+					 unsigned int subpg_index,
+					 unsigned long hidx, int offset)
 {
-	unsigned long *hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
+	unsigned long *hidxp = (unsigned long *)(ptep + offset);
 
	 rpte.hidx &= ~HIDX_BITS(0xfUL, subpg_index);
	 *hidxp = rpte.hidx | HIDX_BITS(HIDX_SHIFT_BY_ONE(hidx), subpg_index);
@@ -140,13 +141,18 @@ static inline int hash__remap_4k_pfn(struct vm_area_struct *vma, unsigned long a
 }
 
 #define H_PTE_TABLE_SIZE	PTE_FRAG_SIZE
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined (CONFIG_HUGETLB_PAGE)
 #define H_PMD_TABLE_SIZE	((sizeof(pmd_t) << PMD_INDEX_SIZE) + \
				 (sizeof(unsigned long) << PMD_INDEX_SIZE))
 #else
 #define H_PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
 #endif
+#ifdef CONFIG_HUGETLB_PAGE
+#define H_PUD_TABLE_SIZE	((sizeof(pud_t) << PUD_INDEX_SIZE) + \
+				 (sizeof(unsigned long) << PUD_INDEX_SIZE))
+#else
 #define H_PUD_TABLE_SIZE	(sizeof(pud_t) << PUD_INDEX_SIZE)
+#endif
 #define H_PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
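
Note on the hunks above: the new offset argument threaded through __real_pte() and pte_set_hidx() generalizes the hard-coded PTRS_PER_PTE stride. The hash-slot hints for a page-table page live in the second half of that page, so the distance from a pte_t pointer to its hint word depends on which level (PTE, PMD, or PUD) backs the mapping. A minimal user-space sketch of this "second half of the table" layout follows; it is illustrative only, and the names (pte_demo_t, slot_word, NENTRIES) are invented for the demo, not kernel API:

#include <stdio.h>
#include <stdlib.h>

typedef unsigned long pte_demo_t;

/* mirrors the kernel's: hidxp = (unsigned long *)(ptep + offset); */
static unsigned long *slot_word(pte_demo_t *ptep, int offset)
{
	return (unsigned long *)(ptep + offset);
}

int main(void)
{
	enum { NENTRIES = 8 };	/* stand-in for PTRS_PER_PTE */
	/* one allocation: NENTRIES entries followed by NENTRIES hint words */
	pte_demo_t *table = calloc(2 * NENTRIES, sizeof(*table));

	if (!table)
		return 1;
	/* the hint for entry i sits NENTRIES slots past the entry itself */
	*slot_word(&table[3], NENTRIES) = 0xf;
	printf("hint for entry 3: %#lx\n", *slot_word(&table[3], NENTRIES));
	free(table);
	return 0;
}
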
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 0920eff731b3..935adcd92a81 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -23,7 +23,8 @@
		     H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
 #define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
 
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_PPC_64K_PAGES)
+#if (defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)) && \
+	defined(CONFIG_PPC_64K_PAGES)
 /*
  * only with hash 64k we need to use the second half of pmd page table
  * to store pointer to deposited pgtable_t
@@ -33,6 +34,16 @@
 #define H_PMD_CACHE_INDEX	H_PMD_INDEX_SIZE
 #endif
 /*
+ * We store the slot details in the second half of page table.
+ * Increase the pud level table so that hugetlb ptes can be stored
+ * at pud level.
+ */
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES)
+#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE + 1)
+#else
+#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE)
+#endif
+/*
  * Define the address range of the kernel non-linear virtual area
  */
 #define H_KERN_VIRT_START	ASM_CONST(0xD000000000000000)
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index 1fcfa425cefa..4746bc68d446 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -73,10 +73,16 @@ static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
+	pgd_t *pgd;
+
	 if (radix_enabled())
		 return radix__pgd_alloc(mm);
-	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
-		pgtable_gfp_flags(mm, GFP_KERNEL));
+
+	pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+			       pgtable_gfp_flags(mm, GFP_KERNEL));
+	memset(pgd, 0, PGD_TABLE_SIZE);
+
+	return pgd;
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -93,13 +99,13 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
+	return kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
		 pgtable_gfp_flags(mm, GFP_KERNEL));
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 {
-	kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
+	kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
 }
 
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
@@ -115,7 +121,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
	  * ahead and flush the page walk cache
	  */
	 flush_tlb_pgtable(tlb, address);
-	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
+	pgtable_free_tlb(tlb, pud, PUD_CACHE_INDEX);
 }
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 51017726d495..a6b9f1d74600 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -232,11 +232,13 @@ extern unsigned long __pmd_index_size;
 extern unsigned long __pud_index_size;
 extern unsigned long __pgd_index_size;
 extern unsigned long __pmd_cache_index;
+extern unsigned long __pud_cache_index;
 #define PTE_INDEX_SIZE  __pte_index_size
 #define PMD_INDEX_SIZE  __pmd_index_size
 #define PUD_INDEX_SIZE  __pud_index_size
 #define PGD_INDEX_SIZE  __pgd_index_size
 #define PMD_CACHE_INDEX __pmd_cache_index
+#define PUD_CACHE_INDEX __pud_cache_index
 /*
  * Because of use of pte fragments and THP, size of page table
  * are not always derived out of index size above.
@@ -348,7 +350,7 @@ extern unsigned long pci_io_base;
  */
 #ifndef __real_pte
 
-#define __real_pte(e,p)		((real_pte_t){(e)})
+#define __real_pte(e, p, o)		((real_pte_t){(e)})
 #define __rpte_to_pte(r)	((r).pte)
 #define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)
 
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 176dfb73d42c..471b2274fbeb 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -645,7 +645,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
				       EXC_HV, SOFTEN_TEST_HV, bitmask)
 
 #define MASKABLE_RELON_EXCEPTION_HV_OOL(vec, label, bitmask)		\
-	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_HV, vec, bitmask);\
+	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec, bitmask);\
	 EXCEPTION_RELON_PROLOG_PSERIES_1(label, EXC_HV)
 
 /*
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 511acfd7ab0d..535add3f7791 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -52,7 +52,7 @@
 #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
 #define FW_FEATURE_PRRN		ASM_CONST(0x0000000200000000)
 #define FW_FEATURE_DRMEM_V2	ASM_CONST(0x0000000400000000)
-#define FW_FEATURE_DRC_INFO	ASM_CONST(0x0000000400000000)
+#define FW_FEATURE_DRC_INFO	ASM_CONST(0x0000000800000000)
 
 #ifndef __ASSEMBLY__
 
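
The firmware.h hunk above fixes a duplicated feature bit: FW_FEATURE_DRC_INFO had been given the same constant as FW_FEATURE_DRMEM_V2, so a mask test could not tell the two features apart. A small standalone sketch (the FEAT_* names are invented for the demo, not the kernel's macros) of why aliased bits give false positives:

#include <stdio.h>

#define FEAT_DRMEM_V2		0x0000000400000000UL
#define FEAT_DRC_INFO_OLD	0x0000000400000000UL	/* buggy: same bit as DRMEM_V2 */
#define FEAT_DRC_INFO_NEW	0x0000000800000000UL	/* fixed: its own bit */

int main(void)
{
	unsigned long enabled = FEAT_DRMEM_V2;	/* firmware reported only DRMEM_V2 */

	printf("old mask sees DRC_INFO: %d (false positive)\n",
	       !!(enabled & FEAT_DRC_INFO_OLD));
	printf("new mask sees DRC_INFO: %d\n",
	       !!(enabled & FEAT_DRC_INFO_NEW));
	return 0;
}
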
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 88e5e8f17e98..855e17d158b1 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -30,6 +30,16 @@
 #define PACA_IRQ_PMI		0x40
 
 /*
+ * Some soft-masked interrupts must be hard masked until they are replayed
+ * (e.g., because the soft-masked handler does not clear the exception).
+ */
+#ifdef CONFIG_PPC_BOOK3S
+#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_PMI)
+#else
+#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE)
+#endif
+
+/*
  * flags for paca->irq_soft_mask
  */
 #define IRQS_ENABLED		0
@@ -244,7 +254,7 @@ static inline bool lazy_irq_pending(void)
 static inline void may_hard_irq_enable(void)
 {
	 get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
-	if (!(get_paca()->irq_happened & PACA_IRQ_EE))
+	if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK))
		 __hard_irq_enable();
 }
 
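
The two hw_irq.h hunks above group the interrupt sources that must stay hard-disabled until they are replayed into one mask, so may_hard_irq_enable() now refuses to hard-enable while any such source is pending. A user-space sketch of just that decision (IRQ_* values and may_hard_enable() are invented stand-ins for the paca flags, not kernel API):

#include <stdio.h>

#define IRQ_HARD_DIS		0x01
#define IRQ_EE			0x04
#define IRQ_PMI			0x40
#define IRQ_MUST_HARD_MASK	(IRQ_EE | IRQ_PMI)	/* the BOOK3S flavour */

/* returns nonzero when it is safe to hard-enable, as in may_hard_irq_enable() */
static int may_hard_enable(unsigned int irq_happened)
{
	irq_happened &= ~IRQ_HARD_DIS;
	return !(irq_happened & IRQ_MUST_HARD_MASK);
}

int main(void)
{
	printf("PMI pending     -> enable? %d\n", may_hard_enable(IRQ_PMI));
	printf("nothing pending -> enable? %d\n", may_hard_enable(IRQ_HARD_DIS));
	return 0;
}
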
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 9dcbfa6bbb91..d8b1e8e7e035 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -140,6 +140,12 @@ static inline bool kdump_in_progress(void)
	 return false;
 }
 
+static inline void crash_ipi_callback(struct pt_regs *regs) { }
+
+static inline void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
+{
+}
+
 #endif /* CONFIG_KEXEC_CORE */
 #endif /* ! __ASSEMBLY__ */
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 504a3c36ce5c..03bbd1149530 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -24,6 +24,7 @@ extern int icache_44x_need_flush;
 #define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)
 
 #define PMD_CACHE_INDEX	PMD_INDEX_SIZE
+#define PUD_CACHE_INDEX	PUD_INDEX_SIZE
 
 #ifndef __ASSEMBLY__
 #define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index abddf5830ad5..5c5f75d005ad 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -27,6 +27,7 @@
 #else
 #define PMD_CACHE_INDEX	PMD_INDEX_SIZE
 #endif
+#define PUD_CACHE_INDEX PUD_INDEX_SIZE
 
 /*
  * Define the address range of the kernel non-linear virtual area
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 88187c285c70..9f421641a35c 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -44,6 +44,11 @@ extern int sysfs_add_device_to_node(struct device *dev, int nid);
 extern void sysfs_remove_device_from_node(struct device *dev, int nid);
 extern int numa_update_cpu_topology(bool cpus_locked);
 
+static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node)
+{
+	numa_cpu_lookup_table[cpu] = node;
+}
+
 static inline int early_cpu_to_node(int cpu)
 {
	 int nid;
@@ -76,12 +81,16 @@ static inline int numa_update_cpu_topology(bool cpus_locked)
 {
	 return 0;
 }
+
+static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) {}
+
 #endif /* CONFIG_NUMA */
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
 extern int start_topology_update(void);
 extern int stop_topology_update(void);
 extern int prrn_is_enabled(void);
+extern int find_and_online_cpu_nid(int cpu);
 #else
 static inline int start_topology_update(void)
 {
@@ -95,6 +104,10 @@ static inline int prrn_is_enabled(void)
 {
	 return 0;
 }
+static inline int find_and_online_cpu_nid(int cpu)
+{
+	return 0;
+}
 #endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
 
 #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_NEED_MULTIPLE_NODES)
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index beea2182d754..0c0b66fc5bfb 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -384,7 +384,8 @@ static void *eeh_report_resume(void *data, void *userdata)
	 eeh_pcid_put(dev);
	 pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
 #ifdef CONFIG_PCI_IOV
-	eeh_ops->notify_resume(eeh_dev_to_pdn(edev));
+	if (eeh_ops->notify_resume && eeh_dev_to_pdn(edev))
+		eeh_ops->notify_resume(eeh_dev_to_pdn(edev));
 #endif
	 return NULL;
 }
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index ee832d344a5a..9b6e653e501a 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -943,6 +943,8 @@ kernel_dbg_exc:
 /*
  * An interrupt came in while soft-disabled; We mark paca->irq_happened
  * accordingly and if the interrupt is level sensitive, we hard disable
+ * hard disable (full_mask) corresponds to PACA_IRQ_MUST_HARD_MASK, so
+ * keep these in synch.
  */
 
 .macro masked_interrupt_book3e paca_irq full_mask
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 243d072a225a..3ac87e53b3da 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1426,7 +1426,7 @@ EXC_COMMON_BEGIN(soft_nmi_common)
  *   triggered and won't automatically refire.
  * - If it was a HMI we return immediately since we handled it in realmode
  *   and it won't refire.
- * - else we hard disable and return.
+ * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return.
  * This is called with r10 containing the value to OR to the paca field.
  */
 #define MASKED_INTERRUPT(_H)				\
@@ -1441,8 +1441,8 @@ masked_##_H##interrupt: \
	ori	r10,r10,0xffff;				\
	mtspr	SPRN_DEC,r10;				\
	b	MASKED_DEC_HANDLER_LABEL;		\
-1:	andi.	r10,r10,(PACA_IRQ_DBELL|PACA_IRQ_HMI);	\
-	bne	2f;					\
+1:	andi.	r10,r10,PACA_IRQ_MUST_HARD_MASK;	\
+	beq	2f;					\
	mfspr	r10,SPRN_##_H##SRR1;			\
	xori	r10,r10,MSR_EE; /* clear MSR_EE */	\
	mtspr	SPRN_##_H##SRR1,r10;			\
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index adf044daafd7..d22c41c26bb3 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -874,7 +874,7 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
		 .mmu = 0,
		 .hash_ext = 0,
		 .radix_ext = 0,
-		.byte22 = OV5_FEAT(OV5_DRC_INFO),
+		.byte22 = 0,
	 },
 
	 /* option vector 6: IBM PAPR hints */
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 5a8bfee6e187..04d0bbd7a1dd 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -788,7 +788,8 @@ static int register_cpu_online(unsigned int cpu)
	 if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
		 device_create_file(s, &dev_attr_pir);
 
-	if (cpu_has_feature(CPU_FTR_ARCH_206))
+	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+		!firmware_has_feature(FW_FEATURE_LPAR))
		 device_create_file(s, &dev_attr_tscr);
 #endif /* CONFIG_PPC64 */
 
@@ -873,7 +874,8 @@ static int unregister_cpu_online(unsigned int cpu)
	 if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
		 device_remove_file(s, &dev_attr_pir);
 
-	if (cpu_has_feature(CPU_FTR_ARCH_206))
+	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+		!firmware_has_feature(FW_FEATURE_LPAR))
		 device_remove_file(s, &dev_attr_tscr);
 #endif /* CONFIG_PPC64 */
 
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index f0f5cd4d2fe7..f9818d7d3381 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -188,7 +188,7 @@ static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
	 if (!qpage) {
		 pr_err("Failed to allocate queue %d for VCPU %d\n",
		        prio, xc->server_num);
-		return -ENOMEM;;
+		return -ENOMEM;
	 }
	 memset(qpage, 0, 1 << xive->q_order);
 
diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c
index 1604110c4238..3f1803672c9b 100644
--- a/arch/powerpc/mm/drmem.c
+++ b/arch/powerpc/mm/drmem.c
@@ -98,7 +98,7 @@ static void init_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
	 dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
	 dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
	 dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
-	dr_cell->flags = cpu_to_be32(lmb->flags);
+	dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));
 }
 
 static int drmem_update_dt_v2(struct device_node *memory,
@@ -121,7 +121,7 @@ static int drmem_update_dt_v2(struct device_node *memory,
		 }
 
		 if (prev_lmb->aa_index != lmb->aa_index ||
-		    prev_lmb->flags != lmb->flags)
+		    drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb))
			 lmb_sets++;
 
		 prev_lmb = lmb;
@@ -150,7 +150,7 @@ static int drmem_update_dt_v2(struct device_node *memory,
		 }
 
		 if (prev_lmb->aa_index != lmb->aa_index ||
-		    prev_lmb->flags != lmb->flags) {
+		    drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb)) {
			 /* end of one set, start of another */
			 dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
			 dr_cell++;
@@ -216,6 +216,8 @@ static void __init __walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm,
	 u32 i, n_lmbs;
 
	 n_lmbs = of_read_number(prop++, 1);
+	if (n_lmbs == 0)
+		return;
 
	 for (i = 0; i < n_lmbs; i++) {
		 read_drconf_v1_cell(&lmb, &prop);
@@ -245,6 +247,8 @@ static void __init __walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm,
	 u32 i, j, lmb_sets;
 
	 lmb_sets = of_read_number(prop++, 1);
+	if (lmb_sets == 0)
+		return;
 
	 for (i = 0; i < lmb_sets; i++) {
		 read_drconf_v2_cell(&dr_cell, &prop);
@@ -354,6 +358,8 @@ static void __init init_drmem_v1_lmbs(const __be32 *prop)
	 struct drmem_lmb *lmb;
 
	 drmem_info->n_lmbs = of_read_number(prop++, 1);
+	if (drmem_info->n_lmbs == 0)
+		return;
 
	 drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
				    GFP_KERNEL);
@@ -373,6 +379,8 @@ static void __init init_drmem_v2_lmbs(const __be32 *prop)
	 int lmb_index;
 
	 lmb_sets = of_read_number(prop++, 1);
+	if (lmb_sets == 0)
+		return;
 
	 /* first pass, calculate the number of LMBs */
	 p = prop;
diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/hash64_4k.c
index 5a69b51d08a3..d573d7d07f25 100644
--- a/arch/powerpc/mm/hash64_4k.c
+++ b/arch/powerpc/mm/hash64_4k.c
@@ -55,7 +55,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
	  * need to add in 0x1 if it's a read-only user page
	  */
	 rflags = htab_convert_pte_flags(new_pte);
-	rpte = __real_pte(__pte(old_pte), ptep);
+	rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);
 
	 if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	     !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
@@ -117,7 +117,7 @@ repeat:
			 return -1;
		 }
		 new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
-		new_pte |= pte_set_hidx(ptep, rpte, 0, slot);
+		new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE);
	 }
	 *ptep = __pte(new_pte & ~H_PAGE_BUSY);
	 return 0;
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
index 2253bbc6a599..e601d95c3b20 100644
--- a/arch/powerpc/mm/hash64_64k.c
+++ b/arch/powerpc/mm/hash64_64k.c
@@ -86,7 +86,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 
	 subpg_index = (ea & (PAGE_SIZE - 1)) >> shift;
	 vpn  = hpt_vpn(ea, vsid, ssize);
-	rpte = __real_pte(__pte(old_pte), ptep);
+	rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);
	 /*
	  *None of the sub 4k page is hashed
	  */
@@ -214,7 +214,7 @@ repeat:
		 return -1;
	 }
 
-	new_pte |= pte_set_hidx(ptep, rpte, subpg_index, slot);
+	new_pte |= pte_set_hidx(ptep, rpte, subpg_index, slot, PTRS_PER_PTE);
	 new_pte |= H_PAGE_HASHPTE;
 
	 *ptep = __pte(new_pte & ~H_PAGE_BUSY);
@@ -262,7 +262,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
	 } while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
 
	 rflags = htab_convert_pte_flags(new_pte);
-	rpte = __real_pte(__pte(old_pte), ptep);
+	rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);
 
	 if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	     !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
@@ -327,7 +327,7 @@ repeat:
		 }
 
		 new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
-		new_pte |= pte_set_hidx(ptep, rpte, 0, slot);
+		new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE);
	 }
	 *ptep = __pte(new_pte & ~H_PAGE_BUSY);
	 return 0;
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 7d07c7e17db6..cf290d415dcd 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1008,6 +1008,7 @@ void __init hash__early_init_mmu(void)
	 __pmd_index_size = H_PMD_INDEX_SIZE;
	 __pud_index_size = H_PUD_INDEX_SIZE;
	 __pgd_index_size = H_PGD_INDEX_SIZE;
+	__pud_cache_index = H_PUD_CACHE_INDEX;
	 __pmd_cache_index = H_PMD_CACHE_INDEX;
	 __pte_table_size = H_PTE_TABLE_SIZE;
	 __pmd_table_size = H_PMD_TABLE_SIZE;
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index 12511f5a015f..b320f5097a06 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -27,7 +27,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
	 unsigned long vpn;
	 unsigned long old_pte, new_pte;
	 unsigned long rflags, pa, sz;
-	long slot;
+	long slot, offset;
 
	 BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);
 
@@ -63,7 +63,11 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
	 } while(!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
 
	 rflags = htab_convert_pte_flags(new_pte);
-	rpte = __real_pte(__pte(old_pte), ptep);
+	if (unlikely(mmu_psize == MMU_PAGE_16G))
+		offset = PTRS_PER_PUD;
+	else
+		offset = PTRS_PER_PMD;
+	rpte = __real_pte(__pte(old_pte), ptep, offset);
 
	 sz = ((1UL) << shift);
	 if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
@@ -104,7 +108,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
			 return -1;
		 }
 
-		new_pte |= pte_set_hidx(ptep, rpte, 0, slot);
+		new_pte |= pte_set_hidx(ptep, rpte, 0, slot, offset);
	 }
 
	 /*
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
index eb8c6c8c4851..2b656e67f2ea 100644
--- a/arch/powerpc/mm/init-common.c
+++ b/arch/powerpc/mm/init-common.c
@@ -100,6 +100,6 @@ void pgtable_cache_init(void)
	  * same size as either the pgd or pmd index except with THP enabled
	  * on book3s 64
	  */
-	if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
-		pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
+	if (PUD_CACHE_INDEX && !PGT_CACHE(PUD_CACHE_INDEX))
+		pgtable_cache_add(PUD_CACHE_INDEX, pud_ctor);
 }
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 314d19ab9385..edd8d0bc9364 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -143,11 +143,6 @@ static void reset_numa_cpu_lookup_table(void)
		 numa_cpu_lookup_table[cpu] = -1;
 }
 
-static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
-{
-	numa_cpu_lookup_table[cpu] = node;
-}
-
 static void map_cpu_to_node(int cpu, int node)
 {
	 update_numa_cpu_lookup_table(cpu, node);
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 573a9a2ee455..2e10a964e290 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -17,9 +17,11 @@
 #include <linux/of_fdt.h>
 #include <linux/mm.h>
 #include <linux/string_helpers.h>
+#include <linux/stop_machine.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
+#include <asm/mmu_context.h>
 #include <asm/dma.h>
 #include <asm/machdep.h>
 #include <asm/mmu.h>
@@ -333,6 +335,22 @@ static void __init radix_init_pgtable(void)
333 "r" (TLBIEL_INVAL_SET_LPID), "r" (0)); 335 "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
334 asm volatile("eieio; tlbsync; ptesync" : : : "memory"); 336 asm volatile("eieio; tlbsync; ptesync" : : : "memory");
335 trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1); 337 trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);
338
339 /*
340 * The init_mm context is given the first available (non-zero) PID,
341 * which is the "guard PID" and contains no page table. PIDR should
342 * never be set to zero because that duplicates the kernel address
343 * space at the 0x0... offset (quadrant 0)!
344 *
345 * An arbitrary PID that may later be allocated by the PID allocator
346 * for userspace processes must not be used either, because that
347 * would cause stale user mappings for that PID on CPUs outside of
348 * the TLB invalidation scheme (because it won't be in mm_cpumask).
349 *
350 * So permanently carve out one PID for the purpose of a guard PID.
351 */
352 init_mm.context.id = mmu_base_pid;
353 mmu_base_pid++;
336} 354}
337 355
338static void __init radix_init_partition_table(void) 356static void __init radix_init_partition_table(void)
@@ -535,6 +553,7 @@ void __init radix__early_init_mmu(void)
	 __pmd_index_size = RADIX_PMD_INDEX_SIZE;
	 __pud_index_size = RADIX_PUD_INDEX_SIZE;
	 __pgd_index_size = RADIX_PGD_INDEX_SIZE;
+	__pud_cache_index = RADIX_PUD_INDEX_SIZE;
	 __pmd_cache_index = RADIX_PMD_INDEX_SIZE;
	 __pte_table_size = RADIX_PTE_TABLE_SIZE;
	 __pmd_table_size = RADIX_PMD_TABLE_SIZE;
@@ -579,7 +598,8 @@ void __init radix__early_init_mmu(void)
 
	 radix_init_iamr();
	 radix_init_pgtable();
-
+	/* Switch to the guard PID before turning on MMU */
+	radix__switch_mmu_context(NULL, &init_mm);
	 if (cpu_has_feature(CPU_FTR_HVMODE))
		 tlbiel_all();
 }
@@ -604,6 +624,7 @@ void radix__early_init_mmu_secondary(void)
	 }
	 radix_init_iamr();
 
+	radix__switch_mmu_context(NULL, &init_mm);
	 if (cpu_has_feature(CPU_FTR_HVMODE))
		 tlbiel_all();
 }
@@ -666,6 +687,30 @@ static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
	 pud_clear(pud);
 }
 
+struct change_mapping_params {
+	pte_t *pte;
+	unsigned long start;
+	unsigned long end;
+	unsigned long aligned_start;
+	unsigned long aligned_end;
+};
+
+static int stop_machine_change_mapping(void *data)
+{
+	struct change_mapping_params *params =
+			(struct change_mapping_params *)data;
+
+	if (!data)
+		return -1;
+
+	spin_unlock(&init_mm.page_table_lock);
+	pte_clear(&init_mm, params->aligned_start, params->pte);
+	create_physical_mapping(params->aligned_start, params->start);
+	create_physical_mapping(params->end, params->aligned_end);
+	spin_lock(&init_mm.page_table_lock);
+	return 0;
+}
+
 static void remove_pte_table(pte_t *pte_start, unsigned long addr,
			      unsigned long end)
 {
@@ -694,6 +739,52 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr,
	 }
 }
 
+/*
+ * clear the pte and potentially split the mapping helper
+ */
+static void split_kernel_mapping(unsigned long addr, unsigned long end,
+				unsigned long size, pte_t *pte)
+{
+	unsigned long mask = ~(size - 1);
+	unsigned long aligned_start = addr & mask;
+	unsigned long aligned_end = addr + size;
+	struct change_mapping_params params;
+	bool split_region = false;
+
+	if ((end - addr) < size) {
+		/*
+		 * We're going to clear the PTE, but not flushed
+		 * the mapping, time to remap and flush. The
+		 * effects if visible outside the processor or
+		 * if we are running in code close to the
+		 * mapping we cleared, we are in trouble.
+		 */
+		if (overlaps_kernel_text(aligned_start, addr) ||
+			overlaps_kernel_text(end, aligned_end)) {
+			/*
+			 * Hack, just return, don't pte_clear
+			 */
+			WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel "
+				 "text, not splitting\n", addr, end);
+			return;
+		}
+		split_region = true;
+	}
+
+	if (split_region) {
+		params.pte = pte;
+		params.start = addr;
+		params.end = end;
+		params.aligned_start = addr & ~(size - 1);
+		params.aligned_end = min_t(unsigned long, aligned_end,
+				(unsigned long)__va(memblock_end_of_DRAM()));
+		stop_machine(stop_machine_change_mapping, &params, NULL);
+		return;
+	}
+
+	pte_clear(&init_mm, addr, pte);
+}
+
 static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
			      unsigned long end)
 {
@@ -709,13 +800,7 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
			 continue;
 
		 if (pmd_huge(*pmd)) {
-			if (!IS_ALIGNED(addr, PMD_SIZE) ||
-			    !IS_ALIGNED(next, PMD_SIZE)) {
-				WARN_ONCE(1, "%s: unaligned range\n", __func__);
-				continue;
-			}
-
-			pte_clear(&init_mm, addr, (pte_t *)pmd);
+			split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd);
			 continue;
		 }
 
@@ -740,13 +825,7 @@ static void remove_pud_table(pud_t *pud_start, unsigned long addr,
			 continue;
 
		 if (pud_huge(*pud)) {
-			if (!IS_ALIGNED(addr, PUD_SIZE) ||
-			    !IS_ALIGNED(next, PUD_SIZE)) {
-				WARN_ONCE(1, "%s: unaligned range\n", __func__);
-				continue;
-			}
-
-			pte_clear(&init_mm, addr, (pte_t *)pud);
+			split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud);
			 continue;
		 }
 
@@ -772,13 +851,7 @@ static void remove_pagetable(unsigned long start, unsigned long end)
			 continue;
 
		 if (pgd_huge(*pgd)) {
-			if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
-			    !IS_ALIGNED(next, PGDIR_SIZE)) {
-				WARN_ONCE(1, "%s: unaligned range\n", __func__);
-				continue;
-			}
-
-			pte_clear(&init_mm, addr, (pte_t *)pgd);
+			split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
			 continue;
		 }
 
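
The pgtable-radix.c hunks above replace the "warn and skip" behaviour for partially-unmapped huge pages with split_kernel_mapping(), which clears the huge PTE and remaps the leftover aligned pieces under stop_machine(). A standalone sketch of just the alignment arithmetic involved (the addresses and the 2MB size are hypothetical, chosen only for the demo):

#include <stdio.h>

int main(void)
{
	unsigned long size  = 1UL << 21;	/* a 2MB PMD-sized mapping */
	unsigned long start = 0x40100000UL;	/* hypothetical unplug range */
	unsigned long end   = 0x40180000UL;	/* (end - start) < size */
	unsigned long aligned_start = start & ~(size - 1);
	unsigned long aligned_end   = aligned_start + size;

	/* the two leftover pieces create_physical_mapping() must re-create */
	printf("remap low  piece: [%#lx, %#lx)\n", aligned_start, start);
	printf("remap high piece: [%#lx, %#lx)\n", end, aligned_end);
	return 0;
}
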
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index c9a623c2d8a2..28c980eb4422 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -82,6 +82,8 @@ unsigned long __pgd_index_size;
 EXPORT_SYMBOL(__pgd_index_size);
 unsigned long __pmd_cache_index;
 EXPORT_SYMBOL(__pmd_cache_index);
+unsigned long __pud_cache_index;
+EXPORT_SYMBOL(__pud_cache_index);
 unsigned long __pte_table_size;
 EXPORT_SYMBOL(__pte_table_size);
 unsigned long __pmd_table_size;
@@ -471,6 +473,8 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
	 if (old & PATB_HR) {
		 asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
			      "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
+		asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
+			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		 trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
	 } else {
		 asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 881ebd53ffc2..9b23f12e863c 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -51,7 +51,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
	 unsigned int psize;
	 int ssize;
	 real_pte_t rpte;
-	int i;
+	int i, offset;
 
	 i = batch->index;
 
@@ -67,6 +67,10 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		 psize = get_slice_psize(mm, addr);
		 /* Mask the address for the correct page size */
		 addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
+		if (unlikely(psize == MMU_PAGE_16G))
+			offset = PTRS_PER_PUD;
+		else
+			offset = PTRS_PER_PMD;
 #else
		 BUG();
		 psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
@@ -78,6 +82,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		  * support 64k pages, this might be different from the
		  * hardware page size encoded in the slice table. */
		 addr &= PAGE_MASK;
+		offset = PTRS_PER_PTE;
	 }
 
 
@@ -91,7 +96,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
	 }
	 WARN_ON(vsid == 0);
	 vpn = hpt_vpn(addr, vsid, ssize);
-	rpte = __real_pte(__pte(pte), ptep);
+	rpte = __real_pte(__pte(pte), ptep, offset);
 
	 /*
	  * Check if we have an active batch on this CPU. If not, just
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 872d1f6dd11e..a9636d8cba15 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -327,6 +327,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			 PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
			 break;
+		case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
+			PPC_LWZ_OFFS(r_A, r_skb, K);
+			break;
		 case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
			 PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
			 break;
diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
index dd4c9b8b8a81..f6f55ab4980e 100644
--- a/arch/powerpc/platforms/powernv/opal-imc.c
+++ b/arch/powerpc/platforms/powernv/opal-imc.c
@@ -199,9 +199,11 @@ static void disable_nest_pmu_counters(void)
	 const struct cpumask *l_cpumask;
 
	 get_online_cpus();
-	for_each_online_node(nid) {
+	for_each_node_with_cpus(nid) {
		 l_cpumask = cpumask_of_node(nid);
-		cpu = cpumask_first(l_cpumask);
+		cpu = cpumask_first_and(l_cpumask, cpu_online_mask);
+		if (cpu >= nr_cpu_ids)
+			continue;
		 opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
				        get_hard_smp_processor_id(cpu));
	 }
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 496e47696ed0..a6c92c78c9b2 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1854,7 +1854,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
	 s64 rc;
 
	 if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
-		return -ENODEV;;
+		return -ENODEV;
 
	 pe = &phb->ioda.pe_array[pdn->pe_number];
	 if (pe->tce_bypass_enabled) {
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 4fb21e17504a..092715b9674b 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -80,6 +80,10 @@ static void pnv_setup_rfi_flush(void)
		 if (np && of_property_read_bool(np, "disabled"))
			 enable--;
 
+		np = of_get_child_by_name(fw_features, "speculation-policy-favor-security");
+		if (np && of_property_read_bool(np, "disabled"))
+			enable = 0;
+
		 of_node_put(np);
		 of_node_put(fw_features);
	 }
diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c
index 2b3eb01ab110..b7c53a51c31b 100644
--- a/arch/powerpc/platforms/powernv/vas-window.c
+++ b/arch/powerpc/platforms/powernv/vas-window.c
@@ -1063,16 +1063,16 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
			 rc = PTR_ERR(txwin->paste_kaddr);
			 goto free_window;
		 }
+	} else {
+		/*
+		 * A user mapping must ensure that context switch issues
+		 * CP_ABORT for this thread.
+		 */
+		rc = set_thread_uses_vas();
+		if (rc)
+			goto free_window;
	 }
 
-	/*
-	 * Now that we have a send window, ensure context switch issues
-	 * CP_ABORT for this thread.
-	 */
-	rc = -EINVAL;
-	if (set_thread_uses_vas() < 0)
-		goto free_window;
-
	 set_vinst_win(vinst, txwin);
 
	 return txwin;
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index dceb51454d8d..652d3e96b812 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -36,6 +36,7 @@
 #include <asm/xics.h>
 #include <asm/xive.h>
 #include <asm/plpar_wrappers.h>
+#include <asm/topology.h>
 
 #include "pseries.h"
 #include "offline_states.h"
@@ -331,6 +332,7 @@ static void pseries_remove_processor(struct device_node *np)
		 BUG_ON(cpu_online(cpu));
		 set_cpu_present(cpu, false);
		 set_hard_smp_processor_id(cpu, -1);
+		update_numa_cpu_lookup_table(cpu, -1);
		 break;
	 }
	 if (cpu >= nr_cpu_ids)
@@ -340,8 +342,6 @@ static void pseries_remove_processor(struct device_node *np)
	 cpu_maps_update_done();
 }
 
-extern int find_and_online_cpu_nid(int cpu);
-
 static int dlpar_online_cpu(struct device_node *dn)
 {
	 int rc = 0;
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 81d8614e7379..5e1ef9150182 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -49,6 +49,28 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id);
 
 
 /*
+ * Enable the hotplug interrupt late because processing them may touch other
+ * devices or systems (e.g. hugepages) that have not been initialized at the
+ * subsys stage.
+ */
+int __init init_ras_hotplug_IRQ(void)
+{
+	struct device_node *np;
+
+	/* Hotplug Events */
+	np = of_find_node_by_path("/event-sources/hot-plug-events");
+	if (np != NULL) {
+		if (dlpar_workqueue_init() == 0)
+			request_event_sources_irqs(np, ras_hotplug_interrupt,
+						   "RAS_HOTPLUG");
+		of_node_put(np);
+	}
+
+	return 0;
+}
+machine_late_initcall(pseries, init_ras_hotplug_IRQ);
+
+/*
  * Initialize handlers for the set of interrupts caused by hardware errors
  * and power system events.
  */
@@ -66,15 +88,6 @@ static int __init init_ras_IRQ(void)
		 of_node_put(np);
	 }
 
-	/* Hotplug Events */
-	np = of_find_node_by_path("/event-sources/hot-plug-events");
-	if (np != NULL) {
-		if (dlpar_workqueue_init() == 0)
-			request_event_sources_irqs(np, ras_hotplug_interrupt,
-						   "RAS_HOTPLUG");
-		of_node_put(np);
-	}
-
	 /* EPOW Events */
	 np = of_find_node_by_path("/event-sources/epow-events");
	 if (np != NULL) {
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 372d7ada1a0c..1a527625acf7 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -482,7 +482,8 @@ static void pseries_setup_rfi_flush(void)
		 if (types == L1D_FLUSH_NONE)
			 types = L1D_FLUSH_FALLBACK;
 
-		if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
+		if ((!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) ||
+		    (!(result.behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)))
			 enable = false;
	 } else {
		 /* Default to fallback if case hcall is not available */
diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
index d9c4c9366049..091f1d0d0af1 100644
--- a/arch/powerpc/sysdev/xive/spapr.c
+++ b/arch/powerpc/sysdev/xive/spapr.c
@@ -356,7 +356,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
 
	 rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
	 if (rc) {
-		pr_err("Error %lld getting queue info prio %d\n", rc, prio);
+		pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
+		       target, prio);
		 rc = -EIO;
		 goto fail;
	 }
@@ -370,7 +371,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
	 /* Configure and enable the queue in HW */
	 rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
	 if (rc) {
-		pr_err("Error %lld setting queue for prio %d\n", rc, prio);
+		pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
+		       target, prio);
		 rc = -EIO;
	 } else {
		 q->qpage = qpage;
@@ -389,8 +391,8 @@ static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
	 if (IS_ERR(qpage))
		 return PTR_ERR(qpage);
 
-	return xive_spapr_configure_queue(cpu, q, prio, qpage,
-					  xive_queue_shift);
+	return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
+					  q, prio, qpage, xive_queue_shift);
 }
 
 static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
@@ -399,10 +401,12 @@ static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
	 struct xive_q *q = &xc->queue[prio];
	 unsigned int alloc_order;
	 long rc;
+	int hw_cpu = get_hard_smp_processor_id(cpu);
 
-	rc = plpar_int_set_queue_config(0, cpu, prio, 0, 0);
+	rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
	 if (rc)
-		pr_err("Error %ld setting queue for prio %d\n", rc, prio);
+		pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
+		       hw_cpu, prio);
 
	 alloc_order = xive_alloc_order(xive_queue_shift);
	 free_pages((unsigned long)q->qpage, alloc_order);
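
The spapr.c hunks above make the XIVE queue hcalls take the hardware thread number rather than Linux's logical CPU number; the two are distinct namespaces, and the hypervisor only understands the former. A toy sketch of the distinction (the lookup table and its values are invented purely for illustration):

#include <stdio.h>

/* hypothetical logical-to-hardware thread mapping */
static const int hard_smp_id[] = { 8, 12, 16, 20 };

static int hard_id(int cpu)
{
	return hard_smp_id[cpu];
}

int main(void)
{
	int cpu = 2;	/* Linux logical CPU number */

	/* the hypervisor must be given 16 here, not 2 */
	printf("logical %d -> hardware %d\n", cpu, hard_id(cpu));
	return 0;
}
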
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index b6722c246d9c..04807c7f64cc 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -8,7 +8,6 @@ config RISCV
	 select OF
	 select OF_EARLY_FLATTREE
	 select OF_IRQ
-	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
	 select ARCH_WANT_FRAME_POINTERS
	 select CLONE_BACKWARDS
	 select COMMON_CLK
@@ -20,7 +19,6 @@ config RISCV
	 select GENERIC_STRNLEN_USER
	 select GENERIC_SMP_IDLE_THREAD
	 select GENERIC_ATOMIC64 if !64BIT || !RISCV_ISA_A
-	select ARCH_WANT_OPTIONAL_GPIOLIB
	 select HAVE_MEMBLOCK
	 select HAVE_MEMBLOCK_NODE_MAP
	 select HAVE_DMA_API_DEBUG
@@ -34,7 +32,6 @@ config RISCV
	 select HAVE_ARCH_TRACEHOOK
	 select MODULES_USE_ELF_RELA if MODULES
	 select THREAD_INFO_IN_TASK
-	select RISCV_IRQ_INTC
	 select RISCV_TIMER
 
 config MMU
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 87fc045be51f..56fa592cfa34 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -172,6 +172,9 @@ ENTRY(handle_exception)
	 move a1, sp /* pt_regs */
	 tail do_IRQ
 1:
+	/* Exceptions run with interrupts enabled */
+	csrs sstatus, SR_SIE
+
	 /* Handle syscalls */
	 li t0, EXC_SYSCALL
	 beq s4, t0, handle_syscall
@@ -198,8 +201,6 @@ handle_syscall:
	  */
	 addi s2, s2, 0x4
	 REG_S s2, PT_SEPC(sp)
-	/* System calls run with interrupts enabled */
-	csrs sstatus, SR_SIE
	 /* Trace syscalls, but only if requested by the user. */
	 REG_L t0, TASK_TI_FLAGS(tp)
	 andi t0, t0, _TIF_SYSCALL_TRACE
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 226eeb190f90..6e07ed37bbff 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -64,7 +64,7 @@ ENTRY(_start)
	 /* Start the kernel */
	 mv a0, s0
	 mv a1, s1
-	call sbi_save
+	call parse_dtb
	 tail start_kernel
 
 relocate:
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 09f7064e898c..c11f40c1b2a8 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -144,7 +144,7 @@ asmlinkage void __init setup_vm(void)
 #endif
 }
 
-void __init sbi_save(unsigned int hartid, void *dtb)
+void __init parse_dtb(unsigned int hartid, void *dtb)
 {
	 early_init_dt_scan(__va(dtb));
 }
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 6bf594ace663..8767e45f1b2b 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -430,6 +430,8 @@ config SPARC_LEON
	 depends on SPARC32
	 select USB_EHCI_BIG_ENDIAN_MMIO
	 select USB_EHCI_BIG_ENDIAN_DESC
+	select USB_UHCI_BIG_ENDIAN_MMIO
+	select USB_UHCI_BIG_ENDIAN_DESC
	 ---help---
	   If you say Y here if you are running on a SPARC-LEON processor.
	   The LEON processor is a synthesizable VHDL model of the
diff --git a/arch/sparc/include/asm/bug.h b/arch/sparc/include/asm/bug.h
index 6f17528356b2..ea53e418f6c0 100644
--- a/arch/sparc/include/asm/bug.h
+++ b/arch/sparc/include/asm/bug.h
@@ -9,10 +9,14 @@
 void do_BUG(const char *file, int line);
 #define BUG() do {					\
	do_BUG(__FILE__, __LINE__);			\
+	barrier_before_unreachable();			\
	__builtin_trap();				\
 } while (0)
 #else
-#define BUG() __builtin_trap()
+#define BUG() do {					\
+	barrier_before_unreachable();			\
+	__builtin_trap();				\
+} while (0)
 #endif
 
 #define HAVE_ARCH_BUG
diff --git a/arch/x86/.gitignore b/arch/x86/.gitignore
index aff152c87cf4..5a82bac5e0bc 100644
--- a/arch/x86/.gitignore
+++ b/arch/x86/.gitignore
@@ -1,6 +1,7 @@
 boot/compressed/vmlinux
 tools/test_get_len
 tools/insn_sanity
+tools/insn_decoder_test
 purgatory/kexec-purgatory.c
 purgatory/purgatory.ro
 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 63bf349b2b24..c1236b187824 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -423,12 +423,6 @@ config X86_MPPARSE
	   For old smp systems that do not have proper acpi support. Newer systems
	   (esp with 64bit cpus) with acpi support, MADT and DSDT will override it
 
-config X86_BIGSMP
-	bool "Support for big SMP systems with more than 8 CPUs"
-	depends on X86_32 && SMP
-	---help---
-	  This option is needed for the systems that have more than 8 CPUs
-
 config GOLDFISH
	 def_bool y
	 depends on X86_GOLDFISH
@@ -460,6 +454,12 @@ config INTEL_RDT
460 Say N if unsure. 454 Say N if unsure.
461 455
462if X86_32 456if X86_32
457config X86_BIGSMP
458 bool "Support for big SMP systems with more than 8 CPUs"
459 depends on SMP
460 ---help---
461 This option is needed for the systems that have more than 8 CPUs
462
463config X86_EXTENDED_PLATFORM 463config X86_EXTENDED_PLATFORM
464 bool "Support for extended (non-PC) x86 platforms" 464 bool "Support for extended (non-PC) x86 platforms"
465 default y 465 default y
@@ -949,25 +949,66 @@ config MAXSMP
949 Enable maximum number of CPUS and NUMA Nodes for this architecture. 949 Enable maximum number of CPUS and NUMA Nodes for this architecture.
950 If unsure, say N. 950 If unsure, say N.
951 951
952#
953# The maximum number of CPUs supported:
954#
955# The main config value is NR_CPUS, which defaults to NR_CPUS_DEFAULT,
956# and which can be configured interactively in the
957# [NR_CPUS_RANGE_BEGIN ... NR_CPUS_RANGE_END] range.
958#
959# The ranges are different on 32-bit and 64-bit kernels, depending on
960# hardware capabilities and scalability features of the kernel.
961#
962# ( If MAXSMP is enabled we just use the highest possible value and disable
963# interactive configuration. )
964#
965
966config NR_CPUS_RANGE_BEGIN
967 int
968 default NR_CPUS_RANGE_END if MAXSMP
969 default 1 if !SMP
970 default 2
971
972config NR_CPUS_RANGE_END
973 int
974 depends on X86_32
975 default 64 if SMP && X86_BIGSMP
976 default 8 if SMP && !X86_BIGSMP
977 default 1 if !SMP
978
979config NR_CPUS_RANGE_END
980 int
981 depends on X86_64
982 default 8192 if SMP && ( MAXSMP || CPUMASK_OFFSTACK)
983 default 512 if SMP && (!MAXSMP && !CPUMASK_OFFSTACK)
984 default 1 if !SMP
985
986config NR_CPUS_DEFAULT
987 int
988 depends on X86_32
989 default 32 if X86_BIGSMP
990 default 8 if SMP
991 default 1 if !SMP
992
993config NR_CPUS_DEFAULT
994 int
995 depends on X86_64
996 default 8192 if MAXSMP
997 default 64 if SMP
998 default 1 if !SMP
999
952config NR_CPUS 1000config NR_CPUS
953 int "Maximum number of CPUs" if SMP && !MAXSMP 1001 int "Maximum number of CPUs" if SMP && !MAXSMP
954 range 2 8 if SMP && X86_32 && !X86_BIGSMP 1002 range NR_CPUS_RANGE_BEGIN NR_CPUS_RANGE_END
955 range 2 64 if SMP && X86_32 && X86_BIGSMP 1003 default NR_CPUS_DEFAULT
956 range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK && X86_64
957 range 2 8192 if SMP && !MAXSMP && CPUMASK_OFFSTACK && X86_64
958 default "1" if !SMP
959 default "8192" if MAXSMP
960 default "32" if SMP && X86_BIGSMP
961 default "8" if SMP && X86_32
962 default "64" if SMP
963 ---help--- 1004 ---help---
964 This allows you to specify the maximum number of CPUs which this 1005 This allows you to specify the maximum number of CPUs which this
965 kernel will support. If CPUMASK_OFFSTACK is enabled, the maximum 1006 kernel will support. If CPUMASK_OFFSTACK is enabled, the maximum
966 supported value is 8192, otherwise the maximum value is 512. The 1007 supported value is 8192, otherwise the maximum value is 512. The
967 minimum value which makes sense is 2. 1008 minimum value which makes sense is 2.
968 1009
969 This is purely to save memory - each supported CPU adds 1010 This is purely to save memory: each supported CPU adds about 8KB
970 approximately eight kilobytes to the kernel image. 1011 to the kernel image.
971 1012
972config SCHED_SMT 1013config SCHED_SMT
973 bool "SMT (Hyperthreading) scheduler support" 1014 bool "SMT (Hyperthreading) scheduler support"
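The block comment above summarizes the new scheme: NR_CPUS is now bounded by NR_CPUS_RANGE_BEGIN/END and seeded from NR_CPUS_DEFAULT, replacing the old thicket of per-case range/default lines. A hedged C mirror of how those symbols resolve on a 64-bit build (illustrative only; Kconfig takes the first matching default):

    #include <stdbool.h>

    struct nr_cpus_limits { int begin, end, def; };

    static struct nr_cpus_limits nr_cpus_x86_64(bool smp, bool maxsmp,
                                                bool offstack)
    {
            struct nr_cpus_limits l;

            l.end   = !smp ? 1 : (maxsmp || offstack) ? 8192 : 512;
            l.begin = maxsmp ? l.end : (!smp ? 1 : 2);
            l.def   = maxsmp ? 8192 : (smp ? 64 : 1);
            return l;
    }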
@@ -1363,7 +1404,7 @@ config HIGHMEM4G
1363 1404
1364config HIGHMEM64G 1405config HIGHMEM64G
1365 bool "64GB" 1406 bool "64GB"
1366 depends on !M486 1407 depends on !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6
1367 select X86_PAE 1408 select X86_PAE
1368 ---help--- 1409 ---help---
1369 Select this if you have a 32-bit processor and more than 4 1410 Select this if you have a 32-bit processor and more than 4
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 65a9a4716e34..8b8d2297d486 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -374,7 +374,7 @@ config X86_TSC
374 374
375config X86_CMPXCHG64 375config X86_CMPXCHG64
376 def_bool y 376 def_bool y
377 depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM 377 depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8
378 378
379# this should be set for all -march=.. options where the compiler 379# this should be set for all -march=.. options where the compiler
380# generates cmov. 380# generates cmov.
@@ -385,7 +385,7 @@ config X86_CMOV
385config X86_MINIMUM_CPU_FAMILY 385config X86_MINIMUM_CPU_FAMILY
386 int 386 int
387 default "64" if X86_64 387 default "64" if X86_64
388 default "6" if X86_32 && X86_P6_NOP 388 default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8)
389 default "5" if X86_32 && X86_CMPXCHG64 389 default "5" if X86_32 && X86_CMPXCHG64
390 default "4" 390 default "4"
391 391
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 353e20c3f114..886a9115af62 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -439,7 +439,7 @@ setup_uga32(void **uga_handle, unsigned long size, u32 *width, u32 *height)
439 struct efi_uga_draw_protocol *uga = NULL, *first_uga; 439 struct efi_uga_draw_protocol *uga = NULL, *first_uga;
440 efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID; 440 efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
441 unsigned long nr_ugas; 441 unsigned long nr_ugas;
442 u32 *handles = (u32 *)uga_handle;; 442 u32 *handles = (u32 *)uga_handle;
443 efi_status_t status = EFI_INVALID_PARAMETER; 443 efi_status_t status = EFI_INVALID_PARAMETER;
444 int i; 444 int i;
445 445
@@ -484,7 +484,7 @@ setup_uga64(void **uga_handle, unsigned long size, u32 *width, u32 *height)
484 struct efi_uga_draw_protocol *uga = NULL, *first_uga; 484 struct efi_uga_draw_protocol *uga = NULL, *first_uga;
485 efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID; 485 efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
486 unsigned long nr_ugas; 486 unsigned long nr_ugas;
487 u64 *handles = (u64 *)uga_handle;; 487 u64 *handles = (u64 *)uga_handle;
488 efi_status_t status = EFI_INVALID_PARAMETER; 488 efi_status_t status = EFI_INVALID_PARAMETER;
489 int i; 489 int i;
490 490
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
index 36870b26067a..d08805032f01 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
+++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
@@ -57,10 +57,12 @@ void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state)
57{ 57{
58 unsigned int j; 58 unsigned int j;
59 59
60 state->lens[0] = 0; 60 /* initially all lanes are unused */
61 state->lens[1] = 1; 61 state->lens[0] = 0xFFFFFFFF00000000;
62 state->lens[2] = 2; 62 state->lens[1] = 0xFFFFFFFF00000001;
63 state->lens[3] = 3; 63 state->lens[2] = 0xFFFFFFFF00000002;
64 state->lens[3] = 0xFFFFFFFF00000003;
65
64 state->unused_lanes = 0xFF03020100; 66 state->unused_lanes = 0xFF03020100;
65 for (j = 0; j < 4; j++) 67 for (j = 0; j < 4; j++)
66 state->ldata[j].job_in_lane = NULL; 68 state->ldata[j].job_in_lane = NULL;
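The new lens[] values encode two things at once: the low 32 bits keep the lane number, and the all-ones high 32 bits mark the lane unused, so a minimum scan over lens[] can never select an idle lane. A hedged C sketch of that encoding (names invented):

    #include <stdint.h>

    #define SHA512_LANE_UNUSED 0xFFFFFFFF00000000ULL

    static inline uint64_t lane_init(uint32_t lane)
    {
            return SHA512_LANE_UNUSED | lane; /* unused marker + lane index */
    }

    static inline int lane_is_unused(uint64_t len)
    {
            return (uint32_t)(len >> 32) == 0xFFFFFFFFu;
    }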
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 3f48f695d5e6..dce7092ab24a 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -97,80 +97,69 @@ For 32-bit we have the following conventions - kernel is built with
97 97
98#define SIZEOF_PTREGS 21*8 98#define SIZEOF_PTREGS 21*8
99 99
100 .macro ALLOC_PT_GPREGS_ON_STACK 100.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax
101 addq $-(15*8), %rsp 101 /*
102 .endm 102 * Push registers and sanitize registers of values that a
103 * speculation attack might otherwise want to exploit. The
104 * lower registers are likely clobbered well before they
105 * could be put to use in a speculative execution gadget.
106 * Interleave XOR with PUSH for better uop scheduling:
107 */
108 pushq %rdi /* pt_regs->di */
109 pushq %rsi /* pt_regs->si */
110 pushq \rdx /* pt_regs->dx */
111 pushq %rcx /* pt_regs->cx */
112 pushq \rax /* pt_regs->ax */
113 pushq %r8 /* pt_regs->r8 */
114 xorq %r8, %r8 /* nospec r8 */
115 pushq %r9 /* pt_regs->r9 */
116 xorq %r9, %r9 /* nospec r9 */
117 pushq %r10 /* pt_regs->r10 */
118 xorq %r10, %r10 /* nospec r10 */
119 pushq %r11 /* pt_regs->r11 */
120 xorq %r11, %r11 /* nospec r11*/
121 pushq %rbx /* pt_regs->rbx */
122 xorl %ebx, %ebx /* nospec rbx*/
123 pushq %rbp /* pt_regs->rbp */
124 xorl %ebp, %ebp /* nospec rbp*/
125 pushq %r12 /* pt_regs->r12 */
126 xorq %r12, %r12 /* nospec r12*/
127 pushq %r13 /* pt_regs->r13 */
128 xorq %r13, %r13 /* nospec r13*/
129 pushq %r14 /* pt_regs->r14 */
130 xorq %r14, %r14 /* nospec r14*/
131 pushq %r15 /* pt_regs->r15 */
132 xorq %r15, %r15 /* nospec r15*/
133 UNWIND_HINT_REGS
134.endm
103 135
104 .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1 136.macro POP_REGS pop_rdi=1 skip_r11rcx=0
105 .if \r11
106 movq %r11, 6*8+\offset(%rsp)
107 .endif
108 .if \r8910
109 movq %r10, 7*8+\offset(%rsp)
110 movq %r9, 8*8+\offset(%rsp)
111 movq %r8, 9*8+\offset(%rsp)
112 .endif
113 .if \rax
114 movq %rax, 10*8+\offset(%rsp)
115 .endif
116 .if \rcx
117 movq %rcx, 11*8+\offset(%rsp)
118 .endif
119 movq %rdx, 12*8+\offset(%rsp)
120 movq %rsi, 13*8+\offset(%rsp)
121 movq %rdi, 14*8+\offset(%rsp)
122 UNWIND_HINT_REGS offset=\offset extra=0
123 .endm
124 .macro SAVE_C_REGS offset=0
125 SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
126 .endm
127 .macro SAVE_C_REGS_EXCEPT_RAX_RCX offset=0
128 SAVE_C_REGS_HELPER \offset, 0, 0, 1, 1
129 .endm
130 .macro SAVE_C_REGS_EXCEPT_R891011
131 SAVE_C_REGS_HELPER 0, 1, 1, 0, 0
132 .endm
133 .macro SAVE_C_REGS_EXCEPT_RCX_R891011
134 SAVE_C_REGS_HELPER 0, 1, 0, 0, 0
135 .endm
136 .macro SAVE_C_REGS_EXCEPT_RAX_RCX_R11
137 SAVE_C_REGS_HELPER 0, 0, 0, 1, 0
138 .endm
139
140 .macro SAVE_EXTRA_REGS offset=0
141 movq %r15, 0*8+\offset(%rsp)
142 movq %r14, 1*8+\offset(%rsp)
143 movq %r13, 2*8+\offset(%rsp)
144 movq %r12, 3*8+\offset(%rsp)
145 movq %rbp, 4*8+\offset(%rsp)
146 movq %rbx, 5*8+\offset(%rsp)
147 UNWIND_HINT_REGS offset=\offset
148 .endm
149
150 .macro POP_EXTRA_REGS
151 popq %r15 137 popq %r15
152 popq %r14 138 popq %r14
153 popq %r13 139 popq %r13
154 popq %r12 140 popq %r12
155 popq %rbp 141 popq %rbp
156 popq %rbx 142 popq %rbx
157 .endm 143 .if \skip_r11rcx
158 144 popq %rsi
159 .macro POP_C_REGS 145 .else
160 popq %r11 146 popq %r11
147 .endif
161 popq %r10 148 popq %r10
162 popq %r9 149 popq %r9
163 popq %r8 150 popq %r8
164 popq %rax 151 popq %rax
152 .if \skip_r11rcx
153 popq %rsi
154 .else
165 popq %rcx 155 popq %rcx
156 .endif
166 popq %rdx 157 popq %rdx
167 popq %rsi 158 popq %rsi
159 .if \pop_rdi
168 popq %rdi 160 popq %rdi
169 .endm 161 .endif
170 162.endm
171 .macro icebp
172 .byte 0xf1
173 .endm
174 163
175/* 164/*
176 * This is a sneaky trick to help the unwinder find pt_regs on the stack. The 165 * This is a sneaky trick to help the unwinder find pt_regs on the stack. The
@@ -178,7 +167,7 @@ For 32-bit we have the following conventions - kernel is built with
178 * is just setting the LSB, which makes it an invalid stack address and is also 167 * is just setting the LSB, which makes it an invalid stack address and is also
179 * a signal to the unwinder that it's a pt_regs pointer in disguise. 168 * a signal to the unwinder that it's a pt_regs pointer in disguise.
180 * 169 *
181 * NOTE: This macro must be used *after* SAVE_EXTRA_REGS because it corrupts 170 * NOTE: This macro must be used *after* PUSH_AND_CLEAR_REGS because it corrupts
182 * the original rbp. 171 * the original rbp.
183 */ 172 */
184.macro ENCODE_FRAME_POINTER ptregs_offset=0 173.macro ENCODE_FRAME_POINTER ptregs_offset=0
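The comment inside PUSH_AND_CLEAR_REGS states the idea: after saving each GPR, zero it so a speculative-execution gadget further down the entry path cannot consume a stale user-controlled value, interleaving XOR with PUSH for better uop scheduling. A hedged, stand-alone C illustration of the zeroing half (inline asm; a sketch, not the kernel macro):

    static inline void clear_high_gprs(void)
    {
            /* xor of a register with itself is the dependency-breaking
             * zeroing idiom; the clobbers tell the compiler the old
             * values are gone */
            asm volatile("xorq %%r8,  %%r8\n\t"
                         "xorq %%r9,  %%r9\n\t"
                         "xorq %%r10, %%r10\n\t"
                         "xorq %%r11, %%r11"
                         : : : "r8", "r9", "r10", "r11");
    }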
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 30c8c5344c4a..8971bd64d515 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -213,7 +213,7 @@ ENTRY(entry_SYSCALL_64)
213 213
214 swapgs 214 swapgs
215 /* 215 /*
216 * This path is not taken when PAGE_TABLE_ISOLATION is disabled so it 216 * This path is only taken when PAGE_TABLE_ISOLATION is disabled so it
217 * is not required to switch CR3. 217 * is not required to switch CR3.
218 */ 218 */
219 movq %rsp, PER_CPU_VAR(rsp_scratch) 219 movq %rsp, PER_CPU_VAR(rsp_scratch)
@@ -227,22 +227,8 @@ ENTRY(entry_SYSCALL_64)
227 pushq %rcx /* pt_regs->ip */ 227 pushq %rcx /* pt_regs->ip */
228GLOBAL(entry_SYSCALL_64_after_hwframe) 228GLOBAL(entry_SYSCALL_64_after_hwframe)
229 pushq %rax /* pt_regs->orig_ax */ 229 pushq %rax /* pt_regs->orig_ax */
230 pushq %rdi /* pt_regs->di */ 230
231 pushq %rsi /* pt_regs->si */ 231 PUSH_AND_CLEAR_REGS rax=$-ENOSYS
232 pushq %rdx /* pt_regs->dx */
233 pushq %rcx /* pt_regs->cx */
234 pushq $-ENOSYS /* pt_regs->ax */
235 pushq %r8 /* pt_regs->r8 */
236 pushq %r9 /* pt_regs->r9 */
237 pushq %r10 /* pt_regs->r10 */
238 pushq %r11 /* pt_regs->r11 */
239 pushq %rbx /* pt_regs->rbx */
240 pushq %rbp /* pt_regs->rbp */
241 pushq %r12 /* pt_regs->r12 */
242 pushq %r13 /* pt_regs->r13 */
243 pushq %r14 /* pt_regs->r14 */
244 pushq %r15 /* pt_regs->r15 */
245 UNWIND_HINT_REGS
246 232
247 TRACE_IRQS_OFF 233 TRACE_IRQS_OFF
248 234
@@ -321,15 +307,7 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
321syscall_return_via_sysret: 307syscall_return_via_sysret:
322 /* rcx and r11 are already restored (see code above) */ 308 /* rcx and r11 are already restored (see code above) */
323 UNWIND_HINT_EMPTY 309 UNWIND_HINT_EMPTY
324 POP_EXTRA_REGS 310 POP_REGS pop_rdi=0 skip_r11rcx=1
325 popq %rsi /* skip r11 */
326 popq %r10
327 popq %r9
328 popq %r8
329 popq %rax
330 popq %rsi /* skip rcx */
331 popq %rdx
332 popq %rsi
333 311
334 /* 312 /*
335 * Now all regs are restored except RSP and RDI. 313 * Now all regs are restored except RSP and RDI.
@@ -559,9 +537,7 @@ END(irq_entries_start)
559 call switch_to_thread_stack 537 call switch_to_thread_stack
5601: 5381:
561 539
562 ALLOC_PT_GPREGS_ON_STACK 540 PUSH_AND_CLEAR_REGS
563 SAVE_C_REGS
564 SAVE_EXTRA_REGS
565 ENCODE_FRAME_POINTER 541 ENCODE_FRAME_POINTER
566 542
567 testb $3, CS(%rsp) 543 testb $3, CS(%rsp)
@@ -622,15 +598,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
622 ud2 598 ud2
6231: 5991:
624#endif 600#endif
625 POP_EXTRA_REGS 601 POP_REGS pop_rdi=0
626 popq %r11
627 popq %r10
628 popq %r9
629 popq %r8
630 popq %rax
631 popq %rcx
632 popq %rdx
633 popq %rsi
634 602
635 /* 603 /*
636 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS. 604 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
@@ -688,8 +656,7 @@ GLOBAL(restore_regs_and_return_to_kernel)
688 ud2 656 ud2
6891: 6571:
690#endif 658#endif
691 POP_EXTRA_REGS 659 POP_REGS
692 POP_C_REGS
693 addq $8, %rsp /* skip regs->orig_ax */ 660 addq $8, %rsp /* skip regs->orig_ax */
694 /* 661 /*
695 * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization 662 * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
@@ -908,7 +875,9 @@ ENTRY(\sym)
908 pushq $-1 /* ORIG_RAX: no syscall to restart */ 875 pushq $-1 /* ORIG_RAX: no syscall to restart */
909 .endif 876 .endif
910 877
911 ALLOC_PT_GPREGS_ON_STACK 878 /* Save all registers in pt_regs */
879 PUSH_AND_CLEAR_REGS
880 ENCODE_FRAME_POINTER
912 881
913 .if \paranoid < 2 882 .if \paranoid < 2
914 testb $3, CS(%rsp) /* If coming from userspace, switch stacks */ 883 testb $3, CS(%rsp) /* If coming from userspace, switch stacks */
@@ -1121,9 +1090,7 @@ ENTRY(xen_failsafe_callback)
1121 addq $0x30, %rsp 1090 addq $0x30, %rsp
1122 UNWIND_HINT_IRET_REGS 1091 UNWIND_HINT_IRET_REGS
1123 pushq $-1 /* orig_ax = -1 => not a system call */ 1092 pushq $-1 /* orig_ax = -1 => not a system call */
1124 ALLOC_PT_GPREGS_ON_STACK 1093 PUSH_AND_CLEAR_REGS
1125 SAVE_C_REGS
1126 SAVE_EXTRA_REGS
1127 ENCODE_FRAME_POINTER 1094 ENCODE_FRAME_POINTER
1128 jmp error_exit 1095 jmp error_exit
1129END(xen_failsafe_callback) 1096END(xen_failsafe_callback)
@@ -1163,16 +1130,13 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1
1163#endif 1130#endif
1164 1131
1165/* 1132/*
1166 * Save all registers in pt_regs, and switch gs if needed. 1133 * Switch gs if needed.
1167 * Use slow, but surefire "are we in kernel?" check. 1134 * Use slow, but surefire "are we in kernel?" check.
1168 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise 1135 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
1169 */ 1136 */
1170ENTRY(paranoid_entry) 1137ENTRY(paranoid_entry)
1171 UNWIND_HINT_FUNC 1138 UNWIND_HINT_FUNC
1172 cld 1139 cld
1173 SAVE_C_REGS 8
1174 SAVE_EXTRA_REGS 8
1175 ENCODE_FRAME_POINTER 8
1176 movl $1, %ebx 1140 movl $1, %ebx
1177 movl $MSR_GS_BASE, %ecx 1141 movl $MSR_GS_BASE, %ecx
1178 rdmsr 1142 rdmsr
@@ -1211,21 +1175,18 @@ ENTRY(paranoid_exit)
1211 jmp .Lparanoid_exit_restore 1175 jmp .Lparanoid_exit_restore
1212.Lparanoid_exit_no_swapgs: 1176.Lparanoid_exit_no_swapgs:
1213 TRACE_IRQS_IRETQ_DEBUG 1177 TRACE_IRQS_IRETQ_DEBUG
1178 RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
1214.Lparanoid_exit_restore: 1179.Lparanoid_exit_restore:
1215 jmp restore_regs_and_return_to_kernel 1180 jmp restore_regs_and_return_to_kernel
1216END(paranoid_exit) 1181END(paranoid_exit)
1217 1182
1218/* 1183/*
1219 * Save all registers in pt_regs, and switch gs if needed. 1184 * Switch gs if needed.
1220 * Return: EBX=0: came from user mode; EBX=1: otherwise 1185 * Return: EBX=0: came from user mode; EBX=1: otherwise
1221 */ 1186 */
1222ENTRY(error_entry) 1187ENTRY(error_entry)
1223 UNWIND_HINT_FUNC 1188 UNWIND_HINT_REGS offset=8
1224 cld 1189 cld
1225 SAVE_C_REGS 8
1226 SAVE_EXTRA_REGS 8
1227 ENCODE_FRAME_POINTER 8
1228 xorl %ebx, %ebx
1229 testb $3, CS+8(%rsp) 1190 testb $3, CS+8(%rsp)
1230 jz .Lerror_kernelspace 1191 jz .Lerror_kernelspace
1231 1192
@@ -1406,22 +1367,7 @@ ENTRY(nmi)
1406 pushq 1*8(%rdx) /* pt_regs->rip */ 1367 pushq 1*8(%rdx) /* pt_regs->rip */
1407 UNWIND_HINT_IRET_REGS 1368 UNWIND_HINT_IRET_REGS
1408 pushq $-1 /* pt_regs->orig_ax */ 1369 pushq $-1 /* pt_regs->orig_ax */
1409 pushq %rdi /* pt_regs->di */ 1370 PUSH_AND_CLEAR_REGS rdx=(%rdx)
1410 pushq %rsi /* pt_regs->si */
1411 pushq (%rdx) /* pt_regs->dx */
1412 pushq %rcx /* pt_regs->cx */
1413 pushq %rax /* pt_regs->ax */
1414 pushq %r8 /* pt_regs->r8 */
1415 pushq %r9 /* pt_regs->r9 */
1416 pushq %r10 /* pt_regs->r10 */
1417 pushq %r11 /* pt_regs->r11 */
1418 pushq %rbx /* pt_regs->rbx */
1419 pushq %rbp /* pt_regs->rbp */
1420 pushq %r12 /* pt_regs->r12 */
1421 pushq %r13 /* pt_regs->r13 */
1422 pushq %r14 /* pt_regs->r14 */
1423 pushq %r15 /* pt_regs->r15 */
1424 UNWIND_HINT_REGS
1425 ENCODE_FRAME_POINTER 1371 ENCODE_FRAME_POINTER
1426 1372
1427 /* 1373 /*
@@ -1631,7 +1577,8 @@ end_repeat_nmi:
1631 * frame to point back to repeat_nmi. 1577 * frame to point back to repeat_nmi.
1632 */ 1578 */
1633 pushq $-1 /* ORIG_RAX: no syscall to restart */ 1579 pushq $-1 /* ORIG_RAX: no syscall to restart */
1634 ALLOC_PT_GPREGS_ON_STACK 1580 PUSH_AND_CLEAR_REGS
1581 ENCODE_FRAME_POINTER
1635 1582
1636 /* 1583 /*
1637 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit 1584 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
@@ -1655,8 +1602,7 @@ end_repeat_nmi:
1655nmi_swapgs: 1602nmi_swapgs:
1656 SWAPGS_UNSAFE_STACK 1603 SWAPGS_UNSAFE_STACK
1657nmi_restore: 1604nmi_restore:
1658 POP_EXTRA_REGS 1605 POP_REGS
1659 POP_C_REGS
1660 1606
1661 /* 1607 /*
1662 * Skip orig_ax and the "outermost" frame to point RSP at the "iret" 1608 * Skip orig_ax and the "outermost" frame to point RSP at the "iret"
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 98d5358e4041..fd65e016e413 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -85,15 +85,25 @@ ENTRY(entry_SYSENTER_compat)
85 pushq %rcx /* pt_regs->cx */ 85 pushq %rcx /* pt_regs->cx */
86 pushq $-ENOSYS /* pt_regs->ax */ 86 pushq $-ENOSYS /* pt_regs->ax */
87 pushq $0 /* pt_regs->r8 = 0 */ 87 pushq $0 /* pt_regs->r8 = 0 */
88 xorq %r8, %r8 /* nospec r8 */
88 pushq $0 /* pt_regs->r9 = 0 */ 89 pushq $0 /* pt_regs->r9 = 0 */
90 xorq %r9, %r9 /* nospec r9 */
89 pushq $0 /* pt_regs->r10 = 0 */ 91 pushq $0 /* pt_regs->r10 = 0 */
92 xorq %r10, %r10 /* nospec r10 */
90 pushq $0 /* pt_regs->r11 = 0 */ 93 pushq $0 /* pt_regs->r11 = 0 */
94 xorq %r11, %r11 /* nospec r11 */
91 pushq %rbx /* pt_regs->rbx */ 95 pushq %rbx /* pt_regs->rbx */
96 xorl %ebx, %ebx /* nospec rbx */
92 pushq %rbp /* pt_regs->rbp (will be overwritten) */ 97 pushq %rbp /* pt_regs->rbp (will be overwritten) */
98 xorl %ebp, %ebp /* nospec rbp */
93 pushq $0 /* pt_regs->r12 = 0 */ 99 pushq $0 /* pt_regs->r12 = 0 */
100 xorq %r12, %r12 /* nospec r12 */
94 pushq $0 /* pt_regs->r13 = 0 */ 101 pushq $0 /* pt_regs->r13 = 0 */
102 xorq %r13, %r13 /* nospec r13 */
95 pushq $0 /* pt_regs->r14 = 0 */ 103 pushq $0 /* pt_regs->r14 = 0 */
104 xorq %r14, %r14 /* nospec r14 */
96 pushq $0 /* pt_regs->r15 = 0 */ 105 pushq $0 /* pt_regs->r15 = 0 */
106 xorq %r15, %r15 /* nospec r15 */
97 cld 107 cld
98 108
99 /* 109 /*
@@ -214,15 +224,25 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
214 pushq %rbp /* pt_regs->cx (stashed in bp) */ 224 pushq %rbp /* pt_regs->cx (stashed in bp) */
215 pushq $-ENOSYS /* pt_regs->ax */ 225 pushq $-ENOSYS /* pt_regs->ax */
216 pushq $0 /* pt_regs->r8 = 0 */ 226 pushq $0 /* pt_regs->r8 = 0 */
227 xorq %r8, %r8 /* nospec r8 */
217 pushq $0 /* pt_regs->r9 = 0 */ 228 pushq $0 /* pt_regs->r9 = 0 */
229 xorq %r9, %r9 /* nospec r9 */
218 pushq $0 /* pt_regs->r10 = 0 */ 230 pushq $0 /* pt_regs->r10 = 0 */
231 xorq %r10, %r10 /* nospec r10 */
219 pushq $0 /* pt_regs->r11 = 0 */ 232 pushq $0 /* pt_regs->r11 = 0 */
233 xorq %r11, %r11 /* nospec r11 */
220 pushq %rbx /* pt_regs->rbx */ 234 pushq %rbx /* pt_regs->rbx */
235 xorl %ebx, %ebx /* nospec rbx */
221 pushq %rbp /* pt_regs->rbp (will be overwritten) */ 236 pushq %rbp /* pt_regs->rbp (will be overwritten) */
237 xorl %ebp, %ebp /* nospec rbp */
222 pushq $0 /* pt_regs->r12 = 0 */ 238 pushq $0 /* pt_regs->r12 = 0 */
239 xorq %r12, %r12 /* nospec r12 */
223 pushq $0 /* pt_regs->r13 = 0 */ 240 pushq $0 /* pt_regs->r13 = 0 */
241 xorq %r13, %r13 /* nospec r13 */
224 pushq $0 /* pt_regs->r14 = 0 */ 242 pushq $0 /* pt_regs->r14 = 0 */
243 xorq %r14, %r14 /* nospec r14 */
225 pushq $0 /* pt_regs->r15 = 0 */ 244 pushq $0 /* pt_regs->r15 = 0 */
245 xorq %r15, %r15 /* nospec r15 */
226 246
227 /* 247 /*
228 * User mode is traced as though IRQs are on, and SYSENTER 248 * User mode is traced as though IRQs are on, and SYSENTER
@@ -338,15 +358,25 @@ ENTRY(entry_INT80_compat)
338 pushq %rcx /* pt_regs->cx */ 358 pushq %rcx /* pt_regs->cx */
339 pushq $-ENOSYS /* pt_regs->ax */ 359 pushq $-ENOSYS /* pt_regs->ax */
340 pushq $0 /* pt_regs->r8 = 0 */ 360 pushq $0 /* pt_regs->r8 = 0 */
361 xorq %r8, %r8 /* nospec r8 */
341 pushq $0 /* pt_regs->r9 = 0 */ 362 pushq $0 /* pt_regs->r9 = 0 */
363 xorq %r9, %r9 /* nospec r9 */
342 pushq $0 /* pt_regs->r10 = 0 */ 364 pushq $0 /* pt_regs->r10 = 0 */
365 xorq %r10, %r10 /* nospec r10 */
343 pushq $0 /* pt_regs->r11 = 0 */ 366 pushq $0 /* pt_regs->r11 = 0 */
367 xorq %r11, %r11 /* nospec r11 */
344 pushq %rbx /* pt_regs->rbx */ 368 pushq %rbx /* pt_regs->rbx */
369 xorl %ebx, %ebx /* nospec rbx */
345 pushq %rbp /* pt_regs->rbp */ 370 pushq %rbp /* pt_regs->rbp */
371 xorl %ebp, %ebp /* nospec rbp */
346 pushq %r12 /* pt_regs->r12 */ 372 pushq %r12 /* pt_regs->r12 */
373 xorq %r12, %r12 /* nospec r12 */
347 pushq %r13 /* pt_regs->r13 */ 374 pushq %r13 /* pt_regs->r13 */
375 xorq %r13, %r13 /* nospec r13 */
348 pushq %r14 /* pt_regs->r14 */ 376 pushq %r14 /* pt_regs->r14 */
377 xorq %r14, %r14 /* nospec r14 */
349 pushq %r15 /* pt_regs->r15 */ 378 pushq %r15 /* pt_regs->r15 */
379 xorq %r15, %r15 /* nospec r15 */
350 cld 380 cld
351 381
352 /* 382 /*
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 731153a4681e..56457cb73448 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3559,7 +3559,7 @@ static int intel_snb_pebs_broken(int cpu)
3559 break; 3559 break;
3560 3560
3561 case INTEL_FAM6_SANDYBRIDGE_X: 3561 case INTEL_FAM6_SANDYBRIDGE_X:
3562 switch (cpu_data(cpu).x86_mask) { 3562 switch (cpu_data(cpu).x86_stepping) {
3563 case 6: rev = 0x618; break; 3563 case 6: rev = 0x618; break;
3564 case 7: rev = 0x70c; break; 3564 case 7: rev = 0x70c; break;
3565 } 3565 }
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index ae64d0b69729..cf372b90557e 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -1186,7 +1186,7 @@ void __init intel_pmu_lbr_init_atom(void)
1186 * on PMU interrupt 1186 * on PMU interrupt
1187 */ 1187 */
1188 if (boot_cpu_data.x86_model == 28 1188 if (boot_cpu_data.x86_model == 28
1189 && boot_cpu_data.x86_mask < 10) { 1189 && boot_cpu_data.x86_stepping < 10) {
1190 pr_cont("LBR disabled due to erratum"); 1190 pr_cont("LBR disabled due to erratum");
1191 return; 1191 return;
1192 } 1192 }
diff --git a/arch/x86/events/intel/p6.c b/arch/x86/events/intel/p6.c
index a5604c352930..408879b0c0d4 100644
--- a/arch/x86/events/intel/p6.c
+++ b/arch/x86/events/intel/p6.c
@@ -234,7 +234,7 @@ static __initconst const struct x86_pmu p6_pmu = {
234 234
235static __init void p6_pmu_rdpmc_quirk(void) 235static __init void p6_pmu_rdpmc_quirk(void)
236{ 236{
237 if (boot_cpu_data.x86_mask < 9) { 237 if (boot_cpu_data.x86_stepping < 9) {
238 /* 238 /*
239 * PPro erratum 26; fixed in stepping 9 and above. 239 * PPro erratum 26; fixed in stepping 9 and above.
240 */ 240 */
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 44f5d79d5105..11881726ed37 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -94,7 +94,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
94 if (boot_cpu_data.x86 == 0x0F && 94 if (boot_cpu_data.x86 == 0x0F &&
95 boot_cpu_data.x86_vendor == X86_VENDOR_AMD && 95 boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
96 boot_cpu_data.x86_model <= 0x05 && 96 boot_cpu_data.x86_model <= 0x05 &&
97 boot_cpu_data.x86_mask < 0x0A) 97 boot_cpu_data.x86_stepping < 0x0A)
98 return 1; 98 return 1;
99 else if (boot_cpu_has(X86_BUG_AMD_APIC_C1E)) 99 else if (boot_cpu_has(X86_BUG_AMD_APIC_C1E))
100 return 1; 100 return 1;
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 30d406146016..e1259f043ae9 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -40,7 +40,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
40 40
41 asm ("cmp %1,%2; sbb %0,%0;" 41 asm ("cmp %1,%2; sbb %0,%0;"
42 :"=r" (mask) 42 :"=r" (mask)
43 :"r"(size),"r" (index) 43 :"g"(size),"r" (index)
44 :"cc"); 44 :"cc");
45 return mask; 45 return mask;
46} 46}
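The constraint change ("r" to "g" for size) lets the compiler pick an immediate or memory operand for the cmp. For readers new to the primitive: cmp size, index sets CF exactly when index < size, and sbb mask, mask then yields all-ones or zero with no branch to mispredict. A hedged usage sketch (the in-tree consumer is array_index_nospec()):

    unsigned long read_clamped(const unsigned long *arr, unsigned long idx,
                               unsigned long size)
    {
            unsigned long mask = array_index_mask_nospec(idx, size);

            /* idx & mask is idx when in range, 0 otherwise -- even under
             * speculation, so a mispredicted bounds check cannot be used
             * to leak out-of-range memory */
            return arr[idx & mask];
    }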
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index 34d99af43994..6804d6642767 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -5,23 +5,20 @@
5#include <linux/stringify.h> 5#include <linux/stringify.h>
6 6
7/* 7/*
8 * Since some emulators terminate on UD2, we cannot use it for WARN. 8 * Despite that some emulators terminate on UD2, we use it for WARN().
9 * Since various instruction decoders disagree on the length of UD1,
10 * we cannot use it either. So use UD0 for WARN.
11 * 9 *
12 * (binutils knows about "ud1" but {en,de}codes it as 2 bytes, whereas 10 * Since various instruction decoders/specs disagree on the encoding of
13 * our kernel decoder thinks it takes a ModRM byte, which seems consistent 11 * UD0/UD1.
14 * with various things like the Intel SDM instruction encoding rules)
15 */ 12 */
16 13
17#define ASM_UD0 ".byte 0x0f, 0xff" 14#define ASM_UD0 ".byte 0x0f, 0xff" /* + ModRM (for Intel) */
18#define ASM_UD1 ".byte 0x0f, 0xb9" /* + ModRM */ 15#define ASM_UD1 ".byte 0x0f, 0xb9" /* + ModRM */
19#define ASM_UD2 ".byte 0x0f, 0x0b" 16#define ASM_UD2 ".byte 0x0f, 0x0b"
20 17
21#define INSN_UD0 0xff0f 18#define INSN_UD0 0xff0f
22#define INSN_UD2 0x0b0f 19#define INSN_UD2 0x0b0f
23 20
24#define LEN_UD0 2 21#define LEN_UD2 2
25 22
26#ifdef CONFIG_GENERIC_BUG 23#ifdef CONFIG_GENERIC_BUG
27 24
@@ -77,7 +74,11 @@ do { \
77 unreachable(); \ 74 unreachable(); \
78} while (0) 75} while (0)
79 76
80#define __WARN_FLAGS(flags) _BUG_FLAGS(ASM_UD0, BUGFLAG_WARNING|(flags)) 77#define __WARN_FLAGS(flags) \
78do { \
79 _BUG_FLAGS(ASM_UD2, BUGFLAG_WARNING|(flags)); \
80 annotate_reachable(); \
81} while (0)
81 82
82#include <asm-generic/bug.h> 83#include <asm-generic/bug.h>
83 84
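With WARN moving from UD0 to UD2, the trap side only has to recognize one well-defined 2-byte opcode (INSN_UD2 / LEN_UD2), and annotate_reachable() tells objtool that execution continues past a warning. A hedged sketch of emitting that opcode (illustrative macro, not the kernel's _BUG_FLAGS plumbing):

    /* Raises #UD; the fault handler can match the 0x0f 0x0b sequence and,
     * for a warning, skip LEN_UD2 bytes and resume */
    #define EMIT_UD2() asm volatile(".byte 0x0f, 0x0b")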
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 70eddb3922ff..736771c9822e 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -148,45 +148,46 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
148 */ 148 */
149static __always_inline __pure bool _static_cpu_has(u16 bit) 149static __always_inline __pure bool _static_cpu_has(u16 bit)
150{ 150{
151 asm_volatile_goto("1: jmp 6f\n" 151 asm_volatile_goto("1: jmp 6f\n"
152 "2:\n" 152 "2:\n"
153 ".skip -(((5f-4f) - (2b-1b)) > 0) * " 153 ".skip -(((5f-4f) - (2b-1b)) > 0) * "
154 "((5f-4f) - (2b-1b)),0x90\n" 154 "((5f-4f) - (2b-1b)),0x90\n"
155 "3:\n" 155 "3:\n"
156 ".section .altinstructions,\"a\"\n" 156 ".section .altinstructions,\"a\"\n"
157 " .long 1b - .\n" /* src offset */ 157 " .long 1b - .\n" /* src offset */
158 " .long 4f - .\n" /* repl offset */ 158 " .long 4f - .\n" /* repl offset */
159 " .word %P1\n" /* always replace */ 159 " .word %P[always]\n" /* always replace */
160 " .byte 3b - 1b\n" /* src len */ 160 " .byte 3b - 1b\n" /* src len */
161 " .byte 5f - 4f\n" /* repl len */ 161 " .byte 5f - 4f\n" /* repl len */
162 " .byte 3b - 2b\n" /* pad len */ 162 " .byte 3b - 2b\n" /* pad len */
163 ".previous\n" 163 ".previous\n"
164 ".section .altinstr_replacement,\"ax\"\n" 164 ".section .altinstr_replacement,\"ax\"\n"
165 "4: jmp %l[t_no]\n" 165 "4: jmp %l[t_no]\n"
166 "5:\n" 166 "5:\n"
167 ".previous\n" 167 ".previous\n"
168 ".section .altinstructions,\"a\"\n" 168 ".section .altinstructions,\"a\"\n"
169 " .long 1b - .\n" /* src offset */ 169 " .long 1b - .\n" /* src offset */
170 " .long 0\n" /* no replacement */ 170 " .long 0\n" /* no replacement */
171 " .word %P0\n" /* feature bit */ 171 " .word %P[feature]\n" /* feature bit */
172 " .byte 3b - 1b\n" /* src len */ 172 " .byte 3b - 1b\n" /* src len */
173 " .byte 0\n" /* repl len */ 173 " .byte 0\n" /* repl len */
174 " .byte 0\n" /* pad len */ 174 " .byte 0\n" /* pad len */
175 ".previous\n" 175 ".previous\n"
176 ".section .altinstr_aux,\"ax\"\n" 176 ".section .altinstr_aux,\"ax\"\n"
177 "6:\n" 177 "6:\n"
178 " testb %[bitnum],%[cap_byte]\n" 178 " testb %[bitnum],%[cap_byte]\n"
179 " jnz %l[t_yes]\n" 179 " jnz %l[t_yes]\n"
180 " jmp %l[t_no]\n" 180 " jmp %l[t_no]\n"
181 ".previous\n" 181 ".previous\n"
182 : : "i" (bit), "i" (X86_FEATURE_ALWAYS), 182 : : [feature] "i" (bit),
183 [bitnum] "i" (1 << (bit & 7)), 183 [always] "i" (X86_FEATURE_ALWAYS),
184 [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3]) 184 [bitnum] "i" (1 << (bit & 7)),
185 : : t_yes, t_no); 185 [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3])
186 t_yes: 186 : : t_yes, t_no);
187 return true; 187t_yes:
188 t_no: 188 return true;
189 return false; 189t_no:
190 return false;
190} 191}
191 192
192#define static_cpu_has(bit) \ 193#define static_cpu_has(bit) \
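The change here is cosmetic but worthwhile: positional %P0/%P1 references become named %[feature]/%[always] operands, which are much harder to mix up in a template this long. A hedged minimal example of the named-operand style combined with asm goto (invented function):

    static inline int is_nonzero(int x)
    {
            asm goto("test %[val], %[val]\n\t"
                     "jnz %l[yes]"
                     : /* no outputs allowed with asm goto */
                     : [val] "r" (x)
                     : "cc"
                     : yes);
            return 0;
    yes:
            return 1;
    }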
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 4d57894635f2..81a1be326571 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -6,6 +6,7 @@
6#include <asm/alternative.h> 6#include <asm/alternative.h>
7#include <asm/alternative-asm.h> 7#include <asm/alternative-asm.h>
8#include <asm/cpufeatures.h> 8#include <asm/cpufeatures.h>
9#include <asm/msr-index.h>
9 10
10#ifdef __ASSEMBLY__ 11#ifdef __ASSEMBLY__
11 12
@@ -164,11 +165,53 @@ static inline void vmexit_fill_RSB(void)
164 165
165static inline void indirect_branch_prediction_barrier(void) 166static inline void indirect_branch_prediction_barrier(void)
166{ 167{
167 alternative_input("", 168 asm volatile(ALTERNATIVE("",
168 "call __ibp_barrier", 169 "movl %[msr], %%ecx\n\t"
169 X86_FEATURE_USE_IBPB, 170 "movl %[val], %%eax\n\t"
170 ASM_NO_INPUT_CLOBBER("eax", "ecx", "edx", "memory")); 171 "movl $0, %%edx\n\t"
172 "wrmsr",
173 X86_FEATURE_USE_IBPB)
174 : : [msr] "i" (MSR_IA32_PRED_CMD),
175 [val] "i" (PRED_CMD_IBPB)
176 : "eax", "ecx", "edx", "memory");
171} 177}
172 178
173#endif /* __ASSEMBLY__ */ 179#endif /* __ASSEMBLY__ */
180
181/*
182 * Below is used in the eBPF JIT compiler and emits the byte sequence
183 * for the following assembly:
184 *
185 * With retpolines configured:
186 *
187 * callq do_rop
188 * spec_trap:
189 * pause
190 * lfence
191 * jmp spec_trap
192 * do_rop:
193 * mov %rax,(%rsp)
194 * retq
195 *
196 * Without retpolines configured:
197 *
198 * jmp *%rax
199 */
200#ifdef CONFIG_RETPOLINE
201# define RETPOLINE_RAX_BPF_JIT_SIZE 17
202# define RETPOLINE_RAX_BPF_JIT() \
203 EMIT1_off32(0xE8, 7); /* callq do_rop */ \
204 /* spec_trap: */ \
205 EMIT2(0xF3, 0x90); /* pause */ \
206 EMIT3(0x0F, 0xAE, 0xE8); /* lfence */ \
207 EMIT2(0xEB, 0xF9); /* jmp spec_trap */ \
208 /* do_rop: */ \
209 EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */ \
210 EMIT1(0xC3); /* retq */
211#else
212# define RETPOLINE_RAX_BPF_JIT_SIZE 2
213# define RETPOLINE_RAX_BPF_JIT() \
214 EMIT2(0xFF, 0xE0); /* jmp *%rax */
215#endif
216
174#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */ 217#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
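Two things land in this hunk: indirect_branch_prediction_barrier() now open-codes the WRMSR inside an alternative instead of calling the (now removed) __ibp_barrier helper, and the retpoline byte sequence for the eBPF JIT is centralized next to the other Spectre plumbing. A hedged sketch of what the IBPB path reduces to when X86_FEATURE_USE_IBPB is set (MSR index and command bit per the kernel headers; function name invented):

    #include <stdint.h>

    #define MSR_IA32_PRED_CMD 0x00000049
    #define PRED_CMD_IBPB     (1u << 0)

    static inline void ibpb(void)
    {
            uint32_t lo = PRED_CMD_IBPB, hi = 0;

            /* wrmsr takes ecx = MSR index, edx:eax = 64-bit value */
            asm volatile("wrmsr"
                         : : "c" (MSR_IA32_PRED_CMD), "a" (lo), "d" (hi)
                         : "memory");
    }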
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index 4baa6bceb232..d652a3808065 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -52,10 +52,6 @@ static inline void clear_page(void *page)
52 52
53void copy_page(void *to, void *from); 53void copy_page(void *to, void *from);
54 54
55#ifdef CONFIG_X86_MCE
56#define arch_unmap_kpfn arch_unmap_kpfn
57#endif
58
59#endif /* !__ASSEMBLY__ */ 55#endif /* !__ASSEMBLY__ */
60 56
61#ifdef CONFIG_X86_VSYSCALL_EMULATION 57#ifdef CONFIG_X86_VSYSCALL_EMULATION
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 892df375b615..554841fab717 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -297,9 +297,9 @@ static inline void __flush_tlb_global(void)
297{ 297{
298 PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel); 298 PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
299} 299}
300static inline void __flush_tlb_single(unsigned long addr) 300static inline void __flush_tlb_one_user(unsigned long addr)
301{ 301{
302 PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr); 302 PVOP_VCALL1(pv_mmu_ops.flush_tlb_one_user, addr);
303} 303}
304 304
305static inline void flush_tlb_others(const struct cpumask *cpumask, 305static inline void flush_tlb_others(const struct cpumask *cpumask,
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 6ec54d01972d..f624f1f10316 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -217,7 +217,7 @@ struct pv_mmu_ops {
217 /* TLB operations */ 217 /* TLB operations */
218 void (*flush_tlb_user)(void); 218 void (*flush_tlb_user)(void);
219 void (*flush_tlb_kernel)(void); 219 void (*flush_tlb_kernel)(void);
220 void (*flush_tlb_single)(unsigned long addr); 220 void (*flush_tlb_one_user)(unsigned long addr);
221 void (*flush_tlb_others)(const struct cpumask *cpus, 221 void (*flush_tlb_others)(const struct cpumask *cpus,
222 const struct flush_tlb_info *info); 222 const struct flush_tlb_info *info);
223 223
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index e67c0620aec2..e55466760ff8 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -61,7 +61,7 @@ void paging_init(void);
61#define kpte_clear_flush(ptep, vaddr) \ 61#define kpte_clear_flush(ptep, vaddr) \
62do { \ 62do { \
63 pte_clear(&init_mm, (vaddr), (ptep)); \ 63 pte_clear(&init_mm, (vaddr), (ptep)); \
64 __flush_tlb_one((vaddr)); \ 64 __flush_tlb_one_kernel((vaddr)); \
65} while (0) 65} while (0)
66 66
67#endif /* !__ASSEMBLY__ */ 67#endif /* !__ASSEMBLY__ */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 793bae7e7ce3..1bd9ed87606f 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -91,7 +91,7 @@ struct cpuinfo_x86 {
91 __u8 x86; /* CPU family */ 91 __u8 x86; /* CPU family */
92 __u8 x86_vendor; /* CPU vendor */ 92 __u8 x86_vendor; /* CPU vendor */
93 __u8 x86_model; 93 __u8 x86_model;
94 __u8 x86_mask; 94 __u8 x86_stepping;
95#ifdef CONFIG_X86_64 95#ifdef CONFIG_X86_64
96 /* Number of 4K pages in DTLB/ITLB combined(in pages): */ 96 /* Number of 4K pages in DTLB/ITLB combined(in pages): */
97 int x86_tlbsize; 97 int x86_tlbsize;
@@ -109,7 +109,7 @@ struct cpuinfo_x86 {
109 char x86_vendor_id[16]; 109 char x86_vendor_id[16];
110 char x86_model_id[64]; 110 char x86_model_id[64];
111 /* in KB - valid for CPUS which support this call: */ 111 /* in KB - valid for CPUS which support this call: */
112 int x86_cache_size; 112 unsigned int x86_cache_size;
113 int x86_cache_alignment; /* In bytes */ 113 int x86_cache_alignment; /* In bytes */
114 /* Cache QoS architectural values: */ 114 /* Cache QoS architectural values: */
115 int x86_cache_max_rmid; /* max index */ 115 int x86_cache_max_rmid; /* max index */
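The x86_mask field always held the CPUID stepping; the rename just says so, and the tree-wide substitutions below follow from it. For reference, a hedged sketch of where the value comes from: CPUID leaf 1 returns stepping in EAX[3:0], model in EAX[7:4], and family in EAX[11:8] (extended family/model fields omitted; code illustrative):

    #include <stdint.h>

    static void cpuid_fms(uint8_t *family, uint8_t *model, uint8_t *stepping)
    {
            uint32_t eax, ebx, ecx, edx;

            asm volatile("cpuid"
                         : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
                         : "a" (1));
            *stepping = eax & 0xf;
            *model    = (eax >> 4) & 0xf;
            *family   = (eax >> 8) & 0xf;
    }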
@@ -977,7 +977,4 @@ bool xen_set_default_idle(void);
977 977
978void stop_this_cpu(void *dummy); 978void stop_this_cpu(void *dummy);
979void df_debug(struct pt_regs *regs, long error_code); 979void df_debug(struct pt_regs *regs, long error_code);
980
981void __ibp_barrier(void);
982
983#endif /* _ASM_X86_PROCESSOR_H */ 980#endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 461f53d27708..a4189762b266 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -129,6 +129,7 @@ static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
129void cpu_disable_common(void); 129void cpu_disable_common(void);
130void native_smp_prepare_boot_cpu(void); 130void native_smp_prepare_boot_cpu(void);
131void native_smp_prepare_cpus(unsigned int max_cpus); 131void native_smp_prepare_cpus(unsigned int max_cpus);
132void calculate_max_logical_packages(void);
132void native_smp_cpus_done(unsigned int max_cpus); 133void native_smp_cpus_done(unsigned int max_cpus);
133void common_cpu_up(unsigned int cpunum, struct task_struct *tidle); 134void common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
134int native_cpu_up(unsigned int cpunum, struct task_struct *tidle); 135int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 2b8f18ca5874..84137c22fdfa 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -140,7 +140,7 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
140#else 140#else
141#define __flush_tlb() __native_flush_tlb() 141#define __flush_tlb() __native_flush_tlb()
142#define __flush_tlb_global() __native_flush_tlb_global() 142#define __flush_tlb_global() __native_flush_tlb_global()
143#define __flush_tlb_single(addr) __native_flush_tlb_single(addr) 143#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
144#endif 144#endif
145 145
146static inline bool tlb_defer_switch_to_init_mm(void) 146static inline bool tlb_defer_switch_to_init_mm(void)
@@ -400,7 +400,7 @@ static inline void __native_flush_tlb_global(void)
400/* 400/*
401 * flush one page in the user mapping 401 * flush one page in the user mapping
402 */ 402 */
403static inline void __native_flush_tlb_single(unsigned long addr) 403static inline void __native_flush_tlb_one_user(unsigned long addr)
404{ 404{
405 u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); 405 u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
406 406
@@ -437,18 +437,31 @@ static inline void __flush_tlb_all(void)
437/* 437/*
438 * flush one page in the kernel mapping 438 * flush one page in the kernel mapping
439 */ 439 */
440static inline void __flush_tlb_one(unsigned long addr) 440static inline void __flush_tlb_one_kernel(unsigned long addr)
441{ 441{
442 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); 442 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
443 __flush_tlb_single(addr); 443
444 /*
445 * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its
446 * paravirt equivalent. Even with PCID, this is sufficient: we only
447 * use PCID if we also use global PTEs for the kernel mapping, and
448 * INVLPG flushes global translations across all address spaces.
449 *
450 * If PTI is on, then the kernel is mapped with non-global PTEs, and
451 * __flush_tlb_one_user() will flush the given address for the current
452 * kernel address space and for its usermode counterpart, but it does
453 * not flush it for other address spaces.
454 */
455 __flush_tlb_one_user(addr);
444 456
445 if (!static_cpu_has(X86_FEATURE_PTI)) 457 if (!static_cpu_has(X86_FEATURE_PTI))
446 return; 458 return;
447 459
448 /* 460 /*
449 * __flush_tlb_single() will have cleared the TLB entry for this ASID, 461 * See above. We need to propagate the flush to all other address
450 * but since kernel space is replicated across all, we must also 462 * spaces. In principle, we only need to propagate it to kernelmode
451 * invalidate all others. 463 * address spaces, but the extra bookkeeping we would need is not
464 * worth it.
452 */ 465 */
453 invalidate_other_asid(); 466 invalidate_other_asid();
454} 467}
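On native hardware the renamed __flush_tlb_one_user() boils down to a single INVLPG, and the new comments explain when that suffices: INVLPG drops the entry for one linear address in the current PCID plus any global entry for it, which is why PTI (non-global kernel PTEs) additionally needs invalidate_other_asid(). A hedged sketch of the underlying instruction (not the kernel helper):

    static inline void invlpg(unsigned long addr)
    {
            /* invalidates this linear address in the current PCID and any
             * global translation for it; other PCIDs keep their copies */
            asm volatile("invlpg (%0)" : : "r" (addr) : "memory");
    }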
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index 197c2e6c7376..099414345865 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -241,24 +241,24 @@
241#define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106 241#define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106
242 242
243struct hv_reenlightenment_control { 243struct hv_reenlightenment_control {
244 u64 vector:8; 244 __u64 vector:8;
245 u64 reserved1:8; 245 __u64 reserved1:8;
246 u64 enabled:1; 246 __u64 enabled:1;
247 u64 reserved2:15; 247 __u64 reserved2:15;
248 u64 target_vp:32; 248 __u64 target_vp:32;
249}; 249};
250 250
251#define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107 251#define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107
252#define HV_X64_MSR_TSC_EMULATION_STATUS 0x40000108 252#define HV_X64_MSR_TSC_EMULATION_STATUS 0x40000108
253 253
254struct hv_tsc_emulation_control { 254struct hv_tsc_emulation_control {
255 u64 enabled:1; 255 __u64 enabled:1;
256 u64 reserved:63; 256 __u64 reserved:63;
257}; 257};
258 258
259struct hv_tsc_emulation_status { 259struct hv_tsc_emulation_status {
260 u64 inprogress:1; 260 __u64 inprogress:1;
261 u64 reserved:63; 261 __u64 reserved:63;
262}; 262};
263 263
264#define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001 264#define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001
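The u64 to __u64 switch matters because this header is UAPI: userspace compiles it too, and only the double-underscore fixed-width types exported via <linux/types.h> are guaranteed to exist outside the kernel. A hedged minimal illustration:

    #include <linux/types.h>    /* exported types: __u8 ... __u64 */

    struct uapi_example {
            __u64 enabled:1;    /* plain u64 is kernel-internal only */
            __u64 reserved:63;
    };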
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 6db28f17ff28..c88e0b127810 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -235,7 +235,7 @@ int amd_cache_northbridges(void)
235 if (boot_cpu_data.x86 == 0x10 && 235 if (boot_cpu_data.x86 == 0x10 &&
236 boot_cpu_data.x86_model >= 0x8 && 236 boot_cpu_data.x86_model >= 0x8 &&
237 (boot_cpu_data.x86_model > 0x9 || 237 (boot_cpu_data.x86_model > 0x9 ||
238 boot_cpu_data.x86_mask >= 0x1)) 238 boot_cpu_data.x86_stepping >= 0x1))
239 amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE; 239 amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
240 240
241 if (boot_cpu_data.x86 == 0x15) 241 if (boot_cpu_data.x86 == 0x15)
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 25ddf02598d2..b203af0855b5 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -546,7 +546,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
546 546
547static u32 hsx_deadline_rev(void) 547static u32 hsx_deadline_rev(void)
548{ 548{
549 switch (boot_cpu_data.x86_mask) { 549 switch (boot_cpu_data.x86_stepping) {
550 case 0x02: return 0x3a; /* EP */ 550 case 0x02: return 0x3a; /* EP */
551 case 0x04: return 0x0f; /* EX */ 551 case 0x04: return 0x0f; /* EX */
552 } 552 }
@@ -556,7 +556,7 @@ static u32 hsx_deadline_rev(void)
556 556
557static u32 bdx_deadline_rev(void) 557static u32 bdx_deadline_rev(void)
558{ 558{
559 switch (boot_cpu_data.x86_mask) { 559 switch (boot_cpu_data.x86_stepping) {
560 case 0x02: return 0x00000011; 560 case 0x02: return 0x00000011;
561 case 0x03: return 0x0700000e; 561 case 0x03: return 0x0700000e;
562 case 0x04: return 0x0f00000c; 562 case 0x04: return 0x0f00000c;
@@ -568,7 +568,7 @@ static u32 bdx_deadline_rev(void)
568 568
569static u32 skx_deadline_rev(void) 569static u32 skx_deadline_rev(void)
570{ 570{
571 switch (boot_cpu_data.x86_mask) { 571 switch (boot_cpu_data.x86_stepping) {
572 case 0x03: return 0x01000136; 572 case 0x03: return 0x01000136;
573 case 0x04: return 0x02000014; 573 case 0x04: return 0x02000014;
574 } 574 }
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 3cc471beb50b..bb6f7a2148d7 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -134,21 +134,40 @@ static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
134{ 134{
135 struct apic_chip_data *apicd = apic_chip_data(irqd); 135 struct apic_chip_data *apicd = apic_chip_data(irqd);
136 struct irq_desc *desc = irq_data_to_desc(irqd); 136 struct irq_desc *desc = irq_data_to_desc(irqd);
137 bool managed = irqd_affinity_is_managed(irqd);
137 138
138 lockdep_assert_held(&vector_lock); 139 lockdep_assert_held(&vector_lock);
139 140
140 trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector, 141 trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector,
141 apicd->cpu); 142 apicd->cpu);
142 143
143 /* Setup the vector move, if required */ 144 /*
144 if (apicd->vector && cpu_online(apicd->cpu)) { 145 * If there is no vector associated or if the associated vector is
146 * the shutdown vector, which is associated to make PCI/MSI
147 * shutdown mode work, then there is nothing to release. Clear out
148 * prev_vector for this and the offlined target case.
149 */
150 apicd->prev_vector = 0;
151 if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR)
152 goto setnew;
153 /*
154 * If the target CPU of the previous vector is online, then mark
155 * the vector as move in progress and store it for cleanup when the
156 * first interrupt on the new vector arrives. If the target CPU is
157 * offline then the regular release mechanism via the cleanup
158 * vector is not possible and the vector can be immediately freed
159 * in the underlying matrix allocator.
160 */
161 if (cpu_online(apicd->cpu)) {
145 apicd->move_in_progress = true; 162 apicd->move_in_progress = true;
146 apicd->prev_vector = apicd->vector; 163 apicd->prev_vector = apicd->vector;
147 apicd->prev_cpu = apicd->cpu; 164 apicd->prev_cpu = apicd->cpu;
148 } else { 165 } else {
149 apicd->prev_vector = 0; 166 irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
167 managed);
150 } 168 }
151 169
170setnew:
152 apicd->vector = newvec; 171 apicd->vector = newvec;
153 apicd->cpu = newcpu; 172 apicd->cpu = newcpu;
154 BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec])); 173 BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec]));
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 46b675aaf20b..f11910b44638 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -1176,16 +1176,25 @@ static void __init decode_gam_rng_tbl(unsigned long ptr)
1176 1176
1177 uv_gre_table = gre; 1177 uv_gre_table = gre;
1178 for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) { 1178 for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
1179 unsigned long size = ((unsigned long)(gre->limit - lgre)
1180 << UV_GAM_RANGE_SHFT);
1181 int order = 0;
1182 char suffix[] = " KMGTPE";
1183
1184 while (size > 9999 && order < sizeof(suffix)) {
1185 size /= 1024;
1186 order++;
1187 }
1188
1179 if (!index) { 1189 if (!index) {
1180 pr_info("UV: GAM Range Table...\n"); 1190 pr_info("UV: GAM Range Table...\n");
1181 pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN"); 1191 pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
1182 } 1192 }
1183 pr_info("UV: %2d: 0x%014lx-0x%014lx %5luG %3d %04x %02x %02x\n", 1193 pr_info("UV: %2d: 0x%014lx-0x%014lx %5lu%c %3d %04x %02x %02x\n",
1184 index++, 1194 index++,
1185 (unsigned long)lgre << UV_GAM_RANGE_SHFT, 1195 (unsigned long)lgre << UV_GAM_RANGE_SHFT,
1186 (unsigned long)gre->limit << UV_GAM_RANGE_SHFT, 1196 (unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
1187 ((unsigned long)(gre->limit - lgre)) >> 1197 size, suffix[order],
1188 (30 - UV_GAM_RANGE_SHFT), /* 64M -> 1G */
1189 gre->type, gre->nasid, gre->sockid, gre->pnode); 1198 gre->type, gre->nasid, gre->sockid, gre->pnode);
1190 1199
1191 lgre = gre->limit; 1200 lgre = gre->limit;
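The new loop pretty-prints each GAM range size with a binary-prefix suffix instead of hard-coding a shift to gigabytes: divide by 1024 until the value fits in four digits, then print the matching character from " KMGTPE". A hedged stand-alone version (the sketch bounds order to the last printable suffix):

    #include <stdio.h>

    static void print_scaled(unsigned long size)
    {
            const char suffix[] = " KMGTPE";
            int order = 0;

            while (size > 9999 && order < (int)sizeof(suffix) - 2) {
                    size /= 1024;  /* scale until it fits in 4 digits */
                    order++;
            }
            printf("%5lu%c\n", size, suffix[order]);
    }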
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index fa1261eefa16..f91ba53e06c8 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -18,7 +18,7 @@ void foo(void)
18 OFFSET(CPUINFO_x86, cpuinfo_x86, x86); 18 OFFSET(CPUINFO_x86, cpuinfo_x86, x86);
19 OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor); 19 OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor);
20 OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model); 20 OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model);
21 OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask); 21 OFFSET(CPUINFO_x86_stepping, cpuinfo_x86, x86_stepping);
22 OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level); 22 OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level);
23 OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability); 23 OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability);
24 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id); 24 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 5bddbdcbc4a3..f0e6456ca7d3 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -119,7 +119,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
119 return; 119 return;
120 } 120 }
121 121
122 if (c->x86_model == 6 && c->x86_mask == 1) { 122 if (c->x86_model == 6 && c->x86_stepping == 1) {
123 const int K6_BUG_LOOP = 1000000; 123 const int K6_BUG_LOOP = 1000000;
124 int n; 124 int n;
125 void (*f_vide)(void); 125 void (*f_vide)(void);
@@ -149,7 +149,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
149 149
150 /* K6 with old style WHCR */ 150 /* K6 with old style WHCR */
151 if (c->x86_model < 8 || 151 if (c->x86_model < 8 ||
152 (c->x86_model == 8 && c->x86_mask < 8)) { 152 (c->x86_model == 8 && c->x86_stepping < 8)) {
153 /* We can only write allocate on the low 508Mb */ 153 /* We can only write allocate on the low 508Mb */
154 if (mbytes > 508) 154 if (mbytes > 508)
155 mbytes = 508; 155 mbytes = 508;
@@ -168,7 +168,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
168 return; 168 return;
169 } 169 }
170 170
171 if ((c->x86_model == 8 && c->x86_mask > 7) || 171 if ((c->x86_model == 8 && c->x86_stepping > 7) ||
172 c->x86_model == 9 || c->x86_model == 13) { 172 c->x86_model == 9 || c->x86_model == 13) {
173 /* The more serious chips .. */ 173 /* The more serious chips .. */
174 174
@@ -221,7 +221,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
221 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx 221 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
222 * As per AMD technical note 27212 0.2 222 * As per AMD technical note 27212 0.2
223 */ 223 */
224 if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) { 224 if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
225 rdmsr(MSR_K7_CLK_CTL, l, h); 225 rdmsr(MSR_K7_CLK_CTL, l, h);
226 if ((l & 0xfff00000) != 0x20000000) { 226 if ((l & 0xfff00000) != 0x20000000) {
227 pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", 227 pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
@@ -241,12 +241,12 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
-	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
-	    (c->x86_mask == 1)))
+	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
+	    (c->x86_stepping == 1)))
 		return;
 
 	/* Duron 670 is valid */
-	if ((c->x86_model == 7) && (c->x86_mask == 0))
+	if ((c->x86_model == 7) && (c->x86_stepping == 0))
 		return;
 
 	/*
@@ -256,8 +256,8 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
 	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
 	 * more.
 	 */
-	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
-	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
+	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
+	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
 	    (c->x86_model > 7))
 		if (cpu_has(c, X86_FEATURE_MP))
 			return;
@@ -628,7 +628,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
 	/* Set MTRR capability flag if appropriate */
 	if (c->x86 == 5)
 		if (c->x86_model == 13 || c->x86_model == 9 ||
-		    (c->x86_model == 8 && c->x86_mask >= 8))
+		    (c->x86_model == 8 && c->x86_stepping >= 8))
 			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 #endif
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
@@ -795,7 +795,7 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
 	 * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
 	 * all up to and including B1.
 	 */
-	if (c->x86_model <= 1 && c->x86_mask <= 1)
+	if (c->x86_model <= 1 && c->x86_stepping <= 1)
 		set_cpu_cap(c, X86_FEATURE_CPB);
 }
 
@@ -906,11 +906,11 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 	/* AMD errata T13 (order #21922) */
 	if ((c->x86 == 6)) {
 		/* Duron Rev A0 */
-		if (c->x86_model == 3 && c->x86_mask == 0)
+		if (c->x86_model == 3 && c->x86_stepping == 0)
 			size = 64;
 		/* Tbird rev A1/A2 */
 		if (c->x86_model == 4 &&
-		    (c->x86_mask == 0 || c->x86_mask == 1))
+		    (c->x86_stepping == 0 || c->x86_stepping == 1))
 			size = 256;
 	}
 	return size;
@@ -1047,7 +1047,7 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
 	}
 
 	/* OSVW unavailable or ID unknown, match family-model-stepping range */
-	ms = (cpu->x86_model << 4) | cpu->x86_mask;
+	ms = (cpu->x86_model << 4) | cpu->x86_stepping;
 	while ((range = *erratum++))
 		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
 		    (ms >= AMD_MODEL_RANGE_START(range)) &&
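
The OSVW fallback above packs the 8-bit model and 4-bit stepping into one value and compares it against per-family [start, end] ranges. A minimal standalone sketch of that match, assuming the kernel's AMD_MODEL_RANGE* macro layout (copied locally so it compiles on its own; the erratum range in main() is made up for illustration):

#include <stdio.h>

#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)

static int in_erratum_range(unsigned int family, unsigned int model,
			    unsigned int stepping, unsigned int range)
{
	/* 8-bit model and 4-bit stepping packed like "ms" above */
	unsigned int ms = (model << 4) | stepping;

	return family == AMD_MODEL_RANGE_FAMILY(range) &&
	       ms >= AMD_MODEL_RANGE_START(range) &&
	       ms <= AMD_MODEL_RANGE_END(range);
}

int main(void)
{
	/* made-up range: family 0x10, any model/stepping */
	unsigned int range = AMD_MODEL_RANGE(0x10, 0x00, 0x0, 0xff, 0xf);

	printf("%d\n", in_erratum_range(0x10, 0x02, 0x3, range)); /* 1 */
	printf("%d\n", in_erratum_range(0x0f, 0x02, 0x3, range)); /* 0 */
	return 0;
}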
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 71949bf2de5a..d71c8b54b696 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -162,8 +162,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
 		return SPECTRE_V2_CMD_NONE;
 	else {
-		ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
-					  sizeof(arg));
+		ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
 		if (ret < 0)
 			return SPECTRE_V2_CMD_AUTO;
 
@@ -175,8 +174,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 		}
 
 		if (i >= ARRAY_SIZE(mitigation_options)) {
-			pr_err("unknown option (%s). Switching to AUTO select\n",
-			       mitigation_options[i].option);
+			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
 			return SPECTRE_V2_CMD_AUTO;
 		}
 	}
@@ -185,8 +183,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 	     cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
 	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
 	    !IS_ENABLED(CONFIG_RETPOLINE)) {
-		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
-		       mitigation_options[i].option);
+		pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
 		return SPECTRE_V2_CMD_AUTO;
 	}
 
@@ -256,14 +253,14 @@ static void __init spectre_v2_select_mitigation(void)
 		goto retpoline_auto;
 		break;
 	}
-	pr_err("kernel not compiled with retpoline; no mitigation available!");
+	pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
 	return;
 
 retpoline_auto:
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
 	retpoline_amd:
 		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
-			pr_err("LFENCE not serializing. Switching to generic retpoline\n");
+			pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
 			goto retpoline_generic;
 		}
 		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
@@ -281,7 +278,7 @@ retpoline_auto:
 	pr_info("%s\n", spectre_v2_strings[mode]);
 
 	/*
-	 * If neither SMEP or KPTI are available, there is a risk of
+	 * If neither SMEP nor PTI are available, there is a risk of
 	 * hitting userspace addresses in the RSB after a context switch
 	 * from a shallow call stack to a deeper one. To prevent this fill
 	 * the entire RSB, even when using IBRS.
@@ -295,21 +292,20 @@ retpoline_auto:
 	if ((!boot_cpu_has(X86_FEATURE_PTI) &&
 	     !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
 		setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
-		pr_info("Filling RSB on context switch\n");
+		pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
 	}
 
 	/* Initialize Indirect Branch Prediction Barrier if supported */
 	if (boot_cpu_has(X86_FEATURE_IBPB)) {
 		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
-		pr_info("Enabling Indirect Branch Prediction Barrier\n");
+		pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
 	}
 }
 
 #undef pr_fmt
 
 #ifdef CONFIG_SYSFS
-ssize_t cpu_show_meltdown(struct device *dev,
-			  struct device_attribute *attr, char *buf)
+ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
 		return sprintf(buf, "Not affected\n");
@@ -318,16 +314,14 @@ ssize_t cpu_show_meltdown(struct device *dev,
 	return sprintf(buf, "Vulnerable\n");
 }
 
-ssize_t cpu_show_spectre_v1(struct device *dev,
-			    struct device_attribute *attr, char *buf)
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
 		return sprintf(buf, "Not affected\n");
 	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
 }
 
-ssize_t cpu_show_spectre_v2(struct device *dev,
-			    struct device_attribute *attr, char *buf)
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
 		return sprintf(buf, "Not affected\n");
@@ -337,9 +331,3 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
 		   spectre_v2_module_string());
 }
 #endif
-
-void __ibp_barrier(void)
-{
-	__wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0);
-}
-EXPORT_SYMBOL_GPL(__ibp_barrier);
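
The cpu_show_meltdown/spectre_v1/spectre_v2 handlers above back the files under /sys/devices/system/cpu/vulnerabilities/. A small userspace reader, assuming a kernel built with CONFIG_SYSFS that exposes these attributes:

#include <stdio.h>

int main(void)
{
	static const char *names[] = { "meltdown", "spectre_v1", "spectre_v2" };
	char path[128], line[256];

	for (int i = 0; i < 3; i++) {
		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/vulnerabilities/%s", names[i]);
		FILE *f = fopen(path, "r");

		if (!f)
			continue;	/* attribute absent on this kernel */
		if (fgets(line, sizeof(line), f))
			printf("%-10s: %s", names[i], line);
		fclose(f);
	}
	return 0;
}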
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index c578cd29c2d2..e5ec0f11c0de 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -140,7 +140,7 @@ static void init_centaur(struct cpuinfo_x86 *c)
 			clear_cpu_cap(c, X86_FEATURE_TSC);
 			break;
 		case 8:
-			switch (c->x86_mask) {
+			switch (c->x86_stepping) {
 			default:
 				name = "2";
 				break;
@@ -215,7 +215,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 	 * - Note, it seems this may only be in engineering samples.
 	 */
 	if ((c->x86 == 6) && (c->x86_model == 9) &&
-	    (c->x86_mask == 1) && (size == 65))
+	    (c->x86_stepping == 1) && (size == 65))
 		size -= 1;
 	return size;
 }
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d63f4b5706e4..824aee0117bb 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -731,7 +731,7 @@ void cpu_detect(struct cpuinfo_x86 *c)
 		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
 		c->x86 = x86_family(tfms);
 		c->x86_model = x86_model(tfms);
-		c->x86_mask = x86_stepping(tfms);
+		c->x86_stepping = x86_stepping(tfms);
 
 		if (cap0 & (1<<19)) {
 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
@@ -1184,9 +1184,9 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 	int i;
 
 	c->loops_per_jiffy = loops_per_jiffy;
-	c->x86_cache_size = -1;
+	c->x86_cache_size = 0;
 	c->x86_vendor = X86_VENDOR_UNKNOWN;
-	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
+	c->x86_model = c->x86_stepping = 0;	/* So far unknown... */
 	c->x86_vendor_id[0] = '\0'; /* Unset */
 	c->x86_model_id[0] = '\0';  /* Unset */
 	c->x86_max_cores = 1;
@@ -1378,8 +1378,8 @@ void print_cpu_info(struct cpuinfo_x86 *c)
 
 	pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
 
-	if (c->x86_mask || c->cpuid_level >= 0)
-		pr_cont(", stepping: 0x%x)\n", c->x86_mask);
+	if (c->x86_stepping || c->cpuid_level >= 0)
+		pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
 	else
 		pr_cont(")\n");
 }
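
For reference, cpu_detect() above splits CPUID leaf 1's EAX ("tfms") into the family, model and stepping values that the renamed x86_stepping member stores. A standalone sketch of that decoding, mirroring the kernel's x86_family()/x86_model()/x86_stepping() helpers (recoded here so it compiles on its own; the sample signature is a plausible Kaby Lake value):

#include <stdio.h>

static unsigned int x86_family(unsigned int sig)
{
	unsigned int x86 = (sig >> 8) & 0xf;

	if (x86 == 0xf)
		x86 += (sig >> 20) & 0xff;	/* extended family */
	return x86;
}

static unsigned int x86_model(unsigned int sig)
{
	unsigned int fam = x86_family(sig);
	unsigned int model = (sig >> 4) & 0xf;

	if (fam >= 0x6)
		model += ((sig >> 16) & 0xf) << 4;	/* extended model */
	return model;
}

static unsigned int x86_stepping(unsigned int sig)
{
	return sig & 0xf;
}

int main(void)
{
	unsigned int tfms = 0x000906e9;

	printf("family %u, model 0x%x, stepping %u\n",
	       x86_family(tfms), x86_model(tfms), x86_stepping(tfms));
	return 0;
}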
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 6b4bb335641f..8949b7ae6d92 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -215,7 +215,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
 
 	/* common case step number/rev -- exceptions handled below */
 	c->x86_model = (dir1 >> 4) + 1;
-	c->x86_mask = dir1 & 0xf;
+	c->x86_stepping = dir1 & 0xf;
 
 	/* Now cook; the original recipe is by Channing Corn, from Cyrix.
 	 * We do the same thing for each generation: we work out
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 319bf989fad1..d19e903214b4 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -116,14 +116,13 @@ struct sku_microcode {
 	u32 microcode;
 };
 static const struct sku_microcode spectre_bad_microcodes[] = {
-	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x0B,	0x84 },
-	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x0A,	0x84 },
-	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x09,	0x84 },
-	{ INTEL_FAM6_KABYLAKE_MOBILE,	0x0A,	0x84 },
-	{ INTEL_FAM6_KABYLAKE_MOBILE,	0x09,	0x84 },
+	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x0B,	0x80 },
+	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x0A,	0x80 },
+	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x09,	0x80 },
+	{ INTEL_FAM6_KABYLAKE_MOBILE,	0x0A,	0x80 },
+	{ INTEL_FAM6_KABYLAKE_MOBILE,	0x09,	0x80 },
 	{ INTEL_FAM6_SKYLAKE_X,		0x03,	0x0100013e },
 	{ INTEL_FAM6_SKYLAKE_X,		0x04,	0x0200003c },
-	{ INTEL_FAM6_SKYLAKE_MOBILE,	0x03,	0xc2 },
 	{ INTEL_FAM6_SKYLAKE_DESKTOP,	0x03,	0xc2 },
 	{ INTEL_FAM6_BROADWELL_CORE,	0x04,	0x28 },
 	{ INTEL_FAM6_BROADWELL_GT3E,	0x01,	0x1b },
@@ -136,8 +135,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = {
 	{ INTEL_FAM6_HASWELL_X,		0x02,	0x3b },
 	{ INTEL_FAM6_HASWELL_X,		0x04,	0x10 },
 	{ INTEL_FAM6_IVYBRIDGE_X,	0x04,	0x42a },
-	/* Updated in the 20180108 release; blacklist until we know otherwise */
-	{ INTEL_FAM6_ATOM_GEMINI_LAKE,	0x01,	0x22 },
 	/* Observed in the wild */
 	{ INTEL_FAM6_SANDYBRIDGE_X,	0x06,	0x61b },
 	{ INTEL_FAM6_SANDYBRIDGE_X,	0x07,	0x712 },
@@ -149,7 +146,7 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
 
 	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
 		if (c->x86_model == spectre_bad_microcodes[i].model &&
-		    c->x86_mask == spectre_bad_microcodes[i].stepping)
+		    c->x86_stepping == spectre_bad_microcodes[i].stepping)
 			return (c->microcode <= spectre_bad_microcodes[i].microcode);
 	}
 	return false;
@@ -196,7 +193,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 	 * need the microcode to have already been loaded... so if it is
 	 * not, recommend a BIOS update and disable large pages.
 	 */
-	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
+	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
 	    c->microcode < 0x20e) {
 		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
 		clear_cpu_cap(c, X86_FEATURE_PSE);
@@ -212,7 +209,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 
 	/* CPUID workaround for 0F33/0F34 CPU */
 	if (c->x86 == 0xF && c->x86_model == 0x3
-	    && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
+	    && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
 		c->x86_phys_bits = 36;
 
 	/*
@@ -310,7 +307,7 @@ int ppro_with_ram_bug(void)
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
 	    boot_cpu_data.x86 == 6 &&
 	    boot_cpu_data.x86_model == 1 &&
-	    boot_cpu_data.x86_mask < 8) {
+	    boot_cpu_data.x86_stepping < 8) {
 		pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
 		return 1;
 	}
@@ -327,7 +324,7 @@ static void intel_smp_check(struct cpuinfo_x86 *c)
 	 * Mask B, Pentium, but not Pentium MMX
 	 */
 	if (c->x86 == 5 &&
-	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
+	    c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
 	    c->x86_model <= 3) {
 		/*
 		 * Remember we have B step Pentia with bugs
@@ -370,7 +367,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
 	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
 	 * model 3 mask 3
 	 */
-	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
+	if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
 		clear_cpu_cap(c, X86_FEATURE_SEP);
 
 	/*
@@ -388,7 +385,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
 	 * P4 Xeon erratum 037 workaround.
 	 * Hardware prefetcher may cause stale data to be loaded into the cache.
 	 */
-	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
+	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
 		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
 				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
 			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
@@ -403,7 +400,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
 	 * Specification Update").
 	 */
 	if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
-	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
+	    (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
 		set_cpu_bug(c, X86_BUG_11AP);
 
 
@@ -650,7 +647,7 @@ static void init_intel(struct cpuinfo_x86 *c)
 	case 6:
 		if (l2 == 128)
 			p = "Celeron (Mendocino)";
-		else if (c->x86_mask == 0 || c->x86_mask == 5)
+		else if (c->x86_stepping == 0 || c->x86_stepping == 5)
 			p = "Celeron-A";
 		break;
 
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 410629f10ad3..589b948e6e01 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -819,7 +819,7 @@ static __init void rdt_quirks(void)
 		cache_alloc_hsw_probe();
 		break;
 	case INTEL_FAM6_SKYLAKE_X:
-		if (boot_cpu_data.x86_mask <= 4)
+		if (boot_cpu_data.x86_stepping <= 4)
 			set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
 	}
 }
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index bdab7d2f51af..fca759d272a1 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -1804,6 +1804,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
 		goto out_common_fail;
 	}
 	closid = ret;
+	ret = 0;
 
 	rdtgrp->closid = closid;
 	list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index aa0d5df9dc60..e956eb267061 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -115,4 +115,19 @@ static inline void mce_unregister_injector_chain(struct notifier_block *nb) { }
 
 extern struct mca_config mca_cfg;
 
+#ifndef CONFIG_X86_64
+/*
+ * On 32-bit systems it would be difficult to safely unmap a poison page
+ * from the kernel 1:1 map because there are no non-canonical addresses that
+ * we can use to refer to the address without risking a speculative access.
+ * However, this isn't much of an issue because:
+ * 1) Few unmappable pages are in the 1:1 map. Most are in HIGHMEM which
+ *    are only mapped into the kernel as needed
+ * 2) Few people would run a 32-bit kernel on a machine that supports
+ *    recoverable errors because they have too much memory to boot 32-bit.
+ */
+static inline void mce_unmap_kpfn(unsigned long pfn) {}
+#define mce_unmap_kpfn mce_unmap_kpfn
+#endif
+
 #endif /* __X86_MCE_INTERNAL_H__ */
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 3a8e88a611eb..8ff94d1e2dce 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -105,6 +105,10 @@ static struct irq_work mce_irq_work;
 
 static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
 
+#ifndef mce_unmap_kpfn
+static void mce_unmap_kpfn(unsigned long pfn);
+#endif
+
 /*
  * CPU/chipset specific EDAC code can register a notifier call here to print
  * MCE errors in a human-readable form.
@@ -234,7 +238,7 @@ static void __print_mce(struct mce *m)
 			m->cs, m->ip);
 
 		if (m->cs == __KERNEL_CS)
-			pr_cont("{%pS}", (void *)m->ip);
+			pr_cont("{%pS}", (void *)(unsigned long)m->ip);
 		pr_cont("\n");
 	}
 
@@ -590,7 +594,8 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
 
 	if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
 		pfn = mce->addr >> PAGE_SHIFT;
-		memory_failure(pfn, 0);
+		if (!memory_failure(pfn, 0))
+			mce_unmap_kpfn(pfn);
 	}
 
 	return NOTIFY_OK;
@@ -1057,12 +1062,13 @@ static int do_memory_failure(struct mce *m)
 	ret = memory_failure(m->addr >> PAGE_SHIFT, flags);
 	if (ret)
 		pr_err("Memory error not recovered");
+	else
+		mce_unmap_kpfn(m->addr >> PAGE_SHIFT);
 	return ret;
 }
 
-#if defined(arch_unmap_kpfn) && defined(CONFIG_MEMORY_FAILURE)
-
-void arch_unmap_kpfn(unsigned long pfn)
+#ifndef mce_unmap_kpfn
+static void mce_unmap_kpfn(unsigned long pfn)
 {
 	unsigned long decoy_addr;
 
@@ -1073,7 +1079,7 @@ void arch_unmap_kpfn(unsigned long pfn)
 	 * We would like to just call:
 	 *	set_memory_np((unsigned long)pfn_to_kaddr(pfn), 1);
 	 * but doing that would radically increase the odds of a
-	 * speculative access to the posion page because we'd have
+	 * speculative access to the poison page because we'd have
 	 * the virtual address of the kernel 1:1 mapping sitting
 	 * around in registers.
 	 * Instead we get tricky. We create a non-canonical address
@@ -1098,7 +1104,6 @@ void arch_unmap_kpfn(unsigned long pfn)
 
 	if (set_memory_np(decoy_addr, 1))
 		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
-
 }
 #endif
 
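
The comment in mce_unmap_kpfn() above alludes to the decoy-address trick: flip a high bit of the 1:1-map virtual address so the value kept in registers is non-canonical and can never be speculatively dereferenced, while still identifying the page to unmap. A sketch of just the address arithmetic; the PAGE_OFFSET base and the choice of bit 63 are illustrative assumptions, and the kernel checks at build time that a spare virtual bit actually exists:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_OFFSET	0xffff880000000000ULL	/* illustrative 1:1-map base */

static uint64_t decoy_of(uint64_t pfn)
{
	uint64_t vaddr = PAGE_OFFSET + (pfn << PAGE_SHIFT);

	/* Flipping bit 63 yields a non-canonical alias: a stray
	 * speculative load through it cannot reach the poison page,
	 * yet the low bits still identify the mapping to invalidate. */
	return vaddr ^ (1ULL << 63);
}

int main(void)
{
	uint64_t pfn = 0x12345;

	printf("1:1 vaddr: 0x%llx\n",
	       (unsigned long long)(PAGE_OFFSET + (pfn << PAGE_SHIFT)));
	printf("decoy:     0x%llx\n", (unsigned long long)decoy_of(pfn));
	return 0;
}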
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index f7c55b0e753a..a15db2b4e0d6 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -921,7 +921,7 @@ static bool is_blacklisted(unsigned int cpu)
 	 */
 	if (c->x86 == 6 &&
 	    c->x86_model == INTEL_FAM6_BROADWELL_X &&
-	    c->x86_mask == 0x01 &&
+	    c->x86_stepping == 0x01 &&
 	    llc_size_per_core > 2621440 &&
 	    c->microcode < 0x0b000021) {
 		pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
@@ -944,7 +944,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
 		return UCODE_NFOUND;
 
 	sprintf(name, "intel-ucode/%02x-%02x-%02x",
-		c->x86, c->x86_model, c->x86_mask);
+		c->x86, c->x86_model, c->x86_stepping);
 
 	if (request_firmware_direct(&firmware, name, device)) {
 		pr_debug("data file %s load failed\n", name);
@@ -982,7 +982,7 @@ static struct microcode_ops microcode_intel_ops = {
 
 static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
 {
-	u64 llc_size = c->x86_cache_size * 1024;
+	u64 llc_size = c->x86_cache_size * 1024ULL;
 
 	do_div(llc_size, c->x86_max_cores);
 
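
The 1024ULL suffix added in calc_llc_size_per_core() matters because x86_cache_size is now a 32-bit unsigned field: "size * 1024" would be computed in 32-bit arithmetic and could wrap before being widened to u64 for do_div(). A sketch of the promotion hazard with a deliberately oversized, hypothetical cache size:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical, deliberately huge cache size in KB */
	unsigned int size_kb = 5000000;

	uint64_t wrong = size_kb * 1024;	/* 32-bit multiply, wraps */
	uint64_t right = size_kb * 1024ULL;	/* widened before multiplying */

	printf("wrong: %llu\nright: %llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}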
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index fdc55215d44d..e12ee86906c6 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -859,7 +859,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size,
 	 */
 	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
 	    boot_cpu_data.x86_model == 1 &&
-	    boot_cpu_data.x86_mask <= 7) {
+	    boot_cpu_data.x86_stepping <= 7) {
 		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
 			pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
 			return -EINVAL;
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 40d5a8a75212..7468de429087 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -711,8 +711,8 @@ void __init mtrr_bp_init(void)
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
 	    boot_cpu_data.x86 == 0xF &&
 	    boot_cpu_data.x86_model == 0x3 &&
-	    (boot_cpu_data.x86_mask == 0x3 ||
-	     boot_cpu_data.x86_mask == 0x4))
+	    (boot_cpu_data.x86_stepping == 0x3 ||
+	     boot_cpu_data.x86_stepping == 0x4))
 		phys_addr = 36;
 
 	size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index e7ecedafa1c8..2c8522a39ed5 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -72,8 +72,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		   c->x86_model,
 		   c->x86_model_id[0] ? c->x86_model_id : "unknown");
 
-	if (c->x86_mask || c->cpuid_level >= 0)
-		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
+	if (c->x86_stepping || c->cpuid_level >= 0)
+		seq_printf(m, "stepping\t: %d\n", c->x86_stepping);
 	else
 		seq_puts(m, "stepping\t: unknown\n");
 	if (c->microcode)
@@ -91,8 +91,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	}
 
 	/* Cache size */
-	if (c->x86_cache_size >= 0)
-		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
+	if (c->x86_cache_size)
+		seq_printf(m, "cache size\t: %u KB\n", c->x86_cache_size);
 
 	show_cpuinfo_core(m, c, cpu);
 	show_cpuinfo_misc(m, c);
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index c29020907886..b59e4fb40fd9 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -37,7 +37,7 @@
 #define X86		new_cpu_data+CPUINFO_x86
 #define X86_VENDOR	new_cpu_data+CPUINFO_x86_vendor
 #define X86_MODEL	new_cpu_data+CPUINFO_x86_model
-#define X86_MASK	new_cpu_data+CPUINFO_x86_mask
+#define X86_STEPPING	new_cpu_data+CPUINFO_x86_stepping
 #define X86_HARD_MATH	new_cpu_data+CPUINFO_hard_math
 #define X86_CPUID	new_cpu_data+CPUINFO_cpuid_level
 #define X86_CAPABILITY	new_cpu_data+CPUINFO_x86_capability
@@ -332,7 +332,7 @@ ENTRY(startup_32_smp)
 	shrb $4,%al
 	movb %al,X86_MODEL
 	andb $0x0f,%cl		# mask mask revision
-	movb %cl,X86_MASK
+	movb %cl,X86_STEPPING
 	movl %edx,X86_CAPABILITY
 
 .Lis486:
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 1f790cf9d38f..3b7427aa7d85 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -542,6 +542,7 @@ int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr,
 			goto overflow;
 		break;
 	case R_X86_64_PC32:
+	case R_X86_64_PLT32:
 		value -= (u64)address;
 		*(u32 *)location = value;
 		break;
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index da0c160e5589..f58336af095c 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -191,6 +191,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 				goto overflow;
 			break;
 		case R_X86_64_PC32:
+		case R_X86_64_PLT32:
 			if (*(u32 *)loc != 0)
 				goto invalid_relocation;
 			val -= (u64)loc;
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 27d0a1712663..f1c5eb99d445 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -410,7 +410,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
 	processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
 	processor.cpuflag = CPU_ENABLED;
 	processor.cpufeature = (boot_cpu_data.x86 << 8) |
-	    (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
+	    (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
 	processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
 	processor.reserved[0] = 0;
 	processor.reserved[1] = 0;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 041096bdef86..99dc79e76bdc 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -200,9 +200,9 @@ static void native_flush_tlb_global(void)
 	__native_flush_tlb_global();
 }
 
-static void native_flush_tlb_single(unsigned long addr)
+static void native_flush_tlb_one_user(unsigned long addr)
 {
-	__native_flush_tlb_single(addr);
+	__native_flush_tlb_one_user(addr);
 }
 
 struct static_key paravirt_steal_enabled;
@@ -401,7 +401,7 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_init = {
 
 	.flush_tlb_user = native_flush_tlb,
 	.flush_tlb_kernel = native_flush_tlb_global,
-	.flush_tlb_single = native_flush_tlb_single,
+	.flush_tlb_one_user = native_flush_tlb_one_user,
 	.flush_tlb_others = native_flush_tlb_others,
 
 	.pgd_alloc = __paravirt_pgd_alloc,
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 6f27facbaa9b..ff99e2b6fc54 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1281,11 +1281,10 @@ void __init native_smp_prepare_boot_cpu(void)
 	cpu_set_state_online(me);
 }
 
-void __init native_smp_cpus_done(unsigned int max_cpus)
+void __init calculate_max_logical_packages(void)
 {
 	int ncpus;
 
-	pr_debug("Boot done\n");
 	/*
 	 * Today neither Intel nor AMD support heterogenous systems so
 	 * extrapolate the boot cpu's data to all packages.
@@ -1293,6 +1292,13 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
 	ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
 	__max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus);
 	pr_info("Max logical packages: %u\n", __max_logical_packages);
+}
+
+void __init native_smp_cpus_done(unsigned int max_cpus)
+{
+	pr_debug("Boot done\n");
+
+	calculate_max_logical_packages();
 
 	if (x86_has_numa_in_package)
 		set_sched_topology(x86_numa_in_package_topology);
@@ -1430,8 +1436,8 @@ static void remove_siblinginfo(int cpu)
 	cpumask_clear(cpu_llc_shared_mask(cpu));
 	cpumask_clear(topology_sibling_cpumask(cpu));
 	cpumask_clear(topology_core_cpumask(cpu));
-	c->phys_proc_id = 0;
 	c->cpu_core_id = 0;
+	c->booted_cores = 0;
 	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
 	recompute_smt_state();
 }
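
calculate_max_logical_packages() above is a one-line estimate: round nr_cpu_ids up by the number of logical CPUs per package seen on the boot CPU. The same arithmetic as a standalone sketch with made-up topology numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int nr_cpu_ids = 56;	/* possible logical CPUs, made up */
	unsigned int booted_cores = 14;	/* cores on the boot package */
	unsigned int smt_threads = 2;	/* threads per core */
	unsigned int ncpus = booted_cores * smt_threads;

	printf("max logical packages: %u\n", DIV_ROUND_UP(nr_cpu_ids, ncpus));
	return 0;
}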
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 446c9ef8cfc3..3d9b2308e7fa 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -181,7 +181,7 @@ int fixup_bug(struct pt_regs *regs, int trapnr)
 		break;
 
 	case BUG_TRAP_TYPE_WARN:
-		regs->ip += LEN_UD0;
+		regs->ip += LEN_UD2;
 		return 1;
 	}
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8eca1d04aeb8..46ff304140c7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5080,7 +5080,7 @@ void kvm_mmu_uninit_vm(struct kvm *kvm)
 typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
 
 /* The caller should hold mmu-lock before calling this function. */
-static bool
+static __always_inline bool
 slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			slot_level_handler fn, int start_level, int end_level,
 			gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
@@ -5110,7 +5110,7 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
 	return flush;
 }
 
-static bool
+static __always_inline bool
 slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
 		  slot_level_handler fn, int start_level, int end_level,
 		  bool lock_flush_tlb)
@@ -5121,7 +5121,7 @@ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			lock_flush_tlb);
 }
 
-static bool
+static __always_inline bool
 slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
 		      slot_level_handler fn, bool lock_flush_tlb)
 {
@@ -5129,7 +5129,7 @@ slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
 				 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
 }
 
-static bool
+static __always_inline bool
 slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			slot_level_handler fn, bool lock_flush_tlb)
 {
@@ -5137,7 +5137,7 @@ slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
 				 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
 }
 
-static bool
+static __always_inline bool
 slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
 		 slot_level_handler fn, bool lock_flush_tlb)
 {
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f427723dc7db..3dec126aa302 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -10136,7 +10136,10 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
 				(unsigned long)(vmcs12->posted_intr_desc_addr &
 				(PAGE_SIZE - 1)));
 	}
-	if (!nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
+	if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
+		vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
+			      CPU_BASED_USE_MSR_BITMAPS);
+	else
 		vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
 				CPU_BASED_USE_MSR_BITMAPS);
 }
@@ -10224,8 +10227,8 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
 	 * updated to reflect this when L1 (or its L2s) actually write to
 	 * the MSR.
 	 */
-	bool pred_cmd = msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
-	bool spec_ctrl = msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
+	bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
+	bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
 
 	/* Nothing to do if the MSR bitmap is not in use. */
 	if (!cpu_has_vmx_msr_bitmap() ||
diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c
index d6f848d1211d..2dd1fe13a37b 100644
--- a/arch/x86/lib/cpu.c
+++ b/arch/x86/lib/cpu.c
@@ -18,7 +18,7 @@ unsigned int x86_model(unsigned int sig)
 {
 	unsigned int fam, model;
 
 	fam = x86_family(sig);
 
 	model = (sig >> 4) & 0xf;
 
diff --git a/arch/x86/lib/error-inject.c b/arch/x86/lib/error-inject.c
index 7b881d03d0dd..3cdf06128d13 100644
--- a/arch/x86/lib/error-inject.c
+++ b/arch/x86/lib/error-inject.c
@@ -7,6 +7,7 @@ asmlinkage void just_return_func(void);
 
 asm(
 	".type just_return_func, @function\n"
+	".globl just_return_func\n"
 	"just_return_func:\n"
 	"	ret\n"
 	".size just_return_func, .-just_return_func\n"
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 1ab42c852069..8b72923f1d35 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -256,7 +256,7 @@ static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
 	 * It's enough to flush this one mapping.
 	 * (PGE mappings get flushed as well)
 	 */
-	__flush_tlb_one(vaddr);
+	__flush_tlb_one_kernel(vaddr);
 }
 
 void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
@@ -1193,8 +1193,8 @@ void __init mem_init(void)
 	register_page_bootmem_info();
 
 	/* Register memory areas for /proc/kcore */
-	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
-		   PAGE_SIZE, KCORE_OTHER);
+	if (get_gate_vma(&init_mm))
+		kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);
 
 	mem_init_print_info(NULL);
 }
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index c45b6ec5357b..e2db83bebc3b 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -820,5 +820,5 @@ void __init __early_set_fixmap(enum fixed_addresses idx,
 		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
 	else
 		pte_clear(&init_mm, addr, pte);
-	__flush_tlb_one(addr);
+	__flush_tlb_one_kernel(addr);
 }
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 58477ec3d66d..7c8686709636 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -168,7 +168,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
 		return -1;
 	}
 
-	__flush_tlb_one(f->addr);
+	__flush_tlb_one_kernel(f->addr);
 	return 0;
 }
 
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index c3c5274410a9..9bb7f0ab9fe6 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -63,7 +63,7 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
 	 * It's enough to flush this one mapping.
 	 * (PGE mappings get flushed as well)
 	 */
-	__flush_tlb_one(vaddr);
+	__flush_tlb_one_kernel(vaddr);
 }
 
 unsigned long __FIXADDR_TOP = 0xfffff000;
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 8dcc0607f805..7f1a51399674 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -498,7 +498,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
 	 * flush that changes context.tlb_gen from 2 to 3. If they get
 	 * processed on this CPU in reverse order, we'll see
 	 * local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
-	 * If we were to use __flush_tlb_single() and set local_tlb_gen to
+	 * If we were to use __flush_tlb_one_user() and set local_tlb_gen to
 	 * 3, we'd be break the invariant: we'd update local_tlb_gen above
 	 * 1 without the full flush that's needed for tlb_gen 2.
 	 *
@@ -519,7 +519,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
 
 		addr = f->start;
 		while (addr < f->end) {
-			__flush_tlb_single(addr);
+			__flush_tlb_one_user(addr);
 			addr += PAGE_SIZE;
 		}
 		if (local)
@@ -666,7 +666,7 @@ static void do_kernel_range_flush(void *info)
 
 	/* flush range by one by one 'invlpg' */
 	for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
-		__flush_tlb_one(addr);
+		__flush_tlb_one_kernel(addr);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 4923d92f918d..45e4eb5bcbb2 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -13,6 +13,7 @@
 #include <linux/if_vlan.h>
 #include <asm/cacheflush.h>
 #include <asm/set_memory.h>
+#include <asm/nospec-branch.h>
 #include <linux/bpf.h>
 
 /*
@@ -290,7 +291,7 @@ static void emit_bpf_tail_call(u8 **pprog)
 	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
 	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
 	      offsetof(struct bpf_array, map.max_entries));
-#define OFFSET1 43 /* number of bytes to jump */
+#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* number of bytes to jump */
 	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
 	label1 = cnt;
 
@@ -299,7 +300,7 @@ static void emit_bpf_tail_call(u8 **pprog)
 	 */
 	EMIT2_off32(0x8B, 0x85, 36);              /* mov eax, dword ptr [rbp + 36] */
 	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
-#define OFFSET2 32
+#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
 	EMIT2(X86_JA, OFFSET2);                   /* ja out */
 	label2 = cnt;
 	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
@@ -313,7 +314,7 @@ static void emit_bpf_tail_call(u8 **pprog)
 	 *	goto out;
 	 */
 	EMIT3(0x48, 0x85, 0xC0);                  /* test rax,rax */
-#define OFFSET3 10
+#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
 	EMIT2(X86_JE, OFFSET3);                   /* je out */
 	label3 = cnt;
 
@@ -326,7 +327,7 @@ static void emit_bpf_tail_call(u8 **pprog)
 	 * rdi == ctx (1st arg)
 	 * rax == prog->bpf_func + prologue_size
 	 */
-	EMIT2(0xFF, 0xE0);                        /* jmp rax */
+	RETPOLINE_RAX_BPF_JIT();
 
 	/* out: */
 	BUILD_BUG_ON(cnt - label1 != OFFSET1);
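
The reworked OFFSETn macros above must stay equal to the number of bytes the JIT emits between each forward jump and the out label, which is exactly what the BUILD_BUG_ON() checks enforce; the retpoline thunk replacing "jmp rax" lengthens that span by RETPOLINE_RAX_BPF_JIT_SIZE. A sketch of the invariant with an assumed thunk size (the real constant lives in asm/nospec-branch.h):

#include <assert.h>
#include <stdio.h>

#define RETPOLINE_RAX_BPF_JIT_SIZE	17	/* assumed thunk size */
#define OFFSET3	(8 + RETPOLINE_RAX_BPF_JIT_SIZE)

int main(void)
{
	int cnt = 0, label3;

	cnt += 2;			/* je out */
	label3 = cnt;
	cnt += 4;			/* mov rax, [rax + bpf_func offset] */
	cnt += 4;			/* add rax, prologue_size */
	cnt += RETPOLINE_RAX_BPF_JIT_SIZE; /* thunk replacing "jmp rax" */

	assert(cnt - label3 == OFFSET3);   /* mirrors the BUILD_BUG_ON() */
	printf("offsets consistent\n");
	return 0;
}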
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 174c59774cc9..a7a7677265b6 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -460,7 +460,7 @@ static int nmi_setup(void)
 		goto fail;
 
 	for_each_possible_cpu(cpu) {
-		if (!cpu)
+		if (!IS_ENABLED(CONFIG_SMP) || !cpu)
 			continue;
 
 		memcpy(per_cpu(cpu_msrs, cpu).counters,
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index c2e9285d1bf1..db77e087adaf 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -299,7 +299,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
 			local_flush_tlb();
 			stat->d_alltlb++;
 		} else {
-			__flush_tlb_single(msg->address);
+			__flush_tlb_one_user(msg->address);
 			stat->d_onetlb++;
 		}
 		stat->d_requestee++;
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 5d73c443e778..220e97841e49 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -770,9 +770,12 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
 		break;
 
 	case R_X86_64_PC32:
+	case R_X86_64_PLT32:
 		/*
 		 * PC relative relocations don't need to be adjusted unless
 		 * referencing a percpu symbol.
+		 *
+		 * NB: R_X86_64_PLT32 can be treated as R_X86_64_PC32.
 		 */
 		if (is_percpu_sym(sym, symname))
 			add_reloc(&relocs32neg, offset);
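
The NB comment above holds because, for a symbol the kernel resolves itself, both R_X86_64_PC32 and R_X86_64_PLT32 reduce to the same 32-bit PC-relative displacement, S + A - P. A sketch of that computation with made-up addresses; -4 is the customary addend, since the displacement is measured from the end of the 4-byte field the instruction encodes:

#include <stdio.h>
#include <stdint.h>

static int32_t pc_relative(uint64_t sym, int64_t addend, uint64_t place)
{
	return (int32_t)(sym + addend - place);		/* S + A - P */
}

int main(void)
{
	uint64_t sym   = 0xffffffff81020304ULL;	/* made-up target */
	uint64_t place = 0xffffffff81000010ULL;	/* made-up call site */

	printf("disp32 = 0x%x\n", (unsigned int)pc_relative(sym, -4, place));
	return 0;
}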
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index d85076223a69..aae88fec9941 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1300,12 +1300,12 @@ static void xen_flush_tlb(void)
 	preempt_enable();
 }
 
-static void xen_flush_tlb_single(unsigned long addr)
+static void xen_flush_tlb_one_user(unsigned long addr)
 {
 	struct mmuext_op *op;
 	struct multicall_space mcs;
 
-	trace_xen_mmu_flush_tlb_single(addr);
+	trace_xen_mmu_flush_tlb_one_user(addr);
 
 	preempt_disable();
 
@@ -2370,7 +2370,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 
 	.flush_tlb_user = xen_flush_tlb,
 	.flush_tlb_kernel = xen_flush_tlb,
-	.flush_tlb_single = xen_flush_tlb_single,
+	.flush_tlb_one_user = xen_flush_tlb_one_user,
 	.flush_tlb_others = xen_flush_tlb_others,
 
 	.pgd_alloc = xen_pgd_alloc,
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 77c959cf81e7..7a43b2ae19f1 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -122,6 +122,8 @@ void __init xen_smp_cpus_done(unsigned int max_cpus)
 
 	if (xen_hvm_domain())
 		native_smp_cpus_done(max_cpus);
+	else
+		calculate_max_logical_packages();
 
 	if (xen_have_vcpu_info_placement)
 		return;
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 623720a11143..732631ce250f 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -16,6 +16,7 @@
  */
 
 #include <linux/dma-contiguous.h>
+#include <linux/dma-direct.h>
 #include <linux/gfp.h>
 #include <linux/highmem.h>
 #include <linux/mm.h>
@@ -123,7 +124,7 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
 			      unsigned long attrs)
 {
 	unsigned long ret;
-	unsigned long uncached = 0;
+	unsigned long uncached;
 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	struct page *page = NULL;
 
@@ -144,15 +145,27 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
 	if (!page)
 		return NULL;
 
-	ret = (unsigned long)page_address(page);
+	*handle = phys_to_dma(dev, page_to_phys(page));
 
-	/* We currently don't support coherent memory outside KSEG */
+#ifdef CONFIG_MMU
+	if (PageHighMem(page)) {
+		void *p;
 
+		p = dma_common_contiguous_remap(page, size, VM_MAP,
+						pgprot_noncached(PAGE_KERNEL),
+						__builtin_return_address(0));
+		if (!p) {
+			if (!dma_release_from_contiguous(dev, page, count))
+				__free_pages(page, get_order(size));
+		}
+		return p;
+	}
+#endif
+	ret = (unsigned long)page_address(page);
 	BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
 	       ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
 
 	uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
-	*handle = virt_to_bus((void *)ret);
 	__invalidate_dcache_range(ret, size);
 
 	return (void *)uncached;
@@ -161,13 +174,20 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
 static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr,
 			    dma_addr_t dma_handle, unsigned long attrs)
 {
-	unsigned long addr = (unsigned long)vaddr +
-		XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
-	struct page *page = virt_to_page(addr);
 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-	BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
-	       addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
+	unsigned long addr = (unsigned long)vaddr;
+	struct page *page;
+
+	if (addr >= XCHAL_KSEG_BYPASS_VADDR &&
+	    addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE) {
+		addr += XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
+		page = virt_to_page(addr);
+	} else {
+#ifdef CONFIG_MMU
+		dma_common_free_remap(vaddr, size, VM_MAP);
+#endif
+		page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
+	}
 
 	if (!dma_release_from_contiguous(dev, page, count))
 		__free_pages(page, get_order(size));
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index d776ec0d7b22..34aead7dcb48 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -79,19 +79,75 @@ void __init zones_init(void)
 	free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
 }
 
+#ifdef CONFIG_HIGHMEM
+static void __init free_area_high(unsigned long pfn, unsigned long end)
+{
+	for (; pfn < end; pfn++)
+		free_highmem_page(pfn_to_page(pfn));
+}
+
+static void __init free_highpages(void)
+{
+	unsigned long max_low = max_low_pfn;
+	struct memblock_region *mem, *res;
+
+	reset_all_zones_managed_pages();
+	/* set highmem page free */
+	for_each_memblock(memory, mem) {
+		unsigned long start = memblock_region_memory_base_pfn(mem);
+		unsigned long end = memblock_region_memory_end_pfn(mem);
+
+		/* Ignore complete lowmem entries */
+		if (end <= max_low)
+			continue;
+
+		if (memblock_is_nomap(mem))
+			continue;
+
+		/* Truncate partial highmem entries */
+		if (start < max_low)
+			start = max_low;
+
+		/* Find and exclude any reserved regions */
+		for_each_memblock(reserved, res) {
+			unsigned long res_start, res_end;
+
+			res_start = memblock_region_reserved_base_pfn(res);
+			res_end = memblock_region_reserved_end_pfn(res);
+
+			if (res_end < start)
+				continue;
+			if (res_start < start)
+				res_start = start;
+			if (res_start > end)
+				res_start = end;
+			if (res_end > end)
+				res_end = end;
+			if (res_start != start)
+				free_area_high(start, res_start);
+			start = res_end;
+			if (start == end)
+				break;
+		}
+
+		/* And now free anything which remains */
+		if (start < end)
+			free_area_high(start, end);
+	}
+}
+#else
+static void __init free_highpages(void)
+{
+}
+#endif
+
 /*
  * Initialize memory pages.
  */
 
 void __init mem_init(void)
 {
-#ifdef CONFIG_HIGHMEM
-	unsigned long tmp;
-
-	reset_all_zones_managed_pages();
-	for (tmp = max_low_pfn; tmp < max_pfn; tmp++)
-		free_highmem_page(pfn_to_page(tmp));
-#endif
+	free_highpages();
 
 	max_mapnr = max_pfn - ARCH_PFN_OFFSET;
 	high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index df93102e2149..357492712b0e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3164,6 +3164,7 @@ static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
3164 cpu_relax(); 3164 cpu_relax();
3165 } 3165 }
3166 3166
3167 __set_current_state(TASK_RUNNING);
3167 return false; 3168 return false;
3168} 3169}
3169 3170
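The added __set_current_state(TASK_RUNNING) matters because the polling loop above can leave the task in a sleeping state, and before this fix only the success path restored TASK_RUNNING. The canonical shape such loops must follow (generic sketch; done stands in for the real completion test):

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (done)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);	/* required on every exit path */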
diff --git a/block/sed-opal.c b/block/sed-opal.c
index 9ed51d0c6b1d..e4929eec547f 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -490,7 +490,7 @@ static int opal_discovery0_end(struct opal_dev *dev)
490 490
491 if (!found_com_id) { 491 if (!found_com_id) {
492 pr_debug("Could not find OPAL comid for device. Returning early\n"); 492 pr_debug("Could not find OPAL comid for device. Returning early\n");
493 return -EOPNOTSUPP;; 493 return -EOPNOTSUPP;
494 } 494 }
495 495
496 dev->comid = comid; 496 dev->comid = comid;
diff --git a/certs/blacklist_nohashes.c b/certs/blacklist_nohashes.c
index 73fd99098ad7..753b703ef0ef 100644
--- a/certs/blacklist_nohashes.c
+++ b/certs/blacklist_nohashes.c
@@ -1,6 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2#include "blacklist.h" 2#include "blacklist.h"
3 3
4const char __initdata *const blacklist_hashes[] = { 4const char __initconst *const blacklist_hashes[] = {
5 NULL 5 NULL
6}; 6};
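The blacklist change is a section fix: a const-qualified object marked __initdata asks the compiler to place read-only data in the writable .init.data section, which toolchains can reject as a section attribute conflict. The convention, with illustrative names:

	static int boot_flag __initdata;			/* writable, discarded after init */
	static const char banner[] __initconst = "boot";	/* read-only, discarded after init */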
diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c
index 1f4e25f10049..598906b1e28d 100644
--- a/crypto/asymmetric_keys/pkcs7_trust.c
+++ b/crypto/asymmetric_keys/pkcs7_trust.c
@@ -106,6 +106,7 @@ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7,
106 pr_devel("sinfo %u: Direct signer is key %x\n", 106 pr_devel("sinfo %u: Direct signer is key %x\n",
107 sinfo->index, key_serial(key)); 107 sinfo->index, key_serial(key));
108 x509 = NULL; 108 x509 = NULL;
109 sig = sinfo->sig;
109 goto matched; 110 goto matched;
110 } 111 }
111 if (PTR_ERR(key) != -ENOKEY) 112 if (PTR_ERR(key) != -ENOKEY)
diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
index 39e6de0c2761..97c77f66b20d 100644
--- a/crypto/asymmetric_keys/pkcs7_verify.c
+++ b/crypto/asymmetric_keys/pkcs7_verify.c
@@ -270,7 +270,7 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7,
270 sinfo->index); 270 sinfo->index);
271 return 0; 271 return 0;
272 } 272 }
273 ret = public_key_verify_signature(p->pub, p->sig); 273 ret = public_key_verify_signature(p->pub, x509->sig);
274 if (ret < 0) 274 if (ret < 0)
275 return ret; 275 return ret;
276 x509->signer = p; 276 x509->signer = p;
@@ -366,8 +366,7 @@ static int pkcs7_verify_one(struct pkcs7_message *pkcs7,
366 * 366 *
367 * (*) -EBADMSG if some part of the message was invalid, or: 367 * (*) -EBADMSG if some part of the message was invalid, or:
368 * 368 *
369 * (*) 0 if no signature chains were found to be blacklisted or to contain 369 * (*) 0 if a signature chain passed verification, or:
370 * unsupported crypto, or:
371 * 370 *
372 * (*) -EKEYREJECTED if a blacklisted key was encountered, or: 371 * (*) -EKEYREJECTED if a blacklisted key was encountered, or:
373 * 372 *
@@ -423,8 +422,11 @@ int pkcs7_verify(struct pkcs7_message *pkcs7,
423 422
424 for (sinfo = pkcs7->signed_infos; sinfo; sinfo = sinfo->next) { 423 for (sinfo = pkcs7->signed_infos; sinfo; sinfo = sinfo->next) {
425 ret = pkcs7_verify_one(pkcs7, sinfo); 424 ret = pkcs7_verify_one(pkcs7, sinfo);
426 if (sinfo->blacklisted && actual_ret == -ENOPKG) 425 if (sinfo->blacklisted) {
427 actual_ret = -EKEYREJECTED; 426 if (actual_ret == -ENOPKG)
427 actual_ret = -EKEYREJECTED;
428 continue;
429 }
428 if (ret < 0) { 430 if (ret < 0) {
429 if (ret == -ENOPKG) { 431 if (ret == -ENOPKG) {
430 sinfo->unsupported_crypto = true; 432 sinfo->unsupported_crypto = true;
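Taken together, the pkcs7_verify() changes make blacklisting terminal per signed-info: a blacklisted chain contributes no softer error and upgrades a pending -ENOPKG, so unsupported crypto can never mask a rejection. The aggregate verdict ends up roughly as:

	/* any chain verified          -> 0
	 * else any chain blacklisted  -> -EKEYREJECTED
	 * else only unsupported algos -> -ENOPKG
	 * (sketch; the full function also tracks -ENOKEY and -EBADMSG)
	 */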
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index de996586762a..e929fe1e4106 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -79,9 +79,11 @@ int public_key_verify_signature(const struct public_key *pkey,
79 79
80 BUG_ON(!pkey); 80 BUG_ON(!pkey);
81 BUG_ON(!sig); 81 BUG_ON(!sig);
82 BUG_ON(!sig->digest);
83 BUG_ON(!sig->s); 82 BUG_ON(!sig->s);
84 83
84 if (!sig->digest)
85 return -ENOPKG;
86
85 alg_name = sig->pkey_algo; 87 alg_name = sig->pkey_algo;
86 if (strcmp(sig->pkey_algo, "rsa") == 0) { 88 if (strcmp(sig->pkey_algo, "rsa") == 0) {
87 /* The data wangled by the RSA algorithm is typically padded 89 /* The data wangled by the RSA algorithm is typically padded
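Relaxing BUG_ON(!sig->digest) to an -ENOPKG return fits the same policy: a signature whose digest was never computed (typically because the hash algorithm is not built in) is unsupported crypto, not a programming error worth an oops. Roughly, the validation order becomes (a sketch, not the exact kernel flow):

	if (!pkey || !sig || !sig->s)
		return -EINVAL;	/* mandatory fields: still a caller bug */
	if (!sig->digest)
		return -ENOPKG;	/* digest never computed: unsupported crypto */
	/* ... hand the digest to the akcipher for real verification ... */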
diff --git a/crypto/asymmetric_keys/restrict.c b/crypto/asymmetric_keys/restrict.c
index 86fb68508952..7c93c7728454 100644
--- a/crypto/asymmetric_keys/restrict.c
+++ b/crypto/asymmetric_keys/restrict.c
@@ -67,8 +67,9 @@ __setup("ca_keys=", ca_keys_setup);
67 * 67 *
68 * Returns 0 if the new certificate was accepted, -ENOKEY if we couldn't find a 68 * Returns 0 if the new certificate was accepted, -ENOKEY if we couldn't find a
69 * matching parent certificate in the trusted list, -EKEYREJECTED if the 69 * matching parent certificate in the trusted list, -EKEYREJECTED if the
70 * signature check fails or the key is blacklisted and some other error if 70 * signature check fails or the key is blacklisted, -ENOPKG if the signature
71 * there is a matching certificate but the signature check cannot be performed. 71 * uses unsupported crypto, or some other error if there is a matching
72 * certificate but the signature check cannot be performed.
72 */ 73 */
73int restrict_link_by_signature(struct key *dest_keyring, 74int restrict_link_by_signature(struct key *dest_keyring,
74 const struct key_type *type, 75 const struct key_type *type,
@@ -88,6 +89,8 @@ int restrict_link_by_signature(struct key *dest_keyring,
88 return -EOPNOTSUPP; 89 return -EOPNOTSUPP;
89 90
90 sig = payload->data[asym_auth]; 91 sig = payload->data[asym_auth];
92 if (!sig)
93 return -ENOPKG;
91 if (!sig->auth_ids[0] && !sig->auth_ids[1]) 94 if (!sig->auth_ids[0] && !sig->auth_ids[1])
92 return -ENOKEY; 95 return -ENOKEY;
93 96
@@ -139,6 +142,8 @@ static int key_or_keyring_common(struct key *dest_keyring,
139 return -EOPNOTSUPP; 142 return -EOPNOTSUPP;
140 143
141 sig = payload->data[asym_auth]; 144 sig = payload->data[asym_auth];
145 if (!sig)
146 return -ENOPKG;
142 if (!sig->auth_ids[0] && !sig->auth_ids[1]) 147 if (!sig->auth_ids[0] && !sig->auth_ids[1])
143 return -ENOKEY; 148 return -ENOKEY;
144 149
@@ -222,9 +227,9 @@ static int key_or_keyring_common(struct key *dest_keyring,
222 * 227 *
223 * Returns 0 if the new certificate was accepted, -ENOKEY if we 228 * Returns 0 if the new certificate was accepted, -ENOKEY if we
224 * couldn't find a matching parent certificate in the trusted list, 229 * couldn't find a matching parent certificate in the trusted list,
225 * -EKEYREJECTED if the signature check fails, and some other error if 230 * -EKEYREJECTED if the signature check fails, -ENOPKG if the signature uses
226 * there is a matching certificate but the signature check cannot be 231 * unsupported crypto, or some other error if there is a matching certificate
227 * performed. 232 * but the signature check cannot be performed.
228 */ 233 */
229int restrict_link_by_key_or_keyring(struct key *dest_keyring, 234int restrict_link_by_key_or_keyring(struct key *dest_keyring,
230 const struct key_type *type, 235 const struct key_type *type,
@@ -249,9 +254,9 @@ int restrict_link_by_key_or_keyring(struct key *dest_keyring,
249 * 254 *
250 * Returns 0 if the new certificate was accepted, -ENOKEY if we 255 * Returns 0 if the new certificate was accepted, -ENOKEY if we
251 * couldn't find a matching parent certificate in the trusted list, 256 * couldn't find a matching parent certificate in the trusted list,
252 * -EKEYREJECTED if the signature check fails, and some other error if 257 * -EKEYREJECTED if the signature check fails, -ENOPKG if the signature uses
253 * there is a matching certificate but the signature check cannot be 258 * unsupported crypto, or some other error if there is a matching certificate
254 * performed. 259 * but the signature check cannot be performed.
255 */ 260 */
256int restrict_link_by_key_or_keyring_chain(struct key *dest_keyring, 261int restrict_link_by_key_or_keyring_chain(struct key *dest_keyring,
257 const struct key_type *type, 262 const struct key_type *type,
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index a965b9d80559..ded148783303 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -20,6 +20,20 @@
20#include <crypto/sha3.h> 20#include <crypto/sha3.h>
21#include <asm/unaligned.h> 21#include <asm/unaligned.h>
22 22
23/*
24 * On some 32-bit architectures (mn10300 and h8300), GCC ends up using
25 * over 1 KB of stack if we inline the round calculation into the loop
26 * in keccakf(). On the other hand, on 64-bit architectures with plenty
27 * of [64-bit wide] general purpose registers, not inlining it severely
28 * hurts performance. So let's use 64-bitness as a heuristic to decide
29 * whether to inline or not.
30 */
31#ifdef CONFIG_64BIT
32#define SHA3_INLINE inline
33#else
34#define SHA3_INLINE noinline
35#endif
36
23#define KECCAK_ROUNDS 24 37#define KECCAK_ROUNDS 24
24 38
25static const u64 keccakf_rndc[24] = { 39static const u64 keccakf_rndc[24] = {
@@ -35,111 +49,115 @@ static const u64 keccakf_rndc[24] = {
35 49
36/* update the state with given number of rounds */ 50/* update the state with given number of rounds */
37 51
38static void __attribute__((__optimize__("O3"))) keccakf(u64 st[25]) 52static SHA3_INLINE void keccakf_round(u64 st[25])
39{ 53{
40 u64 t[5], tt, bc[5]; 54 u64 t[5], tt, bc[5];
41 int round;
42 55
43 for (round = 0; round < KECCAK_ROUNDS; round++) { 56 /* Theta */
57 bc[0] = st[0] ^ st[5] ^ st[10] ^ st[15] ^ st[20];
58 bc[1] = st[1] ^ st[6] ^ st[11] ^ st[16] ^ st[21];
59 bc[2] = st[2] ^ st[7] ^ st[12] ^ st[17] ^ st[22];
60 bc[3] = st[3] ^ st[8] ^ st[13] ^ st[18] ^ st[23];
61 bc[4] = st[4] ^ st[9] ^ st[14] ^ st[19] ^ st[24];
62
63 t[0] = bc[4] ^ rol64(bc[1], 1);
64 t[1] = bc[0] ^ rol64(bc[2], 1);
65 t[2] = bc[1] ^ rol64(bc[3], 1);
66 t[3] = bc[2] ^ rol64(bc[4], 1);
67 t[4] = bc[3] ^ rol64(bc[0], 1);
68
69 st[0] ^= t[0];
70
71 /* Rho Pi */
72 tt = st[1];
73 st[ 1] = rol64(st[ 6] ^ t[1], 44);
74 st[ 6] = rol64(st[ 9] ^ t[4], 20);
75 st[ 9] = rol64(st[22] ^ t[2], 61);
76 st[22] = rol64(st[14] ^ t[4], 39);
77 st[14] = rol64(st[20] ^ t[0], 18);
78 st[20] = rol64(st[ 2] ^ t[2], 62);
79 st[ 2] = rol64(st[12] ^ t[2], 43);
80 st[12] = rol64(st[13] ^ t[3], 25);
81 st[13] = rol64(st[19] ^ t[4], 8);
82 st[19] = rol64(st[23] ^ t[3], 56);
83 st[23] = rol64(st[15] ^ t[0], 41);
84 st[15] = rol64(st[ 4] ^ t[4], 27);
85 st[ 4] = rol64(st[24] ^ t[4], 14);
86 st[24] = rol64(st[21] ^ t[1], 2);
87 st[21] = rol64(st[ 8] ^ t[3], 55);
88 st[ 8] = rol64(st[16] ^ t[1], 45);
89 st[16] = rol64(st[ 5] ^ t[0], 36);
90 st[ 5] = rol64(st[ 3] ^ t[3], 28);
91 st[ 3] = rol64(st[18] ^ t[3], 21);
92 st[18] = rol64(st[17] ^ t[2], 15);
93 st[17] = rol64(st[11] ^ t[1], 10);
94 st[11] = rol64(st[ 7] ^ t[2], 6);
95 st[ 7] = rol64(st[10] ^ t[0], 3);
96 st[10] = rol64( tt ^ t[1], 1);
97
98 /* Chi */
99 bc[ 0] = ~st[ 1] & st[ 2];
100 bc[ 1] = ~st[ 2] & st[ 3];
101 bc[ 2] = ~st[ 3] & st[ 4];
102 bc[ 3] = ~st[ 4] & st[ 0];
103 bc[ 4] = ~st[ 0] & st[ 1];
104 st[ 0] ^= bc[ 0];
105 st[ 1] ^= bc[ 1];
106 st[ 2] ^= bc[ 2];
107 st[ 3] ^= bc[ 3];
108 st[ 4] ^= bc[ 4];
109
110 bc[ 0] = ~st[ 6] & st[ 7];
111 bc[ 1] = ~st[ 7] & st[ 8];
112 bc[ 2] = ~st[ 8] & st[ 9];
113 bc[ 3] = ~st[ 9] & st[ 5];
114 bc[ 4] = ~st[ 5] & st[ 6];
115 st[ 5] ^= bc[ 0];
116 st[ 6] ^= bc[ 1];
117 st[ 7] ^= bc[ 2];
118 st[ 8] ^= bc[ 3];
119 st[ 9] ^= bc[ 4];
120
121 bc[ 0] = ~st[11] & st[12];
122 bc[ 1] = ~st[12] & st[13];
123 bc[ 2] = ~st[13] & st[14];
124 bc[ 3] = ~st[14] & st[10];
125 bc[ 4] = ~st[10] & st[11];
126 st[10] ^= bc[ 0];
127 st[11] ^= bc[ 1];
128 st[12] ^= bc[ 2];
129 st[13] ^= bc[ 3];
130 st[14] ^= bc[ 4];
131
132 bc[ 0] = ~st[16] & st[17];
133 bc[ 1] = ~st[17] & st[18];
134 bc[ 2] = ~st[18] & st[19];
135 bc[ 3] = ~st[19] & st[15];
136 bc[ 4] = ~st[15] & st[16];
137 st[15] ^= bc[ 0];
138 st[16] ^= bc[ 1];
139 st[17] ^= bc[ 2];
140 st[18] ^= bc[ 3];
141 st[19] ^= bc[ 4];
142
143 bc[ 0] = ~st[21] & st[22];
144 bc[ 1] = ~st[22] & st[23];
145 bc[ 2] = ~st[23] & st[24];
146 bc[ 3] = ~st[24] & st[20];
147 bc[ 4] = ~st[20] & st[21];
148 st[20] ^= bc[ 0];
149 st[21] ^= bc[ 1];
150 st[22] ^= bc[ 2];
151 st[23] ^= bc[ 3];
152 st[24] ^= bc[ 4];
153}
44 154
45 /* Theta */ 155static void __optimize("O3") keccakf(u64 st[25])
46 bc[0] = st[0] ^ st[5] ^ st[10] ^ st[15] ^ st[20]; 156{
47 bc[1] = st[1] ^ st[6] ^ st[11] ^ st[16] ^ st[21]; 157 int round;
48 bc[2] = st[2] ^ st[7] ^ st[12] ^ st[17] ^ st[22];
49 bc[3] = st[3] ^ st[8] ^ st[13] ^ st[18] ^ st[23];
50 bc[4] = st[4] ^ st[9] ^ st[14] ^ st[19] ^ st[24];
51
52 t[0] = bc[4] ^ rol64(bc[1], 1);
53 t[1] = bc[0] ^ rol64(bc[2], 1);
54 t[2] = bc[1] ^ rol64(bc[3], 1);
55 t[3] = bc[2] ^ rol64(bc[4], 1);
56 t[4] = bc[3] ^ rol64(bc[0], 1);
57
58 st[0] ^= t[0];
59
60 /* Rho Pi */
61 tt = st[1];
62 st[ 1] = rol64(st[ 6] ^ t[1], 44);
63 st[ 6] = rol64(st[ 9] ^ t[4], 20);
64 st[ 9] = rol64(st[22] ^ t[2], 61);
65 st[22] = rol64(st[14] ^ t[4], 39);
66 st[14] = rol64(st[20] ^ t[0], 18);
67 st[20] = rol64(st[ 2] ^ t[2], 62);
68 st[ 2] = rol64(st[12] ^ t[2], 43);
69 st[12] = rol64(st[13] ^ t[3], 25);
70 st[13] = rol64(st[19] ^ t[4], 8);
71 st[19] = rol64(st[23] ^ t[3], 56);
72 st[23] = rol64(st[15] ^ t[0], 41);
73 st[15] = rol64(st[ 4] ^ t[4], 27);
74 st[ 4] = rol64(st[24] ^ t[4], 14);
75 st[24] = rol64(st[21] ^ t[1], 2);
76 st[21] = rol64(st[ 8] ^ t[3], 55);
77 st[ 8] = rol64(st[16] ^ t[1], 45);
78 st[16] = rol64(st[ 5] ^ t[0], 36);
79 st[ 5] = rol64(st[ 3] ^ t[3], 28);
80 st[ 3] = rol64(st[18] ^ t[3], 21);
81 st[18] = rol64(st[17] ^ t[2], 15);
82 st[17] = rol64(st[11] ^ t[1], 10);
83 st[11] = rol64(st[ 7] ^ t[2], 6);
84 st[ 7] = rol64(st[10] ^ t[0], 3);
85 st[10] = rol64( tt ^ t[1], 1);
86
87 /* Chi */
88 bc[ 0] = ~st[ 1] & st[ 2];
89 bc[ 1] = ~st[ 2] & st[ 3];
90 bc[ 2] = ~st[ 3] & st[ 4];
91 bc[ 3] = ~st[ 4] & st[ 0];
92 bc[ 4] = ~st[ 0] & st[ 1];
93 st[ 0] ^= bc[ 0];
94 st[ 1] ^= bc[ 1];
95 st[ 2] ^= bc[ 2];
96 st[ 3] ^= bc[ 3];
97 st[ 4] ^= bc[ 4];
98
99 bc[ 0] = ~st[ 6] & st[ 7];
100 bc[ 1] = ~st[ 7] & st[ 8];
101 bc[ 2] = ~st[ 8] & st[ 9];
102 bc[ 3] = ~st[ 9] & st[ 5];
103 bc[ 4] = ~st[ 5] & st[ 6];
104 st[ 5] ^= bc[ 0];
105 st[ 6] ^= bc[ 1];
106 st[ 7] ^= bc[ 2];
107 st[ 8] ^= bc[ 3];
108 st[ 9] ^= bc[ 4];
109
110 bc[ 0] = ~st[11] & st[12];
111 bc[ 1] = ~st[12] & st[13];
112 bc[ 2] = ~st[13] & st[14];
113 bc[ 3] = ~st[14] & st[10];
114 bc[ 4] = ~st[10] & st[11];
115 st[10] ^= bc[ 0];
116 st[11] ^= bc[ 1];
117 st[12] ^= bc[ 2];
118 st[13] ^= bc[ 3];
119 st[14] ^= bc[ 4];
120
121 bc[ 0] = ~st[16] & st[17];
122 bc[ 1] = ~st[17] & st[18];
123 bc[ 2] = ~st[18] & st[19];
124 bc[ 3] = ~st[19] & st[15];
125 bc[ 4] = ~st[15] & st[16];
126 st[15] ^= bc[ 0];
127 st[16] ^= bc[ 1];
128 st[17] ^= bc[ 2];
129 st[18] ^= bc[ 3];
130 st[19] ^= bc[ 4];
131
132 bc[ 0] = ~st[21] & st[22];
133 bc[ 1] = ~st[22] & st[23];
134 bc[ 2] = ~st[23] & st[24];
135 bc[ 3] = ~st[24] & st[20];
136 bc[ 4] = ~st[20] & st[21];
137 st[20] ^= bc[ 0];
138 st[21] ^= bc[ 1];
139 st[22] ^= bc[ 2];
140 st[23] ^= bc[ 3];
141 st[24] ^= bc[ 4];
142 158
159 for (round = 0; round < KECCAK_ROUNDS; round++) {
160 keccakf_round(st);
143 /* Iota */ 161 /* Iota */
144 st[0] ^= keccakf_rndc[round]; 162 st[0] ^= keccakf_rndc[round];
145 } 163 }
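For checking keccakf_round() against the Keccak-f[1600] spec: the state is laid out as st[x + 5*y], bc[] holds the Theta column parities C[x], and t[] the mixing values D[x]; the unrolled code folds the D[x] XOR into the Rho-Pi rotations. Theta in textbook form, with explicit modulo-5 indexing (rol64() is from linux/bitops.h):

	u64 C[5], D[5];
	int x, i;

	for (x = 0; x < 5; x++)		/* column parities */
		C[x] = st[x] ^ st[x + 5] ^ st[x + 10] ^ st[x + 15] ^ st[x + 20];
	for (x = 0; x < 5; x++)		/* mix the two neighbouring columns */
		D[x] = C[(x + 4) % 5] ^ rol64(C[(x + 1) % 5], 1);
	for (i = 0; i < 25; i++)
		st[i] ^= D[i % 5];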
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 676c9788e1c8..0dad0bd9327b 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -660,13 +660,15 @@ struct acpi_device *acpi_companion_match(const struct device *dev)
660 * acpi_of_match_device - Match device object using the "compatible" property. 660 * acpi_of_match_device - Match device object using the "compatible" property.
661 * @adev: ACPI device object to match. 661 * @adev: ACPI device object to match.
662 * @of_match_table: List of device IDs to match against. 662 * @of_match_table: List of device IDs to match against.
663 * @of_id: OF ID if matched
663 * 664 *
664 * If @dev has an ACPI companion which has ACPI_DT_NAMESPACE_HID in its list of 665 * If @dev has an ACPI companion which has ACPI_DT_NAMESPACE_HID in its list of
665 * identifiers and a _DSD object with the "compatible" property, use that 666 * identifiers and a _DSD object with the "compatible" property, use that
666 * property to match against the given list of identifiers. 667 * property to match against the given list of identifiers.
667 */ 668 */
668static bool acpi_of_match_device(struct acpi_device *adev, 669static bool acpi_of_match_device(struct acpi_device *adev,
669 const struct of_device_id *of_match_table) 670 const struct of_device_id *of_match_table,
671 const struct of_device_id **of_id)
670{ 672{
671 const union acpi_object *of_compatible, *obj; 673 const union acpi_object *of_compatible, *obj;
672 int i, nval; 674 int i, nval;
@@ -690,8 +692,11 @@ static bool acpi_of_match_device(struct acpi_device *adev,
690 const struct of_device_id *id; 692 const struct of_device_id *id;
691 693
692 for (id = of_match_table; id->compatible[0]; id++) 694 for (id = of_match_table; id->compatible[0]; id++)
693 if (!strcasecmp(obj->string.pointer, id->compatible)) 695 if (!strcasecmp(obj->string.pointer, id->compatible)) {
696 if (of_id)
697 *of_id = id;
694 return true; 698 return true;
699 }
695 } 700 }
696 701
697 return false; 702 return false;
@@ -762,10 +767,11 @@ static bool __acpi_match_device_cls(const struct acpi_device_id *id,
762 return true; 767 return true;
763} 768}
764 769
765static const struct acpi_device_id *__acpi_match_device( 770static bool __acpi_match_device(struct acpi_device *device,
766 struct acpi_device *device, 771 const struct acpi_device_id *acpi_ids,
767 const struct acpi_device_id *ids, 772 const struct of_device_id *of_ids,
768 const struct of_device_id *of_ids) 773 const struct acpi_device_id **acpi_id,
774 const struct of_device_id **of_id)
769{ 775{
770 const struct acpi_device_id *id; 776 const struct acpi_device_id *id;
771 struct acpi_hardware_id *hwid; 777 struct acpi_hardware_id *hwid;
@@ -775,30 +781,32 @@ static const struct acpi_device_id *__acpi_match_device(
775 * driver for it. 781 * driver for it.
776 */ 782 */
777 if (!device || !device->status.present) 783 if (!device || !device->status.present)
778 return NULL; 784 return false;
779 785
780 list_for_each_entry(hwid, &device->pnp.ids, list) { 786 list_for_each_entry(hwid, &device->pnp.ids, list) {
781 /* First, check the ACPI/PNP IDs provided by the caller. */ 787 /* First, check the ACPI/PNP IDs provided by the caller. */
782 for (id = ids; id->id[0] || id->cls; id++) { 788 if (acpi_ids) {
783 if (id->id[0] && !strcmp((char *) id->id, hwid->id)) 789 for (id = acpi_ids; id->id[0] || id->cls; id++) {
784 return id; 790 if (id->id[0] && !strcmp((char *)id->id, hwid->id))
785 else if (id->cls && __acpi_match_device_cls(id, hwid)) 791 goto out_acpi_match;
786 return id; 792 if (id->cls && __acpi_match_device_cls(id, hwid))
793 goto out_acpi_match;
794 }
787 } 795 }
788 796
789 /* 797 /*
790 * Next, check ACPI_DT_NAMESPACE_HID and try to match the 798 * Next, check ACPI_DT_NAMESPACE_HID and try to match the
791 * "compatible" property if found. 799 * "compatible" property if found.
792 *
793 * The id returned by the below is not valid, but the only
794 * caller passing non-NULL of_ids here is only interested in
795 * whether or not the return value is NULL.
796 */ 800 */
797 if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id) 801 if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id))
798 && acpi_of_match_device(device, of_ids)) 802 return acpi_of_match_device(device, of_ids, of_id);
799 return id;
800 } 803 }
801 return NULL; 804 return false;
805
806out_acpi_match:
807 if (acpi_id)
808 *acpi_id = id;
809 return true;
802} 810}
803 811
804/** 812/**
@@ -815,32 +823,29 @@ static const struct acpi_device_id *__acpi_match_device(
815const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, 823const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
816 const struct device *dev) 824 const struct device *dev)
817{ 825{
818 return __acpi_match_device(acpi_companion_match(dev), ids, NULL); 826 const struct acpi_device_id *id = NULL;
827
828 __acpi_match_device(acpi_companion_match(dev), ids, NULL, &id, NULL);
829 return id;
819} 830}
820EXPORT_SYMBOL_GPL(acpi_match_device); 831EXPORT_SYMBOL_GPL(acpi_match_device);
821 832
822void *acpi_get_match_data(const struct device *dev) 833const void *acpi_device_get_match_data(const struct device *dev)
823{ 834{
824 const struct acpi_device_id *match; 835 const struct acpi_device_id *match;
825 836
826 if (!dev->driver)
827 return NULL;
828
829 if (!dev->driver->acpi_match_table)
830 return NULL;
831
832 match = acpi_match_device(dev->driver->acpi_match_table, dev); 837 match = acpi_match_device(dev->driver->acpi_match_table, dev);
833 if (!match) 838 if (!match)
834 return NULL; 839 return NULL;
835 840
836 return (void *)match->driver_data; 841 return (const void *)match->driver_data;
837} 842}
838EXPORT_SYMBOL_GPL(acpi_get_match_data); 843EXPORT_SYMBOL_GPL(acpi_device_get_match_data);
839 844
840int acpi_match_device_ids(struct acpi_device *device, 845int acpi_match_device_ids(struct acpi_device *device,
841 const struct acpi_device_id *ids) 846 const struct acpi_device_id *ids)
842{ 847{
843 return __acpi_match_device(device, ids, NULL) ? 0 : -ENOENT; 848 return __acpi_match_device(device, ids, NULL, NULL, NULL) ? 0 : -ENOENT;
844} 849}
845EXPORT_SYMBOL(acpi_match_device_ids); 850EXPORT_SYMBOL(acpi_match_device_ids);
846 851
@@ -849,10 +854,12 @@ bool acpi_driver_match_device(struct device *dev,
849{ 854{
850 if (!drv->acpi_match_table) 855 if (!drv->acpi_match_table)
851 return acpi_of_match_device(ACPI_COMPANION(dev), 856 return acpi_of_match_device(ACPI_COMPANION(dev),
852 drv->of_match_table); 857 drv->of_match_table,
858 NULL);
853 859
854 return !!__acpi_match_device(acpi_companion_match(dev), 860 return __acpi_match_device(acpi_companion_match(dev),
855 drv->acpi_match_table, drv->of_match_table); 861 drv->acpi_match_table, drv->of_match_table,
862 NULL, NULL);
856} 863}
857EXPORT_SYMBOL_GPL(acpi_driver_match_device); 864EXPORT_SYMBOL_GPL(acpi_driver_match_device);
858 865
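The reshaped __acpi_match_device() reports success as a bool and hands matches back through optional out-parameters, which removes the old hack of returning an id pointer that was only valid as a NULL/non-NULL flag. Caller-side idiom (sketch; the function is static to bus.c, so this only illustrates the internal calling convention):

	const struct acpi_device_id *acpi_id = NULL;
	const struct of_device_id *of_id = NULL;

	if (__acpi_match_device(adev, acpi_ids, of_ids, &acpi_id, &of_id)) {
		if (acpi_id)
			pr_info("matched ACPI/PNP id %s\n", acpi_id->id);
		else if (of_id)
			pr_info("matched DT compatible %s\n", of_id->compatible);
	}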
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index d9f38c645e4a..30a572956557 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1927,6 +1927,9 @@ static int acpi_ec_suspend_noirq(struct device *dev)
1927 ec->reference_count >= 1) 1927 ec->reference_count >= 1)
1928 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE); 1928 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
1929 1929
1930 if (acpi_sleep_no_ec_events())
1931 acpi_ec_enter_noirq(ec);
1932
1930 return 0; 1933 return 0;
1931} 1934}
1932 1935
@@ -1934,6 +1937,9 @@ static int acpi_ec_resume_noirq(struct device *dev)
1934{ 1937{
1935 struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev)); 1938 struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
1936 1939
1940 if (acpi_sleep_no_ec_events())
1941 acpi_ec_leave_noirq(ec);
1942
1937 if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) && 1943 if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
1938 ec->reference_count >= 1) 1944 ec->reference_count >= 1)
1939 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE); 1945 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 466d1503aba0..5815356ea6ad 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1271,11 +1271,11 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
1271 return 0; 1271 return 0;
1272} 1272}
1273 1273
1274static void * 1274static const void *
1275acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode, 1275acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
1276 const struct device *dev) 1276 const struct device *dev)
1277{ 1277{
1278 return acpi_get_match_data(dev); 1278 return acpi_device_get_match_data(dev);
1279} 1279}
1280 1280
1281#define DECLARE_ACPI_FWNODE_OPS(ops) \ 1281#define DECLARE_ACPI_FWNODE_OPS(ops) \
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index 89e97d21a89c..9d52743080a4 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -115,6 +115,7 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
115 table->serial_port.access_width))) { 115 table->serial_port.access_width))) {
116 default: 116 default:
117 pr_err("Unexpected SPCR Access Width. Defaulting to byte size\n"); 117 pr_err("Unexpected SPCR Access Width. Defaulting to byte size\n");
118 /* fall through */
118 case 8: 119 case 8:
119 iotype = "mmio"; 120 iotype = "mmio";
120 break; 121 break;
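The added comment is not cosmetic: GCC's -Wimplicit-fallthrough and static checkers treat a /* fall through */ comment as an annotation that the missing break is intentional. The shape of the switch in question, condensed (variable names abbreviated):

	switch (access_width) {
	default:
		pr_err("Unexpected SPCR Access Width. Defaulting to byte size\n");
		/* fall through */
	case 8:
		iotype = "mmio";
		break;
	}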
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 15e3d3c2260d..764b63a5aade 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1991,8 +1991,14 @@ static void binder_send_failed_reply(struct binder_transaction *t,
1991 &target_thread->reply_error.work); 1991 &target_thread->reply_error.work);
1992 wake_up_interruptible(&target_thread->wait); 1992 wake_up_interruptible(&target_thread->wait);
1993 } else { 1993 } else {
1994 WARN(1, "Unexpected reply error: %u\n", 1994 /*
1995 target_thread->reply_error.cmd); 1995 * Cannot get here for normal operation, but
1996 * we can if multiple synchronous transactions
1997 * are sent without blocking for responses.
1998 * Just ignore the 2nd error in this case.
1999 */
2000 pr_warn("Unexpected reply error: %u\n",
2001 target_thread->reply_error.cmd);
1996 } 2002 }
1997 binder_inner_proc_unlock(target_thread->proc); 2003 binder_inner_proc_unlock(target_thread->proc);
1998 binder_thread_dec_tmpref(target_thread); 2004 binder_thread_dec_tmpref(target_thread);
@@ -2193,7 +2199,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
2193 int debug_id = buffer->debug_id; 2199 int debug_id = buffer->debug_id;
2194 2200
2195 binder_debug(BINDER_DEBUG_TRANSACTION, 2201 binder_debug(BINDER_DEBUG_TRANSACTION,
2196 "%d buffer release %d, size %zd-%zd, failed at %p\n", 2202 "%d buffer release %d, size %zd-%zd, failed at %pK\n",
2197 proc->pid, buffer->debug_id, 2203 proc->pid, buffer->debug_id,
2198 buffer->data_size, buffer->offsets_size, failed_at); 2204 buffer->data_size, buffer->offsets_size, failed_at);
2199 2205
@@ -3705,7 +3711,7 @@ static int binder_thread_write(struct binder_proc *proc,
3705 } 3711 }
3706 } 3712 }
3707 binder_debug(BINDER_DEBUG_DEAD_BINDER, 3713 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3708 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n", 3714 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3709 proc->pid, thread->pid, (u64)cookie, 3715 proc->pid, thread->pid, (u64)cookie,
3710 death); 3716 death);
3711 if (death == NULL) { 3717 if (death == NULL) {
@@ -4376,6 +4382,15 @@ static int binder_thread_release(struct binder_proc *proc,
4376 4382
4377 binder_inner_proc_unlock(thread->proc); 4383 binder_inner_proc_unlock(thread->proc);
4378 4384
4385 /*
 4386 * This is needed to avoid races between wake_up_poll() above and
 4387 * ep_remove_waitqueue() called for other reasons (e.g. the epoll file
4388 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4389 * lock, so we can be sure it's done after calling synchronize_rcu().
4390 */
4391 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4392 synchronize_rcu();
4393
4379 if (send_reply) 4394 if (send_reply)
4380 binder_send_failed_reply(send_reply, BR_DEAD_REPLY); 4395 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4381 binder_release_work(proc, &thread->todo); 4396 binder_release_work(proc, &thread->todo);
@@ -4391,6 +4406,8 @@ static __poll_t binder_poll(struct file *filp,
4391 bool wait_for_proc_work; 4406 bool wait_for_proc_work;
4392 4407
4393 thread = binder_get_thread(proc); 4408 thread = binder_get_thread(proc);
4409 if (!thread)
4410 return POLLERR;
4394 4411
4395 binder_inner_proc_lock(thread->proc); 4412 binder_inner_proc_lock(thread->proc);
4396 thread->looper |= BINDER_LOOPER_STATE_POLL; 4413 thread->looper |= BINDER_LOOPER_STATE_POLL;
@@ -5034,7 +5051,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
5034 spin_lock(&t->lock); 5051 spin_lock(&t->lock);
5035 to_proc = t->to_proc; 5052 to_proc = t->to_proc;
5036 seq_printf(m, 5053 seq_printf(m,
5037 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", 5054 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5038 prefix, t->debug_id, t, 5055 prefix, t->debug_id, t,
5039 t->from ? t->from->proc->pid : 0, 5056 t->from ? t->from->proc->pid : 0,
5040 t->from ? t->from->pid : 0, 5057 t->from ? t->from->pid : 0,
@@ -5058,7 +5075,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
5058 } 5075 }
5059 if (buffer->target_node) 5076 if (buffer->target_node)
5060 seq_printf(m, " node %d", buffer->target_node->debug_id); 5077 seq_printf(m, " node %d", buffer->target_node->debug_id);
5061 seq_printf(m, " size %zd:%zd data %p\n", 5078 seq_printf(m, " size %zd:%zd data %pK\n",
5062 buffer->data_size, buffer->offsets_size, 5079 buffer->data_size, buffer->offsets_size,
5063 buffer->data); 5080 buffer->data);
5064} 5081}
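Two independent hardenings land in binder here: the %p to %pK conversions keep kernel addresses out of debug output for unprivileged readers, and the synchronize_rcu() in binder_thread_release() waits out epoll's RCU-protected waitqueue teardown before the thread goes away. The printk side in miniature (buffer is a stand-in pointer):

	/* With kptr_restrict >= 1, %pK prints zeros to unprivileged
	 * readers instead of a usable kernel address. */
	pr_info("transaction buffer at %pK\n", buffer);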
diff --git a/drivers/base/core.c b/drivers/base/core.c
index b2261f92f2f1..5847364f25d9 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -310,6 +310,9 @@ static void __device_link_del(struct device_link *link)
310 dev_info(link->consumer, "Dropping the link to %s\n", 310 dev_info(link->consumer, "Dropping the link to %s\n",
311 dev_name(link->supplier)); 311 dev_name(link->supplier));
312 312
313 if (link->flags & DL_FLAG_PM_RUNTIME)
314 pm_runtime_drop_link(link->consumer);
315
313 list_del(&link->s_node); 316 list_del(&link->s_node);
314 list_del(&link->c_node); 317 list_del(&link->c_node);
315 device_link_free(link); 318 device_link_free(link);
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index a8ac86e4d79e..6637fc319269 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -321,7 +321,8 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq)
321 return; 321 return;
322 322
323 if (device_may_wakeup(wirq->dev)) { 323 if (device_may_wakeup(wirq->dev)) {
324 if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) 324 if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
325 !pm_runtime_status_suspended(wirq->dev))
325 enable_irq(wirq->irq); 326 enable_irq(wirq->irq);
326 327
327 enable_irq_wake(wirq->irq); 328 enable_irq_wake(wirq->irq);
@@ -343,7 +344,8 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
343 if (device_may_wakeup(wirq->dev)) { 344 if (device_may_wakeup(wirq->dev)) {
344 disable_irq_wake(wirq->irq); 345 disable_irq_wake(wirq->irq);
345 346
346 if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) 347 if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
348 !pm_runtime_status_suspended(wirq->dev))
347 disable_irq_nosync(wirq->irq); 349 disable_irq_nosync(wirq->irq);
348 } 350 }
349} 351}
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 302236281d83..8f205f6461ed 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -1410,9 +1410,8 @@ int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
1410} 1410}
1411EXPORT_SYMBOL(fwnode_graph_parse_endpoint); 1411EXPORT_SYMBOL(fwnode_graph_parse_endpoint);
1412 1412
1413void *device_get_match_data(struct device *dev) 1413const void *device_get_match_data(struct device *dev)
1414{ 1414{
1415 return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data, 1415 return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data, dev);
1416 dev);
1417} 1416}
1418EXPORT_SYMBOL_GPL(device_get_match_data); 1417EXPORT_SYMBOL_GPL(device_get_match_data);
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index d1f5bb534e0e..6e9df558325b 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -162,7 +162,7 @@ static int via_rng_init(struct hwrng *rng)
162 /* Enable secondary noise source on CPUs where it is present. */ 162 /* Enable secondary noise source on CPUs where it is present. */
163 163
164 /* Nehemiah stepping 8 and higher */ 164 /* Nehemiah stepping 8 and higher */
165 if ((c->x86_model == 9) && (c->x86_mask > 7)) 165 if ((c->x86_model == 9) && (c->x86_stepping > 7))
166 lo |= VIA_NOISESRC2; 166 lo |= VIA_NOISESRC2;
167 167
168 /* Esther */ 168 /* Esther */
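This hunk and the cpufreq, EDAC, and crypto hunks that follow are fallout from the tree-wide rename of cpuinfo_x86.x86_mask to x86_stepping: the field has always held the CPUID stepping, and the old name only reflected how the value was once masked out. Where it comes from:

	unsigned int eax = cpuid_eax(0x00000001);
	unsigned int stepping = eax & 0xf;		/* EAX[3:0]  */
	unsigned int model = (eax >> 4) & 0xf;		/* EAX[7:4]  */
	unsigned int family = (eax >> 8) & 0xf;		/* EAX[11:8] */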
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index a04808a21d4e..65e18c86d9b9 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -205,12 +205,12 @@ static int __init gic_clocksource_of_init(struct device_node *node)
205 } else if (of_property_read_u32(node, "clock-frequency", 205 } else if (of_property_read_u32(node, "clock-frequency",
206 &gic_frequency)) { 206 &gic_frequency)) {
207 pr_err("GIC frequency not specified.\n"); 207 pr_err("GIC frequency not specified.\n");
208 return -EINVAL;; 208 return -EINVAL;
209 } 209 }
210 gic_timer_irq = irq_of_parse_and_map(node, 0); 210 gic_timer_irq = irq_of_parse_and_map(node, 0);
211 if (!gic_timer_irq) { 211 if (!gic_timer_irq) {
212 pr_err("GIC timer IRQ not specified.\n"); 212 pr_err("GIC timer IRQ not specified.\n");
213 return -EINVAL;; 213 return -EINVAL;
214 } 214 }
215 215
216 ret = __gic_clocksource_init(); 216 ret = __gic_clocksource_init();
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 2a3fe83ec337..3b56ea3f52af 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -334,7 +334,7 @@ static int __init sun5i_timer_init(struct device_node *node)
334 timer_base = of_io_request_and_map(node, 0, of_node_full_name(node)); 334 timer_base = of_io_request_and_map(node, 0, of_node_full_name(node));
335 if (IS_ERR(timer_base)) { 335 if (IS_ERR(timer_base)) {
336 pr_err("Can't map registers\n"); 336 pr_err("Can't map registers\n");
337 return PTR_ERR(timer_base);; 337 return PTR_ERR(timer_base);
338 } 338 }
339 339
340 irq = irq_of_parse_and_map(node, 0); 340 irq = irq_of_parse_and_map(node, 0);
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 3a2ca0f79daf..d0c34df0529c 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -629,7 +629,7 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
629 if (c->x86_vendor == X86_VENDOR_INTEL) { 629 if (c->x86_vendor == X86_VENDOR_INTEL) {
630 if ((c->x86 == 15) && 630 if ((c->x86 == 15) &&
631 (c->x86_model == 6) && 631 (c->x86_model == 6) &&
632 (c->x86_mask == 8)) { 632 (c->x86_stepping == 8)) {
633 pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n"); 633 pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
634 return -ENODEV; 634 return -ENODEV;
635 } 635 }
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 942632a27b50..f730b6528c18 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -775,7 +775,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
775 break; 775 break;
776 776
777 case 7: 777 case 7:
778 switch (c->x86_mask) { 778 switch (c->x86_stepping) {
779 case 0: 779 case 0:
780 longhaul_version = TYPE_LONGHAUL_V1; 780 longhaul_version = TYPE_LONGHAUL_V1;
781 cpu_model = CPU_SAMUEL2; 781 cpu_model = CPU_SAMUEL2;
@@ -787,7 +787,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
787 break; 787 break;
788 case 1 ... 15: 788 case 1 ... 15:
789 longhaul_version = TYPE_LONGHAUL_V2; 789 longhaul_version = TYPE_LONGHAUL_V2;
790 if (c->x86_mask < 8) { 790 if (c->x86_stepping < 8) {
791 cpu_model = CPU_SAMUEL2; 791 cpu_model = CPU_SAMUEL2;
792 cpuname = "C3 'Samuel 2' [C5B]"; 792 cpuname = "C3 'Samuel 2' [C5B]";
793 } else { 793 } else {
@@ -814,7 +814,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
814 numscales = 32; 814 numscales = 32;
815 memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults)); 815 memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults));
816 memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr)); 816 memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr));
817 switch (c->x86_mask) { 817 switch (c->x86_stepping) {
818 case 0 ... 1: 818 case 0 ... 1:
819 cpu_model = CPU_NEHEMIAH; 819 cpu_model = CPU_NEHEMIAH;
820 cpuname = "C3 'Nehemiah A' [C5XLOE]"; 820 cpuname = "C3 'Nehemiah A' [C5XLOE]";
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index fd77812313f3..a25741b1281b 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -168,7 +168,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
168#endif 168#endif
169 169
170 /* Errata workaround */ 170 /* Errata workaround */
171 cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask; 171 cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_stepping;
172 switch (cpuid) { 172 switch (cpuid) {
173 case 0x0f07: 173 case 0x0f07:
174 case 0x0f0a: 174 case 0x0f0a:
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 80ac313e6c59..302e9ce793a0 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -131,7 +131,7 @@ static int check_powernow(void)
131 return 0; 131 return 0;
132 } 132 }
133 133
134 if ((c->x86_model == 6) && (c->x86_mask == 0)) { 134 if ((c->x86_model == 6) && (c->x86_stepping == 0)) {
135 pr_info("K7 660[A0] core detected, enabling errata workarounds\n"); 135 pr_info("K7 660[A0] core detected, enabling errata workarounds\n");
136 have_a0 = 1; 136 have_a0 = 1;
137 } 137 }
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 41bc5397f4bb..4fa5adf16c70 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -37,7 +37,7 @@ struct cpu_id
37{ 37{
38 __u8 x86; /* CPU family */ 38 __u8 x86; /* CPU family */
39 __u8 x86_model; /* model */ 39 __u8 x86_model; /* model */
40 __u8 x86_mask; /* stepping */ 40 __u8 x86_stepping; /* stepping */
41}; 41};
42 42
43enum { 43enum {
@@ -277,7 +277,7 @@ static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
277{ 277{
278 if ((c->x86 == x->x86) && 278 if ((c->x86 == x->x86) &&
279 (c->x86_model == x->x86_model) && 279 (c->x86_model == x->x86_model) &&
280 (c->x86_mask == x->x86_mask)) 280 (c->x86_stepping == x->x86_stepping))
281 return 1; 281 return 1;
282 return 0; 282 return 0;
283} 283}
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index 8085ec9000d1..e3a9962ee410 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -272,9 +272,9 @@ unsigned int speedstep_detect_processor(void)
272 ebx = cpuid_ebx(0x00000001); 272 ebx = cpuid_ebx(0x00000001);
273 ebx &= 0x000000FF; 273 ebx &= 0x000000FF;
274 274
275 pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask); 275 pr_debug("ebx value is %x, x86_stepping is %x\n", ebx, c->x86_stepping);
276 276
277 switch (c->x86_mask) { 277 switch (c->x86_stepping) {
278 case 4: 278 case 4:
279 /* 279 /*
280 * B-stepping [M-P4-M] 280 * B-stepping [M-P4-M]
@@ -361,7 +361,7 @@ unsigned int speedstep_detect_processor(void)
361 msr_lo, msr_hi); 361 msr_lo, msr_hi);
362 if ((msr_hi & (1<<18)) && 362 if ((msr_hi & (1<<18)) &&
363 (relaxed_check ? 1 : (msr_hi & (3<<24)))) { 363 (relaxed_check ? 1 : (msr_hi & (3<<24)))) {
364 if (c->x86_mask == 0x01) { 364 if (c->x86_stepping == 0x01) {
365 pr_debug("early PIII version\n"); 365 pr_debug("early PIII version\n");
366 return SPEEDSTEP_CPU_PIII_C_EARLY; 366 return SPEEDSTEP_CPU_PIII_C_EARLY;
367 } else 367 } else
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 75d280cb2dc0..e843cf410373 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -228,12 +228,16 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
228 * without any error (HW optimizations for later 228 * without any error (HW optimizations for later
229 * CAAM eras), then try again. 229 * CAAM eras), then try again.
230 */ 230 */
231 if (ret)
232 break;
233
231 rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK; 234 rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
232 if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) || 235 if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
233 !(rdsta_val & (1 << sh_idx))) 236 !(rdsta_val & (1 << sh_idx))) {
234 ret = -EAGAIN; 237 ret = -EAGAIN;
235 if (ret)
236 break; 238 break;
239 }
240
237 dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx); 241 dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
238 /* Clear the contents before recreating the descriptor */ 242 /* Clear the contents before recreating the descriptor */
239 memset(desc, 0x00, CAAM_CMD_SZ * 7); 243 memset(desc, 0x00, CAAM_CMD_SZ * 7);
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 4b6642a25df5..1c6cbda56afe 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -512,7 +512,7 @@ static int __init padlock_init(void)
512 512
513 printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n"); 513 printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
514 514
515 if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) { 515 if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) {
516 ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS; 516 ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
517 cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS; 517 cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
518 printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n"); 518 printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 188f44b7eb27..5d64c08b7f47 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -1922,15 +1922,21 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
1922 uint32_t aes_control; 1922 uint32_t aes_control;
1923 unsigned long flags; 1923 unsigned long flags;
1924 int err; 1924 int err;
1925 u8 *iv;
1925 1926
1926 aes_control = SSS_AES_KEY_CHANGE_MODE; 1927 aes_control = SSS_AES_KEY_CHANGE_MODE;
1927 if (mode & FLAGS_AES_DECRYPT) 1928 if (mode & FLAGS_AES_DECRYPT)
1928 aes_control |= SSS_AES_MODE_DECRYPT; 1929 aes_control |= SSS_AES_MODE_DECRYPT;
1929 1930
1930 if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) 1931 if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
1931 aes_control |= SSS_AES_CHAIN_MODE_CBC; 1932 aes_control |= SSS_AES_CHAIN_MODE_CBC;
1932 else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) 1933 iv = req->info;
1934 } else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
1933 aes_control |= SSS_AES_CHAIN_MODE_CTR; 1935 aes_control |= SSS_AES_CHAIN_MODE_CTR;
1936 iv = req->info;
1937 } else {
1938 iv = NULL; /* AES_ECB */
1939 }
1934 1940
1935 if (dev->ctx->keylen == AES_KEYSIZE_192) 1941 if (dev->ctx->keylen == AES_KEYSIZE_192)
1936 aes_control |= SSS_AES_KEY_SIZE_192; 1942 aes_control |= SSS_AES_KEY_SIZE_192;
@@ -1961,7 +1967,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
1961 goto outdata_error; 1967 goto outdata_error;
1962 1968
1963 SSS_AES_WRITE(dev, AES_CONTROL, aes_control); 1969 SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
1964 s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen); 1970 s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen);
1965 1971
1966 s5p_set_dma_indata(dev, dev->sg_src); 1972 s5p_set_dma_indata(dev, dev->sg_src);
1967 s5p_set_dma_outdata(dev, dev->sg_dst); 1973 s5p_set_dma_outdata(dev, dev->sg_dst);
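The s5p-sss fix closes a bogus dereference: req->info carries an IV only for chaining modes, so the ECB path must pass s5p_set_aes() a NULL IV rather than whatever req->info happens to point at. The same decision as a switch, for clarity (sketch):

	switch (mode & FLAGS_AES_MODE_MASK) {
	case FLAGS_AES_CBC:
	case FLAGS_AES_CTR:
		iv = req->info;	/* chaining modes carry an IV */
		break;
	default:		/* FLAGS_AES_ECB */
		iv = NULL;	/* nothing to program into the IV registers */
	}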
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
index 0d01d1624252..63d636424161 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
@@ -28,7 +28,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
28 algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng); 28 algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
29 ss = algt->ss; 29 ss = algt->ss;
30 30
31 spin_lock(&ss->slock); 31 spin_lock_bh(&ss->slock);
32 32
33 writel(mode, ss->base + SS_CTL); 33 writel(mode, ss->base + SS_CTL);
34 34
@@ -51,6 +51,6 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
51 } 51 }
52 52
53 writel(0, ss->base + SS_CTL); 53 writel(0, ss->base + SS_CTL);
54 spin_unlock(&ss->slock); 54 spin_unlock_bh(&ss->slock);
55 return dlen; 55 return 0;
56} 56}
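Two fixes share the sun4i hunk: the lock moves to the _bh variant because the same lock is also taken from softirq context elsewhere in the driver, and generate() now returns 0, since a crypto_rng .generate callback reports 0 or -errno, never a byte count. The contract as a minimal stub (hypothetical driver; get_random_bytes() is a stand-in entropy source):

	static int demo_rng_generate(struct crypto_rng *tfm, const u8 *src,
				     unsigned int slen, u8 *dst, unsigned int dlen)
	{
		get_random_bytes(dst, dlen);
		return 0;		/* 0 on success, not dlen */
	}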
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 9c80e0cb1664..6882fa2f8bad 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1138,6 +1138,10 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1138 struct talitos_private *priv = dev_get_drvdata(dev); 1138 struct talitos_private *priv = dev_get_drvdata(dev);
1139 bool is_sec1 = has_ftr_sec1(priv); 1139 bool is_sec1 = has_ftr_sec1(priv);
1140 1140
1141 if (!src) {
1142 to_talitos_ptr(ptr, 0, 0, is_sec1);
1143 return 1;
1144 }
1141 if (sg_count == 1) { 1145 if (sg_count == 1) {
1142 to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1); 1146 to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
1143 return sg_count; 1147 return sg_count;
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 8b16ec595fa7..329cb96f886f 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -3147,7 +3147,7 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
3147 struct amd64_family_type *fam_type = NULL; 3147 struct amd64_family_type *fam_type = NULL;
3148 3148
3149 pvt->ext_model = boot_cpu_data.x86_model >> 4; 3149 pvt->ext_model = boot_cpu_data.x86_model >> 4;
3150 pvt->stepping = boot_cpu_data.x86_mask; 3150 pvt->stepping = boot_cpu_data.x86_stepping;
3151 pvt->model = boot_cpu_data.x86_model; 3151 pvt->model = boot_cpu_data.x86_model;
3152 pvt->fam = boot_cpu_data.x86; 3152 pvt->fam = boot_cpu_data.x86;
3153 3153
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index 0a44d43802fe..3ec4c715e240 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * extcon-axp288.c - X-Power AXP288 PMIC extcon cable detection driver 2 * extcon-axp288.c - X-Power AXP288 PMIC extcon cable detection driver
3 * 3 *
4 * Copyright (C) 2016-2017 Hans de Goede <hdegoede@redhat.com>
5 * Copyright (C) 2015 Intel Corporation 4 * Copyright (C) 2015 Intel Corporation
6 * Author: Ramakrishna Pallala <ramakrishna.pallala@intel.com> 5 * Author: Ramakrishna Pallala <ramakrishna.pallala@intel.com>
7 * 6 *
@@ -98,15 +97,13 @@ struct axp288_extcon_info {
98 struct device *dev; 97 struct device *dev;
99 struct regmap *regmap; 98 struct regmap *regmap;
100 struct regmap_irq_chip_data *regmap_irqc; 99 struct regmap_irq_chip_data *regmap_irqc;
101 struct delayed_work det_work;
102 int irq[EXTCON_IRQ_END]; 100 int irq[EXTCON_IRQ_END];
103 struct extcon_dev *edev; 101 struct extcon_dev *edev;
104 unsigned int previous_cable; 102 unsigned int previous_cable;
105 bool first_detect_done;
106}; 103};
107 104
108/* Power up/down reason string array */ 105/* Power up/down reason string array */
109static char *axp288_pwr_up_down_info[] = { 106static const char * const axp288_pwr_up_down_info[] = {
110 "Last wake caused by user pressing the power button", 107 "Last wake caused by user pressing the power button",
111 "Last wake caused by a charger insertion", 108 "Last wake caused by a charger insertion",
112 "Last wake caused by a battery insertion", 109 "Last wake caused by a battery insertion",
@@ -124,7 +121,7 @@ static char *axp288_pwr_up_down_info[] = {
124 */ 121 */
125static void axp288_extcon_log_rsi(struct axp288_extcon_info *info) 122static void axp288_extcon_log_rsi(struct axp288_extcon_info *info)
126{ 123{
127 char **rsi; 124 const char * const *rsi;
128 unsigned int val, i, clear_mask = 0; 125 unsigned int val, i, clear_mask = 0;
129 int ret; 126 int ret;
130 127
@@ -140,25 +137,6 @@ static void axp288_extcon_log_rsi(struct axp288_extcon_info *info)
140 regmap_write(info->regmap, AXP288_PS_BOOT_REASON_REG, clear_mask); 137 regmap_write(info->regmap, AXP288_PS_BOOT_REASON_REG, clear_mask);
141} 138}
142 139
143static void axp288_chrg_detect_complete(struct axp288_extcon_info *info)
144{
145 /*
146 * We depend on other drivers to do things like mux the data lines,
147 * enable/disable vbus based on the id-pin, etc. Sometimes the BIOS has
148 * not set these things up correctly resulting in the initial charger
149 * cable type detection giving a wrong result and we end up not charging
150 * or charging at only 0.5A.
151 *
152 * So we schedule a second cable type detection after 2 seconds to
153 * give the other drivers time to load and do their thing.
154 */
155 if (!info->first_detect_done) {
156 queue_delayed_work(system_wq, &info->det_work,
157 msecs_to_jiffies(2000));
158 info->first_detect_done = true;
159 }
160}
161
162static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info) 140static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
163{ 141{
164 int ret, stat, cfg, pwr_stat; 142 int ret, stat, cfg, pwr_stat;
@@ -223,8 +201,6 @@ no_vbus:
223 info->previous_cable = cable; 201 info->previous_cable = cable;
224 } 202 }
225 203
226 axp288_chrg_detect_complete(info);
227
228 return 0; 204 return 0;
229 205
230dev_det_ret: 206dev_det_ret:
@@ -246,11 +222,8 @@ static irqreturn_t axp288_extcon_isr(int irq, void *data)
246 return IRQ_HANDLED; 222 return IRQ_HANDLED;
247} 223}
248 224
249static void axp288_extcon_det_work(struct work_struct *work) 225static void axp288_extcon_enable(struct axp288_extcon_info *info)
250{ 226{
251 struct axp288_extcon_info *info =
252 container_of(work, struct axp288_extcon_info, det_work.work);
253
254 regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG, 227 regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG,
255 BC_GLOBAL_RUN, 0); 228 BC_GLOBAL_RUN, 0);
256 /* Enable the charger detection logic */ 229 /* Enable the charger detection logic */
@@ -272,7 +245,6 @@ static int axp288_extcon_probe(struct platform_device *pdev)
272 info->regmap = axp20x->regmap; 245 info->regmap = axp20x->regmap;
273 info->regmap_irqc = axp20x->regmap_irqc; 246 info->regmap_irqc = axp20x->regmap_irqc;
274 info->previous_cable = EXTCON_NONE; 247 info->previous_cable = EXTCON_NONE;
275 INIT_DELAYED_WORK(&info->det_work, axp288_extcon_det_work);
276 248
277 platform_set_drvdata(pdev, info); 249 platform_set_drvdata(pdev, info);
278 250
@@ -318,7 +290,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
318 } 290 }
319 291
320 /* Start charger cable type detection */ 292 /* Start charger cable type detection */
321 queue_delayed_work(system_wq, &info->det_work, 0); 293 axp288_extcon_enable(info);
322 294
323 return 0; 295 return 0;
324} 296}
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c
index c8691b5a9cb0..191e99f06a9a 100644
--- a/drivers/extcon/extcon-intel-int3496.c
+++ b/drivers/extcon/extcon-intel-int3496.c
@@ -153,8 +153,9 @@ static int int3496_probe(struct platform_device *pdev)
153 return ret; 153 return ret;
154 } 154 }
155 155
156 /* queue initial processing of id-pin */ 156 /* process id-pin so that we start with the right status */
157 queue_delayed_work(system_wq, &data->work, 0); 157 queue_delayed_work(system_wq, &data->work, 0);
158 flush_delayed_work(&data->work);
158 159
159 platform_set_drvdata(pdev, data); 160 platform_set_drvdata(pdev, data);
160 161
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index e2c3c5ec42d1..c53095b3b0fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -568,6 +568,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
568 /* HG _PR3 doesn't seem to work on this A+A weston board */ 568 /* HG _PR3 doesn't seem to work on this A+A weston board */
569 { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX }, 569 { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
570 { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, 570 { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
571 { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
571 { 0, 0, 0, 0, 0 }, 572 { 0, 0, 0, 0, 0 },
572}; 573};
573 574
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 8ca3783f2deb..74d2efaec52f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -736,9 +736,11 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
736 enum drm_connector_status ret = connector_status_disconnected; 736 enum drm_connector_status ret = connector_status_disconnected;
737 int r; 737 int r;
738 738
739 r = pm_runtime_get_sync(connector->dev->dev); 739 if (!drm_kms_helper_is_poll_worker()) {
740 if (r < 0) 740 r = pm_runtime_get_sync(connector->dev->dev);
741 return connector_status_disconnected; 741 if (r < 0)
742 return connector_status_disconnected;
743 }
742 744
743 if (encoder) { 745 if (encoder) {
744 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 746 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
@@ -757,8 +759,12 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
757 /* check acpi lid status ??? */ 759 /* check acpi lid status ??? */
758 760
759 amdgpu_connector_update_scratch_regs(connector, ret); 761 amdgpu_connector_update_scratch_regs(connector, ret);
760 pm_runtime_mark_last_busy(connector->dev->dev); 762
761 pm_runtime_put_autosuspend(connector->dev->dev); 763 if (!drm_kms_helper_is_poll_worker()) {
764 pm_runtime_mark_last_busy(connector->dev->dev);
765 pm_runtime_put_autosuspend(connector->dev->dev);
766 }
767
762 return ret; 768 return ret;
763} 769}
764 770
@@ -868,9 +874,11 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
868 enum drm_connector_status ret = connector_status_disconnected; 874 enum drm_connector_status ret = connector_status_disconnected;
869 int r; 875 int r;
870 876
871 r = pm_runtime_get_sync(connector->dev->dev); 877 if (!drm_kms_helper_is_poll_worker()) {
872 if (r < 0) 878 r = pm_runtime_get_sync(connector->dev->dev);
873 return connector_status_disconnected; 879 if (r < 0)
880 return connector_status_disconnected;
881 }
874 882
875 encoder = amdgpu_connector_best_single_encoder(connector); 883 encoder = amdgpu_connector_best_single_encoder(connector);
876 if (!encoder) 884 if (!encoder)
@@ -924,8 +932,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
924 amdgpu_connector_update_scratch_regs(connector, ret); 932 amdgpu_connector_update_scratch_regs(connector, ret);
925 933
926out: 934out:
927 pm_runtime_mark_last_busy(connector->dev->dev); 935 if (!drm_kms_helper_is_poll_worker()) {
928 pm_runtime_put_autosuspend(connector->dev->dev); 936 pm_runtime_mark_last_busy(connector->dev->dev);
937 pm_runtime_put_autosuspend(connector->dev->dev);
938 }
929 939
930 return ret; 940 return ret;
931} 941}
@@ -988,9 +998,11 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
988 enum drm_connector_status ret = connector_status_disconnected; 998 enum drm_connector_status ret = connector_status_disconnected;
989 bool dret = false, broken_edid = false; 999 bool dret = false, broken_edid = false;
990 1000
991 r = pm_runtime_get_sync(connector->dev->dev); 1001 if (!drm_kms_helper_is_poll_worker()) {
992 if (r < 0) 1002 r = pm_runtime_get_sync(connector->dev->dev);
993 return connector_status_disconnected; 1003 if (r < 0)
1004 return connector_status_disconnected;
1005 }
994 1006
995 if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { 1007 if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
996 ret = connector->status; 1008 ret = connector->status;
@@ -1115,8 +1127,10 @@ out:
1115 amdgpu_connector_update_scratch_regs(connector, ret); 1127 amdgpu_connector_update_scratch_regs(connector, ret);
1116 1128
1117exit: 1129exit:
1118 pm_runtime_mark_last_busy(connector->dev->dev); 1130 if (!drm_kms_helper_is_poll_worker()) {
1119 pm_runtime_put_autosuspend(connector->dev->dev); 1131 pm_runtime_mark_last_busy(connector->dev->dev);
1132 pm_runtime_put_autosuspend(connector->dev->dev);
1133 }
1120 1134
1121 return ret; 1135 return ret;
1122} 1136}
@@ -1359,9 +1373,11 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
1359 struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector); 1373 struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
1360 int r; 1374 int r;
1361 1375
1362 r = pm_runtime_get_sync(connector->dev->dev); 1376 if (!drm_kms_helper_is_poll_worker()) {
1363 if (r < 0) 1377 r = pm_runtime_get_sync(connector->dev->dev);
1364 return connector_status_disconnected; 1378 if (r < 0)
1379 return connector_status_disconnected;
1380 }
1365 1381
1366 if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { 1382 if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
1367 ret = connector->status; 1383 ret = connector->status;
@@ -1429,8 +1445,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
1429 1445
1430 amdgpu_connector_update_scratch_regs(connector, ret); 1446 amdgpu_connector_update_scratch_regs(connector, ret);
1431out: 1447out:
1432 pm_runtime_mark_last_busy(connector->dev->dev); 1448 if (!drm_kms_helper_is_poll_worker()) {
1433 pm_runtime_put_autosuspend(connector->dev->dev); 1449 pm_runtime_mark_last_busy(connector->dev->dev);
1450 pm_runtime_put_autosuspend(connector->dev->dev);
1451 }
1434 1452
1435 return ret; 1453 return ret;
1436} 1454}
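
All four amdgpu ->detect hunks above apply the same shape: the output poll worker only runs while the device is runtime active, so taking a runtime PM reference from inside it is unnecessary and can deadlock against the autosuspend worker. A minimal sketch of the pattern, assuming a hypothetical foo_connector_detect() hook (all PM and helper calls are the real kernel APIs used in the hunks):

static enum drm_connector_status
foo_connector_detect(struct drm_connector *connector, bool force)
{
	enum drm_connector_status status = connector_status_disconnected;
	int r;

	/* Only take a wakeref when NOT called from the output poll worker. */
	if (!drm_kms_helper_is_poll_worker()) {
		r = pm_runtime_get_sync(connector->dev->dev);
		if (r < 0)
			return connector_status_disconnected;
	}

	/* ... probe the hardware and set status ... */

	if (!drm_kms_helper_is_poll_worker()) {
		pm_runtime_mark_last_busy(connector->dev->dev);
		pm_runtime_put_autosuspend(connector->dev->dev);
	}

	return status;
}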
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 61e8c3e02d16..33d91e4474ea 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -718,7 +718,7 @@ static enum link_training_result perform_channel_equalization_sequence(
718 uint32_t retries_ch_eq; 718 uint32_t retries_ch_eq;
719 enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; 719 enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
720 union lane_align_status_updated dpcd_lane_status_updated = {{0}}; 720 union lane_align_status_updated dpcd_lane_status_updated = {{0}};
721 union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};; 721 union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};
722 722
723 hw_tr_pattern = get_supported_tp(link); 723 hw_tr_pattern = get_supported_tp(link);
724 724
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 4c3223a4d62b..adb6e7b9280c 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -162,7 +162,7 @@ static int pp_hw_init(void *handle)
162 if(hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) { 162 if(hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) {
163 pr_err("smc start failed\n"); 163 pr_err("smc start failed\n");
164 hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); 164 hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
165 return -EINVAL;; 165 return -EINVAL;
166 } 166 }
167 if (ret == PP_DPM_DISABLED) 167 if (ret == PP_DPM_DISABLED)
168 goto exit; 168 goto exit;
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index cd23b1b28259..c91b9b054e3f 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -294,22 +294,7 @@ static void cirrus_crtc_prepare(struct drm_crtc *crtc)
294{ 294{
295} 295}
296 296
297/* 297static void cirrus_crtc_load_lut(struct drm_crtc *crtc)
298 * This is called after a mode is programmed. It should reverse anything done
299 * by the prepare function
300 */
301static void cirrus_crtc_commit(struct drm_crtc *crtc)
302{
303}
304
305/*
306 * The core can pass us a set of gamma values to program. We actually only
307 * use this for 8-bit mode so can't perform smooth fades on deeper modes,
308 * but it's a requirement that we provide the function
309 */
310static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
311 u16 *blue, uint32_t size,
312 struct drm_modeset_acquire_ctx *ctx)
313{ 298{
314 struct drm_device *dev = crtc->dev; 299 struct drm_device *dev = crtc->dev;
315 struct cirrus_device *cdev = dev->dev_private; 300 struct cirrus_device *cdev = dev->dev_private;
@@ -317,7 +302,7 @@ static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
317 int i; 302 int i;
318 303
319 if (!crtc->enabled) 304 if (!crtc->enabled)
320 return 0; 305 return;
321 306
322 r = crtc->gamma_store; 307 r = crtc->gamma_store;
323 g = r + crtc->gamma_size; 308 g = r + crtc->gamma_size;
@@ -330,6 +315,27 @@ static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
330 WREG8(PALETTE_DATA, *g++ >> 8); 315 WREG8(PALETTE_DATA, *g++ >> 8);
331 WREG8(PALETTE_DATA, *b++ >> 8); 316 WREG8(PALETTE_DATA, *b++ >> 8);
332 } 317 }
318}
319
320/*
321 * This is called after a mode is programmed. It should reverse anything done
322 * by the prepare function
323 */
324static void cirrus_crtc_commit(struct drm_crtc *crtc)
325{
326 cirrus_crtc_load_lut(crtc);
327}
328
329/*
330 * The core can pass us a set of gamma values to program. We actually only
331 * use this for 8-bit mode so can't perform smooth fades on deeper modes,
332 * but it's a requirement that we provide the function
333 */
334static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
335 u16 *blue, uint32_t size,
336 struct drm_modeset_acquire_ctx *ctx)
337{
338 cirrus_crtc_load_lut(crtc);
333 339
334 return 0; 340 return 0;
335} 341}
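
The cirrus refactor above factors the palette programming into cirrus_crtc_load_lut() so both the commit and gamma_set hooks share it. A sketch of the buffer layout the helper relies on: the DRM core stores the gamma table as three consecutive u16 planes in crtc->gamma_store, and an 8-bit palette takes the high byte of each 16-bit component.

	u16 *r = crtc->gamma_store;
	u16 *g = r + crtc->gamma_size;
	u16 *b = g + crtc->gamma_size;
	int i;

	for (i = 0; i < crtc->gamma_size; i++) {
		u8 red   = *r++ >> 8;	/* 16-bit component -> 8-bit palette */
		u8 green = *g++ >> 8;
		u8 blue  = *b++ >> 8;
		/* program hardware palette entry i with (red, green, blue) */
	}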
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index ab4032167094..ae3cbfe9e01c 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1878,6 +1878,8 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
1878 new_crtc_state->event->base.completion = &commit->flip_done; 1878 new_crtc_state->event->base.completion = &commit->flip_done;
1879 new_crtc_state->event->base.completion_release = release_crtc_commit; 1879 new_crtc_state->event->base.completion_release = release_crtc_commit;
1880 drm_crtc_commit_get(commit); 1880 drm_crtc_commit_get(commit);
1881
1882 commit->abort_completion = true;
1881 } 1883 }
1882 1884
1883 for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) { 1885 for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
@@ -3421,8 +3423,21 @@ EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
3421void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state) 3423void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state)
3422{ 3424{
3423 if (state->commit) { 3425 if (state->commit) {
3426 /*
3427 * In the event that a non-blocking commit returns
3428 * -ERESTARTSYS before the commit_tail work is queued, we will
3429 * have an extra reference to the commit object. Release it, if
3430 * the event has not been consumed by the worker.
3431 *
3432 * state->event may be freed, so we can't directly look at
3433 * state->event->base.completion.
3434 */
3435 if (state->event && state->commit->abort_completion)
3436 drm_crtc_commit_put(state->commit);
3437
3424 kfree(state->commit->event); 3438 kfree(state->commit->event);
3425 state->commit->event = NULL; 3439 state->commit->event = NULL;
3440
3426 drm_crtc_commit_put(state->commit); 3441 drm_crtc_commit_put(state->commit);
3427 } 3442 }
3428 3443
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ddd537914575..4f751a9d71a3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -113,6 +113,9 @@ static const struct edid_quirk {
113 /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */ 113 /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
114 { "AEO", 0, EDID_QUIRK_FORCE_6BPC }, 114 { "AEO", 0, EDID_QUIRK_FORCE_6BPC },
115 115
116 /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
117 { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
118
116 /* Belinea 10 15 55 */ 119 /* Belinea 10 15 55 */
117 { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, 120 { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
118 { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, 121 { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
@@ -162,6 +165,24 @@ static const struct edid_quirk {
162 165
163 /* HTC Vive VR Headset */ 166 /* HTC Vive VR Headset */
164 { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP }, 167 { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
168
169 /* Oculus Rift DK1, DK2, and CV1 VR Headsets */
170 { "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP },
171 { "OVR", 0x0003, EDID_QUIRK_NON_DESKTOP },
172 { "OVR", 0x0004, EDID_QUIRK_NON_DESKTOP },
173
174 /* Windows Mixed Reality Headsets */
175 { "ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP },
176 { "HPN", 0x3515, EDID_QUIRK_NON_DESKTOP },
177 { "LEN", 0x0408, EDID_QUIRK_NON_DESKTOP },
178 { "LEN", 0xb800, EDID_QUIRK_NON_DESKTOP },
179 { "FUJ", 0x1970, EDID_QUIRK_NON_DESKTOP },
180 { "DEL", 0x7fce, EDID_QUIRK_NON_DESKTOP },
181 { "SEC", 0x144a, EDID_QUIRK_NON_DESKTOP },
182 { "AUS", 0xc102, EDID_QUIRK_NON_DESKTOP },
183
184 /* Sony PlayStation VR Headset */
185 { "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP },
165}; 186};
166 187
167/* 188/*
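
The quirk entries added above match on the EDID's 3-letter PNP vendor ID plus the 16-bit product code; EDID_QUIRK_NON_DESKTOP marks the device as a head-mounted display so compositors do not treat it as a regular monitor. A sketch of one entry, assuming the struct layout drm_edid.c uses at this point:

static const struct edid_quirk {
	char vendor[4];		/* PNP ID, e.g. "SNY" */
	int product_id;		/* 16-bit product code from the EDID */
	u32 quirks;
} example = { "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP };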
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 186c4e90cc1c..89eef1bb4ddc 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -836,9 +836,24 @@ struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
836 if (!mm->color_adjust) 836 if (!mm->color_adjust)
837 return NULL; 837 return NULL;
838 838
839 hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack); 839 /*
840 hole_start = __drm_mm_hole_node_start(hole); 840 * The hole found during scanning should ideally be the first element
841 hole_end = hole_start + hole->hole_size; 841 * in the hole_stack list, but due to side-effects in the driver it
842 * may not be.
843 */
844 list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
845 hole_start = __drm_mm_hole_node_start(hole);
846 hole_end = hole_start + hole->hole_size;
847
848 if (hole_start <= scan->hit_start &&
849 hole_end >= scan->hit_end)
850 break;
851 }
852
853 /* We should only be called after we found the hole previously */
854 DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
855 if (unlikely(&hole->hole_stack == &mm->hole_stack))
856 return NULL;
842 857
843 DRM_MM_BUG_ON(hole_start > scan->hit_start); 858 DRM_MM_BUG_ON(hole_start > scan->hit_start);
844 DRM_MM_BUG_ON(hole_end < scan->hit_end); 859 DRM_MM_BUG_ON(hole_end < scan->hit_end);
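
The DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack) check added above relies on a list_for_each_entry() property: if the loop runs to completion without a break, the cursor ends up pointing at the list head itself reinterpreted as an entry. A minimal standalone illustration with a hypothetical struct:

#include <linux/list.h>

struct item {
	struct list_head link;
	int key;
};

static struct item *find_item(struct list_head *head, int key)
{
	struct item *it;

	list_for_each_entry(it, head, link)
		if (it->key == key)
			break;

	/* No break taken: 'it' is the head sentinel, not a real entry. */
	if (&it->link == head)
		return NULL;

	return it;
}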
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 555fbe54d6e2..00b8445ba819 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -654,6 +654,26 @@ out:
654} 654}
655 655
656/** 656/**
657 * drm_kms_helper_is_poll_worker - is %current task an output poll worker?
658 *
659 * Determine if %current task is an output poll worker. This can be used
660 * to select distinct code paths for output polling versus other contexts.
661 *
662 * One use case is to avoid a deadlock between the output poll worker and
663 * the autosuspend worker wherein the latter waits for polling to finish
664 * upon calling drm_kms_helper_poll_disable(), while the former waits for
665 * runtime suspend to finish upon calling pm_runtime_get_sync() in a
666 * connector ->detect hook.
667 */
668bool drm_kms_helper_is_poll_worker(void)
669{
670 struct work_struct *work = current_work();
671
672 return work && work->func == output_poll_execute;
673}
674EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
675
676/**
657 * drm_kms_helper_poll_disable - disable output polling 677 * drm_kms_helper_poll_disable - disable output polling
658 * @dev: drm_device 678 * @dev: drm_device
659 * 679 *
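
The helper added above is an instance of a generic context test: current_work() (declared in linux/workqueue.h) returns the work item the current kworker is executing, or NULL in any other context, so comparing the callback pointer answers "is this task running work item X?". A sketch with a hypothetical work callback:

static void my_work_fn(struct work_struct *work);	/* hypothetical */

static bool running_from_my_worker(void)
{
	struct work_struct *work = current_work();

	return work && work->func == my_work_fn;
}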
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 2b8bf2dd6387..f68ef1b3a28c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -286,7 +286,6 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
286 286
287 node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL); 287 node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL);
288 if (!node) { 288 if (!node) {
289 dev_err(dev, "failed to allocate memory\n");
290 ret = -ENOMEM; 289 ret = -ENOMEM;
291 goto err; 290 goto err;
292 } 291 }
@@ -926,7 +925,7 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
926 struct drm_device *drm_dev = g2d->subdrv.drm_dev; 925 struct drm_device *drm_dev = g2d->subdrv.drm_dev;
927 struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node; 926 struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
928 struct drm_exynos_pending_g2d_event *e; 927 struct drm_exynos_pending_g2d_event *e;
929 struct timeval now; 928 struct timespec64 now;
930 929
931 if (list_empty(&runqueue_node->event_list)) 930 if (list_empty(&runqueue_node->event_list))
932 return; 931 return;
@@ -934,9 +933,9 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
934 e = list_first_entry(&runqueue_node->event_list, 933 e = list_first_entry(&runqueue_node->event_list,
935 struct drm_exynos_pending_g2d_event, base.link); 934 struct drm_exynos_pending_g2d_event, base.link);
936 935
937 do_gettimeofday(&now); 936 ktime_get_ts64(&now);
938 e->event.tv_sec = now.tv_sec; 937 e->event.tv_sec = now.tv_sec;
939 e->event.tv_usec = now.tv_usec; 938 e->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
940 e->event.cmdlist_no = cmdlist_no; 939 e->event.cmdlist_no = cmdlist_no;
941 940
942 drm_send_event(drm_dev, &e->base); 941 drm_send_event(drm_dev, &e->base);
@@ -1358,10 +1357,9 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
1358 return -EFAULT; 1357 return -EFAULT;
1359 1358
1360 runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL); 1359 runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
1361 if (!runqueue_node) { 1360 if (!runqueue_node)
1362 dev_err(dev, "failed to allocate memory\n");
1363 return -ENOMEM; 1361 return -ENOMEM;
1364 } 1362
1365 run_cmdlist = &runqueue_node->run_cmdlist; 1363 run_cmdlist = &runqueue_node->run_cmdlist;
1366 event_list = &runqueue_node->event_list; 1364 event_list = &runqueue_node->event_list;
1367 INIT_LIST_HEAD(run_cmdlist); 1365 INIT_LIST_HEAD(run_cmdlist);
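
Standalone form of the timestamping change above: do_gettimeofday() is deprecated because struct timeval is not y2038-safe on 32-bit, so the event is stamped from a timespec64 and the nanoseconds are scaled down to the microseconds the uAPI struct still carries. Note this also moves the stamp from wall-clock to monotonic time.

	struct timespec64 now;

	ktime_get_ts64(&now);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;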
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.h b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
deleted file mode 100644
index 71a0b4c0c1e8..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.h
+++ /dev/null
@@ -1,19 +0,0 @@
1/*
2 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
3 *
4 * Authors:
5 * YoungJun Cho <yj44.cho@samsung.com>
6 * Eunchul Kim <chulspro.kim@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#ifndef _EXYNOS_DRM_ROTATOR_H_
15#define _EXYNOS_DRM_ROTATOR_H_
16
17/* TODO */
18
19#endif
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index a4b75a46f946..abd84cbcf1c2 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1068,10 +1068,13 @@ static void hdmi_audio_config(struct hdmi_context *hdata)
1068 /* Configuration I2S input ports. Configure I2S_PIN_SEL_0~4 */ 1068 /* Configuration I2S input ports. Configure I2S_PIN_SEL_0~4 */
1069 hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_0, HDMI_I2S_SEL_SCLK(5) 1069 hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_0, HDMI_I2S_SEL_SCLK(5)
1070 | HDMI_I2S_SEL_LRCK(6)); 1070 | HDMI_I2S_SEL_LRCK(6));
1071 hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(1) 1071
1072 | HDMI_I2S_SEL_SDATA2(4)); 1072 hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(3)
1073 | HDMI_I2S_SEL_SDATA0(4));
1074
1073 hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_2, HDMI_I2S_SEL_SDATA3(1) 1075 hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_2, HDMI_I2S_SEL_SDATA3(1)
1074 | HDMI_I2S_SEL_SDATA2(2)); 1076 | HDMI_I2S_SEL_SDATA2(2));
1077
1075 hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_3, HDMI_I2S_SEL_DSD(0)); 1078 hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_3, HDMI_I2S_SEL_DSD(0));
1076 1079
1077 /* I2S_CON_1 & 2 */ 1080 /* I2S_CON_1 & 2 */
diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
index 30496134a3d0..d7cbe53c4c01 100644
--- a/drivers/gpu/drm/exynos/regs-fimc.h
+++ b/drivers/gpu/drm/exynos/regs-fimc.h
@@ -569,7 +569,7 @@
569#define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26) 569#define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26)
570#define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26) 570#define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
571#define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26) 571#define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26)
572#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff < 13) | (0xff < 0)) 572#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | (0xff << 0))
573 573
574/* Real input DMA size register */ 574/* Real input DMA size register */
575#define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31) 575#define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31)
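
Why the old EXYNOS_CIIMGEFF_PAT_CBCR_MASK was broken: '<' is a comparison, not a shift, so each half evaluated to 0 and the whole mask collapsed to 0. A standalone userspace demonstration:

#include <stdio.h>

int main(void)
{
	printf("0x%x\n", (0xff < 13) | (0xff < 0));	/* 0x0: two false comparisons */
	printf("0x%x\n", (0xff << 13) | (0xff << 0));	/* 0x1fe0ff: the intended mask */
	return 0;
}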
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index 04be0f7e8193..4420c203ac85 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -464,7 +464,7 @@
464 464
465/* I2S_PIN_SEL_1 */ 465/* I2S_PIN_SEL_1 */
466#define HDMI_I2S_SEL_SDATA1(x) (((x) & 0x7) << 4) 466#define HDMI_I2S_SEL_SDATA1(x) (((x) & 0x7) << 4)
467#define HDMI_I2S_SEL_SDATA2(x) ((x) & 0x7) 467#define HDMI_I2S_SEL_SDATA0(x) ((x) & 0x7)
468 468
469/* I2S_PIN_SEL_2 */ 469/* I2S_PIN_SEL_2 */
470#define HDMI_I2S_SEL_SDATA3(x) (((x) & 0x7) << 4) 470#define HDMI_I2S_SEL_SDATA3(x) (((x) & 0x7) << 4)
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 909499b73d03..021f722e2481 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -733,6 +733,25 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
733 return ret == 0 ? count : ret; 733 return ret == 0 ? count : ret;
734} 734}
735 735
736static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
737{
738 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
739 unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
740 struct intel_gvt *gvt = vgpu->gvt;
741 int offset;
742
743 /* Only allow MMIO GGTT entry access */
744 if (index != PCI_BASE_ADDRESS_0)
745 return false;
746
747 offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) -
748 intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
749
750 return (offset >= gvt->device_info.gtt_start_offset &&
751 offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) ?
752 true : false;
753}
754
736static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, 755static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
737 size_t count, loff_t *ppos) 756 size_t count, loff_t *ppos)
738{ 757{
@@ -742,7 +761,21 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
742 while (count) { 761 while (count) {
743 size_t filled; 762 size_t filled;
744 763
745 if (count >= 4 && !(*ppos % 4)) { 764 /* Only support GGTT entry 8 bytes read */
765 if (count >= 8 && !(*ppos % 8) &&
766 gtt_entry(mdev, ppos)) {
767 u64 val;
768
769 ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
770 ppos, false);
771 if (ret <= 0)
772 goto read_err;
773
774 if (copy_to_user(buf, &val, sizeof(val)))
775 goto read_err;
776
777 filled = 8;
778 } else if (count >= 4 && !(*ppos % 4)) {
746 u32 val; 779 u32 val;
747 780
748 ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), 781 ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
@@ -802,7 +835,21 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
802 while (count) { 835 while (count) {
803 size_t filled; 836 size_t filled;
804 837
805 if (count >= 4 && !(*ppos % 4)) { 838 /* Only support GGTT entry 8 bytes write */
839 if (count >= 8 && !(*ppos % 8) &&
840 gtt_entry(mdev, ppos)) {
841 u64 val;
842
843 if (copy_from_user(&val, buf, sizeof(val)))
844 goto write_err;
845
846 ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
847 ppos, true);
848 if (ret <= 0)
849 goto write_err;
850
851 filled = 8;
852 } else if (count >= 4 && !(*ppos % 4)) {
806 u32 val; 853 u32 val;
807 854
808 if (copy_from_user(&val, buf, sizeof(val))) 855 if (copy_from_user(&val, buf, sizeof(val)))
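
The point of the gtt_entry() gate above: GGTT page-table entries are 64 bits wide, and splitting a guest's entry update into two mediated 32-bit accesses would let GVT-g observe and shadow a half-updated PTE. The read and write paths therefore take the 64-bit branch only for an 8-byte-aligned access of at least 8 bytes that falls inside the GGTT range of BAR0, restated for the write side:

	if (count >= 8 && !(*ppos % 8) && gtt_entry(mdev, ppos)) {
		u64 val;

		if (copy_from_user(&val, buf, sizeof(val)))
			goto write_err;
		/* forward the full 64-bit entry in a single mediated access */
	}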
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 73ad6e90e49d..256f1bb522b7 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -118,6 +118,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
118 {RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */ 118 {RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
119 {RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */ 119 {RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
120 {RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */ 120 {RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
121 {RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
121 {RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */ 122 {RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
122 {RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */ 123 {RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
123 {RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */ 124 {RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
index 7a2511538f34..736bd2bc5127 100644
--- a/drivers/gpu/drm/i915/gvt/trace.h
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -333,7 +333,7 @@ TRACE_EVENT(render_mmio,
333 TP_PROTO(int old_id, int new_id, char *action, unsigned int reg, 333 TP_PROTO(int old_id, int new_id, char *action, unsigned int reg,
334 unsigned int old_val, unsigned int new_val), 334 unsigned int old_val, unsigned int new_val),
335 335
336 TP_ARGS(old_id, new_id, action, reg, new_val, old_val), 336 TP_ARGS(old_id, new_id, action, reg, old_val, new_val),
337 337
338 TP_STRUCT__entry( 338 TP_STRUCT__entry(
339 __field(int, old_id) 339 __field(int, old_id)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 173d0095e3b2..2f5209de0391 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1433,19 +1433,7 @@ void i915_driver_unload(struct drm_device *dev)
1433 1433
1434 intel_modeset_cleanup(dev); 1434 intel_modeset_cleanup(dev);
1435 1435
1436 /* 1436 intel_bios_cleanup(dev_priv);
1437 * free the memory space allocated for the child device
1438 * config parsed from VBT
1439 */
1440 if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
1441 kfree(dev_priv->vbt.child_dev);
1442 dev_priv->vbt.child_dev = NULL;
1443 dev_priv->vbt.child_dev_num = 0;
1444 }
1445 kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
1446 dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
1447 kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
1448 dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
1449 1437
1450 vga_switcheroo_unregister_client(pdev); 1438 vga_switcheroo_unregister_client(pdev);
1451 vga_client_register(pdev, NULL, NULL, NULL); 1439 vga_client_register(pdev, NULL, NULL, NULL);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a42deebedb0f..d307429a5ae0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1349,6 +1349,7 @@ struct intel_vbt_data {
1349 u32 size; 1349 u32 size;
1350 u8 *data; 1350 u8 *data;
1351 const u8 *sequence[MIPI_SEQ_MAX]; 1351 const u8 *sequence[MIPI_SEQ_MAX];
1352 u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
1352 } dsi; 1353 } dsi;
1353 1354
1354 int crt_ddc_pin; 1355 int crt_ddc_pin;
@@ -3657,6 +3658,7 @@ extern void intel_i2c_reset(struct drm_i915_private *dev_priv);
3657 3658
3658/* intel_bios.c */ 3659/* intel_bios.c */
3659void intel_bios_init(struct drm_i915_private *dev_priv); 3660void intel_bios_init(struct drm_i915_private *dev_priv);
3661void intel_bios_cleanup(struct drm_i915_private *dev_priv);
3660bool intel_bios_is_valid_vbt(const void *buf, size_t size); 3662bool intel_bios_is_valid_vbt(const void *buf, size_t size);
3661bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); 3663bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
3662bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); 3664bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 648e7536ff51..0c963fcf31ff 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -803,7 +803,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
803 803
804 case I915_CONTEXT_PARAM_PRIORITY: 804 case I915_CONTEXT_PARAM_PRIORITY:
805 { 805 {
806 int priority = args->value; 806 s64 priority = args->value;
807 807
808 if (args->size) 808 if (args->size)
809 ret = -EINVAL; 809 ret = -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
index 42ff06fe54a3..792facdb6702 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
@@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
84void 84void
85i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv) 85i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv)
86{ 86{
87 strncpy(dev_priv->perf.oa.test_config.uuid, 87 strlcpy(dev_priv->perf.oa.test_config.uuid,
88 "577e8e2c-3fa0-4875-8743-3538d585e3b0", 88 "577e8e2c-3fa0-4875-8743-3538d585e3b0",
89 UUID_STRING_LEN); 89 sizeof(dev_priv->perf.oa.test_config.uuid));
90 dev_priv->perf.oa.test_config.id = 1; 90 dev_priv->perf.oa.test_config.id = 1;
91 91
92 dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; 92 dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/i915_oa_cnl.c
index ff0ac3627cc4..ba9140c87cc0 100644
--- a/drivers/gpu/drm/i915/i915_oa_cnl.c
+++ b/drivers/gpu/drm/i915/i915_oa_cnl.c
@@ -96,9 +96,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
96void 96void
97i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv) 97i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv)
98{ 98{
99 strncpy(dev_priv->perf.oa.test_config.uuid, 99 strlcpy(dev_priv->perf.oa.test_config.uuid,
100 "db41edd4-d8e7-4730-ad11-b9a2d6833503", 100 "db41edd4-d8e7-4730-ad11-b9a2d6833503",
101 UUID_STRING_LEN); 101 sizeof(dev_priv->perf.oa.test_config.uuid));
102 dev_priv->perf.oa.test_config.id = 1; 102 dev_priv->perf.oa.test_config.id = 1;
103 103
104 dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; 104 dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
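
Why strncpy() was wrong in the two OA hunks above: the UUID literal is exactly UUID_STRING_LEN (36) characters, so strncpy(dst, src, UUID_STRING_LEN) copies 36 bytes and never writes a NUL terminator, leaving later reads to run past the buffer. strlcpy() always terminates within the given size, hence sizeof() as the bound:

	/* UUID_STRING_LEN comes from linux/uuid.h */
	char uuid[UUID_STRING_LEN + 1];	/* 36 characters + NUL */

	strlcpy(uuid, "577e8e2c-3fa0-4875-8743-3538d585e3b0", sizeof(uuid));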
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 55a8a1e29424..0e9b98c32b62 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -285,26 +285,41 @@ static u64 count_interrupts(struct drm_i915_private *i915)
285 return sum; 285 return sum;
286} 286}
287 287
288static void i915_pmu_event_destroy(struct perf_event *event) 288static void engine_event_destroy(struct perf_event *event)
289{ 289{
290 WARN_ON(event->parent); 290 struct drm_i915_private *i915 =
291 container_of(event->pmu, typeof(*i915), pmu.base);
292 struct intel_engine_cs *engine;
293
294 engine = intel_engine_lookup_user(i915,
295 engine_event_class(event),
296 engine_event_instance(event));
297 if (WARN_ON_ONCE(!engine))
298 return;
299
300 if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
301 intel_engine_supports_stats(engine))
302 intel_disable_engine_stats(engine);
291} 303}
292 304
293static int engine_event_init(struct perf_event *event) 305static void i915_pmu_event_destroy(struct perf_event *event)
294{ 306{
295 struct drm_i915_private *i915 = 307 WARN_ON(event->parent);
296 container_of(event->pmu, typeof(*i915), pmu.base);
297 308
298 if (!intel_engine_lookup_user(i915, engine_event_class(event), 309 if (is_engine_event(event))
299 engine_event_instance(event))) 310 engine_event_destroy(event);
300 return -ENODEV; 311}
301 312
302 switch (engine_event_sample(event)) { 313static int
314engine_event_status(struct intel_engine_cs *engine,
315 enum drm_i915_pmu_engine_sample sample)
316{
317 switch (sample) {
303 case I915_SAMPLE_BUSY: 318 case I915_SAMPLE_BUSY:
304 case I915_SAMPLE_WAIT: 319 case I915_SAMPLE_WAIT:
305 break; 320 break;
306 case I915_SAMPLE_SEMA: 321 case I915_SAMPLE_SEMA:
307 if (INTEL_GEN(i915) < 6) 322 if (INTEL_GEN(engine->i915) < 6)
308 return -ENODEV; 323 return -ENODEV;
309 break; 324 break;
310 default: 325 default:
@@ -314,6 +329,30 @@ static int engine_event_init(struct perf_event *event)
314 return 0; 329 return 0;
315} 330}
316 331
332static int engine_event_init(struct perf_event *event)
333{
334 struct drm_i915_private *i915 =
335 container_of(event->pmu, typeof(*i915), pmu.base);
336 struct intel_engine_cs *engine;
337 u8 sample;
338 int ret;
339
340 engine = intel_engine_lookup_user(i915, engine_event_class(event),
341 engine_event_instance(event));
342 if (!engine)
343 return -ENODEV;
344
345 sample = engine_event_sample(event);
346 ret = engine_event_status(engine, sample);
347 if (ret)
348 return ret;
349
350 if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine))
351 ret = intel_enable_engine_stats(engine);
352
353 return ret;
354}
355
317static int i915_pmu_event_init(struct perf_event *event) 356static int i915_pmu_event_init(struct perf_event *event)
318{ 357{
319 struct drm_i915_private *i915 = 358 struct drm_i915_private *i915 =
@@ -370,7 +409,94 @@ static int i915_pmu_event_init(struct perf_event *event)
370 return 0; 409 return 0;
371} 410}
372 411
373static u64 __i915_pmu_event_read(struct perf_event *event) 412static u64 __get_rc6(struct drm_i915_private *i915)
413{
414 u64 val;
415
416 val = intel_rc6_residency_ns(i915,
417 IS_VALLEYVIEW(i915) ?
418 VLV_GT_RENDER_RC6 :
419 GEN6_GT_GFX_RC6);
420
421 if (HAS_RC6p(i915))
422 val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);
423
424 if (HAS_RC6pp(i915))
425 val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);
426
427 return val;
428}
429
430static u64 get_rc6(struct drm_i915_private *i915, bool locked)
431{
432#if IS_ENABLED(CONFIG_PM)
433 unsigned long flags;
434 u64 val;
435
436 if (intel_runtime_pm_get_if_in_use(i915)) {
437 val = __get_rc6(i915);
438 intel_runtime_pm_put(i915);
439
440 /*
441 * If we are coming back from being runtime suspended we must
442 * be careful not to report a larger value than returned
443 * previously.
444 */
445
446 if (!locked)
447 spin_lock_irqsave(&i915->pmu.lock, flags);
448
449 if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
450 i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
451 i915->pmu.sample[__I915_SAMPLE_RC6].cur = val;
452 } else {
453 val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
454 }
455
456 if (!locked)
457 spin_unlock_irqrestore(&i915->pmu.lock, flags);
458 } else {
459 struct pci_dev *pdev = i915->drm.pdev;
460 struct device *kdev = &pdev->dev;
461 unsigned long flags2;
462
463 /*
464 * We are runtime suspended.
465 *
466 * Report the delta from when the device was suspended to now,
467 * on top of the last known real value, as the approximated RC6
468 * counter value.
469 */
470 if (!locked)
471 spin_lock_irqsave(&i915->pmu.lock, flags);
472
473 spin_lock_irqsave(&kdev->power.lock, flags2);
474
475 if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
476 i915->pmu.suspended_jiffies_last =
477 kdev->power.suspended_jiffies;
478
479 val = kdev->power.suspended_jiffies -
480 i915->pmu.suspended_jiffies_last;
481 val += jiffies - kdev->power.accounting_timestamp;
482
483 spin_unlock_irqrestore(&kdev->power.lock, flags2);
484
485 val = jiffies_to_nsecs(val);
486 val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
487 i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
488
489 if (!locked)
490 spin_unlock_irqrestore(&i915->pmu.lock, flags);
491 }
492
493 return val;
494#else
495 return __get_rc6(i915);
496#endif
497}
498
499static u64 __i915_pmu_event_read(struct perf_event *event, bool locked)
374{ 500{
375 struct drm_i915_private *i915 = 501 struct drm_i915_private *i915 =
376 container_of(event->pmu, typeof(*i915), pmu.base); 502 container_of(event->pmu, typeof(*i915), pmu.base);
@@ -387,7 +513,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
387 if (WARN_ON_ONCE(!engine)) { 513 if (WARN_ON_ONCE(!engine)) {
388 /* Do nothing */ 514 /* Do nothing */
389 } else if (sample == I915_SAMPLE_BUSY && 515 } else if (sample == I915_SAMPLE_BUSY &&
390 engine->pmu.busy_stats) { 516 intel_engine_supports_stats(engine)) {
391 val = ktime_to_ns(intel_engine_get_busy_time(engine)); 517 val = ktime_to_ns(intel_engine_get_busy_time(engine));
392 } else { 518 } else {
393 val = engine->pmu.sample[sample].cur; 519 val = engine->pmu.sample[sample].cur;
@@ -408,18 +534,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
408 val = count_interrupts(i915); 534 val = count_interrupts(i915);
409 break; 535 break;
410 case I915_PMU_RC6_RESIDENCY: 536 case I915_PMU_RC6_RESIDENCY:
411 intel_runtime_pm_get(i915); 537 val = get_rc6(i915, locked);
412 val = intel_rc6_residency_ns(i915,
413 IS_VALLEYVIEW(i915) ?
414 VLV_GT_RENDER_RC6 :
415 GEN6_GT_GFX_RC6);
416 if (HAS_RC6p(i915))
417 val += intel_rc6_residency_ns(i915,
418 GEN6_GT_GFX_RC6p);
419 if (HAS_RC6pp(i915))
420 val += intel_rc6_residency_ns(i915,
421 GEN6_GT_GFX_RC6pp);
422 intel_runtime_pm_put(i915);
423 break; 538 break;
424 } 539 }
425 } 540 }
@@ -434,7 +549,7 @@ static void i915_pmu_event_read(struct perf_event *event)
434 549
435again: 550again:
436 prev = local64_read(&hwc->prev_count); 551 prev = local64_read(&hwc->prev_count);
437 new = __i915_pmu_event_read(event); 552 new = __i915_pmu_event_read(event, false);
438 553
439 if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev) 554 if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
440 goto again; 555 goto again;
@@ -442,12 +557,6 @@ again:
442 local64_add(new - prev, &event->count); 557 local64_add(new - prev, &event->count);
443} 558}
444 559
445static bool engine_needs_busy_stats(struct intel_engine_cs *engine)
446{
447 return intel_engine_supports_stats(engine) &&
448 (engine->pmu.enable & BIT(I915_SAMPLE_BUSY));
449}
450
451static void i915_pmu_enable(struct perf_event *event) 560static void i915_pmu_enable(struct perf_event *event)
452{ 561{
453 struct drm_i915_private *i915 = 562 struct drm_i915_private *i915 =
@@ -487,21 +596,7 @@ static void i915_pmu_enable(struct perf_event *event)
487 596
488 GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS); 597 GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
489 GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); 598 GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
490 if (engine->pmu.enable_count[sample]++ == 0) { 599 engine->pmu.enable_count[sample]++;
491 /*
492 * Enable engine busy stats tracking if needed or
493 * alternatively cancel the scheduled disable.
494 *
495 * If the delayed disable was pending, cancel it and
496 * in this case do not enable since it already is.
497 */
498 if (engine_needs_busy_stats(engine) &&
499 !engine->pmu.busy_stats) {
500 engine->pmu.busy_stats = true;
501 if (!cancel_delayed_work(&engine->pmu.disable_busy_stats))
502 intel_enable_engine_stats(engine);
503 }
504 }
505 } 600 }
506 601
507 /* 602 /*
@@ -509,19 +604,11 @@ static void i915_pmu_enable(struct perf_event *event)
509 * for all listeners. Even when the event was already enabled and has 604 * for all listeners. Even when the event was already enabled and has
510 * an existing non-zero value. 605 * an existing non-zero value.
511 */ 606 */
512 local64_set(&event->hw.prev_count, __i915_pmu_event_read(event)); 607 local64_set(&event->hw.prev_count, __i915_pmu_event_read(event, true));
513 608
514 spin_unlock_irqrestore(&i915->pmu.lock, flags); 609 spin_unlock_irqrestore(&i915->pmu.lock, flags);
515} 610}
516 611
517static void __disable_busy_stats(struct work_struct *work)
518{
519 struct intel_engine_cs *engine =
520 container_of(work, typeof(*engine), pmu.disable_busy_stats.work);
521
522 intel_disable_engine_stats(engine);
523}
524
525static void i915_pmu_disable(struct perf_event *event) 612static void i915_pmu_disable(struct perf_event *event)
526{ 613{
527 struct drm_i915_private *i915 = 614 struct drm_i915_private *i915 =
@@ -545,26 +632,8 @@ static void i915_pmu_disable(struct perf_event *event)
545 * Decrement the reference count and clear the enabled 632 * Decrement the reference count and clear the enabled
546 * bitmask when the last listener on an event goes away. 633 * bitmask when the last listener on an event goes away.
547 */ 634 */
548 if (--engine->pmu.enable_count[sample] == 0) { 635 if (--engine->pmu.enable_count[sample] == 0)
549 engine->pmu.enable &= ~BIT(sample); 636 engine->pmu.enable &= ~BIT(sample);
550 if (!engine_needs_busy_stats(engine) &&
551 engine->pmu.busy_stats) {
552 engine->pmu.busy_stats = false;
553 /*
554 * We request a delayed disable to handle the
555 * rapid on/off cycles on events, which can
556 * happen when tools like perf stat start, in a
557 * nicer way.
558 *
559 * In addition, this also helps with busy stats
560 * accuracy with background CPU offline/online
561 * migration events.
562 */
563 queue_delayed_work(system_wq,
564 &engine->pmu.disable_busy_stats,
565 round_jiffies_up_relative(HZ));
566 }
567 }
568 } 637 }
569 638
570 GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); 639 GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
@@ -797,8 +866,6 @@ static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
797 866
798void i915_pmu_register(struct drm_i915_private *i915) 867void i915_pmu_register(struct drm_i915_private *i915)
799{ 868{
800 struct intel_engine_cs *engine;
801 enum intel_engine_id id;
802 int ret; 869 int ret;
803 870
804 if (INTEL_GEN(i915) <= 2) { 871 if (INTEL_GEN(i915) <= 2) {
@@ -820,10 +887,6 @@ void i915_pmu_register(struct drm_i915_private *i915)
820 hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 887 hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
821 i915->pmu.timer.function = i915_sample; 888 i915->pmu.timer.function = i915_sample;
822 889
823 for_each_engine(engine, i915, id)
824 INIT_DELAYED_WORK(&engine->pmu.disable_busy_stats,
825 __disable_busy_stats);
826
827 ret = perf_pmu_register(&i915->pmu.base, "i915", -1); 890 ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
828 if (ret) 891 if (ret)
829 goto err; 892 goto err;
@@ -843,9 +906,6 @@ err:
843 906
844void i915_pmu_unregister(struct drm_i915_private *i915) 907void i915_pmu_unregister(struct drm_i915_private *i915)
845{ 908{
846 struct intel_engine_cs *engine;
847 enum intel_engine_id id;
848
849 if (!i915->pmu.base.event_init) 909 if (!i915->pmu.base.event_init)
850 return; 910 return;
851 911
@@ -853,11 +913,6 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
853 913
854 hrtimer_cancel(&i915->pmu.timer); 914 hrtimer_cancel(&i915->pmu.timer);
855 915
856 for_each_engine(engine, i915, id) {
857 GEM_BUG_ON(engine->pmu.busy_stats);
858 flush_delayed_work(&engine->pmu.disable_busy_stats);
859 }
860
861 i915_pmu_unregister_cpuhp_state(i915); 916 i915_pmu_unregister_cpuhp_state(i915);
862 917
863 perf_pmu_unregister(&i915->pmu.base); 918 perf_pmu_unregister(&i915->pmu.base);
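
Core of the get_rc6() rework above: never force a device resume from the PMU read path. intel_runtime_pm_get_if_in_use() only takes a wakeref if the device is already awake; otherwise the counter is extrapolated from suspend time, with the stored estimate keeping the reported value monotonic. A sketch where read_rc6_hw() and estimate_rc6() are hypothetical helpers standing in for the register reads and jiffies arithmetic in the patch:

	u64 val;

	if (intel_runtime_pm_get_if_in_use(i915)) {
		val = read_rc6_hw(i915);	/* awake: read the real counter */
		intel_runtime_pm_put(i915);
	} else {
		val = estimate_rc6(i915);	/* suspended: extrapolate */
	}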
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 40c154d13565..bb62df15afa4 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -27,6 +27,8 @@
27enum { 27enum {
28 __I915_SAMPLE_FREQ_ACT = 0, 28 __I915_SAMPLE_FREQ_ACT = 0,
29 __I915_SAMPLE_FREQ_REQ, 29 __I915_SAMPLE_FREQ_REQ,
30 __I915_SAMPLE_RC6,
31 __I915_SAMPLE_RC6_ESTIMATED,
30 __I915_NUM_PMU_SAMPLERS 32 __I915_NUM_PMU_SAMPLERS
31}; 33};
32 34
@@ -94,6 +96,10 @@ struct i915_pmu {
94 * struct intel_engine_cs. 96 * struct intel_engine_cs.
95 */ 97 */
96 struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS]; 98 struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
99 /**
100 * @suspended_jiffies_last: Cached suspend time from PM core.
101 */
102 unsigned long suspended_jiffies_last;
97}; 103};
98 104
99#ifdef CONFIG_PERF_EVENTS 105#ifdef CONFIG_PERF_EVENTS
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index f7f771749e48..b49a2df44430 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -947,6 +947,86 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
947 return 0; 947 return 0;
948} 948}
949 949
950/*
951 * Get len of pre-fixed deassert fragment from a v1 init OTP sequence,
952 * skip all delay + gpio operands and stop at the first DSI packet op.
953 */
954static int get_init_otp_deassert_fragment_len(struct drm_i915_private *dev_priv)
955{
956 const u8 *data = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
957 int index, len;
958
959 if (WARN_ON(!data || dev_priv->vbt.dsi.seq_version != 1))
960 return 0;
961
962 /* index = 1 to skip sequence byte */
963 for (index = 1; data[index] != MIPI_SEQ_ELEM_END; index += len) {
964 switch (data[index]) {
965 case MIPI_SEQ_ELEM_SEND_PKT:
966 return index == 1 ? 0 : index;
967 case MIPI_SEQ_ELEM_DELAY:
968 len = 5; /* 1 byte for operand + uint32 */
969 break;
970 case MIPI_SEQ_ELEM_GPIO:
971 len = 3; /* 1 byte for op, 1 for gpio_nr, 1 for value */
972 break;
973 default:
974 return 0;
975 }
976 }
977
978 return 0;
979}
980
981/*
982 * Some v1 VBT MIPI sequences do the deassert in the init OTP sequence.
983 * The deassert must be done before calling intel_dsi_device_ready, so for
984 * these devices we split the init OTP sequence into a deassert sequence and
985 * the actual init OTP part.
986 */
987static void fixup_mipi_sequences(struct drm_i915_private *dev_priv)
988{
989 u8 *init_otp;
990 int len;
991
992 /* Limit this to VLV for now. */
993 if (!IS_VALLEYVIEW(dev_priv))
994 return;
995
996 /* Limit this to v1 vid-mode sequences */
997 if (dev_priv->vbt.dsi.config->is_cmd_mode ||
998 dev_priv->vbt.dsi.seq_version != 1)
999 return;
1000
1001 /* Only do this if there are otp and assert seqs and no deassert seq */
1002 if (!dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
1003 !dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
1004 dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
1005 return;
1006
1007 /* The deassert-sequence ends at the first DSI packet */
1008 len = get_init_otp_deassert_fragment_len(dev_priv);
1009 if (!len)
1010 return;
1011
1012 DRM_DEBUG_KMS("Using init OTP fragment to deassert reset\n");
1013
1014 /* Copy the fragment, update seq byte and terminate it */
1015 init_otp = (u8 *)dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
1016 dev_priv->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
1017 if (!dev_priv->vbt.dsi.deassert_seq)
1018 return;
1019 dev_priv->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
1020 dev_priv->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
1021 /* Use the copy for deassert */
1022 dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
1023 dev_priv->vbt.dsi.deassert_seq;
1024 /* Replace the last byte of the fragment with init OTP seq byte */
1025 init_otp[len - 1] = MIPI_SEQ_INIT_OTP;
1026 /* And make MIPI_SEQ_INIT_OTP point to it */
1027 dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
1028}
1029
950static void 1030static void
951parse_mipi_sequence(struct drm_i915_private *dev_priv, 1031parse_mipi_sequence(struct drm_i915_private *dev_priv,
952 const struct bdb_header *bdb) 1032 const struct bdb_header *bdb)
@@ -1016,6 +1096,8 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
1016 dev_priv->vbt.dsi.size = seq_size; 1096 dev_priv->vbt.dsi.size = seq_size;
1017 dev_priv->vbt.dsi.seq_version = sequence->version; 1097 dev_priv->vbt.dsi.seq_version = sequence->version;
1018 1098
1099 fixup_mipi_sequences(dev_priv);
1100
1019 DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n"); 1101 DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
1020 return; 1102 return;
1021 1103
@@ -1589,6 +1671,29 @@ out:
1589} 1671}
1590 1672
1591/** 1673/**
1674 * intel_bios_cleanup - Free any resources allocated by intel_bios_init()
1675 * @dev_priv: i915 device instance
1676 */
1677void intel_bios_cleanup(struct drm_i915_private *dev_priv)
1678{
1679 kfree(dev_priv->vbt.child_dev);
1680 dev_priv->vbt.child_dev = NULL;
1681 dev_priv->vbt.child_dev_num = 0;
1682 kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
1683 dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
1684 kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
1685 dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
1686 kfree(dev_priv->vbt.dsi.data);
1687 dev_priv->vbt.dsi.data = NULL;
1688 kfree(dev_priv->vbt.dsi.pps);
1689 dev_priv->vbt.dsi.pps = NULL;
1690 kfree(dev_priv->vbt.dsi.config);
1691 dev_priv->vbt.dsi.config = NULL;
1692 kfree(dev_priv->vbt.dsi.deassert_seq);
1693 dev_priv->vbt.dsi.deassert_seq = NULL;
1694}
1695
1696/**
1592 * intel_bios_is_tv_present - is integrated TV present in VBT 1697 * intel_bios_is_tv_present - is integrated TV present in VBT
1593 * @dev_priv: i915 device instance 1698 * @dev_priv: i915 device instance
1594 * 1699 *
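
intel_bios_cleanup() above uses the kfree-then-NULL idiom throughout: kfree(NULL) is a no-op, so resetting each pointer makes the cleanup safe to run more than once and rules out a later double free.

	kfree(dev_priv->vbt.dsi.deassert_seq);
	dev_priv->vbt.dsi.deassert_seq = NULL;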
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index bd40fea16b4f..f54ddda9fdad 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -594,29 +594,16 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
594 spin_unlock_irq(&b->rb_lock); 594 spin_unlock_irq(&b->rb_lock);
595} 595}
596 596
597static bool signal_valid(const struct drm_i915_gem_request *request)
598{
599 return intel_wait_check_request(&request->signaling.wait, request);
600}
601
602static bool signal_complete(const struct drm_i915_gem_request *request) 597static bool signal_complete(const struct drm_i915_gem_request *request)
603{ 598{
604 if (!request) 599 if (!request)
605 return false; 600 return false;
606 601
607 /* If another process served as the bottom-half it may have already 602 /*
608 * signalled that this wait is already completed. 603 * Carefully check if the request is complete, giving time for the
609 */
610 if (intel_wait_complete(&request->signaling.wait))
611 return signal_valid(request);
612
613 /* Carefully check if the request is complete, giving time for the
614 * seqno to be visible or if the GPU hung. 604 * seqno to be visible or if the GPU hung.
615 */ 605 */
616 if (__i915_request_irq_complete(request)) 606 return __i915_request_irq_complete(request);
617 return true;
618
619 return false;
620} 607}
621 608
622static struct drm_i915_gem_request *to_signaler(struct rb_node *rb) 609static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
@@ -659,9 +646,13 @@ static int intel_breadcrumbs_signaler(void *arg)
659 request = i915_gem_request_get_rcu(request); 646 request = i915_gem_request_get_rcu(request);
660 rcu_read_unlock(); 647 rcu_read_unlock();
661 if (signal_complete(request)) { 648 if (signal_complete(request)) {
662 local_bh_disable(); 649 if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
663 dma_fence_signal(&request->fence); 650 &request->fence.flags)) {
664 local_bh_enable(); /* kick start the tasklets */ 651 local_bh_disable();
652 dma_fence_signal(&request->fence);
653 GEM_BUG_ON(!i915_gem_request_completed(request));
654 local_bh_enable(); /* kick start the tasklets */
655 }
665 656
666 spin_lock_irq(&b->rb_lock); 657 spin_lock_irq(&b->rb_lock);
667 658
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 5dc118f26b51..1704c8897afd 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -1952,6 +1952,14 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
1952 if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9) 1952 if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
1953 min_cdclk = max(2 * 96000, min_cdclk); 1953 min_cdclk = max(2 * 96000, min_cdclk);
1954 1954
1955 /*
1956 * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
1957 * than 320000KHz.
1958 */
1959 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
1960 IS_VALLEYVIEW(dev_priv))
1961 min_cdclk = max(320000, min_cdclk);
1962
1955 if (min_cdclk > dev_priv->max_cdclk_freq) { 1963 if (min_cdclk > dev_priv->max_cdclk_freq) {
1956 DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n", 1964 DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
1957 min_cdclk, dev_priv->max_cdclk_freq); 1965 min_cdclk, dev_priv->max_cdclk_freq);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index d790bdc227ff..fa960cfd2764 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1458,7 +1458,9 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
1458 struct drm_i915_private *dev_priv = engine->i915; 1458 struct drm_i915_private *dev_priv = engine->i915;
1459 bool idle = true; 1459 bool idle = true;
1460 1460
1461 intel_runtime_pm_get(dev_priv); 1461 /* If the whole device is asleep, the engine must be idle */
1462 if (!intel_runtime_pm_get_if_in_use(dev_priv))
1463 return true;
1462 1464
1463 /* First check that no commands are left in the ring */ 1465 /* First check that no commands are left in the ring */
1464 if ((I915_READ_HEAD(engine) & HEAD_ADDR) != 1466 if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
@@ -1943,16 +1945,22 @@ intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
1943 */ 1945 */
1944int intel_enable_engine_stats(struct intel_engine_cs *engine) 1946int intel_enable_engine_stats(struct intel_engine_cs *engine)
1945{ 1947{
1948 struct intel_engine_execlists *execlists = &engine->execlists;
1946 unsigned long flags; 1949 unsigned long flags;
1950 int err = 0;
1947 1951
1948 if (!intel_engine_supports_stats(engine)) 1952 if (!intel_engine_supports_stats(engine))
1949 return -ENODEV; 1953 return -ENODEV;
1950 1954
1955 tasklet_disable(&execlists->tasklet);
1951 spin_lock_irqsave(&engine->stats.lock, flags); 1956 spin_lock_irqsave(&engine->stats.lock, flags);
1952 if (engine->stats.enabled == ~0) 1957
1953 goto busy; 1958 if (unlikely(engine->stats.enabled == ~0)) {
1959 err = -EBUSY;
1960 goto unlock;
1961 }
1962
1954 if (engine->stats.enabled++ == 0) { 1963 if (engine->stats.enabled++ == 0) {
1955 struct intel_engine_execlists *execlists = &engine->execlists;
1956 const struct execlist_port *port = execlists->port; 1964 const struct execlist_port *port = execlists->port;
1957 unsigned int num_ports = execlists_num_ports(execlists); 1965 unsigned int num_ports = execlists_num_ports(execlists);
1958 1966
@@ -1967,14 +1975,12 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
1967 if (engine->stats.active) 1975 if (engine->stats.active)
1968 engine->stats.start = engine->stats.enabled_at; 1976 engine->stats.start = engine->stats.enabled_at;
1969 } 1977 }
1970 spin_unlock_irqrestore(&engine->stats.lock, flags);
1971
1972 return 0;
1973 1978
1974busy: 1979unlock:
1975 spin_unlock_irqrestore(&engine->stats.lock, flags); 1980 spin_unlock_irqrestore(&engine->stats.lock, flags);
1981 tasklet_enable(&execlists->tasklet);
1976 1982
1977 return -EBUSY; 1983 return err;
1978} 1984}
1979 1985
1980static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine) 1986static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
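
The ordering used by intel_enable_engine_stats() above, restated: the execlists tasklet is quiesced before stats.lock is taken, so the sampled port state cannot change while the busy-stats baseline is captured. tasklet_disable() busy-waits for a running tasklet instance, so it must sit outside, not inside, the irq-disabled critical section.

	tasklet_disable(&execlists->tasklet);
	spin_lock_irqsave(&engine->stats.lock, flags);
	/* ... sample execlists ports, bump engine->stats.enabled ... */
	spin_unlock_irqrestore(&engine->stats.lock, flags);
	tasklet_enable(&execlists->tasklet);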
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index c5ff203e42d6..a0e7a6c2a57c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -366,20 +366,6 @@ struct intel_engine_cs {
366 */ 366 */
367#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1) 367#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1)
368 struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX]; 368 struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
369 /**
370 * @busy_stats: Has enablement of engine stats tracking been
371 * requested.
372 */
373 bool busy_stats;
374 /**
375 * @disable_busy_stats: Work item for busy stats disabling.
376 *
377 * Same as with @enable_busy_stats action, with the difference
378 * that we delay it in case there are rapid enable-disable
379 * actions, which can happen during tool startup (like perf
380 * stat).
381 */
382 struct delayed_work disable_busy_stats;
383 } pmu; 369 } pmu;
384 370
385 /* 371 /*
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 5155f0179b61..05520202c967 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -36,6 +36,7 @@
36#include "meson_venc.h" 36#include "meson_venc.h"
37#include "meson_vpp.h" 37#include "meson_vpp.h"
38#include "meson_viu.h" 38#include "meson_viu.h"
39#include "meson_canvas.h"
39#include "meson_registers.h" 40#include "meson_registers.h"
40 41
41/* CRTC definition */ 42/* CRTC definition */
@@ -192,6 +193,11 @@ void meson_crtc_irq(struct meson_drm *priv)
192 } else 193 } else
193 meson_vpp_disable_interlace_vscaler_osd1(priv); 194 meson_vpp_disable_interlace_vscaler_osd1(priv);
194 195
196 meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
197 priv->viu.osd1_addr, priv->viu.osd1_stride,
198 priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
199 MESON_CANVAS_BLKMODE_LINEAR);
200
195 /* Enable OSD1 */ 201 /* Enable OSD1 */
196 writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND, 202 writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND,
197 priv->io_base + _REG(VPP_MISC)); 203 priv->io_base + _REG(VPP_MISC));
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index 5e8b392b9d1f..8450d6ac8c9b 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -43,6 +43,9 @@ struct meson_drm {
43 bool osd1_commit; 43 bool osd1_commit;
44 uint32_t osd1_ctrl_stat; 44 uint32_t osd1_ctrl_stat;
45 uint32_t osd1_blk0_cfg[5]; 45 uint32_t osd1_blk0_cfg[5];
46 uint32_t osd1_addr;
47 uint32_t osd1_stride;
48 uint32_t osd1_height;
46 } viu; 49 } viu;
47 50
48 struct { 51 struct {
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index d0a6ac8390f3..27bd3503e1e4 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -164,10 +164,9 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
164 /* Update Canvas with buffer address */ 164 /* Update Canvas with buffer address */
165 gem = drm_fb_cma_get_gem_obj(fb, 0); 165 gem = drm_fb_cma_get_gem_obj(fb, 0);
166 166
167 meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1, 167 priv->viu.osd1_addr = gem->paddr;
168 gem->paddr, fb->pitches[0], 168 priv->viu.osd1_stride = fb->pitches[0];
169 fb->height, MESON_CANVAS_WRAP_NONE, 169 priv->viu.osd1_height = fb->height;
170 MESON_CANVAS_BLKMODE_LINEAR);
171 170
172 spin_unlock_irqrestore(&priv->drm->event_lock, flags); 171 spin_unlock_irqrestore(&priv->drm->event_lock, flags);
173} 172}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 3e9bba4d6624..6d8e3a9a6fc0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -680,7 +680,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
680 } else { 680 } else {
681 dev_info(&pdev->dev, 681 dev_info(&pdev->dev,
682 "no iommu, fallback to phys contig buffers for scanout\n"); 682 "no iommu, fallback to phys contig buffers for scanout\n");
683 aspace = NULL;; 683 aspace = NULL;
684 } 684 }
685 685
686 pm_runtime_put_sync(&pdev->dev); 686 pm_runtime_put_sync(&pdev->dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 69d6e61a01ec..6ed9cb053dfa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -570,9 +570,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
570 nv_connector->edid = NULL; 570 nv_connector->edid = NULL;
571 } 571 }
572 572
573 ret = pm_runtime_get_sync(connector->dev->dev); 573 /* Outputs are only polled while runtime active, so acquiring a
574 if (ret < 0 && ret != -EACCES) 574 * runtime PM ref here is unnecessary (and would deadlock upon
575 return conn_status; 575 * runtime suspend because it waits for polling to finish).
576 */
577 if (!drm_kms_helper_is_poll_worker()) {
578 ret = pm_runtime_get_sync(connector->dev->dev);
579 if (ret < 0 && ret != -EACCES)
580 return conn_status;
581 }
576 582
577 nv_encoder = nouveau_connector_ddc_detect(connector); 583 nv_encoder = nouveau_connector_ddc_detect(connector);
578 if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) { 584 if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
@@ -647,8 +653,10 @@ detect_analog:
647 653
648 out: 654 out:
649 655
650 pm_runtime_mark_last_busy(connector->dev->dev); 656 if (!drm_kms_helper_is_poll_worker()) {
651 pm_runtime_put_autosuspend(connector->dev->dev); 657 pm_runtime_mark_last_busy(connector->dev->dev);
658 pm_runtime_put_autosuspend(connector->dev->dev);
659 }
652 660
653 return conn_status; 661 return conn_status;
654} 662}
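
The nouveau hunk brackets both the pm_runtime_get_sync() and the matching put with drm_kms_helper_is_poll_worker(): runtime suspend waits for output polling to finish, so a poll-worker thread that takes a runtime PM reference would deadlock against its own suspend. Reduced to a hedged sketch of the guard (my_detect is a hypothetical name; the kernel calls shown are real):

    static enum drm_connector_status
    my_detect(struct drm_connector *connector, bool force)
    {
        enum drm_connector_status status = connector_status_disconnected;
        int ret;

        /* only take a runtime PM ref when NOT running as the poll worker */
        if (!drm_kms_helper_is_poll_worker()) {
            ret = pm_runtime_get_sync(connector->dev->dev);
            if (ret < 0 && ret != -EACCES)
                return status;
        }

        /* ... probe the connector hardware here ... */

        /* the put must be guarded identically, or the refcount goes skewed */
        if (!drm_kms_helper_is_poll_worker()) {
            pm_runtime_mark_last_busy(connector->dev->dev);
            pm_runtime_put_autosuspend(connector->dev->dev);
        }
        return status;
    }

The radeon hunks further down apply this same guard to all five detect paths.
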
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index bf62303571b3..3695cde669f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -301,7 +301,7 @@ nvkm_therm_attr_set(struct nvkm_therm *therm,
301void 301void
302nvkm_therm_clkgate_enable(struct nvkm_therm *therm) 302nvkm_therm_clkgate_enable(struct nvkm_therm *therm)
303{ 303{
304 if (!therm->func->clkgate_enable || !therm->clkgating_enabled) 304 if (!therm || !therm->func->clkgate_enable || !therm->clkgating_enabled)
305 return; 305 return;
306 306
307 nvkm_debug(&therm->subdev, 307 nvkm_debug(&therm->subdev,
@@ -312,7 +312,7 @@ nvkm_therm_clkgate_enable(struct nvkm_therm *therm)
312void 312void
313nvkm_therm_clkgate_fini(struct nvkm_therm *therm, bool suspend) 313nvkm_therm_clkgate_fini(struct nvkm_therm *therm, bool suspend)
314{ 314{
315 if (!therm->func->clkgate_fini || !therm->clkgating_enabled) 315 if (!therm || !therm->func->clkgate_fini || !therm->clkgating_enabled)
316 return; 316 return;
317 317
318 nvkm_debug(&therm->subdev, 318 nvkm_debug(&therm->subdev,
@@ -395,7 +395,7 @@ void
395nvkm_therm_clkgate_init(struct nvkm_therm *therm, 395nvkm_therm_clkgate_init(struct nvkm_therm *therm,
396 const struct nvkm_therm_clkgate_pack *p) 396 const struct nvkm_therm_clkgate_pack *p)
397{ 397{
398 if (!therm->func->clkgate_init || !therm->clkgating_enabled) 398 if (!therm || !therm->func->clkgate_init || !therm->clkgating_enabled)
399 return; 399 return;
400 400
401 therm->func->clkgate_init(therm, p); 401 therm->func->clkgate_init(therm, p);
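
The three therm hunks add the same leading !therm guard so the clkgate entry points can be called unconditionally on boards where no therm subdev was constructed and the pointer is NULL. Ordering matters: && short-circuits left to right, so !therm must be tested before any therm->func->... dereference. The shape, reduced to a sketch:

    void nvkm_therm_clkgate_enable(struct nvkm_therm *therm)
    {
        /* NULL check first; the later terms dereference therm */
        if (!therm || !therm->func->clkgate_enable ||
            !therm->clkgating_enabled)
            return;
        therm->func->clkgate_enable(therm);
    }
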
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 5012f5e47a1e..2e2ca3c6b47d 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -899,9 +899,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
899 enum drm_connector_status ret = connector_status_disconnected; 899 enum drm_connector_status ret = connector_status_disconnected;
900 int r; 900 int r;
901 901
902 r = pm_runtime_get_sync(connector->dev->dev); 902 if (!drm_kms_helper_is_poll_worker()) {
903 if (r < 0) 903 r = pm_runtime_get_sync(connector->dev->dev);
904 return connector_status_disconnected; 904 if (r < 0)
905 return connector_status_disconnected;
906 }
905 907
906 if (encoder) { 908 if (encoder) {
907 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 909 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -924,8 +926,12 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
924 /* check acpi lid status ??? */ 926 /* check acpi lid status ??? */
925 927
926 radeon_connector_update_scratch_regs(connector, ret); 928 radeon_connector_update_scratch_regs(connector, ret);
927 pm_runtime_mark_last_busy(connector->dev->dev); 929
928 pm_runtime_put_autosuspend(connector->dev->dev); 930 if (!drm_kms_helper_is_poll_worker()) {
931 pm_runtime_mark_last_busy(connector->dev->dev);
932 pm_runtime_put_autosuspend(connector->dev->dev);
933 }
934
929 return ret; 935 return ret;
930} 936}
931 937
@@ -1039,9 +1045,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
1039 enum drm_connector_status ret = connector_status_disconnected; 1045 enum drm_connector_status ret = connector_status_disconnected;
1040 int r; 1046 int r;
1041 1047
1042 r = pm_runtime_get_sync(connector->dev->dev); 1048 if (!drm_kms_helper_is_poll_worker()) {
1043 if (r < 0) 1049 r = pm_runtime_get_sync(connector->dev->dev);
1044 return connector_status_disconnected; 1050 if (r < 0)
1051 return connector_status_disconnected;
1052 }
1045 1053
1046 encoder = radeon_best_single_encoder(connector); 1054 encoder = radeon_best_single_encoder(connector);
1047 if (!encoder) 1055 if (!encoder)
@@ -1108,8 +1116,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
1108 radeon_connector_update_scratch_regs(connector, ret); 1116 radeon_connector_update_scratch_regs(connector, ret);
1109 1117
1110out: 1118out:
1111 pm_runtime_mark_last_busy(connector->dev->dev); 1119 if (!drm_kms_helper_is_poll_worker()) {
1112 pm_runtime_put_autosuspend(connector->dev->dev); 1120 pm_runtime_mark_last_busy(connector->dev->dev);
1121 pm_runtime_put_autosuspend(connector->dev->dev);
1122 }
1113 1123
1114 return ret; 1124 return ret;
1115} 1125}
@@ -1173,9 +1183,11 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
1173 if (!radeon_connector->dac_load_detect) 1183 if (!radeon_connector->dac_load_detect)
1174 return ret; 1184 return ret;
1175 1185
1176 r = pm_runtime_get_sync(connector->dev->dev); 1186 if (!drm_kms_helper_is_poll_worker()) {
1177 if (r < 0) 1187 r = pm_runtime_get_sync(connector->dev->dev);
1178 return connector_status_disconnected; 1188 if (r < 0)
1189 return connector_status_disconnected;
1190 }
1179 1191
1180 encoder = radeon_best_single_encoder(connector); 1192 encoder = radeon_best_single_encoder(connector);
1181 if (!encoder) 1193 if (!encoder)
@@ -1187,8 +1199,12 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
1187 if (ret == connector_status_connected) 1199 if (ret == connector_status_connected)
1188 ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false); 1200 ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
1189 radeon_connector_update_scratch_regs(connector, ret); 1201 radeon_connector_update_scratch_regs(connector, ret);
1190 pm_runtime_mark_last_busy(connector->dev->dev); 1202
1191 pm_runtime_put_autosuspend(connector->dev->dev); 1203 if (!drm_kms_helper_is_poll_worker()) {
1204 pm_runtime_mark_last_busy(connector->dev->dev);
1205 pm_runtime_put_autosuspend(connector->dev->dev);
1206 }
1207
1192 return ret; 1208 return ret;
1193} 1209}
1194 1210
@@ -1251,9 +1267,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
1251 enum drm_connector_status ret = connector_status_disconnected; 1267 enum drm_connector_status ret = connector_status_disconnected;
1252 bool dret = false, broken_edid = false; 1268 bool dret = false, broken_edid = false;
1253 1269
1254 r = pm_runtime_get_sync(connector->dev->dev); 1270 if (!drm_kms_helper_is_poll_worker()) {
1255 if (r < 0) 1271 r = pm_runtime_get_sync(connector->dev->dev);
1256 return connector_status_disconnected; 1272 if (r < 0)
1273 return connector_status_disconnected;
1274 }
1257 1275
1258 if (radeon_connector->detected_hpd_without_ddc) { 1276 if (radeon_connector->detected_hpd_without_ddc) {
1259 force = true; 1277 force = true;
@@ -1436,8 +1454,10 @@ out:
1436 } 1454 }
1437 1455
1438exit: 1456exit:
1439 pm_runtime_mark_last_busy(connector->dev->dev); 1457 if (!drm_kms_helper_is_poll_worker()) {
1440 pm_runtime_put_autosuspend(connector->dev->dev); 1458 pm_runtime_mark_last_busy(connector->dev->dev);
1459 pm_runtime_put_autosuspend(connector->dev->dev);
1460 }
1441 1461
1442 return ret; 1462 return ret;
1443} 1463}
@@ -1688,9 +1708,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1688 if (radeon_dig_connector->is_mst) 1708 if (radeon_dig_connector->is_mst)
1689 return connector_status_disconnected; 1709 return connector_status_disconnected;
1690 1710
1691 r = pm_runtime_get_sync(connector->dev->dev); 1711 if (!drm_kms_helper_is_poll_worker()) {
1692 if (r < 0) 1712 r = pm_runtime_get_sync(connector->dev->dev);
1693 return connector_status_disconnected; 1713 if (r < 0)
1714 return connector_status_disconnected;
1715 }
1694 1716
1695 if (!force && radeon_check_hpd_status_unchanged(connector)) { 1717 if (!force && radeon_check_hpd_status_unchanged(connector)) {
1696 ret = connector->status; 1718 ret = connector->status;
@@ -1777,8 +1799,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1777 } 1799 }
1778 1800
1779out: 1801out:
1780 pm_runtime_mark_last_busy(connector->dev->dev); 1802 if (!drm_kms_helper_is_poll_worker()) {
1781 pm_runtime_put_autosuspend(connector->dev->dev); 1803 pm_runtime_mark_last_busy(connector->dev->dev);
1804 pm_runtime_put_autosuspend(connector->dev->dev);
1805 }
1782 1806
1783 return ret; 1807 return ret;
1784} 1808}
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 2c18996d59c5..0d95888ccc3e 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -461,7 +461,7 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
461{ 461{
462 struct drm_sched_job *s_job; 462 struct drm_sched_job *s_job;
463 struct drm_sched_entity *entity, *tmp; 463 struct drm_sched_entity *entity, *tmp;
464 int i;; 464 int i;
465 465
466 spin_lock(&sched->job_list_lock); 466 spin_lock(&sched->job_list_lock);
467 list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) { 467 list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 658fa2d3e40c..48685cddbad1 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1089,7 +1089,7 @@ static void ipu_irq_handler(struct irq_desc *desc)
1089{ 1089{
1090 struct ipu_soc *ipu = irq_desc_get_handler_data(desc); 1090 struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
1091 struct irq_chip *chip = irq_desc_get_chip(desc); 1091 struct irq_chip *chip = irq_desc_get_chip(desc);
1092 const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14}; 1092 static const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
1093 1093
1094 chained_irq_enter(chip, desc); 1094 chained_irq_enter(chip, desc);
1095 1095
@@ -1102,7 +1102,7 @@ static void ipu_err_irq_handler(struct irq_desc *desc)
1102{ 1102{
1103 struct ipu_soc *ipu = irq_desc_get_handler_data(desc); 1103 struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
1104 struct irq_chip *chip = irq_desc_get_chip(desc); 1104 struct irq_chip *chip = irq_desc_get_chip(desc);
1105 const int int_reg[] = { 4, 5, 8, 9}; 1105 static const int int_reg[] = { 4, 5, 8, 9};
1106 1106
1107 chained_irq_enter(chip, desc); 1107 chained_irq_enter(chip, desc);
1108 1108
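
Marking the lookup tables in the two ipu IRQ handlers static const moves them out of the handler's stack frame and into .rodata: without static, the compiler must rebuild the array from its initializer on every interrupt. A small self-contained illustration (plain userspace C, hypothetical names):

    #include <stdio.h>

    static void handler(void)
    {
        /* one copy in .rodata; no per-invocation construction */
        static const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14 };

        for (unsigned i = 0; i < sizeof(int_reg) / sizeof(int_reg[0]); i++)
            printf("%d ", int_reg[i]);
        printf("\n");
    }

    int main(void)
    {
        handler();
        return 0;
    }
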
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index bb9c087e6c0d..9f2d9ec42add 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -788,12 +788,14 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
788 case V4L2_PIX_FMT_SGBRG8: 788 case V4L2_PIX_FMT_SGBRG8:
789 case V4L2_PIX_FMT_SGRBG8: 789 case V4L2_PIX_FMT_SGRBG8:
790 case V4L2_PIX_FMT_SRGGB8: 790 case V4L2_PIX_FMT_SRGGB8:
791 case V4L2_PIX_FMT_GREY:
791 offset = image->rect.left + image->rect.top * pix->bytesperline; 792 offset = image->rect.left + image->rect.top * pix->bytesperline;
792 break; 793 break;
793 case V4L2_PIX_FMT_SBGGR16: 794 case V4L2_PIX_FMT_SBGGR16:
794 case V4L2_PIX_FMT_SGBRG16: 795 case V4L2_PIX_FMT_SGBRG16:
795 case V4L2_PIX_FMT_SGRBG16: 796 case V4L2_PIX_FMT_SGRBG16:
796 case V4L2_PIX_FMT_SRGGB16: 797 case V4L2_PIX_FMT_SRGGB16:
798 case V4L2_PIX_FMT_Y16:
797 offset = image->rect.left * 2 + 799 offset = image->rect.left * 2 +
798 image->rect.top * pix->bytesperline; 800 image->rect.top * pix->bytesperline;
799 break; 801 break;
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index 24e12b87a0cb..caa05b0702e1 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -288,6 +288,7 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
288 case MEDIA_BUS_FMT_SGBRG10_1X10: 288 case MEDIA_BUS_FMT_SGBRG10_1X10:
289 case MEDIA_BUS_FMT_SGRBG10_1X10: 289 case MEDIA_BUS_FMT_SGRBG10_1X10:
290 case MEDIA_BUS_FMT_SRGGB10_1X10: 290 case MEDIA_BUS_FMT_SRGGB10_1X10:
291 case MEDIA_BUS_FMT_Y10_1X10:
291 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; 292 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
292 cfg->mipi_dt = MIPI_DT_RAW10; 293 cfg->mipi_dt = MIPI_DT_RAW10;
293 cfg->data_width = IPU_CSI_DATA_WIDTH_10; 294 cfg->data_width = IPU_CSI_DATA_WIDTH_10;
@@ -296,6 +297,7 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
296 case MEDIA_BUS_FMT_SGBRG12_1X12: 297 case MEDIA_BUS_FMT_SGBRG12_1X12:
297 case MEDIA_BUS_FMT_SGRBG12_1X12: 298 case MEDIA_BUS_FMT_SGRBG12_1X12:
298 case MEDIA_BUS_FMT_SRGGB12_1X12: 299 case MEDIA_BUS_FMT_SRGGB12_1X12:
300 case MEDIA_BUS_FMT_Y12_1X12:
299 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; 301 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
300 cfg->mipi_dt = MIPI_DT_RAW12; 302 cfg->mipi_dt = MIPI_DT_RAW12;
301 cfg->data_width = IPU_CSI_DATA_WIDTH_12; 303 cfg->data_width = IPU_CSI_DATA_WIDTH_12;
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index f1cec3d70498..0f70e8847540 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -129,11 +129,14 @@ ipu_pre_lookup_by_phandle(struct device *dev, const char *name, int index)
129 if (pre_node == pre->dev->of_node) { 129 if (pre_node == pre->dev->of_node) {
130 mutex_unlock(&ipu_pre_list_mutex); 130 mutex_unlock(&ipu_pre_list_mutex);
131 device_link_add(dev, pre->dev, DL_FLAG_AUTOREMOVE); 131 device_link_add(dev, pre->dev, DL_FLAG_AUTOREMOVE);
132 of_node_put(pre_node);
132 return pre; 133 return pre;
133 } 134 }
134 } 135 }
135 mutex_unlock(&ipu_pre_list_mutex); 136 mutex_unlock(&ipu_pre_list_mutex);
136 137
138 of_node_put(pre_node);
139
137 return NULL; 140 return NULL;
138} 141}
139 142
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c
index 067365c733c6..97b99500153d 100644
--- a/drivers/gpu/ipu-v3/ipu-prg.c
+++ b/drivers/gpu/ipu-v3/ipu-prg.c
@@ -102,11 +102,14 @@ ipu_prg_lookup_by_phandle(struct device *dev, const char *name, int ipu_id)
102 mutex_unlock(&ipu_prg_list_mutex); 102 mutex_unlock(&ipu_prg_list_mutex);
103 device_link_add(dev, prg->dev, DL_FLAG_AUTOREMOVE); 103 device_link_add(dev, prg->dev, DL_FLAG_AUTOREMOVE);
104 prg->id = ipu_id; 104 prg->id = ipu_id;
105 of_node_put(prg_node);
105 return prg; 106 return prg;
106 } 107 }
107 } 108 }
108 mutex_unlock(&ipu_prg_list_mutex); 109 mutex_unlock(&ipu_prg_list_mutex);
109 110
111 of_node_put(prg_node);
112
110 return NULL; 113 return NULL;
111} 114}
112 115
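
Both ipu lookup helpers obtain the target node (outside these hunks) via of_parse_phandle(), which returns it with its refcount raised; previously the reference leaked on every return path. The fix drops it on both the match path and the not-found path. A hedged sketch of the balanced pattern, with my_list_mutex and my_pre_list as hypothetical stand-ins for the driver's list state:

    static struct ipu_pre *my_lookup_by_phandle(struct device *dev,
                                                const char *name, int index)
    {
        /* of_parse_phandle() returns the node with its refcount raised */
        struct device_node *node = of_parse_phandle(dev->of_node, name, index);
        struct ipu_pre *found = NULL, *pre;

        if (!node)
            return NULL;

        mutex_lock(&my_list_mutex);
        list_for_each_entry(pre, &my_pre_list, list) {
            if (pre->dev->of_node == node) {
                found = pre;
                break;
            }
        }
        mutex_unlock(&my_list_mutex);

        of_node_put(node);      /* drop the ref on every return path */
        return found;
    }
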
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 43ddcdfbd0da..9454ac134ce2 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -645,6 +645,9 @@
645#define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 645#define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033
646#define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 646#define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035
647#define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 647#define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038
648#define USB_DEVICE_ID_LD_POWERANALYSERCASSY 0x1040
649#define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY 0x1042
650#define USB_DEVICE_ID_LD_MACHINETESTCASSY 0x1043
648#define USB_DEVICE_ID_LD_JWM 0x1080 651#define USB_DEVICE_ID_LD_JWM 0x1080
649#define USB_DEVICE_ID_LD_DMMP 0x1081 652#define USB_DEVICE_ID_LD_DMMP 0x1081
650#define USB_DEVICE_ID_LD_UMIP 0x1090 653#define USB_DEVICE_ID_LD_UMIP 0x1090
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 5f6035a5ce36..e92b77fa574a 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -809,6 +809,9 @@ static const struct hid_device_id hid_ignore_list[] = {
809 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) }, 809 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
810 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) }, 810 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
811 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) }, 811 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
812 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) },
813 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) },
814 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) },
812 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) }, 815 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
813 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) }, 816 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
814 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) }, 817 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 4bdbf77f7197..72c338eb5fae 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -269,13 +269,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
269 for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) { 269 for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) {
270 const struct tjmax_model *tm = &tjmax_model_table[i]; 270 const struct tjmax_model *tm = &tjmax_model_table[i];
271 if (c->x86_model == tm->model && 271 if (c->x86_model == tm->model &&
272 (tm->mask == ANY || c->x86_mask == tm->mask)) 272 (tm->mask == ANY || c->x86_stepping == tm->mask))
273 return tm->tjmax; 273 return tm->tjmax;
274 } 274 }
275 275
276 /* Early chips have no MSR for TjMax */ 276 /* Early chips have no MSR for TjMax */
277 277
278 if (c->x86_model == 0xf && c->x86_mask < 4) 278 if (c->x86_model == 0xf && c->x86_stepping < 4)
279 usemsr_ee = 0; 279 usemsr_ee = 0;
280 280
281 if (c->x86_model > 0xe && usemsr_ee) { 281 if (c->x86_model > 0xe && usemsr_ee) {
@@ -426,7 +426,7 @@ static int chk_ucode_version(unsigned int cpu)
426 * Readings might stop updating when the processor has entered too deep a sleep state, 426 * Readings might stop updating when the processor has entered too deep a sleep state,

427 * fixed for stepping D0 (6EC). 427 * fixed for stepping D0 (6EC).
428 */ 428 */
429 if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) { 429 if (c->x86_model == 0xe && c->x86_stepping < 0xc && c->microcode < 0x39) {
430 pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n"); 430 pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");
431 return -ENODEV; 431 return -ENODEV;
432 } 432 }
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index ef91b8a67549..84e91286fc4f 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -293,7 +293,7 @@ u8 vid_which_vrm(void)
293 if (c->x86 < 6) /* Any CPU with family lower than 6 */ 293 if (c->x86 < 6) /* Any CPU with family lower than 6 */
294 return 0; /* doesn't have VID */ 294 return 0; /* doesn't have VID */
295 295
296 vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_mask, c->x86_vendor); 296 vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_stepping, c->x86_vendor);
297 if (vrm_ret == 134) 297 if (vrm_ret == 134)
298 vrm_ret = get_via_model_d_vrm(); 298 vrm_ret = get_via_model_d_vrm();
299 if (vrm_ret == 0) 299 if (vrm_ret == 0)
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 06b4e1c78bd8..051a72eecb24 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -129,7 +129,10 @@ static ssize_t temp1_input_show(struct device *dev,
129 129
130 data->read_tempreg(data->pdev, &regval); 130 data->read_tempreg(data->pdev, &regval);
131 temp = (regval >> 21) * 125; 131 temp = (regval >> 21) * 125;
132 temp -= data->temp_offset; 132 if (temp > data->temp_offset)
133 temp -= data->temp_offset;
134 else
135 temp = 0;
133 136
134 return sprintf(buf, "%u\n", temp); 137 return sprintf(buf, "%u\n", temp);
135} 138}
@@ -227,7 +230,7 @@ static bool has_erratum_319(struct pci_dev *pdev)
227 * and AM3 formats, but that's the best we can do. 230 * and AM3 formats, but that's the best we can do.
228 */ 231 */
229 return boot_cpu_data.x86_model < 4 || 232 return boot_cpu_data.x86_model < 4 ||
230 (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask <= 2); 233 (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
231} 234}
232 235
233static int k10temp_probe(struct pci_dev *pdev, 236static int k10temp_probe(struct pci_dev *pdev,
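Here temp is unsigned, so the old unconditional temp -= data->temp_offset would wrap around to a huge positive value whenever the raw reading sits below the offset; the hunk clamps the result to 0 instead. A runnable demonstration of the wraparound being avoided:

    #include <stdio.h>

    int main(void)
    {
        unsigned int temp = 1000, offset = 1500;

        /* naive subtraction wraps: 1000 - 1500 == 4294966796 (mod 2^32) */
        printf("wrapped: %u\n", temp - offset);

        /* clamped, as in the fix above */
        unsigned int clamped = temp > offset ? temp - offset : 0;
        printf("clamped: %u\n", clamped);
        return 0;
    }
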
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index 5a632bcf869b..e59f9113fb93 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -187,7 +187,7 @@ static int k8temp_probe(struct pci_dev *pdev,
187 return -ENOMEM; 187 return -ENOMEM;
188 188
189 model = boot_cpu_data.x86_model; 189 model = boot_cpu_data.x86_model;
190 stepping = boot_cpu_data.x86_mask; 190 stepping = boot_cpu_data.x86_stepping;
191 191
192 /* feature available since SH-C0, exclude older revisions */ 192 /* feature available since SH-C0, exclude older revisions */
193 if ((model == 4 && stepping == 0) || 193 if ((model == 4 && stepping == 0) ||
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index a9805c7cb305..e2954fb86d65 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -123,8 +123,10 @@ config I2C_I801
123 Wildcat Point (PCH) 123 Wildcat Point (PCH)
124 Wildcat Point-LP (PCH) 124 Wildcat Point-LP (PCH)
125 BayTrail (SOC) 125 BayTrail (SOC)
126 Braswell (SOC)
126 Sunrise Point-H (PCH) 127 Sunrise Point-H (PCH)
127 Sunrise Point-LP (PCH) 128 Sunrise Point-LP (PCH)
129 Kaby Lake-H (PCH)
128 DNV (SOC) 130 DNV (SOC)
129 Broxton (SOC) 131 Broxton (SOC)
130 Lewisburg (PCH) 132 Lewisburg (PCH)
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index cd07a69e2e93..44deae78913e 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -50,6 +50,9 @@
50#define BCM2835_I2C_S_CLKT BIT(9) 50#define BCM2835_I2C_S_CLKT BIT(9)
51#define BCM2835_I2C_S_LEN BIT(10) /* Fake bit for SW error reporting */ 51#define BCM2835_I2C_S_LEN BIT(10) /* Fake bit for SW error reporting */
52 52
53#define BCM2835_I2C_FEDL_SHIFT 16
54#define BCM2835_I2C_REDL_SHIFT 0
55
53#define BCM2835_I2C_CDIV_MIN 0x0002 56#define BCM2835_I2C_CDIV_MIN 0x0002
54#define BCM2835_I2C_CDIV_MAX 0xFFFE 57#define BCM2835_I2C_CDIV_MAX 0xFFFE
55 58
@@ -81,7 +84,7 @@ static inline u32 bcm2835_i2c_readl(struct bcm2835_i2c_dev *i2c_dev, u32 reg)
81 84
82static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev) 85static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev)
83{ 86{
84 u32 divider; 87 u32 divider, redl, fedl;
85 88
86 divider = DIV_ROUND_UP(clk_get_rate(i2c_dev->clk), 89 divider = DIV_ROUND_UP(clk_get_rate(i2c_dev->clk),
87 i2c_dev->bus_clk_rate); 90 i2c_dev->bus_clk_rate);
@@ -100,6 +103,22 @@ static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev)
100 103
101 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DIV, divider); 104 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DIV, divider);
102 105
106 /*
107 * Number of core clocks to wait after falling edge before
108 * outputting the next data bit. Note that both FEDL and REDL
109 * can't be greater than CDIV/2.
110 */
111 fedl = max(divider / 16, 1u);
112
113 /*
114 * Number of core clocks to wait after rising edge before
115 * sampling the next incoming data bit.
116 */
117 redl = max(divider / 4, 1u);
118
119 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DEL,
120 (fedl << BCM2835_I2C_FEDL_SHIFT) |
121 (redl << BCM2835_I2C_REDL_SHIFT));
103 return 0; 122 return 0;
104} 123}
105 124
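
The new bcm2835 code packs two fields into the delay register: FEDL (core clocks to wait after a falling edge before driving the next bit) in bits 31:16 and REDL (core clocks after a rising edge before sampling) in bits 15:0, per the shift constants added above. Assuming, purely for illustration, a 150 MHz core clock and a 100 kHz bus, divider = 1500, giving fedl = 93 and redl = 375, both comfortably below the CDIV/2 = 750 limit the comment mentions. Worked in C:

    #include <stdio.h>

    #define FEDL_SHIFT 16
    #define REDL_SHIFT 0

    int main(void)
    {
        unsigned int divider = 1500;    /* e.g. 150 MHz / 100 kHz */
        unsigned int fedl = divider / 16 > 1 ? divider / 16 : 1;
        unsigned int redl = divider / 4 > 1 ? divider / 4 : 1;

        printf("fedl=%u redl=%u reg=0x%08x\n",
               fedl, redl, (fedl << FEDL_SHIFT) | (redl << REDL_SHIFT));
        return 0;
    }
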
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index ae691884d071..05732531829f 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -209,7 +209,7 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
209 i2c_dw_disable_int(dev); 209 i2c_dw_disable_int(dev);
210 210
211 /* Enable the adapter */ 211 /* Enable the adapter */
212 __i2c_dw_enable(dev, true); 212 __i2c_dw_enable_and_wait(dev, true);
213 213
214 /* Clear and enable interrupts */ 214 /* Clear and enable interrupts */
215 dw_readl(dev, DW_IC_CLR_INTR); 215 dw_readl(dev, DW_IC_CLR_INTR);
@@ -644,7 +644,7 @@ static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
644 gpio = devm_gpiod_get(dev->dev, "scl", GPIOD_OUT_HIGH); 644 gpio = devm_gpiod_get(dev->dev, "scl", GPIOD_OUT_HIGH);
645 if (IS_ERR(gpio)) { 645 if (IS_ERR(gpio)) {
646 r = PTR_ERR(gpio); 646 r = PTR_ERR(gpio);
647 if (r == -ENOENT) 647 if (r == -ENOENT || r == -ENOSYS)
648 return 0; 648 return 0;
649 return r; 649 return r;
650 } 650 }
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 8eac00efadc1..692b34125866 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -58,6 +58,7 @@
58 * Wildcat Point (PCH) 0x8ca2 32 hard yes yes yes 58 * Wildcat Point (PCH) 0x8ca2 32 hard yes yes yes
59 * Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes 59 * Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes
60 * BayTrail (SOC) 0x0f12 32 hard yes yes yes 60 * BayTrail (SOC) 0x0f12 32 hard yes yes yes
61 * Braswell (SOC) 0x2292 32 hard yes yes yes
61 * Sunrise Point-H (PCH) 0xa123 32 hard yes yes yes 62 * Sunrise Point-H (PCH) 0xa123 32 hard yes yes yes
62 * Sunrise Point-LP (PCH) 0x9d23 32 hard yes yes yes 63 * Sunrise Point-LP (PCH) 0x9d23 32 hard yes yes yes
63 * DNV (SOC) 0x19df 32 hard yes yes yes 64 * DNV (SOC) 0x19df 32 hard yes yes yes
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c
index 2fd8b6d00391..87197ece0f90 100644
--- a/drivers/i2c/busses/i2c-sirf.c
+++ b/drivers/i2c/busses/i2c-sirf.c
@@ -341,7 +341,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
341 platform_set_drvdata(pdev, adap); 341 platform_set_drvdata(pdev, adap);
342 init_completion(&siic->done); 342 init_completion(&siic->done);
343 343
344 /* Controller Initalisation */ 344 /* Controller initialisation */
345 345
346 writel(SIRFSOC_I2C_RESET, siic->base + SIRFSOC_I2C_CTRL); 346 writel(SIRFSOC_I2C_RESET, siic->base + SIRFSOC_I2C_CTRL);
347 while (readl(siic->base + SIRFSOC_I2C_CTRL) & SIRFSOC_I2C_RESET) 347 while (readl(siic->base + SIRFSOC_I2C_CTRL) & SIRFSOC_I2C_RESET)
@@ -369,7 +369,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
369 * but they start to affect the speed when clock is set to faster 369 * but they start to affect the speed when clock is set to faster
370 * frequencies. 370 * frequencies.
 371 * Based on actual tests, use different user_div values (which 371 * Based on actual tests, use different user_div values (which
372 * in the divider formular 'Fio / (Fi2c * user_div)') to adapt 372 * in the divider formula 'Fio / (Fi2c * user_div)') to adapt
373 * the different ranges of i2c bus clock frequency, to make the SCL 373 * the different ranges of i2c bus clock frequency, to make the SCL
374 * more accurate. 374 * more accurate.
375 */ 375 */
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
index 327a49ba1991..9515ca165dfd 100644
--- a/drivers/iio/adc/aspeed_adc.c
+++ b/drivers/iio/adc/aspeed_adc.c
@@ -243,7 +243,7 @@ static int aspeed_adc_probe(struct platform_device *pdev)
243 ASPEED_ADC_INIT_POLLING_TIME, 243 ASPEED_ADC_INIT_POLLING_TIME,
244 ASPEED_ADC_INIT_TIMEOUT); 244 ASPEED_ADC_INIT_TIMEOUT);
245 if (ret) 245 if (ret)
246 goto scaler_error; 246 goto poll_timeout_error;
247 } 247 }
248 248
249 /* Start all channels in normal mode. */ 249 /* Start all channels in normal mode. */
@@ -274,9 +274,10 @@ iio_register_error:
274 writel(ASPEED_OPERATION_MODE_POWER_DOWN, 274 writel(ASPEED_OPERATION_MODE_POWER_DOWN,
275 data->base + ASPEED_REG_ENGINE_CONTROL); 275 data->base + ASPEED_REG_ENGINE_CONTROL);
276 clk_disable_unprepare(data->clk_scaler->clk); 276 clk_disable_unprepare(data->clk_scaler->clk);
277reset_error:
278 reset_control_assert(data->rst);
279clk_enable_error: 277clk_enable_error:
278poll_timeout_error:
279 reset_control_assert(data->rst);
280reset_error:
280 clk_hw_unregister_divider(data->clk_scaler); 281 clk_hw_unregister_divider(data->clk_scaler);
281scaler_error: 282scaler_error:
282 clk_hw_unregister_divider(data->clk_prescaler); 283 clk_hw_unregister_divider(data->clk_prescaler);
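
The aspeed hunk is a pure reordering of the error-unwind labels: each label must undo exactly the steps that succeeded before the jump, in reverse order of setup, and the new poll_timeout_error label gives the readl polling failure a landing point at the correct depth. The invariant, sketched with hypothetical step names:

    static int my_probe(void)
    {
        int ret;

        ret = step_a();         /* e.g. clk_prepare_enable() */
        if (ret)
            return ret;

        ret = step_b();         /* e.g. reset_control_deassert() */
        if (ret)
            goto undo_a;

        ret = step_c();         /* e.g. poll for hardware ready */
        if (ret)
            goto undo_b;        /* unwind b, then a; never skip a level */

        return 0;

    undo_b:
        undo_step_b();
    undo_a:
        undo_step_a();
        return ret;
    }
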
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 7f5def465340..9a2583caedaa 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -722,8 +722,6 @@ static int stm32h7_adc_enable(struct stm32_adc *adc)
722 int ret; 722 int ret;
723 u32 val; 723 u32 val;
724 724
725 /* Clear ADRDY by writing one, then enable ADC */
726 stm32_adc_set_bits(adc, STM32H7_ADC_ISR, STM32H7_ADRDY);
727 stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN); 725 stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN);
728 726
729 /* Poll for ADRDY to be set (after adc startup time) */ 727 /* Poll for ADRDY to be set (after adc startup time) */
@@ -731,8 +729,11 @@ static int stm32h7_adc_enable(struct stm32_adc *adc)
731 val & STM32H7_ADRDY, 729 val & STM32H7_ADRDY,
732 100, STM32_ADC_TIMEOUT_US); 730 100, STM32_ADC_TIMEOUT_US);
733 if (ret) { 731 if (ret) {
734 stm32_adc_clr_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN); 732 stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADDIS);
735 dev_err(&indio_dev->dev, "Failed to enable ADC\n"); 733 dev_err(&indio_dev->dev, "Failed to enable ADC\n");
734 } else {
735 /* Clear ADRDY by writing one */
736 stm32_adc_set_bits(adc, STM32H7_ADC_ISR, STM32H7_ADRDY);
736 } 737 }
737 738
738 return ret; 739 return ret;
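
The stm32h7 hunk reorders the enable sequence: ADEN is set first, ADRDY is polled, and ADRDY is only acknowledged (it is write-1-to-clear) once it has actually been observed; on timeout the ADC is now backed out via ADDIS rather than by clearing ADEN, which, per the enable/disable sequence the code appears to follow, is not the sanctioned way to stop this ADC. Condensed to a hedged sketch, where poll_adrdy() stands in for the readl polling loop above:

    stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN);      /* enable */
    ret = poll_adrdy(adc);              /* wait through the startup time */
    if (ret)    /* timeout: request disable via ADDIS */
        stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADDIS);
    else        /* ready: ack ADRDY by writing 1 to it */
        stm32_adc_set_bits(adc, STM32H7_ADC_ISR, STM32H7_ADRDY);
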
diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
index 0dd5a381be64..457372f36791 100644
--- a/drivers/iio/imu/adis_trigger.c
+++ b/drivers/iio/imu/adis_trigger.c
@@ -46,6 +46,10 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
46 if (adis->trig == NULL) 46 if (adis->trig == NULL)
47 return -ENOMEM; 47 return -ENOMEM;
48 48
49 adis->trig->dev.parent = &adis->spi->dev;
50 adis->trig->ops = &adis_trigger_ops;
51 iio_trigger_set_drvdata(adis->trig, adis);
52
49 ret = request_irq(adis->spi->irq, 53 ret = request_irq(adis->spi->irq,
50 &iio_trigger_generic_data_rdy_poll, 54 &iio_trigger_generic_data_rdy_poll,
51 IRQF_TRIGGER_RISING, 55 IRQF_TRIGGER_RISING,
@@ -54,9 +58,6 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
54 if (ret) 58 if (ret)
55 goto error_free_trig; 59 goto error_free_trig;
56 60
57 adis->trig->dev.parent = &adis->spi->dev;
58 adis->trig->ops = &adis_trigger_ops;
59 iio_trigger_set_drvdata(adis->trig, adis);
60 ret = iio_trigger_register(adis->trig); 61 ret = iio_trigger_register(adis->trig);
61 62
62 indio_dev->trig = iio_trigger_get(adis->trig); 63 indio_dev->trig = iio_trigger_get(adis->trig);
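
Moving the dev.parent/ops/drvdata assignments ahead of request_irq() closes a window in which the data-ready interrupt could fire and run iio_trigger_generic_data_rdy_poll() against a half-initialized trigger. The rule: everything an IRQ handler may touch must be valid before the handler can possibly be invoked. A hedged sketch of the corrected ordering (my_probe_trigger and my_trigger_ops are stand-in names):

    static int my_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
    {
        adis->trig = iio_trigger_alloc("%s-dev%d", indio_dev->name,
                                       indio_dev->id);
        if (!adis->trig)
            return -ENOMEM;

        /* fully initialize the trigger BEFORE the IRQ can fire */
        adis->trig->dev.parent = &adis->spi->dev;
        adis->trig->ops = &my_trigger_ops;
        iio_trigger_set_drvdata(adis->trig, adis);

        /* only now is it safe to let the data-ready interrupt in */
        return request_irq(adis->spi->irq,
                           &iio_trigger_generic_data_rdy_poll,
                           IRQF_TRIGGER_RISING, indio_dev->name, adis->trig);
    }
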
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 79abf70a126d..cd5bfe39591b 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -175,7 +175,7 @@ __poll_t iio_buffer_poll(struct file *filp,
175 struct iio_dev *indio_dev = filp->private_data; 175 struct iio_dev *indio_dev = filp->private_data;
176 struct iio_buffer *rb = indio_dev->buffer; 176 struct iio_buffer *rb = indio_dev->buffer;
177 177
178 if (!indio_dev->info) 178 if (!indio_dev->info || rb == NULL)
179 return 0; 179 return 0;
180 180
181 poll_wait(filp, &rb->pollq, wait); 181 poll_wait(filp, &rb->pollq, wait);
diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig
index fcb1c4ba5e41..f726f9427602 100644
--- a/drivers/iio/proximity/Kconfig
+++ b/drivers/iio/proximity/Kconfig
@@ -68,6 +68,8 @@ config SX9500
68 68
69config SRF08 69config SRF08
70 tristate "Devantech SRF02/SRF08/SRF10 ultrasonic ranger sensor" 70 tristate "Devantech SRF02/SRF08/SRF10 ultrasonic ranger sensor"
71 select IIO_BUFFER
72 select IIO_TRIGGERED_BUFFER
71 depends on I2C 73 depends on I2C
72 help 74 help
73 Say Y here to build a driver for Devantech SRF02/SRF08/SRF10 75 Say Y here to build a driver for Devantech SRF02/SRF08/SRF10
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index c4560d84dfae..25bb178f6074 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -305,16 +305,21 @@ void nldev_exit(void);
305static inline struct ib_qp *_ib_create_qp(struct ib_device *dev, 305static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
306 struct ib_pd *pd, 306 struct ib_pd *pd,
307 struct ib_qp_init_attr *attr, 307 struct ib_qp_init_attr *attr,
308 struct ib_udata *udata) 308 struct ib_udata *udata,
309 struct ib_uobject *uobj)
309{ 310{
310 struct ib_qp *qp; 311 struct ib_qp *qp;
311 312
313 if (!dev->create_qp)
314 return ERR_PTR(-EOPNOTSUPP);
315
312 qp = dev->create_qp(pd, attr, udata); 316 qp = dev->create_qp(pd, attr, udata);
313 if (IS_ERR(qp)) 317 if (IS_ERR(qp))
314 return qp; 318 return qp;
315 319
316 qp->device = dev; 320 qp->device = dev;
317 qp->pd = pd; 321 qp->pd = pd;
322 qp->uobject = uobj;
318 /* 323 /*
319 * We don't track XRC QPs for now, because they don't have PD 324 * We don't track XRC QPs for now, because they don't have PD
 319 * and more importantly they are created internally by the driver, 324 * and more importantly they are created internally by the driver,
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 85b5ee4defa4..d8eead5d106d 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -141,7 +141,12 @@ static struct ib_uobject *alloc_uobj(struct ib_ucontext *context,
141 */ 141 */
142 uobj->context = context; 142 uobj->context = context;
143 uobj->type = type; 143 uobj->type = type;
144 atomic_set(&uobj->usecnt, 0); 144 /*
145 * Allocated objects start out as write locked to deny any other
146 * syscalls from accessing them until they are committed. See
147 * rdma_alloc_commit_uobject
148 */
149 atomic_set(&uobj->usecnt, -1);
145 kref_init(&uobj->ref); 150 kref_init(&uobj->ref);
146 151
147 return uobj; 152 return uobj;
@@ -196,7 +201,15 @@ static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *t
196 goto free; 201 goto free;
197 } 202 }
198 203
199 uverbs_uobject_get(uobj); 204 /*
205 * The idr_find is guaranteed to return a pointer to something that
206 * isn't freed yet, or NULL, as the free after idr_remove goes through
207 * kfree_rcu(). However the object may still have been released and
208 * kfree() could be called at any time.
209 */
210 if (!kref_get_unless_zero(&uobj->ref))
211 uobj = ERR_PTR(-ENOENT);
212
200free: 213free:
201 rcu_read_unlock(); 214 rcu_read_unlock();
202 return uobj; 215 return uobj;
@@ -399,13 +412,13 @@ static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj,
399 return ret; 412 return ret;
400} 413}
401 414
402static void lockdep_check(struct ib_uobject *uobj, bool exclusive) 415static void assert_uverbs_usecnt(struct ib_uobject *uobj, bool exclusive)
403{ 416{
404#ifdef CONFIG_LOCKDEP 417#ifdef CONFIG_LOCKDEP
405 if (exclusive) 418 if (exclusive)
406 WARN_ON(atomic_read(&uobj->usecnt) > 0); 419 WARN_ON(atomic_read(&uobj->usecnt) != -1);
407 else 420 else
408 WARN_ON(atomic_read(&uobj->usecnt) == -1); 421 WARN_ON(atomic_read(&uobj->usecnt) <= 0);
409#endif 422#endif
410} 423}
411 424
@@ -444,7 +457,7 @@ int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj)
444 WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n"); 457 WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
445 return 0; 458 return 0;
446 } 459 }
447 lockdep_check(uobj, true); 460 assert_uverbs_usecnt(uobj, true);
448 ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY); 461 ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY);
449 462
450 up_read(&ucontext->cleanup_rwsem); 463 up_read(&ucontext->cleanup_rwsem);
@@ -474,16 +487,17 @@ int rdma_explicit_destroy(struct ib_uobject *uobject)
474 WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n"); 487 WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
475 return 0; 488 return 0;
476 } 489 }
477 lockdep_check(uobject, true); 490 assert_uverbs_usecnt(uobject, true);
478 ret = uobject->type->type_class->remove_commit(uobject, 491 ret = uobject->type->type_class->remove_commit(uobject,
479 RDMA_REMOVE_DESTROY); 492 RDMA_REMOVE_DESTROY);
480 if (ret) 493 if (ret)
481 return ret; 494 goto out;
482 495
483 uobject->type = &null_obj_type; 496 uobject->type = &null_obj_type;
484 497
498out:
485 up_read(&ucontext->cleanup_rwsem); 499 up_read(&ucontext->cleanup_rwsem);
486 return 0; 500 return ret;
487} 501}
488 502
489static void alloc_commit_idr_uobject(struct ib_uobject *uobj) 503static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
@@ -527,6 +541,10 @@ int rdma_alloc_commit_uobject(struct ib_uobject *uobj)
527 return ret; 541 return ret;
528 } 542 }
529 543
544 /* matches atomic_set(-1) in alloc_uobj */
545 assert_uverbs_usecnt(uobj, true);
546 atomic_set(&uobj->usecnt, 0);
547
530 uobj->type->type_class->alloc_commit(uobj); 548 uobj->type->type_class->alloc_commit(uobj);
531 up_read(&uobj->context->cleanup_rwsem); 549 up_read(&uobj->context->cleanup_rwsem);
532 550
@@ -561,7 +579,7 @@ static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive)
561 579
562void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive) 580void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive)
563{ 581{
564 lockdep_check(uobj, exclusive); 582 assert_uverbs_usecnt(uobj, exclusive);
565 uobj->type->type_class->lookup_put(uobj, exclusive); 583 uobj->type->type_class->lookup_put(uobj, exclusive);
566 /* 584 /*
567 * In order to unlock an object, either decrease its usecnt for 585 * In order to unlock an object, either decrease its usecnt for
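
Two related lifetime fixes in rdma_core.c. First, fresh uobjects now start with usecnt == -1 (write-locked), so no other syscall can reach them before rdma_alloc_commit_uobject() asserts the lock and flips the count to 0. Second, the idr lookup replaces an unconditional get with kref_get_unless_zero(): under RCU, idr_find() can return an object whose last reference has already been dropped (the memory survives only because frees go through kfree_rcu()), so only a conditional get is safe. The canonical lookup shape, sketched with a hypothetical my_obj:

    struct my_obj {
        struct kref ref;
        /* ... payload ... */
    };

    /* the freeing side must use kfree_rcu() for this to be safe */
    static struct my_obj *my_lookup(struct idr *idr, unsigned long id)
    {
        struct my_obj *obj;

        rcu_read_lock();
        obj = idr_find(idr, id);
        /*
         * idr_find() may hand back an object whose refcount already hit
         * zero; take a reference only if it is still live.
         */
        if (obj && !kref_get_unless_zero(&obj->ref))
            obj = NULL;
        rcu_read_unlock();

        return obj;
    }
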
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index 857637bf46da..3dbc4e4cca41 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -7,7 +7,6 @@
7#include <rdma/restrack.h> 7#include <rdma/restrack.h>
8#include <linux/mutex.h> 8#include <linux/mutex.h>
9#include <linux/sched/task.h> 9#include <linux/sched/task.h>
10#include <linux/uaccess.h>
11#include <linux/pid_namespace.h> 10#include <linux/pid_namespace.h>
12 11
13void rdma_restrack_init(struct rdma_restrack_root *res) 12void rdma_restrack_init(struct rdma_restrack_root *res)
@@ -63,7 +62,6 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
63{ 62{
64 enum rdma_restrack_type type = res->type; 63 enum rdma_restrack_type type = res->type;
65 struct ib_device *dev; 64 struct ib_device *dev;
66 struct ib_xrcd *xrcd;
67 struct ib_pd *pd; 65 struct ib_pd *pd;
68 struct ib_cq *cq; 66 struct ib_cq *cq;
69 struct ib_qp *qp; 67 struct ib_qp *qp;
@@ -81,10 +79,6 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
81 qp = container_of(res, struct ib_qp, res); 79 qp = container_of(res, struct ib_qp, res);
82 dev = qp->device; 80 dev = qp->device;
83 break; 81 break;
84 case RDMA_RESTRACK_XRCD:
85 xrcd = container_of(res, struct ib_xrcd, res);
86 dev = xrcd->device;
87 break;
88 default: 82 default:
89 WARN_ONCE(true, "Wrong resource tracking type %u\n", type); 83 WARN_ONCE(true, "Wrong resource tracking type %u\n", type);
90 return NULL; 84 return NULL;
@@ -93,6 +87,21 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
93 return dev; 87 return dev;
94} 88}
95 89
90static bool res_is_user(struct rdma_restrack_entry *res)
91{
92 switch (res->type) {
93 case RDMA_RESTRACK_PD:
94 return container_of(res, struct ib_pd, res)->uobject;
95 case RDMA_RESTRACK_CQ:
96 return container_of(res, struct ib_cq, res)->uobject;
97 case RDMA_RESTRACK_QP:
98 return container_of(res, struct ib_qp, res)->uobject;
99 default:
100 WARN_ONCE(true, "Wrong resource tracking type %u\n", res->type);
101 return false;
102 }
103}
104
96void rdma_restrack_add(struct rdma_restrack_entry *res) 105void rdma_restrack_add(struct rdma_restrack_entry *res)
97{ 106{
98 struct ib_device *dev = res_to_dev(res); 107 struct ib_device *dev = res_to_dev(res);
@@ -100,7 +109,7 @@ void rdma_restrack_add(struct rdma_restrack_entry *res)
100 if (!dev) 109 if (!dev)
101 return; 110 return;
102 111
103 if (!uaccess_kernel()) { 112 if (res_is_user(res)) {
104 get_task_struct(current); 113 get_task_struct(current);
105 res->task = current; 114 res->task = current;
106 res->kern_name = NULL; 115 res->kern_name = NULL;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 256934d1f64f..a148de35df8d 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -562,9 +562,10 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
562 if (f.file) 562 if (f.file)
563 fdput(f); 563 fdput(f);
564 564
565 mutex_unlock(&file->device->xrcd_tree_mutex);
566
565 uobj_alloc_commit(&obj->uobject); 567 uobj_alloc_commit(&obj->uobject);
566 568
567 mutex_unlock(&file->device->xrcd_tree_mutex);
568 return in_len; 569 return in_len;
569 570
570err_copy: 571err_copy:
@@ -603,10 +604,8 @@ ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
603 604
604 uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle, 605 uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle,
605 file->ucontext); 606 file->ucontext);
606 if (IS_ERR(uobj)) { 607 if (IS_ERR(uobj))
607 mutex_unlock(&file->device->xrcd_tree_mutex);
608 return PTR_ERR(uobj); 608 return PTR_ERR(uobj);
609 }
610 609
611 ret = uobj_remove_commit(uobj); 610 ret = uobj_remove_commit(uobj);
612 return ret ?: in_len; 611 return ret ?: in_len;
@@ -979,6 +978,9 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
979 struct ib_uverbs_ex_create_cq_resp resp; 978 struct ib_uverbs_ex_create_cq_resp resp;
980 struct ib_cq_init_attr attr = {}; 979 struct ib_cq_init_attr attr = {};
981 980
981 if (!ib_dev->create_cq)
982 return ERR_PTR(-EOPNOTSUPP);
983
982 if (cmd->comp_vector >= file->device->num_comp_vectors) 984 if (cmd->comp_vector >= file->device->num_comp_vectors)
983 return ERR_PTR(-EINVAL); 985 return ERR_PTR(-EINVAL);
984 986
@@ -1030,14 +1032,14 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
1030 resp.response_length = offsetof(typeof(resp), response_length) + 1032 resp.response_length = offsetof(typeof(resp), response_length) +
1031 sizeof(resp.response_length); 1033 sizeof(resp.response_length);
1032 1034
1035 cq->res.type = RDMA_RESTRACK_CQ;
1036 rdma_restrack_add(&cq->res);
1037
1033 ret = cb(file, obj, &resp, ucore, context); 1038 ret = cb(file, obj, &resp, ucore, context);
1034 if (ret) 1039 if (ret)
1035 goto err_cb; 1040 goto err_cb;
1036 1041
1037 uobj_alloc_commit(&obj->uobject); 1042 uobj_alloc_commit(&obj->uobject);
1038 cq->res.type = RDMA_RESTRACK_CQ;
1039 rdma_restrack_add(&cq->res);
1040
1041 return obj; 1043 return obj;
1042 1044
1043err_cb: 1045err_cb:
@@ -1518,7 +1520,8 @@ static int create_qp(struct ib_uverbs_file *file,
1518 if (cmd->qp_type == IB_QPT_XRC_TGT) 1520 if (cmd->qp_type == IB_QPT_XRC_TGT)
1519 qp = ib_create_qp(pd, &attr); 1521 qp = ib_create_qp(pd, &attr);
1520 else 1522 else
1521 qp = _ib_create_qp(device, pd, &attr, uhw); 1523 qp = _ib_create_qp(device, pd, &attr, uhw,
1524 &obj->uevent.uobject);
1522 1525
1523 if (IS_ERR(qp)) { 1526 if (IS_ERR(qp)) {
1524 ret = PTR_ERR(qp); 1527 ret = PTR_ERR(qp);
@@ -1550,8 +1553,10 @@ static int create_qp(struct ib_uverbs_file *file,
1550 atomic_inc(&attr.srq->usecnt); 1553 atomic_inc(&attr.srq->usecnt);
1551 if (ind_tbl) 1554 if (ind_tbl)
1552 atomic_inc(&ind_tbl->usecnt); 1555 atomic_inc(&ind_tbl->usecnt);
1556 } else {
1557 /* It is done in _ib_create_qp for other QP types */
1558 qp->uobject = &obj->uevent.uobject;
1553 } 1559 }
1554 qp->uobject = &obj->uevent.uobject;
1555 1560
1556 obj->uevent.uobject.object = qp; 1561 obj->uevent.uobject.object = qp;
1557 1562
@@ -1971,8 +1976,15 @@ static int modify_qp(struct ib_uverbs_file *file,
1971 goto release_qp; 1976 goto release_qp;
1972 } 1977 }
1973 1978
1979 if ((cmd->base.attr_mask & IB_QP_AV) &&
1980 !rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
1981 ret = -EINVAL;
1982 goto release_qp;
1983 }
1984
1974 if ((cmd->base.attr_mask & IB_QP_ALT_PATH) && 1985 if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
1975 !rdma_is_port_valid(qp->device, cmd->base.alt_port_num)) { 1986 (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
1987 !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) {
1976 ret = -EINVAL; 1988 ret = -EINVAL;
1977 goto release_qp; 1989 goto release_qp;
1978 } 1990 }
@@ -2941,6 +2953,11 @@ int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
2941 wq_init_attr.create_flags = cmd.create_flags; 2953 wq_init_attr.create_flags = cmd.create_flags;
2942 obj->uevent.events_reported = 0; 2954 obj->uevent.events_reported = 0;
2943 INIT_LIST_HEAD(&obj->uevent.event_list); 2955 INIT_LIST_HEAD(&obj->uevent.event_list);
2956
2957 if (!pd->device->create_wq) {
2958 err = -EOPNOTSUPP;
2959 goto err_put_cq;
2960 }
2944 wq = pd->device->create_wq(pd, &wq_init_attr, uhw); 2961 wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
2945 if (IS_ERR(wq)) { 2962 if (IS_ERR(wq)) {
2946 err = PTR_ERR(wq); 2963 err = PTR_ERR(wq);
@@ -3084,7 +3101,12 @@ int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
3084 wq_attr.flags = cmd.flags; 3101 wq_attr.flags = cmd.flags;
3085 wq_attr.flags_mask = cmd.flags_mask; 3102 wq_attr.flags_mask = cmd.flags_mask;
3086 } 3103 }
3104 if (!wq->device->modify_wq) {
3105 ret = -EOPNOTSUPP;
3106 goto out;
3107 }
3087 ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw); 3108 ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
3109out:
3088 uobj_put_obj_read(wq); 3110 uobj_put_obj_read(wq);
3089 return ret; 3111 return ret;
3090} 3112}
@@ -3181,6 +3203,11 @@ int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
3181 3203
3182 init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size; 3204 init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
3183 init_attr.ind_tbl = wqs; 3205 init_attr.ind_tbl = wqs;
3206
3207 if (!ib_dev->create_rwq_ind_table) {
3208 err = -EOPNOTSUPP;
3209 goto err_uobj;
3210 }
3184 rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw); 3211 rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);
3185 3212
3186 if (IS_ERR(rwq_ind_tbl)) { 3213 if (IS_ERR(rwq_ind_tbl)) {
@@ -3770,6 +3797,9 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
3770 struct ib_device_attr attr = {0}; 3797 struct ib_device_attr attr = {0};
3771 int err; 3798 int err;
3772 3799
3800 if (!ib_dev->query_device)
3801 return -EOPNOTSUPP;
3802
3773 if (ucore->inlen < sizeof(cmd)) 3803 if (ucore->inlen < sizeof(cmd))
3774 return -EINVAL; 3804 return -EINVAL;
3775 3805
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index d96dc1d17be1..339b85145044 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -59,6 +59,9 @@ static int uverbs_process_attr(struct ib_device *ibdev,
59 return 0; 59 return 0;
60 } 60 }
61 61
62 if (test_bit(attr_id, attr_bundle_h->valid_bitmap))
63 return -EINVAL;
64
62 spec = &attr_spec_bucket->attrs[attr_id]; 65 spec = &attr_spec_bucket->attrs[attr_id];
63 e = &elements[attr_id]; 66 e = &elements[attr_id];
64 e->uattr = uattr_ptr; 67 e->uattr = uattr_ptr;
diff --git a/drivers/infiniband/core/uverbs_ioctl_merge.c b/drivers/infiniband/core/uverbs_ioctl_merge.c
index 062485f9300d..62e1eb1d2a28 100644
--- a/drivers/infiniband/core/uverbs_ioctl_merge.c
+++ b/drivers/infiniband/core/uverbs_ioctl_merge.c
@@ -114,6 +114,7 @@ static size_t get_elements_above_id(const void **iters,
114 short min = SHRT_MAX; 114 short min = SHRT_MAX;
115 const void *elem; 115 const void *elem;
116 int i, j, last_stored = -1; 116 int i, j, last_stored = -1;
117 unsigned int equal_min = 0;
117 118
118 for_each_element(elem, i, j, elements, num_elements, num_offset, 119 for_each_element(elem, i, j, elements, num_elements, num_offset,
119 data_offset) { 120 data_offset) {
@@ -136,6 +137,10 @@ static size_t get_elements_above_id(const void **iters,
136 */ 137 */
137 iters[last_stored == i ? num_iters - 1 : num_iters++] = elem; 138 iters[last_stored == i ? num_iters - 1 : num_iters++] = elem;
138 last_stored = i; 139 last_stored = i;
140 if (min == GET_ID(id))
141 equal_min++;
142 else
143 equal_min = 1;
139 min = GET_ID(id); 144 min = GET_ID(id);
140 } 145 }
141 146
@@ -146,15 +151,10 @@ static size_t get_elements_above_id(const void **iters,
146 * Therefore, we need to clean the beginning of the array to make sure 151 * Therefore, we need to clean the beginning of the array to make sure
147 * all ids of final elements are equal to min. 152 * all ids of final elements are equal to min.
148 */ 153 */
149 for (i = num_iters - 1; i >= 0 && 154 memmove(iters, iters + num_iters - equal_min, sizeof(*iters) * equal_min);
150 GET_ID(*(u16 *)(iters[i] + id_offset)) == min; i--)
151 ;
152
153 num_iters -= i + 1;
154 memmove(iters, iters + i + 1, sizeof(*iters) * num_iters);
155 155
156 *min_id = min; 156 *min_id = min;
157 return num_iters; 157 return equal_min;
158} 158}
159 159
160#define find_max_element_entry_id(num_elements, elements, num_objects_fld, \ 160#define find_max_element_entry_id(num_elements, elements, num_objects_fld, \
@@ -322,7 +322,7 @@ static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_me
322 hash = kzalloc(sizeof(*hash) + 322 hash = kzalloc(sizeof(*hash) +
323 ALIGN(sizeof(*hash->attrs) * (attr_max_bucket + 1), 323 ALIGN(sizeof(*hash->attrs) * (attr_max_bucket + 1),
324 sizeof(long)) + 324 sizeof(long)) +
325 BITS_TO_LONGS(attr_max_bucket) * sizeof(long), 325 BITS_TO_LONGS(attr_max_bucket + 1) * sizeof(long),
326 GFP_KERNEL); 326 GFP_KERNEL);
327 if (!hash) { 327 if (!hash) {
328 res = -ENOMEM; 328 res = -ENOMEM;
@@ -509,7 +509,7 @@ static struct uverbs_object_spec *build_object_with_methods(const struct uverbs_
509 * first handler which != NULL. This also defines the 509 * first handler which != NULL. This also defines the
510 * set of flags used for this handler. 510 * set of flags used for this handler.
511 */ 511 */
512 for (i = num_object_defs - 1; 512 for (i = num_method_defs - 1;
513 i >= 0 && !method_defs[i]->handler; i--) 513 i >= 0 && !method_defs[i]->handler; i--)
514 ; 514 ;
515 hash->methods[min_id++] = method; 515 hash->methods[min_id++] = method;
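
The rewritten get_elements_above_id() counts, while scanning, how many stored iterators carry the minimal id (equal_min) instead of re-walking the array backwards afterwards; the final memmove then compacts exactly that many entries to the front. The same hunk also fixes two sizing bugs: BITS_TO_LONGS(attr_max_bucket + 1) for the validity bitmap, and iterating num_method_defs rather than num_object_defs when searching for the first non-NULL handler. The one-pass counting idea on a plain array, as a self-contained sketch:

    #include <limits.h>
    #include <stddef.h>

    /* count how many elements equal the running minimum, in one pass */
    static size_t count_min(const int *vals, size_t n, int *min_out)
    {
        int min = INT_MAX;
        size_t equal_min = 0;

        for (size_t i = 0; i < n; i++) {
            if (vals[i] < min) {
                min = vals[i];
                equal_min = 1;      /* new minimum: restart the count */
            } else if (vals[i] == min) {
                equal_min++;
            }
        }
        *min_out = min;
        return equal_min;   /* the kernel memmove()s this many entries */
    }
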
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 395a3b091229..b1ca223aa380 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -650,12 +650,21 @@ static int verify_command_mask(struct ib_device *ib_dev, __u32 command)
650 return -1; 650 return -1;
651} 651}
652 652
653static bool verify_command_idx(u32 command, bool extended)
654{
655 if (extended)
656 return command < ARRAY_SIZE(uverbs_ex_cmd_table);
657
658 return command < ARRAY_SIZE(uverbs_cmd_table);
659}
660
653static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, 661static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
654 size_t count, loff_t *pos) 662 size_t count, loff_t *pos)
655{ 663{
656 struct ib_uverbs_file *file = filp->private_data; 664 struct ib_uverbs_file *file = filp->private_data;
657 struct ib_device *ib_dev; 665 struct ib_device *ib_dev;
658 struct ib_uverbs_cmd_hdr hdr; 666 struct ib_uverbs_cmd_hdr hdr;
667 bool extended_command;
659 __u32 command; 668 __u32 command;
660 __u32 flags; 669 __u32 flags;
661 int srcu_key; 670 int srcu_key;
@@ -688,6 +697,15 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
688 } 697 }
689 698
690 command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK; 699 command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
700 flags = (hdr.command &
701 IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
702
703 extended_command = flags & IB_USER_VERBS_CMD_FLAG_EXTENDED;
704 if (!verify_command_idx(command, extended_command)) {
705 ret = -EINVAL;
706 goto out;
707 }
708
691 if (verify_command_mask(ib_dev, command)) { 709 if (verify_command_mask(ib_dev, command)) {
692 ret = -EOPNOTSUPP; 710 ret = -EOPNOTSUPP;
693 goto out; 711 goto out;
@@ -699,12 +717,8 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
699 goto out; 717 goto out;
700 } 718 }
701 719
702 flags = (hdr.command &
703 IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
704
705 if (!flags) { 720 if (!flags) {
706 if (command >= ARRAY_SIZE(uverbs_cmd_table) || 721 if (!uverbs_cmd_table[command]) {
707 !uverbs_cmd_table[command]) {
708 ret = -EINVAL; 722 ret = -EINVAL;
709 goto out; 723 goto out;
710 } 724 }
@@ -725,8 +739,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
725 struct ib_udata uhw; 739 struct ib_udata uhw;
726 size_t written_count = count; 740 size_t written_count = count;
727 741
728 if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) || 742 if (!uverbs_ex_cmd_table[command]) {
729 !uverbs_ex_cmd_table[command]) {
730 ret = -ENOSYS; 743 ret = -ENOSYS;
731 goto out; 744 goto out;
732 } 745 }
@@ -942,6 +955,7 @@ static const struct file_operations uverbs_fops = {
942 .llseek = no_llseek, 955 .llseek = no_llseek,
943#if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS) 956#if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS)
944 .unlocked_ioctl = ib_uverbs_ioctl, 957 .unlocked_ioctl = ib_uverbs_ioctl,
958 .compat_ioctl = ib_uverbs_ioctl,
945#endif 959#endif
946}; 960};
947 961
@@ -954,6 +968,7 @@ static const struct file_operations uverbs_mmap_fops = {
954 .llseek = no_llseek, 968 .llseek = no_llseek,
955#if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS) 969#if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS)
956 .unlocked_ioctl = ib_uverbs_ioctl, 970 .unlocked_ioctl = ib_uverbs_ioctl,
971 .compat_ioctl = ib_uverbs_ioctl,
957#endif 972#endif
958}; 973};
959 974
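
The uverbs_main.c change hoists the bounds check on the command index into verify_command_idx(), run once against the exact table that will be indexed, before any other dispatch logic consumes the value; the two file_operations also gain .compat_ioctl so 32-bit userspace can reach the ioctl path. A hedged sketch of the reordered dispatch, using the names from the hunk above:

    command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
    flags = (hdr.command & IB_USER_VERBS_CMD_FLAGS_MASK) >>
            IB_USER_VERBS_CMD_FLAGS_SHIFT;

    /* validate the index up front, against the table it will address */
    if (!verify_command_idx(command,
                            flags & IB_USER_VERBS_CMD_FLAG_EXTENDED))
        return -EINVAL;

    /* later table lookups only need the NULL-entry check */
    if (!flags && !uverbs_cmd_table[command])
        return -EINVAL;
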
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index cab0ac3556eb..df1360e6774f 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -234,15 +234,18 @@ static void create_udata(struct uverbs_attr_bundle *ctx,
234 uverbs_attr_get(ctx, UVERBS_UHW_OUT); 234 uverbs_attr_get(ctx, UVERBS_UHW_OUT);
235 235
236 if (!IS_ERR(uhw_in)) { 236 if (!IS_ERR(uhw_in)) {
237 udata->inbuf = uhw_in->ptr_attr.ptr;
238 udata->inlen = uhw_in->ptr_attr.len; 237 udata->inlen = uhw_in->ptr_attr.len;
238 if (uverbs_attr_ptr_is_inline(uhw_in))
239 udata->inbuf = &uhw_in->uattr->data;
240 else
241 udata->inbuf = u64_to_user_ptr(uhw_in->ptr_attr.data);
239 } else { 242 } else {
240 udata->inbuf = NULL; 243 udata->inbuf = NULL;
241 udata->inlen = 0; 244 udata->inlen = 0;
242 } 245 }
243 246
244 if (!IS_ERR(uhw_out)) { 247 if (!IS_ERR(uhw_out)) {
245 udata->outbuf = uhw_out->ptr_attr.ptr; 248 udata->outbuf = u64_to_user_ptr(uhw_out->ptr_attr.data);
246 udata->outlen = uhw_out->ptr_attr.len; 249 udata->outlen = uhw_out->ptr_attr.len;
247 } else { 250 } else {
248 udata->outbuf = NULL; 251 udata->outbuf = NULL;
@@ -323,7 +326,8 @@ static int uverbs_create_cq_handler(struct ib_device *ib_dev,
323 cq->res.type = RDMA_RESTRACK_CQ; 326 cq->res.type = RDMA_RESTRACK_CQ;
324 rdma_restrack_add(&cq->res); 327 rdma_restrack_add(&cq->res);
325 328
326 ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe); 329 ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe,
330 sizeof(cq->cqe));
327 if (ret) 331 if (ret)
328 goto err_cq; 332 goto err_cq;
329 333
@@ -375,7 +379,7 @@ static int uverbs_destroy_cq_handler(struct ib_device *ib_dev,
375 resp.comp_events_reported = obj->comp_events_reported; 379 resp.comp_events_reported = obj->comp_events_reported;
376 resp.async_events_reported = obj->async_events_reported; 380 resp.async_events_reported = obj->async_events_reported;
377 381
378 return uverbs_copy_to(attrs, DESTROY_CQ_RESP, &resp); 382 return uverbs_copy_to(attrs, DESTROY_CQ_RESP, &resp, sizeof(resp));
379} 383}
380 384
381static DECLARE_UVERBS_METHOD( 385static DECLARE_UVERBS_METHOD(
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 16ebc6372c31..93025d2009b8 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -887,7 +887,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
887 if (qp_init_attr->cap.max_rdma_ctxs) 887 if (qp_init_attr->cap.max_rdma_ctxs)
888 rdma_rw_init_qp(device, qp_init_attr); 888 rdma_rw_init_qp(device, qp_init_attr);
889 889
890 qp = _ib_create_qp(device, pd, qp_init_attr, NULL); 890 qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL);
891 if (IS_ERR(qp)) 891 if (IS_ERR(qp))
892 return qp; 892 return qp;
893 893
@@ -898,7 +898,6 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
898 } 898 }
899 899
900 qp->real_qp = qp; 900 qp->real_qp = qp;
901 qp->uobject = NULL;
902 qp->qp_type = qp_init_attr->qp_type; 901 qp->qp_type = qp_init_attr->qp_type;
903 qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl; 902 qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;
904 903
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index ca32057e886f..3eb7a8387116 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -120,7 +120,6 @@ struct bnxt_re_dev {
120#define BNXT_RE_FLAG_HAVE_L2_REF 3 120#define BNXT_RE_FLAG_HAVE_L2_REF 3
121#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4 121#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
122#define BNXT_RE_FLAG_QOS_WORK_REG 5 122#define BNXT_RE_FLAG_QOS_WORK_REG 5
123#define BNXT_RE_FLAG_TASK_IN_PROG 6
124#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29 123#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
125 struct net_device *netdev; 124 struct net_device *netdev;
126 unsigned int version, major, minor; 125 unsigned int version, major, minor;
@@ -158,6 +157,7 @@ struct bnxt_re_dev {
158 atomic_t srq_count; 157 atomic_t srq_count;
159 atomic_t mr_count; 158 atomic_t mr_count;
160 atomic_t mw_count; 159 atomic_t mw_count;
160 atomic_t sched_count;
161 /* Max of 2 lossless traffic class supported per port */ 161 /* Max of 2 lossless traffic class supported per port */
162 u16 cosq[2]; 162 u16 cosq[2];
163 163
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index ae9e9ff54826..643174d949a8 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -174,10 +174,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
174 ib_attr->max_pd = dev_attr->max_pd; 174 ib_attr->max_pd = dev_attr->max_pd;
175 ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom; 175 ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
176 ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom; 176 ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
177 if (dev_attr->is_atomic) { 177 ib_attr->atomic_cap = IB_ATOMIC_NONE;
178 ib_attr->atomic_cap = IB_ATOMIC_HCA; 178 ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
179 ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
180 }
181 179
182 ib_attr->max_ee_rd_atom = 0; 180 ib_attr->max_ee_rd_atom = 0;
183 ib_attr->max_res_rd_atom = 0; 181 ib_attr->max_res_rd_atom = 0;
@@ -787,20 +785,51 @@ int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
787 return 0; 785 return 0;
788} 786}
789 787
788static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
789 __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
790{
791 unsigned long flags;
792
793 spin_lock_irqsave(&qp->scq->cq_lock, flags);
794 if (qp->rcq != qp->scq)
795 spin_lock(&qp->rcq->cq_lock);
796 else
797 __acquire(&qp->rcq->cq_lock);
798
799 return flags;
800}
801
802static void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
803 unsigned long flags)
804 __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
805{
806 if (qp->rcq != qp->scq)
807 spin_unlock(&qp->rcq->cq_lock);
808 else
809 __release(&qp->rcq->cq_lock);
810 spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
811}
812
790/* Queue Pairs */ 813/* Queue Pairs */
791int bnxt_re_destroy_qp(struct ib_qp *ib_qp) 814int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
792{ 815{
793 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); 816 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
794 struct bnxt_re_dev *rdev = qp->rdev; 817 struct bnxt_re_dev *rdev = qp->rdev;
795 int rc; 818 int rc;
819 unsigned int flags;
796 820
797 bnxt_qplib_flush_cqn_wq(&qp->qplib_qp); 821 bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
798 bnxt_qplib_del_flush_qp(&qp->qplib_qp);
799 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); 822 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
800 if (rc) { 823 if (rc) {
801 dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP"); 824 dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
802 return rc; 825 return rc;
803 } 826 }
827
828 flags = bnxt_re_lock_cqs(qp);
829 bnxt_qplib_clean_qp(&qp->qplib_qp);
830 bnxt_re_unlock_cqs(qp, flags);
831 bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
832
804 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) { 833 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
805 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, 834 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
806 &rdev->sqp_ah->qplib_ah); 835 &rdev->sqp_ah->qplib_ah);
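The __acquire()/__release() calls in bnxt_re_lock_cqs()/bnxt_re_unlock_cqs() are sparse-only annotations: they compile to nothing at runtime and exist so the static checker sees a balanced lock context even when the send and receive CQs are the same object. The general shape of the idiom, as a sketch:

    spin_lock_irqsave(&a->lock, flags);
    if (b != a)
            spin_lock(&b->lock);
    else
            __acquire(&b->lock);    /* no-op; keeps sparse's count balanced */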
@@ -810,7 +839,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
810 return rc; 839 return rc;
811 } 840 }
812 841
813 bnxt_qplib_del_flush_qp(&qp->qplib_qp); 842 bnxt_qplib_clean_qp(&qp->qplib_qp);
814 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, 843 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
815 &rdev->qp1_sqp->qplib_qp); 844 &rdev->qp1_sqp->qplib_qp);
816 if (rc) { 845 if (rc) {
@@ -1069,6 +1098,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1069 goto fail; 1098 goto fail;
1070 } 1099 }
1071 qp->qplib_qp.scq = &cq->qplib_cq; 1100 qp->qplib_qp.scq = &cq->qplib_cq;
1101 qp->scq = cq;
1072 } 1102 }
1073 1103
1074 if (qp_init_attr->recv_cq) { 1104 if (qp_init_attr->recv_cq) {
@@ -1080,6 +1110,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1080 goto fail; 1110 goto fail;
1081 } 1111 }
1082 qp->qplib_qp.rcq = &cq->qplib_cq; 1112 qp->qplib_qp.rcq = &cq->qplib_cq;
1113 qp->rcq = cq;
1083 } 1114 }
1084 1115
1085 if (qp_init_attr->srq) { 1116 if (qp_init_attr->srq) {
@@ -1185,7 +1216,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1185 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp); 1216 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1186 if (rc) { 1217 if (rc) {
1187 dev_err(rdev_to_dev(rdev), "Failed to create HW QP"); 1218 dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
1188 goto fail; 1219 goto free_umem;
1189 } 1220 }
1190 } 1221 }
1191 1222
@@ -1213,6 +1244,13 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1213 return &qp->ib_qp; 1244 return &qp->ib_qp;
1214qp_destroy: 1245qp_destroy:
1215 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); 1246 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1247free_umem:
1248 if (udata) {
1249 if (qp->rumem)
1250 ib_umem_release(qp->rumem);
1251 if (qp->sumem)
1252 ib_umem_release(qp->sumem);
1253 }
1216fail: 1254fail:
1217 kfree(qp); 1255 kfree(qp);
1218 return ERR_PTR(rc); 1256 return ERR_PTR(rc);
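The new free_umem label closes a leak: if the firmware QP-create command fails after the user memory regions were mapped, the old goto fail path freed the QP struct but left the umems pinned. The unwind follows the usual kernel idiom of labels undoing work in reverse order of setup; condensed from the hunk above:

    rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
    if (rc)
            goto free_umem;         /* umems were mapped before this point */
    ...
    free_umem:
            if (qp->rumem)
                    ib_umem_release(qp->rumem);
            if (qp->sumem)
                    ib_umem_release(qp->sumem);
    fail:
            kfree(qp);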
@@ -1603,7 +1641,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1603 dev_dbg(rdev_to_dev(rdev), 1641 dev_dbg(rdev_to_dev(rdev),
1604 "Move QP = %p out of flush list\n", 1642 "Move QP = %p out of flush list\n",
1605 qp); 1643 qp);
1606 bnxt_qplib_del_flush_qp(&qp->qplib_qp); 1644 bnxt_qplib_clean_qp(&qp->qplib_qp);
1607 } 1645 }
1608 } 1646 }
1609 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) { 1647 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 423ebe012f95..b88a48d43a9d 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -89,6 +89,8 @@ struct bnxt_re_qp {
89 /* QP1 */ 89 /* QP1 */
90 u32 send_psn; 90 u32 send_psn;
91 struct ib_ud_header qp1_hdr; 91 struct ib_ud_header qp1_hdr;
92 struct bnxt_re_cq *scq;
93 struct bnxt_re_cq *rcq;
92}; 94};
93 95
94struct bnxt_re_cq { 96struct bnxt_re_cq {
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 508d00a5a106..33a448036c2e 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -656,7 +656,6 @@ static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
656 mutex_unlock(&bnxt_re_dev_lock); 656 mutex_unlock(&bnxt_re_dev_lock);
657 657
658 synchronize_rcu(); 658 synchronize_rcu();
659 flush_workqueue(bnxt_re_wq);
660 659
661 ib_dealloc_device(&rdev->ibdev); 660 ib_dealloc_device(&rdev->ibdev);
662 /* rdev is gone */ 661 /* rdev is gone */
@@ -1441,7 +1440,7 @@ static void bnxt_re_task(struct work_struct *work)
1441 break; 1440 break;
1442 } 1441 }
1443 smp_mb__before_atomic(); 1442 smp_mb__before_atomic();
1444 clear_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags); 1443 atomic_dec(&rdev->sched_count);
1445 kfree(re_work); 1444 kfree(re_work);
1446} 1445}
1447 1446
@@ -1503,7 +1502,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
1503 /* netdev notifier will call NETDEV_UNREGISTER again later since 1502 /* netdev notifier will call NETDEV_UNREGISTER again later since
1504 * we are still holding the reference to the netdev 1503 * we are still holding the reference to the netdev
1505 */ 1504 */
1506 if (test_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags)) 1505 if (atomic_read(&rdev->sched_count) > 0)
1507 goto exit; 1506 goto exit;
1508 bnxt_re_ib_unreg(rdev, false); 1507 bnxt_re_ib_unreg(rdev, false);
1509 bnxt_re_remove_one(rdev); 1508 bnxt_re_remove_one(rdev);
@@ -1523,7 +1522,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
1523 re_work->vlan_dev = (real_dev == netdev ? 1522 re_work->vlan_dev = (real_dev == netdev ?
1524 NULL : netdev); 1523 NULL : netdev);
1525 INIT_WORK(&re_work->work, bnxt_re_task); 1524 INIT_WORK(&re_work->work, bnxt_re_task);
1526 set_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags); 1525 atomic_inc(&rdev->sched_count);
1527 queue_work(bnxt_re_wq, &re_work->work); 1526 queue_work(bnxt_re_wq, &re_work->work);
1528 } 1527 }
1529 } 1528 }
@@ -1578,6 +1577,11 @@ static void __exit bnxt_re_mod_exit(void)
1578 */ 1577 */
1579 list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) { 1578 list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) {
1580 dev_info(rdev_to_dev(rdev), "Unregistering Device"); 1579 dev_info(rdev_to_dev(rdev), "Unregistering Device");
1580 /*
1581 * Flush out any scheduled tasks before destroying the
1582 * resources
1583 */
1584 flush_workqueue(bnxt_re_wq);
1581 bnxt_re_dev_stop(rdev); 1585 bnxt_re_dev_stop(rdev);
1582 bnxt_re_ib_unreg(rdev, true); 1586 bnxt_re_ib_unreg(rdev, true);
1583 bnxt_re_remove_one(rdev); 1587 bnxt_re_remove_one(rdev);
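Replacing the BNXT_RE_FLAG_TASK_IN_PROG bit with the sched_count atomic (see the bnxt_re.h hunk earlier) fixes an accounting problem: a single flag bit can record at most one outstanding bnxt_re_task(), but several netdev events can queue work concurrently, and clearing the bit after the first completion made the unregister path believe nothing was in flight. The counting pattern, condensed from the hunks above:

    atomic_inc(&rdev->sched_count);         /* just before queue_work() */
    queue_work(bnxt_re_wq, &re_work->work);
    ...
    atomic_dec(&rdev->sched_count);         /* last thing the work does */
    ...
    if (atomic_read(&rdev->sched_count) > 0)
            goto exit;                      /* defer teardown while busy */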
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 1b0e94697fe3..3ea5b9624f6b 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -173,7 +173,7 @@ static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
173 } 173 }
174} 174}
175 175
176void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp) 176void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
177{ 177{
178 unsigned long flags; 178 unsigned long flags;
179 179
@@ -1419,7 +1419,6 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
1419 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 1419 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1420 struct cmdq_destroy_qp req; 1420 struct cmdq_destroy_qp req;
1421 struct creq_destroy_qp_resp resp; 1421 struct creq_destroy_qp_resp resp;
1422 unsigned long flags;
1423 u16 cmd_flags = 0; 1422 u16 cmd_flags = 0;
1424 int rc; 1423 int rc;
1425 1424
@@ -1437,19 +1436,12 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
1437 return rc; 1436 return rc;
1438 } 1437 }
1439 1438
1440 /* Must walk the associated CQs to nullified the QP ptr */ 1439 return 0;
1441 spin_lock_irqsave(&qp->scq->hwq.lock, flags); 1440}
1442
1443 __clean_cq(qp->scq, (u64)(unsigned long)qp);
1444
1445 if (qp->rcq && qp->rcq != qp->scq) {
1446 spin_lock(&qp->rcq->hwq.lock);
1447 __clean_cq(qp->rcq, (u64)(unsigned long)qp);
1448 spin_unlock(&qp->rcq->hwq.lock);
1449 }
1450
1451 spin_unlock_irqrestore(&qp->scq->hwq.lock, flags);
1452 1441
1442void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
1443 struct bnxt_qplib_qp *qp)
1444{
1453 bnxt_qplib_free_qp_hdr_buf(res, qp); 1445 bnxt_qplib_free_qp_hdr_buf(res, qp);
1454 bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq); 1446 bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
1455 kfree(qp->sq.swq); 1447 kfree(qp->sq.swq);
@@ -1462,7 +1454,6 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
1462 if (qp->orrq.max_elements) 1454 if (qp->orrq.max_elements)
1463 bnxt_qplib_free_hwq(res->pdev, &qp->orrq); 1455 bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
1464 1456
1465 return 0;
1466} 1457}
1467 1458
1468void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp, 1459void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
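bnxt_qplib_destroy_qp() previously issued the firmware DESTROY_QP command and then scrubbed and freed everything itself, taking the CQ hwq locks internally. It is now split: the function only sends the command, CQE scrubbing moves into bnxt_qplib_clean_qp() (called by the verbs layer under the new bnxt_re_lock_cqs() locks), and memory release moves into bnxt_qplib_free_qp_res(). The resulting teardown order, per the ib_verbs.c hunk:

    rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); /* FW cmd */
    flags = bnxt_re_lock_cqs(qp);
    bnxt_qplib_clean_qp(&qp->qplib_qp);     /* scrub CQEs under CQ locks */
    bnxt_re_unlock_cqs(qp, flags);
    bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);     /* free */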
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 211b27a8f9e2..ca0a2ffa3509 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -478,6 +478,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
478int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); 478int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
479int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); 479int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
480int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); 480int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
481void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp);
482void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
483 struct bnxt_qplib_qp *qp);
481void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp, 484void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
482 struct bnxt_qplib_sge *sge); 485 struct bnxt_qplib_sge *sge);
483void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp, 486void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
@@ -500,7 +503,6 @@ void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
500void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq); 503void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
501int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq); 504int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
502void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp); 505void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
503void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp);
504void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp, 506void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
505 unsigned long *flags); 507 unsigned long *flags);
506void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp, 508void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index c015c1861351..03057983341f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -52,18 +52,6 @@ const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
52 52
53/* Device */ 53/* Device */
54 54
55static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
56{
57 int rc;
58 u16 pcie_ctl2;
59
60 rc = pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2,
61 &pcie_ctl2);
62 if (rc)
63 return false;
64 return !!(pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
65}
66
67static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw, 55static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
68 char *fw_ver) 56 char *fw_ver)
69{ 57{
@@ -165,7 +153,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
165 attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc); 153 attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
166 } 154 }
167 155
168 attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw); 156 attr->is_atomic = 0;
169bail: 157bail:
170 bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); 158 bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
171 return rc; 159 return rc;
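Together with the ib_verbs.c hunk that reports IB_ATOMIC_NONE, this removes the PCIe-based probe: the DEVCTL2 AtomicOp-requester bit only says the adapter may issue atomics upstream, which is not by itself a guarantee that RDMA atomic operations complete correctly end to end, so the driver stops advertising them. Consumers are expected to gate atomic work requests on the reported capability; an illustrative check (not from this patch):

    struct ib_device_attr attr;
    ...
    if (attr.atomic_cap != IB_ATOMIC_HCA)
            return -EOPNOTSUPP;     /* use a non-atomic fallback path */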
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index faa9478c14a6..f95b97646c25 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -114,6 +114,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
114 union pvrdma_cmd_resp rsp; 114 union pvrdma_cmd_resp rsp;
115 struct pvrdma_cmd_create_cq *cmd = &req.create_cq; 115 struct pvrdma_cmd_create_cq *cmd = &req.create_cq;
116 struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp; 116 struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
117 struct pvrdma_create_cq_resp cq_resp = {0};
117 struct pvrdma_create_cq ucmd; 118 struct pvrdma_create_cq ucmd;
118 119
119 BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64); 120 BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);
@@ -197,6 +198,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
197 198
198 cq->ibcq.cqe = resp->cqe; 199 cq->ibcq.cqe = resp->cqe;
199 cq->cq_handle = resp->cq_handle; 200 cq->cq_handle = resp->cq_handle;
201 cq_resp.cqn = resp->cq_handle;
200 spin_lock_irqsave(&dev->cq_tbl_lock, flags); 202 spin_lock_irqsave(&dev->cq_tbl_lock, flags);
201 dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq; 203 dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
202 spin_unlock_irqrestore(&dev->cq_tbl_lock, flags); 204 spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
@@ -205,7 +207,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
205 cq->uar = &(to_vucontext(context)->uar); 207 cq->uar = &(to_vucontext(context)->uar);
206 208
207 /* Copy udata back. */ 209 /* Copy udata back. */
208 if (ib_copy_to_udata(udata, &cq->cq_handle, sizeof(__u32))) { 210 if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) {
209 dev_warn(&dev->pdev->dev, 211 dev_warn(&dev->pdev->dev,
210 "failed to copy back udata\n"); 212 "failed to copy back udata\n");
211 pvrdma_destroy_cq(&cq->ibcq); 213 pvrdma_destroy_cq(&cq->ibcq);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
index 5acebb1ef631..af235967a9c2 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -113,6 +113,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
113 union pvrdma_cmd_resp rsp; 113 union pvrdma_cmd_resp rsp;
114 struct pvrdma_cmd_create_srq *cmd = &req.create_srq; 114 struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
115 struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp; 115 struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
116 struct pvrdma_create_srq_resp srq_resp = {0};
116 struct pvrdma_create_srq ucmd; 117 struct pvrdma_create_srq ucmd;
117 unsigned long flags; 118 unsigned long flags;
118 int ret; 119 int ret;
@@ -204,12 +205,13 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
204 } 205 }
205 206
206 srq->srq_handle = resp->srqn; 207 srq->srq_handle = resp->srqn;
208 srq_resp.srqn = resp->srqn;
207 spin_lock_irqsave(&dev->srq_tbl_lock, flags); 209 spin_lock_irqsave(&dev->srq_tbl_lock, flags);
208 dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq; 210 dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;
209 spin_unlock_irqrestore(&dev->srq_tbl_lock, flags); 211 spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
210 212
211 /* Copy udata back. */ 213 /* Copy udata back. */
212 if (ib_copy_to_udata(udata, &srq->srq_handle, sizeof(__u32))) { 214 if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
213 dev_warn(&dev->pdev->dev, "failed to copy back udata\n"); 215 dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
214 pvrdma_destroy_srq(&srq->ibsrq); 216 pvrdma_destroy_srq(&srq->ibsrq);
215 return ERR_PTR(-EINVAL); 217 return ERR_PTR(-EINVAL);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index 16b96616ef7e..a51463cd2f37 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -447,6 +447,7 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
447 union pvrdma_cmd_resp rsp; 447 union pvrdma_cmd_resp rsp;
448 struct pvrdma_cmd_create_pd *cmd = &req.create_pd; 448 struct pvrdma_cmd_create_pd *cmd = &req.create_pd;
449 struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp; 449 struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp;
450 struct pvrdma_alloc_pd_resp pd_resp = {0};
450 int ret; 451 int ret;
451 void *ptr; 452 void *ptr;
452 453
@@ -475,9 +476,10 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
475 pd->privileged = !context; 476 pd->privileged = !context;
476 pd->pd_handle = resp->pd_handle; 477 pd->pd_handle = resp->pd_handle;
477 pd->pdn = resp->pd_handle; 478 pd->pdn = resp->pd_handle;
479 pd_resp.pdn = resp->pd_handle;
478 480
479 if (context) { 481 if (context) {
480 if (ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) { 482 if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) {
481 dev_warn(&dev->pdev->dev, 483 dev_warn(&dev->pdev->dev,
482 "failed to copy back protection domain\n"); 484 "failed to copy back protection domain\n");
483 pvrdma_dealloc_pd(&pd->ibpd); 485 pvrdma_dealloc_pd(&pd->ibpd);
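All three pvrdma hunks fix the same udata pattern: the driver copied back a bare __u32 where the user ABI declares a full response struct, leaving the rest of userspace's response unwritten and making any later ABI growth silently dead. Copying the full struct in turn requires zero-initializing it first, so padding and unset fields reach userspace as zeros rather than kernel stack garbage. The shared shape:

    struct pvrdma_create_cq_resp cq_resp = {0}; /* whole struct zeroed */
    ...
    cq_resp.cqn = resp->cq_handle;
    if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp)))
            /* warn and unwind, as in the hunks above */;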
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
index 11f74cbe6660..ea302b054601 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
@@ -281,8 +281,6 @@ void ipoib_delete_debug_files(struct net_device *dev)
281{ 281{
282 struct ipoib_dev_priv *priv = ipoib_priv(dev); 282 struct ipoib_dev_priv *priv = ipoib_priv(dev);
283 283
284 WARN_ONCE(!priv->mcg_dentry, "null mcg debug file\n");
285 WARN_ONCE(!priv->path_dentry, "null path debug file\n");
286 debugfs_remove(priv->mcg_dentry); 284 debugfs_remove(priv->mcg_dentry);
287 debugfs_remove(priv->path_dentry); 285 debugfs_remove(priv->path_dentry);
288 priv->mcg_dentry = priv->path_dentry = NULL; 286 priv->mcg_dentry = priv->path_dentry = NULL;
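debugfs_remove() already ignores NULL (and error) dentries, so the WARN_ONCE calls fired splats for a situation the API handles silently, e.g. when debugfs file creation had failed or never ran. The removal leaves the idiomatic form:

    debugfs_remove(priv->mcg_dentry);   /* safe even if creation failed */
    priv->mcg_dentry = NULL;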
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 35a408d0ae4f..99bc9bd64b9e 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -205,7 +205,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
205 * for example, an "address" value of 0x12345f000 will 205 * for example, an "address" value of 0x12345f000 will
206 * flush from 0x123440000 to 0x12347ffff (256KiB). */ 206 * flush from 0x123440000 to 0x12347ffff (256KiB). */
207 unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT); 207 unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT);
208 unsigned long mask = __rounddown_pow_of_two(address ^ last);; 208 unsigned long mask = __rounddown_pow_of_two(address ^ last);
209 209
210 desc.high = QI_DEV_EIOTLB_ADDR((address & ~mask) | (mask - 1)) | QI_DEV_EIOTLB_SIZE; 210 desc.high = QI_DEV_EIOTLB_ADDR((address & ~mask) | (mask - 1)) | QI_DEV_EIOTLB_SIZE;
211 } else { 211 } else {
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index 55cfb986225b..faf734ff4cf3 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -339,9 +339,6 @@ int __init bcm7038_l1_of_init(struct device_node *dn,
339 goto out_unmap; 339 goto out_unmap;
340 } 340 }
341 341
342 pr_info("registered BCM7038 L1 intc (mem: 0x%p, IRQs: %d)\n",
343 intc->cpus[0]->map_base, IRQS_PER_WORD * intc->n_words);
344
345 return 0; 342 return 0;
346 343
347out_unmap: 344out_unmap:
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index 983640eba418..8968e5e93fcb 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -318,9 +318,6 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
318 } 318 }
319 } 319 }
320 320
321 pr_info("registered %s intc (mem: 0x%p, parent IRQ(s): %d)\n",
322 intc_name, data->map_base[0], data->num_parent_irqs);
323
324 return 0; 321 return 0;
325 322
326out_free_domain: 323out_free_domain:
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 691d20eb0bec..0e65f609352e 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -262,9 +262,6 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
262 ct->chip.irq_set_wake = irq_gc_set_wake; 262 ct->chip.irq_set_wake = irq_gc_set_wake;
263 } 263 }
264 264
265 pr_info("registered L2 intc (mem: 0x%p, parent irq: %d)\n",
266 base, parent_irq);
267
268 return 0; 265 return 0;
269 266
270out_free_domain: 267out_free_domain:
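These three pr_info deletions (bcm7038-l1, bcm7120-l2, brcmstb-l2) share one motivation: since %p started printing hashed pointer values, logging a register base address conveys nothing useful, and switching to raw %px would reintroduce an address leak, so the lines are simply dropped. What such a line now produces, for illustration:

    pr_info("registered L2 intc (mem: 0x%p, parent irq: %d)\n", base, irq);
    /* dmesg shows a per-boot hashed token, not the mapping address */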
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index 993a8426a453..1ff38aff9f29 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -94,7 +94,7 @@ static struct irq_chip gicv2m_msi_irq_chip = {
94 94
95static struct msi_domain_info gicv2m_msi_domain_info = { 95static struct msi_domain_info gicv2m_msi_domain_info = {
96 .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | 96 .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
97 MSI_FLAG_PCI_MSIX), 97 MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
98 .chip = &gicv2m_msi_irq_chip, 98 .chip = &gicv2m_msi_irq_chip,
99}; 99};
100 100
@@ -155,18 +155,12 @@ static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
155 return 0; 155 return 0;
156} 156}
157 157
158static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq) 158static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq,
159 int nr_irqs)
159{ 160{
160 int pos;
161
162 pos = hwirq - v2m->spi_start;
163 if (pos < 0 || pos >= v2m->nr_spis) {
164 pr_err("Failed to teardown msi. Invalid hwirq %d\n", hwirq);
165 return;
166 }
167
168 spin_lock(&v2m_lock); 161 spin_lock(&v2m_lock);
169 __clear_bit(pos, v2m->bm); 162 bitmap_release_region(v2m->bm, hwirq - v2m->spi_start,
163 get_count_order(nr_irqs));
170 spin_unlock(&v2m_lock); 164 spin_unlock(&v2m_lock);
171} 165}
172 166
@@ -174,13 +168,13 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
174 unsigned int nr_irqs, void *args) 168 unsigned int nr_irqs, void *args)
175{ 169{
176 struct v2m_data *v2m = NULL, *tmp; 170 struct v2m_data *v2m = NULL, *tmp;
177 int hwirq, offset, err = 0; 171 int hwirq, offset, i, err = 0;
178 172
179 spin_lock(&v2m_lock); 173 spin_lock(&v2m_lock);
180 list_for_each_entry(tmp, &v2m_nodes, entry) { 174 list_for_each_entry(tmp, &v2m_nodes, entry) {
181 offset = find_first_zero_bit(tmp->bm, tmp->nr_spis); 175 offset = bitmap_find_free_region(tmp->bm, tmp->nr_spis,
182 if (offset < tmp->nr_spis) { 176 get_count_order(nr_irqs));
183 __set_bit(offset, tmp->bm); 177 if (offset >= 0) {
184 v2m = tmp; 178 v2m = tmp;
185 break; 179 break;
186 } 180 }
@@ -192,16 +186,21 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
192 186
193 hwirq = v2m->spi_start + offset; 187 hwirq = v2m->spi_start + offset;
194 188
195 err = gicv2m_irq_gic_domain_alloc(domain, virq, hwirq); 189 for (i = 0; i < nr_irqs; i++) {
196 if (err) { 190 err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
197 gicv2m_unalloc_msi(v2m, hwirq); 191 if (err)
198 return err; 192 goto fail;
199 }
200 193
201 irq_domain_set_hwirq_and_chip(domain, virq, hwirq, 194 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
202 &gicv2m_irq_chip, v2m); 195 &gicv2m_irq_chip, v2m);
196 }
203 197
204 return 0; 198 return 0;
199
200fail:
201 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
202 gicv2m_unalloc_msi(v2m, hwirq, get_count_order(nr_irqs));
203 return err;
205} 204}
206 205
207static void gicv2m_irq_domain_free(struct irq_domain *domain, 206static void gicv2m_irq_domain_free(struct irq_domain *domain,
@@ -210,8 +209,7 @@ static void gicv2m_irq_domain_free(struct irq_domain *domain,
210 struct irq_data *d = irq_domain_get_irq_data(domain, virq); 209 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
211 struct v2m_data *v2m = irq_data_get_irq_chip_data(d); 210 struct v2m_data *v2m = irq_data_get_irq_chip_data(d);
212 211
213 BUG_ON(nr_irqs != 1); 212 gicv2m_unalloc_msi(v2m, d->hwirq, nr_irqs);
214 gicv2m_unalloc_msi(v2m, d->hwirq);
215 irq_domain_free_irqs_parent(domain, virq, nr_irqs); 213 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
216} 214}
217 215
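The gic-v2m rework enables MSI_FLAG_MULTI_PCI_MSI, which requires the controller to hand out naturally aligned power-of-two blocks of vectors; bitmap_find_free_region()/bitmap_release_region() with get_count_order(nr_irqs) provide exactly that allocation discipline. One wrinkle in the fail path above: gicv2m_unalloc_msi() applies get_count_order() to its own nr_irqs argument, so passing get_count_order(nr_irqs) at the call site converts twice and releases a wrongly sized region; a later upstream fix passes the raw count. Sketch of the corrected error path:

    fail:
            irq_domain_free_irqs_parent(domain, virq, nr_irqs);
            gicv2m_unalloc_msi(v2m, hwirq, nr_irqs);  /* helper applies
                                                         get_count_order() */
            return err;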
diff --git a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
index 13a5d9a1de96..4eca5c763766 100644
--- a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
@@ -71,6 +71,8 @@ static int __init its_fsl_mc_msi_init(void)
71 71
72 for (np = of_find_matching_node(NULL, its_device_id); np; 72 for (np = of_find_matching_node(NULL, its_device_id); np;
73 np = of_find_matching_node(np, its_device_id)) { 73 np = of_find_matching_node(np, its_device_id)) {
74 if (!of_device_is_available(np))
75 continue;
74 if (!of_property_read_bool(np, "msi-controller")) 76 if (!of_property_read_bool(np, "msi-controller"))
75 continue; 77 continue;
76 78
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index 14a8c0a7e095..25a98de5cfb2 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -132,6 +132,8 @@ static int __init its_pci_of_msi_init(void)
132 132
133 for (np = of_find_matching_node(NULL, its_device_id); np; 133 for (np = of_find_matching_node(NULL, its_device_id); np;
134 np = of_find_matching_node(np, its_device_id)) { 134 np = of_find_matching_node(np, its_device_id)) {
135 if (!of_device_is_available(np))
136 continue;
135 if (!of_property_read_bool(np, "msi-controller")) 137 if (!of_property_read_bool(np, "msi-controller"))
136 continue; 138 continue;
137 139
diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
index 833a90fe33ae..8881a053c173 100644
--- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
@@ -154,6 +154,8 @@ static void __init its_pmsi_of_init(void)
154 154
155 for (np = of_find_matching_node(NULL, its_device_id); np; 155 for (np = of_find_matching_node(NULL, its_device_id); np;
156 np = of_find_matching_node(np, its_device_id)) { 156 np = of_find_matching_node(np, its_device_id)) {
157 if (!of_device_is_available(np))
158 continue;
157 if (!of_property_read_bool(np, "msi-controller")) 159 if (!of_property_read_bool(np, "msi-controller"))
158 continue; 160 continue;
159 161
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 06f025fd5726..1d3056f53747 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -3314,6 +3314,8 @@ static int __init its_of_probe(struct device_node *node)
3314 3314
3315 for (np = of_find_matching_node(node, its_device_id); np; 3315 for (np = of_find_matching_node(node, its_device_id); np;
3316 np = of_find_matching_node(np, its_device_id)) { 3316 np = of_find_matching_node(np, its_device_id)) {
3317 if (!of_device_is_available(np))
3318 continue;
3317 if (!of_property_read_bool(np, "msi-controller")) { 3319 if (!of_property_read_bool(np, "msi-controller")) {
3318 pr_warn("%pOF: no msi-controller property, ITS ignored\n", 3320 pr_warn("%pOF: no msi-controller property, ITS ignored\n",
3319 np); 3321 np);
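Four ITS-related probe loops (fsl-mc MSI, PCI MSI, platform MSI, and the ITS core) gain the same guard: of_find_matching_node() returns nodes regardless of their status property, so a node carrying status = "disabled" must be skipped explicitly or a disabled ITS gets probed anyway. The pattern:

    for (np = of_find_matching_node(NULL, its_device_id); np;
         np = of_find_matching_node(np, its_device_id)) {
            if (!of_device_is_available(np))
                    continue;       /* honour status = "disabled" */
            ...
    }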
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index a57c0fbbd34a..d99cc07903ec 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -673,7 +673,7 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
673 MPIDR_TO_SGI_RS(cluster_id) | 673 MPIDR_TO_SGI_RS(cluster_id) |
674 tlist << ICC_SGI1R_TARGET_LIST_SHIFT); 674 tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
675 675
676 pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); 676 pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
677 gic_write_sgi1r(val); 677 gic_write_sgi1r(val);
678} 678}
679 679
@@ -688,7 +688,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
688 * Ensure that stores to Normal memory are visible to the 688 * Ensure that stores to Normal memory are visible to the
689 * other CPUs before issuing the IPI. 689 * other CPUs before issuing the IPI.
690 */ 690 */
691 smp_wmb(); 691 wmb();
692 692
693 for_each_cpu(cpu, mask) { 693 for_each_cpu(cpu, mask) {
694 u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu)); 694 u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
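The barrier strengthening in gic_raise_softirq() reflects what the SGI write is: smp_wmb() (a dmb on arm64) only orders memory accesses against other memory accesses, while the IPI here is triggered by a system-register write, which dmb does not order against. The full wmb() (a dsb) ensures prior normal-memory stores are visible to the target CPU before the interrupt can be raised:

    wmb();                  /* dsb: stores visible before the IPI fires */
    gic_write_sgi1r(val);   /* sysreg write actually raises the SGI */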
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index ef92a4d2038e..d32268cc1174 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -424,8 +424,6 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
424 spin_lock_irqsave(&gic_lock, flags); 424 spin_lock_irqsave(&gic_lock, flags);
425 write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); 425 write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
426 write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu))); 426 write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
427 gic_clear_pcpu_masks(intr);
428 set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
429 irq_data_update_effective_affinity(data, cpumask_of(cpu)); 427 irq_data_update_effective_affinity(data, cpumask_of(cpu));
430 spin_unlock_irqrestore(&gic_lock, flags); 428 spin_unlock_irqrestore(&gic_lock, flags);
431 429
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 62f541f968f6..07074820a167 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -375,6 +375,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
375 dev->ofdev.dev.of_node = np; 375 dev->ofdev.dev.of_node = np;
376 dev->ofdev.archdata.dma_mask = 0xffffffffUL; 376 dev->ofdev.archdata.dma_mask = 0xffffffffUL;
377 dev->ofdev.dev.dma_mask = &dev->ofdev.archdata.dma_mask; 377 dev->ofdev.dev.dma_mask = &dev->ofdev.archdata.dma_mask;
378 dev->ofdev.dev.coherent_dma_mask = dev->ofdev.archdata.dma_mask;
378 dev->ofdev.dev.parent = parent; 379 dev->ofdev.dev.parent = parent;
379 dev->ofdev.dev.bus = &macio_bus_type; 380 dev->ofdev.dev.bus = &macio_bus_type;
380 dev->ofdev.dev.release = macio_release_dev; 381 dev->ofdev.dev.release = macio_release_dev;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index d6de00f367ef..68136806d365 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -903,7 +903,8 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
903 queue_io(md, bio); 903 queue_io(md, bio);
904 } else { 904 } else {
905 /* done with normal IO or empty flush */ 905 /* done with normal IO or empty flush */
906 bio->bi_status = io_error; 906 if (io_error)
907 bio->bi_status = io_error;
907 bio_endio(bio); 908 bio_endio(bio);
908 } 909 }
909 } 910 }
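The dec_pending() guard prevents a stored error from being clobbered: for cloned or chained bios, bio->bi_status may already hold a failure recorded elsewhere, and the old unconditional assignment could overwrite it with BLK_STS_OK (0) when this completion path saw no error of its own:

    if (io_error)
            bio->bi_status = io_error;  /* never overwrite with success */
    bio_endio(bio);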
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index b2eae332e1a2..f978eddc7a21 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1108,7 +1108,7 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
1108 1108
1109 bio_copy_data(behind_bio, bio); 1109 bio_copy_data(behind_bio, bio);
1110skip_copy: 1110skip_copy:
1111 r1_bio->behind_master_bio = behind_bio;; 1111 r1_bio->behind_master_bio = behind_bio;
1112 set_bit(R1BIO_BehindIO, &r1_bio->state); 1112 set_bit(R1BIO_BehindIO, &r1_bio->state);
1113 1113
1114 return; 1114 return;
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 8d12017b9893..4470630dd545 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -2687,6 +2687,8 @@ mptctl_hp_targetinfo(unsigned long arg)
2687 __FILE__, __LINE__, iocnum); 2687 __FILE__, __LINE__, iocnum);
2688 return -ENODEV; 2688 return -ENODEV;
2689 } 2689 }
2690 if (karg.hdr.id >= MPT_MAX_FC_DEVICES)
2691 return -EINVAL;
2690 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n", 2692 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n",
2691 ioc->name)); 2693 ioc->name));
2692 2694
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 3e5eabdae8d9..772d02922529 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -548,12 +548,6 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
548 goto out; 548 goto out;
549 } 549 }
550 550
551 if (bus->dev_state == MEI_DEV_POWER_DOWN) {
552 dev_dbg(bus->dev, "Device is powering down, don't bother with disconnection\n");
553 err = 0;
554 goto out;
555 }
556
557 err = mei_cl_disconnect(cl); 551 err = mei_cl_disconnect(cl);
558 if (err < 0) 552 if (err < 0)
559 dev_err(bus->dev, "Could not disconnect from the ME client\n"); 553 dev_err(bus->dev, "Could not disconnect from the ME client\n");
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index be64969d986a..7e60c1817c31 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -945,6 +945,12 @@ int mei_cl_disconnect(struct mei_cl *cl)
945 return 0; 945 return 0;
946 } 946 }
947 947
948 if (dev->dev_state == MEI_DEV_POWER_DOWN) {
949 cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
950 mei_cl_set_disconnected(cl);
951 return 0;
952 }
953
948 rets = pm_runtime_get(dev->dev); 954 rets = pm_runtime_get(dev->dev);
949 if (rets < 0 && rets != -EINPROGRESS) { 955 if (rets < 0 && rets != -EINPROGRESS) {
950 pm_runtime_put_noidle(dev->dev); 956 pm_runtime_put_noidle(dev->dev);
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 0ccccbaf530d..e4b10b2d1a08 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -132,6 +132,11 @@
132#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ 132#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
133#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */ 133#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */
134 134
135#define MEI_DEV_ID_CNP_LP 0x9DE0 /* Cannon Point LP */
136#define MEI_DEV_ID_CNP_LP_4 0x9DE4 /* Cannon Point LP 4 (iTouch) */
137#define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */
138#define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */
139
135/* 140/*
136 * MEI HW Section 141 * MEI HW Section
137 */ 142 */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 4a0ccda4d04b..ea4e152270a3 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -98,6 +98,11 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
98 {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, 98 {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
99 {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)}, 99 {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},
100 100
101 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH8_CFG)},
102 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_4, MEI_ME_PCH8_CFG)},
103 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH8_CFG)},
104 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
105
101 /* required last entry */ 106 /* required last entry */
102 {0, } 107 {0, }
103}; 108};
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
index d9aa407db06a..337462e1569f 100644
--- a/drivers/misc/ocxl/file.c
+++ b/drivers/misc/ocxl/file.c
@@ -133,8 +133,10 @@ static long afu_ioctl(struct file *file, unsigned int cmd,
133 if (!rc) { 133 if (!rc) {
134 rc = copy_to_user((u64 __user *) args, &irq_offset, 134 rc = copy_to_user((u64 __user *) args, &irq_offset,
135 sizeof(irq_offset)); 135 sizeof(irq_offset));
136 if (rc) 136 if (rc) {
137 ocxl_afu_irq_free(ctx, irq_offset); 137 ocxl_afu_irq_free(ctx, irq_offset);
138 return -EFAULT;
139 }
138 } 140 }
139 break; 141 break;
140 142
@@ -277,7 +279,7 @@ static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
277 struct ocxl_context *ctx = file->private_data; 279 struct ocxl_context *ctx = file->private_data;
278 struct ocxl_kernel_event_header header; 280 struct ocxl_kernel_event_header header;
279 ssize_t rc; 281 ssize_t rc;
280 size_t used = 0; 282 ssize_t used = 0;
281 DEFINE_WAIT(event_wait); 283 DEFINE_WAIT(event_wait);
282 284
283 memset(&header, 0, sizeof(header)); 285 memset(&header, 0, sizeof(header));
@@ -329,7 +331,7 @@ static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
329 331
330 used += sizeof(header); 332 used += sizeof(header);
331 333
332 rc = (ssize_t) used; 334 rc = used;
333 return rc; 335 return rc;
334} 336}
335 337
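Two distinct fixes in the ocxl hunks. First, copy_to_user() returns the number of bytes it failed to copy, not an errno, so the ioctl previously handed a positive byte count back to userspace on fault; the handler must convert a nonzero result to -EFAULT itself:

    if (copy_to_user((u64 __user *)args, &irq_offset, sizeof(irq_offset))) {
            ocxl_afu_irq_free(ctx, irq_offset); /* undo before failing */
            return -EFAULT;
    }

Second, 'used' becomes ssize_t so afu_read()'s return value is computed in the signed type the function is declared to return, instead of being cast at the end.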
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 229dc18f0581..768972af8b85 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1265,7 +1265,8 @@ static int bcm2835_add_host(struct bcm2835_host *host)
1265 char pio_limit_string[20]; 1265 char pio_limit_string[20];
1266 int ret; 1266 int ret;
1267 1267
1268 mmc->f_max = host->max_clk; 1268 if (!mmc->f_max || mmc->f_max > host->max_clk)
1269 mmc->f_max = host->max_clk;
1269 mmc->f_min = host->max_clk / SDCDIV_MAX_CDIV; 1270 mmc->f_min = host->max_clk / SDCDIV_MAX_CDIV;
1270 1271
1271 mmc->max_busy_timeout = ~0 / (mmc->f_max / 1000); 1272 mmc->max_busy_timeout = ~0 / (mmc->f_max / 1000);
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 22438ebfe4e6..4f972b879fe6 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -717,22 +717,6 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
717static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) 717static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
718{ 718{
719 struct meson_host *host = mmc_priv(mmc); 719 struct meson_host *host = mmc_priv(mmc);
720 int ret;
721
722 /*
723 * If this is the initial tuning, try to get a sane Rx starting
724 * phase before doing the actual tuning.
725 */
726 if (!mmc->doing_retune) {
727 ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
728
729 if (ret)
730 return ret;
731 }
732
733 ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk);
734 if (ret)
735 return ret;
736 720
737 return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); 721 return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
738} 722}
@@ -763,9 +747,8 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
763 if (!IS_ERR(mmc->supply.vmmc)) 747 if (!IS_ERR(mmc->supply.vmmc))
764 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); 748 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
765 749
766 /* Reset phases */ 750 /* Reset rx phase */
767 clk_set_phase(host->rx_clk, 0); 751 clk_set_phase(host->rx_clk, 0);
768 clk_set_phase(host->tx_clk, 270);
769 752
770 break; 753 break;
771 754
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index e6b8c59f2c0d..736ac887303c 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -328,7 +328,7 @@ config MTD_NAND_MARVELL
328 tristate "NAND controller support on Marvell boards" 328 tristate "NAND controller support on Marvell boards"
329 depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \ 329 depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \
330 COMPILE_TEST 330 COMPILE_TEST
331 depends on HAS_IOMEM 331 depends on HAS_IOMEM && HAS_DMA
332 help 332 help
333 This enables the NAND flash controller driver for Marvell boards, 333 This enables the NAND flash controller driver for Marvell boards,
334 including: 334 including:
diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c
index 80d31a58e558..f367144f3c6f 100644
--- a/drivers/mtd/nand/vf610_nfc.c
+++ b/drivers/mtd/nand/vf610_nfc.c
@@ -752,10 +752,8 @@ static int vf610_nfc_probe(struct platform_device *pdev)
752 if (mtd->oobsize > 64) 752 if (mtd->oobsize > 64)
753 mtd->oobsize = 64; 753 mtd->oobsize = 64;
754 754
755 /* 755 /* Use default large page ECC layout defined in NAND core */
756 * mtd->ecclayout is not specified here because we're using the 756 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
757 * default large page ECC layout defined in NAND core.
758 */
759 if (chip->ecc.strength == 32) { 757 if (chip->ecc.strength == 32) {
760 nfc->ecc_mode = ECC_60_BYTE; 758 nfc->ecc_mode = ECC_60_BYTE;
761 chip->ecc.bytes = 60; 759 chip->ecc.bytes = 60;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index 3e5833cf1fab..eb23f9ba1a9a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -426,6 +426,8 @@ static int xgbe_pci_resume(struct pci_dev *pdev)
426 struct net_device *netdev = pdata->netdev; 426 struct net_device *netdev = pdata->netdev;
427 int ret = 0; 427 int ret = 0;
428 428
429 XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
430
429 pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER; 431 pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
430 XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl); 432 XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
431 433
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 22889fc158f2..87c4308b52a7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -226,6 +226,10 @@ static int aq_pci_probe(struct pci_dev *pdev,
226 goto err_ioremap; 226 goto err_ioremap;
227 227
228 self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL); 228 self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL);
229 if (!self->aq_hw) {
230 err = -ENOMEM;
231 goto err_ioremap;
232 }
229 self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self); 233 self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);
230 234
231 for (bar = 0; bar < 4; ++bar) { 235 for (bar = 0; bar < 4; ++bar) {
@@ -235,19 +239,19 @@ static int aq_pci_probe(struct pci_dev *pdev,
235 mmio_pa = pci_resource_start(pdev, bar); 239 mmio_pa = pci_resource_start(pdev, bar);
236 if (mmio_pa == 0U) { 240 if (mmio_pa == 0U) {
237 err = -EIO; 241 err = -EIO;
238 goto err_ioremap; 242 goto err_free_aq_hw;
239 } 243 }
240 244
241 reg_sz = pci_resource_len(pdev, bar); 245 reg_sz = pci_resource_len(pdev, bar);
242 if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) { 246 if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) {
243 err = -EIO; 247 err = -EIO;
244 goto err_ioremap; 248 goto err_free_aq_hw;
245 } 249 }
246 250
247 self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz); 251 self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz);
248 if (!self->aq_hw->mmio) { 252 if (!self->aq_hw->mmio) {
249 err = -EIO; 253 err = -EIO;
250 goto err_ioremap; 254 goto err_free_aq_hw;
251 } 255 }
252 break; 256 break;
253 } 257 }
@@ -255,7 +259,7 @@ static int aq_pci_probe(struct pci_dev *pdev,
255 259
256 if (bar == 4) { 260 if (bar == 4) {
257 err = -EIO; 261 err = -EIO;
258 goto err_ioremap; 262 goto err_free_aq_hw;
259 } 263 }
260 264
261 numvecs = min((u8)AQ_CFG_VECS_DEF, 265 numvecs = min((u8)AQ_CFG_VECS_DEF,
@@ -290,6 +294,8 @@ err_register:
290 aq_pci_free_irq_vectors(self); 294 aq_pci_free_irq_vectors(self);
291err_hwinit: 295err_hwinit:
292 iounmap(self->aq_hw->mmio); 296 iounmap(self->aq_hw->mmio);
297err_free_aq_hw:
298 kfree(self->aq_hw);
293err_ioremap: 299err_ioremap:
294 free_netdev(ndev); 300 free_netdev(ndev);
295err_pci_func: 301err_pci_func:
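The aquantia probe fix is twofold: the kzalloc() result was used unchecked, and once the check exists, every later failure must free the new allocation, hence the err_free_aq_hw label slotted between the existing ones. The labels keep the kernel convention of undoing resources in reverse order of acquisition:

    self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL);
    if (!self->aq_hw) {
            err = -ENOMEM;
            goto err_ioremap;       /* nothing of ours to free yet */
    }
    ...
    err_free_aq_hw:
            kfree(self->aq_hw);
    err_ioremap:
            free_netdev(ndev);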
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index a77ee2f8fb8d..c1841db1b500 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -820,7 +820,7 @@ static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
820 820
821 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); 821 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
822 822
823 udelay(10); 823 usleep_range(10, 20);
824 timeout_us -= (timeout_us > 10) ? 10 : timeout_us; 824 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
825 } 825 }
826 826
@@ -922,8 +922,8 @@ static int tg3_ape_send_event(struct tg3 *tp, u32 event)
922 if (!(apedata & APE_FW_STATUS_READY)) 922 if (!(apedata & APE_FW_STATUS_READY))
923 return -EAGAIN; 923 return -EAGAIN;
924 924
925 /* Wait for up to 1 millisecond for APE to service previous event. */ 925 /* Wait for up to 20 milliseconds for APE to service previous event. */
926 err = tg3_ape_event_lock(tp, 1000); 926 err = tg3_ape_event_lock(tp, 20000);
927 if (err) 927 if (err)
928 return err; 928 return err;
929 929
@@ -946,6 +946,7 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
946 946
947 switch (kind) { 947 switch (kind) {
948 case RESET_KIND_INIT: 948 case RESET_KIND_INIT:
949 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
949 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 950 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
950 APE_HOST_SEG_SIG_MAGIC); 951 APE_HOST_SEG_SIG_MAGIC);
951 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, 952 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
@@ -962,13 +963,6 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
962 event = APE_EVENT_STATUS_STATE_START; 963 event = APE_EVENT_STATUS_STATE_START;
963 break; 964 break;
964 case RESET_KIND_SHUTDOWN: 965 case RESET_KIND_SHUTDOWN:
965 /* With the interface we are currently using,
966 * APE does not track driver state. Wiping
967 * out the HOST SEGMENT SIGNATURE forces
968 * the APE to assume OS absent status.
969 */
970 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
971
972 if (device_may_wakeup(&tp->pdev->dev) && 966 if (device_may_wakeup(&tp->pdev->dev) &&
973 tg3_flag(tp, WOL_ENABLE)) { 967 tg3_flag(tp, WOL_ENABLE)) {
974 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED, 968 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
@@ -990,6 +984,18 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
990 tg3_ape_send_event(tp, event); 984 tg3_ape_send_event(tp, event);
991} 985}
992 986
987static void tg3_send_ape_heartbeat(struct tg3 *tp,
988 unsigned long interval)
989{
990 /* Check if hb interval has exceeded */
991 if (!tg3_flag(tp, ENABLE_APE) ||
992 time_before(jiffies, tp->ape_hb_jiffies + interval))
993 return;
994
995 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
996 tp->ape_hb_jiffies = jiffies;
997}
998
993static void tg3_disable_ints(struct tg3 *tp) 999static void tg3_disable_ints(struct tg3 *tp)
994{ 1000{
995 int i; 1001 int i;
@@ -7262,6 +7268,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
7262 } 7268 }
7263 } 7269 }
7264 7270
7271 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7265 return work_done; 7272 return work_done;
7266 7273
7267tx_recovery: 7274tx_recovery:
@@ -7344,6 +7351,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
7344 } 7351 }
7345 } 7352 }
7346 7353
7354 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7347 return work_done; 7355 return work_done;
7348 7356
7349tx_recovery: 7357tx_recovery:
@@ -10732,7 +10740,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
10732 if (tg3_flag(tp, ENABLE_APE)) 10740 if (tg3_flag(tp, ENABLE_APE))
10733 /* Write our heartbeat update interval to APE. */ 10741 /* Write our heartbeat update interval to APE. */
10734 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, 10742 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10735 APE_HOST_HEARTBEAT_INT_DISABLE); 10743 APE_HOST_HEARTBEAT_INT_5SEC);
10736 10744
10737 tg3_write_sig_post_reset(tp, RESET_KIND_INIT); 10745 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10738 10746
@@ -11077,6 +11085,9 @@ static void tg3_timer(struct timer_list *t)
11077 tp->asf_counter = tp->asf_multiplier; 11085 tp->asf_counter = tp->asf_multiplier;
11078 } 11086 }
11079 11087
11088 /* Update the APE heartbeat every 5 seconds. */
11089 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11090
11080 spin_unlock(&tp->lock); 11091 spin_unlock(&tp->lock);
11081 11092
11082restart_timer: 11093restart_timer:
@@ -16653,6 +16664,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16653 pci_state_reg); 16664 pci_state_reg);
16654 16665
16655 tg3_ape_lock_init(tp); 16666 tg3_ape_lock_init(tp);
16667 tp->ape_hb_interval =
16668 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16656 } 16669 }
16657 16670
16658 /* Set up tp->grc_local_ctrl before calling 16671 /* Set up tp->grc_local_ctrl before calling
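The tg3 changes teach the driver to feed the APE firmware's heartbeat instead of disabling it: a counter write every TG3_APE_HB_INTERVAL, rate-limited with the wrap-safe jiffies comparison. time_before() compares via signed subtraction, so it keeps working across a jiffies rollover where a plain '<' would not:

    if (!tg3_flag(tp, ENABLE_APE) ||
        time_before(jiffies, tp->ape_hb_jiffies + interval))
            return;                 /* interval not yet elapsed */

    tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
    tp->ape_hb_jiffies = jiffies;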
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 47f51cc0566d..1d61aa3efda1 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2508,6 +2508,7 @@
2508#define TG3_APE_LOCK_PHY3 5 2508#define TG3_APE_LOCK_PHY3 5
2509#define TG3_APE_LOCK_GPIO 7 2509#define TG3_APE_LOCK_GPIO 7
2510 2510
2511#define TG3_APE_HB_INTERVAL (tp->ape_hb_interval)
2511#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10 2512#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10
2512 2513
2513 2514
@@ -3423,6 +3424,10 @@ struct tg3 {
3423 struct device *hwmon_dev; 3424 struct device *hwmon_dev;
3424 bool link_up; 3425 bool link_up;
3425 bool pcierr_recovery; 3426 bool pcierr_recovery;
3427
3428 u32 ape_hb;
3429 unsigned long ape_hb_interval;
3430 unsigned long ape_hb_jiffies;
3426}; 3431};
3427 3432
3428/* Accessor macros for chip and asic attributes 3433/* Accessor macros for chip and asic attributes
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
index c87c9c684a33..d59497a7bdce 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -75,6 +75,8 @@ EXPORT_SYMBOL(cavium_ptp_get);
75 75
76void cavium_ptp_put(struct cavium_ptp *ptp) 76void cavium_ptp_put(struct cavium_ptp *ptp)
77{ 77{
78 if (!ptp)
79 return;
78 pci_dev_put(ptp->pdev); 80 pci_dev_put(ptp->pdev);
79} 81}
80EXPORT_SYMBOL(cavium_ptp_put); 82EXPORT_SYMBOL(cavium_ptp_put);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index b68cde9f17d2..7d9c5ffbd041 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -67,11 +67,6 @@ module_param(cpi_alg, int, S_IRUGO);
67MODULE_PARM_DESC(cpi_alg, 67MODULE_PARM_DESC(cpi_alg,
68 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)"); 68 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
69 69
70struct nicvf_xdp_tx {
71 u64 dma_addr;
72 u8 qidx;
73};
74
75static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) 70static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
76{ 71{
77 if (nic->sqs_mode) 72 if (nic->sqs_mode)
@@ -507,29 +502,14 @@ static int nicvf_init_resources(struct nicvf *nic)
507 return 0; 502 return 0;
508} 503}
509 504
510static void nicvf_unmap_page(struct nicvf *nic, struct page *page, u64 dma_addr)
511{
512 /* Check if it's a recycled page, if not unmap the DMA mapping.
513 * Recycled page holds an extra reference.
514 */
515 if (page_ref_count(page) == 1) {
516 dma_addr &= PAGE_MASK;
517 dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
518 RCV_FRAG_LEN + XDP_HEADROOM,
519 DMA_FROM_DEVICE,
520 DMA_ATTR_SKIP_CPU_SYNC);
521 }
522}
523
524static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, 505static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
525 struct cqe_rx_t *cqe_rx, struct snd_queue *sq, 506 struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
526 struct rcv_queue *rq, struct sk_buff **skb) 507 struct rcv_queue *rq, struct sk_buff **skb)
527{ 508{
528 struct xdp_buff xdp; 509 struct xdp_buff xdp;
529 struct page *page; 510 struct page *page;
530 struct nicvf_xdp_tx *xdp_tx = NULL;
531 u32 action; 511 u32 action;
532 u16 len, err, offset = 0; 512 u16 len, offset = 0;
533 u64 dma_addr, cpu_addr; 513 u64 dma_addr, cpu_addr;
534 void *orig_data; 514 void *orig_data;
535 515
@@ -543,7 +523,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
543 cpu_addr = (u64)phys_to_virt(cpu_addr); 523 cpu_addr = (u64)phys_to_virt(cpu_addr);
544 page = virt_to_page((void *)cpu_addr); 524 page = virt_to_page((void *)cpu_addr);
545 525
546 xdp.data_hard_start = page_address(page) + RCV_BUF_HEADROOM; 526 xdp.data_hard_start = page_address(page);
547 xdp.data = (void *)cpu_addr; 527 xdp.data = (void *)cpu_addr;
548 xdp_set_data_meta_invalid(&xdp); 528 xdp_set_data_meta_invalid(&xdp);
549 xdp.data_end = xdp.data + len; 529 xdp.data_end = xdp.data + len;
@@ -563,7 +543,18 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
563 543
564 switch (action) { 544 switch (action) {
565 case XDP_PASS: 545 case XDP_PASS:
566 nicvf_unmap_page(nic, page, dma_addr); 546 /* Check if it's a recycled page, if not
547 * unmap the DMA mapping.
548 *
549 * Recycled page holds an extra reference.
550 */
551 if (page_ref_count(page) == 1) {
552 dma_addr &= PAGE_MASK;
553 dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
554 RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
555 DMA_FROM_DEVICE,
556 DMA_ATTR_SKIP_CPU_SYNC);
557 }
567 558
568 /* Build SKB and pass on packet to network stack */ 559 /* Build SKB and pass on packet to network stack */
569 *skb = build_skb(xdp.data, 560 *skb = build_skb(xdp.data,
@@ -576,20 +567,6 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
576 case XDP_TX: 567 case XDP_TX:
577 nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len); 568 nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
578 return true; 569 return true;
579 case XDP_REDIRECT:
580 /* Save DMA address for use while transmitting */
581 xdp_tx = (struct nicvf_xdp_tx *)page_address(page);
582 xdp_tx->dma_addr = dma_addr;
583 xdp_tx->qidx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);
584
585 err = xdp_do_redirect(nic->pnicvf->netdev, &xdp, prog);
586 if (!err)
587 return true;
588
589 /* Free the page on error */
590 nicvf_unmap_page(nic, page, dma_addr);
591 put_page(page);
592 break;
593 default: 570 default:
594 bpf_warn_invalid_xdp_action(action); 571 bpf_warn_invalid_xdp_action(action);
595 /* fall through */ 572 /* fall through */
@@ -597,7 +574,18 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
597 trace_xdp_exception(nic->netdev, prog, action); 574 trace_xdp_exception(nic->netdev, prog, action);
598 /* fall through */ 575 /* fall through */
599 case XDP_DROP: 576 case XDP_DROP:
600 nicvf_unmap_page(nic, page, dma_addr); 577 /* Check if it's a recycled page, if not
578 * unmap the DMA mapping.
579 *
580 * Recycled page holds an extra reference.
581 */
582 if (page_ref_count(page) == 1) {
583 dma_addr &= PAGE_MASK;
584 dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
585 RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
586 DMA_FROM_DEVICE,
587 DMA_ATTR_SKIP_CPU_SYNC);
588 }
601 put_page(page); 589 put_page(page);
602 return true; 590 return true;
603 } 591 }
@@ -1864,50 +1852,6 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
1864 } 1852 }
1865} 1853}
1866 1854
1867static int nicvf_xdp_xmit(struct net_device *netdev, struct xdp_buff *xdp)
1868{
1869 struct nicvf *nic = netdev_priv(netdev);
1870 struct nicvf *snic = nic;
1871 struct nicvf_xdp_tx *xdp_tx;
1872 struct snd_queue *sq;
1873 struct page *page;
1874 int err, qidx;
1875
1876 if (!netif_running(netdev) || !nic->xdp_prog)
1877 return -EINVAL;
1878
1879 page = virt_to_page(xdp->data);
1880 xdp_tx = (struct nicvf_xdp_tx *)page_address(page);
1881 qidx = xdp_tx->qidx;
1882
1883 if (xdp_tx->qidx >= nic->xdp_tx_queues)
1884 return -EINVAL;
1885
1886 /* Get secondary Qset's info */
1887 if (xdp_tx->qidx >= MAX_SND_QUEUES_PER_QS) {
1888 qidx = xdp_tx->qidx / MAX_SND_QUEUES_PER_QS;
1889 snic = (struct nicvf *)nic->snicvf[qidx - 1];
1890 if (!snic)
1891 return -EINVAL;
1892 qidx = xdp_tx->qidx % MAX_SND_QUEUES_PER_QS;
1893 }
1894
1895 sq = &snic->qs->sq[qidx];
1896 err = nicvf_xdp_sq_append_pkt(snic, sq, (u64)xdp->data,
1897 xdp_tx->dma_addr,
1898 xdp->data_end - xdp->data);
1899 if (err)
1900 return -ENOMEM;
1901
1902 nicvf_xdp_sq_doorbell(snic, sq, qidx);
1903 return 0;
1904}
1905
1906static void nicvf_xdp_flush(struct net_device *dev)
1907{
1908 return;
1909}
1910
1911static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) 1855static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
1912{ 1856{
1913 struct hwtstamp_config config; 1857 struct hwtstamp_config config;
@@ -1986,8 +1930,6 @@ static const struct net_device_ops nicvf_netdev_ops = {
1986 .ndo_fix_features = nicvf_fix_features, 1930 .ndo_fix_features = nicvf_fix_features,
1987 .ndo_set_features = nicvf_set_features, 1931 .ndo_set_features = nicvf_set_features,
1988 .ndo_bpf = nicvf_xdp, 1932 .ndo_bpf = nicvf_xdp,
1989 .ndo_xdp_xmit = nicvf_xdp_xmit,
1990 .ndo_xdp_flush = nicvf_xdp_flush,
1991 .ndo_do_ioctl = nicvf_ioctl, 1933 .ndo_do_ioctl = nicvf_ioctl,
1992}; 1934};
1993 1935
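
Several nicvf hunks above decide whether to tear down a DMA mapping by testing page_ref_count(page) == 1: a recycled receive page holds an extra reference, so only the final reference unmaps. A toy user-space model of that decision, with made-up field names standing in for the page refcount and the mapping state:

    #include <stdbool.h>
    #include <stdio.h>

    struct rx_buf {
        int refcount;    /* page_ref_count(page) in the driver */
        bool mapped;
    };

    static void rx_buf_release(struct rx_buf *buf)
    {
        if (buf->refcount == 1 && buf->mapped) {
            buf->mapped = false;    /* dma_unmap_page_attrs() in the driver */
            printf("last reference: unmapped\n");
        } else {
            printf("recycled: mapping kept\n");
        }
        buf->refcount--;            /* put_page() in the driver */
    }

    int main(void)
    {
        struct rx_buf recycled = { .refcount = 2, .mapped = true };
        struct rx_buf last_ref = { .refcount = 1, .mapped = true };

        rx_buf_release(&recycled);  /* mapping stays alive for reuse */
        rx_buf_release(&last_ref);  /* mapping torn down */
        return 0;
    }
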
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 3eae9ff9b53a..d42704d07484 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -204,7 +204,7 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
204 204
205 /* Reserve space for header modifications by BPF program */ 205 /* Reserve space for header modifications by BPF program */
206 if (rbdr->is_xdp) 206 if (rbdr->is_xdp)
207 buf_len += XDP_HEADROOM; 207 buf_len += XDP_PACKET_HEADROOM;
208 208
209 /* Check if it's recycled */ 209 /* Check if it's recycled */
210 if (pgcache) 210 if (pgcache)
@@ -224,9 +224,8 @@ ret:
224 nic->rb_page = NULL; 224 nic->rb_page = NULL;
225 return -ENOMEM; 225 return -ENOMEM;
226 } 226 }
227
228 if (pgcache) 227 if (pgcache)
229 pgcache->dma_addr = *rbuf + XDP_HEADROOM; 228 pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM;
230 nic->rb_page_offset += buf_len; 229 nic->rb_page_offset += buf_len;
231 } 230 }
232 231
@@ -1244,7 +1243,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
1244 int qentry; 1243 int qentry;
1245 1244
1246 if (subdesc_cnt > sq->xdp_free_cnt) 1245 if (subdesc_cnt > sq->xdp_free_cnt)
1247 return -1; 1246 return 0;
1248 1247
1249 qentry = nicvf_get_sq_desc(sq, subdesc_cnt); 1248 qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
1250 1249
@@ -1255,7 +1254,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
1255 1254
1256 sq->xdp_desc_cnt += subdesc_cnt; 1255 sq->xdp_desc_cnt += subdesc_cnt;
1257 1256
1258 return 0; 1257 return 1;
1259} 1258}
1260 1259
1261/* Calculate no of SQ subdescriptors needed to transmit all 1260/* Calculate no of SQ subdescriptors needed to transmit all
@@ -1656,7 +1655,7 @@ static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr,
1656 if (page_ref_count(page) != 1) 1655 if (page_ref_count(page) != 1)
1657 return; 1656 return;
1658 1657
1659 len += XDP_HEADROOM; 1658 len += XDP_PACKET_HEADROOM;
1660 /* Receive buffers in XDP mode are mapped from page start */ 1659 /* Receive buffers in XDP mode are mapped from page start */
1661 dma_addr &= PAGE_MASK; 1660 dma_addr &= PAGE_MASK;
1662 } 1661 }
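
The nicvf_xdp_sq_append_pkt() hunk flips the helper from the 0/-1 kernel-error convention to a boolean-style 0-on-failure/1-on-success return. Mixing the two conventions is an easy way to invert a test at a call site; a sketch of the fixed convention (names and values are illustrative):

    #include <stdio.h>

    /* 0 = failure, nonzero = success, so callers can test the return
     * directly as a boolean. */
    static int sq_append_pkt(int free_descs, int needed)
    {
        if (needed > free_descs)
            return 0;   /* queue full */
        /* ... write send descriptors ... */
        return 1;
    }

    int main(void)
    {
        /* With the old 0/-1 convention both tests would invert meaning. */
        if (sq_append_pkt(4, 2))
            printf("packet queued\n");
        if (!sq_append_pkt(1, 2))
            printf("queue full, dropping\n");
        return 0;
    }
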
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index ce1eed7a6d63..5e9a03cf1b4d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -11,7 +11,6 @@
11 11
12#include <linux/netdevice.h> 12#include <linux/netdevice.h>
13#include <linux/iommu.h> 13#include <linux/iommu.h>
14#include <linux/bpf.h>
15#include <net/xdp.h> 14#include <net/xdp.h>
16#include "q_struct.h" 15#include "q_struct.h"
17 16
@@ -94,9 +93,6 @@
94#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \ 93#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
95 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) 94 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
96 95
97#define RCV_BUF_HEADROOM 128 /* To store dma address for XDP redirect */
98#define XDP_HEADROOM (XDP_PACKET_HEADROOM + RCV_BUF_HEADROOM)
99
100#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \ 96#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
101 MAX_CQE_PER_PKT_XMIT) 97 MAX_CQE_PER_PKT_XMIT)
102 98
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index 557fd8bfd54e..00a1d2d13169 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -472,7 +472,7 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
472 472
473 if (is_t6(padap->params.chip)) { 473 if (is_t6(padap->params.chip)) {
474 size = padap->params.cim_la_size / 10 + 1; 474 size = padap->params.cim_la_size / 10 + 1;
475 size *= 11 * sizeof(u32); 475 size *= 10 * sizeof(u32);
476 } else { 476 } else {
477 size = padap->params.cim_la_size / 8; 477 size = padap->params.cim_la_size / 8;
478 size *= 8 * sizeof(u32); 478 size *= 8 * sizeof(u32);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
index 30485f9a598f..143686c60234 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
@@ -102,7 +102,7 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
102 case CUDBG_CIM_LA: 102 case CUDBG_CIM_LA:
103 if (is_t6(adap->params.chip)) { 103 if (is_t6(adap->params.chip)) {
104 len = adap->params.cim_la_size / 10 + 1; 104 len = adap->params.cim_la_size / 10 + 1;
105 len *= 11 * sizeof(u32); 105 len *= 10 * sizeof(u32);
106 } else { 106 } else {
107 len = adap->params.cim_la_size / 8; 107 len = adap->params.cim_la_size / 8;
108 len *= 8 * sizeof(u32); 108 len *= 8 * sizeof(u32);
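
Both CIM LA hunks above size the T6 logic-analyzer buffer in whole rows of ten 32-bit words, rounding up so a partial final row still fits. The arithmetic, with an assumed example value for cim_la_size:

    #include <stdio.h>

    int main(void)
    {
        unsigned int cim_la_size = 2048;   /* example value only */

        /* "+ 1" rounds up so a partial final row still has space */
        unsigned int rows = cim_la_size / 10 + 1;
        size_t bytes = rows * 10 * sizeof(unsigned int);

        printf("%u rows -> %zu bytes\n", rows, bytes);
        return 0;
    }
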
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 56bc626ef006..7b452e85de2a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4982,9 +4982,10 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
4982 4982
4983 pcie_fw = readl(adap->regs + PCIE_FW_A); 4983 pcie_fw = readl(adap->regs + PCIE_FW_A);
4984 /* Check if cxgb4 is the MASTER and fw is initialized */ 4984 /* Check if cxgb4 is the MASTER and fw is initialized */
4985 if (!(pcie_fw & PCIE_FW_INIT_F) || 4985 if (num_vfs &&
4986 (!(pcie_fw & PCIE_FW_INIT_F) ||
4986 !(pcie_fw & PCIE_FW_MASTER_VLD_F) || 4987 !(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
4987 PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF) { 4988 PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF)) {
4988 dev_warn(&pdev->dev, 4989 dev_warn(&pdev->dev,
4989 "cxgb4 driver needs to be MASTER to support SRIOV\n"); 4990 "cxgb4 driver needs to be MASTER to support SRIOV\n");
4990 return -EOPNOTSUPP; 4991 return -EOPNOTSUPP;
@@ -5599,24 +5600,24 @@ static void remove_one(struct pci_dev *pdev)
5599#if IS_ENABLED(CONFIG_IPV6) 5600#if IS_ENABLED(CONFIG_IPV6)
5600 t4_cleanup_clip_tbl(adapter); 5601 t4_cleanup_clip_tbl(adapter);
5601#endif 5602#endif
5602 iounmap(adapter->regs);
5603 if (!is_t4(adapter->params.chip)) 5603 if (!is_t4(adapter->params.chip))
5604 iounmap(adapter->bar2); 5604 iounmap(adapter->bar2);
5605 pci_disable_pcie_error_reporting(pdev);
5606 if ((adapter->flags & DEV_ENABLED)) {
5607 pci_disable_device(pdev);
5608 adapter->flags &= ~DEV_ENABLED;
5609 }
5610 pci_release_regions(pdev);
5611 kfree(adapter->mbox_log);
5612 synchronize_rcu();
5613 kfree(adapter);
5614 } 5605 }
5615#ifdef CONFIG_PCI_IOV 5606#ifdef CONFIG_PCI_IOV
5616 else { 5607 else {
5617 cxgb4_iov_configure(adapter->pdev, 0); 5608 cxgb4_iov_configure(adapter->pdev, 0);
5618 } 5609 }
5619#endif 5610#endif
5611 iounmap(adapter->regs);
5612 pci_disable_pcie_error_reporting(pdev);
5613 if ((adapter->flags & DEV_ENABLED)) {
5614 pci_disable_device(pdev);
5615 adapter->flags &= ~DEV_ENABLED;
5616 }
5617 pci_release_regions(pdev);
5618 kfree(adapter->mbox_log);
5619 synchronize_rcu();
5620 kfree(adapter);
5620} 5621}
5621 5622
5622/* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt 5623/* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
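
The remove_one() hunk hoists the register unmapping and PCI teardown below the CONFIG_PCI_IOV else branch, so the common cleanup runs whether the adapter carried network ports or was serving VFs only. The shape of that reordering, reduced to a sketch:

    #include <stdbool.h>
    #include <stdio.h>

    /* Cleanup common to both branches is hoisted out so it runs exactly
     * once in either case, instead of living inside only one branch. */
    static void remove_device(bool has_ports)
    {
        if (has_ports)
            printf("unregister netdevs\n");
        else
            printf("disable SR-IOV\n");

        /* shared teardown, now reached on both paths */
        printf("unmap registers, disable device, free adapter\n");
    }

    int main(void)
    {
        remove_device(true);
        remove_device(false);
        return 0;
    }
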
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 047609ef0515..920bccd6bc40 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2637,7 +2637,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
2637} 2637}
2638 2638
2639#define EEPROM_STAT_ADDR 0x7bfc 2639#define EEPROM_STAT_ADDR 0x7bfc
2640#define VPD_SIZE 0x800
2641#define VPD_BASE 0x400 2640#define VPD_BASE 0x400
2642#define VPD_BASE_OLD 0 2641#define VPD_BASE_OLD 0
2643#define VPD_LEN 1024 2642#define VPD_LEN 1024
@@ -2704,15 +2703,6 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
2704 if (!vpd) 2703 if (!vpd)
2705 return -ENOMEM; 2704 return -ENOMEM;
2706 2705
2707 /* We have two VPD data structures stored in the adapter VPD area.
2708 * By default, Linux calculates the size of the VPD area by traversing
2709 * the first VPD area at offset 0x0, so we need to tell the OS what
2710 * our real VPD size is.
2711 */
2712 ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE);
2713 if (ret < 0)
2714 goto out;
2715
2716 /* Card information normally starts at VPD_BASE but early cards had 2706 /* Card information normally starts at VPD_BASE but early cards had
2717 * it at 0. 2707 * it at 0.
2718 */ 2708 */
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 3bdeb295514b..f5c87bd35fa1 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2934,29 +2934,17 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
2934{ 2934{
2935 int size = lstatus & BD_LENGTH_MASK; 2935 int size = lstatus & BD_LENGTH_MASK;
2936 struct page *page = rxb->page; 2936 struct page *page = rxb->page;
2937 bool last = !!(lstatus & BD_LFLAG(RXBD_LAST));
2938
2939 /* Remove the FCS from the packet length */
2940 if (last)
2941 size -= ETH_FCS_LEN;
2942 2937
2943 if (likely(first)) { 2938 if (likely(first)) {
2944 skb_put(skb, size); 2939 skb_put(skb, size);
2945 } else { 2940 } else {
2946 /* the last fragments' length contains the full frame length */ 2941 /* the last fragments' length contains the full frame length */
2947 if (last) 2942 if (lstatus & BD_LFLAG(RXBD_LAST))
2948 size -= skb->len; 2943 size -= skb->len;
2949 2944
2950 /* Add the last fragment if it contains something other than 2945 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
2951 * the FCS, otherwise drop it and trim off any part of the FCS 2946 rxb->page_offset + RXBUF_ALIGNMENT,
2952 * that was already received. 2947 size, GFAR_RXB_TRUESIZE);
2953 */
2954 if (size > 0)
2955 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
2956 rxb->page_offset + RXBUF_ALIGNMENT,
2957 size, GFAR_RXB_TRUESIZE);
2958 else if (size < 0)
2959 pskb_trim(skb, skb->len + size);
2960 } 2948 }
2961 2949
2962 /* try reuse page */ 2950 /* try reuse page */
@@ -3069,6 +3057,9 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
3069 if (priv->padding) 3057 if (priv->padding)
3070 skb_pull(skb, priv->padding); 3058 skb_pull(skb, priv->padding);
3071 3059
3060 /* Trim off the FCS */
3061 pskb_trim(skb, skb->len - ETH_FCS_LEN);
3062
3072 if (ndev->features & NETIF_F_RXCSUM) 3063 if (ndev->features & NETIF_F_RXCSUM)
3073 gfar_rx_checksum(skb, fcb); 3064 gfar_rx_checksum(skb, fcb);
3074 3065
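
The gianfar hunks stop trimming the 4-byte FCS fragment by fragment and instead trim it once, on the assembled frame, in gfar_process_frame(). A simplified model of the new flow (fragment sizes are invented and the per-fragment length accounting is reduced):

    #include <stdio.h>

    #define ETH_FCS_LEN 4

    int main(void)
    {
        unsigned int frame_len = 0;
        unsigned int frag_lens[] = { 1024, 512, 70 };  /* last frag carries FCS */

        for (unsigned int i = 0; i < 3; i++)
            frame_len += frag_lens[i];      /* skb_add_rx_frag() per fragment */

        frame_len -= ETH_FCS_LEN;           /* pskb_trim() once, on the frame */
        printf("payload length: %u\n", frame_len);
        return 0;
    }
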
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 27447260215d..1b3cc8bb0705 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -791,6 +791,18 @@ static int ibmvnic_login(struct net_device *netdev)
791 return 0; 791 return 0;
792} 792}
793 793
794static void release_login_buffer(struct ibmvnic_adapter *adapter)
795{
796 kfree(adapter->login_buf);
797 adapter->login_buf = NULL;
798}
799
800static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
801{
802 kfree(adapter->login_rsp_buf);
803 adapter->login_rsp_buf = NULL;
804}
805
794static void release_resources(struct ibmvnic_adapter *adapter) 806static void release_resources(struct ibmvnic_adapter *adapter)
795{ 807{
796 int i; 808 int i;
@@ -813,6 +825,10 @@ static void release_resources(struct ibmvnic_adapter *adapter)
813 } 825 }
814 } 826 }
815 } 827 }
828 kfree(adapter->napi);
829 adapter->napi = NULL;
830
831 release_login_rsp_buffer(adapter);
816} 832}
817 833
818static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) 834static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
@@ -1057,6 +1073,35 @@ static int ibmvnic_open(struct net_device *netdev)
1057 return rc; 1073 return rc;
1058} 1074}
1059 1075
1076static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1077{
1078 struct ibmvnic_rx_pool *rx_pool;
1079 u64 rx_entries;
1080 int rx_scrqs;
1081 int i, j;
1082
1083 if (!adapter->rx_pool)
1084 return;
1085
1086 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
1087 rx_entries = adapter->req_rx_add_entries_per_subcrq;
1088
1089 /* Free any remaining skbs in the rx buffer pools */
1090 for (i = 0; i < rx_scrqs; i++) {
1091 rx_pool = &adapter->rx_pool[i];
1092 if (!rx_pool)
1093 continue;
1094
1095 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
1096 for (j = 0; j < rx_entries; j++) {
1097 if (rx_pool->rx_buff[j].skb) {
1098 dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
1099 rx_pool->rx_buff[j].skb = NULL;
1100 }
1101 }
1102 }
1103}
1104
1060static void clean_tx_pools(struct ibmvnic_adapter *adapter) 1105static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1061{ 1106{
1062 struct ibmvnic_tx_pool *tx_pool; 1107 struct ibmvnic_tx_pool *tx_pool;
@@ -1134,7 +1179,7 @@ static int __ibmvnic_close(struct net_device *netdev)
1134 } 1179 }
1135 } 1180 }
1136 } 1181 }
1137 1182 clean_rx_pools(adapter);
1138 clean_tx_pools(adapter); 1183 clean_tx_pools(adapter);
1139 adapter->state = VNIC_CLOSED; 1184 adapter->state = VNIC_CLOSED;
1140 return rc; 1185 return rc;
@@ -1670,8 +1715,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
1670 return 0; 1715 return 0;
1671 } 1716 }
1672 1717
1673 netif_carrier_on(netdev);
1674
1675 /* kick napi */ 1718 /* kick napi */
1676 for (i = 0; i < adapter->req_rx_queues; i++) 1719 for (i = 0; i < adapter->req_rx_queues; i++)
1677 napi_schedule(&adapter->napi[i]); 1720 napi_schedule(&adapter->napi[i]);
@@ -1679,6 +1722,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
1679 if (adapter->reset_reason != VNIC_RESET_FAILOVER) 1722 if (adapter->reset_reason != VNIC_RESET_FAILOVER)
1680 netdev_notify_peers(netdev); 1723 netdev_notify_peers(netdev);
1681 1724
1725 netif_carrier_on(netdev);
1726
1682 return 0; 1727 return 0;
1683} 1728}
1684 1729
@@ -1853,6 +1898,12 @@ restart_poll:
1853 be16_to_cpu(next->rx_comp.rc)); 1898 be16_to_cpu(next->rx_comp.rc));
1854 /* free the entry */ 1899 /* free the entry */
1855 next->rx_comp.first = 0; 1900 next->rx_comp.first = 0;
1901 dev_kfree_skb_any(rx_buff->skb);
1902 remove_buff_from_pool(adapter, rx_buff);
1903 continue;
1904 } else if (!rx_buff->skb) {
1905 /* free the entry */
1906 next->rx_comp.first = 0;
1856 remove_buff_from_pool(adapter, rx_buff); 1907 remove_buff_from_pool(adapter, rx_buff);
1857 continue; 1908 continue;
1858 } 1909 }
@@ -3013,6 +3064,7 @@ static void send_login(struct ibmvnic_adapter *adapter)
3013 struct vnic_login_client_data *vlcd; 3064 struct vnic_login_client_data *vlcd;
3014 int i; 3065 int i;
3015 3066
3067 release_login_rsp_buffer(adapter);
3016 client_data_len = vnic_client_data_len(adapter); 3068 client_data_len = vnic_client_data_len(adapter);
3017 3069
3018 buffer_size = 3070 buffer_size =
@@ -3738,6 +3790,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
3738 ibmvnic_remove(adapter->vdev); 3790 ibmvnic_remove(adapter->vdev);
3739 return -EIO; 3791 return -EIO;
3740 } 3792 }
3793 release_login_buffer(adapter);
3741 complete(&adapter->init_done); 3794 complete(&adapter->init_done);
3742 3795
3743 return 0; 3796 return 0;
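
The ibmvnic hunks wrap kfree() in helpers that also NULL the pointer, so the login buffers can be released from several paths (login retry, resource teardown, response handling) without a double free. In plain C the idiom looks like this:

    #include <stdlib.h>

    struct adapter {
        void *login_buf;
    };

    static void release_login_buffer(struct adapter *a)
    {
        free(a->login_buf);
        a->login_buf = NULL;   /* makes a second call harmless */
    }

    int main(void)
    {
        struct adapter a = { .login_buf = malloc(64) };

        release_login_buffer(&a);
        release_login_buffer(&a);   /* safe: free(NULL) is a no-op */
        return 0;
    }

free(NULL) is defined as a no-op, which is exactly what makes the repeated call safe.
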
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index a1d7b88cf083..5a1668cdb461 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -7137,6 +7137,7 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
7137 int id = port->id; 7137 int id = port->id;
7138 bool allmulti = dev->flags & IFF_ALLMULTI; 7138 bool allmulti = dev->flags & IFF_ALLMULTI;
7139 7139
7140retry:
7140 mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC); 7141 mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
7141 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti); 7142 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
7142 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti); 7143 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
@@ -7144,9 +7145,13 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
7144 /* Remove all port->id's mcast enries */ 7145 /* Remove all port->id's mcast enries */
7145 mvpp2_prs_mcast_del_all(priv, id); 7146 mvpp2_prs_mcast_del_all(priv, id);
7146 7147
7147 if (allmulti && !netdev_mc_empty(dev)) { 7148 if (!allmulti) {
7148 netdev_for_each_mc_addr(ha, dev) 7149 netdev_for_each_mc_addr(ha, dev) {
7149 mvpp2_prs_mac_da_accept(priv, id, ha->addr, true); 7150 if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) {
7151 allmulti = true;
7152 goto retry;
7153 }
7154 }
7150 } 7155 }
7151} 7156}
7152 7157
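
The mvpp2_set_rx_mode() change programs each multicast address into the hardware filter and, if mvpp2_prs_mac_da_accept() fails partway (for instance because the table is full), falls back to accept-all-multicast and reprograms from the top via the retry label. A self-contained model of that fallback (slot count and address count are invented):

    #include <stdbool.h>
    #include <stdio.h>

    #define FILTER_SLOTS 2

    static bool filter_add(int *used)
    {
        if (*used >= FILTER_SLOTS)
            return false;       /* table full */
        (*used)++;
        return true;
    }

    int main(void)
    {
        int addrs = 3, used = 0;
        bool allmulti = false;

    retry:
        used = 0;               /* models reprogramming the filter */
        if (!allmulti) {
            for (int i = 0; i < addrs; i++) {
                if (!filter_add(&used)) {
                    allmulti = true;   /* overflow: accept all multicast */
                    goto retry;
                }
            }
        }
        printf(allmulti ? "allmulti fallback\n" : "exact filtering\n");
        return 0;
    }
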
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index 0be4575b58a2..fd509160c8f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -96,10 +96,10 @@ static void print_lyr_2_4_hdrs(struct trace_seq *p,
96 "%pI4"); 96 "%pI4");
97 } else if (ethertype.v == ETH_P_IPV6) { 97 } else if (ethertype.v == ETH_P_IPV6) {
98 static const struct in6_addr full_ones = { 98 static const struct in6_addr full_ones = {
99 .in6_u.u6_addr32 = {htonl(0xffffffff), 99 .in6_u.u6_addr32 = {__constant_htonl(0xffffffff),
100 htonl(0xffffffff), 100 __constant_htonl(0xffffffff),
101 htonl(0xffffffff), 101 __constant_htonl(0xffffffff),
102 htonl(0xffffffff)}, 102 __constant_htonl(0xffffffff)},
103 }; 103 };
104 DECLARE_MASK_VAL(struct in6_addr, src_ipv6); 104 DECLARE_MASK_VAL(struct in6_addr, src_ipv6);
105 DECLARE_MASK_VAL(struct in6_addr, dst_ipv6); 105 DECLARE_MASK_VAL(struct in6_addr, dst_ipv6);
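
The fs_tracepoint.c hunk swaps htonl() for __constant_htonl() inside a static initializer, where a true compile-time constant expression is required. A user-space approximation of such a constant byte swap (the swap is unconditional, so it assumes a little-endian host for illustration; a real htonl() is a no-op on big-endian machines):

    #include <stdint.h>
    #include <stdio.h>

    /* All operations are constant expressions, so this is legal in a
     * static initializer. */
    #define CONST_HTONL(x) ((uint32_t)(              \
            (((uint32_t)(x) & 0xff000000u) >> 24) |  \
            (((uint32_t)(x) & 0x00ff0000u) >>  8) |  \
            (((uint32_t)(x) & 0x0000ff00u) <<  8) |  \
            (((uint32_t)(x) & 0x000000ffu) << 24)))

    static const uint32_t loopback_be = CONST_HTONL(0x7f000001);  /* 127.0.0.1 */

    int main(void)
    {
        printf("0x%08x\n", (unsigned int)loopback_be);   /* 0x0100007f */
        return 0;
    }
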
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 47bab842c5ee..da94c8cba5ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1768,13 +1768,16 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
1768 param->wq.linear = 1; 1768 param->wq.linear = 1;
1769} 1769}
1770 1770
1771static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param) 1771static void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
1772 struct mlx5e_rq_param *param)
1772{ 1773{
1773 void *rqc = param->rqc; 1774 void *rqc = param->rqc;
1774 void *wq = MLX5_ADDR_OF(rqc, rqc, wq); 1775 void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
1775 1776
1776 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); 1777 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
1777 MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); 1778 MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
1779
1780 param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
1778} 1781}
1779 1782
1780static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv, 1783static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
@@ -2634,6 +2637,9 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
2634 struct mlx5e_cq *cq, 2637 struct mlx5e_cq *cq,
2635 struct mlx5e_cq_param *param) 2638 struct mlx5e_cq_param *param)
2636{ 2639{
2640 param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
2641 param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev);
2642
2637 return mlx5e_alloc_cq_common(mdev, param, cq); 2643 return mlx5e_alloc_cq_common(mdev, param, cq);
2638} 2644}
2639 2645
@@ -2645,7 +2651,7 @@ static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
2645 struct mlx5e_cq *cq = &drop_rq->cq; 2651 struct mlx5e_cq *cq = &drop_rq->cq;
2646 int err; 2652 int err;
2647 2653
2648 mlx5e_build_drop_rq_param(&rq_param); 2654 mlx5e_build_drop_rq_param(mdev, &rq_param);
2649 2655
2650 err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param); 2656 err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
2651 if (err) 2657 if (err)
@@ -2994,8 +3000,8 @@ static int mlx5e_setup_tc_block(struct net_device *dev,
2994} 3000}
2995#endif 3001#endif
2996 3002
2997int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, 3003static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
2998 void *type_data) 3004 void *type_data)
2999{ 3005{
3000 switch (type) { 3006 switch (type) {
3001#ifdef CONFIG_MLX5_ESWITCH 3007#ifdef CONFIG_MLX5_ESWITCH
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 0d4bb0688faa..e5c3ab46a24a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -36,6 +36,7 @@
36#include <linux/tcp.h> 36#include <linux/tcp.h>
37#include <linux/bpf_trace.h> 37#include <linux/bpf_trace.h>
38#include <net/busy_poll.h> 38#include <net/busy_poll.h>
39#include <net/ip6_checksum.h>
39#include "en.h" 40#include "en.h"
40#include "en_tc.h" 41#include "en_tc.h"
41#include "eswitch.h" 42#include "eswitch.h"
@@ -546,20 +547,33 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
546 return true; 547 return true;
547} 548}
548 549
550static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
551{
552 u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
553 u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
554 (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
555
556 tcp->check = 0;
557 tcp->psh = get_cqe_lro_tcppsh(cqe);
558
559 if (tcp_ack) {
560 tcp->ack = 1;
561 tcp->ack_seq = cqe->lro_ack_seq_num;
562 tcp->window = cqe->lro_tcp_win;
563 }
564}
565
549static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, 566static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
550 u32 cqe_bcnt) 567 u32 cqe_bcnt)
551{ 568{
552 struct ethhdr *eth = (struct ethhdr *)(skb->data); 569 struct ethhdr *eth = (struct ethhdr *)(skb->data);
553 struct tcphdr *tcp; 570 struct tcphdr *tcp;
554 int network_depth = 0; 571 int network_depth = 0;
572 __wsum check;
555 __be16 proto; 573 __be16 proto;
556 u16 tot_len; 574 u16 tot_len;
557 void *ip_p; 575 void *ip_p;
558 576
559 u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
560 u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
561 (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
562
563 proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth); 577 proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
564 578
565 tot_len = cqe_bcnt - network_depth; 579 tot_len = cqe_bcnt - network_depth;
@@ -576,23 +590,30 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
576 ipv4->check = 0; 590 ipv4->check = 0;
577 ipv4->check = ip_fast_csum((unsigned char *)ipv4, 591 ipv4->check = ip_fast_csum((unsigned char *)ipv4,
578 ipv4->ihl); 592 ipv4->ihl);
593
594 mlx5e_lro_update_tcp_hdr(cqe, tcp);
595 check = csum_partial(tcp, tcp->doff * 4,
596 csum_unfold((__force __sum16)cqe->check_sum));
597 /* Almost done, don't forget the pseudo header */
598 tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
599 tot_len - sizeof(struct iphdr),
600 IPPROTO_TCP, check);
579 } else { 601 } else {
602 u16 payload_len = tot_len - sizeof(struct ipv6hdr);
580 struct ipv6hdr *ipv6 = ip_p; 603 struct ipv6hdr *ipv6 = ip_p;
581 604
582 tcp = ip_p + sizeof(struct ipv6hdr); 605 tcp = ip_p + sizeof(struct ipv6hdr);
583 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 606 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
584 607
585 ipv6->hop_limit = cqe->lro_min_ttl; 608 ipv6->hop_limit = cqe->lro_min_ttl;
586 ipv6->payload_len = cpu_to_be16(tot_len - 609 ipv6->payload_len = cpu_to_be16(payload_len);
587 sizeof(struct ipv6hdr)); 610
588 } 611 mlx5e_lro_update_tcp_hdr(cqe, tcp);
589 612 check = csum_partial(tcp, tcp->doff * 4,
590 tcp->psh = get_cqe_lro_tcppsh(cqe); 613 csum_unfold((__force __sum16)cqe->check_sum));
591 614 /* Almost done, don't forget the pseudo header */
592 if (tcp_ack) { 615 tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
593 tcp->ack = 1; 616 IPPROTO_TCP, check);
594 tcp->ack_seq = cqe->lro_ack_seq_num;
595 tcp->window = cqe->lro_tcp_win;
596 } 617 }
597} 618}
598 619
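
After LRO coalescing, mlx5e_lro_update_hdr() above must produce a valid TCP checksum for the merged super-packet: sum the rewritten TCP header, seed the sum with the hardware-reported payload checksum (cqe->check_sum), then fold in the pseudo-header. A simplified user-space rendition with invented values; byte-order handling is reduced for illustration, where the kernel uses csum_partial() and csum_tcpudp_magic():

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* 16-bit one's-complement sum over a buffer */
    static uint32_t csum_add(uint32_t sum, const uint8_t *p, size_t len)
    {
        while (len > 1) {
            sum += (uint32_t)((p[0] << 8) | p[1]);
            p += 2;
            len -= 2;
        }
        if (len)
            sum += (uint32_t)(p[0] << 8);
        return sum;
    }

    static uint16_t csum_fold(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }

    int main(void)
    {
        uint8_t tcp_hdr[20] = { 0 };        /* checksum field already zeroed */
        uint32_t saddr = 0xc0a80001, daddr = 0xc0a80002;  /* 192.168.0.1/.2 */
        uint32_t tcp_len = 20 + 1400;       /* header plus coalesced payload */
        uint32_t payload_csum = 0x1234;     /* stands in for cqe->check_sum */

        uint32_t sum = csum_add(payload_csum, tcp_hdr, sizeof(tcp_hdr));
        sum += (saddr >> 16) + (saddr & 0xffff);   /* pseudo-header: source */
        sum += (daddr >> 16) + (daddr & 0xffff);   /* destination */
        sum += 6 + tcp_len;                        /* IPPROTO_TCP, TCP length */

        printf("tcp checksum: 0x%04x\n", csum_fold(sum));
        return 0;
    }
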
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 5a4608281f38..707976482c09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -216,7 +216,8 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
216 if (iph->protocol != IPPROTO_UDP) 216 if (iph->protocol != IPPROTO_UDP)
217 goto out; 217 goto out;
218 218
219 udph = udp_hdr(skb); 219 /* Don't assume skb_transport_header() was set */
220 udph = (struct udphdr *)((u8 *)iph + 4 * iph->ihl);
220 if (udph->dest != htons(9)) 221 if (udph->dest != htons(9))
221 goto out; 222 goto out;
222 223
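
The selftest hunk derives the UDP header position from the IP header itself, since skb_transport_header() may not have been set on this receive path; ihl counts 32-bit words, so the transport header starts 4 * ihl bytes into the IP header. A minimal sketch:

    #include <stdint.h>
    #include <stdio.h>

    struct iphdr_min {
        uint8_t version_ihl;    /* low nibble: header length in 32-bit words */
    };

    int main(void)
    {
        uint8_t packet[64] = { 0x45 };      /* IPv4, ihl = 5 (20 bytes) */
        struct iphdr_min *iph = (struct iphdr_min *)packet;
        unsigned int ihl = iph->version_ihl & 0x0f;

        uint8_t *udph = (uint8_t *)iph + 4 * ihl;   /* start of UDP header */
        printf("udp header at offset %ld\n", (long)(udph - packet));
        return 0;
    }
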
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index fd98b0dc610f..fa86a1466718 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2529,7 +2529,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
2529 if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { 2529 if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
2530 attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; 2530 attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
2531 } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { 2531 } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
2532 if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q)) 2532 if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
2533 tcf_vlan_push_prio(a))
2533 return -EOPNOTSUPP; 2534 return -EOPNOTSUPP;
2534 2535
2535 attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; 2536 attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 569b42a01026..11b4f1089d1c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -176,7 +176,7 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
176 default: 176 default:
177 hlen = mlx5e_skb_l2_header_offset(skb); 177 hlen = mlx5e_skb_l2_header_offset(skb);
178 } 178 }
179 return min_t(u16, hlen, skb->len); 179 return min_t(u16, hlen, skb_headlen(skb));
180} 180}
181 181
182static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, 182static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
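
The en_tx.c one-liner clamps the inlined header length to the skb's linear part (skb_headlen()) rather than its total length: with paged fragments, skb->len can exceed the bytes actually readable at skb->data. The arithmetic difference, with invented sizes:

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        unsigned int hlen = 128;        /* desired inline header size */
        unsigned int headlen = 64;      /* bytes in the linear buffer */
        unsigned int total_len = 1500;  /* linear + paged fragments */

        unsigned int bad  = MIN(hlen, total_len);  /* 128: reads past linear data */
        unsigned int good = MIN(hlen, headlen);    /* 64: stays in bounds */

        printf("old clamp: %u, fixed clamp: %u\n", bad, good);
        return 0;
    }
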
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 5ecf2cddc16d..c2b1d7d351fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1529,6 +1529,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
1529 1529
1530 esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); 1530 esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
1531 1531
1532 /* Create steering drop counters for ingress and egress ACLs */
1533 if (vport_num && esw->mode == SRIOV_LEGACY)
1534 esw_vport_create_drop_counters(vport);
1535
1532 /* Restore old vport configuration */ 1536 /* Restore old vport configuration */
1533 esw_apply_vport_conf(esw, vport); 1537 esw_apply_vport_conf(esw, vport);
1534 1538
@@ -1545,10 +1549,6 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
1545 if (!vport_num) 1549 if (!vport_num)
1546 vport->info.trusted = true; 1550 vport->info.trusted = true;
1547 1551
1548 /* create steering drop counters for ingress and egress ACLs */
1549 if (vport_num && esw->mode == SRIOV_LEGACY)
1550 esw_vport_create_drop_counters(vport);
1551
1552 esw_vport_change_handle_locked(vport); 1552 esw_vport_change_handle_locked(vport);
1553 1553
1554 esw->enabled_vports++; 1554 esw->enabled_vports++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index c025c98700e4..31fc2cfac3b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1429,7 +1429,8 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
1429 1429
1430 if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP | 1430 if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
1431 MLX5_FLOW_CONTEXT_ACTION_ENCAP | 1431 MLX5_FLOW_CONTEXT_ACTION_ENCAP |
1432 MLX5_FLOW_CONTEXT_ACTION_DECAP)) 1432 MLX5_FLOW_CONTEXT_ACTION_DECAP |
1433 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR))
1433 return true; 1434 return true;
1434 1435
1435 return false; 1436 return false;
@@ -1758,8 +1759,11 @@ search_again_locked:
1758 1759
1759 /* Collect all fgs which has a matching match_criteria */ 1760 /* Collect all fgs which has a matching match_criteria */
1760 err = build_match_list(&match_head, ft, spec); 1761 err = build_match_list(&match_head, ft, spec);
1761 if (err) 1762 if (err) {
1763 if (take_write)
1764 up_write_ref_node(&ft->node);
1762 return ERR_PTR(err); 1765 return ERR_PTR(err);
1766 }
1763 1767
1764 if (!take_write) 1768 if (!take_write)
1765 up_read_ref_node(&ft->node); 1769 up_read_ref_node(&ft->node);
@@ -1768,8 +1772,11 @@ search_again_locked:
1768 dest_num, version); 1772 dest_num, version);
1769 free_match_list(&match_head); 1773 free_match_list(&match_head);
1770 if (!IS_ERR(rule) || 1774 if (!IS_ERR(rule) ||
1771 (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) 1775 (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
1776 if (take_write)
1777 up_write_ref_node(&ft->node);
1772 return rule; 1778 return rule;
1779 }
1773 1780
1774 if (!take_write) { 1781 if (!take_write) {
1775 nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT); 1782 nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
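
Both fs_core.c hunks fix early returns that leaked the write lock taken under take_write: every exit after the upgrade must drop the lock, not just the success path at the bottom. A runnable model of the rule using a pthread rwlock in place of the kernel's ref-node lock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t node_lock = PTHREAD_RWLOCK_INITIALIZER;

    static int add_rule(int fail_early, int take_write)
    {
        if (take_write)
            pthread_rwlock_wrlock(&node_lock);

        if (fail_early) {
            if (take_write)
                pthread_rwlock_unlock(&node_lock);  /* the missing unlock */
            return -1;
        }

        /* ... insert the rule ... */
        if (take_write)
            pthread_rwlock_unlock(&node_lock);
        return 0;
    }

    int main(void)
    {
        add_rule(1, 1);
        add_rule(0, 1);   /* would deadlock here if the error path leaked the lock */
        printf("no deadlock\n");
        return 0;
    }
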
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index e159243e0fcf..857035583ccd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -34,6 +34,7 @@
34#include <linux/highmem.h> 34#include <linux/highmem.h>
35#include <rdma/mlx5-abi.h> 35#include <rdma/mlx5-abi.h>
36#include "en.h" 36#include "en.h"
37#include "clock.h"
37 38
38enum { 39enum {
39 MLX5_CYCLES_SHIFT = 23 40 MLX5_CYCLES_SHIFT = 23
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 2ef641c91c26..ae391e4b7070 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -551,7 +551,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
551 MLX5_SET(cmd_hca_cap, 551 MLX5_SET(cmd_hca_cap,
552 set_hca_cap, 552 set_hca_cap,
553 cache_line_128byte, 553 cache_line_128byte,
554 cache_line_size() == 128 ? 1 : 0); 554 cache_line_size() >= 128 ? 1 : 0);
555 555
556 if (MLX5_CAP_GEN_MAX(dev, dct)) 556 if (MLX5_CAP_GEN_MAX(dev, dct))
557 MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1); 557 MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index f0b25baba09a..f7948e983637 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -788,6 +788,9 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
788 u32 tb_id, 788 u32 tb_id,
789 struct netlink_ext_ack *extack) 789 struct netlink_ext_ack *extack)
790{ 790{
791 struct mlxsw_sp_mr_table *mr4_table;
792 struct mlxsw_sp_fib *fib4;
793 struct mlxsw_sp_fib *fib6;
791 struct mlxsw_sp_vr *vr; 794 struct mlxsw_sp_vr *vr;
792 int err; 795 int err;
793 796
@@ -796,29 +799,30 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
796 NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers"); 799 NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
797 return ERR_PTR(-EBUSY); 800 return ERR_PTR(-EBUSY);
798 } 801 }
799 vr->fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4); 802 fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
800 if (IS_ERR(vr->fib4)) 803 if (IS_ERR(fib4))
801 return ERR_CAST(vr->fib4); 804 return ERR_CAST(fib4);
802 vr->fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6); 805 fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
803 if (IS_ERR(vr->fib6)) { 806 if (IS_ERR(fib6)) {
804 err = PTR_ERR(vr->fib6); 807 err = PTR_ERR(fib6);
805 goto err_fib6_create; 808 goto err_fib6_create;
806 } 809 }
807 vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id, 810 mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
808 MLXSW_SP_L3_PROTO_IPV4); 811 MLXSW_SP_L3_PROTO_IPV4);
809 if (IS_ERR(vr->mr4_table)) { 812 if (IS_ERR(mr4_table)) {
810 err = PTR_ERR(vr->mr4_table); 813 err = PTR_ERR(mr4_table);
811 goto err_mr_table_create; 814 goto err_mr_table_create;
812 } 815 }
816 vr->fib4 = fib4;
817 vr->fib6 = fib6;
818 vr->mr4_table = mr4_table;
813 vr->tb_id = tb_id; 819 vr->tb_id = tb_id;
814 return vr; 820 return vr;
815 821
816err_mr_table_create: 822err_mr_table_create:
817 mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6); 823 mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
818 vr->fib6 = NULL;
819err_fib6_create: 824err_fib6_create:
820 mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4); 825 mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
821 vr->fib4 = NULL;
822 return ERR_PTR(err); 826 return ERR_PTR(err);
823} 827}
824 828
@@ -3790,6 +3794,9 @@ mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3790 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; 3794 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3791 int i; 3795 int i;
3792 3796
3797 if (!list_is_singular(&nh_grp->fib_list))
3798 return;
3799
3793 for (i = 0; i < nh_grp->count; i++) { 3800 for (i = 0; i < nh_grp->count; i++) {
3794 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; 3801 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3795 3802
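
The mlxsw_sp_vr_create() rework allocates into locals and commits them to the shared vr only after every allocation has succeeded, so readers never observe a half-initialized entry and the error path frees locals instead of poking NULLs back into the struct. The pattern in miniature:

    #include <stdio.h>
    #include <stdlib.h>

    struct vr {
        void *fib4, *fib6, *mr4;
    };

    static int vr_create(struct vr *vr)
    {
        void *fib4 = NULL, *fib6 = NULL, *mr4 = NULL;

        fib4 = malloc(32);
        if (!fib4)
            goto err;
        fib6 = malloc(32);
        if (!fib6)
            goto err;
        mr4 = malloc(32);
        if (!mr4)
            goto err;

        vr->fib4 = fib4;    /* commit point: all-or-nothing */
        vr->fib6 = fib6;
        vr->mr4 = mr4;
        return 0;

    err:
        free(mr4);          /* free(NULL) is a no-op */
        free(fib6);
        free(fib4);
        return -1;
    }

    int main(void)
    {
        struct vr vr = { 0 };
        printf("vr_create: %d\n", vr_create(&vr));
        return 0;
    }
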
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 7e7704daf5f1..c4949183eef3 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -43,12 +43,6 @@
43 43
44/* Local Definitions and Declarations */ 44/* Local Definitions and Declarations */
45 45
46struct rmnet_walk_data {
47 struct net_device *real_dev;
48 struct list_head *head;
49 struct rmnet_port *port;
50};
51
52static int rmnet_is_real_dev_registered(const struct net_device *real_dev) 46static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
53{ 47{
54 return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler; 48 return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
@@ -112,17 +106,14 @@ static int rmnet_register_real_device(struct net_device *real_dev)
112static void rmnet_unregister_bridge(struct net_device *dev, 106static void rmnet_unregister_bridge(struct net_device *dev,
113 struct rmnet_port *port) 107 struct rmnet_port *port)
114{ 108{
115 struct net_device *rmnet_dev, *bridge_dev;
116 struct rmnet_port *bridge_port; 109 struct rmnet_port *bridge_port;
110 struct net_device *bridge_dev;
117 111
118 if (port->rmnet_mode != RMNET_EPMODE_BRIDGE) 112 if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
119 return; 113 return;
120 114
121 /* bridge slave handling */ 115 /* bridge slave handling */
122 if (!port->nr_rmnet_devs) { 116 if (!port->nr_rmnet_devs) {
123 rmnet_dev = netdev_master_upper_dev_get_rcu(dev);
124 netdev_upper_dev_unlink(dev, rmnet_dev);
125
126 bridge_dev = port->bridge_ep; 117 bridge_dev = port->bridge_ep;
127 118
128 bridge_port = rmnet_get_port_rtnl(bridge_dev); 119 bridge_port = rmnet_get_port_rtnl(bridge_dev);
@@ -132,9 +123,6 @@ static void rmnet_unregister_bridge(struct net_device *dev,
132 bridge_dev = port->bridge_ep; 123 bridge_dev = port->bridge_ep;
133 124
134 bridge_port = rmnet_get_port_rtnl(bridge_dev); 125 bridge_port = rmnet_get_port_rtnl(bridge_dev);
135 rmnet_dev = netdev_master_upper_dev_get_rcu(bridge_dev);
136 netdev_upper_dev_unlink(bridge_dev, rmnet_dev);
137
138 rmnet_unregister_real_device(bridge_dev, bridge_port); 126 rmnet_unregister_real_device(bridge_dev, bridge_port);
139 } 127 }
140} 128}
@@ -173,10 +161,6 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
173 if (err) 161 if (err)
174 goto err1; 162 goto err1;
175 163
176 err = netdev_master_upper_dev_link(dev, real_dev, NULL, NULL, extack);
177 if (err)
178 goto err2;
179
180 port->rmnet_mode = mode; 164 port->rmnet_mode = mode;
181 165
182 hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]); 166 hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
@@ -193,8 +177,6 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
193 177
194 return 0; 178 return 0;
195 179
196err2:
197 rmnet_vnd_dellink(mux_id, port, ep);
198err1: 180err1:
199 rmnet_unregister_real_device(real_dev, port); 181 rmnet_unregister_real_device(real_dev, port);
200err0: 182err0:
@@ -204,14 +186,13 @@ err0:
204 186
205static void rmnet_dellink(struct net_device *dev, struct list_head *head) 187static void rmnet_dellink(struct net_device *dev, struct list_head *head)
206{ 188{
189 struct rmnet_priv *priv = netdev_priv(dev);
207 struct net_device *real_dev; 190 struct net_device *real_dev;
208 struct rmnet_endpoint *ep; 191 struct rmnet_endpoint *ep;
209 struct rmnet_port *port; 192 struct rmnet_port *port;
210 u8 mux_id; 193 u8 mux_id;
211 194
212 rcu_read_lock(); 195 real_dev = priv->real_dev;
213 real_dev = netdev_master_upper_dev_get_rcu(dev);
214 rcu_read_unlock();
215 196
216 if (!real_dev || !rmnet_is_real_dev_registered(real_dev)) 197 if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
217 return; 198 return;
@@ -219,7 +200,6 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head)
219 port = rmnet_get_port_rtnl(real_dev); 200 port = rmnet_get_port_rtnl(real_dev);
220 201
221 mux_id = rmnet_vnd_get_mux(dev); 202 mux_id = rmnet_vnd_get_mux(dev);
222 netdev_upper_dev_unlink(dev, real_dev);
223 203
224 ep = rmnet_get_endpoint(port, mux_id); 204 ep = rmnet_get_endpoint(port, mux_id);
225 if (ep) { 205 if (ep) {
@@ -233,30 +213,13 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head)
233 unregister_netdevice_queue(dev, head); 213 unregister_netdevice_queue(dev, head);
234} 214}
235 215
236static int rmnet_dev_walk_unreg(struct net_device *rmnet_dev, void *data)
237{
238 struct rmnet_walk_data *d = data;
239 struct rmnet_endpoint *ep;
240 u8 mux_id;
241
242 mux_id = rmnet_vnd_get_mux(rmnet_dev);
243 ep = rmnet_get_endpoint(d->port, mux_id);
244 if (ep) {
245 hlist_del_init_rcu(&ep->hlnode);
246 rmnet_vnd_dellink(mux_id, d->port, ep);
247 kfree(ep);
248 }
249 netdev_upper_dev_unlink(rmnet_dev, d->real_dev);
250 unregister_netdevice_queue(rmnet_dev, d->head);
251
252 return 0;
253}
254
255static void rmnet_force_unassociate_device(struct net_device *dev) 216static void rmnet_force_unassociate_device(struct net_device *dev)
256{ 217{
257 struct net_device *real_dev = dev; 218 struct net_device *real_dev = dev;
258 struct rmnet_walk_data d; 219 struct hlist_node *tmp_ep;
220 struct rmnet_endpoint *ep;
259 struct rmnet_port *port; 221 struct rmnet_port *port;
222 unsigned long bkt_ep;
260 LIST_HEAD(list); 223 LIST_HEAD(list);
261 224
262 if (!rmnet_is_real_dev_registered(real_dev)) 225 if (!rmnet_is_real_dev_registered(real_dev))
@@ -264,16 +227,19 @@ static void rmnet_force_unassociate_device(struct net_device *dev)
264 227
265 ASSERT_RTNL(); 228 ASSERT_RTNL();
266 229
267 d.real_dev = real_dev;
268 d.head = &list;
269
270 port = rmnet_get_port_rtnl(dev); 230 port = rmnet_get_port_rtnl(dev);
271 d.port = port;
272 231
273 rcu_read_lock(); 232 rcu_read_lock();
274 rmnet_unregister_bridge(dev, port); 233 rmnet_unregister_bridge(dev, port);
275 234
276 netdev_walk_all_lower_dev_rcu(real_dev, rmnet_dev_walk_unreg, &d); 235 hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
236 unregister_netdevice_queue(ep->egress_dev, &list);
237 rmnet_vnd_dellink(ep->mux_id, port, ep);
238
239 hlist_del_init_rcu(&ep->hlnode);
240 kfree(ep);
241 }
242
277 rcu_read_unlock(); 243 rcu_read_unlock();
278 unregister_netdevice_many(&list); 244 unregister_netdevice_many(&list);
279 245
@@ -422,11 +388,6 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
422 if (err) 388 if (err)
423 return -EBUSY; 389 return -EBUSY;
424 390
425 err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL,
426 extack);
427 if (err)
428 return -EINVAL;
429
430 slave_port = rmnet_get_port(slave_dev); 391 slave_port = rmnet_get_port(slave_dev);
431 slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE; 392 slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
432 slave_port->bridge_ep = real_dev; 393 slave_port->bridge_ep = real_dev;
@@ -449,7 +410,6 @@ int rmnet_del_bridge(struct net_device *rmnet_dev,
449 port->rmnet_mode = RMNET_EPMODE_VND; 410 port->rmnet_mode = RMNET_EPMODE_VND;
450 port->bridge_ep = NULL; 411 port->bridge_ep = NULL;
451 412
452 netdev_upper_dev_unlink(slave_dev, rmnet_dev);
453 slave_port = rmnet_get_port(slave_dev); 413 slave_port = rmnet_get_port(slave_dev);
454 rmnet_unregister_real_device(slave_dev, slave_port); 414 rmnet_unregister_real_device(slave_dev, slave_port);
455 415
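
The rmnet_force_unassociate_device() rework walks the muxed-endpoint table with hash_for_each_safe(), which caches the next node before the loop body frees the current one. The same idiom on a plain singly linked list:

    #include <stdio.h>
    #include <stdlib.h>

    struct ep {
        struct ep *next;
        int mux_id;
    };

    int main(void)
    {
        struct ep *head = NULL, *e, *tmp;

        for (int i = 0; i < 3; i++) {
            e = malloc(sizeof(*e));
            if (!e)
                break;
            e->mux_id = i;
            e->next = head;
            head = e;
        }

        for (e = head; e; e = tmp) {
            tmp = e->next;          /* cache next before freeing, as
                                     * hash_for_each_safe() does */
            printf("removing mux %d\n", e->mux_id);
            free(e);
        }
        return 0;
    }
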
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
index 6bc328fb88e1..b0dbca070c00 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -38,6 +38,11 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
38 } 38 }
39 39
40 ep = rmnet_get_endpoint(port, mux_id); 40 ep = rmnet_get_endpoint(port, mux_id);
41 if (!ep) {
42 kfree_skb(skb);
43 return RX_HANDLER_CONSUMED;
44 }
45
41 vnd = ep->egress_dev; 46 vnd = ep->egress_dev;
42 47
43 ip_family = cmd->flow_control.ip_family; 48 ip_family = cmd->flow_control.ip_family;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 570a227acdd8..346d310914df 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -121,7 +121,7 @@ static void rmnet_get_stats64(struct net_device *dev,
121 memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats)); 121 memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
122 122
123 for_each_possible_cpu(cpu) { 123 for_each_possible_cpu(cpu) {
124 pcpu_ptr = this_cpu_ptr(priv->pcpu_stats); 124 pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
125 125
126 do { 126 do {
127 start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp); 127 start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
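
The one-line rmnet_vnd.c fix is a classic per-CPU stats bug: inside for_each_possible_cpu(), the pointer must come from per_cpu_ptr() for the loop's cpu, not this_cpu_ptr(), or the running CPU's counters are summed once per CPU. Modeled with a plain array standing in for the per-CPU area:

    #include <stdio.h>

    #define NR_CPUS 4

    int main(void)
    {
        unsigned long pcpu_rx[NR_CPUS] = { 10, 20, 30, 40 };
        int this_cpu = 0;   /* the CPU the code happens to run on */
        unsigned long wrong = 0, right = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            wrong += pcpu_rx[this_cpu];  /* this_cpu_ptr(): always CPU 0 */
            right += pcpu_rx[cpu];       /* per_cpu_ptr(..., cpu) */
        }
        printf("wrong = %lu, right = %lu\n", wrong, right);  /* 40 vs 100 */
        return 0;
    }
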
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index c87f57ca4437..a95fbd5510d9 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2255,9 +2255,6 @@ static int ravb_wol_setup(struct net_device *ndev)
2255 /* Enable MagicPacket */ 2255 /* Enable MagicPacket */
2256 ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); 2256 ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
2257 2257
2258 /* Increased clock usage so device won't be suspended */
2259 clk_enable(priv->clk);
2260
2261 return enable_irq_wake(priv->emac_irq); 2258 return enable_irq_wake(priv->emac_irq);
2262} 2259}
2263 2260
@@ -2276,9 +2273,6 @@ static int ravb_wol_restore(struct net_device *ndev)
2276 if (ret < 0) 2273 if (ret < 0)
2277 return ret; 2274 return ret;
2278 2275
2279 /* Restore clock usage count */
2280 clk_disable(priv->clk);
2281
2282 return disable_irq_wake(priv->emac_irq); 2276 return disable_irq_wake(priv->emac_irq);
2283} 2277}
2284 2278
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index a197e11f3a56..92dcf8717fc6 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -40,7 +40,6 @@
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/ethtool.h> 41#include <linux/ethtool.h>
42#include <linux/if_vlan.h> 42#include <linux/if_vlan.h>
43#include <linux/clk.h>
44#include <linux/sh_eth.h> 43#include <linux/sh_eth.h>
45#include <linux/of_mdio.h> 44#include <linux/of_mdio.h>
46 45
@@ -2304,7 +2303,7 @@ static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2304 wol->supported = 0; 2303 wol->supported = 0;
2305 wol->wolopts = 0; 2304 wol->wolopts = 0;
2306 2305
2307 if (mdp->cd->magic && mdp->clk) { 2306 if (mdp->cd->magic) {
2308 wol->supported = WAKE_MAGIC; 2307 wol->supported = WAKE_MAGIC;
2309 wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0; 2308 wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0;
2310 } 2309 }
@@ -2314,7 +2313,7 @@ static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2314{ 2313{
2315 struct sh_eth_private *mdp = netdev_priv(ndev); 2314 struct sh_eth_private *mdp = netdev_priv(ndev);
2316 2315
2317 if (!mdp->cd->magic || !mdp->clk || wol->wolopts & ~WAKE_MAGIC) 2316 if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC)
2318 return -EOPNOTSUPP; 2317 return -EOPNOTSUPP;
2319 2318
2320 mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); 2319 mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
@@ -3153,11 +3152,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
3153 goto out_release; 3152 goto out_release;
3154 } 3153 }
3155 3154
3156 /* Get clock, if not found that's OK but Wake-On-Lan is unavailable */
3157 mdp->clk = devm_clk_get(&pdev->dev, NULL);
3158 if (IS_ERR(mdp->clk))
3159 mdp->clk = NULL;
3160
3161 ndev->base_addr = res->start; 3155 ndev->base_addr = res->start;
3162 3156
3163 spin_lock_init(&mdp->lock); 3157 spin_lock_init(&mdp->lock);
@@ -3278,7 +3272,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
3278 if (ret) 3272 if (ret)
3279 goto out_napi_del; 3273 goto out_napi_del;
3280 3274
3281 if (mdp->cd->magic && mdp->clk) 3275 if (mdp->cd->magic)
3282 device_set_wakeup_capable(&pdev->dev, 1); 3276 device_set_wakeup_capable(&pdev->dev, 1);
3283 3277
3284 /* print device information */ 3278 /* print device information */
@@ -3331,9 +3325,6 @@ static int sh_eth_wol_setup(struct net_device *ndev)
3331 /* Enable MagicPacket */ 3325 /* Enable MagicPacket */
3332 sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); 3326 sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
3333 3327
3334 /* Increased clock usage so device won't be suspended */
3335 clk_enable(mdp->clk);
3336
3337 return enable_irq_wake(ndev->irq); 3328 return enable_irq_wake(ndev->irq);
3338} 3329}
3339 3330
@@ -3359,9 +3350,6 @@ static int sh_eth_wol_restore(struct net_device *ndev)
3359 if (ret < 0) 3350 if (ret < 0)
3360 return ret; 3351 return ret;
3361 3352
3362 /* Restore clock usage count */
3363 clk_disable(mdp->clk);
3364
3365 return disable_irq_wake(ndev->irq); 3353 return disable_irq_wake(ndev->irq);
3366} 3354}
3367 3355
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 63aca9f847e1..4c2f612e4414 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -20,7 +20,7 @@ if NET_VENDOR_SMSC
20 20
21config SMC9194 21config SMC9194
22 tristate "SMC 9194 support" 22 tristate "SMC 9194 support"
23 depends on (ISA || MAC && BROKEN) 23 depends on ISA
24 select CRC32 24 select CRC32
25 ---help--- 25 ---help---
26 This is support for the SMC9xxx based Ethernet cards. Choose this 26 This is support for the SMC9xxx based Ethernet cards. Choose this
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index a0f2be81d52e..8fc02d9db3d0 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1451,7 +1451,7 @@ destroy_macvlan_port:
1451 /* the macvlan port may be freed by macvlan_uninit when fail to register. 1451 /* the macvlan port may be freed by macvlan_uninit when fail to register.
1452 * so we destroy the macvlan port only when it's valid. 1452 * so we destroy the macvlan port only when it's valid.
1453 */ 1453 */
1454 if (create && macvlan_port_get_rtnl(dev)) 1454 if (create && macvlan_port_get_rtnl(lowerdev))
1455 macvlan_port_destroy(port->dev); 1455 macvlan_port_destroy(port->dev);
1456 return err; 1456 return err;
1457} 1457}
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index b13eed21c87d..d39ae77707ef 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1382,7 +1382,7 @@ int genphy_setup_forced(struct phy_device *phydev)
1382 ctl |= BMCR_FULLDPLX; 1382 ctl |= BMCR_FULLDPLX;
1383 1383
1384 return phy_modify(phydev, MII_BMCR, 1384 return phy_modify(phydev, MII_BMCR,
1385 BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN, ctl); 1385 ~(BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN), ctl);
1386} 1386}
1387EXPORT_SYMBOL(genphy_setup_forced); 1387EXPORT_SYMBOL(genphy_setup_forced);
1388 1388
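
phy_modify(dev, reg, mask, set) performs (old & ~mask) | set, so the mask names the bits cleared before the new value is OR-ed in; the genphy_setup_forced() fix flips the mask's polarity. A sketch of how the polarity changes which bits survive (BMCR bit values per the MII standard; the register value is invented):

    #include <stdio.h>

    static unsigned int reg_modify(unsigned int old, unsigned int mask,
                                   unsigned int set)
    {
        return (old & ~mask) | set;   /* mask selects the bits to clear */
    }

    int main(void)
    {
        unsigned int bmcr = 0x3d00;                    /* example value only */
        unsigned int bits = 0x4000 | 0x0400 | 0x0800;  /* loopback/isolate/pdown */

        /* mask = bits: only those three bits are cleared, the rest survive */
        printf("mask =  bits: 0x%04x\n", reg_modify(bmcr, bits, 0x2100));
        /* mask = ~bits: those three bits survive, the rest is rewritten */
        printf("mask = ~bits: 0x%04x\n", reg_modify(bmcr, ~bits, 0x2100));
        return 0;
    }
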
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index ca5e375de27c..e0d6760f3219 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -166,6 +166,8 @@ struct tbnet_ring {
166 * @connected_work: Worker that finalizes the ThunderboltIP connection 166 * @connected_work: Worker that finalizes the ThunderboltIP connection
167 * setup and enables DMA paths for high speed data 167 * setup and enables DMA paths for high speed data
168 * transfers 168 * transfers
169 * @disconnect_work: Worker that handles tearing down the ThunderboltIP
170 * connection
169 * @rx_hdr: Copy of the currently processed Rx frame. Used when a 171 * @rx_hdr: Copy of the currently processed Rx frame. Used when a
170 * network packet consists of multiple Thunderbolt frames. 172 * network packet consists of multiple Thunderbolt frames.
171 * In host byte order. 173 * In host byte order.
@@ -190,6 +192,7 @@ struct tbnet {
190 int login_retries; 192 int login_retries;
191 struct delayed_work login_work; 193 struct delayed_work login_work;
192 struct work_struct connected_work; 194 struct work_struct connected_work;
195 struct work_struct disconnect_work;
193 struct thunderbolt_ip_frame_header rx_hdr; 196 struct thunderbolt_ip_frame_header rx_hdr;
194 struct tbnet_ring rx_ring; 197 struct tbnet_ring rx_ring;
195 atomic_t frame_id; 198 atomic_t frame_id;
@@ -445,7 +448,7 @@ static int tbnet_handle_packet(const void *buf, size_t size, void *data)
445 case TBIP_LOGOUT: 448 case TBIP_LOGOUT:
446 ret = tbnet_logout_response(net, route, sequence, command_id); 449 ret = tbnet_logout_response(net, route, sequence, command_id);
447 if (!ret) 450 if (!ret)
448 tbnet_tear_down(net, false); 451 queue_work(system_long_wq, &net->disconnect_work);
449 break; 452 break;
450 453
451 default: 454 default:
@@ -659,6 +662,13 @@ static void tbnet_login_work(struct work_struct *work)
659 } 662 }
660} 663}
661 664
665static void tbnet_disconnect_work(struct work_struct *work)
666{
667 struct tbnet *net = container_of(work, typeof(*net), disconnect_work);
668
669 tbnet_tear_down(net, false);
670}
671
662static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf, 672static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,
663 const struct thunderbolt_ip_frame_header *hdr) 673 const struct thunderbolt_ip_frame_header *hdr)
664{ 674{
@@ -881,6 +891,7 @@ static int tbnet_stop(struct net_device *dev)
881 891
882 napi_disable(&net->napi); 892 napi_disable(&net->napi);
883 893
894 cancel_work_sync(&net->disconnect_work);
884 tbnet_tear_down(net, true); 895 tbnet_tear_down(net, true);
885 896
886 tb_ring_free(net->rx_ring.ring); 897 tb_ring_free(net->rx_ring.ring);
@@ -1195,6 +1206,7 @@ static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
 	net = netdev_priv(dev);
 	INIT_DELAYED_WORK(&net->login_work, tbnet_login_work);
 	INIT_WORK(&net->connected_work, tbnet_connected_work);
+	INIT_WORK(&net->disconnect_work, tbnet_disconnect_work);
 	mutex_init(&net->connection_lock);
 	atomic_set(&net->command_id, 0);
 	atomic_set(&net->frame_id, 0);
@@ -1270,10 +1282,7 @@ static int __maybe_unused tbnet_suspend(struct device *dev)
 	stop_login(net);
 	if (netif_running(net->dev)) {
 		netif_device_detach(net->dev);
-		tb_ring_stop(net->rx_ring.ring);
-		tb_ring_stop(net->tx_ring.ring);
-		tbnet_free_buffers(&net->rx_ring);
-		tbnet_free_buffers(&net->tx_ring);
+		tbnet_tear_down(net, true);
 	}
 
 	return 0;
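
The thunderbolt hunks above move tbnet_tear_down() off the ThunderboltIP packet-handler path and onto system_long_wq, with tbnet_stop() cancelling the worker synchronously before its own teardown. A minimal userspace sketch of that defer-and-cancel shape, with a plain thread standing in for the workqueue (every name below is invented for illustration):

    /* Sketch: push teardown out of a hot callback and cancel it on stop.
     * A thread plays the role of system_long_wq; nothing here is driver code. */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool queued, stopping;

    static void tear_down(void)
    {
    	puts("tearing down connection");
    }

    static void *worker(void *arg)
    {
    	(void)arg;
    	pthread_mutex_lock(&lock);
    	while (!stopping) {
    		if (!queued) {
    			pthread_cond_wait(&cond, &lock);
    			continue;
    		}
    		queued = false;
    		pthread_mutex_unlock(&lock);
    		tear_down();		/* runs here, not in the callback */
    		pthread_mutex_lock(&lock);
    	}
    	pthread_mutex_unlock(&lock);
    	return NULL;
    }

    static void handle_logout(void)	/* the cheap, non-blocking callback */
    {
    	pthread_mutex_lock(&lock);
    	queued = true;			/* analogue of queue_work() */
    	pthread_cond_signal(&cond);
    	pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
    	pthread_t t;

    	pthread_create(&t, NULL, worker, NULL);
    	handle_logout();
    	pthread_mutex_lock(&lock);	/* stop path: cancel_work_sync() analogue */
    	stopping = true;
    	pthread_cond_signal(&cond);
    	pthread_mutex_unlock(&lock);
    	pthread_join(t, NULL);
    	return 0;
    }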
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 81e6cc951e7f..b52258c327d2 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1489,27 +1489,23 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
 	skb->truesize += skb->data_len;
 
 	for (i = 1; i < it->nr_segs; i++) {
+		struct page_frag *pfrag = &current->task_frag;
 		size_t fragsz = it->iov[i].iov_len;
-		unsigned long offset;
-		struct page *page;
-		void *data;
 
 		if (fragsz == 0 || fragsz > PAGE_SIZE) {
 			err = -EINVAL;
 			goto free;
 		}
 
-		local_bh_disable();
-		data = napi_alloc_frag(fragsz);
-		local_bh_enable();
-		if (!data) {
+		if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) {
 			err = -ENOMEM;
 			goto free;
 		}
 
-		page = virt_to_head_page(data);
-		offset = data - page_address(page);
-		skb_fill_page_desc(skb, i - 1, page, offset, fragsz);
+		skb_fill_page_desc(skb, i - 1, pfrag->page,
+				   pfrag->offset, fragsz);
+		page_ref_inc(pfrag->page);
+		pfrag->offset += fragsz;
 	}
 
 	return skb;
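
The tun change replaces per-fragment napi_alloc_frag() calls with the task's page_frag: refill a page only when the current one cannot satisfy the request, carve fragments out of it, and take a page reference per fragment. A rough userspace analogue of that carve-and-refill allocator (simplified: the kernel's per-fragment page references also keep a superseded page alive, which this sketch does not model):

    /* Sketch of a page_frag-style bump allocator: refill a page when it is
     * exhausted, hand out offsets into it, and count a reference per fragment. */
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SZ 4096

    struct frag_cache {
    	char *page;
    	size_t offset;
    	int refs;		/* one per outstanding fragment + allocator */
    };

    static int frag_refill(struct frag_cache *fc, size_t want)
    {
    	if (fc->page && fc->offset + want <= PAGE_SZ)
    		return 0;	/* current page still has room */
    	/* NB: a real page_frag drops its own reference on the old page
    	 * here; the per-fragment refs keep it alive for its users. */
    	fc->page = malloc(PAGE_SZ);	/* stand-in for a fresh page */
    	if (!fc->page)
    		return -1;
    	fc->offset = 0;
    	fc->refs = 1;
    	return 0;
    }

    static void *frag_alloc(struct frag_cache *fc, size_t want)
    {
    	void *p;

    	if (want == 0 || want > PAGE_SZ || frag_refill(fc, want) < 0)
    		return NULL;
    	p = fc->page + fc->offset;
    	fc->offset += want;	/* bump past the fragment... */
    	fc->refs++;		/* ...and pin the page for it */
    	return p;
    }

    int main(void)
    {
    	struct frag_cache fc = { 0 };
    	char *a = frag_alloc(&fc, 1500);
    	char *b = frag_alloc(&fc, 1500);

    	if (a && b)
    		printf("two frags share one page, %td bytes apart, refs %d\n",
    		       b - a, fc.refs);
    	return 0;
    }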
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index d0a113743195..7a6a1fe79309 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -954,10 +954,11 @@ static int smsc75xx_set_features(struct net_device *netdev,
 	/* it's racing here! */
 
 	ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
-	if (ret < 0)
+	if (ret < 0) {
 		netdev_warn(dev->net, "Error writing RFE_CTL\n");
-
-	return ret;
+		return ret;
+	}
+	return 0;
 }
 
 static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 626c27352ae2..9bb9e562b893 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -443,12 +443,8 @@ static bool __virtnet_xdp_xmit(struct virtnet_info *vi,
 	sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
 
 	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC);
-	if (unlikely(err)) {
-		struct page *page = virt_to_head_page(xdp->data);
-
-		put_page(page);
-		return false;
-	}
+	if (unlikely(err))
+		return false; /* Caller handle free/refcnt */
 
 	return true;
 }
@@ -456,8 +452,18 @@ static bool __virtnet_xdp_xmit(struct virtnet_info *vi,
 static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
-	bool sent = __virtnet_xdp_xmit(vi, xdp);
+	struct receive_queue *rq = vi->rq;
+	struct bpf_prog *xdp_prog;
+	bool sent;
 
+	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
+	 * indicate XDP resources have been successfully allocated.
+	 */
+	xdp_prog = rcu_dereference(rq->xdp_prog);
+	if (!xdp_prog)
+		return -ENXIO;
+
+	sent = __virtnet_xdp_xmit(vi, xdp);
 	if (!sent)
 		return -ENOSPC;
 	return 0;
@@ -546,8 +552,11 @@ static struct sk_buff *receive_small(struct net_device *dev,
 	unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
 			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	struct page *page = virt_to_head_page(buf);
-	unsigned int delta = 0, err;
+	unsigned int delta = 0;
 	struct page *xdp_page;
+	bool sent;
+	int err;
+
 	len -= vi->hdr_len;
 
 	rcu_read_lock();
@@ -558,7 +567,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
 		void *orig_data;
 		u32 act;
 
-		if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
+		if (unlikely(hdr->hdr.gso_type))
 			goto err_xdp;
 
 		if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
@@ -596,16 +605,19 @@ static struct sk_buff *receive_small(struct net_device *dev,
 		delta = orig_data - xdp.data;
 		break;
 	case XDP_TX:
-		if (unlikely(!__virtnet_xdp_xmit(vi, &xdp)))
+		sent = __virtnet_xdp_xmit(vi, &xdp);
+		if (unlikely(!sent)) {
 			trace_xdp_exception(vi->dev, xdp_prog, act);
-		else
-			*xdp_xmit = true;
+			goto err_xdp;
+		}
+		*xdp_xmit = true;
 		rcu_read_unlock();
 		goto xdp_xmit;
 	case XDP_REDIRECT:
 		err = xdp_do_redirect(dev, &xdp, xdp_prog);
-		if (!err)
-			*xdp_xmit = true;
+		if (err)
+			goto err_xdp;
+		*xdp_xmit = true;
 		rcu_read_unlock();
 		goto xdp_xmit;
 	default:
@@ -677,7 +689,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	struct bpf_prog *xdp_prog;
 	unsigned int truesize;
 	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
-	int err;
+	bool sent;
 
 	head_skb = NULL;
 
@@ -746,20 +758,18 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 			}
 			break;
 		case XDP_TX:
-			if (unlikely(!__virtnet_xdp_xmit(vi, &xdp)))
+			sent = __virtnet_xdp_xmit(vi, &xdp);
+			if (unlikely(!sent)) {
 				trace_xdp_exception(vi->dev, xdp_prog, act);
-			else
-				*xdp_xmit = true;
+				if (unlikely(xdp_page != page))
+					put_page(xdp_page);
+				goto err_xdp;
+			}
+			*xdp_xmit = true;
 			if (unlikely(xdp_page != page))
 				goto err_xdp;
 			rcu_read_unlock();
 			goto xdp_xmit;
-		case XDP_REDIRECT:
-			err = xdp_do_redirect(dev, &xdp, xdp_prog);
-			if (!err)
-				*xdp_xmit = true;
-			rcu_read_unlock();
-			goto xdp_xmit;
 		default:
 			bpf_warn_invalid_xdp_action(act);
 		case XDP_ABORTED:
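
The virtio_net hunks make a failed XDP_TX transmit or XDP_REDIRECT take the err_xdp path, so the buffer is freed and the drop is accounted instead of being silently treated as sent. The general shape of that verdict handling, as a self-contained sketch with invented names:

    /* Sketch of the verdict-handling shape used above: treat a failed
     * transmit/redirect as an error path that frees the buffer, rather than
     * falling through as if the packet had been consumed. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    enum verdict { V_PASS, V_TX, V_DROP };

    static bool xmit(void *buf) { (void)buf; return false; /* queue full */ }

    static int handle(void *buf, enum verdict v)
    {
    	switch (v) {
    	case V_PASS:
    		return 0;		/* hand buffer up the stack */
    	case V_TX:
    		if (!xmit(buf))
    			goto err;	/* failed send: free, count a drop */
    		return 0;
    	case V_DROP:
    	default:
    		goto err;
    	}
    err:
    	free(buf);
    	return -1;
    }

    int main(void)
    {
    	int rc = handle(malloc(64), V_TX);

    	printf("verdict handled, rc=%d (drop accounted)\n", rc);
    	return 0;
    }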
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 1cf22e62e3dd..6e0af815f25e 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3516,7 +3516,7 @@ static int __init init_mac80211_hwsim(void)
 
 	spin_lock_init(&hwsim_radio_lock);
 
-	hwsim_wq = alloc_workqueue("hwsim_wq",WQ_MEM_RECLAIM,0);
+	hwsim_wq = alloc_workqueue("hwsim_wq", 0, 0);
 	if (!hwsim_wq)
 		return -ENOMEM;
 	rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f431c32774f3..0fe7ea35c221 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -120,8 +120,12 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
 	int ret;
 
 	ret = nvme_reset_ctrl(ctrl);
-	if (!ret)
+	if (!ret) {
 		flush_work(&ctrl->reset_work);
+		if (ctrl->state != NVME_CTRL_LIVE)
+			ret = -ENETRESET;
+	}
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
@@ -265,7 +269,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 	switch (new_state) {
 	case NVME_CTRL_ADMIN_ONLY:
 		switch (old_state) {
-		case NVME_CTRL_RECONNECTING:
+		case NVME_CTRL_CONNECTING:
 			changed = true;
 			/* FALLTHRU */
 		default:
@@ -276,7 +280,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 		switch (old_state) {
 		case NVME_CTRL_NEW:
 		case NVME_CTRL_RESETTING:
-		case NVME_CTRL_RECONNECTING:
+		case NVME_CTRL_CONNECTING:
 			changed = true;
 			/* FALLTHRU */
 		default:
@@ -294,9 +298,9 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 			break;
 		}
 		break;
-	case NVME_CTRL_RECONNECTING:
+	case NVME_CTRL_CONNECTING:
 		switch (old_state) {
-		case NVME_CTRL_LIVE:
+		case NVME_CTRL_NEW:
 		case NVME_CTRL_RESETTING:
 			changed = true;
 			/* FALLTHRU */
@@ -309,7 +313,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 		case NVME_CTRL_LIVE:
 		case NVME_CTRL_ADMIN_ONLY:
 		case NVME_CTRL_RESETTING:
-		case NVME_CTRL_RECONNECTING:
+		case NVME_CTRL_CONNECTING:
 			changed = true;
 			/* FALLTHRU */
 		default:
@@ -518,9 +522,11 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 		u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
 		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
 
-		range[n].cattr = cpu_to_le32(0);
-		range[n].nlb = cpu_to_le32(nlb);
-		range[n].slba = cpu_to_le64(slba);
+		if (n < segments) {
+			range[n].cattr = cpu_to_le32(0);
+			range[n].nlb = cpu_to_le32(nlb);
+			range[n].slba = cpu_to_le64(slba);
+		}
 		n++;
 	}
 
@@ -794,13 +800,9 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
 
 static int nvme_keep_alive(struct nvme_ctrl *ctrl)
 {
-	struct nvme_command c;
 	struct request *rq;
 
-	memset(&c, 0, sizeof(c));
-	c.common.opcode = nvme_admin_keep_alive;
-
-	rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED,
+	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
 			NVME_QID_ANY);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
@@ -832,6 +834,8 @@ void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
 		return;
 
 	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
+	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
+	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
 	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
 }
 EXPORT_SYMBOL_GPL(nvme_start_keep_alive);
@@ -1117,14 +1121,19 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 
 static void nvme_update_formats(struct nvme_ctrl *ctrl)
 {
-	struct nvme_ns *ns;
+	struct nvme_ns *ns, *next;
+	LIST_HEAD(rm_list);
 
 	mutex_lock(&ctrl->namespaces_mutex);
 	list_for_each_entry(ns, &ctrl->namespaces, list) {
-		if (ns->disk && nvme_revalidate_disk(ns->disk))
-			nvme_ns_remove(ns);
+		if (ns->disk && nvme_revalidate_disk(ns->disk)) {
+			list_move_tail(&ns->list, &rm_list);
+		}
 	}
 	mutex_unlock(&ctrl->namespaces_mutex);
+
+	list_for_each_entry_safe(ns, next, &rm_list, list)
+		nvme_ns_remove(ns);
 }
 
 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
@@ -2687,7 +2696,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
 		[NVME_CTRL_LIVE]	= "live",
 		[NVME_CTRL_ADMIN_ONLY]	= "only-admin",
 		[NVME_CTRL_RESETTING]	= "resetting",
-		[NVME_CTRL_RECONNECTING]= "reconnecting",
+		[NVME_CTRL_CONNECTING]	= "connecting",
 		[NVME_CTRL_DELETING]	= "deleting",
 		[NVME_CTRL_DEAD]	= "dead",
 	};
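
nvme_change_ctrl_state() validates every controller transition against an explicit old-state/new-state table, which is why the RECONNECTING to CONNECTING rename has to touch each arm of the switch. The same idea expressed as a data-driven table, in a sketch whose states and edges are illustrative rather than the driver's exact rules:

    /* Sketch: validate state transitions against an explicit table, in the
     * spirit of nvme_change_ctrl_state(). States and edges here are invented. */
    #include <stdbool.h>
    #include <stdio.h>

    enum st { NEW, LIVE, RESETTING, CONNECTING, DELETING, DEAD, NR_ST };

    static const bool allowed[NR_ST][NR_ST] = {
    	/* old state indexes the row, new state the column */
    	[NEW]        = { [CONNECTING] = true, [DELETING] = true },
    	[CONNECTING] = { [LIVE] = true, [DELETING] = true },
    	[LIVE]       = { [RESETTING] = true, [DELETING] = true },
    	[RESETTING]  = { [CONNECTING] = true, [LIVE] = true, [DELETING] = true },
    	[DELETING]   = { [DEAD] = true },
    };

    static bool change_state(enum st *cur, enum st next)
    {
    	if (!allowed[*cur][next])
    		return false;	/* reject an impossible transition */
    	*cur = next;
    	return true;
    }

    int main(void)
    {
    	enum st s = NEW;

    	printf("NEW->CONNECTING: %d\n", change_state(&s, CONNECTING));
    	printf("CONNECTING->DEAD: %d\n", change_state(&s, DEAD)); /* rejected */
    	return 0;
    }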
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 25b19f722f5b..a3145d90c1d2 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -171,13 +171,14 @@ static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl,
 	    cmd->common.opcode != nvme_fabrics_command ||
 	    cmd->fabrics.fctype != nvme_fabrics_type_connect) {
 		/*
-		 * Reconnecting state means transport disruption, which can take
-		 * a long time and even might fail permanently, fail fast to
-		 * give upper layers a chance to failover.
+		 * Connecting state means transport disruption or initial
+		 * establishment, which can take a long time and even might
+		 * fail permanently, fail fast to give upper layers a chance
+		 * to failover.
 		 * Deleting state means that the ctrl will never accept commands
 		 * again, fail it permanently.
 		 */
-		if (ctrl->state == NVME_CTRL_RECONNECTING ||
+		if (ctrl->state == NVME_CTRL_CONNECTING ||
 		    ctrl->state == NVME_CTRL_DELETING) {
 			nvme_req(rq)->status = NVME_SC_ABORT_REQ;
 			return BLK_STS_IOERR;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index b856d7c919d2..7f51f8414b97 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -55,9 +55,7 @@ struct nvme_fc_queue {
 
 enum nvme_fcop_flags {
 	FCOP_FLAGS_TERMIO	= (1 << 0),
-	FCOP_FLAGS_RELEASED	= (1 << 1),
-	FCOP_FLAGS_COMPLETE	= (1 << 2),
-	FCOP_FLAGS_AEN		= (1 << 3),
+	FCOP_FLAGS_AEN		= (1 << 1),
 };
 
 struct nvmefc_ls_req_op {
@@ -532,7 +530,7 @@ nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
 {
 	switch (ctrl->ctrl.state) {
 	case NVME_CTRL_NEW:
-	case NVME_CTRL_RECONNECTING:
+	case NVME_CTRL_CONNECTING:
 		/*
 		 * As all reconnects were suppressed, schedule a
 		 * connect.
@@ -777,7 +775,7 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
 		}
 		break;
 
-	case NVME_CTRL_RECONNECTING:
+	case NVME_CTRL_CONNECTING:
 		/*
 		 * The association has already been terminated and the
 		 * controller is attempting reconnects. No need to do anything
@@ -1470,7 +1468,6 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
 
 /* *********************** NVME Ctrl Routines **************************** */
 
-static void __nvme_fc_final_op_cleanup(struct request *rq);
 static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
 
 static int
@@ -1512,13 +1509,19 @@ nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
 static int
 __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
 {
-	int state;
+	unsigned long flags;
+	int opstate;
+
+	spin_lock_irqsave(&ctrl->lock, flags);
+	opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
+	if (opstate != FCPOP_STATE_ACTIVE)
+		atomic_set(&op->state, opstate);
+	else if (ctrl->flags & FCCTRL_TERMIO)
+		ctrl->iocnt++;
+	spin_unlock_irqrestore(&ctrl->lock, flags);
 
-	state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
-	if (state != FCPOP_STATE_ACTIVE) {
-		atomic_set(&op->state, state);
+	if (opstate != FCPOP_STATE_ACTIVE)
 		return -ECANCELED;
-	}
 
 	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
 					&ctrl->rport->remoteport,
@@ -1532,60 +1535,26 @@ static void
 nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
 {
 	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
-	unsigned long flags;
-	int i, ret;
-
-	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
-		if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE)
-			continue;
-
-		spin_lock_irqsave(&ctrl->lock, flags);
-		if (ctrl->flags & FCCTRL_TERMIO) {
-			ctrl->iocnt++;
-			aen_op->flags |= FCOP_FLAGS_TERMIO;
-		}
-		spin_unlock_irqrestore(&ctrl->lock, flags);
-
-		ret = __nvme_fc_abort_op(ctrl, aen_op);
-		if (ret) {
-			/*
-			 * if __nvme_fc_abort_op failed the io wasn't
-			 * active. Thus this call path is running in
-			 * parallel to the io complete. Treat as non-error.
-			 */
+	int i;
 
-			/* back out the flags/counters */
-			spin_lock_irqsave(&ctrl->lock, flags);
-			if (ctrl->flags & FCCTRL_TERMIO)
-				ctrl->iocnt--;
-			aen_op->flags &= ~FCOP_FLAGS_TERMIO;
-			spin_unlock_irqrestore(&ctrl->lock, flags);
-			return;
-		}
-	}
+	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
+		__nvme_fc_abort_op(ctrl, aen_op);
 }
 
-static inline int
+static inline void
 __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
-		struct nvme_fc_fcp_op *op)
+		struct nvme_fc_fcp_op *op, int opstate)
 {
 	unsigned long flags;
-	bool complete_rq = false;
 
-	spin_lock_irqsave(&ctrl->lock, flags);
-	if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
+	if (opstate == FCPOP_STATE_ABORTED) {
+		spin_lock_irqsave(&ctrl->lock, flags);
 		if (ctrl->flags & FCCTRL_TERMIO) {
 			if (!--ctrl->iocnt)
 				wake_up(&ctrl->ioabort_wait);
 		}
+		spin_unlock_irqrestore(&ctrl->lock, flags);
 	}
-	if (op->flags & FCOP_FLAGS_RELEASED)
-		complete_rq = true;
-	else
-		op->flags |= FCOP_FLAGS_COMPLETE;
-	spin_unlock_irqrestore(&ctrl->lock, flags);
-
-	return complete_rq;
 }
 
 static void
@@ -1601,6 +1570,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
 	union nvme_result result;
 	bool terminate_assoc = true;
+	int opstate;
 
 	/*
 	 * WARNING:
@@ -1639,11 +1609,12 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 	 * association to be terminated.
 	 */
 
+	opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
+
 	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
 				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
 
-	if (atomic_read(&op->state) == FCPOP_STATE_ABORTED ||
-			op->flags & FCOP_FLAGS_TERMIO)
+	if (opstate == FCPOP_STATE_ABORTED)
 		status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
 	else if (freq->status)
 		status = cpu_to_le16(NVME_SC_INTERNAL << 1);
@@ -1708,7 +1679,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 done:
 	if (op->flags & FCOP_FLAGS_AEN) {
 		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
-		__nvme_fc_fcpop_chk_teardowns(ctrl, op);
+		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
 		atomic_set(&op->state, FCPOP_STATE_IDLE);
 		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
 		nvme_fc_ctrl_put(ctrl);
@@ -1722,13 +1693,11 @@ done:
 	if (status &&
 	    (blk_queue_dying(rq->q) ||
 	     ctrl->ctrl.state == NVME_CTRL_NEW ||
-	     ctrl->ctrl.state == NVME_CTRL_RECONNECTING))
+	     ctrl->ctrl.state == NVME_CTRL_CONNECTING))
 		status |= cpu_to_le16(NVME_SC_DNR << 1);
 
-	if (__nvme_fc_fcpop_chk_teardowns(ctrl, op))
-		__nvme_fc_final_op_cleanup(rq);
-	else
-		nvme_end_request(rq, status, result);
+	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
+	nvme_end_request(rq, status, result);
 
 check_error:
 	if (terminate_assoc)
@@ -2415,46 +2384,16 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg)
 }
 
 static void
-__nvme_fc_final_op_cleanup(struct request *rq)
+nvme_fc_complete_rq(struct request *rq)
 {
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
 	struct nvme_fc_ctrl *ctrl = op->ctrl;
 
 	atomic_set(&op->state, FCPOP_STATE_IDLE);
-	op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED |
-			FCOP_FLAGS_COMPLETE);
 
 	nvme_fc_unmap_data(ctrl, rq, op);
 	nvme_complete_rq(rq);
 	nvme_fc_ctrl_put(ctrl);
-
-}
-
-static void
-nvme_fc_complete_rq(struct request *rq)
-{
-	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
-	struct nvme_fc_ctrl *ctrl = op->ctrl;
-	unsigned long flags;
-	bool completed = false;
-
-	/*
-	 * the core layer, on controller resets after calling
-	 * nvme_shutdown_ctrl(), calls complete_rq without our
-	 * calling blk_mq_complete_request(), thus there may still
-	 * be live i/o outstanding with the LLDD. Means transport has
-	 * to track complete calls vs fcpio_done calls to know what
-	 * path to take on completes and dones.
-	 */
-	spin_lock_irqsave(&ctrl->lock, flags);
-	if (op->flags & FCOP_FLAGS_COMPLETE)
-		completed = true;
-	else
-		op->flags |= FCOP_FLAGS_RELEASED;
-	spin_unlock_irqrestore(&ctrl->lock, flags);
-
-	if (completed)
-		__nvme_fc_final_op_cleanup(rq);
 }
 
 /*
@@ -2476,35 +2415,11 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
 	struct nvme_ctrl *nctrl = data;
 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
-	unsigned long flags;
-	int status;
 
 	if (!blk_mq_request_started(req))
 		return;
 
-	spin_lock_irqsave(&ctrl->lock, flags);
-	if (ctrl->flags & FCCTRL_TERMIO) {
-		ctrl->iocnt++;
-		op->flags |= FCOP_FLAGS_TERMIO;
-	}
-	spin_unlock_irqrestore(&ctrl->lock, flags);
-
-	status = __nvme_fc_abort_op(ctrl, op);
-	if (status) {
-		/*
-		 * if __nvme_fc_abort_op failed the io wasn't
-		 * active. Thus this call path is running in
-		 * parallel to the io complete. Treat as non-error.
-		 */
-
-		/* back out the flags/counters */
-		spin_lock_irqsave(&ctrl->lock, flags);
-		if (ctrl->flags & FCCTRL_TERMIO)
-			ctrl->iocnt--;
-		op->flags &= ~FCOP_FLAGS_TERMIO;
-		spin_unlock_irqrestore(&ctrl->lock, flags);
-		return;
-	}
+	__nvme_fc_abort_op(ctrl, op);
 }
 
 
@@ -2943,7 +2858,7 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
 	unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
 	bool recon = true;
 
-	if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING)
+	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
 		return;
 
 	if (portptr->port_state == FC_OBJSTATE_ONLINE)
@@ -2991,10 +2906,10 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
 	/* will block will waiting for io to terminate */
 	nvme_fc_delete_association(ctrl);
 
-	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
 		dev_err(ctrl->ctrl.device,
 			"NVME-FC{%d}: error_recovery: Couldn't change state "
-			"to RECONNECTING\n", ctrl->cnum);
+			"to CONNECTING\n", ctrl->cnum);
 		return;
 	}
 
@@ -3195,7 +3110,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	 * transport errors (frame drop, LS failure) inherently must kill
 	 * the association. The transport is coded so that any command used
 	 * to create the association (prior to a LIVE state transition
-	 * while NEW or RECONNECTING) will fail if it completes in error or
+	 * while NEW or CONNECTING) will fail if it completes in error or
 	 * times out.
 	 *
 	 * As such: as the connect request was mostly likely due to a
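
The fc.c rework retires the FCOP_FLAGS_RELEASED/FCOP_FLAGS_COMPLETE bookkeeping in favour of a single atomic exchange on the op state: whichever path performs the xchg first owns the outcome, and a loser sees the prior state and backs off. The core move, sketched with C11 atomics (names invented):

    /* Sketch: resolve an abort/complete race with one atomic exchange; the
     * observed prior state tells each path whether it won. Illustrative only. */
    #include <stdatomic.h>
    #include <stdio.h>

    enum { OP_IDLE, OP_ACTIVE, OP_COMPLETE, OP_ABORTED };

    static _Atomic int op_state = OP_ACTIVE;

    static int try_abort(void)
    {
    	int prev = atomic_exchange(&op_state, OP_ABORTED);

    	if (prev != OP_ACTIVE) {
    		atomic_store(&op_state, prev);	/* lost the race: restore */
    		return -1;
    	}
    	return 0;	/* we own the abort */
    }

    static void complete_op(void)
    {
    	int prev = atomic_exchange(&op_state, OP_COMPLETE);

    	/* prev == OP_ABORTED means an abort won; report it as aborted */
    	printf("completed, prior state %d\n", prev);
    }

    int main(void)
    {
    	if (try_abort() == 0)
    		puts("abort path owns cleanup");
    	complete_op();
    	return 0;
    }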
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 8e4550fa08f8..0521e4707d1c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -123,7 +123,7 @@ enum nvme_ctrl_state {
 	NVME_CTRL_LIVE,
 	NVME_CTRL_ADMIN_ONLY, /* Only admin queue live */
 	NVME_CTRL_RESETTING,
-	NVME_CTRL_RECONNECTING,
+	NVME_CTRL_CONNECTING,
 	NVME_CTRL_DELETING,
 	NVME_CTRL_DEAD,
 };
@@ -183,6 +183,7 @@ struct nvme_ctrl {
 	struct work_struct scan_work;
 	struct work_struct async_event_work;
 	struct delayed_work ka_work;
+	struct nvme_command ka_cmd;
 	struct work_struct fw_act_work;
 
 	/* Power saving configuration */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 6fe7af00a1f4..73036d2fbbd5 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1141,7 +1141,7 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
 	/* If there is a reset/reinit ongoing, we shouldn't reset again. */
 	switch (dev->ctrl.state) {
 	case NVME_CTRL_RESETTING:
-	case NVME_CTRL_RECONNECTING:
+	case NVME_CTRL_CONNECTING:
 		return false;
 	default:
 		break;
@@ -1215,13 +1215,17 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	 * cancellation error. All outstanding requests are completed on
 	 * shutdown, so we return BLK_EH_HANDLED.
 	 */
-	if (dev->ctrl.state == NVME_CTRL_RESETTING) {
+	switch (dev->ctrl.state) {
+	case NVME_CTRL_CONNECTING:
+	case NVME_CTRL_RESETTING:
 		dev_warn(dev->ctrl.device,
 			 "I/O %d QID %d timeout, disable controller\n",
 			 req->tag, nvmeq->qid);
 		nvme_dev_disable(dev, false);
 		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
 		return BLK_EH_HANDLED;
+	default:
+		break;
 	}
 
 	/*
@@ -1364,18 +1368,14 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
 				int qid, int depth)
 {
-	if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
-		unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
-						      dev->ctrl.page_size);
-		nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
-		nvmeq->sq_cmds_io = dev->cmb + offset;
-	} else {
-		nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
-					&nvmeq->sq_dma_addr, GFP_KERNEL);
-		if (!nvmeq->sq_cmds)
-			return -ENOMEM;
-	}
+	/* CMB SQEs will be mapped before creation */
+	if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS))
+		return 0;
 
+	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+					    &nvmeq->sq_dma_addr, GFP_KERNEL);
+	if (!nvmeq->sq_cmds)
+		return -ENOMEM;
 	return 0;
 }
1381 1381
@@ -1449,6 +1449,13 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
1449 struct nvme_dev *dev = nvmeq->dev; 1449 struct nvme_dev *dev = nvmeq->dev;
1450 int result; 1450 int result;
1451 1451
1452 if (dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
1453 unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth),
1454 dev->ctrl.page_size);
1455 nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
1456 nvmeq->sq_cmds_io = dev->cmb + offset;
1457 }
1458
1452 nvmeq->cq_vector = qid - 1; 1459 nvmeq->cq_vector = qid - 1;
1453 result = adapter_alloc_cq(dev, qid, nvmeq); 1460 result = adapter_alloc_cq(dev, qid, nvmeq);
1454 if (result < 0) 1461 if (result < 0)
@@ -2288,12 +2295,12 @@ static void nvme_reset_work(struct work_struct *work)
 		nvme_dev_disable(dev, false);
 
 	/*
-	 * Introduce RECONNECTING state from nvme-fc/rdma transports to mark the
+	 * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
 	 * initializing procedure here.
 	 */
-	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RECONNECTING)) {
+	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
 		dev_warn(dev->ctrl.device,
-			"failed to mark controller RECONNECTING\n");
+			"failed to mark controller CONNECTING\n");
 		goto out;
 	}
 
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 2bc059f7d73c..3a51ed50eff2 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -887,7 +887,7 @@ free_ctrl:
 static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
 {
 	/* If we are resetting/deleting then do nothing */
-	if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) {
+	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
 		WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
 			ctrl->ctrl.state == NVME_CTRL_LIVE);
 		return;
@@ -973,7 +973,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 	nvme_start_queues(&ctrl->ctrl);
 
-	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
 		/* state change failure should never happen */
 		WARN_ON_ONCE(1);
 		return;
@@ -1756,7 +1756,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
 	nvme_stop_ctrl(&ctrl->ctrl);
 	nvme_rdma_shutdown_ctrl(ctrl, false);
 
-	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
 		/* state change failure should never happen */
 		WARN_ON_ONCE(1);
 		return;
@@ -1784,11 +1784,8 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
 	return;
 
 out_fail:
-	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
-	nvme_remove_namespaces(&ctrl->ctrl);
-	nvme_rdma_shutdown_ctrl(ctrl, true);
-	nvme_uninit_ctrl(&ctrl->ctrl);
-	nvme_put_ctrl(&ctrl->ctrl);
+	++ctrl->ctrl.nr_reconnects;
+	nvme_rdma_reconnect_or_remove(ctrl);
 }
 
 static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
@@ -1942,6 +1939,9 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 	if (!ctrl->queues)
 		goto out_uninit_ctrl;
 
+	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
+	WARN_ON_ONCE(!changed);
+
 	ret = nvme_rdma_configure_admin_queue(ctrl, true);
 	if (ret)
 		goto out_kfree_queues;
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index 0a4372a016f2..28bbdff4a88b 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -105,10 +105,13 @@ static void nvmet_execute_flush(struct nvmet_req *req)
 static u16 nvmet_discard_range(struct nvmet_ns *ns,
 		struct nvme_dsm_range *range, struct bio **bio)
 {
-	if (__blkdev_issue_discard(ns->bdev,
+	int ret;
+
+	ret = __blkdev_issue_discard(ns->bdev,
 			le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
 			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
-			GFP_KERNEL, 0, bio))
+			GFP_KERNEL, 0, bio);
+	if (ret && ret != -EOPNOTSUPP)
 		return NVME_SC_INTERNAL | NVME_SC_DNR;
 	return 0;
 }
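
The nvmet change inspects the return value of __blkdev_issue_discard() and treats -EOPNOTSUPP as benign, surfacing an error status only for genuine failures. The shape of that filter, as a tiny sketch:

    /* Sketch: treat "operation not supported" as benign while still failing
     * on genuine I/O errors, as in nvmet_discard_range(). Values invented. */
    #include <errno.h>
    #include <stdio.h>

    static int issue_discard(void) { return -EOPNOTSUPP; }

    static int discard_range(void)
    {
    	int ret = issue_discard();

    	if (ret && ret != -EOPNOTSUPP)
    		return -1;	/* real failure: surface an error status */
    	return 0;		/* success, or harmlessly unsupported */
    }

    int main(void)
    {
    	printf("discard status: %d\n", discard_range());
    	return 0;
    }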
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 36ed84e26d9c..f46828e3b082 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -977,11 +977,11 @@ static int of_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
 	return 0;
 }
 
-static void *
+static const void *
 of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
 				const struct device *dev)
 {
-	return (void *)of_device_get_match_data(dev);
+	return of_device_get_match_data(dev);
 }
 
 const struct fwnode_operations of_fwnode_ops = {
diff --git a/drivers/opp/cpu.c b/drivers/opp/cpu.c
index 2d87bc1adf38..0c0910709435 100644
--- a/drivers/opp/cpu.c
+++ b/drivers/opp/cpu.c
@@ -55,7 +55,7 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
 	if (max_opps <= 0)
 		return max_opps ? max_opps : -ENODATA;
 
-	freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC);
+	freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_KERNEL);
 	if (!freq_table)
 		return -ENOMEM;
 
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index fc734014206f..8b14bd326d4a 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3419,22 +3419,29 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
 
 static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
 {
-	pci_set_vpd_size(dev, 8192);
-}
-
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x20, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x21, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x22, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x23, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x24, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x25, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x26, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x30, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x31, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x32, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x35, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x36, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x37, quirk_chelsio_extend_vpd);
+	int chip = (dev->device & 0xf000) >> 12;
+	int func = (dev->device & 0x0f00) >> 8;
+	int prod = (dev->device & 0x00ff) >> 0;
+
+	/*
+	 * If this is a T3-based adapter, there's a 1KB VPD area at offset
+	 * 0xc00 which contains the preferred VPD values. If this is a T4 or
+	 * later based adapter, the special VPD is at offset 0x400 for the
+	 * Physical Functions (the SR-IOV Virtual Functions have no VPD
+	 * Capabilities). The PCI VPD Access core routines will normally
+	 * compute the size of the VPD by parsing the VPD Data Structure at
+	 * offset 0x000. This will result in silent failures when attempting
+	 * to accesses these other VPD areas which are beyond those computed
+	 * limits.
+	 */
+	if (chip == 0x0 && prod >= 0x20)
+		pci_set_vpd_size(dev, 8192);
+	else if (chip >= 0x4 && func < 0x8)
+		pci_set_vpd_size(dev, 2048);
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
+			quirk_chelsio_extend_vpd);
 
 #ifdef CONFIG_ACPI
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 7bc5eee96b31..0c2ed11c0603 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -17,7 +17,6 @@
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/perf/arm_pmu.h>
-#include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/sched/clock.h>
 #include <linux/spinlock.h>
@@ -26,6 +25,9 @@
 
 #include <asm/irq_regs.h>
 
+static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
+static DEFINE_PER_CPU(int, cpu_irq);
+
 static int
 armpmu_map_cache_event(const unsigned (*cache_map)
 				      [PERF_COUNT_HW_CACHE_MAX]
@@ -320,17 +322,9 @@ validate_group(struct perf_event *event)
 	return 0;
 }
 
-static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu)
-{
-	struct platform_device *pdev = armpmu->plat_device;
-
-	return pdev ? dev_get_platdata(&pdev->dev) : NULL;
-}
-
 static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
 {
 	struct arm_pmu *armpmu;
-	struct arm_pmu_platdata *plat;
 	int ret;
 	u64 start_clock, finish_clock;
 
@@ -341,14 +335,11 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
 	 * dereference.
 	 */
 	armpmu = *(void **)dev;
-
-	plat = armpmu_get_platdata(armpmu);
+	if (WARN_ON_ONCE(!armpmu))
+		return IRQ_NONE;
 
 	start_clock = sched_clock();
-	if (plat && plat->handle_irq)
-		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
-	else
-		ret = armpmu->handle_irq(irq, armpmu);
+	ret = armpmu->handle_irq(irq, armpmu);
 	finish_clock = sched_clock();
 
 	perf_sample_event_took(finish_clock - start_clock);
@@ -531,54 +522,41 @@ int perf_num_counters(void)
 }
 EXPORT_SYMBOL_GPL(perf_num_counters);
 
-void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
+static int armpmu_count_irq_users(const int irq)
 {
-	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
-	int irq = per_cpu(hw_events->irq, cpu);
+	int cpu, count = 0;
 
-	if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
-		return;
-
-	if (irq_is_percpu_devid(irq)) {
-		free_percpu_irq(irq, &hw_events->percpu_pmu);
-		cpumask_clear(&armpmu->active_irqs);
-		return;
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(cpu_irq, cpu) == irq)
+			count++;
 	}
 
-	free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+	return count;
 }
 
-void armpmu_free_irqs(struct arm_pmu *armpmu)
+void armpmu_free_irq(int irq, int cpu)
 {
-	int cpu;
+	if (per_cpu(cpu_irq, cpu) == 0)
+		return;
+	if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
+		return;
+
+	if (!irq_is_percpu_devid(irq))
+		free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu));
+	else if (armpmu_count_irq_users(irq) == 1)
+		free_percpu_irq(irq, &cpu_armpmu);
 
-	for_each_cpu(cpu, &armpmu->supported_cpus)
-		armpmu_free_irq(armpmu, cpu);
+	per_cpu(cpu_irq, cpu) = 0;
 }
 
-int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
+int armpmu_request_irq(int irq, int cpu)
 {
 	int err = 0;
-	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
 	const irq_handler_t handler = armpmu_dispatch_irq;
-	int irq = per_cpu(hw_events->irq, cpu);
 	if (!irq)
 		return 0;
 
-	if (irq_is_percpu_devid(irq) && cpumask_empty(&armpmu->active_irqs)) {
-		err = request_percpu_irq(irq, handler, "arm-pmu",
-					 &hw_events->percpu_pmu);
-	} else if (irq_is_percpu_devid(irq)) {
-		int other_cpu = cpumask_first(&armpmu->active_irqs);
-		int other_irq = per_cpu(hw_events->irq, other_cpu);
-
-		if (irq != other_irq) {
-			pr_warn("mismatched PPIs detected.\n");
-			err = -EINVAL;
-			goto err_out;
-		}
-	} else {
-		struct arm_pmu_platdata *platdata = armpmu_get_platdata(armpmu);
+	if (!irq_is_percpu_devid(irq)) {
 		unsigned long irq_flags;
 
 		err = irq_force_affinity(irq, cpumask_of(cpu));
@@ -589,22 +567,22 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
 			goto err_out;
 		}
 
-		if (platdata && platdata->irq_flags) {
-			irq_flags = platdata->irq_flags;
-		} else {
-			irq_flags = IRQF_PERCPU |
-				    IRQF_NOBALANCING |
-				    IRQF_NO_THREAD;
-		}
+		irq_flags = IRQF_PERCPU |
+			    IRQF_NOBALANCING |
+			    IRQF_NO_THREAD;
 
+		irq_set_status_flags(irq, IRQ_NOAUTOEN);
 		err = request_irq(irq, handler, irq_flags, "arm-pmu",
-				  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+				  per_cpu_ptr(&cpu_armpmu, cpu));
+	} else if (armpmu_count_irq_users(irq) == 0) {
+		err = request_percpu_irq(irq, handler, "arm-pmu",
+					 &cpu_armpmu);
 	}
 
 	if (err)
 		goto err_out;
 
-	cpumask_set_cpu(cpu, &armpmu->active_irqs);
+	per_cpu(cpu_irq, cpu) = irq;
 	return 0;
 
 err_out:
@@ -612,19 +590,6 @@ err_out:
 	return err;
 }
 
-int armpmu_request_irqs(struct arm_pmu *armpmu)
-{
-	int cpu, err;
-
-	for_each_cpu(cpu, &armpmu->supported_cpus) {
-		err = armpmu_request_irq(armpmu, cpu);
-		if (err)
-			break;
-	}
-
-	return err;
-}
-
 static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
 {
 	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
@@ -647,12 +612,14 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
 	if (pmu->reset)
 		pmu->reset(pmu);
 
+	per_cpu(cpu_armpmu, cpu) = pmu;
+
 	irq = armpmu_get_cpu_irq(pmu, cpu);
 	if (irq) {
-		if (irq_is_percpu_devid(irq)) {
+		if (irq_is_percpu_devid(irq))
 			enable_percpu_irq(irq, IRQ_TYPE_NONE);
-			return 0;
-		}
+		else
+			enable_irq(irq);
 	}
 
 	return 0;
@@ -667,8 +634,14 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
 		return 0;
 
 	irq = armpmu_get_cpu_irq(pmu, cpu);
-	if (irq && irq_is_percpu_devid(irq))
-		disable_percpu_irq(irq);
+	if (irq) {
+		if (irq_is_percpu_devid(irq))
+			disable_percpu_irq(irq);
+		else
+			disable_irq(irq);
+	}
+
+	per_cpu(cpu_armpmu, cpu) = NULL;
 
 	return 0;
 }
@@ -800,18 +773,18 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 					    &cpu_pmu->node);
 }
 
-struct arm_pmu *armpmu_alloc(void)
+static struct arm_pmu *__armpmu_alloc(gfp_t flags)
 {
 	struct arm_pmu *pmu;
 	int cpu;
 
-	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
+	pmu = kzalloc(sizeof(*pmu), flags);
 	if (!pmu) {
 		pr_info("failed to allocate PMU device!\n");
 		goto out;
 	}
 
-	pmu->hw_events = alloc_percpu(struct pmu_hw_events);
+	pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags);
 	if (!pmu->hw_events) {
 		pr_info("failed to allocate per-cpu PMU data.\n");
 		goto out_free_pmu;
@@ -857,6 +830,17 @@ out:
 	return NULL;
 }
 
+struct arm_pmu *armpmu_alloc(void)
+{
+	return __armpmu_alloc(GFP_KERNEL);
+}
+
+struct arm_pmu *armpmu_alloc_atomic(void)
+{
+	return __armpmu_alloc(GFP_ATOMIC);
+}
+
+
 void armpmu_free(struct arm_pmu *pmu)
 {
 	free_percpu(pmu->hw_events);
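
arm_pmu now records each CPU's IRQ in a per-CPU variable and lets armpmu_free_irq() release a shared percpu interrupt only when armpmu_count_irq_users() says this CPU is its last user. A compact model of that last-user accounting, with an array standing in for per-CPU storage (all names invented):

    /* Sketch: per-CPU IRQ bookkeeping where a shared (percpu) IRQ is released
     * only by its last user, mirroring armpmu_count_irq_users(). */
    #include <stdio.h>

    #define NR_CPUS 4

    static int cpu_irq[NR_CPUS];	/* stand-in for a per-CPU variable */

    static int count_irq_users(int irq)
    {
    	int cpu, count = 0;

    	for (cpu = 0; cpu < NR_CPUS; cpu++)
    		if (cpu_irq[cpu] == irq)
    			count++;
    	return count;
    }

    static void free_irq_for_cpu(int irq, int cpu)
    {
    	if (cpu_irq[cpu] != irq)
    		return;
    	if (count_irq_users(irq) == 1)
    		printf("cpu%d: last user, releasing irq %d\n", cpu, irq);
    	cpu_irq[cpu] = 0;
    }

    int main(void)
    {
    	cpu_irq[0] = cpu_irq[1] = 23;	/* two CPUs share one percpu IRQ */
    	free_irq_for_cpu(23, 0);	/* still one user left: no release */
    	free_irq_for_cpu(23, 1);	/* last user: release */
    	return 0;
    }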
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
index 705f1a390e31..0f197516d708 100644
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -11,6 +11,8 @@
 #include <linux/acpi.h>
 #include <linux/cpumask.h>
 #include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
 #include <linux/percpu.h>
 #include <linux/perf/arm_pmu.h>
 
@@ -87,7 +89,13 @@ static int arm_pmu_acpi_parse_irqs(void)
 			pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
 		}
 
+		/*
+		 * Log and request the IRQ so the core arm_pmu code can manage
+		 * it. We'll have to sanity-check IRQs later when we associate
+		 * them with their PMUs.
+		 */
 		per_cpu(pmu_irqs, cpu) = irq;
+		armpmu_request_irq(irq, cpu);
 	}
 
 	return 0;
@@ -127,7 +135,7 @@ static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
 		return pmu;
 	}
 
-	pmu = armpmu_alloc();
+	pmu = armpmu_alloc_atomic();
 	if (!pmu) {
 		pr_warn("Unable to allocate PMU for CPU%d\n",
 			smp_processor_id());
@@ -140,6 +148,35 @@ static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
 }
 
 /*
+ * Check whether the new IRQ is compatible with those already associated with
+ * the PMU (e.g. we don't have mismatched PPIs).
+ */
+static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
+{
+	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
+	int cpu;
+
+	if (!irq)
+		return true;
+
+	for_each_cpu(cpu, &pmu->supported_cpus) {
+		int other_irq = per_cpu(hw_events->irq, cpu);
+		if (!other_irq)
+			continue;
+
+		if (irq == other_irq)
+			continue;
+		if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq))
+			continue;
+
+		pr_warn("mismatched PPIs detected\n");
+		return false;
+	}
+
+	return true;
+}
+
+/*
  * This must run before the common arm_pmu hotplug logic, so that we can
  * associate a CPU and its interrupt before the common code tries to manage the
  * affinity and so on.
@@ -164,19 +201,14 @@ static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
 	if (!pmu)
 		return -ENOMEM;
 
-	cpumask_set_cpu(cpu, &pmu->supported_cpus);
-
 	per_cpu(probed_pmus, cpu) = pmu;
 
-	/*
-	 * Log and request the IRQ so the core arm_pmu code can manage it. In
-	 * some situations (e.g. mismatched PPIs), we may fail to request the
-	 * IRQ. However, it may be too late for us to do anything about it.
-	 * The common ARM PMU code will log a warning in this case.
-	 */
-	hw_events = pmu->hw_events;
-	per_cpu(hw_events->irq, cpu) = irq;
-	armpmu_request_irq(pmu, cpu);
+	if (pmu_irq_matches(pmu, irq)) {
+		hw_events = pmu->hw_events;
+		per_cpu(hw_events->irq, cpu) = irq;
+	}
+
+	cpumask_set_cpu(cpu, &pmu->supported_cpus);
 
 	/*
 	 * Ideally, we'd probe the PMU here when we find the first matching
@@ -247,11 +279,6 @@ static int arm_pmu_acpi_init(void)
 	if (acpi_disabled)
 		return 0;
 
-	/*
-	 * We can't request IRQs yet, since we don't know the cookie value
-	 * until we know which CPUs share the same logical PMU. We'll handle
-	 * that in arm_pmu_acpi_cpu_starting().
-	 */
 	ret = arm_pmu_acpi_parse_irqs();
 	if (ret)
 		return ret;
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 46501cc79fd7..7729eda5909d 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -127,13 +127,6 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
 			pdev->dev.of_node);
 	}
 
-	/*
-	 * Some platforms have all PMU IRQs OR'd into a single IRQ, with a
-	 * special platdata function that attempts to demux them.
-	 */
-	if (dev_get_platdata(&pdev->dev))
-		cpumask_setall(&pmu->supported_cpus);
-
 	for (i = 0; i < num_irqs; i++) {
 		int cpu, irq;
 
@@ -164,6 +157,36 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
 	return 0;
 }
 
+static int armpmu_request_irqs(struct arm_pmu *armpmu)
+{
+	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
+	int cpu, err;
+
+	for_each_cpu(cpu, &armpmu->supported_cpus) {
+		int irq = per_cpu(hw_events->irq, cpu);
+		if (!irq)
+			continue;
+
+		err = armpmu_request_irq(irq, cpu);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
+static void armpmu_free_irqs(struct arm_pmu *armpmu)
+{
+	int cpu;
+	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
+
+	for_each_cpu(cpu, &armpmu->supported_cpus) {
+		int irq = per_cpu(hw_events->irq, cpu);
+
+		armpmu_free_irq(irq, cpu);
+	}
+}
+
 int arm_pmu_device_probe(struct platform_device *pdev,
 			 const struct of_device_id *of_table,
 			 const struct pmu_probe_info *probe_table)
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 2a68f59d2228..c52c6723374b 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -127,24 +127,6 @@ static const struct dmi_system_id dell_device_table[] __initconst = {
127 }, 127 },
128 }, 128 },
129 { 129 {
130 .matches = {
131 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
132 DMI_MATCH(DMI_CHASSIS_TYPE, "30"), /*Tablet*/
133 },
134 },
135 {
136 .matches = {
137 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
138 DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /*Convertible*/
139 },
140 },
141 {
142 .matches = {
143 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
144 DMI_MATCH(DMI_CHASSIS_TYPE, "32"), /*Detachable*/
145 },
146 },
147 {
148 .ident = "Dell Computer Corporation", 130 .ident = "Dell Computer Corporation",
149 .matches = { 131 .matches = {
150 DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), 132 DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
@@ -1279,7 +1261,7 @@ static int kbd_get_state(struct kbd_state *state)
1279 struct calling_interface_buffer buffer; 1261 struct calling_interface_buffer buffer;
1280 int ret; 1262 int ret;
1281 1263
1282 dell_fill_request(&buffer, 0, 0, 0, 0); 1264 dell_fill_request(&buffer, 0x1, 0, 0, 0);
1283 ret = dell_send_request(&buffer, 1265 ret = dell_send_request(&buffer,
1284 CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT); 1266 CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
1285 if (ret) 1267 if (ret)
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 5b6f18b18801..535199c9e6bc 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -113,7 +113,7 @@ MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth.");
113/* 113/*
114 * ACPI Helpers 114 * ACPI Helpers
115 */ 115 */
116#define IDEAPAD_EC_TIMEOUT (100) /* in ms */ 116#define IDEAPAD_EC_TIMEOUT (200) /* in ms */
117 117
118static int read_method_int(acpi_handle handle, const char *method, int *val) 118static int read_method_int(acpi_handle handle, const char *method, int *val)
119{ 119{
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index daa68acbc900..c0c8945603cb 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -933,7 +933,7 @@ static int wmi_dev_probe(struct device *dev)
933 goto probe_failure; 933 goto probe_failure;
934 } 934 }
935 935
936 buf = kmalloc(strlen(wdriver->driver.name) + 4, GFP_KERNEL); 936 buf = kmalloc(strlen(wdriver->driver.name) + 5, GFP_KERNEL);
937 if (!buf) { 937 if (!buf) {
938 ret = -ENOMEM; 938 ret = -ENOMEM;
939 goto probe_string_failure; 939 goto probe_string_failure;
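
The wmi hunk above grows the method-name allocation by one byte; the general rule it illustrates is that a buffer sized from strlen() must also reserve room for whatever gets appended plus the terminating NUL. A hedged sketch of that sizing discipline — the suffix here is hypothetical, not the driver's actual naming scheme:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Size the buffer from the parts actually written: name + suffix + NUL. */
static char *method_name(const char *drv_name, const char *suffix)
{
        size_t len = strlen(drv_name) + strlen(suffix) + 1;  /* +1 for '\0' */
        char *buf = malloc(len);

        if (buf)
                snprintf(buf, len, "%s%s", drv_name, suffix);
        return buf;
}

int main(void)
{
        char *name = method_name("wmi-driver", "_WQXX");  /* illustrative suffix */

        if (name) {
                puts(name);
                free(name);
        }
        return 0;
}
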
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index ba2e0856d22c..8f5c1d7f751a 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -1297,6 +1297,9 @@ static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
1297 vcdev->device_lost = true; 1297 vcdev->device_lost = true;
1298 rc = NOTIFY_DONE; 1298 rc = NOTIFY_DONE;
1299 break; 1299 break;
1300 case CIO_OPER:
1301 rc = NOTIFY_OK;
1302 break;
1300 default: 1303 default:
1301 rc = NOTIFY_DONE; 1304 rc = NOTIFY_DONE;
1302 break; 1305 break;
@@ -1309,6 +1312,27 @@ static struct ccw_device_id virtio_ids[] = {
1309 {}, 1312 {},
1310}; 1313};
1311 1314
1315#ifdef CONFIG_PM_SLEEP
1316static int virtio_ccw_freeze(struct ccw_device *cdev)
1317{
1318 struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
1319
1320 return virtio_device_freeze(&vcdev->vdev);
1321}
1322
1323static int virtio_ccw_restore(struct ccw_device *cdev)
1324{
1325 struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
1326 int ret;
1327
1328 ret = virtio_ccw_set_transport_rev(vcdev);
1329 if (ret)
1330 return ret;
1331
1332 return virtio_device_restore(&vcdev->vdev);
1333}
1334#endif
1335
1312static struct ccw_driver virtio_ccw_driver = { 1336static struct ccw_driver virtio_ccw_driver = {
1313 .driver = { 1337 .driver = {
1314 .owner = THIS_MODULE, 1338 .owner = THIS_MODULE,
@@ -1321,6 +1345,11 @@ static struct ccw_driver virtio_ccw_driver = {
1321 .set_online = virtio_ccw_online, 1345 .set_online = virtio_ccw_online,
1322 .notify = virtio_ccw_cio_notify, 1346 .notify = virtio_ccw_cio_notify,
1323 .int_class = IRQIO_VIR, 1347 .int_class = IRQIO_VIR,
1348#ifdef CONFIG_PM_SLEEP
1349 .freeze = virtio_ccw_freeze,
1350 .thaw = virtio_ccw_restore,
1351 .restore = virtio_ccw_restore,
1352#endif
1324}; 1353};
1325 1354
1326static int __init pure_hex(char **cp, unsigned int *val, int min_digit, 1355static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index fcfd28d2884c..de1b3fce936d 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -185,7 +185,6 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
185CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m) 185CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
186zalon7xx-objs := zalon.o ncr53c8xx.o 186zalon7xx-objs := zalon.o ncr53c8xx.o
187NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o 187NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o
188oktagon_esp_mod-objs := oktagon_esp.o oktagon_io.o
189 188
190# Files generated that shall be removed upon make clean 189# Files generated that shall be removed upon make clean
191clean-files := 53c700_d.h 53c700_u.h 190clean-files := 53c700_d.h 53c700_u.h
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index b3b931ab77eb..2664ea0df35f 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1693,8 +1693,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1693 * Map in the registers from the adapter. 1693 * Map in the registers from the adapter.
1694 */ 1694 */
1695 aac->base_size = AAC_MIN_FOOTPRINT_SIZE; 1695 aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
1696 if ((*aac_drivers[index].init)(aac)) 1696 if ((*aac_drivers[index].init)(aac)) {
1697 error = -ENODEV;
1697 goto out_unmap; 1698 goto out_unmap;
1699 }
1698 1700
1699 if (aac->sync_mode) { 1701 if (aac->sync_mode) {
1700 if (aac_sync_mode) 1702 if (aac_sync_mode)
diff --git a/drivers/scsi/aic7xxx/aiclib.c b/drivers/scsi/aic7xxx/aiclib.c
deleted file mode 100644
index 828ae3d9a510..000000000000
--- a/drivers/scsi/aic7xxx/aiclib.c
+++ /dev/null
@@ -1,34 +0,0 @@
1/*
2 * Implementation of Utility functions for all SCSI device types.
3 *
4 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
5 * Copyright (c) 1997, 1998 Kenneth D. Merry.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification, immediately at the beginning of the file.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD: src/sys/cam/scsi/scsi_all.c,v 1.38 2002/09/23 04:56:35 mjacob Exp $
30 * $Id$
31 */
32
33#include "aiclib.h"
34
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 8e2f767147cb..5a645b8b9af1 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1889,6 +1889,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
1889 /* we will not receive ABTS response for this IO */ 1889 /* we will not receive ABTS response for this IO */
1890 BNX2FC_IO_DBG(io_req, "Timer context finished processing " 1890 BNX2FC_IO_DBG(io_req, "Timer context finished processing "
1891 "this scsi cmd\n"); 1891 "this scsi cmd\n");
1892 return;
1892 } 1893 }
1893 1894
1894 /* Cancel the timeout_work, as we received IO completion */ 1895 /* Cancel the timeout_work, as we received IO completion */
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index be5ee2d37815..7dbbbb81a1e7 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -114,7 +114,7 @@ static enum csio_ln_ev fwevt_to_lnevt[] = {
114static struct csio_lnode * 114static struct csio_lnode *
115csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid) 115csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid)
116{ 116{
117 struct csio_lnode *ln = hw->rln; 117 struct csio_lnode *ln;
118 struct list_head *tmp; 118 struct list_head *tmp;
119 119
120 /* Match siblings lnode with portid */ 120 /* Match siblings lnode with portid */
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 022e421c2185..4b44325d1a82 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -876,6 +876,11 @@ static void alua_rtpg_work(struct work_struct *work)
876 876
877/** 877/**
878 * alua_rtpg_queue() - cause RTPG to be submitted asynchronously 878 * alua_rtpg_queue() - cause RTPG to be submitted asynchronously
879 * @pg: ALUA port group associated with @sdev.
880 * @sdev: SCSI device for which to submit an RTPG.
881 * @qdata: Information about the callback to invoke after the RTPG.
882 * @force: Whether or not to submit an RTPG if a work item that will submit an
883 * RTPG already has been scheduled.
879 * 884 *
880 * Returns true if and only if alua_rtpg_work() will be called asynchronously. 885 * Returns true if and only if alua_rtpg_work() will be called asynchronously.
881 * That function is responsible for calling @qdata->fn(). 886 * That function is responsible for calling @qdata->fn().
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 9a0696f68f37..b81a53c4a9a8 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -367,7 +367,7 @@ enum ibmvfc_fcp_rsp_info_codes {
367}; 367};
368 368
369struct ibmvfc_fcp_rsp_info { 369struct ibmvfc_fcp_rsp_info {
370 __be16 reserved; 370 u8 reserved[3];
371 u8 rsp_code; 371 u8 rsp_code;
372 u8 reserved2[4]; 372 u8 reserved2[4];
373}__attribute__((packed, aligned (2))); 373}__attribute__((packed, aligned (2)));
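
The ibmvfc hunk above fixes a wire-format mismatch: in the FCP response information block, rsp_code is the fourth byte, so the leading padding must be three single bytes rather than a 2-byte field. A standalone check of the corrected layout (C11, userspace integer types standing in for the kernel's):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct fcp_rsp_info {
        uint8_t reserved[3];    /* was a 2-byte field, which shifted rsp_code */
        uint8_t rsp_code;       /* must land at byte offset 3 */
        uint8_t reserved2[4];
} __attribute__((packed, aligned(2)));

static_assert(offsetof(struct fcp_rsp_info, rsp_code) == 3,
              "rsp_code must be the fourth byte of the response info");

int main(void) { return 0; }
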
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 13d6e4ec3022..59a87ca328d3 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2410,8 +2410,11 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
2410 continue; 2410 continue;
2411 } 2411 }
2412 2412
2413 for_each_cpu(cpu, mask) 2413 for_each_cpu_and(cpu, mask, cpu_online_mask) {
2414 if (cpu >= ioc->cpu_msix_table_sz)
2415 break;
2414 ioc->cpu_msix_table[cpu] = reply_q->msix_index; 2416 ioc->cpu_msix_table[cpu] = reply_q->msix_index;
2417 }
2415 } 2418 }
2416 return; 2419 return;
2417 } 2420 }
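
The mpt3sas hunk above bounds the reply-queue assignment in two ways: only CPUs that are both in the IRQ's affinity mask and online are considered, and the walk stops before indexing past the per-CPU lookup table. A userspace sketch of that bounded iteration, with plain arrays playing the cpumasks:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS   8
#define TABLE_SZ  4   /* stands in for ioc->cpu_msix_table_sz */

int main(void)
{
        bool in_mask[NR_CPUS] = { 1, 1, 0, 1, 1, 1, 0, 1 };
        bool online[NR_CPUS]  = { 1, 1, 1, 1, 0, 1, 1, 1 };
        int table[TABLE_SZ] = { -1, -1, -1, -1 };
        int msix_index = 2;

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                if (!in_mask[cpu] || !online[cpu])
                        continue;   /* for_each_cpu_and(cpu, mask, cpu_online_mask) */
                if (cpu >= TABLE_SZ)
                        break;      /* the new bounds guard */
                table[cpu] = msix_index;
        }

        for (int cpu = 0; cpu < TABLE_SZ; cpu++)
                printf("table[%d] = %d\n", cpu, table[cpu]);
        return 0;
}
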
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 029e2e69b29f..f57a94b4f0d9 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1724,7 +1724,6 @@ static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf)
1724{ 1724{
1725 struct qedi_ctx *qedi = data; 1725 struct qedi_ctx *qedi = data;
1726 struct nvm_iscsi_initiator *initiator; 1726 struct nvm_iscsi_initiator *initiator;
1727 char *str = buf;
1728 int rc = 1; 1727 int rc = 1;
1729 u32 ipv6_en, dhcp_en, ip_len; 1728 u32 ipv6_en, dhcp_en, ip_len;
1730 struct nvm_iscsi_block *block; 1729 struct nvm_iscsi_block *block;
@@ -1758,32 +1757,32 @@ static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf)
1758 1757
1759 switch (type) { 1758 switch (type) {
1760 case ISCSI_BOOT_ETH_IP_ADDR: 1759 case ISCSI_BOOT_ETH_IP_ADDR:
1761 rc = snprintf(str, ip_len, fmt, ip); 1760 rc = snprintf(buf, ip_len, fmt, ip);
1762 break; 1761 break;
1763 case ISCSI_BOOT_ETH_SUBNET_MASK: 1762 case ISCSI_BOOT_ETH_SUBNET_MASK:
1764 rc = snprintf(str, ip_len, fmt, sub); 1763 rc = snprintf(buf, ip_len, fmt, sub);
1765 break; 1764 break;
1766 case ISCSI_BOOT_ETH_GATEWAY: 1765 case ISCSI_BOOT_ETH_GATEWAY:
1767 rc = snprintf(str, ip_len, fmt, gw); 1766 rc = snprintf(buf, ip_len, fmt, gw);
1768 break; 1767 break;
1769 case ISCSI_BOOT_ETH_FLAGS: 1768 case ISCSI_BOOT_ETH_FLAGS:
1770 rc = snprintf(str, 3, "%hhd\n", 1769 rc = snprintf(buf, 3, "%hhd\n",
1771 SYSFS_FLAG_FW_SEL_BOOT); 1770 SYSFS_FLAG_FW_SEL_BOOT);
1772 break; 1771 break;
1773 case ISCSI_BOOT_ETH_INDEX: 1772 case ISCSI_BOOT_ETH_INDEX:
1774 rc = snprintf(str, 3, "0\n"); 1773 rc = snprintf(buf, 3, "0\n");
1775 break; 1774 break;
1776 case ISCSI_BOOT_ETH_MAC: 1775 case ISCSI_BOOT_ETH_MAC:
1777 rc = sysfs_format_mac(str, qedi->mac, ETH_ALEN); 1776 rc = sysfs_format_mac(buf, qedi->mac, ETH_ALEN);
1778 break; 1777 break;
1779 case ISCSI_BOOT_ETH_VLAN: 1778 case ISCSI_BOOT_ETH_VLAN:
1780 rc = snprintf(str, 12, "%d\n", 1779 rc = snprintf(buf, 12, "%d\n",
1781 GET_FIELD2(initiator->generic_cont0, 1780 GET_FIELD2(initiator->generic_cont0,
1782 NVM_ISCSI_CFG_INITIATOR_VLAN)); 1781 NVM_ISCSI_CFG_INITIATOR_VLAN));
1783 break; 1782 break;
1784 case ISCSI_BOOT_ETH_ORIGIN: 1783 case ISCSI_BOOT_ETH_ORIGIN:
1785 if (dhcp_en) 1784 if (dhcp_en)
1786 rc = snprintf(str, 3, "3\n"); 1785 rc = snprintf(buf, 3, "3\n");
1787 break; 1786 break;
1788 default: 1787 default:
1789 rc = 0; 1788 rc = 0;
@@ -1819,7 +1818,6 @@ static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf)
1819{ 1818{
1820 struct qedi_ctx *qedi = data; 1819 struct qedi_ctx *qedi = data;
1821 struct nvm_iscsi_initiator *initiator; 1820 struct nvm_iscsi_initiator *initiator;
1822 char *str = buf;
1823 int rc; 1821 int rc;
1824 struct nvm_iscsi_block *block; 1822 struct nvm_iscsi_block *block;
1825 1823
@@ -1831,8 +1829,8 @@ static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf)
1831 1829
1832 switch (type) { 1830 switch (type) {
1833 case ISCSI_BOOT_INI_INITIATOR_NAME: 1831 case ISCSI_BOOT_INI_INITIATOR_NAME:
1834 rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n", 1832 rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN,
1835 initiator->initiator_name.byte); 1833 initiator->initiator_name.byte);
1836 break; 1834 break;
1837 default: 1835 default:
1838 rc = 0; 1836 rc = 0;
@@ -1860,7 +1858,6 @@ static ssize_t
1860qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type, 1858qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
1861 char *buf, enum qedi_nvm_tgts idx) 1859 char *buf, enum qedi_nvm_tgts idx)
1862{ 1860{
1863 char *str = buf;
1864 int rc = 1; 1861 int rc = 1;
1865 u32 ctrl_flags, ipv6_en, chap_en, mchap_en, ip_len; 1862 u32 ctrl_flags, ipv6_en, chap_en, mchap_en, ip_len;
1866 struct nvm_iscsi_block *block; 1863 struct nvm_iscsi_block *block;
@@ -1899,48 +1896,48 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
1899 1896
1900 switch (type) { 1897 switch (type) {
1901 case ISCSI_BOOT_TGT_NAME: 1898 case ISCSI_BOOT_TGT_NAME:
1902 rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n", 1899 rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN,
1903 block->target[idx].target_name.byte); 1900 block->target[idx].target_name.byte);
1904 break; 1901 break;
1905 case ISCSI_BOOT_TGT_IP_ADDR: 1902 case ISCSI_BOOT_TGT_IP_ADDR:
1906 if (ipv6_en) 1903 if (ipv6_en)
1907 rc = snprintf(str, ip_len, "%pI6\n", 1904 rc = snprintf(buf, ip_len, "%pI6\n",
1908 block->target[idx].ipv6_addr.byte); 1905 block->target[idx].ipv6_addr.byte);
1909 else 1906 else
1910 rc = snprintf(str, ip_len, "%pI4\n", 1907 rc = snprintf(buf, ip_len, "%pI4\n",
1911 block->target[idx].ipv4_addr.byte); 1908 block->target[idx].ipv4_addr.byte);
1912 break; 1909 break;
1913 case ISCSI_BOOT_TGT_PORT: 1910 case ISCSI_BOOT_TGT_PORT:
1914 rc = snprintf(str, 12, "%d\n", 1911 rc = snprintf(buf, 12, "%d\n",
1915 GET_FIELD2(block->target[idx].generic_cont0, 1912 GET_FIELD2(block->target[idx].generic_cont0,
1916 NVM_ISCSI_CFG_TARGET_TCP_PORT)); 1913 NVM_ISCSI_CFG_TARGET_TCP_PORT));
1917 break; 1914 break;
1918 case ISCSI_BOOT_TGT_LUN: 1915 case ISCSI_BOOT_TGT_LUN:
1919 rc = snprintf(str, 22, "%.*d\n", 1916 rc = snprintf(buf, 22, "%.*d\n",
1920 block->target[idx].lun.value[1], 1917 block->target[idx].lun.value[1],
1921 block->target[idx].lun.value[0]); 1918 block->target[idx].lun.value[0]);
1922 break; 1919 break;
1923 case ISCSI_BOOT_TGT_CHAP_NAME: 1920 case ISCSI_BOOT_TGT_CHAP_NAME:
1924 rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n", 1921 rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
1925 chap_name); 1922 chap_name);
1926 break; 1923 break;
1927 case ISCSI_BOOT_TGT_CHAP_SECRET: 1924 case ISCSI_BOOT_TGT_CHAP_SECRET:
1928 rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n", 1925 rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
1929 chap_secret); 1926 chap_secret);
1930 break; 1927 break;
1931 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 1928 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
1932 rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n", 1929 rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
1933 mchap_name); 1930 mchap_name);
1934 break; 1931 break;
1935 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 1932 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
1936 rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n", 1933 rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
1937 mchap_secret); 1934 mchap_secret);
1938 break; 1935 break;
1939 case ISCSI_BOOT_TGT_FLAGS: 1936 case ISCSI_BOOT_TGT_FLAGS:
1940 rc = snprintf(str, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT); 1937 rc = snprintf(buf, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);
1941 break; 1938 break;
1942 case ISCSI_BOOT_TGT_NIC_ASSOC: 1939 case ISCSI_BOOT_TGT_NIC_ASSOC:
1943 rc = snprintf(str, 3, "0\n"); 1940 rc = snprintf(buf, 3, "0\n");
1944 break; 1941 break;
1945 default: 1942 default:
1946 rc = 0; 1943 rc = 0;
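
Several qedi hunks above replace snprintf(buf, LEN, "%s\n", src) with sprintf(buf, "%.*s\n", LEN, src) and drop the redundant str alias of buf. The difference matters when the source field need not be NUL-terminated: a "%.*s" precision caps how many bytes are read from the source, whereas the old form only capped the output and still required a terminated string. A minimal demonstration with a made-up field width:

#include <stdio.h>

#define NAME_MAX_LEN 8  /* stand-in for NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN */

int main(void)
{
        /* A completely full field with no terminating NUL, as NVRAM may return. */
        char raw[NAME_MAX_LEN] = { 'i', 'q', 'n', '.', 't', 'e', 's', 't' };
        char out[NAME_MAX_LEN + 2];

        /* "%.*s" reads at most NAME_MAX_LEN bytes, stopping early at any NUL. */
        int n = sprintf(out, "%.*s\n", NAME_MAX_LEN, raw);

        printf("wrote %d bytes: %s", n, out);
        return 0;
}
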
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index aececf664654..2dea1129d396 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -59,8 +59,6 @@ qla2x00_sp_timeout(struct timer_list *t)
59 req->outstanding_cmds[sp->handle] = NULL; 59 req->outstanding_cmds[sp->handle] = NULL;
60 iocb = &sp->u.iocb_cmd; 60 iocb = &sp->u.iocb_cmd;
61 iocb->timeout(sp); 61 iocb->timeout(sp);
62 if (sp->type != SRB_ELS_DCMD)
63 sp->free(sp);
64 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); 62 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
65} 63}
66 64
@@ -102,7 +100,6 @@ qla2x00_async_iocb_timeout(void *data)
102 srb_t *sp = data; 100 srb_t *sp = data;
103 fc_port_t *fcport = sp->fcport; 101 fc_port_t *fcport = sp->fcport;
104 struct srb_iocb *lio = &sp->u.iocb_cmd; 102 struct srb_iocb *lio = &sp->u.iocb_cmd;
105 struct event_arg ea;
106 103
107 if (fcport) { 104 if (fcport) {
108 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, 105 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
@@ -117,25 +114,13 @@ qla2x00_async_iocb_timeout(void *data)
117 114
118 switch (sp->type) { 115 switch (sp->type) {
119 case SRB_LOGIN_CMD: 116 case SRB_LOGIN_CMD:
120 if (!fcport)
121 break;
122 /* Retry as needed. */ 117 /* Retry as needed. */
123 lio->u.logio.data[0] = MBS_COMMAND_ERROR; 118 lio->u.logio.data[0] = MBS_COMMAND_ERROR;
124 lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 119 lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
125 QLA_LOGIO_LOGIN_RETRIED : 0; 120 QLA_LOGIO_LOGIN_RETRIED : 0;
126 memset(&ea, 0, sizeof(ea)); 121 sp->done(sp, QLA_FUNCTION_TIMEOUT);
127 ea.event = FCME_PLOGI_DONE;
128 ea.fcport = sp->fcport;
129 ea.data[0] = lio->u.logio.data[0];
130 ea.data[1] = lio->u.logio.data[1];
131 ea.sp = sp;
132 qla24xx_handle_plogi_done_event(fcport->vha, &ea);
133 break; 122 break;
134 case SRB_LOGOUT_CMD: 123 case SRB_LOGOUT_CMD:
135 if (!fcport)
136 break;
137 qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
138 break;
139 case SRB_CT_PTHRU_CMD: 124 case SRB_CT_PTHRU_CMD:
140 case SRB_MB_IOCB: 125 case SRB_MB_IOCB:
141 case SRB_NACK_PLOGI: 126 case SRB_NACK_PLOGI:
@@ -235,12 +220,10 @@ static void
235qla2x00_async_logout_sp_done(void *ptr, int res) 220qla2x00_async_logout_sp_done(void *ptr, int res)
236{ 221{
237 srb_t *sp = ptr; 222 srb_t *sp = ptr;
238 struct srb_iocb *lio = &sp->u.iocb_cmd;
239 223
240 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); 224 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
241 if (!test_bit(UNLOADING, &sp->vha->dpc_flags)) 225 sp->fcport->login_gen++;
242 qla2x00_post_async_logout_done_work(sp->vha, sp->fcport, 226 qlt_logo_completion_handler(sp->fcport, res);
243 lio->u.logio.data);
244 sp->free(sp); 227 sp->free(sp);
245} 228}
246 229
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 1b62e943ec49..8d00d559bd26 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -3275,12 +3275,11 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3275 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); 3275 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3276 abt_iocb->entry_type = ABORT_IOCB_TYPE; 3276 abt_iocb->entry_type = ABORT_IOCB_TYPE;
3277 abt_iocb->entry_count = 1; 3277 abt_iocb->entry_count = 1;
3278 abt_iocb->handle = 3278 abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3279 cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
3280 aio->u.abt.cmd_hndl));
3281 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3279 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3282 abt_iocb->handle_to_abort = 3280 abt_iocb->handle_to_abort =
3283 cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl)); 3281 cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
3282 aio->u.abt.cmd_hndl));
3284 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; 3283 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3285 abt_iocb->port_id[1] = sp->fcport->d_id.b.area; 3284 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3286 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain; 3285 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 14109d86c3f6..89f93ebd819d 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -272,7 +272,8 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
272 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 272 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
273 273
274 /* Read all mbox registers? */ 274 /* Read all mbox registers? */
275 mboxes = (1 << ha->mbx_count) - 1; 275 WARN_ON_ONCE(ha->mbx_count > 32);
276 mboxes = (1ULL << ha->mbx_count) - 1;
276 if (!ha->mcp) 277 if (!ha->mcp)
277 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n"); 278 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
278 else 279 else
@@ -2880,7 +2881,8 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
2880 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2881 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2881 2882
2882 /* Read all mbox registers? */ 2883 /* Read all mbox registers? */
2883 mboxes = (1 << ha->mbx_count) - 1; 2884 WARN_ON_ONCE(ha->mbx_count > 32);
2885 mboxes = (1ULL << ha->mbx_count) - 1;
2884 if (!ha->mcp) 2886 if (!ha->mcp)
2885 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n"); 2887 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
2886 else 2888 else
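
Both qla2xxx hunks above fix the same overflow: with mbx_count equal to 32, (1 << 32) on a 32-bit int is undefined behaviour, so the mask is now built from 1ULL behind a WARN_ON_ONCE guard. The arithmetic in isolation — note the sketch clamps where the driver only warns and proceeds:

#include <stdio.h>
#include <stdint.h>

/* Build an n-bit mask without shifting a 32-bit value out of range. */
static uint32_t mbox_mask(unsigned int count)
{
        if (count > 32)         /* driver: WARN_ON_ONCE(ha->mbx_count > 32) */
                count = 32;
        return (uint32_t)((1ULL << count) - 1);
}

int main(void)
{
        printf("mask(8)  = 0x%08x\n", mbox_mask(8));   /* 0x000000ff */
        printf("mask(32) = 0x%08x\n", mbox_mask(32));  /* 0xffffffff */
        return 0;
}
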
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 12ee6e02d146..afcb5567998a 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3625,6 +3625,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
3625 } 3625 }
3626 qla2x00_wait_for_hba_ready(base_vha); 3626 qla2x00_wait_for_hba_ready(base_vha);
3627 3627
3628 qla2x00_wait_for_sess_deletion(base_vha);
3629
3628 /* 3630 /*
3629 * if UNLOAD flag is already set, then continue unload, 3631 * if UNLOAD flag is already set, then continue unload,
3630 * where it was set first. 3632 * where it was set first.
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index fc89af8fe256..896b2d8bd803 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -4871,8 +4871,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4871 sess); 4871 sess);
4872 qlt_send_term_imm_notif(vha, iocb, 1); 4872 qlt_send_term_imm_notif(vha, iocb, 1);
4873 res = 0; 4873 res = 0;
4874 spin_lock_irqsave(&tgt->ha->tgt.sess_lock,
4875 flags);
4876 break; 4874 break;
4877 } 4875 }
4878 4876
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index fc233717355f..817f312023a9 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -168,6 +168,8 @@
168#define DEV_DB_NON_PERSISTENT 0 168#define DEV_DB_NON_PERSISTENT 0
169#define DEV_DB_PERSISTENT 1 169#define DEV_DB_PERSISTENT 1
170 170
171#define QL4_ISP_REG_DISCONNECT 0xffffffffU
172
171#define COPY_ISID(dst_isid, src_isid) { \ 173#define COPY_ISID(dst_isid, src_isid) { \
172 int i, j; \ 174 int i, j; \
173 for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;) \ 175 for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;) \
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 82e889bbe0ed..fc2c97d9a0d6 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -262,6 +262,24 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
262 262
263static struct scsi_transport_template *qla4xxx_scsi_transport; 263static struct scsi_transport_template *qla4xxx_scsi_transport;
264 264
265static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha)
266{
267 u32 reg_val = 0;
268 int rval = QLA_SUCCESS;
269
270 if (is_qla8022(ha))
271 reg_val = readl(&ha->qla4_82xx_reg->host_status);
272 else if (is_qla8032(ha) || is_qla8042(ha))
273 reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
274 else
275 reg_val = readw(&ha->reg->ctrl_status);
276
277 if (reg_val == QL4_ISP_REG_DISCONNECT)
278 rval = QLA_ERROR;
279
280 return rval;
281}
282
265static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num, 283static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
266 uint32_t iface_type, uint32_t payload_size, 284 uint32_t iface_type, uint32_t payload_size,
267 uint32_t pid, struct sockaddr *dst_addr) 285 uint32_t pid, struct sockaddr *dst_addr)
@@ -9186,10 +9204,17 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
9186 struct srb *srb = NULL; 9204 struct srb *srb = NULL;
9187 int ret = SUCCESS; 9205 int ret = SUCCESS;
9188 int wait = 0; 9206 int wait = 0;
9207 int rval;
9189 9208
9190 ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n", 9209 ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n",
9191 ha->host_no, id, lun, cmd, cmd->cmnd[0]); 9210 ha->host_no, id, lun, cmd, cmd->cmnd[0]);
9192 9211
9212 rval = qla4xxx_isp_check_reg(ha);
9213 if (rval != QLA_SUCCESS) {
9214 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
9215 return FAILED;
9216 }
9217
9193 spin_lock_irqsave(&ha->hardware_lock, flags); 9218 spin_lock_irqsave(&ha->hardware_lock, flags);
9194 srb = (struct srb *) CMD_SP(cmd); 9219 srb = (struct srb *) CMD_SP(cmd);
9195 if (!srb) { 9220 if (!srb) {
@@ -9241,6 +9266,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
9241 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 9266 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
9242 struct ddb_entry *ddb_entry = cmd->device->hostdata; 9267 struct ddb_entry *ddb_entry = cmd->device->hostdata;
9243 int ret = FAILED, stat; 9268 int ret = FAILED, stat;
9269 int rval;
9244 9270
9245 if (!ddb_entry) 9271 if (!ddb_entry)
9246 return ret; 9272 return ret;
@@ -9260,6 +9286,12 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
9260 cmd, jiffies, cmd->request->timeout / HZ, 9286 cmd, jiffies, cmd->request->timeout / HZ,
9261 ha->dpc_flags, cmd->result, cmd->allowed)); 9287 ha->dpc_flags, cmd->result, cmd->allowed));
9262 9288
9289 rval = qla4xxx_isp_check_reg(ha);
9290 if (rval != QLA_SUCCESS) {
9291 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
9292 return FAILED;
9293 }
9294
9263 /* FIXME: wait for hba to go online */ 9295 /* FIXME: wait for hba to go online */
9264 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun); 9296 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
9265 if (stat != QLA_SUCCESS) { 9297 if (stat != QLA_SUCCESS) {
@@ -9303,6 +9335,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
9303 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 9335 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
9304 struct ddb_entry *ddb_entry = cmd->device->hostdata; 9336 struct ddb_entry *ddb_entry = cmd->device->hostdata;
9305 int stat, ret; 9337 int stat, ret;
9338 int rval;
9306 9339
9307 if (!ddb_entry) 9340 if (!ddb_entry)
9308 return FAILED; 9341 return FAILED;
@@ -9320,6 +9353,12 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
9320 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ, 9353 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
9321 ha->dpc_flags, cmd->result, cmd->allowed)); 9354 ha->dpc_flags, cmd->result, cmd->allowed));
9322 9355
9356 rval = qla4xxx_isp_check_reg(ha);
9357 if (rval != QLA_SUCCESS) {
9358 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
9359 return FAILED;
9360 }
9361
9323 stat = qla4xxx_reset_target(ha, ddb_entry); 9362 stat = qla4xxx_reset_target(ha, ddb_entry);
9324 if (stat != QLA_SUCCESS) { 9363 if (stat != QLA_SUCCESS) {
9325 starget_printk(KERN_INFO, scsi_target(cmd->device), 9364 starget_printk(KERN_INFO, scsi_target(cmd->device),
@@ -9374,9 +9413,16 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
9374{ 9413{
9375 int return_status = FAILED; 9414 int return_status = FAILED;
9376 struct scsi_qla_host *ha; 9415 struct scsi_qla_host *ha;
9416 int rval;
9377 9417
9378 ha = to_qla_host(cmd->device->host); 9418 ha = to_qla_host(cmd->device->host);
9379 9419
9420 rval = qla4xxx_isp_check_reg(ha);
9421 if (rval != QLA_SUCCESS) {
9422 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
9423 return FAILED;
9424 }
9425
9380 if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba) 9426 if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
9381 qla4_83xx_set_idc_dontreset(ha); 9427 qla4_83xx_set_idc_dontreset(ha);
9382 9428
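
The qla4xxx hunks above all gate the SCSI error handlers on qla4xxx_isp_check_reg(): a PCI read from a surprise-removed adapter returns all ones, so 0xffffffff in a status register (QL4_ISP_REG_DISCONNECT) means the hardware is gone and the handler should fail fast instead of issuing resets. The check in isolation:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REG_DISCONNECT 0xffffffffU  /* all ones: typical PCI master-abort value */

static bool adapter_present(const volatile uint32_t *host_status)
{
        return *host_status != REG_DISCONNECT;
}

int main(void)
{
        uint32_t live = 0x00001234, gone = REG_DISCONNECT;

        printf("live: %d, unplugged: %d\n",
               adapter_present(&live), adapter_present(&gone));
        return 0;
}
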
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 40fc7a590e81..6be5ab32c94f 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1657,7 +1657,7 @@ static struct scsi_host_template scsi_driver = {
1657 .eh_timed_out = storvsc_eh_timed_out, 1657 .eh_timed_out = storvsc_eh_timed_out,
1658 .slave_alloc = storvsc_device_alloc, 1658 .slave_alloc = storvsc_device_alloc,
1659 .slave_configure = storvsc_device_configure, 1659 .slave_configure = storvsc_device_configure,
1660 .cmd_per_lun = 255, 1660 .cmd_per_lun = 2048,
1661 .this_id = -1, 1661 .this_id = -1,
1662 .use_clustering = ENABLE_CLUSTERING, 1662 .use_clustering = ENABLE_CLUSTERING,
 1663 /* Make sure we don't get an sg segment that crosses a page boundary */ 1663 /* Make sure we don't get an sg segment that crosses a page boundary */
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index ca360daa6a25..378af306fda1 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -536,7 +536,7 @@ sym_getsync(struct sym_hcb *np, u_char dt, u_char sfac, u_char *divp, u_char *fa
536 * Look for the greatest clock divisor that allows an 536 * Look for the greatest clock divisor that allows an
537 * input speed faster than the period. 537 * input speed faster than the period.
538 */ 538 */
539 while (div-- > 0) 539 while (--div > 0)
540 if (kpc >= (div_10M[div] << 2)) break; 540 if (kpc >= (div_10M[div] << 2)) break;
541 541
542 /* 542 /*
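
The one-character change above is an iterator-underflow fix: with `while (div-- > 0)`, a search that never matches leaves div at -1, and the subsequent div_10M[div] lookup indexes before the table. Pre-decrement exits with div == 0 instead. A standalone reproduction of the corrected loop, using a made-up divisor table:

#include <stdio.h>

static const unsigned int div_10M[] = { 2, 3, 5, 7, 11, 13, 16 };

static int pick_divisor(unsigned int kpc)
{
        int div = sizeof(div_10M) / sizeof(div_10M[0]);

        /* Greatest divisor whose scaled clock still beats the period. */
        while (--div > 0)
                if (kpc >= (div_10M[div] << 2))
                        break;
        return div;  /* 0 at worst; never -1, unlike the post-decrement form */
}

int main(void)
{
        printf("kpc=100 -> index %d\n", pick_divisor(100));
        printf("kpc=1   -> index %d\n", pick_divisor(1));
        return 0;
}
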
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index a355d989b414..c7da2c185990 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -4352,6 +4352,8 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
4352 /* REPORT SUPPORTED OPERATION CODES is not supported */ 4352 /* REPORT SUPPORTED OPERATION CODES is not supported */
4353 sdev->no_report_opcodes = 1; 4353 sdev->no_report_opcodes = 1;
4354 4354
4355 /* WRITE_SAME command is not supported */
4356 sdev->no_write_same = 1;
4355 4357
4356 ufshcd_set_queue_depth(sdev); 4358 ufshcd_set_queue_depth(sdev);
4357 4359
diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
index 53f7275d6cbd..cfb42f5eccb2 100644
--- a/drivers/soc/imx/gpc.c
+++ b/drivers/soc/imx/gpc.c
@@ -348,7 +348,7 @@ static int imx_gpc_old_dt_init(struct device *dev, struct regmap *regmap,
348 if (i == 1) { 348 if (i == 1) {
349 domain->supply = devm_regulator_get(dev, "pu"); 349 domain->supply = devm_regulator_get(dev, "pu");
350 if (IS_ERR(domain->supply)) 350 if (IS_ERR(domain->supply))
351 return PTR_ERR(domain->supply);; 351 return PTR_ERR(domain->supply);
352 352
353 ret = imx_pgc_get_clocks(dev, domain); 353 ret = imx_pgc_get_clocks(dev, domain);
354 if (ret) 354 if (ret)
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index bbdc53b686dd..6dbba5aff191 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -702,30 +702,32 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
702 size_t pgstart, pgend; 702 size_t pgstart, pgend;
703 int ret = -EINVAL; 703 int ret = -EINVAL;
704 704
705 mutex_lock(&ashmem_mutex);
706
705 if (unlikely(!asma->file)) 707 if (unlikely(!asma->file))
706 return -EINVAL; 708 goto out_unlock;
707 709
708 if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) 710 if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) {
709 return -EFAULT; 711 ret = -EFAULT;
712 goto out_unlock;
713 }
710 714
711 /* per custom, you can pass zero for len to mean "everything onward" */ 715 /* per custom, you can pass zero for len to mean "everything onward" */
712 if (!pin.len) 716 if (!pin.len)
713 pin.len = PAGE_ALIGN(asma->size) - pin.offset; 717 pin.len = PAGE_ALIGN(asma->size) - pin.offset;
714 718
715 if (unlikely((pin.offset | pin.len) & ~PAGE_MASK)) 719 if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
716 return -EINVAL; 720 goto out_unlock;
717 721
718 if (unlikely(((__u32)-1) - pin.offset < pin.len)) 722 if (unlikely(((__u32)-1) - pin.offset < pin.len))
719 return -EINVAL; 723 goto out_unlock;
720 724
721 if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len)) 725 if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
722 return -EINVAL; 726 goto out_unlock;
723 727
724 pgstart = pin.offset / PAGE_SIZE; 728 pgstart = pin.offset / PAGE_SIZE;
725 pgend = pgstart + (pin.len / PAGE_SIZE) - 1; 729 pgend = pgstart + (pin.len / PAGE_SIZE) - 1;
726 730
727 mutex_lock(&ashmem_mutex);
728
729 switch (cmd) { 731 switch (cmd) {
730 case ASHMEM_PIN: 732 case ASHMEM_PIN:
731 ret = ashmem_pin(asma, pgstart, pgend); 733 ret = ashmem_pin(asma, pgstart, pgend);
@@ -738,6 +740,7 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
738 break; 740 break;
739 } 741 }
740 742
743out_unlock:
741 mutex_unlock(&ashmem_mutex); 744 mutex_unlock(&ashmem_mutex);
742 745
743 return ret; 746 return ret;
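
The ashmem hunk above closes a time-of-check/time-of-use race: asma->file and the pin ranges were previously validated before ashmem_mutex was taken, so a concurrent ioctl could change them in between. The patch takes the lock first and routes every exit through a single unlock label. A compact sketch of that shape, with a pthread mutex standing in for the kernel one:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct area {
        pthread_mutex_t lock;
        void *file;  /* stands in for asma->file */
};

static int pin_unpin(struct area *a)
{
        int ret = -EINVAL;

        pthread_mutex_lock(&a->lock);        /* lock before any check */
        if (!a->file)
                goto out_unlock;             /* state is read under the lock */
        /* ... validate offsets and lengths, then pin or unpin ... */
        ret = 0;
out_unlock:
        pthread_mutex_unlock(&a->lock);
        return ret;
}

int main(void)
{
        struct area a = { PTHREAD_MUTEX_INITIALIZER, NULL };

        printf("no file: %d\n", pin_unpin(&a));
        a.file = &a;
        printf("with file: %d\n", pin_unpin(&a));
        return 0;
}
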
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index 94e06925c712..49718c96bf9e 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -12,6 +12,7 @@
12#include <linux/err.h> 12#include <linux/err.h>
13#include <linux/cma.h> 13#include <linux/cma.h>
14#include <linux/scatterlist.h> 14#include <linux/scatterlist.h>
15#include <linux/highmem.h>
15 16
16#include "ion.h" 17#include "ion.h"
17 18
@@ -42,6 +43,22 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
42 if (!pages) 43 if (!pages)
43 return -ENOMEM; 44 return -ENOMEM;
44 45
46 if (PageHighMem(pages)) {
47 unsigned long nr_clear_pages = nr_pages;
48 struct page *page = pages;
49
50 while (nr_clear_pages > 0) {
51 void *vaddr = kmap_atomic(page);
52
53 memset(vaddr, 0, PAGE_SIZE);
54 kunmap_atomic(vaddr);
55 page++;
56 nr_clear_pages--;
57 }
58 } else {
59 memset(page_address(pages), 0, size);
60 }
61
45 table = kmalloc(sizeof(*table), GFP_KERNEL); 62 table = kmalloc(sizeof(*table), GFP_KERNEL);
46 if (!table) 63 if (!table)
47 goto err; 64 goto err;
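
The ion hunk above zeroes freshly allocated CMA pages, and does so one page at a time when the buffer lives in highmem, where there is no permanent linear mapping to hand to a single big memset. A userspace approximation of that loop — a flat buffer plays the mapped page, since kmap_atomic() has no userspace equivalent:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL

static void clear_pages(unsigned char *first_page, unsigned long nr_pages)
{
        unsigned char *page = first_page;

        while (nr_pages-- > 0) {
                /* kernel: vaddr = kmap_atomic(page); memset; kunmap_atomic(vaddr) */
                memset(page, 0, PAGE_SIZE);
                page += PAGE_SIZE;
        }
}

int main(void)
{
        unsigned long nr_pages = 4;
        unsigned char *buf = malloc(nr_pages * PAGE_SIZE);

        if (!buf)
                return 1;
        clear_pages(buf, nr_pages);
        printf("first byte: %u, last byte: %u\n",
               buf[0], buf[nr_pages * PAGE_SIZE - 1]);
        free(buf);
        return 0;
}
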
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 079a8410d664..df0499fc4802 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -141,6 +141,8 @@
141#define AD7192_GPOCON_P1DAT BIT(1) /* P1 state */ 141#define AD7192_GPOCON_P1DAT BIT(1) /* P1 state */
142#define AD7192_GPOCON_P0DAT BIT(0) /* P0 state */ 142#define AD7192_GPOCON_P0DAT BIT(0) /* P0 state */
143 143
144#define AD7192_EXT_FREQ_MHZ_MIN 2457600
145#define AD7192_EXT_FREQ_MHZ_MAX 5120000
144#define AD7192_INT_FREQ_MHZ 4915200 146#define AD7192_INT_FREQ_MHZ 4915200
145 147
146/* NOTE: 148/* NOTE:
@@ -218,6 +220,12 @@ static int ad7192_calibrate_all(struct ad7192_state *st)
218 ARRAY_SIZE(ad7192_calib_arr)); 220 ARRAY_SIZE(ad7192_calib_arr));
219} 221}
220 222
223static inline bool ad7192_valid_external_frequency(u32 freq)
224{
225 return (freq >= AD7192_EXT_FREQ_MHZ_MIN &&
226 freq <= AD7192_EXT_FREQ_MHZ_MAX);
227}
228
221static int ad7192_setup(struct ad7192_state *st, 229static int ad7192_setup(struct ad7192_state *st,
222 const struct ad7192_platform_data *pdata) 230 const struct ad7192_platform_data *pdata)
223{ 231{
@@ -243,17 +251,20 @@ static int ad7192_setup(struct ad7192_state *st,
243 id); 251 id);
244 252
245 switch (pdata->clock_source_sel) { 253 switch (pdata->clock_source_sel) {
246 case AD7192_CLK_EXT_MCLK1_2:
247 case AD7192_CLK_EXT_MCLK2:
248 st->mclk = AD7192_INT_FREQ_MHZ;
249 break;
250 case AD7192_CLK_INT: 254 case AD7192_CLK_INT:
251 case AD7192_CLK_INT_CO: 255 case AD7192_CLK_INT_CO:
252 if (pdata->ext_clk_hz) 256 st->mclk = AD7192_INT_FREQ_MHZ;
253 st->mclk = pdata->ext_clk_hz;
254 else
255 st->mclk = AD7192_INT_FREQ_MHZ;
256 break; 257 break;
258 case AD7192_CLK_EXT_MCLK1_2:
259 case AD7192_CLK_EXT_MCLK2:
260 if (ad7192_valid_external_frequency(pdata->ext_clk_hz)) {
261 st->mclk = pdata->ext_clk_hz;
262 break;
263 }
264 dev_err(&st->sd.spi->dev, "Invalid frequency setting %u\n",
265 pdata->ext_clk_hz);
266 ret = -EINVAL;
267 goto out;
257 default: 268 default:
258 ret = -EINVAL; 269 ret = -EINVAL;
259 goto out; 270 goto out;
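
The ad7192 hunk above stops trusting platform data blindly: an external master clock is only accepted inside the part's documented window, and the internal-clock cases no longer fall back to an unchecked ext_clk_hz. The validation in isolation — bounds copied from the patch's AD7192_EXT_FREQ_MHZ_MIN/MAX, whose values are in Hz despite the _MHZ in the names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXT_FREQ_MIN 2457600U   /* AD7192_EXT_FREQ_MHZ_MIN in the patch */
#define EXT_FREQ_MAX 5120000U   /* AD7192_EXT_FREQ_MHZ_MAX in the patch */

static bool valid_external_frequency(uint32_t freq)
{
        return freq >= EXT_FREQ_MIN && freq <= EXT_FREQ_MAX;
}

int main(void)
{
        printf("4.9152 MHz ok? %d\n", valid_external_frequency(4915200));
        printf("0 Hz ok?       %d\n", valid_external_frequency(0));
        return 0;
}
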
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 2b28fb9c0048..3bcf49466361 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -648,8 +648,6 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
648 /* Ring buffer functions - here trigger setup related */ 648 /* Ring buffer functions - here trigger setup related */
649 indio_dev->setup_ops = &ad5933_ring_setup_ops; 649 indio_dev->setup_ops = &ad5933_ring_setup_ops;
650 650
651 indio_dev->modes |= INDIO_BUFFER_HARDWARE;
652
653 return 0; 651 return 0;
654} 652}
655 653
@@ -762,7 +760,7 @@ static int ad5933_probe(struct i2c_client *client,
762 indio_dev->dev.parent = &client->dev; 760 indio_dev->dev.parent = &client->dev;
763 indio_dev->info = &ad5933_info; 761 indio_dev->info = &ad5933_info;
764 indio_dev->name = id->name; 762 indio_dev->name = id->name;
765 indio_dev->modes = INDIO_DIRECT_MODE; 763 indio_dev->modes = (INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE);
766 indio_dev->channels = ad5933_channels; 764 indio_dev->channels = ad5933_channels;
767 indio_dev->num_channels = ARRAY_SIZE(ad5933_channels); 765 indio_dev->num_channels = ARRAY_SIZE(ad5933_channels);
768 766
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index f699abab1787..148f3ee70286 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -19,6 +19,12 @@ config USB_EHCI_BIG_ENDIAN_MMIO
19config USB_EHCI_BIG_ENDIAN_DESC 19config USB_EHCI_BIG_ENDIAN_DESC
20 bool 20 bool
21 21
22config USB_UHCI_BIG_ENDIAN_MMIO
23 bool
24
25config USB_UHCI_BIG_ENDIAN_DESC
26 bool
27
22menuconfig USB_SUPPORT 28menuconfig USB_SUPPORT
23 bool "USB support" 29 bool "USB support"
24 depends on HAS_IOMEM 30 depends on HAS_IOMEM
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 06b3b54a0e68..7b366a6c0b49 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -174,6 +174,7 @@ static int acm_wb_alloc(struct acm *acm)
174 wb = &acm->wb[wbn]; 174 wb = &acm->wb[wbn];
175 if (!wb->use) { 175 if (!wb->use) {
176 wb->use = 1; 176 wb->use = 1;
177 wb->len = 0;
177 return wbn; 178 return wbn;
178 } 179 }
179 wbn = (wbn + 1) % ACM_NW; 180 wbn = (wbn + 1) % ACM_NW;
@@ -805,16 +806,18 @@ static int acm_tty_write(struct tty_struct *tty,
805static void acm_tty_flush_chars(struct tty_struct *tty) 806static void acm_tty_flush_chars(struct tty_struct *tty)
806{ 807{
807 struct acm *acm = tty->driver_data; 808 struct acm *acm = tty->driver_data;
808 struct acm_wb *cur = acm->putbuffer; 809 struct acm_wb *cur;
809 int err; 810 int err;
810 unsigned long flags; 811 unsigned long flags;
811 812
813 spin_lock_irqsave(&acm->write_lock, flags);
814
815 cur = acm->putbuffer;
812 if (!cur) /* nothing to do */ 816 if (!cur) /* nothing to do */
813 return; 817 goto out;
814 818
815 acm->putbuffer = NULL; 819 acm->putbuffer = NULL;
816 err = usb_autopm_get_interface_async(acm->control); 820 err = usb_autopm_get_interface_async(acm->control);
817 spin_lock_irqsave(&acm->write_lock, flags);
818 if (err < 0) { 821 if (err < 0) {
819 cur->use = 0; 822 cur->use = 0;
820 acm->putbuffer = cur; 823 acm->putbuffer = cur;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 4024926c1d68..f4a548471f0f 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -226,6 +226,9 @@ static const struct usb_device_id usb_quirk_list[] = {
226 { USB_DEVICE(0x1a0a, 0x0200), .driver_info = 226 { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
227 USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, 227 USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
228 228
229 /* Corsair K70 RGB */
230 { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
231
229 /* Corsair Strafe RGB */ 232 /* Corsair Strafe RGB */
230 { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT }, 233 { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
231 234
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index e4c3ce0de5de..5bcad1d869b5 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -1917,7 +1917,9 @@ static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg,
 1917 /* No specific buffer needed for ep0 ZLP */ 1917 /* No specific buffer needed for ep0 ZLP */
1918 dma_addr_t dma = hs_ep->desc_list_dma; 1918 dma_addr_t dma = hs_ep->desc_list_dma;
1919 1919
1920 dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep); 1920 if (!index)
1921 dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep);
1922
1921 dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0); 1923 dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0);
1922 } else { 1924 } else {
1923 dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) | 1925 dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
@@ -2974,9 +2976,13 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
2974 if (ints & DXEPINT_STSPHSERCVD) { 2976 if (ints & DXEPINT_STSPHSERCVD) {
2975 dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__); 2977 dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__);
2976 2978
2977 /* Move to STATUS IN for DDMA */ 2979 /* Safety check EP0 state when STSPHSERCVD asserted */
2978 if (using_desc_dma(hsotg)) 2980 if (hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
2979 dwc2_hsotg_ep0_zlp(hsotg, true); 2981 /* Move to STATUS IN for DDMA */
2982 if (using_desc_dma(hsotg))
2983 dwc2_hsotg_ep0_zlp(hsotg, true);
2984 }
2985
2980 } 2986 }
2981 2987
2982 if (ints & DXEPINT_BACK2BACKSETUP) 2988 if (ints & DXEPINT_BACK2BACKSETUP)
@@ -3375,12 +3381,6 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
3375 dwc2_writel(dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) | 3381 dwc2_writel(dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
3376 DXEPCTL_USBACTEP, hsotg->regs + DIEPCTL0); 3382 DXEPCTL_USBACTEP, hsotg->regs + DIEPCTL0);
3377 3383
3378 dwc2_hsotg_enqueue_setup(hsotg);
3379
3380 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
3381 dwc2_readl(hsotg->regs + DIEPCTL0),
3382 dwc2_readl(hsotg->regs + DOEPCTL0));
3383
3384 /* clear global NAKs */ 3384 /* clear global NAKs */
3385 val = DCTL_CGOUTNAK | DCTL_CGNPINNAK; 3385 val = DCTL_CGOUTNAK | DCTL_CGNPINNAK;
3386 if (!is_usb_reset) 3386 if (!is_usb_reset)
@@ -3391,6 +3391,12 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
3391 mdelay(3); 3391 mdelay(3);
3392 3392
3393 hsotg->lx_state = DWC2_L0; 3393 hsotg->lx_state = DWC2_L0;
3394
3395 dwc2_hsotg_enqueue_setup(hsotg);
3396
3397 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
3398 dwc2_readl(hsotg->regs + DIEPCTL0),
3399 dwc2_readl(hsotg->regs + DOEPCTL0));
3394} 3400}
3395 3401
3396static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg) 3402static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index ade2ab00d37a..f1d838a4acd6 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -100,6 +100,8 @@ static void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
100 reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG)); 100 reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
101 reg |= DWC3_GCTL_PRTCAPDIR(mode); 101 reg |= DWC3_GCTL_PRTCAPDIR(mode);
102 dwc3_writel(dwc->regs, DWC3_GCTL, reg); 102 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
103
104 dwc->current_dr_role = mode;
103} 105}
104 106
105static void __dwc3_set_mode(struct work_struct *work) 107static void __dwc3_set_mode(struct work_struct *work)
@@ -133,8 +135,6 @@ static void __dwc3_set_mode(struct work_struct *work)
133 135
134 dwc3_set_prtcap(dwc, dwc->desired_dr_role); 136 dwc3_set_prtcap(dwc, dwc->desired_dr_role);
135 137
136 dwc->current_dr_role = dwc->desired_dr_role;
137
138 spin_unlock_irqrestore(&dwc->lock, flags); 138 spin_unlock_irqrestore(&dwc->lock, flags);
139 139
140 switch (dwc->desired_dr_role) { 140 switch (dwc->desired_dr_role) {
@@ -219,7 +219,7 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
219 * XHCI driver will reset the host block. If dwc3 was configured for 219 * XHCI driver will reset the host block. If dwc3 was configured for
220 * host-only mode, then we can return early. 220 * host-only mode, then we can return early.
221 */ 221 */
222 if (dwc->dr_mode == USB_DR_MODE_HOST) 222 if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
223 return 0; 223 return 0;
224 224
225 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 225 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
@@ -234,6 +234,9 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
234 udelay(1); 234 udelay(1);
235 } while (--retries); 235 } while (--retries);
236 236
237 phy_exit(dwc->usb3_generic_phy);
238 phy_exit(dwc->usb2_generic_phy);
239
237 return -ETIMEDOUT; 240 return -ETIMEDOUT;
238} 241}
239 242
@@ -483,6 +486,22 @@ static void dwc3_cache_hwparams(struct dwc3 *dwc)
483 parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8); 486 parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8);
484} 487}
485 488
489static int dwc3_core_ulpi_init(struct dwc3 *dwc)
490{
491 int intf;
492 int ret = 0;
493
494 intf = DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3);
495
496 if (intf == DWC3_GHWPARAMS3_HSPHY_IFC_ULPI ||
497 (intf == DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI &&
498 dwc->hsphy_interface &&
499 !strncmp(dwc->hsphy_interface, "ulpi", 4)))
500 ret = dwc3_ulpi_init(dwc);
501
502 return ret;
503}
504
486/** 505/**
487 * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core 506 * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core
488 * @dwc: Pointer to our controller context structure 507 * @dwc: Pointer to our controller context structure
@@ -494,7 +513,6 @@ static void dwc3_cache_hwparams(struct dwc3 *dwc)
494static int dwc3_phy_setup(struct dwc3 *dwc) 513static int dwc3_phy_setup(struct dwc3 *dwc)
495{ 514{
496 u32 reg; 515 u32 reg;
497 int ret;
498 516
499 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); 517 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
500 518
@@ -565,9 +583,6 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
565 } 583 }
566 /* FALLTHROUGH */ 584 /* FALLTHROUGH */
567 case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI: 585 case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI:
568 ret = dwc3_ulpi_init(dwc);
569 if (ret)
570 return ret;
571 /* FALLTHROUGH */ 586 /* FALLTHROUGH */
572 default: 587 default:
573 break; 588 break;
@@ -724,6 +739,7 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc)
724} 739}
725 740
726static int dwc3_core_get_phy(struct dwc3 *dwc); 741static int dwc3_core_get_phy(struct dwc3 *dwc);
742static int dwc3_core_ulpi_init(struct dwc3 *dwc);
727 743
728/** 744/**
729 * dwc3_core_init - Low-level initialization of DWC3 Core 745 * dwc3_core_init - Low-level initialization of DWC3 Core
@@ -755,17 +771,27 @@ static int dwc3_core_init(struct dwc3 *dwc)
755 dwc->maximum_speed = USB_SPEED_HIGH; 771 dwc->maximum_speed = USB_SPEED_HIGH;
756 } 772 }
757 773
758 ret = dwc3_core_get_phy(dwc); 774 ret = dwc3_phy_setup(dwc);
759 if (ret) 775 if (ret)
760 goto err0; 776 goto err0;
761 777
762 ret = dwc3_core_soft_reset(dwc); 778 if (!dwc->ulpi_ready) {
763 if (ret) 779 ret = dwc3_core_ulpi_init(dwc);
764 goto err0; 780 if (ret)
781 goto err0;
782 dwc->ulpi_ready = true;
783 }
765 784
766 ret = dwc3_phy_setup(dwc); 785 if (!dwc->phys_ready) {
786 ret = dwc3_core_get_phy(dwc);
787 if (ret)
788 goto err0a;
789 dwc->phys_ready = true;
790 }
791
792 ret = dwc3_core_soft_reset(dwc);
767 if (ret) 793 if (ret)
768 goto err0; 794 goto err0a;
769 795
770 dwc3_core_setup_global_control(dwc); 796 dwc3_core_setup_global_control(dwc);
771 dwc3_core_num_eps(dwc); 797 dwc3_core_num_eps(dwc);
@@ -838,6 +864,9 @@ err1:
838 phy_exit(dwc->usb2_generic_phy); 864 phy_exit(dwc->usb2_generic_phy);
839 phy_exit(dwc->usb3_generic_phy); 865 phy_exit(dwc->usb3_generic_phy);
840 866
867err0a:
868 dwc3_ulpi_exit(dwc);
869
841err0: 870err0:
842 return ret; 871 return ret;
843} 872}
@@ -916,7 +945,6 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
916 945
917 switch (dwc->dr_mode) { 946 switch (dwc->dr_mode) {
918 case USB_DR_MODE_PERIPHERAL: 947 case USB_DR_MODE_PERIPHERAL:
919 dwc->current_dr_role = DWC3_GCTL_PRTCAP_DEVICE;
920 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE); 948 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
921 949
922 if (dwc->usb2_phy) 950 if (dwc->usb2_phy)
@@ -932,7 +960,6 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
932 } 960 }
933 break; 961 break;
934 case USB_DR_MODE_HOST: 962 case USB_DR_MODE_HOST:
935 dwc->current_dr_role = DWC3_GCTL_PRTCAP_HOST;
936 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST); 963 dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
937 964
938 if (dwc->usb2_phy) 965 if (dwc->usb2_phy)
@@ -1234,7 +1261,6 @@ err4:
1234 1261
1235err3: 1262err3:
1236 dwc3_free_event_buffers(dwc); 1263 dwc3_free_event_buffers(dwc);
1237 dwc3_ulpi_exit(dwc);
1238 1264
1239err2: 1265err2:
1240 pm_runtime_allow(&pdev->dev); 1266 pm_runtime_allow(&pdev->dev);
@@ -1284,7 +1310,7 @@ static int dwc3_remove(struct platform_device *pdev)
1284} 1310}
1285 1311
1286#ifdef CONFIG_PM 1312#ifdef CONFIG_PM
1287static int dwc3_suspend_common(struct dwc3 *dwc) 1313static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
1288{ 1314{
1289 unsigned long flags; 1315 unsigned long flags;
1290 1316
@@ -1296,6 +1322,10 @@ static int dwc3_suspend_common(struct dwc3 *dwc)
1296 dwc3_core_exit(dwc); 1322 dwc3_core_exit(dwc);
1297 break; 1323 break;
1298 case DWC3_GCTL_PRTCAP_HOST: 1324 case DWC3_GCTL_PRTCAP_HOST:
1325 /* do nothing during host runtime_suspend */
1326 if (!PMSG_IS_AUTO(msg))
1327 dwc3_core_exit(dwc);
1328 break;
1299 default: 1329 default:
1300 /* do nothing */ 1330 /* do nothing */
1301 break; 1331 break;
@@ -1304,7 +1334,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc)
1304 return 0; 1334 return 0;
1305} 1335}
1306 1336
1307static int dwc3_resume_common(struct dwc3 *dwc) 1337static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
1308{ 1338{
1309 unsigned long flags; 1339 unsigned long flags;
1310 int ret; 1340 int ret;
@@ -1320,6 +1350,13 @@ static int dwc3_resume_common(struct dwc3 *dwc)
1320 spin_unlock_irqrestore(&dwc->lock, flags); 1350 spin_unlock_irqrestore(&dwc->lock, flags);
1321 break; 1351 break;
1322 case DWC3_GCTL_PRTCAP_HOST: 1352 case DWC3_GCTL_PRTCAP_HOST:
1353 /* nothing to do on host runtime_resume */
1354 if (!PMSG_IS_AUTO(msg)) {
1355 ret = dwc3_core_init(dwc);
1356 if (ret)
1357 return ret;
1358 }
1359 break;
1323 default: 1360 default:
1324 /* do nothing */ 1361 /* do nothing */
1325 break; 1362 break;
@@ -1331,12 +1368,11 @@ static int dwc3_resume_common(struct dwc3 *dwc)
1331static int dwc3_runtime_checks(struct dwc3 *dwc) 1368static int dwc3_runtime_checks(struct dwc3 *dwc)
1332{ 1369{
1333 switch (dwc->current_dr_role) { 1370 switch (dwc->current_dr_role) {
1334 case USB_DR_MODE_PERIPHERAL: 1371 case DWC3_GCTL_PRTCAP_DEVICE:
1335 case USB_DR_MODE_OTG:
1336 if (dwc->connected) 1372 if (dwc->connected)
1337 return -EBUSY; 1373 return -EBUSY;
1338 break; 1374 break;
1339 case USB_DR_MODE_HOST: 1375 case DWC3_GCTL_PRTCAP_HOST:
1340 default: 1376 default:
1341 /* do nothing */ 1377 /* do nothing */
1342 break; 1378 break;
@@ -1353,7 +1389,7 @@ static int dwc3_runtime_suspend(struct device *dev)
1353 if (dwc3_runtime_checks(dwc)) 1389 if (dwc3_runtime_checks(dwc))
1354 return -EBUSY; 1390 return -EBUSY;
1355 1391
1356 ret = dwc3_suspend_common(dwc); 1392 ret = dwc3_suspend_common(dwc, PMSG_AUTO_SUSPEND);
1357 if (ret) 1393 if (ret)
1358 return ret; 1394 return ret;
1359 1395
@@ -1369,7 +1405,7 @@ static int dwc3_runtime_resume(struct device *dev)
1369 1405
1370 device_init_wakeup(dev, false); 1406 device_init_wakeup(dev, false);
1371 1407
1372 ret = dwc3_resume_common(dwc); 1408 ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME);
1373 if (ret) 1409 if (ret)
1374 return ret; 1410 return ret;
1375 1411
@@ -1416,7 +1452,7 @@ static int dwc3_suspend(struct device *dev)
1416 struct dwc3 *dwc = dev_get_drvdata(dev); 1452 struct dwc3 *dwc = dev_get_drvdata(dev);
1417 int ret; 1453 int ret;
1418 1454
1419 ret = dwc3_suspend_common(dwc); 1455 ret = dwc3_suspend_common(dwc, PMSG_SUSPEND);
1420 if (ret) 1456 if (ret)
1421 return ret; 1457 return ret;
1422 1458
@@ -1432,7 +1468,7 @@ static int dwc3_resume(struct device *dev)
1432 1468
1433 pinctrl_pm_select_default_state(dev); 1469 pinctrl_pm_select_default_state(dev);
1434 1470
1435 ret = dwc3_resume_common(dwc); 1471 ret = dwc3_resume_common(dwc, PMSG_RESUME);
1436 if (ret) 1472 if (ret)
1437 return ret; 1473 return ret;
1438 1474
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 03c7aaaac926..860d2bc184d1 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -158,13 +158,15 @@
158#define DWC3_GDBGFIFOSPACE_TYPE(n) (((n) << 5) & 0x1e0) 158#define DWC3_GDBGFIFOSPACE_TYPE(n) (((n) << 5) & 0x1e0)
159#define DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(n) (((n) >> 16) & 0xffff) 159#define DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(n) (((n) >> 16) & 0xffff)
160 160
161#define DWC3_TXFIFOQ 1 161#define DWC3_TXFIFOQ 0
162#define DWC3_RXFIFOQ 3 162#define DWC3_RXFIFOQ 1
163#define DWC3_TXREQQ 5 163#define DWC3_TXREQQ 2
164#define DWC3_RXREQQ 7 164#define DWC3_RXREQQ 3
165#define DWC3_RXINFOQ 9 165#define DWC3_RXINFOQ 4
166#define DWC3_DESCFETCHQ 13 166#define DWC3_PSTATQ 5
167#define DWC3_EVENTQ 15 167#define DWC3_DESCFETCHQ 6
168#define DWC3_EVENTQ 7
169#define DWC3_AUXEVENTQ 8
168 170
169/* Global RX Threshold Configuration Register */ 171/* Global RX Threshold Configuration Register */
170#define DWC3_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 19) 172#define DWC3_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 19)
@@ -795,7 +797,9 @@ struct dwc3_scratchpad_array {
795 * @usb3_phy: pointer to USB3 PHY 797 * @usb3_phy: pointer to USB3 PHY
796 * @usb2_generic_phy: pointer to USB2 PHY 798 * @usb2_generic_phy: pointer to USB2 PHY
797 * @usb3_generic_phy: pointer to USB3 PHY 799 * @usb3_generic_phy: pointer to USB3 PHY
800 * @phys_ready: flag to indicate that PHYs are ready
798 * @ulpi: pointer to ulpi interface 801 * @ulpi: pointer to ulpi interface
802 * @ulpi_ready: flag to indicate that ULPI is initialized
799 * @u2sel: parameter from Set SEL request. 803 * @u2sel: parameter from Set SEL request.
800 * @u2pel: parameter from Set SEL request. 804 * @u2pel: parameter from Set SEL request.
801 * @u1sel: parameter from Set SEL request. 805 * @u1sel: parameter from Set SEL request.
@@ -893,7 +897,10 @@ struct dwc3 {
893 struct phy *usb2_generic_phy; 897 struct phy *usb2_generic_phy;
894 struct phy *usb3_generic_phy; 898 struct phy *usb3_generic_phy;
895 899
900 bool phys_ready;
901
896 struct ulpi *ulpi; 902 struct ulpi *ulpi;
903 bool ulpi_ready;
897 904
898 void __iomem *regs; 905 void __iomem *regs;
899 size_t regs_size; 906 size_t regs_size;
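
Two independent fixes land in core.h: the GDBGFIFOSPACE queue-type constants are renumbered to what appears to be the raw field encoding consumed by DWC3_GDBGFIFOSPACE_TYPE() (gaining PSTATQ and AUXEVENTQ), and the new phys_ready/ulpi_ready flags let PHY and ULPI bring-up run exactly once even though probe, resume, and role-switch paths can all reach the same init code. A minimal sketch of that one-shot guard, under the assumption that init is reachable from several paths:

#include <linux/types.h>

struct my_ctrl {
	bool phys_ready;	/* set once PHY setup has succeeded */
};

static int my_phy_setup(struct my_ctrl *c)
{
	return 0;		/* real hardware setup elided */
}

static int my_core_init(struct my_ctrl *c)
{
	int ret;

	if (!c->phys_ready) {
		ret = my_phy_setup(c);
		if (ret)
			return ret;
		c->phys_ready = true;	/* never repeat the setup */
	}
	return 0;
}
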
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index 7ae0eefc7cc7..e54c3622eb28 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -143,6 +143,7 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
143 clk_disable_unprepare(simple->clks[i]); 143 clk_disable_unprepare(simple->clks[i]);
144 clk_put(simple->clks[i]); 144 clk_put(simple->clks[i]);
145 } 145 }
146 simple->num_clocks = 0;
146 147
147 reset_control_assert(simple->resets); 148 reset_control_assert(simple->resets);
148 reset_control_put(simple->resets); 149 reset_control_put(simple->resets);
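
Resetting simple->num_clocks after the put loop makes the teardown idempotent: if shutdown runs after remove and both reach this cleanup, the second pass loops zero times instead of disabling clocks that were already released. The idiom, sketched with a hypothetical wrapper:

#include <linux/clk.h>

struct my_simple {
	struct clk	**clks;
	int		num_clocks;
};

static void my_put_clocks(struct my_simple *s)
{
	int i;

	for (i = 0; i < s->num_clocks; i++) {
		clk_disable_unprepare(s->clks[i]);
		clk_put(s->clks[i]);
	}
	s->num_clocks = 0;	/* a second call is now a no-op */
}
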
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index a4719e853b85..ed8b86517675 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -582,9 +582,25 @@ static int dwc3_omap_resume(struct device *dev)
582 return 0; 582 return 0;
583} 583}
584 584
585static void dwc3_omap_complete(struct device *dev)
586{
587 struct dwc3_omap *omap = dev_get_drvdata(dev);
588
589 if (extcon_get_state(omap->edev, EXTCON_USB))
590 dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
591 else
592 dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF);
593
594 if (extcon_get_state(omap->edev, EXTCON_USB_HOST))
595 dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
596 else
597 dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT);
598}
599
585static const struct dev_pm_ops dwc3_omap_dev_pm_ops = { 600static const struct dev_pm_ops dwc3_omap_dev_pm_ops = {
586 601
587 SET_SYSTEM_SLEEP_PM_OPS(dwc3_omap_suspend, dwc3_omap_resume) 602 SET_SYSTEM_SLEEP_PM_OPS(dwc3_omap_suspend, dwc3_omap_resume)
603 .complete = dwc3_omap_complete,
588}; 604};
589 605
590#define DEV_PM_OPS (&dwc3_omap_dev_pm_ops) 606#define DEV_PM_OPS (&dwc3_omap_dev_pm_ops)
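
dwc3_omap_complete() runs in the PM ->complete() stage, after resume proper, and re-samples the extcon VBUS/ID lines so a cable plugged or pulled while the system slept is replayed into the mailbox rather than lost. The mix above is legal: SET_SYSTEM_SLEEP_PM_OPS() fills only the suspend/resume members of dev_pm_ops, leaving .complete free to assign by hand. The shape, with hypothetical callbacks:

#include <linux/pm.h>

static int my_suspend(struct device *dev) { return 0; }
static int my_resume(struct device *dev) { return 0; }

static void my_complete(struct device *dev)
{
	/* Re-read external state (extcon in the OMAP case) and
	 * re-apply it; events during suspend were never delivered.
	 */
}

static const struct dev_pm_ops my_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(my_suspend, my_resume)
	.complete = my_complete,
};
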
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 9c2e4a17918e..18be31d5743a 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -854,7 +854,12 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
854 trb++; 854 trb++;
855 trb->ctrl &= ~DWC3_TRB_CTRL_HWO; 855 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
856 trace_dwc3_complete_trb(ep0, trb); 856 trace_dwc3_complete_trb(ep0, trb);
857 ep0->trb_enqueue = 0; 857
858 if (r->direction)
859 dwc->eps[1]->trb_enqueue = 0;
860 else
861 dwc->eps[0]->trb_enqueue = 0;
862
858 dwc->ep0_bounced = false; 863 dwc->ep0_bounced = false;
859 } 864 }
860 865
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 616ef49ccb49..2bda4eb1e9ac 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2745,6 +2745,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2745 break; 2745 break;
2746 } 2746 }
2747 2747
2748 dwc->eps[1]->endpoint.maxpacket = dwc->gadget.ep0->maxpacket;
2749
2748 /* Enable USB2 LPM Capability */ 2750 /* Enable USB2 LPM Capability */
2749 2751
2750 if ((dwc->revision > DWC3_REVISION_194A) && 2752 if ((dwc->revision > DWC3_REVISION_194A) &&
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 8f2cf3baa19c..c2592d883f67 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1855,44 +1855,20 @@ static int ffs_func_eps_enable(struct ffs_function *func)
1855 1855
1856 spin_lock_irqsave(&func->ffs->eps_lock, flags); 1856 spin_lock_irqsave(&func->ffs->eps_lock, flags);
1857 while(count--) { 1857 while(count--) {
1858 struct usb_endpoint_descriptor *ds;
1859 struct usb_ss_ep_comp_descriptor *comp_desc = NULL;
1860 int needs_comp_desc = false;
1861 int desc_idx;
1862
1863 if (ffs->gadget->speed == USB_SPEED_SUPER) {
1864 desc_idx = 2;
1865 needs_comp_desc = true;
1866 } else if (ffs->gadget->speed == USB_SPEED_HIGH)
1867 desc_idx = 1;
1868 else
1869 desc_idx = 0;
1870
1871 /* fall-back to lower speed if desc missing for current speed */
1872 do {
1873 ds = ep->descs[desc_idx];
1874 } while (!ds && --desc_idx >= 0);
1875
1876 if (!ds) {
1877 ret = -EINVAL;
1878 break;
1879 }
1880
1881 ep->ep->driver_data = ep; 1858 ep->ep->driver_data = ep;
1882 ep->ep->desc = ds;
1883 1859
1884 if (needs_comp_desc) {
1885 comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds +
1886 USB_DT_ENDPOINT_SIZE);
1887 ep->ep->maxburst = comp_desc->bMaxBurst + 1;
1888 ep->ep->comp_desc = comp_desc;
1889 }
1860 ret = config_ep_by_speed(func->gadget, &func->function, ep->ep);
1861 if (ret) {
1862 pr_err("%s: config_ep_by_speed(%s) returned %d\n",
1863 __func__, ep->ep->name, ret);
1864 break;
1865 }
1890 1866
1891 ret = usb_ep_enable(ep->ep); 1867 ret = usb_ep_enable(ep->ep);
1892 if (likely(!ret)) { 1868 if (likely(!ret)) {
1893 epfile->ep = ep; 1869 epfile->ep = ep;
1894 epfile->in = usb_endpoint_dir_in(ds); 1870 epfile->in = usb_endpoint_dir_in(ep->ep->desc);
1895 epfile->isoc = usb_endpoint_xfer_isoc(ds); 1871 epfile->isoc = usb_endpoint_xfer_isoc(ep->ep->desc);
1896 } else { 1872 } else {
1897 break; 1873 break;
1898 } 1874 }
@@ -2979,10 +2955,8 @@ static int _ffs_func_bind(struct usb_configuration *c,
2979 struct ffs_data *ffs = func->ffs; 2955 struct ffs_data *ffs = func->ffs;
2980 2956
2981 const int full = !!func->ffs->fs_descs_count; 2957 const int full = !!func->ffs->fs_descs_count;
2982 const int high = gadget_is_dualspeed(func->gadget) &&
2983 func->ffs->hs_descs_count;
2984 const int super = gadget_is_superspeed(func->gadget) &&
2985 func->ffs->ss_descs_count;
2958 const int high = !!func->ffs->hs_descs_count;
2959 const int super = !!func->ffs->ss_descs_count;
2986 2960
2987 int fs_len, hs_len, ss_len, ret, i; 2961 int fs_len, hs_len, ss_len, ret, i;
2988 struct ffs_ep *eps_ptr; 2962 struct ffs_ep *eps_ptr;
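
Both f_fs hunks replace open-coded speed handling with the composite framework's view of the world: ffs_func_eps_enable() drops its own "walk down from the current speed until a descriptor exists" loop in favour of config_ep_by_speed(), which selects ep->desc (and, for SuperSpeed, the companion descriptor and maxburst) for the negotiated speed, and _ffs_func_bind() stops second-guessing gadget_is_dualspeed()/gadget_is_superspeed(), letting the descriptor counts alone decide what gets bound. A minimal sketch of the calling convention, assuming a composite function whose fs/hs/ss descriptor tables are already registered:

#include <linux/usb/composite.h>

static int my_enable_ep(struct usb_gadget *gadget,
			struct usb_function *f, struct usb_ep *ep)
{
	int ret;

	/* Fills ep->desc (and ep->comp_desc/ep->maxburst on SuperSpeed)
	 * from the function's registered descriptor tables.
	 */
	ret = config_ep_by_speed(gadget, f, ep);
	if (ret) {
		pr_err("%s: config_ep_by_speed(%s) failed: %d\n",
		       __func__, ep->name, ret);
		return ret;
	}
	return usb_ep_enable(ep);
}
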
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 11fe788b4308..d2dc1f00180b 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -524,6 +524,8 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
524 dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); 524 dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
525 return ret; 525 return ret;
526 } 526 }
527 iad_desc.bFirstInterface = ret;
528
527 std_ac_if_desc.bInterfaceNumber = ret; 529 std_ac_if_desc.bInterfaceNumber = ret;
528 uac2->ac_intf = ret; 530 uac2->ac_intf = ret;
529 uac2->ac_alt = 0; 531 uac2->ac_alt = 0;
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 1e9567091d86..0875d38476ee 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -274,7 +274,6 @@ config USB_SNP_UDC_PLAT
274 tristate "Synopsys USB 2.0 Device controller" 274 tristate "Synopsys USB 2.0 Device controller"
275 depends on USB_GADGET && OF && HAS_DMA 275 depends on USB_GADGET && OF && HAS_DMA
276 depends on EXTCON || EXTCON=n 276 depends on EXTCON || EXTCON=n
277 select USB_GADGET_DUALSPEED
278 select USB_SNP_CORE 277 select USB_SNP_CORE
279 default ARCH_BCM_IPROC 278 default ARCH_BCM_IPROC
280 help 279 help
diff --git a/drivers/usb/gadget/udc/bdc/bdc_pci.c b/drivers/usb/gadget/udc/bdc/bdc_pci.c
index 1e940f054cb8..6dbc489513cd 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_pci.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_pci.c
@@ -77,6 +77,7 @@ static int bdc_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
77 if (ret) { 77 if (ret) {
78 dev_err(&pci->dev, 78 dev_err(&pci->dev,
79 "couldn't add resources to bdc device\n"); 79 "couldn't add resources to bdc device\n");
80 platform_device_put(bdc);
80 return ret; 81 return ret;
81 } 82 }
82 83
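
The added platform_device_put() plugs a reference leak in bdc_pci_probe(): once platform_device_alloc() has succeeded, every subsequent error exit must drop that reference instead of plain-returning. The standard error ladder, sketched:

#include <linux/platform_device.h>

static int my_probe(void)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc("my-device", PLATFORM_DEVID_NONE);
	if (!pdev)
		return -ENOMEM;

	ret = platform_device_add(pdev);
	if (ret) {
		platform_device_put(pdev);	/* undo the alloc reference */
		return ret;
	}
	return 0;
}
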
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 859d5b11ba4c..1f8b19d9cf97 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -180,8 +180,8 @@ EXPORT_SYMBOL_GPL(usb_ep_alloc_request);
180void usb_ep_free_request(struct usb_ep *ep, 180void usb_ep_free_request(struct usb_ep *ep,
181 struct usb_request *req) 181 struct usb_request *req)
182{ 182{
183 ep->ops->free_request(ep, req);
184 trace_usb_ep_free_request(ep, req, 0); 183 trace_usb_ep_free_request(ep, req, 0);
184 ep->ops->free_request(ep, req);
185} 185}
186EXPORT_SYMBOL_GPL(usb_ep_free_request); 186EXPORT_SYMBOL_GPL(usb_ep_free_request);
187 187
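
Swapping those two lines in usb_ep_free_request() fixes a use-after-free: the tracepoint dereferences the request, so it must fire while the memory is still owned, i.e. before ->free_request() returns it to the allocator. The rule generalizes to any "trace, then release" pair; a sketch with hypothetical types:

struct my_req;
struct my_ep_ops { void (*free_request)(struct my_req *req); };
struct my_ep { const struct my_ep_ops *ops; };

static void trace_my_free_request(struct my_ep *ep, struct my_req *req) { }

static void my_free_request(struct my_ep *ep, struct my_req *req)
{
	trace_my_free_request(ep, req);	/* req is still valid here */
	ep->ops->free_request(req);	/* now hand the memory back */
}
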
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index e5b4ee96c4bf..56b517a38865 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -1305,7 +1305,7 @@ static void udc_reset_ep_queue(struct fsl_udc *udc, u8 pipe)
1305{ 1305{
1306 struct fsl_ep *ep = get_ep_by_pipe(udc, pipe); 1306 struct fsl_ep *ep = get_ep_by_pipe(udc, pipe);
1307 1307
1308 if (ep->name) 1308 if (ep->ep.name)
1309 nuke(ep, -ESHUTDOWN); 1309 nuke(ep, -ESHUTDOWN);
1310} 1310}
1311 1311
@@ -1693,7 +1693,7 @@ static void dtd_complete_irq(struct fsl_udc *udc)
1693 curr_ep = get_ep_by_pipe(udc, i); 1693 curr_ep = get_ep_by_pipe(udc, i);
1694 1694
1695 /* If the ep is configured */ 1695 /* If the ep is configured */
1696 if (curr_ep->name == NULL) { 1696 if (!curr_ep->ep.name) {
1697 WARNING("Invalid EP?"); 1697 WARNING("Invalid EP?");
1698 continue; 1698 continue;
1699 } 1699 }
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 6e87af248367..409cde4e6a51 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -2410,7 +2410,7 @@ static int renesas_usb3_remove(struct platform_device *pdev)
2410 __renesas_usb3_ep_free_request(usb3->ep0_req); 2410 __renesas_usb3_ep_free_request(usb3->ep0_req);
2411 if (usb3->phy) 2411 if (usb3->phy)
2412 phy_put(usb3->phy); 2412 phy_put(usb3->phy);
2413 pm_runtime_disable(usb3_to_dev(usb3)); 2413 pm_runtime_disable(&pdev->dev);
2414 2414
2415 return 0; 2415 return 0;
2416} 2416}
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 6150bed7cfa8..4fcfb3084b36 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -633,14 +633,6 @@ config USB_UHCI_ASPEED
633 bool 633 bool
634 default y if ARCH_ASPEED 634 default y if ARCH_ASPEED
635 635
636config USB_UHCI_BIG_ENDIAN_MMIO
637 bool
638 default y if SPARC_LEON
639
640config USB_UHCI_BIG_ENDIAN_DESC
641 bool
642 default y if SPARC_LEON
643
644config USB_FHCI_HCD 636config USB_FHCI_HCD
645 tristate "Freescale QE USB Host Controller support" 637 tristate "Freescale QE USB Host Controller support"
646 depends on OF_GPIO && QE_GPIO && QUICC_ENGINE 638 depends on OF_GPIO && QE_GPIO && QUICC_ENGINE
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index facafdf8fb95..d7641cbdee43 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -774,12 +774,12 @@ static struct urb *request_single_step_set_feature_urb(
774 atomic_inc(&urb->use_count); 774 atomic_inc(&urb->use_count);
775 atomic_inc(&urb->dev->urbnum); 775 atomic_inc(&urb->dev->urbnum);
776 urb->setup_dma = dma_map_single( 776 urb->setup_dma = dma_map_single(
777 hcd->self.controller, 777 hcd->self.sysdev,
778 urb->setup_packet, 778 urb->setup_packet,
779 sizeof(struct usb_ctrlrequest), 779 sizeof(struct usb_ctrlrequest),
780 DMA_TO_DEVICE); 780 DMA_TO_DEVICE);
781 urb->transfer_dma = dma_map_single( 781 urb->transfer_dma = dma_map_single(
782 hcd->self.controller, 782 hcd->self.sysdev,
783 urb->transfer_buffer, 783 urb->transfer_buffer,
784 urb->transfer_buffer_length, 784 urb->transfer_buffer_length,
785 DMA_FROM_DEVICE); 785 DMA_FROM_DEVICE);
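
Mapping the single-step set-feature buffers against hcd->self.sysdev rather than hcd->self.controller matters on systems where the HCD is instantiated under a glue device: sysdev is the struct device that carries the DMA mask, DMA ops and IOMMU binding, so mapping through anything else can produce addresses the controller cannot use. The rule, sketched against the real usb_hcd fields:

#include <linux/dma-mapping.h>
#include <linux/usb/hcd.h>

static dma_addr_t my_map_setup(struct usb_hcd *hcd, void *buf, size_t len)
{
	/* self.sysdev, not self.controller, owns the DMA configuration */
	return dma_map_single(hcd->self.sysdev, buf, len, DMA_TO_DEVICE);
}
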
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 88158324dcae..327630405695 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -1188,10 +1188,10 @@ static int submit_single_step_set_feature(
1188 * 15 secs after the setup 1188 * 15 secs after the setup
1189 */ 1189 */
1190 if (is_setup) { 1190 if (is_setup) {
1191 /* SETUP pid */ 1191 /* SETUP pid, and interrupt after SETUP completion */
1192 qtd_fill(ehci, qtd, urb->setup_dma, 1192 qtd_fill(ehci, qtd, urb->setup_dma,
1193 sizeof(struct usb_ctrlrequest), 1193 sizeof(struct usb_ctrlrequest),
1194 token | (2 /* "setup" */ << 8), 8); 1194 QTD_IOC | token | (2 /* "setup" */ << 8), 8);
1195 1195
1196 submit_async(ehci, urb, &qtd_list, GFP_ATOMIC); 1196 submit_async(ehci, urb, &qtd_list, GFP_ATOMIC);
1197 return 0; /*Return now; we shall come back after 15 seconds*/ 1197 return 0; /*Return now; we shall come back after 15 seconds*/
@@ -1228,12 +1228,8 @@ static int submit_single_step_set_feature(
1228 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma); 1228 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
1229 list_add_tail(&qtd->qtd_list, head); 1229 list_add_tail(&qtd->qtd_list, head);
1230 1230
1231 /* dont fill any data in such packets */ 1231 /* Interrupt after STATUS completion */
1232 qtd_fill(ehci, qtd, 0, 0, token, 0); 1232 qtd_fill(ehci, qtd, 0, 0, token | QTD_IOC, 0);
1233
1234 /* by default, enable interrupt on urb completion */
1235 if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT)))
1236 qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
1237 1233
1238 submit_async(ehci, urb, &qtd_list, GFP_KERNEL); 1234 submit_async(ehci, urb, &qtd_list, GFP_KERNEL);
1239 1235
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index ee9676349333..84f88fa411cd 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -74,6 +74,7 @@ static const char hcd_name [] = "ohci_hcd";
74 74
75#define STATECHANGE_DELAY msecs_to_jiffies(300) 75#define STATECHANGE_DELAY msecs_to_jiffies(300)
76#define IO_WATCHDOG_DELAY msecs_to_jiffies(275) 76#define IO_WATCHDOG_DELAY msecs_to_jiffies(275)
77#define IO_WATCHDOG_OFF 0xffffff00
77 78
78#include "ohci.h" 79#include "ohci.h"
79#include "pci-quirks.h" 80#include "pci-quirks.h"
@@ -231,7 +232,7 @@ static int ohci_urb_enqueue (
231 } 232 }
232 233
233 /* Start up the I/O watchdog timer, if it's not running */ 234 /* Start up the I/O watchdog timer, if it's not running */
234 if (!timer_pending(&ohci->io_watchdog) && 235 if (ohci->prev_frame_no == IO_WATCHDOG_OFF &&
235 list_empty(&ohci->eds_in_use) && 236 list_empty(&ohci->eds_in_use) &&
236 !(ohci->flags & OHCI_QUIRK_QEMU)) { 237 !(ohci->flags & OHCI_QUIRK_QEMU)) {
237 ohci->prev_frame_no = ohci_frame_no(ohci); 238 ohci->prev_frame_no = ohci_frame_no(ohci);
@@ -501,6 +502,7 @@ static int ohci_init (struct ohci_hcd *ohci)
501 return 0; 502 return 0;
502 503
503 timer_setup(&ohci->io_watchdog, io_watchdog_func, 0); 504 timer_setup(&ohci->io_watchdog, io_watchdog_func, 0);
505 ohci->prev_frame_no = IO_WATCHDOG_OFF;
504 506
505 ohci->hcca = dma_alloc_coherent (hcd->self.controller, 507 ohci->hcca = dma_alloc_coherent (hcd->self.controller,
506 sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL); 508 sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL);
@@ -730,7 +732,7 @@ static void io_watchdog_func(struct timer_list *t)
730 u32 head; 732 u32 head;
731 struct ed *ed; 733 struct ed *ed;
732 struct td *td, *td_start, *td_next; 734 struct td *td, *td_start, *td_next;
733 unsigned frame_no; 735 unsigned frame_no, prev_frame_no = IO_WATCHDOG_OFF;
734 unsigned long flags; 736 unsigned long flags;
735 737
736 spin_lock_irqsave(&ohci->lock, flags); 738 spin_lock_irqsave(&ohci->lock, flags);
@@ -835,7 +837,7 @@ static void io_watchdog_func(struct timer_list *t)
835 } 837 }
836 } 838 }
837 if (!list_empty(&ohci->eds_in_use)) { 839 if (!list_empty(&ohci->eds_in_use)) {
838 ohci->prev_frame_no = frame_no; 840 prev_frame_no = frame_no;
839 ohci->prev_wdh_cnt = ohci->wdh_cnt; 841 ohci->prev_wdh_cnt = ohci->wdh_cnt;
840 ohci->prev_donehead = ohci_readl(ohci, 842 ohci->prev_donehead = ohci_readl(ohci,
841 &ohci->regs->donehead); 843 &ohci->regs->donehead);
@@ -845,6 +847,7 @@ static void io_watchdog_func(struct timer_list *t)
845 } 847 }
846 848
847 done: 849 done:
850 ohci->prev_frame_no = prev_frame_no;
848 spin_unlock_irqrestore(&ohci->lock, flags); 851 spin_unlock_irqrestore(&ohci->lock, flags);
849} 852}
850 853
@@ -973,6 +976,7 @@ static void ohci_stop (struct usb_hcd *hcd)
973 if (quirk_nec(ohci)) 976 if (quirk_nec(ohci))
974 flush_work(&ohci->nec_work); 977 flush_work(&ohci->nec_work);
975 del_timer_sync(&ohci->io_watchdog); 978 del_timer_sync(&ohci->io_watchdog);
979 ohci->prev_frame_no = IO_WATCHDOG_OFF;
976 980
977 ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable); 981 ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
978 ohci_usb_reset(ohci); 982 ohci_usb_reset(ohci);
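
The ohci-hcd.c hunks above retire timer_pending() as the "is the watchdog armed?" test and encode the state in prev_frame_no itself: IO_WATCHDOG_OFF is a value no real frame number can take (frame numbers are 16-bit), so it doubles as an "off" sentinel that stays correct even in the window where the timer callback is already running but no longer pending. The pattern in miniature:

#define MY_WATCHDOG_OFF	0xffffff00	/* no 16-bit frame number matches */

struct my_hc {
	unsigned int prev_frame_no;	/* frame number, or the sentinel */
};

static void my_maybe_arm_watchdog(struct my_hc *hc, unsigned int frame_no)
{
	if (hc->prev_frame_no == MY_WATCHDOG_OFF) {
		hc->prev_frame_no = frame_no;
		/* mod_timer(&hc->watchdog, ...) would go here */
	}
}

static void my_disarm_watchdog(struct my_hc *hc)
{
	hc->prev_frame_no = MY_WATCHDOG_OFF;	/* after del_timer_sync() */
}
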
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index fb7aaa3b9d06..634f3c7bf774 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -311,8 +311,10 @@ static int ohci_bus_suspend (struct usb_hcd *hcd)
311 rc = ohci_rh_suspend (ohci, 0); 311 rc = ohci_rh_suspend (ohci, 0);
312 spin_unlock_irq (&ohci->lock); 312 spin_unlock_irq (&ohci->lock);
313 313
314 if (rc == 0) 314 if (rc == 0) {
315 del_timer_sync(&ohci->io_watchdog); 315 del_timer_sync(&ohci->io_watchdog);
316 ohci->prev_frame_no = IO_WATCHDOG_OFF;
317 }
316 return rc; 318 return rc;
317} 319}
318 320
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index b2ec8c399363..4ccb85a67bb3 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -1019,6 +1019,8 @@ skip_ed:
1019 * have modified this list. normally it's just prepending 1019 * have modified this list. normally it's just prepending
1020 * entries (which we'd ignore), but paranoia won't hurt. 1020 * entries (which we'd ignore), but paranoia won't hurt.
1021 */ 1021 */
1022 *last = ed->ed_next;
1023 ed->ed_next = NULL;
1022 modified = 0; 1024 modified = 0;
1023 1025
1024 /* unlink urbs as requested, but rescan the list after 1026 /* unlink urbs as requested, but rescan the list after
@@ -1077,21 +1079,22 @@ rescan_this:
1077 goto rescan_this; 1079 goto rescan_this;
1078 1080
1079 /* 1081 /*
1080 * If no TDs are queued, take ED off the ed_rm_list. 1082 * If no TDs are queued, ED is now idle.
1081 * Otherwise, if the HC is running, reschedule. 1083 * Otherwise, if the HC is running, reschedule.
1082 * If not, leave it on the list for further dequeues. 1084 * If the HC isn't running, add ED back to the
1085 * start of the list for later processing.
1083 */ 1086 */
1084 if (list_empty(&ed->td_list)) { 1087 if (list_empty(&ed->td_list)) {
1085 *last = ed->ed_next;
1086 ed->ed_next = NULL;
1087 ed->state = ED_IDLE; 1088 ed->state = ED_IDLE;
1088 list_del(&ed->in_use_list); 1089 list_del(&ed->in_use_list);
1089 } else if (ohci->rh_state == OHCI_RH_RUNNING) { 1090 } else if (ohci->rh_state == OHCI_RH_RUNNING) {
1090 *last = ed->ed_next;
1091 ed->ed_next = NULL;
1092 ed_schedule(ohci, ed); 1091 ed_schedule(ohci, ed);
1093 } else { 1092 } else {
1094 last = &ed->ed_next; 1093 ed->ed_next = ohci->ed_rm_list;
1094 ohci->ed_rm_list = ed;
1095 /* Don't loop on the same ED */
1096 if (last == &ohci->ed_rm_list)
1097 last = &ed->ed_next;
1095 } 1098 }
1096 1099
1097 if (modified) 1100 if (modified)
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 161536717025..67ad4bb6919a 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -66,6 +66,23 @@
66#define AX_INDXC 0x30 66#define AX_INDXC 0x30
67#define AX_DATAC 0x34 67#define AX_DATAC 0x34
68 68
69#define PT_ADDR_INDX 0xE8
70#define PT_READ_INDX 0xE4
71#define PT_SIG_1_ADDR 0xA520
72#define PT_SIG_2_ADDR 0xA521
73#define PT_SIG_3_ADDR 0xA522
74#define PT_SIG_4_ADDR 0xA523
75#define PT_SIG_1_DATA 0x78
76#define PT_SIG_2_DATA 0x56
77#define PT_SIG_3_DATA 0x34
78#define PT_SIG_4_DATA 0x12
79#define PT4_P1_REG 0xB521
80#define PT4_P2_REG 0xB522
81#define PT2_P1_REG 0xD520
82#define PT2_P2_REG 0xD521
83#define PT1_P1_REG 0xD522
84#define PT1_P2_REG 0xD523
85
69#define NB_PCIE_INDX_ADDR 0xe0 86#define NB_PCIE_INDX_ADDR 0xe0
70#define NB_PCIE_INDX_DATA 0xe4 87#define NB_PCIE_INDX_DATA 0xe4
71#define PCIE_P_CNTL 0x10040 88#define PCIE_P_CNTL 0x10040
@@ -513,6 +530,98 @@ void usb_amd_dev_put(void)
513EXPORT_SYMBOL_GPL(usb_amd_dev_put); 530EXPORT_SYMBOL_GPL(usb_amd_dev_put);
514 531
515/* 532/*
533 * Check if port is disabled in BIOS on AMD Promontory host.
534 * BIOS Disabled ports may wake on connect/disconnect and need
535 * driver workaround to keep them disabled.
536 * Returns true if port is marked disabled.
537 */
538bool usb_amd_pt_check_port(struct device *device, int port)
539{
540 unsigned char value, port_shift;
541 struct pci_dev *pdev;
542 u16 reg;
543
544 pdev = to_pci_dev(device);
545 pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_1_ADDR);
546
547 pci_read_config_byte(pdev, PT_READ_INDX, &value);
548 if (value != PT_SIG_1_DATA)
549 return false;
550
551 pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_2_ADDR);
552
553 pci_read_config_byte(pdev, PT_READ_INDX, &value);
554 if (value != PT_SIG_2_DATA)
555 return false;
556
557 pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_3_ADDR);
558
559 pci_read_config_byte(pdev, PT_READ_INDX, &value);
560 if (value != PT_SIG_3_DATA)
561 return false;
562
563 pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_4_ADDR);
564
565 pci_read_config_byte(pdev, PT_READ_INDX, &value);
566 if (value != PT_SIG_4_DATA)
567 return false;
568
569 /* Check disabled port setting, if bit is set port is enabled */
570 switch (pdev->device) {
571 case 0x43b9:
572 case 0x43ba:
573 /*
574 * device is AMD_PROMONTORYA_4(0x43b9) or PROMONTORYA_3(0x43ba)
575 * PT4_P1_REG bits[7..1] represents USB2.0 ports 6 to 0
576 * PT4_P2_REG bits[6..0] represents ports 13 to 7
577 */
578 if (port > 6) {
579 reg = PT4_P2_REG;
580 port_shift = port - 7;
581 } else {
582 reg = PT4_P1_REG;
583 port_shift = port + 1;
584 }
585 break;
586 case 0x43bb:
587 /*
588 * device is AMD_PROMONTORYA_2(0x43bb)
589 * PT2_P1_REG bits[7..5] represents USB2.0 ports 2 to 0
590 * PT2_P2_REG bits[5..0] represents ports 9 to 3
591 */
592 if (port > 2) {
593 reg = PT2_P2_REG;
594 port_shift = port - 3;
595 } else {
596 reg = PT2_P1_REG;
597 port_shift = port + 5;
598 }
599 break;
600 case 0x43bc:
601 /*
602 * device is AMD_PROMONTORYA_1(0x43bc)
603 * PT1_P1_REG[7..4] represents USB2.0 ports 3 to 0
604 * PT1_P2_REG[5..0] represents ports 9 to 4
605 */
606 if (port > 3) {
607 reg = PT1_P2_REG;
608 port_shift = port - 4;
609 } else {
610 reg = PT1_P1_REG;
611 port_shift = port + 4;
612 }
613 break;
614 default:
615 return false;
616 }
617 pci_write_config_word(pdev, PT_ADDR_INDX, reg);
618 pci_read_config_byte(pdev, PT_READ_INDX, &value);
619
620 return !(value & BIT(port_shift));
621}
622EXPORT_SYMBOL_GPL(usb_amd_pt_check_port);
623
624/*
516 * Make sure the controller is completely inactive, unable to 625 * Make sure the controller is completely inactive, unable to
517 * generate interrupts or do DMA. 626 * generate interrupts or do DMA.
518 */ 627 */
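
usb_amd_pt_check_port() above is pure detective work through the PT_ADDR_INDX/PT_READ_INDX indexed pair: it first proves the device really is a Promontory part by reading the four signature bytes (0x78 0x56 0x34 0x12), then fetches the per-port enable bit, whose register and bit position vary with the PCI device ID. Callers treat it as a simple predicate; a sketch of the consumer side, with a hypothetical wake-bit mask:

#include <linux/device.h>
#include "pci-quirks.h"

#define MY_PORT_WAKE_BITS	0x7	/* hypothetical wake-enable mask */

static u32 my_filter_wake_bits(struct device *ctrl, int port, u32 portsc)
{
	/* True means the BIOS disabled this port: strip its wake bits
	 * so it cannot resume the system on connect/disconnect.
	 */
	if (usb_amd_pt_check_port(ctrl, port))
		portsc &= ~MY_PORT_WAKE_BITS;
	return portsc;
}
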
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index b68dcb5dd0fd..4ca0d9b7e463 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -17,6 +17,7 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
17void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); 17void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
18void sb800_prefetch(struct device *dev, int on); 18void sb800_prefetch(struct device *dev, int on);
19bool usb_xhci_needs_pci_reset(struct pci_dev *pdev); 19bool usb_xhci_needs_pci_reset(struct pci_dev *pdev);
20bool usb_amd_pt_check_port(struct device *device, int port);
20#else 21#else
21struct pci_dev; 22struct pci_dev;
22static inline void usb_amd_quirk_pll_disable(void) {} 23static inline void usb_amd_quirk_pll_disable(void) {}
@@ -25,6 +26,10 @@ static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {}
25static inline void usb_amd_dev_put(void) {} 26static inline void usb_amd_dev_put(void) {}
26static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {} 27static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}
27static inline void sb800_prefetch(struct device *dev, int on) {} 28static inline void sb800_prefetch(struct device *dev, int on) {}
29static inline bool usb_amd_pt_check_port(struct device *device, int port)
30{
31 return false;
32}
28#endif /* CONFIG_USB_PCI */ 33#endif /* CONFIG_USB_PCI */
29 34
30#endif /* __LINUX_USB_PCI_QUIRKS_H */ 35#endif /* __LINUX_USB_PCI_QUIRKS_H */
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index e26e685d8a57..5851052d4668 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -211,7 +211,7 @@ static void xhci_ring_dump_segment(struct seq_file *s,
211static int xhci_ring_trb_show(struct seq_file *s, void *unused) 211static int xhci_ring_trb_show(struct seq_file *s, void *unused)
212{ 212{
213 int i; 213 int i;
214 struct xhci_ring *ring = s->private; 214 struct xhci_ring *ring = *(struct xhci_ring **)s->private;
215 struct xhci_segment *seg = ring->first_seg; 215 struct xhci_segment *seg = ring->first_seg;
216 216
217 for (i = 0; i < ring->num_segs; i++) { 217 for (i = 0; i < ring->num_segs; i++) {
@@ -387,7 +387,7 @@ void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,
387 387
388 snprintf(epriv->name, sizeof(epriv->name), "ep%02d", ep_index); 388 snprintf(epriv->name, sizeof(epriv->name), "ep%02d", ep_index);
389 epriv->root = xhci_debugfs_create_ring_dir(xhci, 389 epriv->root = xhci_debugfs_create_ring_dir(xhci,
390 &dev->eps[ep_index].new_ring, 390 &dev->eps[ep_index].ring,
391 epriv->name, 391 epriv->name,
392 spriv->root); 392 spriv->root);
393 spriv->eps[ep_index] = epriv; 393 spriv->eps[ep_index] = epriv;
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 46d5e08f05f1..72ebbc908e19 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1224,17 +1224,17 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1224 temp = readl(port_array[wIndex]); 1224 temp = readl(port_array[wIndex]);
1225 break; 1225 break;
1226 } 1226 }
1227
1228 /* Software should not attempt to set
1229 * port link state above '3' (U3) and the port
1230 * must be enabled.
1231 */
1232 if ((temp & PORT_PE) == 0 ||
1233 (link_state > USB_SS_PORT_LS_U3)) {
1234 xhci_warn(xhci, "Cannot set link state.\n");
1227 /* Port must be enabled */
1228 if (!(temp & PORT_PE)) {
1229 retval = -ENODEV;
1230 break;
1231 }
1232 /* Can't set port link state above '3' (U3) */
1233 if (link_state > USB_SS_PORT_LS_U3) {
1234 xhci_warn(xhci, "Cannot set port %d link state %d\n",
1235 wIndex, link_state);
1235 goto error; 1236 goto error;
1236 } 1237 }
1237
1238 if (link_state == USB_SS_PORT_LS_U3) { 1238 if (link_state == USB_SS_PORT_LS_U3) {
1239 slot_id = xhci_find_slot_id_by_port(hcd, xhci, 1239 slot_id = xhci_find_slot_id_by_port(hcd, xhci,
1240 wIndex + 1); 1240 wIndex + 1);
@@ -1522,6 +1522,13 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1522 t2 |= PORT_WKOC_E | PORT_WKCONN_E; 1522 t2 |= PORT_WKOC_E | PORT_WKCONN_E;
1523 t2 &= ~PORT_WKDISC_E; 1523 t2 &= ~PORT_WKDISC_E;
1524 } 1524 }
1525
1526 if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) &&
1527 (hcd->speed < HCD_USB3)) {
1528 if (usb_amd_pt_check_port(hcd->self.controller,
1529 port_index))
1530 t2 &= ~PORT_WAKE_BITS;
1531 }
1525 } else 1532 } else
1526 t2 &= ~PORT_WAKE_BITS; 1533 t2 &= ~PORT_WAKE_BITS;
1527 1534
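
Note the double gate in the xhci_bus_suspend() hunk: the cheap per-controller quirk bit (XHCI_U2_DISABLE_WAKE, set only for the four Promontory device IDs in the xhci-pci.c hunk below) and hcd->speed < HCD_USB3, since only the USB2 root hub's ports are affected; the config-space probing stays inside usb_amd_pt_check_port(). Quirk flags keep such workarounds off every other controller's path. The shape, sketched:

#include <linux/bitops.h>
#include <linux/types.h>

#define MY_QUIRK_U2_DISABLE_WAKE	BIT(27)	/* mirrors XHCI_U2_DISABLE_WAKE */

struct my_xhci { u32 quirks; };

static bool my_port_needs_wake_fixup(const struct my_xhci *x, bool usb2_bus)
{
	/* One bit test per port; the expensive detection ran once at probe */
	return (x->quirks & MY_QUIRK_U2_DISABLE_WAKE) && usb2_bus;
}
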
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 6c79037876db..5262fa571a5d 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -42,6 +42,10 @@
42#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 42#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
43#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 43#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0
44 44
45#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
46#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
47#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb
48#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc
45#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 49#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
46 50
47static const char hcd_name[] = "xhci_hcd"; 51static const char hcd_name[] = "xhci_hcd";
@@ -125,6 +129,13 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
125 if (pdev->vendor == PCI_VENDOR_ID_AMD) 129 if (pdev->vendor == PCI_VENDOR_ID_AMD)
126 xhci->quirks |= XHCI_TRUST_TX_LENGTH; 130 xhci->quirks |= XHCI_TRUST_TX_LENGTH;
127 131
132 if ((pdev->vendor == PCI_VENDOR_ID_AMD) &&
133 ((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) ||
134 (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) ||
135 (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) ||
136 (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1)))
137 xhci->quirks |= XHCI_U2_DISABLE_WAKE;
138
128 if (pdev->vendor == PCI_VENDOR_ID_INTEL) { 139 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
129 xhci->quirks |= XHCI_LPM_SUPPORT; 140 xhci->quirks |= XHCI_LPM_SUPPORT;
130 xhci->quirks |= XHCI_INTEL_HOST; 141 xhci->quirks |= XHCI_INTEL_HOST;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1eeb3396300f..25d4b748a56f 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -646,8 +646,6 @@ static void xhci_stop(struct usb_hcd *hcd)
646 return; 646 return;
647 } 647 }
648 648
649 xhci_debugfs_exit(xhci);
650
651 xhci_dbc_exit(xhci); 649 xhci_dbc_exit(xhci);
652 650
653 spin_lock_irq(&xhci->lock); 651 spin_lock_irq(&xhci->lock);
@@ -680,6 +678,7 @@ static void xhci_stop(struct usb_hcd *hcd)
680 678
681 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory"); 679 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
682 xhci_mem_cleanup(xhci); 680 xhci_mem_cleanup(xhci);
681 xhci_debugfs_exit(xhci);
683 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 682 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
684 "xhci_stop completed - status = %x", 683 "xhci_stop completed - status = %x",
685 readl(&xhci->op_regs->status)); 684 readl(&xhci->op_regs->status));
@@ -1014,6 +1013,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
1014 1013
1015 xhci_dbg(xhci, "cleaning up memory\n"); 1014 xhci_dbg(xhci, "cleaning up memory\n");
1016 xhci_mem_cleanup(xhci); 1015 xhci_mem_cleanup(xhci);
1016 xhci_debugfs_exit(xhci);
1017 xhci_dbg(xhci, "xhci_stop completed - status = %x\n", 1017 xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
1018 readl(&xhci->op_regs->status)); 1018 readl(&xhci->op_regs->status));
1019 1019
@@ -3544,12 +3544,10 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3544 virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING; 3544 virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
3545 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); 3545 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
3546 } 3546 }
3547 3547 xhci_debugfs_remove_slot(xhci, udev->slot_id);
3548 ret = xhci_disable_slot(xhci, udev->slot_id); 3548 ret = xhci_disable_slot(xhci, udev->slot_id);
3549 if (ret) { 3549 if (ret)
3550 xhci_debugfs_remove_slot(xhci, udev->slot_id);
3551 xhci_free_virt_device(xhci, udev->slot_id); 3550 xhci_free_virt_device(xhci, udev->slot_id);
3552 }
3553} 3551}
3554 3552
3555int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) 3553int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 96099a245c69..e4d7d3d06a75 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1822,7 +1822,7 @@ struct xhci_hcd {
1822/* For controller with a broken Port Disable implementation */ 1822/* For controller with a broken Port Disable implementation */
1823#define XHCI_BROKEN_PORT_PED (1 << 25) 1823#define XHCI_BROKEN_PORT_PED (1 << 25)
1824#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26) 1824#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
1825/* Reserved. It was XHCI_U2_DISABLE_WAKE */ 1825#define XHCI_U2_DISABLE_WAKE (1 << 27)
1826#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28) 1826#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28)
1827#define XHCI_HW_LPM_DISABLE (1 << 29) 1827#define XHCI_HW_LPM_DISABLE (1 << 29)
1828 1828
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 63b9e85dc0e9..236a60f53099 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -42,6 +42,9 @@
42#define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 /* USB Product ID of Micro-CASSY Time (reserved) */ 42#define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 /* USB Product ID of Micro-CASSY Time (reserved) */
43#define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 /* USB Product ID of Micro-CASSY Temperature */ 43#define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 /* USB Product ID of Micro-CASSY Temperature */
44#define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 /* USB Product ID of Micro-CASSY pH */ 44#define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 /* USB Product ID of Micro-CASSY pH */
45#define USB_DEVICE_ID_LD_POWERANALYSERCASSY 0x1040 /* USB Product ID of Power Analyser CASSY */
46#define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY 0x1042 /* USB Product ID of Converter Controller CASSY */
47#define USB_DEVICE_ID_LD_MACHINETESTCASSY 0x1043 /* USB Product ID of Machine Test CASSY */
45#define USB_DEVICE_ID_LD_JWM 0x1080 /* USB Product ID of Joule and Wattmeter */ 48#define USB_DEVICE_ID_LD_JWM 0x1080 /* USB Product ID of Joule and Wattmeter */
46#define USB_DEVICE_ID_LD_DMMP 0x1081 /* USB Product ID of Digital Multimeter P (reserved) */ 49#define USB_DEVICE_ID_LD_DMMP 0x1081 /* USB Product ID of Digital Multimeter P (reserved) */
47#define USB_DEVICE_ID_LD_UMIP 0x1090 /* USB Product ID of UMI P */ 50#define USB_DEVICE_ID_LD_UMIP 0x1090 /* USB Product ID of UMI P */
@@ -84,6 +87,9 @@ static const struct usb_device_id ld_usb_table[] = {
84 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) }, 87 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
85 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) }, 88 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
86 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) }, 89 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
90 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) },
91 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) },
92 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) },
87 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) }, 93 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
88 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) }, 94 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
89 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) }, 95 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 968bf1e8b0fe..eef4ad578b31 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2708,7 +2708,8 @@ static int musb_resume(struct device *dev)
2708 if ((devctl & mask) != (musb->context.devctl & mask)) 2708 if ((devctl & mask) != (musb->context.devctl & mask))
2709 musb->port1_status = 0; 2709 musb->port1_status = 0;
2710 2710
2711 musb_start(musb); 2711 musb_enable_interrupts(musb);
2712 musb_platform_enable(musb);
2712 2713
2713 spin_lock_irqsave(&musb->lock, flags); 2714 spin_lock_irqsave(&musb->lock, flags);
2714 error = musb_run_resume_work(musb); 2715 error = musb_run_resume_work(musb);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 394b4ac86161..45ed32c2cba9 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -391,13 +391,7 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
391 } 391 }
392 } 392 }
393 393
394 /* 394 if (qh != NULL && qh->is_ready) {
395 * The pipe must be broken if current urb->status is set, so don't
396 * start next urb.
397 * TODO: to minimize the risk of regression, only check urb->status
398 * for RX, until we have a test case to understand the behavior of TX.
399 */
400 if ((!status || !is_in) && qh && qh->is_ready) {
401 musb_dbg(musb, "... next ep%d %cX urb %p", 395 musb_dbg(musb, "... next ep%d %cX urb %p",
402 hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh)); 396 hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
403 musb_start_urb(musb, is_in, qh); 397 musb_start_urb(musb, is_in, qh);
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index da031c45395a..fbec863350f6 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -602,6 +602,9 @@ static enum usb_charger_type mxs_phy_charger_detect(struct usb_phy *phy)
602 void __iomem *base = phy->io_priv; 602 void __iomem *base = phy->io_priv;
603 enum usb_charger_type chgr_type = UNKNOWN_TYPE; 603 enum usb_charger_type chgr_type = UNKNOWN_TYPE;
604 604
605 if (!regmap)
606 return UNKNOWN_TYPE;
607
605 if (mxs_charger_data_contact_detect(mxs_phy)) 608 if (mxs_charger_data_contact_detect(mxs_phy))
606 return chgr_type; 609 return chgr_type;
607 610
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 5925d111bd47..39fa2fc1b8b7 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -982,6 +982,10 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
982 if ((uintptr_t)pkt->buf & (USBHS_USB_DMAC_XFER_SIZE - 1)) 982 if ((uintptr_t)pkt->buf & (USBHS_USB_DMAC_XFER_SIZE - 1))
983 goto usbhsf_pio_prepare_pop; 983 goto usbhsf_pio_prepare_pop;
984 984
985 /* return at this time if the pipe is running */
986 if (usbhs_pipe_is_running(pipe))
987 return 0;
988
985 usbhs_pipe_config_change_bfre(pipe, 1); 989 usbhs_pipe_config_change_bfre(pipe, 1);
986 990
987 ret = usbhsf_fifo_select(pipe, fifo, 0); 991 ret = usbhsf_fifo_select(pipe, fifo, 0);
@@ -1172,6 +1176,7 @@ static int usbhsf_dma_pop_done_with_usb_dmac(struct usbhs_pkt *pkt,
1172 usbhsf_fifo_clear(pipe, fifo); 1176 usbhsf_fifo_clear(pipe, fifo);
1173 pkt->actual = usbhs_dma_calc_received_size(pkt, chan, rcv_len); 1177 pkt->actual = usbhs_dma_calc_received_size(pkt, chan, rcv_len);
1174 1178
1179 usbhs_pipe_running(pipe, 0);
1175 usbhsf_dma_stop(pipe, fifo); 1180 usbhsf_dma_stop(pipe, fifo);
1176 usbhsf_dma_unmap(pkt); 1181 usbhsf_dma_unmap(pkt);
1177 usbhsf_fifo_unselect(pipe, pipe->fifo); 1182 usbhsf_fifo_unselect(pipe, pipe->fifo);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 5db8ed517e0e..2d8d9150da0c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -241,6 +241,7 @@ static void option_instat_callback(struct urb *urb);
241#define QUECTEL_PRODUCT_EC21 0x0121 241#define QUECTEL_PRODUCT_EC21 0x0121
242#define QUECTEL_PRODUCT_EC25 0x0125 242#define QUECTEL_PRODUCT_EC25 0x0125
243#define QUECTEL_PRODUCT_BG96 0x0296 243#define QUECTEL_PRODUCT_BG96 0x0296
244#define QUECTEL_PRODUCT_EP06 0x0306
244 245
245#define CMOTECH_VENDOR_ID 0x16d8 246#define CMOTECH_VENDOR_ID 0x16d8
246#define CMOTECH_PRODUCT_6001 0x6001 247#define CMOTECH_PRODUCT_6001 0x6001
@@ -689,6 +690,10 @@ static const struct option_blacklist_info yuga_clm920_nc5_blacklist = {
689 .reserved = BIT(1) | BIT(4), 690 .reserved = BIT(1) | BIT(4),
690}; 691};
691 692
693static const struct option_blacklist_info quectel_ep06_blacklist = {
694 .reserved = BIT(4) | BIT(5),
695};
696
692static const struct usb_device_id option_ids[] = { 697static const struct usb_device_id option_ids[] = {
693 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, 698 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
694 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, 699 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1203,6 +1208,8 @@ static const struct usb_device_id option_ids[] = {
1203 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1208 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1204 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), 1209 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
1205 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1210 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1211 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06),
1212 .driver_info = (kernel_ulong_t)&quectel_ep06_blacklist },
1206 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, 1213 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
1207 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, 1214 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
1208 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), 1215 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index 49e552472c3f..dd8ef36ab10e 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -73,6 +73,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
73 goto err; 73 goto err;
74 74
75 sdev->ud.tcp_socket = socket; 75 sdev->ud.tcp_socket = socket;
76 sdev->ud.sockfd = sockfd;
76 77
77 spin_unlock_irq(&sdev->ud.lock); 78 spin_unlock_irq(&sdev->ud.lock);
78 79
@@ -172,6 +173,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
172 if (ud->tcp_socket) { 173 if (ud->tcp_socket) {
173 sockfd_put(ud->tcp_socket); 174 sockfd_put(ud->tcp_socket);
174 ud->tcp_socket = NULL; 175 ud->tcp_socket = NULL;
176 ud->sockfd = -1;
175 } 177 }
176 178
177 /* 3. free used data */ 179 /* 3. free used data */
@@ -266,6 +268,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev)
266 sdev->ud.status = SDEV_ST_AVAILABLE; 268 sdev->ud.status = SDEV_ST_AVAILABLE;
267 spin_lock_init(&sdev->ud.lock); 269 spin_lock_init(&sdev->ud.lock);
268 sdev->ud.tcp_socket = NULL; 270 sdev->ud.tcp_socket = NULL;
271 sdev->ud.sockfd = -1;
269 272
270 INIT_LIST_HEAD(&sdev->priv_init); 273 INIT_LIST_HEAD(&sdev->priv_init);
271 INIT_LIST_HEAD(&sdev->priv_tx); 274 INIT_LIST_HEAD(&sdev->priv_tx);
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index c3e1008aa491..20e3d4609583 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -984,6 +984,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
984 if (vdev->ud.tcp_socket) { 984 if (vdev->ud.tcp_socket) {
985 sockfd_put(vdev->ud.tcp_socket); 985 sockfd_put(vdev->ud.tcp_socket);
986 vdev->ud.tcp_socket = NULL; 986 vdev->ud.tcp_socket = NULL;
987 vdev->ud.sockfd = -1;
987 } 988 }
988 pr_info("release socket\n"); 989 pr_info("release socket\n");
989 990
@@ -1030,6 +1031,7 @@ static void vhci_device_reset(struct usbip_device *ud)
1030 if (ud->tcp_socket) { 1031 if (ud->tcp_socket) {
1031 sockfd_put(ud->tcp_socket); 1032 sockfd_put(ud->tcp_socket);
1032 ud->tcp_socket = NULL; 1033 ud->tcp_socket = NULL;
1034 ud->sockfd = -1;
1033 } 1035 }
1034 ud->status = VDEV_ST_NULL; 1036 ud->status = VDEV_ST_NULL;
1035 1037
diff --git a/drivers/video/fbdev/geode/video_gx.c b/drivers/video/fbdev/geode/video_gx.c
index 6082f653c68a..67773e8bbb95 100644
--- a/drivers/video/fbdev/geode/video_gx.c
+++ b/drivers/video/fbdev/geode/video_gx.c
@@ -127,7 +127,7 @@ void gx_set_dclk_frequency(struct fb_info *info)
127 int timeout = 1000; 127 int timeout = 1000;
128 128
129 /* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */ 129 /* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */
130 if (cpu_data(0).x86_mask == 1) { 130 if (cpu_data(0).x86_stepping == 1) {
131 pll_table = gx_pll_table_14MHz; 131 pll_table = gx_pll_table_14MHz;
132 pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz); 132 pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz);
133 } else { 133 } else {
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 753d9cb437d0..aedbee3b2838 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -60,6 +60,7 @@ struct sock_mapping {
60 bool active_socket; 60 bool active_socket;
61 struct list_head list; 61 struct list_head list;
62 struct socket *sock; 62 struct socket *sock;
63 atomic_t refcount;
63 union { 64 union {
64 struct { 65 struct {
65 int irq; 66 int irq;
@@ -93,6 +94,32 @@ struct sock_mapping {
93 }; 94 };
94}; 95};
95 96
97static inline struct sock_mapping *pvcalls_enter_sock(struct socket *sock)
98{
99 struct sock_mapping *map;
100
101 if (!pvcalls_front_dev ||
102 dev_get_drvdata(&pvcalls_front_dev->dev) == NULL)
103 return ERR_PTR(-ENOTCONN);
104
105 map = (struct sock_mapping *)sock->sk->sk_send_head;
106 if (map == NULL)
107 return ERR_PTR(-ENOTSOCK);
108
109 pvcalls_enter();
110 atomic_inc(&map->refcount);
111 return map;
112}
113
114static inline void pvcalls_exit_sock(struct socket *sock)
115{
116 struct sock_mapping *map;
117
118 map = (struct sock_mapping *)sock->sk->sk_send_head;
119 atomic_dec(&map->refcount);
120 pvcalls_exit();
121}
122
96static inline int get_request(struct pvcalls_bedata *bedata, int *req_id) 123static inline int get_request(struct pvcalls_bedata *bedata, int *req_id)
97{ 124{
98 *req_id = bedata->ring.req_prod_pvt & (RING_SIZE(&bedata->ring) - 1); 125 *req_id = bedata->ring.req_prod_pvt & (RING_SIZE(&bedata->ring) - 1);
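
pvcalls_enter_sock()/pvcalls_exit_sock() fold the three checks every socket operation used to open-code (frontend device present, drvdata set, a sock_mapping attached to the socket) into one helper, and additionally take a per-map refcount so teardown can tell when in-flight operations have drained. Every call site below then collapses into the same prologue/epilogue; a minimal sketch of a caller, assuming the helpers just added and living in the same file:

static int my_front_op(struct socket *sock)
{
	struct sock_mapping *map;
	int ret = 0;

	map = pvcalls_enter_sock(sock);	/* validate and take a ref */
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* ... work against map goes here ... */

	pvcalls_exit_sock(sock);	/* drop the ref taken by enter */
	return ret;
}
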
@@ -369,31 +396,23 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
369 if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM) 396 if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM)
370 return -EOPNOTSUPP; 397 return -EOPNOTSUPP;
371 398
372 pvcalls_enter(); 399 map = pvcalls_enter_sock(sock);
373 if (!pvcalls_front_dev) { 400 if (IS_ERR(map))
374 pvcalls_exit(); 401 return PTR_ERR(map);
375 return -ENOTCONN;
376 }
377 402
378 bedata = dev_get_drvdata(&pvcalls_front_dev->dev); 403 bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
379 404
380 map = (struct sock_mapping *)sock->sk->sk_send_head;
381 if (!map) {
382 pvcalls_exit();
383 return -ENOTSOCK;
384 }
385
386 spin_lock(&bedata->socket_lock); 405 spin_lock(&bedata->socket_lock);
387 ret = get_request(bedata, &req_id); 406 ret = get_request(bedata, &req_id);
388 if (ret < 0) { 407 if (ret < 0) {
389 spin_unlock(&bedata->socket_lock); 408 spin_unlock(&bedata->socket_lock);
390 pvcalls_exit(); 409 pvcalls_exit_sock(sock);
391 return ret; 410 return ret;
392 } 411 }
393 ret = create_active(map, &evtchn); 412 ret = create_active(map, &evtchn);
394 if (ret < 0) { 413 if (ret < 0) {
395 spin_unlock(&bedata->socket_lock); 414 spin_unlock(&bedata->socket_lock);
396 pvcalls_exit(); 415 pvcalls_exit_sock(sock);
397 return ret; 416 return ret;
398 } 417 }
399 418
@@ -423,7 +442,7 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
423 smp_rmb(); 442 smp_rmb();
424 ret = bedata->rsp[req_id].ret; 443 ret = bedata->rsp[req_id].ret;
425 bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID; 444 bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
426 pvcalls_exit(); 445 pvcalls_exit_sock(sock);
427 return ret; 446 return ret;
428} 447}
429 448
@@ -488,23 +507,15 @@ int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg,
488 if (flags & (MSG_CONFIRM|MSG_DONTROUTE|MSG_EOR|MSG_OOB)) 507 if (flags & (MSG_CONFIRM|MSG_DONTROUTE|MSG_EOR|MSG_OOB))
489 return -EOPNOTSUPP; 508 return -EOPNOTSUPP;
490 509
491 pvcalls_enter(); 510 map = pvcalls_enter_sock(sock);
492 if (!pvcalls_front_dev) { 511 if (IS_ERR(map))
493 pvcalls_exit(); 512 return PTR_ERR(map);
494 return -ENOTCONN;
495 }
496 bedata = dev_get_drvdata(&pvcalls_front_dev->dev); 513 bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
497 514
498 map = (struct sock_mapping *) sock->sk->sk_send_head;
499 if (!map) {
500 pvcalls_exit();
501 return -ENOTSOCK;
502 }
503
504 mutex_lock(&map->active.out_mutex); 515 mutex_lock(&map->active.out_mutex);
505 if ((flags & MSG_DONTWAIT) && !pvcalls_front_write_todo(map)) { 516 if ((flags & MSG_DONTWAIT) && !pvcalls_front_write_todo(map)) {
506 mutex_unlock(&map->active.out_mutex); 517 mutex_unlock(&map->active.out_mutex);
507 pvcalls_exit(); 518 pvcalls_exit_sock(sock);
508 return -EAGAIN; 519 return -EAGAIN;
509 } 520 }
510 if (len > INT_MAX) 521 if (len > INT_MAX)
@@ -526,7 +537,7 @@ again:
526 tot_sent = sent; 537 tot_sent = sent;
527 538
528 mutex_unlock(&map->active.out_mutex); 539 mutex_unlock(&map->active.out_mutex);
529 pvcalls_exit(); 540 pvcalls_exit_sock(sock);
530 return tot_sent; 541 return tot_sent;
531} 542}
532 543
@@ -591,19 +602,11 @@ int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
591 if (flags & (MSG_CMSG_CLOEXEC|MSG_ERRQUEUE|MSG_OOB|MSG_TRUNC)) 602 if (flags & (MSG_CMSG_CLOEXEC|MSG_ERRQUEUE|MSG_OOB|MSG_TRUNC))
592 return -EOPNOTSUPP; 603 return -EOPNOTSUPP;
593 604
594 pvcalls_enter(); 605 map = pvcalls_enter_sock(sock);
595 if (!pvcalls_front_dev) { 606 if (IS_ERR(map))
596 pvcalls_exit(); 607 return PTR_ERR(map);
597 return -ENOTCONN;
598 }
599 bedata = dev_get_drvdata(&pvcalls_front_dev->dev); 608 bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
600 609
601 map = (struct sock_mapping *) sock->sk->sk_send_head;
602 if (!map) {
603 pvcalls_exit();
604 return -ENOTSOCK;
605 }
606
607 mutex_lock(&map->active.in_mutex); 610 mutex_lock(&map->active.in_mutex);
608 if (len > XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER)) 611 if (len > XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER))
609 len = XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER); 612 len = XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
@@ -623,7 +626,7 @@ int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
623 ret = 0; 626 ret = 0;
624 627
625 mutex_unlock(&map->active.in_mutex); 628 mutex_unlock(&map->active.in_mutex);
626 pvcalls_exit(); 629 pvcalls_exit_sock(sock);
627 return ret; 630 return ret;
628} 631}
629 632
@@ -637,24 +640,16 @@ int pvcalls_front_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
637 if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM) 640 if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM)
638 return -EOPNOTSUPP; 641 return -EOPNOTSUPP;
639 642
640 pvcalls_enter(); 643 map = pvcalls_enter_sock(sock);
641 if (!pvcalls_front_dev) { 644 if (IS_ERR(map))
642 pvcalls_exit(); 645 return PTR_ERR(map);
643 return -ENOTCONN;
644 }
645 bedata = dev_get_drvdata(&pvcalls_front_dev->dev); 646 bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
646 647
647 map = (struct sock_mapping *) sock->sk->sk_send_head;
648 if (map == NULL) {
649 pvcalls_exit();
650 return -ENOTSOCK;
651 }
652
653 spin_lock(&bedata->socket_lock); 648 spin_lock(&bedata->socket_lock);
654 ret = get_request(bedata, &req_id); 649 ret = get_request(bedata, &req_id);
655 if (ret < 0) { 650 if (ret < 0) {
656 spin_unlock(&bedata->socket_lock); 651 spin_unlock(&bedata->socket_lock);
657 pvcalls_exit(); 652 pvcalls_exit_sock(sock);
658 return ret; 653 return ret;
659 } 654 }
660 req = RING_GET_REQUEST(&bedata->ring, req_id); 655 req = RING_GET_REQUEST(&bedata->ring, req_id);
@@ -684,7 +679,7 @@ int pvcalls_front_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
684 bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID; 679 bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
685 680
686 map->passive.status = PVCALLS_STATUS_BIND; 681 map->passive.status = PVCALLS_STATUS_BIND;
687 pvcalls_exit(); 682 pvcalls_exit_sock(sock);
688 return 0; 683 return 0;
689} 684}
690 685
@@ -695,21 +690,13 @@ int pvcalls_front_listen(struct socket *sock, int backlog)
695 struct xen_pvcalls_request *req; 690 struct xen_pvcalls_request *req;
696 int notify, req_id, ret; 691 int notify, req_id, ret;
697 692
698 pvcalls_enter(); 693 map = pvcalls_enter_sock(sock);
699 if (!pvcalls_front_dev) { 694 if (IS_ERR(map))
700 pvcalls_exit(); 695 return PTR_ERR(map);
701 return -ENOTCONN;
702 }
703 bedata = dev_get_drvdata(&pvcalls_front_dev->dev); 696 bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
704 697
705 map = (struct sock_mapping *) sock->sk->sk_send_head;
706 if (!map) {
707 pvcalls_exit();
708 return -ENOTSOCK;
709 }
710
711 if (map->passive.status != PVCALLS_STATUS_BIND) { 698 if (map->passive.status != PVCALLS_STATUS_BIND) {
712 pvcalls_exit(); 699 pvcalls_exit_sock(sock);
713 return -EOPNOTSUPP; 700 return -EOPNOTSUPP;
714 } 701 }
715 702
@@ -717,7 +704,7 @@ int pvcalls_front_listen(struct socket *sock, int backlog)
717 ret = get_request(bedata, &req_id); 704 ret = get_request(bedata, &req_id);
718 if (ret < 0) { 705 if (ret < 0) {
719 spin_unlock(&bedata->socket_lock); 706 spin_unlock(&bedata->socket_lock);
720 pvcalls_exit(); 707 pvcalls_exit_sock(sock);
721 return ret; 708 return ret;
722 } 709 }
723 req = RING_GET_REQUEST(&bedata->ring, req_id); 710 req = RING_GET_REQUEST(&bedata->ring, req_id);
@@ -741,7 +728,7 @@ int pvcalls_front_listen(struct socket *sock, int backlog)
741 bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID; 728 bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
742 729
743 map->passive.status = PVCALLS_STATUS_LISTEN; 730 map->passive.status = PVCALLS_STATUS_LISTEN;
744 pvcalls_exit(); 731 pvcalls_exit_sock(sock);
745 return ret; 732 return ret;
746} 733}
747 734
@@ -753,21 +740,13 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
753 struct xen_pvcalls_request *req; 740 struct xen_pvcalls_request *req;
754 int notify, req_id, ret, evtchn, nonblock; 741 int notify, req_id, ret, evtchn, nonblock;
755 742
756 pvcalls_enter(); 743 map = pvcalls_enter_sock(sock);
757 if (!pvcalls_front_dev) { 744 if (IS_ERR(map))
758 pvcalls_exit(); 745 return PTR_ERR(map);
759 return -ENOTCONN;
760 }
761 bedata = dev_get_drvdata(&pvcalls_front_dev->dev); 746 bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
762 747
763 map = (struct sock_mapping *) sock->sk->sk_send_head;
764 if (!map) {
765 pvcalls_exit();
766 return -ENOTSOCK;
767 }
768
769 if (map->passive.status != PVCALLS_STATUS_LISTEN) { 748 if (map->passive.status != PVCALLS_STATUS_LISTEN) {
770 pvcalls_exit(); 749 pvcalls_exit_sock(sock);
771 return -EINVAL; 750 return -EINVAL;
772 } 751 }
773 752
@@ -785,13 +764,13 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
785 goto received; 764 goto received;
786 } 765 }
787 if (nonblock) { 766 if (nonblock) {
788 pvcalls_exit(); 767 pvcalls_exit_sock(sock);
789 return -EAGAIN; 768 return -EAGAIN;
790 } 769 }
791 if (wait_event_interruptible(map->passive.inflight_accept_req, 770 if (wait_event_interruptible(map->passive.inflight_accept_req,
792 !test_and_set_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, 771 !test_and_set_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
793 (void *)&map->passive.flags))) { 772 (void *)&map->passive.flags))) {
794 pvcalls_exit(); 773 pvcalls_exit_sock(sock);
795 return -EINTR; 774 return -EINTR;
796 } 775 }
797 } 776 }
@@ -802,7 +781,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
802 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, 781 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
803 (void *)&map->passive.flags); 782 (void *)&map->passive.flags);
804 spin_unlock(&bedata->socket_lock); 783 spin_unlock(&bedata->socket_lock);
805 pvcalls_exit(); 784 pvcalls_exit_sock(sock);
806 return ret; 785 return ret;
807 } 786 }
808 map2 = kzalloc(sizeof(*map2), GFP_ATOMIC); 787 map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
@@ -810,7 +789,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
810 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, 789 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
811 (void *)&map->passive.flags); 790 (void *)&map->passive.flags);
812 spin_unlock(&bedata->socket_lock); 791 spin_unlock(&bedata->socket_lock);
813 pvcalls_exit(); 792 pvcalls_exit_sock(sock);
814 return -ENOMEM; 793 return -ENOMEM;
815 } 794 }
816 ret = create_active(map2, &evtchn); 795 ret = create_active(map2, &evtchn);
@@ -819,7 +798,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
819 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, 798 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
820 (void *)&map->passive.flags); 799 (void *)&map->passive.flags);
821 spin_unlock(&bedata->socket_lock); 800 spin_unlock(&bedata->socket_lock);
822 pvcalls_exit(); 801 pvcalls_exit_sock(sock);
823 return ret; 802 return ret;
824 } 803 }
825 list_add_tail(&map2->list, &bedata->socket_mappings); 804 list_add_tail(&map2->list, &bedata->socket_mappings);
@@ -841,13 +820,13 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
841 /* We could check if we have received a response before returning. */ 820 /* We could check if we have received a response before returning. */
842 if (nonblock) { 821 if (nonblock) {
843 WRITE_ONCE(map->passive.inflight_req_id, req_id); 822 WRITE_ONCE(map->passive.inflight_req_id, req_id);
844 pvcalls_exit(); 823 pvcalls_exit_sock(sock);
845 return -EAGAIN; 824 return -EAGAIN;
846 } 825 }
847 826
848 if (wait_event_interruptible(bedata->inflight_req, 827 if (wait_event_interruptible(bedata->inflight_req,
849 READ_ONCE(bedata->rsp[req_id].req_id) == req_id)) { 828 READ_ONCE(bedata->rsp[req_id].req_id) == req_id)) {
850 pvcalls_exit(); 829 pvcalls_exit_sock(sock);
851 return -EINTR; 830 return -EINTR;
852 } 831 }
853 /* read req_id, then the content */ 832 /* read req_id, then the content */
@@ -862,7 +841,7 @@ received:
862 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, 841 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
863 (void *)&map->passive.flags); 842 (void *)&map->passive.flags);
864 pvcalls_front_free_map(bedata, map2); 843 pvcalls_front_free_map(bedata, map2);
865 pvcalls_exit(); 844 pvcalls_exit_sock(sock);
866 return -ENOMEM; 845 return -ENOMEM;
867 } 846 }
868 newsock->sk->sk_send_head = (void *)map2; 847 newsock->sk->sk_send_head = (void *)map2;
@@ -874,7 +853,7 @@ received:
874 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, (void *)&map->passive.flags); 853 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, (void *)&map->passive.flags);
875 wake_up(&map->passive.inflight_accept_req); 854 wake_up(&map->passive.inflight_accept_req);
876 855
877 pvcalls_exit(); 856 pvcalls_exit_sock(sock);
878 return ret; 857 return ret;
879} 858}
880 859
@@ -965,23 +944,16 @@ __poll_t pvcalls_front_poll(struct file *file, struct socket *sock,
965 struct sock_mapping *map; 944 struct sock_mapping *map;
966 __poll_t ret; 945 __poll_t ret;
967 946
968 pvcalls_enter(); 947 map = pvcalls_enter_sock(sock);
969 if (!pvcalls_front_dev) { 948 if (IS_ERR(map))
970 pvcalls_exit();
971 return EPOLLNVAL; 949 return EPOLLNVAL;
972 }
973 bedata = dev_get_drvdata(&pvcalls_front_dev->dev); 950 bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
974 951
975 map = (struct sock_mapping *) sock->sk->sk_send_head;
976 if (!map) {
977 pvcalls_exit();
978 return EPOLLNVAL;
979 }
980 if (map->active_socket) 952 if (map->active_socket)
981 ret = pvcalls_front_poll_active(file, bedata, map, wait); 953 ret = pvcalls_front_poll_active(file, bedata, map, wait);
982 else 954 else
983 ret = pvcalls_front_poll_passive(file, bedata, map, wait); 955 ret = pvcalls_front_poll_passive(file, bedata, map, wait);
984 pvcalls_exit(); 956 pvcalls_exit_sock(sock);
985 return ret; 957 return ret;
986} 958}
987 959
@@ -995,25 +967,20 @@ int pvcalls_front_release(struct socket *sock)
995 if (sock->sk == NULL) 967 if (sock->sk == NULL)
996 return 0; 968 return 0;
997 969
998 pvcalls_enter(); 970 map = pvcalls_enter_sock(sock);
999 if (!pvcalls_front_dev) { 971 if (IS_ERR(map)) {
1000 pvcalls_exit(); 972 if (PTR_ERR(map) == -ENOTCONN)
1001 return -EIO; 973 return -EIO;
974 else
975 return 0;
1002 } 976 }
1003
1004 bedata = dev_get_drvdata(&pvcalls_front_dev->dev); 977 bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
1005 978
1006 map = (struct sock_mapping *) sock->sk->sk_send_head;
1007 if (map == NULL) {
1008 pvcalls_exit();
1009 return 0;
1010 }
1011
1012 spin_lock(&bedata->socket_lock); 979 spin_lock(&bedata->socket_lock);
1013 ret = get_request(bedata, &req_id); 980 ret = get_request(bedata, &req_id);
1014 if (ret < 0) { 981 if (ret < 0) {
1015 spin_unlock(&bedata->socket_lock); 982 spin_unlock(&bedata->socket_lock);
1016 pvcalls_exit(); 983 pvcalls_exit_sock(sock);
1017 return ret; 984 return ret;
1018 } 985 }
1019 sock->sk->sk_send_head = NULL; 986 sock->sk->sk_send_head = NULL;
@@ -1043,14 +1010,20 @@ int pvcalls_front_release(struct socket *sock)
1043 /* 1010 /*
1044 * We need to make sure that sendmsg/recvmsg on this socket have 1011 * We need to make sure that sendmsg/recvmsg on this socket have
1045 * not started before we've cleared sk_send_head here. The 1012 * not started before we've cleared sk_send_head here. The
1046 * easiest (though not optimal) way to guarantee this is to see 1013 * easiest way to guarantee this is to see that no pvcall
1047 * that no pvcall (other than us) is in progress. 1014 * (other than us) is in progress on this socket.
1048 */ 1015 */
1049 while (atomic_read(&pvcalls_refcount) > 1) 1016 while (atomic_read(&map->refcount) > 1)
1050 cpu_relax(); 1017 cpu_relax();
1051 1018
1052 pvcalls_front_free_map(bedata, map); 1019 pvcalls_front_free_map(bedata, map);
1053 } else { 1020 } else {
1021 wake_up(&bedata->inflight_req);
1022 wake_up(&map->passive.inflight_accept_req);
1023
1024 while (atomic_read(&map->refcount) > 1)
1025 cpu_relax();
1026
1054 spin_lock(&bedata->socket_lock); 1027 spin_lock(&bedata->socket_lock);
1055 list_del(&map->list); 1028 list_del(&map->list);
1056 spin_unlock(&bedata->socket_lock); 1029 spin_unlock(&bedata->socket_lock);
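The recurring pvcalls_enter_sock()/pvcalls_exit_sock() substitutions in the hunks above collapse the old device check, the sk_send_head lookup, and a new per-socket reference count into one helper pair. A minimal sketch of what those helpers plausibly look like, inferred purely from the call sites shown here (the real definitions live outside this hunk, so treat the bodies as an assumption):

static inline struct sock_mapping *pvcalls_enter_sock(struct socket *sock)
{
	struct sock_mapping *map;

	if (!pvcalls_front_dev ||
	    dev_get_drvdata(&pvcalls_front_dev->dev) == NULL)
		return ERR_PTR(-ENOTCONN);

	map = (struct sock_mapping *)sock->sk->sk_send_head;
	if (map == NULL)
		return ERR_PTR(-ENOTSOCK);

	pvcalls_enter();
	atomic_inc(&map->refcount);	/* pin the mapping for this call */
	return map;
}

static inline void pvcalls_exit_sock(struct socket *sock)
{
	struct sock_mapping *map;

	map = (struct sock_mapping *)sock->sk->sk_send_head;
	atomic_dec(&map->refcount);	/* release() spins until this hits 1 */
	pvcalls_exit();
}

The per-socket count is what lets pvcalls_front_release() wait only for callers still inside this socket (the new while loops on map->refcount above) instead of stalling on the global pvcalls_refcount.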
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index bf13d1ec51f3..04e7b3b29bac 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -284,6 +284,10 @@ static int tmem_frontswap_store(unsigned type, pgoff_t offset,
284 int pool = tmem_frontswap_poolid; 284 int pool = tmem_frontswap_poolid;
285 int ret; 285 int ret;
286 286
287 /* THP isn't supported */
288 if (PageTransHuge(page))
289 return -1;
290
287 if (pool < 0) 291 if (pool < 0)
288 return -1; 292 return -1;
289 if (ind64 != ind) 293 if (ind64 != ind)
diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
index 149c5e7efc89..092981171df1 100644
--- a/drivers/xen/xenbus/xenbus.h
+++ b/drivers/xen/xenbus/xenbus.h
@@ -76,6 +76,7 @@ struct xb_req_data {
76 struct list_head list; 76 struct list_head list;
77 wait_queue_head_t wq; 77 wait_queue_head_t wq;
78 struct xsd_sockmsg msg; 78 struct xsd_sockmsg msg;
79 uint32_t caller_req_id;
79 enum xsd_sockmsg_type type; 80 enum xsd_sockmsg_type type;
80 char *body; 81 char *body;
81 const struct kvec *vec; 82 const struct kvec *vec;
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index 5b081a01779d..d239fc3c5e3d 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -309,6 +309,7 @@ static int process_msg(void)
309 goto out; 309 goto out;
310 310
311 if (req->state == xb_req_state_wait_reply) { 311 if (req->state == xb_req_state_wait_reply) {
312 req->msg.req_id = req->caller_req_id;
312 req->msg.type = state.msg.type; 313 req->msg.type = state.msg.type;
313 req->msg.len = state.msg.len; 314 req->msg.len = state.msg.len;
314 req->body = state.body; 315 req->body = state.body;
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 3e59590c7254..3f3b29398ab8 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -227,6 +227,8 @@ static void xs_send(struct xb_req_data *req, struct xsd_sockmsg *msg)
227 req->state = xb_req_state_queued; 227 req->state = xb_req_state_queued;
228 init_waitqueue_head(&req->wq); 228 init_waitqueue_head(&req->wq);
229 229
230 /* Save the caller req_id and restore it later in the reply */
231 req->caller_req_id = req->msg.req_id;
230 req->msg.req_id = xs_request_enter(req); 232 req->msg.req_id = xs_request_enter(req);
231 233
232 mutex_lock(&xb_write_mutex); 234 mutex_lock(&xb_write_mutex);
@@ -310,6 +312,7 @@ static void *xs_talkv(struct xenbus_transaction t,
310 req->num_vecs = num_vecs; 312 req->num_vecs = num_vecs;
311 req->cb = xs_wake_up; 313 req->cb = xs_wake_up;
312 314
315 msg.req_id = 0;
313 msg.tx_id = t.id; 316 msg.tx_id = t.id;
314 msg.type = type; 317 msg.type = type;
315 msg.len = 0; 318 msg.len = 0;
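Taken together, the xenbus.h, xenbus_comms.c and xenbus_xs.c hunks preserve the caller's req_id across the wire: xs_request_enter() hands out a ring-unique id for transmission, and the reply path restores the caller's original id before the caller ever looks at req->msg. A rough sketch of the round trip (error handling elided; uses the fields added above):

/* send side (xs_send) */
req->caller_req_id = req->msg.req_id;		/* remember the caller's id */
req->msg.req_id = xs_request_enter(req);	/* unique id on the ring */

/* reply side (process_msg) */
req->msg.req_id = req->caller_req_id;		/* caller sees its own id */
req->msg.type = state.msg.type;
req->msg.len = state.msg.len;

Note the msg.req_id = 0 initialization added to xs_talkv: without it the saved caller id would be uninitialized stack data.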
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index e4054e533f6d..f94b2d8c744a 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1264,7 +1264,16 @@ again:
1264 while (node) { 1264 while (node) {
1265 ref = rb_entry(node, struct prelim_ref, rbnode); 1265 ref = rb_entry(node, struct prelim_ref, rbnode);
1266 node = rb_next(&ref->rbnode); 1266 node = rb_next(&ref->rbnode);
1267 WARN_ON(ref->count < 0); 1267 /*
1268 * ref->count < 0 can happen here if there are delayed
1269 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
1270 * prelim_ref_insert() relies on this when merging
1271 * identical refs to keep the overall count correct.
1272 * prelim_ref_insert() will merge only those refs
1273 * which compare identically. Any refs having
1274 * e.g. different offsets would not be merged,
1275 * and would retain their original ref->count < 0.
1276 */
1268 if (roots && ref->count && ref->root_id && ref->parent == 0) { 1277 if (roots && ref->count && ref->root_id && ref->parent == 0) {
1269 if (sc && sc->root_objectid && 1278 if (sc && sc->root_objectid &&
1270 ref->root_id != sc->root_objectid) { 1279 ref->root_id != sc->root_objectid) {
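The new comment relies on prelim_ref_insert() summing the counts of identical refs, so a BTRFS_DROP_DELAYED_REF contribution of -1 cancels a matching +1. Schematically, the merge step looks something like this (simplified sketch; prelim_ref_compare() and free_pref() are assumed to be the existing static helpers in backref.c):

if (prelim_ref_compare(existing, newref) == 0) {
	/* identical refs merge; a drop ref's negative count nets out */
	existing->count += newref->count;
	free_pref(newref);
}

Refs that differ in any key field (e.g. offset) never merge, which is why an isolated negative count can legitimately survive to the walk above, and why the old WARN_ON fired spuriously.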
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index a1a40cf382e3..7ab5e0128f0c 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -821,7 +821,8 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
821 spin_unlock(&delayed_refs->lock); 821 spin_unlock(&delayed_refs->lock);
822 822
823 if (qrecord_inserted) 823 if (qrecord_inserted)
824 return btrfs_qgroup_trace_extent_post(fs_info, record); 824 btrfs_qgroup_trace_extent_post(fs_info, record);
825
825 return 0; 826 return 0;
826 827
827free_head_ref: 828free_head_ref:
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 05751a677da4..c1618ab9fecf 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2147,6 +2147,10 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
2147 u64 bytes; 2147 u64 bytes;
2148 struct request_queue *req_q; 2148 struct request_queue *req_q;
2149 2149
2150 if (!stripe->dev->bdev) {
2151 ASSERT(btrfs_test_opt(fs_info, DEGRADED));
2152 continue;
2153 }
2150 req_q = bdev_get_queue(stripe->dev->bdev); 2154 req_q = bdev_get_queue(stripe->dev->bdev);
2151 if (!blk_queue_discard(req_q)) 2155 if (!blk_queue_discard(req_q))
2152 continue; 2156 continue;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 53ca025655fc..a79299a89b7d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1335,8 +1335,11 @@ next_slot:
1335 leaf = path->nodes[0]; 1335 leaf = path->nodes[0];
1336 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 1336 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1337 ret = btrfs_next_leaf(root, path); 1337 ret = btrfs_next_leaf(root, path);
1338 if (ret < 0) 1338 if (ret < 0) {
1339 if (cow_start != (u64)-1)
1340 cur_offset = cow_start;
1339 goto error; 1341 goto error;
1342 }
1340 if (ret > 0) 1343 if (ret > 0)
1341 break; 1344 break;
1342 leaf = path->nodes[0]; 1345 leaf = path->nodes[0];
@@ -3385,6 +3388,11 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans,
3385 ret = btrfs_orphan_reserve_metadata(trans, inode); 3388 ret = btrfs_orphan_reserve_metadata(trans, inode);
3386 ASSERT(!ret); 3389 ASSERT(!ret);
3387 if (ret) { 3390 if (ret) {
3391 /*
3392 * dec doesn't need spin_lock as ->orphan_block_rsv
3393 * would be released only if ->orphan_inodes is
3394 * zero.
3395 */
3388 atomic_dec(&root->orphan_inodes); 3396 atomic_dec(&root->orphan_inodes);
3389 clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, 3397 clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3390 &inode->runtime_flags); 3398 &inode->runtime_flags);
@@ -3399,12 +3407,17 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans,
3399 if (insert >= 1) { 3407 if (insert >= 1) {
3400 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode)); 3408 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
3401 if (ret) { 3409 if (ret) {
3402 atomic_dec(&root->orphan_inodes);
3403 if (reserve) { 3410 if (reserve) {
3404 clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, 3411 clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3405 &inode->runtime_flags); 3412 &inode->runtime_flags);
3406 btrfs_orphan_release_metadata(inode); 3413 btrfs_orphan_release_metadata(inode);
3407 } 3414 }
3415 /*
3416 * btrfs_orphan_commit_root may race with us and set
3417 * ->orphan_block_rsv to zero, in order to avoid that,
3418 * decrease ->orphan_inodes after everything is done.
3419 */
3420 atomic_dec(&root->orphan_inodes);
3408 if (ret != -EEXIST) { 3421 if (ret != -EEXIST) {
3409 clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 3422 clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3410 &inode->runtime_flags); 3423 &inode->runtime_flags);
@@ -3436,28 +3449,26 @@ static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3436{ 3449{
3437 struct btrfs_root *root = inode->root; 3450 struct btrfs_root *root = inode->root;
3438 int delete_item = 0; 3451 int delete_item = 0;
3439 int release_rsv = 0;
3440 int ret = 0; 3452 int ret = 0;
3441 3453
3442 spin_lock(&root->orphan_lock);
3443 if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 3454 if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3444 &inode->runtime_flags)) 3455 &inode->runtime_flags))
3445 delete_item = 1; 3456 delete_item = 1;
3446 3457
3458 if (delete_item && trans)
3459 ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
3460
3447 if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, 3461 if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3448 &inode->runtime_flags)) 3462 &inode->runtime_flags))
3449 release_rsv = 1; 3463 btrfs_orphan_release_metadata(inode);
3450 spin_unlock(&root->orphan_lock);
3451 3464
3452 if (delete_item) { 3465 /*
3466 * btrfs_orphan_commit_root may race with us and set ->orphan_block_rsv
3467 * to zero, in order to avoid that, decrease ->orphan_inodes after
3468 * everything is done.
3469 */
3470 if (delete_item)
3453 atomic_dec(&root->orphan_inodes); 3471 atomic_dec(&root->orphan_inodes);
3454 if (trans)
3455 ret = btrfs_del_orphan_item(trans, root,
3456 btrfs_ino(inode));
3457 }
3458
3459 if (release_rsv)
3460 btrfs_orphan_release_metadata(inode);
3461 3472
3462 return ret; 3473 return ret;
3463} 3474}
@@ -5281,7 +5292,7 @@ void btrfs_evict_inode(struct inode *inode)
5281 trace_btrfs_inode_evict(inode); 5292 trace_btrfs_inode_evict(inode);
5282 5293
5283 if (!root) { 5294 if (!root) {
5284 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 5295 clear_inode(inode);
5285 return; 5296 return;
5286 } 5297 }
5287 5298
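The two orphan hunks in inode.c enforce the same ordering rule: root->orphan_inodes must only be decremented once the code is completely done touching root->orphan_block_rsv, because btrfs_orphan_commit_root() may free that reservation as soon as the counter reaches zero. The hazard, sketched as a hypothetical interleaving:

/* CPU A: btrfs_orphan_del()            CPU B: btrfs_orphan_commit_root()
 *
 *   atomic_dec(&root->orphan_inodes)   counter now 0
 *                                      sees 0, frees orphan_block_rsv
 *   btrfs_orphan_release_metadata()    uses freed orphan_block_rsv
 *
 * Moving the atomic_dec() after the item deletion and the metadata
 * release closes the window.
 */

The btrfs_orphan_del() rewrite can also drop root->orphan_lock around the two test_and_clear_bit() calls, since those are atomic on their own.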
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 9e61dd624f7b..aa259d6986e1 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1442,8 +1442,13 @@ int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
1442 int ret; 1442 int ret;
1443 1443
1444 ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false); 1444 ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false);
1445 if (ret < 0) 1445 if (ret < 0) {
1446 return ret; 1446 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1447 btrfs_warn(fs_info,
1448"error accounting new delayed refs extent (err code: %d), quota inconsistent",
1449 ret);
1450 return 0;
1451 }
1447 1452
1448 /* 1453 /*
1449 * Here we don't need to get the lock of 1454 * Here we don't need to get the lock of
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index afadaadab18e..4fd19b4d6675 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -29,6 +29,7 @@
29#include "hash.h" 29#include "hash.h"
30#include "compression.h" 30#include "compression.h"
31#include "qgroup.h" 31#include "qgroup.h"
32#include "inode-map.h"
32 33
33/* magic values for the inode_only field in btrfs_log_inode: 34/* magic values for the inode_only field in btrfs_log_inode:
34 * 35 *
@@ -2472,6 +2473,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2472 clean_tree_block(fs_info, next); 2473 clean_tree_block(fs_info, next);
2473 btrfs_wait_tree_block_writeback(next); 2474 btrfs_wait_tree_block_writeback(next);
2474 btrfs_tree_unlock(next); 2475 btrfs_tree_unlock(next);
2476 } else {
2477 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2478 clear_extent_buffer_dirty(next);
2475 } 2479 }
2476 2480
2477 WARN_ON(root_owner != 2481 WARN_ON(root_owner !=
@@ -2552,6 +2556,9 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2552 clean_tree_block(fs_info, next); 2556 clean_tree_block(fs_info, next);
2553 btrfs_wait_tree_block_writeback(next); 2557 btrfs_wait_tree_block_writeback(next);
2554 btrfs_tree_unlock(next); 2558 btrfs_tree_unlock(next);
2559 } else {
2560 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2561 clear_extent_buffer_dirty(next);
2555 } 2562 }
2556 2563
2557 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID); 2564 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
@@ -2630,6 +2637,9 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
2630 clean_tree_block(fs_info, next); 2637 clean_tree_block(fs_info, next);
2631 btrfs_wait_tree_block_writeback(next); 2638 btrfs_wait_tree_block_writeback(next);
2632 btrfs_tree_unlock(next); 2639 btrfs_tree_unlock(next);
2640 } else {
2641 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2642 clear_extent_buffer_dirty(next);
2633 } 2643 }
2634 2644
2635 WARN_ON(log->root_key.objectid != 2645 WARN_ON(log->root_key.objectid !=
@@ -3018,13 +3028,14 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
3018 3028
3019 while (1) { 3029 while (1) {
3020 ret = find_first_extent_bit(&log->dirty_log_pages, 3030 ret = find_first_extent_bit(&log->dirty_log_pages,
3021 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW, 3031 0, &start, &end,
3032 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT,
3022 NULL); 3033 NULL);
3023 if (ret) 3034 if (ret)
3024 break; 3035 break;
3025 3036
3026 clear_extent_bits(&log->dirty_log_pages, start, end, 3037 clear_extent_bits(&log->dirty_log_pages, start, end,
3027 EXTENT_DIRTY | EXTENT_NEW); 3038 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
3028 } 3039 }
3029 3040
3030 /* 3041 /*
@@ -5677,6 +5688,23 @@ again:
5677 path); 5688 path);
5678 } 5689 }
5679 5690
5691 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
5692 struct btrfs_root *root = wc.replay_dest;
5693
5694 btrfs_release_path(path);
5695
5696 /*
5697 * We have just replayed everything, and the highest
5698 * objectid of fs roots probably has changed in case
5699 * some inode_item's got replayed.
5700 * some inode_items got replayed.
5701 * root->objectid_mutex is not acquired as log replay
5702 * could only happen during mount.
5703 */
5704 ret = btrfs_find_highest_objectid(root,
5705 &root->highest_objectid);
5706 }
5707
5680 key.offset = found_key.offset - 1; 5708 key.offset = found_key.offset - 1;
5681 wc.replay_dest->log_root = NULL; 5709 wc.replay_dest->log_root = NULL;
5682 free_extent_buffer(log->node); 5710 free_extent_buffer(log->node);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index b5036bd69e6a..2ceb924ca0d6 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -645,6 +645,7 @@ static void btrfs_free_stale_devices(const char *path,
645 btrfs_sysfs_remove_fsid(fs_devs); 645 btrfs_sysfs_remove_fsid(fs_devs);
646 list_del(&fs_devs->list); 646 list_del(&fs_devs->list);
647 free_fs_devices(fs_devs); 647 free_fs_devices(fs_devs);
648 break;
648 } else { 649 } else {
649 fs_devs->num_devices--; 650 fs_devs->num_devices--;
650 list_del(&dev->dev_list); 651 list_del(&dev->dev_list);
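The added break matters because free_fs_devices(fs_devs) frees the structure that anchors the list being walked; one more loop iteration would read freed memory. The general shape of the pattern (list_for_each_entry_safe() protects against deleting the current node, not against freeing the list head; the condition below is simplified relative to the real loop):

list_for_each_entry_safe(dev, tmp, &fs_devs->devices, dev_list) {
	if (fs_devs->num_devices == 1) {	/* last device went away */
		list_del(&fs_devs->list);
		free_fs_devices(fs_devs);	/* frees the list head too */
		break;				/* must not advance again */
	}
	/* ... otherwise unlink just this one device ... */
}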
diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
index 5f22e74bbade..8e568428c88b 100644
--- a/fs/efivarfs/file.c
+++ b/fs/efivarfs/file.c
@@ -8,6 +8,7 @@
8 */ 8 */
9 9
10#include <linux/efi.h> 10#include <linux/efi.h>
11#include <linux/delay.h>
11#include <linux/fs.h> 12#include <linux/fs.h>
12#include <linux/slab.h> 13#include <linux/slab.h>
13#include <linux/mount.h> 14#include <linux/mount.h>
@@ -74,6 +75,11 @@ static ssize_t efivarfs_file_read(struct file *file, char __user *userbuf,
74 ssize_t size = 0; 75 ssize_t size = 0;
75 int err; 76 int err;
76 77
78 while (!__ratelimit(&file->f_cred->user->ratelimit)) {
79 if (!msleep_interruptible(50))
80 return -EINTR;
81 }
82
77 err = efivar_entry_size(var, &datasize); 83 err = efivar_entry_size(var, &datasize);
78 84
79 /* 85 /*
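For context on the __ratelimit() loop added above: __ratelimit() returns nonzero while the caller is inside its rate budget, and msleep_interruptible() returns the number of milliseconds remaining when a signal cuts the sleep short (0 after a full sleep). A self-contained usage sketch with a hypothetical per-module limit of 100 events per second:

static DEFINE_RATELIMIT_STATE(read_rs, HZ, 100);

static int throttle_read(void)
{
	while (!__ratelimit(&read_rs)) {	/* over budget: back off */
		if (msleep_interruptible(50))	/* >0 means a signal arrived */
			return -EINTR;
	}
	return 0;
}

The patch hooks the state off file->f_cred->user instead, so the budget is shared across everything one user does; the matching user_struct field appears in the sched/user.h hunk later in this diff.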
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 86863792f36a..86d6a4435c87 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -716,7 +716,7 @@ int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
716 __be64 *ptr; 716 __be64 *ptr;
717 sector_t lblock; 717 sector_t lblock;
718 sector_t lend; 718 sector_t lend;
719 int ret; 719 int ret = 0;
720 int eob; 720 int eob;
721 unsigned int len; 721 unsigned int len;
722 struct buffer_head *bh; 722 struct buffer_head *bh;
@@ -728,12 +728,14 @@ int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
728 goto out; 728 goto out;
729 } 729 }
730 730
731 if ((flags & IOMAP_REPORT) && gfs2_is_stuffed(ip)) { 731 if (gfs2_is_stuffed(ip)) {
732 gfs2_stuffed_iomap(inode, iomap); 732 if (flags & IOMAP_REPORT) {
733 if (pos >= iomap->length) 733 gfs2_stuffed_iomap(inode, iomap);
734 return -ENOENT; 734 if (pos >= iomap->length)
735 ret = 0; 735 ret = -ENOENT;
736 goto out; 736 goto out;
737 }
738 BUG_ON(!(flags & IOMAP_WRITE));
737 } 739 }
738 740
739 lblock = pos >> inode->i_blkbits; 741 lblock = pos >> inode->i_blkbits;
@@ -744,7 +746,7 @@ int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
744 iomap->type = IOMAP_HOLE; 746 iomap->type = IOMAP_HOLE;
745 iomap->length = (u64)(lend - lblock) << inode->i_blkbits; 747 iomap->length = (u64)(lend - lblock) << inode->i_blkbits;
746 iomap->flags = IOMAP_F_MERGED; 748 iomap->flags = IOMAP_F_MERGED;
747 bmap_lock(ip, 0); 749 bmap_lock(ip, flags & IOMAP_WRITE);
748 750
749 /* 751 /*
750 * Directory data blocks have a struct gfs2_meta_header header, so the 752 * Directory data blocks have a struct gfs2_meta_header header, so the
@@ -787,27 +789,28 @@ int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
787 iomap->flags |= IOMAP_F_BOUNDARY; 789 iomap->flags |= IOMAP_F_BOUNDARY;
788 iomap->length = (u64)len << inode->i_blkbits; 790 iomap->length = (u64)len << inode->i_blkbits;
789 791
790 ret = 0;
791
792out_release: 792out_release:
793 release_metapath(&mp); 793 release_metapath(&mp);
794 bmap_unlock(ip, 0); 794 bmap_unlock(ip, flags & IOMAP_WRITE);
795out: 795out:
796 trace_gfs2_iomap_end(ip, iomap, ret); 796 trace_gfs2_iomap_end(ip, iomap, ret);
797 return ret; 797 return ret;
798 798
799do_alloc: 799do_alloc:
800 if (!(flags & IOMAP_WRITE)) { 800 if (flags & IOMAP_WRITE) {
801 if (pos >= i_size_read(inode)) { 801 ret = gfs2_iomap_alloc(inode, iomap, flags, &mp);
802 } else if (flags & IOMAP_REPORT) {
803 loff_t size = i_size_read(inode);
804 if (pos >= size)
802 ret = -ENOENT; 805 ret = -ENOENT;
803 goto out_release; 806 else if (height <= ip->i_height)
804 } 807 iomap->length = hole_size(inode, lblock, &mp);
805 ret = 0; 808 else
806 iomap->length = hole_size(inode, lblock, &mp); 809 iomap->length = size - pos;
807 goto out_release; 810 } else {
811 if (height <= ip->i_height)
812 iomap->length = hole_size(inode, lblock, &mp);
808 } 813 }
809
810 ret = gfs2_iomap_alloc(inode, iomap, flags, &mp);
811 goto out_release; 814 goto out_release;
812} 815}
813 816
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 2435af56b87e..a50d7813e3ea 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -572,7 +572,7 @@ out:
572} 572}
573 573
574static bool 574static bool
575validate_bitmap_values(unsigned long mask) 575validate_bitmap_values(unsigned int mask)
576{ 576{
577 return (mask & ~RCA4_TYPE_MASK_ALL) == 0; 577 return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
578} 578}
@@ -596,17 +596,15 @@ __be32 nfs4_callback_recallany(void *argp, void *resp,
596 goto out; 596 goto out;
597 597
598 status = cpu_to_be32(NFS4_OK); 598 status = cpu_to_be32(NFS4_OK);
599 if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *) 599 if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_RDATA_DLG))
600 &args->craa_type_mask))
601 flags = FMODE_READ; 600 flags = FMODE_READ;
602 if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *) 601 if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_WDATA_DLG))
603 &args->craa_type_mask))
604 flags |= FMODE_WRITE; 602 flags |= FMODE_WRITE;
605 if (test_bit(RCA4_TYPE_MASK_FILE_LAYOUT, (const unsigned long *)
606 &args->craa_type_mask))
607 pnfs_recall_all_layouts(cps->clp);
608 if (flags) 603 if (flags)
609 nfs_expire_unused_delegation_types(cps->clp, flags); 604 nfs_expire_unused_delegation_types(cps->clp, flags);
605
606 if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT))
607 pnfs_recall_all_layouts(cps->clp);
610out: 608out:
611 dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); 609 dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
612 return status; 610 return status;
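The recallany rework replaces test_bit() on a cast pointer with plain mask arithmetic: craa_type_mask is a 32-bit field, and casting its address to unsigned long * makes test_bit() read past the field on 64-bit kernels (and index the wrong bits on big-endian ones). The mask form has no such trap:

/* BIT(n) expands to (1UL << (n)); safe on any integer-typed mask */
static bool recall_type_set(unsigned int mask, unsigned int type)
{
	return mask & BIT(type);
}

The validate_bitmap_values() parameter change from unsigned long to unsigned int follows from the same observation: the value being checked was never wider than 32 bits.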
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 49f848fd1f04..7327930ad970 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -873,7 +873,7 @@ static void nfs3_nlm_release_call(void *data)
873 } 873 }
874} 874}
875 875
876const struct nlmclnt_operations nlmclnt_fl_close_lock_ops = { 876static const struct nlmclnt_operations nlmclnt_fl_close_lock_ops = {
877 .nlmclnt_alloc_call = nfs3_nlm_alloc_call, 877 .nlmclnt_alloc_call = nfs3_nlm_alloc_call,
878 .nlmclnt_unlock_prepare = nfs3_nlm_unlock_prepare, 878 .nlmclnt_unlock_prepare = nfs3_nlm_unlock_prepare,
879 .nlmclnt_release_call = nfs3_nlm_release_call, 879 .nlmclnt_release_call = nfs3_nlm_release_call,
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 04612c24d394..979631411a0e 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -868,8 +868,10 @@ static int nfs4_set_client(struct nfs_server *server,
868 if (IS_ERR(clp)) 868 if (IS_ERR(clp))
869 return PTR_ERR(clp); 869 return PTR_ERR(clp);
870 870
871 if (server->nfs_client == clp) 871 if (server->nfs_client == clp) {
872 nfs_put_client(clp);
872 return -ELOOP; 873 return -ELOOP;
874 }
873 875
874 /* 876 /*
875 * Query for the lease time on clientid setup or renewal 877 * Query for the lease time on clientid setup or renewal
@@ -1244,11 +1246,11 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname,
1244 clp->cl_proto, clnt->cl_timeout, 1246 clp->cl_proto, clnt->cl_timeout,
1245 clp->cl_minorversion, net); 1247 clp->cl_minorversion, net);
1246 clear_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status); 1248 clear_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status);
1247 nfs_put_client(clp);
1248 if (error != 0) { 1249 if (error != 0) {
1249 nfs_server_insert_lists(server); 1250 nfs_server_insert_lists(server);
1250 return error; 1251 return error;
1251 } 1252 }
1253 nfs_put_client(clp);
1252 1254
1253 if (server->nfs_client->cl_hostname == NULL) 1255 if (server->nfs_client->cl_hostname == NULL)
1254 server->nfs_client->cl_hostname = kstrdup(hostname, GFP_KERNEL); 1256 server->nfs_client->cl_hostname = kstrdup(hostname, GFP_KERNEL);
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index e8a93bc8285d..d1e82761de81 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -510,6 +510,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
510 /* we have to zero-fill user buffer even if no read */ 510 /* we have to zero-fill user buffer even if no read */
511 if (copy_to_user(buffer, buf, tsz)) 511 if (copy_to_user(buffer, buf, tsz))
512 return -EFAULT; 512 return -EFAULT;
513 } else if (m->type == KCORE_USER) {
514 /* User page is handled prior to normal kernel page: */
515 if (copy_to_user(buffer, (char *)start, tsz))
516 return -EFAULT;
513 } else { 517 } else {
514 if (kern_addr_valid(start)) { 518 if (kern_addr_valid(start)) {
515 /* 519 /*
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 9990957264e3..76bf9cc62074 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -118,13 +118,22 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
118 err |= __put_user(kinfo->si_trapno, &uinfo->ssi_trapno); 118 err |= __put_user(kinfo->si_trapno, &uinfo->ssi_trapno);
119#endif 119#endif
120#ifdef BUS_MCEERR_AO 120#ifdef BUS_MCEERR_AO
121 /* 121 /*
122 * Other callers might not initialize the si_lsb field,
123 * so check explicitly for the right codes here.
124 */
125 if (kinfo->si_signo == SIGBUS &&
126 kinfo->si_code == BUS_MCEERR_AO)
127 err |= __put_user((short) kinfo->si_addr_lsb,
128 &uinfo->ssi_addr_lsb);
129#endif
130#ifdef BUS_MCEERR_AR
131 /*
122 * Other callers might not initialize the si_lsb field, 132 * Other callers might not initialize the si_lsb field,
123 * so check explicitly for the right codes here. 133 * so check explicitly for the right codes here.
124 */ 134 */
125 if (kinfo->si_signo == SIGBUS && 135 if (kinfo->si_signo == SIGBUS &&
126 (kinfo->si_code == BUS_MCEERR_AR || 136 kinfo->si_code == BUS_MCEERR_AR)
127 kinfo->si_code == BUS_MCEERR_AO))
128 err |= __put_user((short) kinfo->si_addr_lsb, 137 err |= __put_user((short) kinfo->si_addr_lsb,
129 &uinfo->ssi_addr_lsb); 138 &uinfo->ssi_addr_lsb);
130#endif 139#endif
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index bc397573c43a..67ab280ad134 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -7,7 +7,8 @@
7 * @nr: Bit to set 7 * @nr: Bit to set
8 * @addr: Address to count from 8 * @addr: Address to count from
9 * 9 *
10 * This operation is atomic and provides acquire barrier semantics. 10 * This operation is atomic and provides acquire barrier semantics if
11 * the returned value is 0.
11 * It can be used to implement bit locks. 12 * It can be used to implement bit locks.
12 */ 13 */
13#define test_and_set_bit_lock(nr, addr) test_and_set_bit(nr, addr) 14#define test_and_set_bit_lock(nr, addr) test_and_set_bit(nr, addr)
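The corrected kernel-doc reflects that test_and_set_bit_lock() only orders like an acquire when it wins (returns 0); a failed attempt guarantees nothing. That is exactly the contract a spinning bit lock needs, e.g. (kernel context assumed; LOCK_BIT is a made-up bit index):

#define LOCK_BIT 0

static void bit_lock(unsigned long *word)
{
	/* acquire semantics apply on the iteration that returns 0 */
	while (test_and_set_bit_lock(LOCK_BIT, word))
		cpu_relax();
}

static void bit_unlock(unsigned long *word)
{
	clear_bit_unlock(LOCK_BIT, word);	/* release semantics */
}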
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 963b755d19b0..a7613e1b0c87 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -52,6 +52,7 @@ struct bug_entry {
52#ifndef HAVE_ARCH_BUG 52#ifndef HAVE_ARCH_BUG
53#define BUG() do { \ 53#define BUG() do { \
54 printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \ 54 printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
55 barrier_before_unreachable(); \
55 panic("BUG!"); \ 56 panic("BUG!"); \
56} while (0) 57} while (0)
57#endif 58#endif
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 1c27526c499e..cf13842a6dbd 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -134,6 +134,15 @@ struct drm_crtc_commit {
134 * &drm_pending_vblank_event pointer to clean up private events. 134 * &drm_pending_vblank_event pointer to clean up private events.
135 */ 135 */
136 struct drm_pending_vblank_event *event; 136 struct drm_pending_vblank_event *event;
137
138 /**
139 * @abort_completion:
140 *
141 * A flag that's set after drm_atomic_helper_setup_commit takes a second
 142 * reference for the completion of &drm_crtc_state.event. It's used by
143 * the free code to remove the second reference if commit fails.
144 */
145 bool abort_completion;
137}; 146};
138 147
139struct __drm_planes_state { 148struct __drm_planes_state {
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 76e237bd989b..6914633037a5 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -77,5 +77,6 @@ void drm_kms_helper_hotplug_event(struct drm_device *dev);
77 77
78void drm_kms_helper_poll_disable(struct drm_device *dev); 78void drm_kms_helper_poll_disable(struct drm_device *dev);
79void drm_kms_helper_poll_enable(struct drm_device *dev); 79void drm_kms_helper_poll_enable(struct drm_device *dev);
80bool drm_kms_helper_is_poll_worker(void);
80 81
81#endif 82#endif
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 64e10746f282..968173ec2726 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -587,7 +587,7 @@ extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
587const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, 587const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
588 const struct device *dev); 588 const struct device *dev);
589 589
590void *acpi_get_match_data(const struct device *dev); 590const void *acpi_device_get_match_data(const struct device *dev);
591extern bool acpi_driver_match_device(struct device *dev, 591extern bool acpi_driver_match_device(struct device *dev,
592 const struct device_driver *drv); 592 const struct device_driver *drv);
593int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); 593int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
@@ -766,7 +766,7 @@ static inline const struct acpi_device_id *acpi_match_device(
766 return NULL; 766 return NULL;
767} 767}
768 768
769static inline void *acpi_get_match_data(const struct device *dev) 769static inline const void *acpi_device_get_match_data(const struct device *dev)
770{ 770{
771 return NULL; 771 return NULL;
772} 772}
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4f3df807cf8f..ed63f3b69c12 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -49,7 +49,7 @@ struct blk_stat_callback;
49#define BLKDEV_MIN_RQ 4 49#define BLKDEV_MIN_RQ 4
50#define BLKDEV_MAX_RQ 128 /* Default maximum */ 50#define BLKDEV_MAX_RQ 128 /* Default maximum */
51 51
52/* Must be consisitent with blk_mq_poll_stats_bkt() */ 52/* Must be consistent with blk_mq_poll_stats_bkt() */
53#define BLK_MQ_POLL_STATS_BKTS 16 53#define BLK_MQ_POLL_STATS_BKTS 16
54 54
55/* 55/*
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 631354acfa72..901c1ccb3374 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -167,8 +167,6 @@
167 167
168#if GCC_VERSION >= 40100 168#if GCC_VERSION >= 40100
169# define __compiletime_object_size(obj) __builtin_object_size(obj, 0) 169# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
170
171#define __nostackprotector __attribute__((__optimize__("no-stack-protector")))
172#endif 170#endif
173 171
174#if GCC_VERSION >= 40300 172#if GCC_VERSION >= 40300
@@ -196,6 +194,11 @@
196#endif /* __CHECKER__ */ 194#endif /* __CHECKER__ */
197#endif /* GCC_VERSION >= 40300 */ 195#endif /* GCC_VERSION >= 40300 */
198 196
197#if GCC_VERSION >= 40400
198#define __optimize(level) __attribute__((__optimize__(level)))
199#define __nostackprotector __optimize("no-stack-protector")
200#endif /* GCC_VERSION >= 40400 */
201
199#if GCC_VERSION >= 40500 202#if GCC_VERSION >= 40500
200 203
201#ifndef __CHECKER__ 204#ifndef __CHECKER__
@@ -205,6 +208,15 @@
205#endif 208#endif
206 209
207/* 210/*
211 * calling noreturn functions, __builtin_unreachable() and __builtin_trap()
212 * confuse the stack allocation in gcc, leading to overly large stack
213 * frames, see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365
214 *
215 * Adding an empty inline assembly before it works around the problem
216 */
217#define barrier_before_unreachable() asm volatile("")
218
219/*
208 * Mark a position in code as unreachable. This can be used to 220 * Mark a position in code as unreachable. This can be used to
209 * suppress control flow warnings after asm blocks that transfer 221 * suppress control flow warnings after asm blocks that transfer
210 * control elsewhere. 222 * control elsewhere.
@@ -214,7 +226,11 @@
214 * unreleased. Really, we need to have autoconf for the kernel. 226 * unreleased. Really, we need to have autoconf for the kernel.
215 */ 227 */
216#define unreachable() \ 228#define unreachable() \
217 do { annotate_unreachable(); __builtin_unreachable(); } while (0) 229 do { \
230 annotate_unreachable(); \
231 barrier_before_unreachable(); \
232 __builtin_unreachable(); \
233 } while (0)
218 234
219/* Mark a function definition as prohibited from being cloned. */ 235/* Mark a function definition as prohibited from being cloned. */
220#define __noclone __attribute__((__noclone__, __optimize__("no-tracer"))) 236#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
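barrier_before_unreachable() is just the empty asm volatile("") defined above: it stops gcc from merging the stack bookkeeping of distinct noreturn call sites (gcc bug 82365, linked in the comment), which otherwise inflates stack frames. Any construct ending in __builtin_unreachable() or a noreturn call can use the same trick, e.g. a hypothetical trap macro:

#define my_trap()					\
	do {						\
		barrier_before_unreachable();		\
		__builtin_trap();			\
	} while (0)

The compiler.h hunk that follows supplies an empty fallback so the same code compiles unchanged on non-gcc compilers.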
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index c2cc57a2f508..ab4711c63601 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -86,6 +86,11 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
86# define barrier_data(ptr) barrier() 86# define barrier_data(ptr) barrier()
87#endif 87#endif
88 88
89/* workaround for GCC PR82365 if needed */
90#ifndef barrier_before_unreachable
91# define barrier_before_unreachable() do { } while (0)
92#endif
93
89/* Unreachable code */ 94/* Unreachable code */
90#ifdef CONFIG_STACK_VALIDATION 95#ifdef CONFIG_STACK_VALIDATION
91/* 96/*
@@ -277,6 +282,10 @@ unsigned long read_word_at_a_time(const void *addr)
277 282
278#endif /* __ASSEMBLY__ */ 283#endif /* __ASSEMBLY__ */
279 284
285#ifndef __optimize
286# define __optimize(level)
287#endif
288
280/* Compile time object size, -1 for unknown */ 289/* Compile time object size, -1 for unknown */
281#ifndef __compiletime_object_size 290#ifndef __compiletime_object_size
282# define __compiletime_object_size(obj) -1 291# define __compiletime_object_size(obj) -1
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 871f9e21810c..0b3fc229086c 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -225,7 +225,7 @@ static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev,
225} 225}
226#endif 226#endif
227 227
228#ifdef CONFIG_ARCH_HAS_CPU_RELAX 228#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_ARCH_HAS_CPU_RELAX)
229void cpuidle_poll_state_init(struct cpuidle_driver *drv); 229void cpuidle_poll_state_init(struct cpuidle_driver *drv);
230#else 230#else
231static inline void cpuidle_poll_state_init(struct cpuidle_driver *drv) {} 231static inline void cpuidle_poll_state_init(struct cpuidle_driver *drv) {}
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index d4a2a7dcd72d..bf53d893ad02 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -170,6 +170,8 @@ static inline unsigned int cpumask_local_spread(unsigned int i, int node)
170 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) 170 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
171#define for_each_cpu_not(cpu, mask) \ 171#define for_each_cpu_not(cpu, mask) \
172 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) 172 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
173#define for_each_cpu_wrap(cpu, mask, start) \
174 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
173#define for_each_cpu_and(cpu, mask, and) \ 175#define for_each_cpu_and(cpu, mask, and) \
174 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and) 176 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
175#else 177#else
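The new UP stub keeps for_each_cpu_wrap() available with the same signature as the SMP version, so generic code compiles unchanged. Typical usage visits every CPU in a mask exactly once, starting from an arbitrary offset, which helps spread work across CPUs (do_balance_work() below is hypothetical):

int cpu;

/* wraps past the last set bit and stops before revisiting 'start' */
for_each_cpu_wrap(cpu, cpu_online_mask, start)
	do_balance_work(cpu);

On UP the stub simply runs the body once for CPU 0, matching the neighbouring single-CPU stubs.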
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 34fe8463d10e..eb9eab4ecd6d 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -578,7 +578,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
578 578
579/* 579/*
580 * This is a hack for the legacy x86 forbid_dac and iommu_sac_force. Please 580 * This is a hack for the legacy x86 forbid_dac and iommu_sac_force. Please
581 * don't use this is new code. 581 * don't use this in new code.
582 */ 582 */
583#ifndef arch_dma_supported 583#ifndef arch_dma_supported
584#define arch_dma_supported(dev, mask) (1) 584#define arch_dma_supported(dev, mask) (1)
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 4fa1a489efe4..4fe8f289b3f6 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -73,8 +73,8 @@ struct fwnode_operations {
73 struct fwnode_handle *(*get)(struct fwnode_handle *fwnode); 73 struct fwnode_handle *(*get)(struct fwnode_handle *fwnode);
74 void (*put)(struct fwnode_handle *fwnode); 74 void (*put)(struct fwnode_handle *fwnode);
75 bool (*device_is_available)(const struct fwnode_handle *fwnode); 75 bool (*device_is_available)(const struct fwnode_handle *fwnode);
76 void *(*device_get_match_data)(const struct fwnode_handle *fwnode, 76 const void *(*device_get_match_data)(const struct fwnode_handle *fwnode,
77 const struct device *dev); 77 const struct device *dev);
78 bool (*property_present)(const struct fwnode_handle *fwnode, 78 bool (*property_present)(const struct fwnode_handle *fwnode,
79 const char *propname); 79 const char *propname);
80 int (*property_read_int_array)(const struct fwnode_handle *fwnode, 80 int (*property_read_int_array)(const struct fwnode_handle *fwnode,
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index fec5076eda91..dcde9471897d 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -4,6 +4,12 @@
4 4
5#include <generated/autoconf.h> 5#include <generated/autoconf.h>
6 6
7#ifdef CONFIG_CPU_BIG_ENDIAN
8#define __BIG_ENDIAN 4321
9#else
10#define __LITTLE_ENDIAN 1234
11#endif
12
7#define __ARG_PLACEHOLDER_1 0, 13#define __ARG_PLACEHOLDER_1 0,
8#define __take_second_arg(__ignored, val, ...) val 14#define __take_second_arg(__ignored, val, ...) val
9 15
@@ -64,4 +70,7 @@
64 */ 70 */
65#define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option)) 71#define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option))
66 72
73/* Make sure we always have all types and struct attributes defined. */
74#include <linux/compiler_types.h>
75
67#endif /* __LINUX_KCONFIG_H */ 76#endif /* __LINUX_KCONFIG_H */
diff --git a/include/linux/kcore.h b/include/linux/kcore.h
index 7ff25a808fef..80db19d3a505 100644
--- a/include/linux/kcore.h
+++ b/include/linux/kcore.h
@@ -10,6 +10,7 @@ enum kcore_type {
10 KCORE_VMALLOC, 10 KCORE_VMALLOC,
11 KCORE_RAM, 11 KCORE_RAM,
12 KCORE_VMEMMAP, 12 KCORE_VMEMMAP,
13 KCORE_USER,
13 KCORE_OTHER, 14 KCORE_OTHER,
14}; 15};
15 16
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 882046863581..c46016bb25eb 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -523,9 +523,11 @@ static inline void __mod_memcg_state(struct mem_cgroup *memcg,
523static inline void mod_memcg_state(struct mem_cgroup *memcg, 523static inline void mod_memcg_state(struct mem_cgroup *memcg,
524 int idx, int val) 524 int idx, int val)
525{ 525{
526 preempt_disable(); 526 unsigned long flags;
527
528 local_irq_save(flags);
527 __mod_memcg_state(memcg, idx, val); 529 __mod_memcg_state(memcg, idx, val);
528 preempt_enable(); 530 local_irq_restore(flags);
529} 531}
530 532
531/** 533/**
@@ -606,9 +608,11 @@ static inline void __mod_lruvec_state(struct lruvec *lruvec,
606static inline void mod_lruvec_state(struct lruvec *lruvec, 608static inline void mod_lruvec_state(struct lruvec *lruvec,
607 enum node_stat_item idx, int val) 609 enum node_stat_item idx, int val)
608{ 610{
609 preempt_disable(); 611 unsigned long flags;
612
613 local_irq_save(flags);
610 __mod_lruvec_state(lruvec, idx, val); 614 __mod_lruvec_state(lruvec, idx, val);
611 preempt_enable(); 615 local_irq_restore(flags);
612} 616}
613 617
614static inline void __mod_lruvec_page_state(struct page *page, 618static inline void __mod_lruvec_page_state(struct page *page,
@@ -630,9 +634,11 @@ static inline void __mod_lruvec_page_state(struct page *page,
630static inline void mod_lruvec_page_state(struct page *page, 634static inline void mod_lruvec_page_state(struct page *page,
631 enum node_stat_item idx, int val) 635 enum node_stat_item idx, int val)
632{ 636{
633 preempt_disable(); 637 unsigned long flags;
638
639 local_irq_save(flags);
634 __mod_lruvec_page_state(page, idx, val); 640 __mod_lruvec_page_state(page, idx, val);
635 preempt_enable(); 641 local_irq_restore(flags);
636} 642}
637 643
638unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, 644unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
@@ -659,9 +665,11 @@ static inline void __count_memcg_events(struct mem_cgroup *memcg,
659static inline void count_memcg_events(struct mem_cgroup *memcg, 665static inline void count_memcg_events(struct mem_cgroup *memcg,
660 int idx, unsigned long count) 666 int idx, unsigned long count)
661{ 667{
662 preempt_disable(); 668 unsigned long flags;
669
670 local_irq_save(flags);
663 __count_memcg_events(memcg, idx, count); 671 __count_memcg_events(memcg, idx, count);
664 preempt_enable(); 672 local_irq_restore(flags);
665} 673}
666 674
667/* idx can be of type enum memcg_event_item or vm_event_item */ 675/* idx can be of type enum memcg_event_item or vm_event_item */
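All four memcontrol.h hunks make the same substitution for the same reason: these counters are plain read-modify-write updates that can also be performed from interrupt context, and preempt_disable() does not keep an IRQ on the local CPU from interleaving with the RMW. Only masking interrupts does. Distilled (a sketch, not the real __mod_memcg_state()):

static inline void counter_add(long *counter, long val)
{
	unsigned long flags;

	/*
	 * *counter += val is load + add + store.  An interrupt between
	 * the load and the store that updates the same counter loses
	 * its update; local_irq_save() closes that window, while
	 * preempt_disable() would not.
	 */
	local_irq_save(flags);
	*counter += val;
	local_irq_restore(flags);
}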
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index c30b32e3c862..10191c28fc04 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -127,10 +127,4 @@ static __always_inline enum lru_list page_lru(struct page *page)
127 127
128#define lru_to_page(head) (list_entry((head)->prev, struct page, lru)) 128#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
129 129
130#ifdef arch_unmap_kpfn
131extern void arch_unmap_kpfn(unsigned long pfn);
132#else
133static __always_inline void arch_unmap_kpfn(unsigned long pfn) { }
134#endif
135
136#endif 130#endif
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index f25c13423bd4..cb3bbed4e633 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -66,6 +66,11 @@ struct mutex {
66#endif 66#endif
67}; 67};
68 68
69/*
70 * Internal helper function; C doesn't allow us to hide it :/
71 *
72 * DO NOT USE (outside of mutex code).
73 */
69static inline struct task_struct *__mutex_owner(struct mutex *lock) 74static inline struct task_struct *__mutex_owner(struct mutex *lock)
70{ 75{
71 return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07); 76 return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07);
diff --git a/include/linux/nospec.h b/include/linux/nospec.h
index b99bced39ac2..fbc98e2c8228 100644
--- a/include/linux/nospec.h
+++ b/include/linux/nospec.h
@@ -20,20 +20,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
20 unsigned long size) 20 unsigned long size)
21{ 21{
22 /* 22 /*
23 * Warn developers about inappropriate array_index_nospec() usage.
24 *
25 * Even if the CPU speculates past the WARN_ONCE branch, the
26 * sign bit of @index is taken into account when generating the
27 * mask.
28 *
29 * This warning is compiled out when the compiler can infer that
30 * @index and @size are less than LONG_MAX.
31 */
32 if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX,
33 "array_index_nospec() limited to range of [0, LONG_MAX]\n"))
34 return 0;
35
36 /*
37 * Always calculate and emit the mask even if the compiler 23 * Always calculate and emit the mask even if the compiler
38 * thinks the mask is not needed. The compiler does not take 24 * thinks the mask is not needed. The compiler does not take
39 * into account the value of @index under speculation. 25 * into account the value of @index under speculation.
@@ -44,6 +30,26 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
44#endif 30#endif
45 31
46/* 32/*
33 * Warn developers about inappropriate array_index_nospec() usage.
34 *
35 * Even if the CPU speculates past the WARN_ONCE branch, the
36 * sign bit of @index is taken into account when generating the
37 * mask.
38 *
39 * This warning is compiled out when the compiler can infer that
40 * @index and @size are less than LONG_MAX.
41 */
42#define array_index_mask_nospec_check(index, size) \
43({ \
44 if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, \
45 "array_index_nospec() limited to range of [0, LONG_MAX]\n")) \
46 _mask = 0; \
47 else \
48 _mask = array_index_mask_nospec(index, size); \
49 _mask; \
50})
51
52/*
47 * array_index_nospec - sanitize an array index after a bounds check 53 * array_index_nospec - sanitize an array index after a bounds check
48 * 54 *
49 * For a code sequence like: 55 * For a code sequence like:
@@ -61,7 +67,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
61({ \ 67({ \
62 typeof(index) _i = (index); \ 68 typeof(index) _i = (index); \
63 typeof(size) _s = (size); \ 69 typeof(size) _s = (size); \
64 unsigned long _mask = array_index_mask_nospec(_i, _s); \ 70 unsigned long _mask = array_index_mask_nospec_check(_i, _s); \
65 \ 71 \
66 BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \ 72 BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
67 BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \ 73 BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
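The net effect of the nospec.h reshuffle: the WARN_ONCE() sanity check moves out of the mask helper, which architectures may override, into a wrapper macro, so every implementation gets the check. The call-site pattern is unchanged: bounds-check first, then clamp the index so a mis-speculated path cannot read out of range. Sketch (table and nr are illustrative):

static int read_entry(const int *table, size_t nr, size_t idx)
{
	if (idx >= nr)
		return -EINVAL;
	/* evaluates to idx normally, 0 under mis-speculation */
	idx = array_index_nospec(idx, nr);
	return table[idx];
}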
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index af0f44effd44..40036a57d072 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -14,26 +14,10 @@
14 14
15#include <linux/interrupt.h> 15#include <linux/interrupt.h>
16#include <linux/perf_event.h> 16#include <linux/perf_event.h>
17#include <linux/platform_device.h>
17#include <linux/sysfs.h> 18#include <linux/sysfs.h>
18#include <asm/cputype.h> 19#include <asm/cputype.h>
19 20
20/*
21 * struct arm_pmu_platdata - ARM PMU platform data
22 *
23 * @handle_irq: an optional handler which will be called from the
24 * interrupt and passed the address of the low level handler,
25 * and can be used to implement any platform specific handling
26 * before or after calling it.
27 *
28 * @irq_flags: if non-zero, these flags will be passed to request_irq
29 * when requesting interrupts for this PMU device.
30 */
31struct arm_pmu_platdata {
32 irqreturn_t (*handle_irq)(int irq, void *dev,
33 irq_handler_t pmu_handler);
34 unsigned long irq_flags;
35};
36
37#ifdef CONFIG_ARM_PMU 21#ifdef CONFIG_ARM_PMU
38 22
39/* 23/*
@@ -92,7 +76,6 @@ enum armpmu_attr_groups {
92 76
93struct arm_pmu { 77struct arm_pmu {
94 struct pmu pmu; 78 struct pmu pmu;
95 cpumask_t active_irqs;
96 cpumask_t supported_cpus; 79 cpumask_t supported_cpus;
97 char *name; 80 char *name;
98 irqreturn_t (*handle_irq)(int irq_num, void *dev); 81 irqreturn_t (*handle_irq)(int irq_num, void *dev);
@@ -174,12 +157,11 @@ static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
174 157
175/* Internal functions only for core arm_pmu code */ 158/* Internal functions only for core arm_pmu code */
176struct arm_pmu *armpmu_alloc(void); 159struct arm_pmu *armpmu_alloc(void);
160struct arm_pmu *armpmu_alloc_atomic(void);
177void armpmu_free(struct arm_pmu *pmu); 161void armpmu_free(struct arm_pmu *pmu);
178int armpmu_register(struct arm_pmu *pmu); 162int armpmu_register(struct arm_pmu *pmu);
179int armpmu_request_irqs(struct arm_pmu *armpmu); 163int armpmu_request_irq(int irq, int cpu);
180void armpmu_free_irqs(struct arm_pmu *armpmu); 164void armpmu_free_irq(int irq, int cpu);
181int armpmu_request_irq(struct arm_pmu *armpmu, int cpu);
182void armpmu_free_irq(struct arm_pmu *armpmu, int cpu);
183 165
184#define ARMV8_PMU_PDEV_NAME "armv8-pmu" 166#define ARMV8_PMU_PDEV_NAME "armv8-pmu"
185 167
diff --git a/include/linux/property.h b/include/linux/property.h
index 769d372c1edf..2eea4b310fc2 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -283,7 +283,7 @@ bool device_dma_supported(struct device *dev);
283 283
284enum dev_dma_attr device_get_dma_attr(struct device *dev); 284enum dev_dma_attr device_get_dma_attr(struct device *dev);
285 285
286void *device_get_match_data(struct device *dev); 286const void *device_get_match_data(struct device *dev);
287 287
288int device_get_phy_mode(struct device *dev); 288int device_get_phy_mode(struct device *dev);
289 289
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index b884b7794187..e6335227b844 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -469,7 +469,7 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
469 */ 469 */
470static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp) 470static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
471{ 471{
472 if (size * sizeof(void *) > KMALLOC_MAX_SIZE) 472 if (size > KMALLOC_MAX_SIZE / sizeof(void *))
473 return NULL; 473 return NULL;
474 return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO); 474 return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO);
475} 475}
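The rewritten bound check avoids integer overflow: with a 32-bit size_t, size * sizeof(void *) can wrap and sneak under KMALLOC_MAX_SIZE, while dividing the constant side can never wrap. Concretely (hypothetical value, 32-bit size_t assumed):

static bool queue_size_ok(unsigned int size)
{
	/*
	 * size = 0x20000001 makes size * 8 wrap to 8 and pass the old
	 * "size * sizeof(void *) > KMALLOC_MAX_SIZE" test; comparing
	 * against KMALLOC_MAX_SIZE / sizeof(void *) catches it.
	 */
	return size <= KMALLOC_MAX_SIZE / sizeof(void *);
}

The same divide-not-multiply idiom applies anywhere a caller-controlled element count is checked against a byte limit.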
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 1149533aa2fa..9806184bb3d5 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -36,7 +36,18 @@ static inline void mmgrab(struct mm_struct *mm)
36 atomic_inc(&mm->mm_count); 36 atomic_inc(&mm->mm_count);
37} 37}
38 38
39extern void mmdrop(struct mm_struct *mm); 39extern void __mmdrop(struct mm_struct *mm);
40
41static inline void mmdrop(struct mm_struct *mm)
42{
43 /*
44 * The implicit full barrier implied by atomic_dec_and_test() is
45 * required by the membarrier system call before returning to
46 * user-space, after storing to rq->curr.
47 */
48 if (unlikely(atomic_dec_and_test(&mm->mm_count)))
49 __mmdrop(mm);
50}
40 51
41/** 52/**
42 * mmget() - Pin the address space associated with a &struct mm_struct. 53 * mmget() - Pin the address space associated with a &struct mm_struct.
diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h
index 0dcf4e480ef7..96fe289c4c6e 100644
--- a/include/linux/sched/user.h
+++ b/include/linux/sched/user.h
@@ -4,6 +4,7 @@
4 4
5#include <linux/uidgid.h> 5#include <linux/uidgid.h>
6#include <linux/atomic.h> 6#include <linux/atomic.h>
7#include <linux/ratelimit.h>
7 8
8struct key; 9struct key;
9 10
@@ -41,6 +42,9 @@ struct user_struct {
41 defined(CONFIG_NET) 42 defined(CONFIG_NET)
42 atomic_long_t locked_vm; 43 atomic_long_t locked_vm;
43#endif 44#endif
45
46 /* Miscellaneous per-user rate limit */
47 struct ratelimit_state ratelimit;
44}; 48};
45 49
46extern int uids_sysfs_init(void); 50extern int uids_sysfs_init(void);
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index dc368b8ce215..11c86fbfeb98 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -4,7 +4,7 @@
4 * 4 *
5 * Distributed under the terms of the GNU GPL, version 2 5 * Distributed under the terms of the GNU GPL, version 2
6 * 6 *
7 * Please see kernel/semaphore.c for documentation of these functions 7 * Please see kernel/locking/semaphore.c for documentation of these functions
8 */ 8 */
9#ifndef __LINUX_SEMAPHORE_H 9#ifndef __LINUX_SEMAPHORE_H
10#define __LINUX_SEMAPHORE_H 10#define __LINUX_SEMAPHORE_H
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 5ebc0f869720..c1e66bdcf583 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3646,7 +3646,7 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
3646 return true; 3646 return true;
3647} 3647}
3648 3648
3649/* For small packets <= CHECKSUM_BREAK peform checksum complete directly 3649/* For small packets <= CHECKSUM_BREAK perform checksum complete directly
3650 * in checksum_init. 3650 * in checksum_init.
3651 */ 3651 */
3652#define CHECKSUM_BREAK 76 3652#define CHECKSUM_BREAK 76
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 7b6a59f722a3..a1a3f4ed94ce 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -337,8 +337,6 @@ extern void deactivate_file_page(struct page *page);
337extern void mark_page_lazyfree(struct page *page); 337extern void mark_page_lazyfree(struct page *page);
338extern void swap_setup(void); 338extern void swap_setup(void);
339 339
340extern void add_page_to_unevictable_list(struct page *page);
341
342extern void lru_cache_add_active_or_unevictable(struct page *page, 340extern void lru_cache_add_active_or_unevictable(struct page *page,
343 struct vm_area_struct *vma); 341 struct vm_area_struct *vma);
344 342
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 4a54ef96aff5..bc0cda180c8b 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -465,6 +465,7 @@ extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
465 465
466extern void workqueue_set_max_active(struct workqueue_struct *wq, 466extern void workqueue_set_max_active(struct workqueue_struct *wq,
467 int max_active); 467 int max_active);
468extern struct work_struct *current_work(void);
468extern bool current_is_workqueue_rescuer(void); 469extern bool current_is_workqueue_rescuer(void);
469extern bool workqueue_congested(int cpu, struct workqueue_struct *wq); 470extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
470extern unsigned int work_busy(struct work_struct *work); 471extern unsigned int work_busy(struct work_struct *work);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 906e90223066..c96511fa9198 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -4149,7 +4149,7 @@ void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *sta, u8 tid);
4149 * The TX headroom reserved by mac80211 for its own tx_status functions. 4149 * The TX headroom reserved by mac80211 for its own tx_status functions.
4150 * This is enough for the radiotap header. 4150 * This is enough for the radiotap header.
4151 */ 4151 */
4152#define IEEE80211_TX_STATUS_HEADROOM 14 4152#define IEEE80211_TX_STATUS_HEADROOM ALIGN(14, 4)
4153 4153
4154/** 4154/**
4155 * ieee80211_sta_set_buffered - inform mac80211 about driver-buffered frames 4155 * ieee80211_sta_set_buffered - inform mac80211 about driver-buffered frames
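ALIGN(x, a) rounds x up to the next multiple of the power-of-two a, so the headroom grows from 14 to 16 bytes and data built on top of it stays 4-byte aligned. A quick illustration of the arithmetic and of a driver reserving the headroom (the skb usage is illustrative, not taken from this patch):

/* ALIGN(14, 4) == ((14) + 3) & ~3 == 16 */
skb = dev_alloc_skb(IEEE80211_TX_STATUS_HEADROOM + frame_len);
if (skb)
	skb_reserve(skb, IEEE80211_TX_STATUS_HEADROOM);	/* room for radiotap */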
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index ebc5a2ed8631..f83cacce3308 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -78,7 +78,7 @@ struct regulatory_request {
78 int wiphy_idx; 78 int wiphy_idx;
79 enum nl80211_reg_initiator initiator; 79 enum nl80211_reg_initiator initiator;
80 enum nl80211_user_reg_hint_type user_reg_hint_type; 80 enum nl80211_user_reg_hint_type user_reg_hint_type;
81 char alpha2[2]; 81 char alpha2[3];
82 enum nl80211_dfs_regions dfs_region; 82 enum nl80211_dfs_regions dfs_region;
83 bool intersect; 83 bool intersect;
84 bool processed; 84 bool processed;
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 81bdbf97319b..9185e45b997f 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -64,6 +64,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
64 UDP_SKB_CB(skb)->cscov = cscov; 64 UDP_SKB_CB(skb)->cscov = cscov;
65 if (skb->ip_summed == CHECKSUM_COMPLETE) 65 if (skb->ip_summed == CHECKSUM_COMPLETE)
66 skb->ip_summed = CHECKSUM_NONE; 66 skb->ip_summed = CHECKSUM_NONE;
67 skb->csum_valid = 0;
67 } 68 }
68 69
69 return 0; 70 return 0;
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
index c2d81167c858..2cdf8dcf4bdc 100644
--- a/include/rdma/restrack.h
+++ b/include/rdma/restrack.h
@@ -29,10 +29,6 @@ enum rdma_restrack_type {
29 */ 29 */
30 RDMA_RESTRACK_QP, 30 RDMA_RESTRACK_QP,
31 /** 31 /**
32 * @RDMA_RESTRACK_XRCD: XRC domain (XRCD)
33 */
34 RDMA_RESTRACK_XRCD,
35 /**
36 * @RDMA_RESTRACK_MAX: Last entry, used for array declarations 32 * @RDMA_RESTRACK_MAX: Last entry, used for array declarations
37 */ 33 */
38 RDMA_RESTRACK_MAX 34 RDMA_RESTRACK_MAX
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
index 6da44079aa58..38287d9d23a1 100644
--- a/include/rdma/uverbs_ioctl.h
+++ b/include/rdma/uverbs_ioctl.h
@@ -276,10 +276,7 @@ struct uverbs_object_tree_def {
276 */ 276 */
277 277
278struct uverbs_ptr_attr { 278struct uverbs_ptr_attr {
279 union { 279 u64 data;
280 u64 data;
281 void __user *ptr;
282 };
283 u16 len; 280 u16 len;
284 /* Combination of bits from enum UVERBS_ATTR_F_XXXX */ 281 /* Combination of bits from enum UVERBS_ATTR_F_XXXX */
285 u16 flags; 282 u16 flags;
@@ -351,38 +348,60 @@ static inline const struct uverbs_attr *uverbs_attr_get(const struct uverbs_attr
351} 348}
352 349
353static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle, 350static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle,
354 size_t idx, const void *from) 351 size_t idx, const void *from, size_t size)
355{ 352{
356 const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx); 353 const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx);
357 u16 flags; 354 u16 flags;
355 size_t min_size;
358 356
359 if (IS_ERR(attr)) 357 if (IS_ERR(attr))
360 return PTR_ERR(attr); 358 return PTR_ERR(attr);
361 359
360 min_size = min_t(size_t, attr->ptr_attr.len, size);
361 if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size))
362 return -EFAULT;
363
362 flags = attr->ptr_attr.flags | UVERBS_ATTR_F_VALID_OUTPUT; 364 flags = attr->ptr_attr.flags | UVERBS_ATTR_F_VALID_OUTPUT;
363 return (!copy_to_user(attr->ptr_attr.ptr, from, attr->ptr_attr.len) && 365 if (put_user(flags, &attr->uattr->flags))
364 !put_user(flags, &attr->uattr->flags)) ? 0 : -EFAULT; 366 return -EFAULT;
367
368 return 0;
365} 369}
366 370
367static inline int _uverbs_copy_from(void *to, size_t to_size, 371static inline bool uverbs_attr_ptr_is_inline(const struct uverbs_attr *attr)
372{
373 return attr->ptr_attr.len <= sizeof(attr->ptr_attr.data);
374}
375
376static inline int _uverbs_copy_from(void *to,
368 const struct uverbs_attr_bundle *attrs_bundle, 377 const struct uverbs_attr_bundle *attrs_bundle,
369 size_t idx) 378 size_t idx,
379 size_t size)
370{ 380{
371 const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx); 381 const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx);
372 382
373 if (IS_ERR(attr)) 383 if (IS_ERR(attr))
374 return PTR_ERR(attr); 384 return PTR_ERR(attr);
375 385
376 if (to_size <= sizeof(((struct ib_uverbs_attr *)0)->data)) 386 /*
387 * Validation ensures attr->ptr_attr.len >= size. If the caller is
388 * using UVERBS_ATTR_SPEC_F_MIN_SZ then it must call copy_from with
389 * the right size.
390 */
391 if (unlikely(size < attr->ptr_attr.len))
392 return -EINVAL;
393
394 if (uverbs_attr_ptr_is_inline(attr))
377 memcpy(to, &attr->ptr_attr.data, attr->ptr_attr.len); 395 memcpy(to, &attr->ptr_attr.data, attr->ptr_attr.len);
378 else if (copy_from_user(to, attr->ptr_attr.ptr, attr->ptr_attr.len)) 396 else if (copy_from_user(to, u64_to_user_ptr(attr->ptr_attr.data),
397 attr->ptr_attr.len))
379 return -EFAULT; 398 return -EFAULT;
380 399
381 return 0; 400 return 0;
382} 401}
383 402
384#define uverbs_copy_from(to, attrs_bundle, idx) \ 403#define uverbs_copy_from(to, attrs_bundle, idx) \
385 _uverbs_copy_from(to, sizeof(*(to)), attrs_bundle, idx) 404 _uverbs_copy_from(to, attrs_bundle, idx, sizeof(*to))
386 405
387/* ================================================= 406/* =================================================
388 * Definitions -> Specs infrastructure 407 * Definitions -> Specs infrastructure
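With the union gone, the user pointer is always derived from the u64 via u64_to_user_ptr(), both helpers take an explicit size, and attributes small enough to fit in ->data are carried inline. A hedged sketch of a method handler using the reworked helpers (the attribute ids, response struct and handler name are invented for illustration):

struct example_resp { __u32 handle; __u32 reserved; };	/* hypothetical */

static int example_handler(struct ib_device *ib_dev,
			   struct ib_uverbs_file *file,
			   struct uverbs_attr_bundle *attrs)
{
	struct example_resp resp = { .handle = 42 };
	u64 user_arg;
	int ret;

	/* copies sizeof(user_arg) bytes, inline or via copy_from_user() */
	ret = uverbs_copy_from(&user_arg, attrs, EXAMPLE_ATTR_IN);
	if (ret)
		return ret;

	/* writes min(attr->ptr_attr.len, sizeof(resp)) bytes and marks
	 * the attribute UVERBS_ATTR_F_VALID_OUTPUT for userspace
	 */
	return uverbs_copy_to(attrs, EXAMPLE_ATTR_OUT, &resp, sizeof(resp));
}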
diff --git a/include/sound/ac97/regs.h b/include/sound/ac97/regs.h
index 4bb86d379bd5..9a4fa0c3264a 100644
--- a/include/sound/ac97/regs.h
+++ b/include/sound/ac97/regs.h
@@ -31,7 +31,7 @@
31#define AC97_HEADPHONE 0x04 /* Headphone Volume (optional) */ 31#define AC97_HEADPHONE 0x04 /* Headphone Volume (optional) */
32#define AC97_MASTER_MONO 0x06 /* Master Volume Mono (optional) */ 32#define AC97_MASTER_MONO 0x06 /* Master Volume Mono (optional) */
33#define AC97_MASTER_TONE 0x08 /* Master Tone (Bass & Treble) (optional) */ 33#define AC97_MASTER_TONE 0x08 /* Master Tone (Bass & Treble) (optional) */
34#define AC97_PC_BEEP 0x0a /* PC Beep Volume (optinal) */ 34#define AC97_PC_BEEP 0x0a /* PC Beep Volume (optional) */
35#define AC97_PHONE 0x0c /* Phone Volume (optional) */ 35#define AC97_PHONE 0x0c /* Phone Volume (optional) */
36#define AC97_MIC 0x0e /* MIC Volume */ 36#define AC97_MIC 0x0e /* MIC Volume */
37#define AC97_LINE 0x10 /* Line In Volume */ 37#define AC97_LINE 0x10 /* Line In Volume */
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index b8adf05c534e..7dd8f34c37df 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -368,7 +368,7 @@ TRACE_EVENT(xen_mmu_flush_tlb,
368 TP_printk("%s", "") 368 TP_printk("%s", "")
369 ); 369 );
370 370
371TRACE_EVENT(xen_mmu_flush_tlb_single, 371TRACE_EVENT(xen_mmu_flush_tlb_one_user,
372 TP_PROTO(unsigned long addr), 372 TP_PROTO(unsigned long addr),
373 TP_ARGS(addr), 373 TP_ARGS(addr),
374 TP_STRUCT__entry( 374 TP_STRUCT__entry(
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index f8cb5760ea4f..8bbbcb5cd94b 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -23,7 +23,6 @@
23#define _UAPI_LINUX_IF_ETHER_H 23#define _UAPI_LINUX_IF_ETHER_H
24 24
25#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/libc-compat.h>
27 26
28/* 27/*
29 * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble 28 * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble
@@ -151,6 +150,11 @@
151 * This is an Ethernet frame header. 150 * This is an Ethernet frame header.
152 */ 151 */
153 152
153/* allow libcs like musl to deactivate this, glibc does not implement this. */
154#ifndef __UAPI_DEF_ETHHDR
155#define __UAPI_DEF_ETHHDR 1
156#endif
157
154#if __UAPI_DEF_ETHHDR 158#if __UAPI_DEF_ETHHDR
155struct ethhdr { 159struct ethhdr {
156 unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ 160 unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
index fc29efaa918c..8254c937c9f4 100644
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -264,10 +264,4 @@
264 264
265#endif /* __GLIBC__ */ 265#endif /* __GLIBC__ */
266 266
267/* Definitions for if_ether.h */
268/* allow libcs like musl to deactivate this, glibc does not implement this. */
269#ifndef __UAPI_DEF_ETHHDR
270#define __UAPI_DEF_ETHHDR 1
271#endif
272
273#endif /* _UAPI_LIBC_COMPAT_H */ 267#endif /* _UAPI_LIBC_COMPAT_H */
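Hoisting the guard into if_ether.h itself means a libc that ships its own struct ethhdr no longer depends on libc-compat.h having seen its headers first; it can simply predefine the macro. How a userspace program on such a libc might suppress the kernel's definition (illustrative only):

#include <net/ethernet.h>	/* libc's struct ethhdr */

#define __UAPI_DEF_ETHHDR 0	/* ask the kernel header to skip its copy */
#include <linux/if_ether.h>	/* still provides ETH_ALEN, ETH_P_*, ... */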
diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h
index e46d82b91166..d5a1b8a492b9 100644
--- a/include/uapi/linux/ptrace.h
+++ b/include/uapi/linux/ptrace.h
@@ -69,8 +69,8 @@ struct ptrace_peeksiginfo_args {
69#define PTRACE_SECCOMP_GET_METADATA 0x420d 69#define PTRACE_SECCOMP_GET_METADATA 0x420d
70 70
71struct seccomp_metadata { 71struct seccomp_metadata {
72 unsigned long filter_off; /* Input: which filter */ 72 __u64 filter_off; /* Input: which filter */
73 unsigned int flags; /* Output: filter's flags */ 73 __u64 flags; /* Output: filter's flags */
74}; 74};
75 75
76/* Read signals from a shared (process wide) queue */ 76/* Read signals from a shared (process wide) queue */
diff --git a/include/uapi/rdma/rdma_user_ioctl.h b/include/uapi/rdma/rdma_user_ioctl.h
index 03557b5f9aa6..46de0885e800 100644
--- a/include/uapi/rdma/rdma_user_ioctl.h
+++ b/include/uapi/rdma/rdma_user_ioctl.h
@@ -65,7 +65,7 @@ struct ib_uverbs_attr {
65 __u16 len; /* only for pointers */ 65 __u16 len; /* only for pointers */
66 __u16 flags; /* combination of UVERBS_ATTR_F_XXXX */ 66 __u16 flags; /* combination of UVERBS_ATTR_F_XXXX */
67 __u16 reserved; 67 __u16 reserved;
68 __u64 data; /* ptr to command, inline data or idr/fd */ 68 __aligned_u64 data; /* ptr to command, inline data or idr/fd */
69}; 69};
70 70
71struct ib_uverbs_ioctl_hdr { 71struct ib_uverbs_ioctl_hdr {
@@ -73,7 +73,7 @@ struct ib_uverbs_ioctl_hdr {
73 __u16 object_id; 73 __u16 object_id;
74 __u16 method_id; 74 __u16 method_id;
75 __u16 num_attrs; 75 __u16 num_attrs;
76 __u64 reserved; 76 __aligned_u64 reserved;
77 struct ib_uverbs_attr attrs[0]; 77 struct ib_uverbs_attr attrs[0];
78}; 78};
79 79
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index b1f66480135b..14750e7c5ee4 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -26,8 +26,10 @@ static void bpf_array_free_percpu(struct bpf_array *array)
26{ 26{
27 int i; 27 int i;
28 28
29 for (i = 0; i < array->map.max_entries; i++) 29 for (i = 0; i < array->map.max_entries; i++) {
30 free_percpu(array->pptrs[i]); 30 free_percpu(array->pptrs[i]);
31 cond_resched();
32 }
31} 33}
32 34
33static int bpf_array_alloc_percpu(struct bpf_array *array) 35static int bpf_array_alloc_percpu(struct bpf_array *array)
@@ -43,6 +45,7 @@ static int bpf_array_alloc_percpu(struct bpf_array *array)
43 return -ENOMEM; 45 return -ENOMEM;
44 } 46 }
45 array->pptrs[i] = ptr; 47 array->pptrs[i] = ptr;
48 cond_resched();
46 } 49 }
47 50
48 return 0; 51 return 0;
@@ -73,11 +76,11 @@ static int array_map_alloc_check(union bpf_attr *attr)
73static struct bpf_map *array_map_alloc(union bpf_attr *attr) 76static struct bpf_map *array_map_alloc(union bpf_attr *attr)
74{ 77{
75 bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; 78 bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
76 int numa_node = bpf_map_attr_numa_node(attr); 79 int ret, numa_node = bpf_map_attr_numa_node(attr);
77 u32 elem_size, index_mask, max_entries; 80 u32 elem_size, index_mask, max_entries;
78 bool unpriv = !capable(CAP_SYS_ADMIN); 81 bool unpriv = !capable(CAP_SYS_ADMIN);
82 u64 cost, array_size, mask64;
79 struct bpf_array *array; 83 struct bpf_array *array;
80 u64 array_size, mask64;
81 84
82 elem_size = round_up(attr->value_size, 8); 85 elem_size = round_up(attr->value_size, 8);
83 86
@@ -109,8 +112,19 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
109 array_size += (u64) max_entries * elem_size; 112 array_size += (u64) max_entries * elem_size;
110 113
111 /* make sure there is no u32 overflow later in round_up() */ 114 /* make sure there is no u32 overflow later in round_up() */
112 if (array_size >= U32_MAX - PAGE_SIZE) 115 cost = array_size;
116 if (cost >= U32_MAX - PAGE_SIZE)
113 return ERR_PTR(-ENOMEM); 117 return ERR_PTR(-ENOMEM);
118 if (percpu) {
119 cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
120 if (cost >= U32_MAX - PAGE_SIZE)
121 return ERR_PTR(-ENOMEM);
122 }
123 cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
124
125 ret = bpf_map_precharge_memlock(cost);
126 if (ret < 0)
127 return ERR_PTR(ret);
114 128
115 /* allocate all map elements and zero-initialize them */ 129 /* allocate all map elements and zero-initialize them */
116 array = bpf_map_area_alloc(array_size, numa_node); 130 array = bpf_map_area_alloc(array_size, numa_node);
@@ -121,20 +135,13 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
121 135
122 /* copy mandatory map attributes */ 136 /* copy mandatory map attributes */
123 bpf_map_init_from_attr(&array->map, attr); 137 bpf_map_init_from_attr(&array->map, attr);
138 array->map.pages = cost;
124 array->elem_size = elem_size; 139 array->elem_size = elem_size;
125 140
126 if (!percpu) 141 if (percpu && bpf_array_alloc_percpu(array)) {
127 goto out;
128
129 array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
130
131 if (array_size >= U32_MAX - PAGE_SIZE ||
132 bpf_array_alloc_percpu(array)) {
133 bpf_map_area_free(array); 142 bpf_map_area_free(array);
134 return ERR_PTR(-ENOMEM); 143 return ERR_PTR(-ENOMEM);
135 } 144 }
136out:
137 array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
138 145
139 return &array->map; 146 return &array->map;
140} 147}
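The array allocation is now charged against RLIMIT_MEMLOCK before any memory is committed: the full byte cost, including the per-CPU areas, is converted to pages and precharged, and the same page count is recorded for uncharging on free. The accounting pattern condensed from the hunk above (overflow checks elided):

cost = array_size;				/* header + element bytes */
if (percpu)
	cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;	/* bytes -> pages */

ret = bpf_map_precharge_memlock(cost);		/* fails if over the rlimit */
if (ret < 0)
	return ERR_PTR(ret);
array->map.pages = cost;			/* uncharged when the map is freed */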
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 29ca9208dcfa..d315b393abdd 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1590,7 +1590,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
1590 * so always copy 'cnt' prog_ids to the user. 1590 * so always copy 'cnt' prog_ids to the user.
1591 * In a rare race the user will see zero prog_ids 1591 * In a rare race the user will see zero prog_ids
1592 */ 1592 */
1593 ids = kcalloc(cnt, sizeof(u32), GFP_USER); 1593 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
1594 if (!ids) 1594 if (!ids)
1595 return -ENOMEM; 1595 return -ENOMEM;
1596 rcu_read_lock(); 1596 rcu_read_lock();
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index fbfdada6caee..a4bb0b34375a 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -334,7 +334,7 @@ static int cpu_map_kthread_run(void *data)
334static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu, 334static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
335 int map_id) 335 int map_id)
336{ 336{
337 gfp_t gfp = GFP_ATOMIC|__GFP_NOWARN; 337 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
338 struct bpf_cpu_map_entry *rcpu; 338 struct bpf_cpu_map_entry *rcpu;
339 int numa, err; 339 int numa, err;
340 340
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 7b469d10d0e9..b4b5b81e7251 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -555,7 +555,10 @@ static void trie_free(struct bpf_map *map)
555 struct lpm_trie_node __rcu **slot; 555 struct lpm_trie_node __rcu **slot;
556 struct lpm_trie_node *node; 556 struct lpm_trie_node *node;
557 557
558 raw_spin_lock(&trie->lock); 558 /* Wait for outstanding programs to complete
559 * update/lookup/delete/get_next_key and free the trie.
560 */
561 synchronize_rcu();
559 562
560 /* Always start at the root and walk down to a node that has no 563 /* Always start at the root and walk down to a node that has no
561 * children. Then free that node, nullify its reference in the parent 564 * children. Then free that node, nullify its reference in the parent
@@ -566,10 +569,9 @@ static void trie_free(struct bpf_map *map)
566 slot = &trie->root; 569 slot = &trie->root;
567 570
568 for (;;) { 571 for (;;) {
569 node = rcu_dereference_protected(*slot, 572 node = rcu_dereference_protected(*slot, 1);
570 lockdep_is_held(&trie->lock));
571 if (!node) 573 if (!node)
572 goto unlock; 574 goto out;
573 575
574 if (rcu_access_pointer(node->child[0])) { 576 if (rcu_access_pointer(node->child[0])) {
575 slot = &node->child[0]; 577 slot = &node->child[0];
@@ -587,8 +589,8 @@ static void trie_free(struct bpf_map *map)
587 } 589 }
588 } 590 }
589 591
590unlock: 592out:
591 raw_spin_unlock(&trie->lock); 593 kfree(trie);
592} 594}
593 595
594static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key) 596static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
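Since map teardown only runs once no program can reach the trie, a single synchronize_rcu() is enough to drain in-flight lookups, after which the walk may use rcu_dereference_protected(ptr, 1) with no lock held. The same teardown idiom reduced to a hypothetical singly linked RCU list:

struct node { struct node __rcu *next; };
static struct node __rcu *head;

static void list_free(void)
{
	struct node *n;

	synchronize_rcu();	/* wait out readers; no new ones can start */
	n = rcu_dereference_protected(head, 1);	/* sole owner, lockless */
	while (n) {
		struct node *next = rcu_dereference_protected(n->next, 1);

		kfree(n);
		n = next;
	}
}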
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 48c33417d13c..a927e89dad6e 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -521,8 +521,8 @@ static struct smap_psock *smap_init_psock(struct sock *sock,
521static struct bpf_map *sock_map_alloc(union bpf_attr *attr) 521static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
522{ 522{
523 struct bpf_stab *stab; 523 struct bpf_stab *stab;
524 int err = -EINVAL;
525 u64 cost; 524 u64 cost;
525 int err;
526 526
527 if (!capable(CAP_NET_ADMIN)) 527 if (!capable(CAP_NET_ADMIN))
528 return ERR_PTR(-EPERM); 528 return ERR_PTR(-EPERM);
@@ -547,6 +547,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
547 547
548 /* make sure page count doesn't overflow */ 548 /* make sure page count doesn't overflow */
549 cost = (u64) stab->map.max_entries * sizeof(struct sock *); 549 cost = (u64) stab->map.max_entries * sizeof(struct sock *);
550 err = -EINVAL;
550 if (cost >= U32_MAX - PAGE_SIZE) 551 if (cost >= U32_MAX - PAGE_SIZE)
551 goto free_stab; 552 goto free_stab;
552 553
diff --git a/kernel/fork.c b/kernel/fork.c
index be8aa5b98666..e5d9d405ae4e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -592,7 +592,7 @@ static void check_mm(struct mm_struct *mm)
592 * is dropped: either by a lazy thread or by 592 * is dropped: either by a lazy thread or by
593 * mmput. Free the page directory and the mm. 593 * mmput. Free the page directory and the mm.
594 */ 594 */
595static void __mmdrop(struct mm_struct *mm) 595void __mmdrop(struct mm_struct *mm)
596{ 596{
597 BUG_ON(mm == &init_mm); 597 BUG_ON(mm == &init_mm);
598 mm_free_pgd(mm); 598 mm_free_pgd(mm);
@@ -603,18 +603,7 @@ static void __mmdrop(struct mm_struct *mm)
603 put_user_ns(mm->user_ns); 603 put_user_ns(mm->user_ns);
604 free_mm(mm); 604 free_mm(mm);
605} 605}
606 606EXPORT_SYMBOL_GPL(__mmdrop);
607void mmdrop(struct mm_struct *mm)
608{
609 /*
610 * The implicit full barrier implied by atomic_dec_and_test() is
611 * required by the membarrier system call before returning to
612 * user-space, after storing to rq->curr.
613 */
614 if (unlikely(atomic_dec_and_test(&mm->mm_count)))
615 __mmdrop(mm);
616}
617EXPORT_SYMBOL_GPL(mmdrop);
618 607
619static void mmdrop_async_fn(struct work_struct *work) 608static void mmdrop_async_fn(struct work_struct *work)
620{ 609{
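Dropping the out-of-line mmdrop() while exporting __mmdrop() implies the wrapper moved to a header as an inline, presumably something close to the sketch below (the exact location in <linux/sched/mm.h> is an assumption; the barrier comment carries over from the deleted body):

/* assumed shape of the new inline wrapper in <linux/sched/mm.h> */
static inline void mmdrop(struct mm_struct *mm)
{
	/*
	 * The implicit full barrier implied by atomic_dec_and_test() is
	 * required by the membarrier system call before returning to
	 * user-space, after storing to rq->curr.
	 */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}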
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index e6a9c36470ee..82b8b18ee1eb 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -1726,25 +1726,14 @@ static int irq_domain_debug_show(struct seq_file *m, void *p)
1726 irq_domain_debug_show_one(m, d, 0); 1726 irq_domain_debug_show_one(m, d, 0);
1727 return 0; 1727 return 0;
1728} 1728}
1729 1729DEFINE_SHOW_ATTRIBUTE(irq_domain_debug);
1730static int irq_domain_debug_open(struct inode *inode, struct file *file)
1731{
1732 return single_open(file, irq_domain_debug_show, inode->i_private);
1733}
1734
1735static const struct file_operations dfs_domain_ops = {
1736 .open = irq_domain_debug_open,
1737 .read = seq_read,
1738 .llseek = seq_lseek,
1739 .release = single_release,
1740};
1741 1730
1742static void debugfs_add_domain_dir(struct irq_domain *d) 1731static void debugfs_add_domain_dir(struct irq_domain *d)
1743{ 1732{
1744 if (!d->name || !domain_dir || d->debugfs_file) 1733 if (!d->name || !domain_dir || d->debugfs_file)
1745 return; 1734 return;
1746 d->debugfs_file = debugfs_create_file(d->name, 0444, domain_dir, d, 1735 d->debugfs_file = debugfs_create_file(d->name, 0444, domain_dir, d,
1747 &dfs_domain_ops); 1736 &irq_domain_debug_fops);
1748} 1737}
1749 1738
1750static void debugfs_remove_domain_dir(struct irq_domain *d) 1739static void debugfs_remove_domain_dir(struct irq_domain *d)
@@ -1760,7 +1749,8 @@ void __init irq_domain_debugfs_init(struct dentry *root)
1760 if (!domain_dir) 1749 if (!domain_dir)
1761 return; 1750 return;
1762 1751
1763 debugfs_create_file("default", 0444, domain_dir, NULL, &dfs_domain_ops); 1752 debugfs_create_file("default", 0444, domain_dir, NULL,
1753 &irq_domain_debug_fops);
1764 mutex_lock(&irq_domain_mutex); 1754 mutex_lock(&irq_domain_mutex);
1765 list_for_each_entry(d, &irq_domain_list, link) 1755 list_for_each_entry(d, &irq_domain_list, link)
1766 debugfs_add_domain_dir(d); 1756 debugfs_add_domain_dir(d);
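DEFINE_SHOW_ATTRIBUTE(name) generates exactly the boilerplate deleted above from a name##_show() function, yielding name##_fops. Its definition in <linux/seq_file.h> at the time was approximately:

#define DEFINE_SHOW_ATTRIBUTE(__name)					\
static int __name ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __name ## _show, inode->i_private);	\
}									\
									\
static const struct file_operations __name ## _fops = {		\
	.owner		= THIS_MODULE,					\
	.open		= __name ## _open,				\
	.read		= seq_read,					\
	.llseek		= seq_lseek,					\
	.release	= single_release,				\
}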
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 5187dfe809ac..4c5770407031 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -16,6 +16,7 @@ struct cpumap {
16 unsigned int available; 16 unsigned int available;
17 unsigned int allocated; 17 unsigned int allocated;
18 unsigned int managed; 18 unsigned int managed;
19 bool initialized;
19 bool online; 20 bool online;
20 unsigned long alloc_map[IRQ_MATRIX_SIZE]; 21 unsigned long alloc_map[IRQ_MATRIX_SIZE];
21 unsigned long managed_map[IRQ_MATRIX_SIZE]; 22 unsigned long managed_map[IRQ_MATRIX_SIZE];
@@ -81,9 +82,11 @@ void irq_matrix_online(struct irq_matrix *m)
81 82
82 BUG_ON(cm->online); 83 BUG_ON(cm->online);
83 84
84 bitmap_zero(cm->alloc_map, m->matrix_bits); 85 if (!cm->initialized) {
85 cm->available = m->alloc_size - (cm->managed + m->systembits_inalloc); 86 cm->available = m->alloc_size;
86 cm->allocated = 0; 87 cm->available -= cm->managed + m->systembits_inalloc;
88 cm->initialized = true;
89 }
87 m->global_available += cm->available; 90 m->global_available += cm->available;
88 cm->online = true; 91 cm->online = true;
89 m->online_maps++; 92 m->online_maps++;
@@ -370,14 +373,16 @@ void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
370 if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end)) 373 if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
371 return; 374 return;
372 375
373 if (cm->online) { 376 clear_bit(bit, cm->alloc_map);
374 clear_bit(bit, cm->alloc_map); 377 cm->allocated--;
375 cm->allocated--; 378
379 if (cm->online)
376 m->total_allocated--; 380 m->total_allocated--;
377 if (!managed) { 381
378 cm->available++; 382 if (!managed) {
383 cm->available++;
384 if (cm->online)
379 m->global_available++; 385 m->global_available++;
380 }
381 } 386 }
382 trace_irq_matrix_free(bit, cpu, m, cm); 387 trace_irq_matrix_free(bit, cpu, m, cm);
383} 388}
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index da2ccf142358..102160ff5c66 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -978,67 +978,90 @@ static int prepare_kprobe(struct kprobe *p)
978} 978}
979 979
980/* Caller must lock kprobe_mutex */ 980/* Caller must lock kprobe_mutex */
981static void arm_kprobe_ftrace(struct kprobe *p) 981static int arm_kprobe_ftrace(struct kprobe *p)
982{ 982{
983 int ret; 983 int ret = 0;
984 984
985 ret = ftrace_set_filter_ip(&kprobe_ftrace_ops, 985 ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
986 (unsigned long)p->addr, 0, 0); 986 (unsigned long)p->addr, 0, 0);
987 WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret); 987 if (ret) {
988 kprobe_ftrace_enabled++; 988 pr_debug("Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
989 if (kprobe_ftrace_enabled == 1) { 989 return ret;
990 }
991
992 if (kprobe_ftrace_enabled == 0) {
990 ret = register_ftrace_function(&kprobe_ftrace_ops); 993 ret = register_ftrace_function(&kprobe_ftrace_ops);
991 WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret); 994 if (ret) {
995 pr_debug("Failed to init kprobe-ftrace (%d)\n", ret);
996 goto err_ftrace;
997 }
992 } 998 }
999
1000 kprobe_ftrace_enabled++;
1001 return ret;
1002
1003err_ftrace:
1004 /*
1005 * Note: Since kprobe_ftrace_ops has IPMODIFY set, and ftrace requires a
1006 * non-empty filter_hash for IPMODIFY ops, we're safe from an accidental
1007 * empty filter_hash which would undesirably trace all functions.
1008 */
1009 ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0);
1010 return ret;
993} 1011}
994 1012
995/* Caller must lock kprobe_mutex */ 1013/* Caller must lock kprobe_mutex */
996static void disarm_kprobe_ftrace(struct kprobe *p) 1014static int disarm_kprobe_ftrace(struct kprobe *p)
997{ 1015{
998 int ret; 1016 int ret = 0;
999 1017
1000 kprobe_ftrace_enabled--; 1018 if (kprobe_ftrace_enabled == 1) {
1001 if (kprobe_ftrace_enabled == 0) {
1002 ret = unregister_ftrace_function(&kprobe_ftrace_ops); 1019 ret = unregister_ftrace_function(&kprobe_ftrace_ops);
1003 WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret); 1020 if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret))
1021 return ret;
1004 } 1022 }
1023
1024 kprobe_ftrace_enabled--;
1025
1005 ret = ftrace_set_filter_ip(&kprobe_ftrace_ops, 1026 ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
1006 (unsigned long)p->addr, 1, 0); 1027 (unsigned long)p->addr, 1, 0);
1007 WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret); 1028 WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
1029 return ret;
1008} 1030}
1009#else /* !CONFIG_KPROBES_ON_FTRACE */ 1031#else /* !CONFIG_KPROBES_ON_FTRACE */
1010#define prepare_kprobe(p) arch_prepare_kprobe(p) 1032#define prepare_kprobe(p) arch_prepare_kprobe(p)
1011#define arm_kprobe_ftrace(p) do {} while (0) 1033#define arm_kprobe_ftrace(p) (-ENODEV)
1012#define disarm_kprobe_ftrace(p) do {} while (0) 1034#define disarm_kprobe_ftrace(p) (-ENODEV)
1013#endif 1035#endif
1014 1036
1015/* Arm a kprobe with text_mutex */ 1037/* Arm a kprobe with text_mutex */
1016static void arm_kprobe(struct kprobe *kp) 1038static int arm_kprobe(struct kprobe *kp)
1017{ 1039{
1018 if (unlikely(kprobe_ftrace(kp))) { 1040 if (unlikely(kprobe_ftrace(kp)))
1019 arm_kprobe_ftrace(kp); 1041 return arm_kprobe_ftrace(kp);
1020 return; 1042
1021 }
1022 cpus_read_lock(); 1043 cpus_read_lock();
1023 mutex_lock(&text_mutex); 1044 mutex_lock(&text_mutex);
1024 __arm_kprobe(kp); 1045 __arm_kprobe(kp);
1025 mutex_unlock(&text_mutex); 1046 mutex_unlock(&text_mutex);
1026 cpus_read_unlock(); 1047 cpus_read_unlock();
1048
1049 return 0;
1027} 1050}
1028 1051
1029/* Disarm a kprobe with text_mutex */ 1052/* Disarm a kprobe with text_mutex */
1030static void disarm_kprobe(struct kprobe *kp, bool reopt) 1053static int disarm_kprobe(struct kprobe *kp, bool reopt)
1031{ 1054{
1032 if (unlikely(kprobe_ftrace(kp))) { 1055 if (unlikely(kprobe_ftrace(kp)))
1033 disarm_kprobe_ftrace(kp); 1056 return disarm_kprobe_ftrace(kp);
1034 return;
1035 }
1036 1057
1037 cpus_read_lock(); 1058 cpus_read_lock();
1038 mutex_lock(&text_mutex); 1059 mutex_lock(&text_mutex);
1039 __disarm_kprobe(kp, reopt); 1060 __disarm_kprobe(kp, reopt);
1040 mutex_unlock(&text_mutex); 1061 mutex_unlock(&text_mutex);
1041 cpus_read_unlock(); 1062 cpus_read_unlock();
1063
1064 return 0;
1042} 1065}
1043 1066
1044/* 1067/*
@@ -1362,9 +1385,15 @@ out:
1362 1385
1363 if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) { 1386 if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
1364 ap->flags &= ~KPROBE_FLAG_DISABLED; 1387 ap->flags &= ~KPROBE_FLAG_DISABLED;
1365 if (!kprobes_all_disarmed) 1388 if (!kprobes_all_disarmed) {
1366 /* Arm the breakpoint again. */ 1389 /* Arm the breakpoint again. */
1367 arm_kprobe(ap); 1390 ret = arm_kprobe(ap);
1391 if (ret) {
1392 ap->flags |= KPROBE_FLAG_DISABLED;
1393 list_del_rcu(&p->list);
1394 synchronize_sched();
1395 }
1396 }
1368 } 1397 }
1369 return ret; 1398 return ret;
1370} 1399}
@@ -1573,8 +1602,14 @@ int register_kprobe(struct kprobe *p)
1573 hlist_add_head_rcu(&p->hlist, 1602 hlist_add_head_rcu(&p->hlist,
1574 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); 1603 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
1575 1604
1576 if (!kprobes_all_disarmed && !kprobe_disabled(p)) 1605 if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
1577 arm_kprobe(p); 1606 ret = arm_kprobe(p);
1607 if (ret) {
1608 hlist_del_rcu(&p->hlist);
1609 synchronize_sched();
1610 goto out;
1611 }
1612 }
1578 1613
1579 /* Try to optimize kprobe */ 1614 /* Try to optimize kprobe */
1580 try_to_optimize_kprobe(p); 1615 try_to_optimize_kprobe(p);
@@ -1608,11 +1643,12 @@ static int aggr_kprobe_disabled(struct kprobe *ap)
1608static struct kprobe *__disable_kprobe(struct kprobe *p) 1643static struct kprobe *__disable_kprobe(struct kprobe *p)
1609{ 1644{
1610 struct kprobe *orig_p; 1645 struct kprobe *orig_p;
1646 int ret;
1611 1647
1612 /* Get an original kprobe for return */ 1648 /* Get an original kprobe for return */
1613 orig_p = __get_valid_kprobe(p); 1649 orig_p = __get_valid_kprobe(p);
1614 if (unlikely(orig_p == NULL)) 1650 if (unlikely(orig_p == NULL))
1615 return NULL; 1651 return ERR_PTR(-EINVAL);
1616 1652
1617 if (!kprobe_disabled(p)) { 1653 if (!kprobe_disabled(p)) {
1618 /* Disable probe if it is a child probe */ 1654 /* Disable probe if it is a child probe */
@@ -1626,8 +1662,13 @@ static struct kprobe *__disable_kprobe(struct kprobe *p)
1626 * should have already been disarmed, so 1662 * should have already been disarmed, so
1627 * skip the unneeded disarming process. 1663 * skip the unneeded disarming process.
1628 */ 1664 */
1629 if (!kprobes_all_disarmed) 1665 if (!kprobes_all_disarmed) {
1630 disarm_kprobe(orig_p, true); 1666 ret = disarm_kprobe(orig_p, true);
1667 if (ret) {
1668 p->flags &= ~KPROBE_FLAG_DISABLED;
1669 return ERR_PTR(ret);
1670 }
1671 }
1631 orig_p->flags |= KPROBE_FLAG_DISABLED; 1672 orig_p->flags |= KPROBE_FLAG_DISABLED;
1632 } 1673 }
1633 } 1674 }
@@ -1644,8 +1685,8 @@ static int __unregister_kprobe_top(struct kprobe *p)
1644 1685
1645 /* Disable kprobe. This will disarm it if needed. */ 1686 /* Disable kprobe. This will disarm it if needed. */
1646 ap = __disable_kprobe(p); 1687 ap = __disable_kprobe(p);
1647 if (ap == NULL) 1688 if (IS_ERR(ap))
1648 return -EINVAL; 1689 return PTR_ERR(ap);
1649 1690
1650 if (ap == p) 1691 if (ap == p)
1651 /* 1692 /*
@@ -2078,12 +2119,14 @@ static void kill_kprobe(struct kprobe *p)
2078int disable_kprobe(struct kprobe *kp) 2119int disable_kprobe(struct kprobe *kp)
2079{ 2120{
2080 int ret = 0; 2121 int ret = 0;
2122 struct kprobe *p;
2081 2123
2082 mutex_lock(&kprobe_mutex); 2124 mutex_lock(&kprobe_mutex);
2083 2125
2084 /* Disable this kprobe */ 2126 /* Disable this kprobe */
2085 if (__disable_kprobe(kp) == NULL) 2127 p = __disable_kprobe(kp);
2086 ret = -EINVAL; 2128 if (IS_ERR(p))
2129 ret = PTR_ERR(p);
2087 2130
2088 mutex_unlock(&kprobe_mutex); 2131 mutex_unlock(&kprobe_mutex);
2089 return ret; 2132 return ret;
@@ -2116,7 +2159,9 @@ int enable_kprobe(struct kprobe *kp)
2116 2159
2117 if (!kprobes_all_disarmed && kprobe_disabled(p)) { 2160 if (!kprobes_all_disarmed && kprobe_disabled(p)) {
2118 p->flags &= ~KPROBE_FLAG_DISABLED; 2161 p->flags &= ~KPROBE_FLAG_DISABLED;
2119 arm_kprobe(p); 2162 ret = arm_kprobe(p);
2163 if (ret)
2164 p->flags |= KPROBE_FLAG_DISABLED;
2120 } 2165 }
2121out: 2166out:
2122 mutex_unlock(&kprobe_mutex); 2167 mutex_unlock(&kprobe_mutex);
@@ -2407,11 +2452,12 @@ static const struct file_operations debugfs_kprobe_blacklist_ops = {
2407 .release = seq_release, 2452 .release = seq_release,
2408}; 2453};
2409 2454
2410static void arm_all_kprobes(void) 2455static int arm_all_kprobes(void)
2411{ 2456{
2412 struct hlist_head *head; 2457 struct hlist_head *head;
2413 struct kprobe *p; 2458 struct kprobe *p;
2414 unsigned int i; 2459 unsigned int i, total = 0, errors = 0;
2460 int err, ret = 0;
2415 2461
2416 mutex_lock(&kprobe_mutex); 2462 mutex_lock(&kprobe_mutex);
2417 2463
@@ -2428,46 +2474,74 @@ static void arm_all_kprobes(void)
2428 /* Arming kprobes doesn't optimize kprobe itself */ 2474 /* Arming kprobes doesn't optimize kprobe itself */
2429 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2475 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2430 head = &kprobe_table[i]; 2476 head = &kprobe_table[i];
2431 hlist_for_each_entry_rcu(p, head, hlist) 2477 /* Arm all kprobes on a best-effort basis */
2432 if (!kprobe_disabled(p)) 2478 hlist_for_each_entry_rcu(p, head, hlist) {
2433 arm_kprobe(p); 2479 if (!kprobe_disabled(p)) {
2480 err = arm_kprobe(p);
2481 if (err) {
2482 errors++;
2483 ret = err;
2484 }
2485 total++;
2486 }
2487 }
2434 } 2488 }
2435 2489
2436 printk(KERN_INFO "Kprobes globally enabled\n"); 2490 if (errors)
2491 pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n",
2492 errors, total);
2493 else
2494 pr_info("Kprobes globally enabled\n");
2437 2495
2438already_enabled: 2496already_enabled:
2439 mutex_unlock(&kprobe_mutex); 2497 mutex_unlock(&kprobe_mutex);
2440 return; 2498 return ret;
2441} 2499}
2442 2500
2443static void disarm_all_kprobes(void) 2501static int disarm_all_kprobes(void)
2444{ 2502{
2445 struct hlist_head *head; 2503 struct hlist_head *head;
2446 struct kprobe *p; 2504 struct kprobe *p;
2447 unsigned int i; 2505 unsigned int i, total = 0, errors = 0;
2506 int err, ret = 0;
2448 2507
2449 mutex_lock(&kprobe_mutex); 2508 mutex_lock(&kprobe_mutex);
2450 2509
2451 /* If kprobes are already disarmed, just return */ 2510 /* If kprobes are already disarmed, just return */
2452 if (kprobes_all_disarmed) { 2511 if (kprobes_all_disarmed) {
2453 mutex_unlock(&kprobe_mutex); 2512 mutex_unlock(&kprobe_mutex);
2454 return; 2513 return 0;
2455 } 2514 }
2456 2515
2457 kprobes_all_disarmed = true; 2516 kprobes_all_disarmed = true;
2458 printk(KERN_INFO "Kprobes globally disabled\n");
2459 2517
2460 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2518 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2461 head = &kprobe_table[i]; 2519 head = &kprobe_table[i];
2520 /* Disarm all kprobes on a best-effort basis */
2462 hlist_for_each_entry_rcu(p, head, hlist) { 2521 hlist_for_each_entry_rcu(p, head, hlist) {
2463 if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) 2522 if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
2464 disarm_kprobe(p, false); 2523 err = disarm_kprobe(p, false);
2524 if (err) {
2525 errors++;
2526 ret = err;
2527 }
2528 total++;
2529 }
2465 } 2530 }
2466 } 2531 }
2532
2533 if (errors)
2534 pr_warn("Kprobes globally disabled, but failed to disarm %d out of %d probes\n",
2535 errors, total);
2536 else
2537 pr_info("Kprobes globally disabled\n");
2538
2467 mutex_unlock(&kprobe_mutex); 2539 mutex_unlock(&kprobe_mutex);
2468 2540
2469 /* Wait for disarming all kprobes by optimizer */ 2541 /* Wait for disarming all kprobes by optimizer */
2470 wait_for_kprobe_optimizer(); 2542 wait_for_kprobe_optimizer();
2543
2544 return ret;
2471} 2545}
2472 2546
2473/* 2547/*
@@ -2494,6 +2568,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
2494{ 2568{
2495 char buf[32]; 2569 char buf[32];
2496 size_t buf_size; 2570 size_t buf_size;
2571 int ret = 0;
2497 2572
2498 buf_size = min(count, (sizeof(buf)-1)); 2573 buf_size = min(count, (sizeof(buf)-1));
2499 if (copy_from_user(buf, user_buf, buf_size)) 2574 if (copy_from_user(buf, user_buf, buf_size))
@@ -2504,17 +2579,20 @@ static ssize_t write_enabled_file_bool(struct file *file,
2504 case 'y': 2579 case 'y':
2505 case 'Y': 2580 case 'Y':
2506 case '1': 2581 case '1':
2507 arm_all_kprobes(); 2582 ret = arm_all_kprobes();
2508 break; 2583 break;
2509 case 'n': 2584 case 'n':
2510 case 'N': 2585 case 'N':
2511 case '0': 2586 case '0':
2512 disarm_all_kprobes(); 2587 ret = disarm_all_kprobes();
2513 break; 2588 break;
2514 default: 2589 default:
2515 return -EINVAL; 2590 return -EINVAL;
2516 } 2591 }
2517 2592
2593 if (ret)
2594 return ret;
2595
2518 return count; 2596 return count;
2519} 2597}
2520 2598
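With arm/disarm now returning status, a failure to arm propagates out of register_kprobe(), enable_kprobe() and the debugfs "enabled" file instead of being a WARN that callers never saw. Caller-side nothing changes except that the return value is now trustworthy (the target symbol and handler below are arbitrary examples):

static int my_pre(struct kprobe *p, struct pt_regs *regs)
{
	return 0;	/* hypothetical handler */
}

static struct kprobe kp = {
	.symbol_name = "do_sys_open",	/* example target */
	.pre_handler = my_pre,
};

static int __init probe_init(void)
{
	int ret = register_kprobe(&kp);	/* now also fails if arming fails */

	if (ret)
		pr_err("register_kprobe failed: %d\n", ret);
	return ret;
}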
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 38ece035039e..d880296245c5 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -379,6 +379,14 @@ queue:
379 tail = encode_tail(smp_processor_id(), idx); 379 tail = encode_tail(smp_processor_id(), idx);
380 380
381 node += idx; 381 node += idx;
382
383 /*
384 * Ensure that we increment the head node->count before initialising
385 * the actual node. If the compiler is kind enough to reorder these
386 * stores, then an IRQ could overwrite our assignments.
387 */
388 barrier();
389
382 node->locked = 0; 390 node->locked = 0;
383 node->next = NULL; 391 node->next = NULL;
384 pv_init_node(node); 392 pv_init_node(node);
@@ -408,14 +416,15 @@ queue:
408 */ 416 */
409 if (old & _Q_TAIL_MASK) { 417 if (old & _Q_TAIL_MASK) {
410 prev = decode_tail(old); 418 prev = decode_tail(old);
419
411 /* 420 /*
412 * The above xchg_tail() is also a load of @lock which 421 * We must ensure that the stores to @node are observed before
413 * generates, through decode_tail(), a pointer. The address 422 * the write to prev->next. The address dependency from
414 * dependency matches the RELEASE of xchg_tail() such that 423 * xchg_tail is not sufficient to ensure this because the read
415 * the subsequent access to @prev happens after. 424 * component of xchg_tail is unordered with respect to the
425 * initialisation of @node.
416 */ 426 */
417 427 smp_store_release(&prev->next, node);
418 WRITE_ONCE(prev->next, node);
419 428
420 pv_wait_node(node, prev); 429 pv_wait_node(node, prev);
421 arch_mcs_spin_lock_contended(&node->locked); 430 arch_mcs_spin_lock_contended(&node->locked);
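The switch from WRITE_ONCE() to smp_store_release() is the standard initialise-then-publish idiom: every store to the node must be visible before the node becomes reachable through prev->next. In isolation (the consumer side here pairs with an acquire load for clarity; qspinlock itself relies on dependency ordering and the MCS handshake):

/* producer: initialise fully, then publish */
node->locked = 0;
node->next = NULL;
smp_store_release(&prev->next, node);	/* orders the inits before the link */

/* consumer: an acquire load sees a fully initialised node */
struct mcs_spinlock *next = smp_load_acquire(&prev->next);
if (next)
	WRITE_ONCE(next->locked, 1);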
diff --git a/kernel/relay.c b/kernel/relay.c
index c3029402f15c..c955b10c973c 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -163,7 +163,7 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan)
163{ 163{
164 struct rchan_buf *buf; 164 struct rchan_buf *buf;
165 165
166 if (chan->n_subbufs > UINT_MAX / sizeof(size_t *)) 166 if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t *))
167 return NULL; 167 return NULL;
168 168
169 buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL); 169 buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index bf724c1952ea..e7c535eee0a6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2601,19 +2601,31 @@ static inline void finish_task(struct task_struct *prev)
2601#endif 2601#endif
2602} 2602}
2603 2603
2604static inline void finish_lock_switch(struct rq *rq) 2604static inline void
2605prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
2605{ 2606{
2607 /*
2608 * Since the runqueue lock will be released by the next
2609 * task (which is an invalid locking op but in the case
2610 * of the scheduler it's an obvious special-case), so we
2611 * do an early lockdep release here:
2612 */
2613 rq_unpin_lock(rq, rf);
2614 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
2606#ifdef CONFIG_DEBUG_SPINLOCK 2615#ifdef CONFIG_DEBUG_SPINLOCK
2607 /* this is a valid case when another task releases the spinlock */ 2616 /* this is a valid case when another task releases the spinlock */
2608 rq->lock.owner = current; 2617 rq->lock.owner = next;
2609#endif 2618#endif
2619}
2620
2621static inline void finish_lock_switch(struct rq *rq)
2622{
2610 /* 2623 /*
2611 * If we are tracking spinlock dependencies then we have to 2624 * If we are tracking spinlock dependencies then we have to
2612 * fix up the runqueue lock - which gets 'carried over' from 2625 * fix up the runqueue lock - which gets 'carried over' from
2613 * prev into current: 2626 * prev into current:
2614 */ 2627 */
2615 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); 2628 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
2616
2617 raw_spin_unlock_irq(&rq->lock); 2629 raw_spin_unlock_irq(&rq->lock);
2618} 2630}
2619 2631
@@ -2844,14 +2856,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
2844 2856
2845 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); 2857 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
2846 2858
2847 /* 2859 prepare_lock_switch(rq, next, rf);
2848 * Since the runqueue lock will be released by the next
2849 * task (which is an invalid locking op but in the case
2850 * of the scheduler it's an obvious special-case), so we
2851 * do an early lockdep release here:
2852 */
2853 rq_unpin_lock(rq, rf);
2854 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
2855 2860
2856 /* Here we just switch the register state and the stack. */ 2861 /* Here we just switch the register state and the stack. */
2857 switch_to(prev, next, prev); 2862 switch_to(prev, next, prev);
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index dd062a1c8cf0..7936f548e071 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -19,8 +19,6 @@
19 19
20#include "sched.h" 20#include "sched.h"
21 21
22#define SUGOV_KTHREAD_PRIORITY 50
23
24struct sugov_tunables { 22struct sugov_tunables {
25 struct gov_attr_set attr_set; 23 struct gov_attr_set attr_set;
26 unsigned int rate_limit_us; 24 unsigned int rate_limit_us;
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 9bb0e0c412ec..9df09782025c 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1153,6 +1153,7 @@ static void update_curr_dl(struct rq *rq)
1153 struct sched_dl_entity *dl_se = &curr->dl; 1153 struct sched_dl_entity *dl_se = &curr->dl;
1154 u64 delta_exec, scaled_delta_exec; 1154 u64 delta_exec, scaled_delta_exec;
1155 int cpu = cpu_of(rq); 1155 int cpu = cpu_of(rq);
1156 u64 now;
1156 1157
1157 if (!dl_task(curr) || !on_dl_rq(dl_se)) 1158 if (!dl_task(curr) || !on_dl_rq(dl_se))
1158 return; 1159 return;
@@ -1165,7 +1166,8 @@ static void update_curr_dl(struct rq *rq)
1165 * natural solution, but the full ramifications of this 1166 * natural solution, but the full ramifications of this
1166 * approach need further study. 1167 * approach need further study.
1167 */ 1168 */
1168 delta_exec = rq_clock_task(rq) - curr->se.exec_start; 1169 now = rq_clock_task(rq);
1170 delta_exec = now - curr->se.exec_start;
1169 if (unlikely((s64)delta_exec <= 0)) { 1171 if (unlikely((s64)delta_exec <= 0)) {
1170 if (unlikely(dl_se->dl_yielded)) 1172 if (unlikely(dl_se->dl_yielded))
1171 goto throttle; 1173 goto throttle;
@@ -1178,7 +1180,7 @@ static void update_curr_dl(struct rq *rq)
1178 curr->se.sum_exec_runtime += delta_exec; 1180 curr->se.sum_exec_runtime += delta_exec;
1179 account_group_exec_runtime(curr, delta_exec); 1181 account_group_exec_runtime(curr, delta_exec);
1180 1182
1181 curr->se.exec_start = rq_clock_task(rq); 1183 curr->se.exec_start = now;
1182 cgroup_account_cputime(curr, delta_exec); 1184 cgroup_account_cputime(curr, delta_exec);
1183 1185
1184 sched_rt_avg_update(rq, delta_exec); 1186 sched_rt_avg_update(rq, delta_exec);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 663b2355a3aa..aad49451584e 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -950,12 +950,13 @@ static void update_curr_rt(struct rq *rq)
950{ 950{
951 struct task_struct *curr = rq->curr; 951 struct task_struct *curr = rq->curr;
952 struct sched_rt_entity *rt_se = &curr->rt; 952 struct sched_rt_entity *rt_se = &curr->rt;
953 u64 now = rq_clock_task(rq);
954 u64 delta_exec; 953 u64 delta_exec;
954 u64 now;
955 955
956 if (curr->sched_class != &rt_sched_class) 956 if (curr->sched_class != &rt_sched_class)
957 return; 957 return;
958 958
959 now = rq_clock_task(rq);
959 delta_exec = now - curr->se.exec_start; 960 delta_exec = now - curr->se.exec_start;
960 if (unlikely((s64)delta_exec <= 0)) 961 if (unlikely((s64)delta_exec <= 0))
961 return; 962 return;
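Both update_curr_dl() and update_curr_rt() now sample rq_clock_task() once, so exec_start is reset to the very timestamp the delta was computed against and no runtime slips between two separate clock reads; rt.c additionally delays the read until after the sched_class check. The pattern boiled down:

u64 now = rq_clock_task(rq);		/* single sample */
s64 delta = now - curr->se.exec_start;

if (delta > 0) {
	curr->se.sum_exec_runtime += delta;
	curr->se.exec_start = now;	/* not a second, later read */
}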
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 940fa408a288..dc77548167ef 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -1076,14 +1076,16 @@ long seccomp_get_metadata(struct task_struct *task,
1076 1076
1077 size = min_t(unsigned long, size, sizeof(kmd)); 1077 size = min_t(unsigned long, size, sizeof(kmd));
1078 1078
1079 if (copy_from_user(&kmd, data, size)) 1079 if (size < sizeof(kmd.filter_off))
1080 return -EINVAL;
1081
1082 if (copy_from_user(&kmd.filter_off, data, sizeof(kmd.filter_off)))
1080 return -EFAULT; 1083 return -EFAULT;
1081 1084
1082 filter = get_nth_filter(task, kmd.filter_off); 1085 filter = get_nth_filter(task, kmd.filter_off);
1083 if (IS_ERR(filter)) 1086 if (IS_ERR(filter))
1084 return PTR_ERR(filter); 1087 return PTR_ERR(filter);
1085 1088
1086 memset(&kmd, 0, sizeof(kmd));
1087 if (filter->log) 1089 if (filter->log)
1088 kmd.flags |= SECCOMP_FILTER_FLAG_LOG; 1090 kmd.flags |= SECCOMP_FILTER_FLAG_LOG;
1089 1091
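With both fields widened to __u64, 32-bit tracers and 64-bit kernels now agree on the struct layout, and the kernel reads only filter_off before filling flags on the way back. The userspace call shape is unchanged (illustrative; the addr argument carries the struct size):

#include <stdio.h>
#include <sys/ptrace.h>
#include <linux/ptrace.h>
#include <linux/seccomp.h>

static void query_first_filter(pid_t pid)
{
	struct seccomp_metadata md = { .filter_off = 0 };	/* first filter */

	if (ptrace(PTRACE_SECCOMP_GET_METADATA, pid,
		   (void *)sizeof(md), &md) == -1)
		perror("PTRACE_SECCOMP_GET_METADATA");
	else if (md.flags & SECCOMP_FILTER_FLAG_LOG)
		printf("filter 0 was installed with the LOG flag\n");
}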
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index fc2838ac8b78..c0a9e310d715 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -872,6 +872,8 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
872 return -EINVAL; 872 return -EINVAL;
873 if (copy_from_user(&query, uquery, sizeof(query))) 873 if (copy_from_user(&query, uquery, sizeof(query)))
874 return -EFAULT; 874 return -EFAULT;
875 if (query.ids_len > BPF_TRACE_MAX_PROGS)
876 return -E2BIG;
875 877
876 mutex_lock(&bpf_event_mutex); 878 mutex_lock(&bpf_event_mutex);
877 ret = bpf_prog_array_copy_info(event->tp_event->prog_array, 879 ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
diff --git a/kernel/user.c b/kernel/user.c
index 9a20acce460d..36288d840675 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -101,6 +101,7 @@ struct user_struct root_user = {
101 .sigpending = ATOMIC_INIT(0), 101 .sigpending = ATOMIC_INIT(0),
102 .locked_shm = 0, 102 .locked_shm = 0,
103 .uid = GLOBAL_ROOT_UID, 103 .uid = GLOBAL_ROOT_UID,
104 .ratelimit = RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0),
104}; 105};
105 106
106/* 107/*
@@ -191,6 +192,8 @@ struct user_struct *alloc_uid(kuid_t uid)
191 192
192 new->uid = uid; 193 new->uid = uid;
193 atomic_set(&new->__count, 1); 194 atomic_set(&new->__count, 1);
195 ratelimit_state_init(&new->ratelimit, HZ, 100);
196 ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);
194 197
195 /* 198 /*
196 * Before adding this, check whether we raced 199 * Before adding this, check whether we raced
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 017044c26233..bb9a519cbf50 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4180,6 +4180,22 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
4180EXPORT_SYMBOL_GPL(workqueue_set_max_active); 4180EXPORT_SYMBOL_GPL(workqueue_set_max_active);
4181 4181
4182/** 4182/**
4183 * current_work - retrieve %current task's work struct
4184 *
4185 * Determine if %current task is a workqueue worker and what it's working on.
4186 * Useful to find out the context that the %current task is running in.
4187 *
4188 * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
4189 */
4190struct work_struct *current_work(void)
4191{
4192 struct worker *worker = current_wq_worker();
4193
4194 return worker ? worker->current_work : NULL;
4195}
4196EXPORT_SYMBOL(current_work);
4197
4198/**
4183 * current_is_workqueue_rescuer - is %current workqueue rescuer? 4199 * current_is_workqueue_rescuer - is %current workqueue rescuer?
4184 * 4200 *
4185 * Determine whether %current is a workqueue rescuer. Can be used from 4201 * Determine whether %current is a workqueue rescuer. Can be used from
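current_work() gives handlers and the code they call a way to ask which work item, if any, the current task is executing, e.g. to detect recursion into the same item. A hedged usage sketch (the handler is hypothetical):

static void my_work_fn(struct work_struct *work)
{
	/* non-NULL only while a worker is executing an item */
	WARN_ON(current_work() != work);
}

static bool running_my_work(struct work_struct *work)
{
	return current_work() == work;	/* NULL outside workqueue context */
}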
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6088408ef26c..64155e310a9f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1642,6 +1642,7 @@ config DMA_API_DEBUG
1642 1642
1643menuconfig RUNTIME_TESTING_MENU 1643menuconfig RUNTIME_TESTING_MENU
1644 bool "Runtime Testing" 1644 bool "Runtime Testing"
1645 def_bool y
1645 1646
1646if RUNTIME_TESTING_MENU 1647if RUNTIME_TESTING_MENU
1647 1648
diff --git a/lib/dma-direct.c b/lib/dma-direct.c
index 40b1f92f2214..c9e8e21cb334 100644
--- a/lib/dma-direct.c
+++ b/lib/dma-direct.c
@@ -84,6 +84,10 @@ again:
84 return page_address(page); 84 return page_address(page);
85} 85}
86 86
87/*
88 * NOTE: this function must never look at the dma_addr argument, because we want
89 * to be able to use it as a helper for iommu implementations as well.
90 */
87void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, 91void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
88 dma_addr_t dma_addr, unsigned long attrs) 92 dma_addr_t dma_addr, unsigned long attrs)
89{ 93{
@@ -152,5 +156,6 @@ const struct dma_map_ops dma_direct_ops = {
152 .map_sg = dma_direct_map_sg, 156 .map_sg = dma_direct_map_sg,
153 .dma_supported = dma_direct_supported, 157 .dma_supported = dma_direct_supported,
154 .mapping_error = dma_direct_mapping_error, 158 .mapping_error = dma_direct_mapping_error,
159 .is_phys = 1,
155}; 160};
156EXPORT_SYMBOL(dma_direct_ops); 161EXPORT_SYMBOL(dma_direct_ops);
diff --git a/lib/idr.c b/lib/idr.c
index c98d77fcf393..99ec5bc89d25 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -431,7 +431,6 @@ int ida_get_new_above(struct ida *ida, int start, int *id)
431 bitmap = this_cpu_xchg(ida_bitmap, NULL); 431 bitmap = this_cpu_xchg(ida_bitmap, NULL);
432 if (!bitmap) 432 if (!bitmap)
433 return -EAGAIN; 433 return -EAGAIN;
434 memset(bitmap, 0, sizeof(*bitmap));
435 bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT; 434 bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
436 rcu_assign_pointer(*slot, bitmap); 435 rcu_assign_pointer(*slot, bitmap);
437 } 436 }
@@ -464,7 +463,6 @@ int ida_get_new_above(struct ida *ida, int start, int *id)
464 bitmap = this_cpu_xchg(ida_bitmap, NULL); 463 bitmap = this_cpu_xchg(ida_bitmap, NULL);
465 if (!bitmap) 464 if (!bitmap)
466 return -EAGAIN; 465 return -EAGAIN;
467 memset(bitmap, 0, sizeof(*bitmap));
468 __set_bit(bit, bitmap->bitmap); 466 __set_bit(bit, bitmap->bitmap);
469 radix_tree_iter_replace(root, &iter, slot, bitmap); 467 radix_tree_iter_replace(root, &iter, slot, bitmap);
470 } 468 }
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 0a7ae3288a24..8e00138d593f 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -2125,7 +2125,7 @@ int ida_pre_get(struct ida *ida, gfp_t gfp)
2125 preempt_enable(); 2125 preempt_enable();
2126 2126
2127 if (!this_cpu_read(ida_bitmap)) { 2127 if (!this_cpu_read(ida_bitmap)) {
2128 struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp); 2128 struct ida_bitmap *bitmap = kzalloc(sizeof(*bitmap), gfp);
2129 if (!bitmap) 2129 if (!bitmap)
2130 return 0; 2130 return 0;
2131 if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap)) 2131 if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap))
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 77ee6ced11b1..d7a708f82559 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1849,7 +1849,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
1849{ 1849{
1850 const int default_width = 2 * sizeof(void *); 1850 const int default_width = 2 * sizeof(void *);
1851 1851
1852 if (!ptr && *fmt != 'K') { 1852 if (!ptr && *fmt != 'K' && *fmt != 'x') {
1853 /* 1853 /*
1854 * Print (null) with the same width as a pointer so it makes 1854 * Print (null) with the same width as a pointer so it makes
1855 * tabular output look nice. 1855 * tabular output look nice.
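The extra 'x' case covers the %px specifier, which prints an unmodified address: a NULL pointer now falls through and is rendered as zeros at pointer width instead of the "(null)" placeholder, keeping raw-address columns uniform. For instance:

pr_info("ptr=%px\n", NULL);	/* now: ptr=0000000000000000 on 64-bit */
pr_info("ptr=%p\n", NULL);	/* unchanged: handled by the fallback above */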
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 4b80ccee4535..8291b75f42c8 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1139,8 +1139,6 @@ int memory_failure(unsigned long pfn, int flags)
1139 return 0; 1139 return 0;
1140 } 1140 }
1141 1141
1142 arch_unmap_kpfn(pfn);
1143
1144 orig_head = hpage = compound_head(p); 1142 orig_head = hpage = compound_head(p);
1145 num_poisoned_pages_inc(); 1143 num_poisoned_pages_inc();
1146 1144
diff --git a/mm/memory.c b/mm/memory.c
index dd8de96f5547..5fcfc24904d1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -80,7 +80,7 @@
80 80
81#include "internal.h" 81#include "internal.h"
82 82
83#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS 83#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
84#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid. 84#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
85#endif 85#endif
86 86
diff --git a/mm/mlock.c b/mm/mlock.c
index 79398200e423..74e5a6547c3d 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -64,6 +64,12 @@ void clear_page_mlock(struct page *page)
64 mod_zone_page_state(page_zone(page), NR_MLOCK, 64 mod_zone_page_state(page_zone(page), NR_MLOCK,
65 -hpage_nr_pages(page)); 65 -hpage_nr_pages(page));
66 count_vm_event(UNEVICTABLE_PGCLEARED); 66 count_vm_event(UNEVICTABLE_PGCLEARED);
67 /*
68 * The previous TestClearPageMlocked() corresponds to the smp_mb()
69 * in __pagevec_lru_add_fn().
70 *
71 * See __pagevec_lru_add_fn for more explanation.
72 */
67 if (!isolate_lru_page(page)) { 73 if (!isolate_lru_page(page)) {
68 putback_lru_page(page); 74 putback_lru_page(page);
69 } else { 75 } else {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 81e18ceef579..cb416723538f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -46,6 +46,7 @@
46#include <linux/stop_machine.h> 46#include <linux/stop_machine.h>
47#include <linux/sort.h> 47#include <linux/sort.h>
48#include <linux/pfn.h> 48#include <linux/pfn.h>
49#include <xen/xen.h>
49#include <linux/backing-dev.h> 50#include <linux/backing-dev.h>
50#include <linux/fault-inject.h> 51#include <linux/fault-inject.h>
51#include <linux/page-isolation.h> 52#include <linux/page-isolation.h>
@@ -347,6 +348,9 @@ static inline bool update_defer_init(pg_data_t *pgdat,
347 /* Always populate low zones for address-constrained allocations */ 348 /* Always populate low zones for address-constrained allocations */
348 if (zone_end < pgdat_end_pfn(pgdat)) 349 if (zone_end < pgdat_end_pfn(pgdat))
349 return true; 350 return true;
351 /* Xen PV domains need page structures early */
352 if (xen_pv_domain())
353 return true;
350 (*nr_initialised)++; 354 (*nr_initialised)++;
351 if ((*nr_initialised > pgdat->static_init_pgcnt) && 355 if ((*nr_initialised > pgdat->static_init_pgcnt) &&
352 (pfn & (PAGES_PER_SECTION - 1)) == 0) { 356 (pfn & (PAGES_PER_SECTION - 1)) == 0) {
diff --git a/mm/swap.c b/mm/swap.c
index 567a7b96e41d..0f17330dd0e5 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -446,30 +446,6 @@ void lru_cache_add(struct page *page)
 }
 
 /**
- * add_page_to_unevictable_list - add a page to the unevictable list
- * @page:  the page to be added to the unevictable list
- *
- * Add page directly to its zone's unevictable list.  To avoid races with
- * tasks that might be making the page evictable, through eg. munlock,
- * munmap or exit, while it's not on the lru, we want to add the page
- * while it's locked or otherwise "invisible" to other tasks.  This is
- * difficult to do when using the pagevec cache, so bypass that.
- */
-void add_page_to_unevictable_list(struct page *page)
-{
-	struct pglist_data *pgdat = page_pgdat(page);
-	struct lruvec *lruvec;
-
-	spin_lock_irq(&pgdat->lru_lock);
-	lruvec = mem_cgroup_page_lruvec(page, pgdat);
-	ClearPageActive(page);
-	SetPageUnevictable(page);
-	SetPageLRU(page);
-	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
-	spin_unlock_irq(&pgdat->lru_lock);
-}
-
-/**
  * lru_cache_add_active_or_unevictable
  * @page:  the page to be added to LRU
  * @vma:   vma in which page is mapped for determining reclaimability
@@ -484,13 +460,9 @@ void lru_cache_add_active_or_unevictable(struct page *page,
 {
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 
-	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
+	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
 		SetPageActive(page);
-		lru_cache_add(page);
-		return;
-	}
-
-	if (!TestSetPageMlocked(page)) {
+	else if (!TestSetPageMlocked(page)) {
 		/*
 		 * We use the irq-unsafe __mod_zone_page_stat because this
 		 * counter is not modified from interrupt context, and the pte
@@ -500,7 +472,7 @@ void lru_cache_add_active_or_unevictable(struct page *page,
 				    hpage_nr_pages(page));
 		count_vm_event(UNEVICTABLE_PGMLOCKED);
 	}
-	add_page_to_unevictable_list(page);
+	lru_cache_add(page);
 }
 
 /*
@@ -886,15 +858,55 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 				 void *arg)
 {
-	int file = page_is_file_cache(page);
-	int active = PageActive(page);
-	enum lru_list lru = page_lru(page);
+	enum lru_list lru;
+	int was_unevictable = TestClearPageUnevictable(page);
 
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 
 	SetPageLRU(page);
+	/*
+	 * Page becomes evictable in two ways:
+	 * 1) Within LRU lock [munlock_vma_pages() and __munlock_pagevec()].
+	 * 2) Before acquiring LRU lock to put the page to correct LRU and then
+	 *    a) do PageLRU check with lock [check_move_unevictable_pages]
+	 *    b) do PageLRU check before lock [clear_page_mlock]
+	 *
+	 * (1) & (2a) are ok as LRU lock will serialize them. For (2b), we need
+	 * following strict ordering:
+	 *
+	 * #0: __pagevec_lru_add_fn		#1: clear_page_mlock
+	 *
+	 * SetPageLRU()				TestClearPageMlocked()
+	 * smp_mb() // explicit ordering	// above provides strict
+	 *					// ordering
+	 * PageMlocked()			PageLRU()
+	 *
+	 *
+	 * if '#1' does not observe setting of PG_lru by '#0' and fails
+	 * isolation, the explicit barrier will make sure that page_evictable
+	 * check will put the page in correct LRU. Without smp_mb(), SetPageLRU
+	 * can be reordered after PageMlocked check and can make '#1' to fail
+	 * the isolation of the page whose Mlocked bit is cleared (#0 is also
+	 * looking at the same page) and the evictable page will be stranded
+	 * in an unevictable LRU.
+	 */
+	smp_mb();
+
+	if (page_evictable(page)) {
+		lru = page_lru(page);
+		update_page_reclaim_stat(lruvec, page_is_file_cache(page),
+					 PageActive(page));
+		if (was_unevictable)
+			count_vm_event(UNEVICTABLE_PGRESCUED);
+	} else {
+		lru = LRU_UNEVICTABLE;
+		ClearPageActive(page);
+		SetPageUnevictable(page);
+		if (!was_unevictable)
+			count_vm_event(UNEVICTABLE_PGCULLED);
+	}
+
 	add_page_to_lru_list(page, lruvec, lru);
-	update_page_reclaim_stat(lruvec, file, active);
 	trace_mm_lru_insertion(page, lru);
 }
 
@@ -913,7 +925,7 @@ EXPORT_SYMBOL(__pagevec_lru_add);
  * @pvec:	Where the resulting entries are placed
  * @mapping:	The address_space to search
  * @start:	The starting entry index
- * @nr_pages:	The maximum number of pages
+ * @nr_entries:	The maximum number of pages
  * @indices:	The cache indices corresponding to the entries in @pvec
  *
  * pagevec_lookup_entries() will search for and return a group of up
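The new comment in __pagevec_lru_add_fn describes a store-buffering pattern: each CPU stores its flag and then loads the other CPU's flag, and at least one side must observe the other's store or the page strands on the wrong LRU. A userspace C11 sketch of that pairing follows; the flag names are stand-ins, and it assumes a toolchain providing <threads.h>.

/* Store-buffering model of the smp_mb() pairing above. CPU0 stores PG_lru
 * then reads PG_mlocked; CPU1 clears PG_mlocked then reads PG_lru. The
 * seq_cst fences forbid the stranded outcome saw_mlocked==1 && saw_lru==0.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

static atomic_int pg_lru, pg_mlocked = 1;
static int saw_mlocked, saw_lru;

static int cpu0(void *arg)	/* models __pagevec_lru_add_fn() */
{
	atomic_store_explicit(&pg_lru, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* the smp_mb() */
	saw_mlocked = atomic_load_explicit(&pg_mlocked, memory_order_relaxed);
	return 0;
}

static int cpu1(void *arg)	/* models clear_page_mlock() */
{
	atomic_store_explicit(&pg_mlocked, 0, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* TestClear orders fully */
	saw_lru = atomic_load_explicit(&pg_lru, memory_order_relaxed);
	return 0;
}

int main(void)
{
	thrd_t t0, t1;

	thrd_create(&t0, cpu0, NULL);
	thrd_create(&t1, cpu1, NULL);
	thrd_join(t0, NULL);
	thrd_join(t1, NULL);
	/* Never prints "1 0": one side always sees the other's store,
	 * so one side always moves the page to the correct LRU. */
	printf("%d %d\n", saw_mlocked, saw_lru);
	return 0;
}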
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 673942094328..ebff729cc956 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1943,11 +1943,15 @@ void *vmalloc_exec(unsigned long size)
 }
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
-#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
+#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
-#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
+#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
 #else
-#define GFP_VMALLOC32 GFP_KERNEL
+/*
+ * 64b systems should always have either DMA or DMA32 zones. For others
+ * GFP_DMA32 should do the right thing and use the normal zone.
+ */
+#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
 #endif
 
 /**
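The first two arms gain parentheses because an unparenthesized `a | b` macro body binds to operators at the expansion site. A self-contained illustration with arbitrary flag values:

/* Why the added parentheses matter: & binds tighter than |, so an
 * unparenthesized OR-mask macro silently changes meaning under &.
 * Flag values are arbitrary.
 */
#include <stdio.h>

#define FLAG_A 0x1
#define FLAG_B 0x4

#define UNSAFE_MASK FLAG_A | FLAG_B	/* like the old GFP_VMALLOC32 */
#define SAFE_MASK   (FLAG_A | FLAG_B)	/* like the fixed definition */

int main(void)
{
	int x = 0;

	/* `x & UNSAFE_MASK` expands to `x & 0x1 | 0x4`: FLAG_B is OR'd
	 * in unconditionally. */
	printf("unsafe: %d\n", x & UNSAFE_MASK);	/* prints 4, not 0 */
	printf("safe:   %d\n", x & SAFE_MASK);		/* prints 0 */
	return 0;
}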
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 444749669187..bee53495a829 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -769,64 +769,7 @@ int remove_mapping(struct address_space *mapping, struct page *page)
  */
 void putback_lru_page(struct page *page)
 {
-	bool is_unevictable;
-	int was_unevictable = PageUnevictable(page);
-
-	VM_BUG_ON_PAGE(PageLRU(page), page);
-
-redo:
-	ClearPageUnevictable(page);
-
-	if (page_evictable(page)) {
-		/*
-		 * For evictable pages, we can use the cache.
-		 * In event of a race, worst case is we end up with an
-		 * unevictable page on [in]active list.
-		 * We know how to handle that.
-		 */
-		is_unevictable = false;
-		lru_cache_add(page);
-	} else {
-		/*
-		 * Put unevictable pages directly on zone's unevictable
-		 * list.
-		 */
-		is_unevictable = true;
-		add_page_to_unevictable_list(page);
-		/*
-		 * When racing with an mlock or AS_UNEVICTABLE clearing
-		 * (page is unlocked) make sure that if the other thread
-		 * does not observe our setting of PG_lru and fails
-		 * isolation/check_move_unevictable_pages,
-		 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
-		 * the page back to the evictable list.
-		 *
-		 * The other side is TestClearPageMlocked() or shmem_lock().
-		 */
-		smp_mb();
-	}
-
-	/*
-	 * page's status can change while we move it among lru. If an evictable
-	 * page is on unevictable list, it never be freed. To avoid that,
-	 * check after we added it to the list, again.
-	 */
-	if (is_unevictable && page_evictable(page)) {
-		if (!isolate_lru_page(page)) {
-			put_page(page);
-			goto redo;
-		}
-		/* This means someone else dropped this page from LRU
-		 * So, it will be freed or putback to LRU again. There is
-		 * nothing to do here.
-		 */
-	}
-
-	if (was_unevictable && !is_unevictable)
-		count_vm_event(UNEVICTABLE_PGRESCUED);
-	else if (!was_unevictable && is_unevictable)
-		count_vm_event(UNEVICTABLE_PGCULLED);
-
+	lru_cache_add(page);
 	put_page(page);		/* drop ref from isolate */
 }
 
diff --git a/mm/zpool.c b/mm/zpool.c
index f8cb83e7699b..01a771e304fa 100644
--- a/mm/zpool.c
+++ b/mm/zpool.c
@@ -360,7 +360,7 @@ u64 zpool_get_total_size(struct zpool *zpool)
 
 /**
  * zpool_evictable() - Test if zpool is potentially evictable
- * @pool	The zpool to test
+ * @zpool:	The zpool to test
  *
  * Zpool is only potentially evictable when it's created with struct
  * zpool_ops.evict and its driver implements struct zpool_driver.shrink.
diff --git a/mm/zswap.c b/mm/zswap.c
index c004aa4fd3f4..61a5c41972db 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1007,6 +1007,12 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 	u8 *src, *dst;
 	struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) };
 
+	/* THP isn't supported */
+	if (PageTransHuge(page)) {
+		ret = -EINVAL;
+		goto reject;
+	}
+
 	if (!zswap_enabled || !tree) {
 		ret = -ENODEV;
 		goto reject;
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index f3a4efcf1456..3aa5a93ad107 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -160,7 +160,8 @@ static void req_done(struct virtqueue *vq)
 		spin_unlock_irqrestore(&chan->lock, flags);
 		/* Wakeup if anyone waiting for VirtIO ring space. */
 		wake_up(chan->vc_wq);
-		p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
+		if (len)
+			p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
 	}
 }
 
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 0254c35b2bf0..126a8ea73c96 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -255,6 +255,9 @@ static ssize_t brport_show(struct kobject *kobj,
 	struct brport_attribute *brport_attr = to_brport_attr(attr);
 	struct net_bridge_port *p = to_brport(kobj);
 
+	if (!brport_attr->show)
+		return -EINVAL;
+
 	return brport_attr->show(p, buf);
 }
 
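brport_show() now refuses attributes with no .show method instead of calling through a NULL function pointer. A generic userspace sketch of the same guard; the attr struct and callbacks are illustrative stand-ins for the sysfs machinery.

/* Sketch of the guard above: reading an attribute without a .show
 * callback fails with EINVAL rather than dereferencing NULL.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

struct attr {
	const char *name;
	ssize_t (*show)(char *buf);	/* may legitimately be NULL */
};

static ssize_t attr_read(const struct attr *a, char *buf)
{
	if (!a->show)
		return -EINVAL;		/* the added check */
	return a->show(buf);
}

static ssize_t show_hello(char *buf)
{
	strcpy(buf, "hello\n");
	return 6;
}

int main(void)
{
	char buf[64];
	struct attr readable  = { "readable",   show_hello };
	struct attr writeonly = { "write_only", NULL };

	printf("%zd\n", attr_read(&readable, buf));	/* 6 */
	printf("%zd\n", attr_read(&writeonly, buf));	/* -22 (EINVAL) */
	return 0;
}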
diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
index 279527f8b1fe..ce7152a12bd8 100644
--- a/net/bridge/netfilter/ebt_among.c
+++ b/net/bridge/netfilter/ebt_among.c
@@ -187,17 +187,17 @@ static int ebt_among_mt_check(const struct xt_mtchk_param *par)
 		expected_length += ebt_mac_wormhash_size(wh_src);
 
 	if (em->match_size != EBT_ALIGN(expected_length)) {
-		pr_info("wrong size: %d against expected %d, rounded to %zd\n",
+		pr_err_ratelimited("wrong size: %d against expected %d, rounded to %zd\n",
 			em->match_size, expected_length,
 			EBT_ALIGN(expected_length));
 		return -EINVAL;
 	}
 	if (wh_dst && (err = ebt_mac_wormhash_check_integrity(wh_dst))) {
-		pr_info("dst integrity fail: %x\n", -err);
+		pr_err_ratelimited("dst integrity fail: %x\n", -err);
 		return -EINVAL;
 	}
 	if (wh_src && (err = ebt_mac_wormhash_check_integrity(wh_src))) {
-		pr_info("src integrity fail: %x\n", -err);
+		pr_err_ratelimited("src integrity fail: %x\n", -err);
 		return -EINVAL;
 	}
 	return 0;
diff --git a/net/bridge/netfilter/ebt_limit.c b/net/bridge/netfilter/ebt_limit.c
index 61a9f1be1263..165b9d678cf1 100644
--- a/net/bridge/netfilter/ebt_limit.c
+++ b/net/bridge/netfilter/ebt_limit.c
@@ -72,8 +72,8 @@ static int ebt_limit_mt_check(const struct xt_mtchk_param *par)
 	/* Check for overflow. */
 	if (info->burst == 0 ||
 	    user2credits(info->avg * info->burst) < user2credits(info->avg)) {
-		pr_info("overflow, try lower: %u/%u\n",
+		pr_info_ratelimited("overflow, try lower: %u/%u\n",
 			info->avg, info->burst);
 		return -EINVAL;
 	}
 
diff --git a/net/core/dev.c b/net/core/dev.c
index dda9d7b9a840..d4362befe7e2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2382,8 +2382,11 @@ EXPORT_SYMBOL(netdev_set_num_tc);
  */
 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
 {
+	bool disabling;
 	int rc;
 
+	disabling = txq < dev->real_num_tx_queues;
+
 	if (txq < 1 || txq > dev->num_tx_queues)
 		return -EINVAL;
 
@@ -2399,15 +2402,19 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
 		if (dev->num_tc)
 			netif_setup_tc(dev, txq);
 
-		if (txq < dev->real_num_tx_queues) {
+		dev->real_num_tx_queues = txq;
+
+		if (disabling) {
+			synchronize_net();
 			qdisc_reset_all_tx_gt(dev, txq);
 #ifdef CONFIG_XPS
 			netif_reset_xps_queues_gt(dev, txq);
 #endif
 		}
+	} else {
+		dev->real_num_tx_queues = txq;
 	}
 
-	dev->real_num_tx_queues = txq;
 	return 0;
 }
 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
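When shrinking, the reworked function publishes the smaller real_num_tx_queues before waiting out in-flight readers and resetting the now-unreachable queues, so no reader can pick a queue that is about to be torn down. A minimal single-threaded model of that control flow; every function below is an illustrative stub, not the kernel API.

/* Publish-then-teardown model of the reordering above. */
#include <stdbool.h>
#include <stdio.h>

static unsigned int real_num_tx_queues = 8;

static void wait_for_readers(void)	/* stands in for synchronize_net() */
{
	puts("waiting for in-flight readers...");
}

static void reset_queues_above(unsigned int txq)	/* qdisc/XPS reset stub */
{
	printf("resetting queues >= %u\n", txq);
}

static int set_real_num_tx_queues(unsigned int txq)
{
	bool disabling = txq < real_num_tx_queues;

	if (txq < 1)
		return -1;

	/* Publish first: new readers see the smaller count immediately. */
	real_num_tx_queues = txq;

	if (disabling) {
		/* Readers that sampled the old count may still be running;
		 * wait them out before touching the queues they could have
		 * selected, then reset. */
		wait_for_readers();
		reset_queues_above(txq);
	}
	return 0;
}

int main(void)
{
	set_real_num_tx_queues(4);	/* shrink: publish, wait, reset */
	set_real_num_tx_queues(6);	/* grow: publish only */
	return 0;
}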
diff --git a/net/core/filter.c b/net/core/filter.c
index 08ab4c65a998..0c121adbdbaa 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3381,17 +3381,13 @@ BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
 	struct sock *sk = bpf_sock->sk;
 	int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;
 
-	if (!sk_fullsock(sk))
+	if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
 		return -EINVAL;
 
-#ifdef CONFIG_INET
 	if (val)
 		tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
 
 	return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS);
-#else
-	return -EINVAL;
-#endif
 }
 
 static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = {
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 0a3f88f08727..98fd12721221 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -66,6 +66,7 @@ struct net_rate_estimator {
 static void est_fetch_counters(struct net_rate_estimator *e,
 			       struct gnet_stats_basic_packed *b)
 {
+	memset(b, 0, sizeof(*b));
 	if (e->stats_lock)
 		spin_lock(e->stats_lock);
 
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 91dd09f79808..791aff68af88 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1338,6 +1338,12 @@ static int dn_setsockopt(struct socket *sock, int level, int optname, char __use
 	lock_sock(sk);
 	err = __dn_setsockopt(sock, level, optname, optval, optlen, 0);
 	release_sock(sk);
+#ifdef CONFIG_NETFILTER
+	/* we need to exclude all possible ENOPROTOOPTs except default case */
+	if (err == -ENOPROTOOPT && optname != DSO_LINKINFO &&
+	    optname != DSO_STREAM && optname != DSO_SEQPACKET)
+		err = nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
+#endif
 
 	return err;
 }
@@ -1445,15 +1451,6 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
 		dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
 		break;
 
-	default:
-#ifdef CONFIG_NETFILTER
-		return nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
-#endif
-	case DSO_LINKINFO:
-	case DSO_STREAM:
-	case DSO_SEQPACKET:
-		return -ENOPROTOOPT;
-
 	case DSO_MAXWINDOW:
 		if (optlen != sizeof(unsigned long))
 			return -EINVAL;
@@ -1501,6 +1498,12 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
 			return -EINVAL;
 		scp->info_loc = u.info;
 		break;
+
+	case DSO_LINKINFO:
+	case DSO_STREAM:
+	case DSO_SEQPACKET:
+	default:
+		return -ENOPROTOOPT;
 	}
 
 	return 0;
@@ -1514,6 +1517,20 @@ static int dn_getsockopt(struct socket *sock, int level, int optname, char __use
 	lock_sock(sk);
 	err = __dn_getsockopt(sock, level, optname, optval, optlen, 0);
 	release_sock(sk);
+#ifdef CONFIG_NETFILTER
+	if (err == -ENOPROTOOPT && optname != DSO_STREAM &&
+	    optname != DSO_SEQPACKET && optname != DSO_CONACCEPT &&
+	    optname != DSO_CONREJECT) {
+		int len;
+
+		if (get_user(len, optlen))
+			return -EFAULT;
+
+		err = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
+		if (err >= 0)
+			err = put_user(len, optlen);
+	}
+#endif
 
 	return err;
 }
@@ -1579,26 +1596,6 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
 		r_data = &link;
 		break;
 
-	default:
-#ifdef CONFIG_NETFILTER
-	{
-		int ret, len;
-
-		if (get_user(len, optlen))
-			return -EFAULT;
-
-		ret = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
-		if (ret >= 0)
-			ret = put_user(len, optlen);
-		return ret;
-	}
-#endif
-	case DSO_STREAM:
-	case DSO_SEQPACKET:
-	case DSO_CONACCEPT:
-	case DSO_CONREJECT:
-		return -ENOPROTOOPT;
-
 	case DSO_MAXWINDOW:
 		if (r_len > sizeof(unsigned long))
 			r_len = sizeof(unsigned long);
@@ -1630,6 +1627,13 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
 			r_len = sizeof(unsigned char);
 		r_data = &scp->info_rem;
 		break;
+
+	case DSO_STREAM:
+	case DSO_SEQPACKET:
+	case DSO_CONACCEPT:
+	case DSO_CONREJECT:
+	default:
+		return -ENOPROTOOPT;
 	}
 
 	if (r_data) {
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index c586597da20d..7d36a950d961 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -646,6 +646,11 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
 				 fi->fib_nh, cfg, extack))
 			return 1;
 	}
+#ifdef CONFIG_IP_ROUTE_CLASSID
+	if (cfg->fc_flow &&
+	    cfg->fc_flow != fi->fib_nh->nh_tclassid)
+		return 1;
+#endif
 	if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
 	    (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
 		return 0;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 008be04ac1cc..9c41a0cef1a5 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1567,10 +1567,7 @@ int ip_getsockopt(struct sock *sk, int level,
 		if (get_user(len, optlen))
 			return -EFAULT;
 
-		lock_sock(sk);
-		err = nf_getsockopt(sk, PF_INET, optname, optval,
-				    &len);
-		release_sock(sk);
+		err = nf_getsockopt(sk, PF_INET, optname, optval, &len);
 		if (err >= 0)
 			err = put_user(len, optlen);
 		return err;
@@ -1602,9 +1599,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
 		if (get_user(len, optlen))
 			return -EFAULT;
 
-		lock_sock(sk);
 		err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len);
-		release_sock(sk);
 		if (err >= 0)
 			err = put_user(len, optlen);
 		return err;
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 4ffe302f9b82..e3e420f3ba7b 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -252,6 +252,10 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 			}
 			if (table_base + v
 			    != arpt_next_entry(e)) {
+				if (unlikely(stackidx >= private->stacksize)) {
+					verdict = NF_DROP;
+					break;
+				}
 				jumpstack[stackidx++] = e;
 			}
 
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 9a71f3149507..e38395a8dcf2 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -330,8 +330,13 @@ ipt_do_table(struct sk_buff *skb,
 				continue;
 			}
 			if (table_base + v != ipt_next_entry(e) &&
-			    !(e->ip.flags & IPT_F_GOTO))
+			    !(e->ip.flags & IPT_F_GOTO)) {
+				if (unlikely(stackidx >= private->stacksize)) {
+					verdict = NF_DROP;
+					break;
+				}
 				jumpstack[stackidx++] = e;
+			}
 
 			e = get_entry(table_base, v);
 			continue;
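The arptables and iptables hunks (and the ip6_tables one further down) bound jumpstack pushes by the allocated stacksize and drop the packet on overflow instead of writing past the array. A tiny bounded-stack sketch of the same guard; the types and the NF_DROP value are illustrative stand-ins.

/* Sketch of the overflow guard added to the *_do_table() jump stacks. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NF_DROP 0

struct rule;			/* opaque, like struct ipt_entry */

struct jumpstack {
	const struct rule **slots;
	unsigned int size;	/* private->stacksize */
	unsigned int idx;	/* stackidx */
};

/* Returns false when the packet must be dropped. */
static bool jumpstack_push(struct jumpstack *js, const struct rule *e)
{
	if (js->idx >= js->size)	/* the added bounds check */
		return false;
	js->slots[js->idx++] = e;
	return true;
}

int main(void)
{
	const struct rule *slots[2];
	struct jumpstack js = { slots, 2, 0 };
	int verdict = 1;

	for (int i = 0; i < 3; i++) {
		if (!jumpstack_push(&js, NULL)) {
			verdict = NF_DROP;	/* too deep: drop, don't corrupt */
			break;
		}
	}
	printf("verdict=%d idx=%u\n", verdict, js.idx);	/* verdict=0 idx=2 */
	return 0;
}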
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 3a84a60f6b39..4b02ab39ebc5 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -107,12 +107,6 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
 
 	local_bh_disable();
 	if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
-		list_del_rcu(&c->list);
-		spin_unlock(&cn->lock);
-		local_bh_enable();
-
-		unregister_netdevice_notifier(&c->notifier);
-
 		/* In case anyone still accesses the file, the open/close
 		 * functions are also incrementing the refcount on their own,
 		 * so it's safe to remove the entry even if it's in use. */
@@ -120,6 +114,12 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
 		if (cn->procdir)
 			proc_remove(c->pde);
 #endif
+		list_del_rcu(&c->list);
+		spin_unlock(&cn->lock);
+		local_bh_enable();
+
+		unregister_netdevice_notifier(&c->notifier);
+
 		return;
 	}
 	local_bh_enable();
@@ -154,8 +154,12 @@ clusterip_config_find_get(struct net *net, __be32 clusterip, int entry)
 #endif
 		if (unlikely(!refcount_inc_not_zero(&c->refcount)))
 			c = NULL;
-		else if (entry)
-			refcount_inc(&c->entries);
+		else if (entry) {
+			if (unlikely(!refcount_inc_not_zero(&c->entries))) {
+				clusterip_config_put(c);
+				c = NULL;
+			}
+		}
 	}
 	rcu_read_unlock_bh();
 
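The lookup now takes the entries reference with inc_not_zero, because clusterip_config_entry_put() may already have dropped it to zero on another CPU; a plain increment would resurrect a dying object. A userspace C11 sketch of the refcount_inc_not_zero() pattern:

/* inc_not_zero: a lookup may only take a reference while the count is
 * still non-zero; zero means the object is already being destroyed.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool refcount_inc_not_zero(atomic_uint *r)
{
	unsigned int old = atomic_load(r);

	do {
		if (old == 0)
			return false;	/* dying object: do not resurrect */
	} while (!atomic_compare_exchange_weak(r, &old, old + 1));
	return true;
}

int main(void)
{
	atomic_uint live = 1, dying = 0;

	printf("live:  %d\n", refcount_inc_not_zero(&live));	/* 1 */
	printf("dying: %d\n", refcount_inc_not_zero(&dying));	/* 0 */
	return 0;
}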
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c
index 270765236f5e..aaaf9a81fbc9 100644
--- a/net/ipv4/netfilter/ipt_ECN.c
+++ b/net/ipv4/netfilter/ipt_ECN.c
@@ -98,17 +98,15 @@ static int ecn_tg_check(const struct xt_tgchk_param *par)
 	const struct ipt_ECN_info *einfo = par->targinfo;
 	const struct ipt_entry *e = par->entryinfo;
 
-	if (einfo->operation & IPT_ECN_OP_MASK) {
-		pr_info("unsupported ECN operation %x\n", einfo->operation);
+	if (einfo->operation & IPT_ECN_OP_MASK)
 		return -EINVAL;
-	}
-	if (einfo->ip_ect & ~IPT_ECN_IP_MASK) {
-		pr_info("new ECT codepoint %x out of mask\n", einfo->ip_ect);
+
+	if (einfo->ip_ect & ~IPT_ECN_IP_MASK)
 		return -EINVAL;
-	}
+
 	if ((einfo->operation & (IPT_ECN_OP_SET_ECE|IPT_ECN_OP_SET_CWR)) &&
 	    (e->ip.proto != IPPROTO_TCP || (e->ip.invflags & XT_INV_PROTO))) {
-		pr_info("cannot use TCP operations on a non-tcp rule\n");
+		pr_info_ratelimited("cannot use operation on non-tcp rule\n");
 		return -EINVAL;
 	}
 	return 0;
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 8bd0d7b26632..e8bed3390e58 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -74,13 +74,13 @@ static int reject_tg_check(const struct xt_tgchk_param *par)
 	const struct ipt_entry *e = par->entryinfo;
 
 	if (rejinfo->with == IPT_ICMP_ECHOREPLY) {
-		pr_info("ECHOREPLY no longer supported.\n");
+		pr_info_ratelimited("ECHOREPLY no longer supported.\n");
 		return -EINVAL;
 	} else if (rejinfo->with == IPT_TCP_RESET) {
 		/* Must specify that it's a TCP packet */
 		if (e->ip.proto != IPPROTO_TCP ||
 		    (e->ip.invflags & XT_INV_PROTO)) {
-			pr_info("TCP_RESET invalid for non-tcp\n");
+			pr_info_ratelimited("TCP_RESET invalid for non-tcp\n");
 			return -EINVAL;
 		}
 	}
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index 37fb9552e858..fd01f13c896a 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -105,14 +105,14 @@ static int rpfilter_check(const struct xt_mtchk_param *par)
 	const struct xt_rpfilter_info *info = par->matchinfo;
 	unsigned int options = ~XT_RPFILTER_OPTION_MASK;
 	if (info->flags & options) {
-		pr_info("unknown options encountered");
+		pr_info_ratelimited("unknown options\n");
 		return -EINVAL;
 	}
 
 	if (strcmp(par->table, "mangle") != 0 &&
 	    strcmp(par->table, "raw") != 0) {
-		pr_info("match only valid in the \'raw\' "
-			"or \'mangle\' tables, not \'%s\'.\n", par->table);
+		pr_info_ratelimited("only valid in \'raw\' or \'mangle\' table, not \'%s\'\n",
+				    par->table);
 		return -EINVAL;
 	}
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 49cc1c1df1ba..a4f44d815a61 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1826,6 +1826,8 @@ int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4,
 			return skb_get_hash_raw(skb) >> 1;
 		memset(&hash_keys, 0, sizeof(hash_keys));
 		skb_flow_dissect_flow_keys(skb, &keys, flag);
+
+		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
 		hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
 		hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
 		hash_keys.ports.src = keys.ports.src;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e9f985e42405..6818042cd8a9 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1730,7 +1730,7 @@ u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
 	 */
 	segs = max_t(u32, bytes / mss_now, min_tso_segs);
 
-	return min_t(u32, segs, sk->sk_gso_max_segs);
+	return segs;
 }
 EXPORT_SYMBOL(tcp_tso_autosize);
 
@@ -1742,9 +1742,10 @@ static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
 	u32 tso_segs = ca_ops->tso_segs_goal ? ca_ops->tso_segs_goal(sk) : 0;
 
-	return tso_segs ? :
-		tcp_tso_autosize(sk, mss_now,
-				 sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
+	if (!tso_segs)
+		tso_segs = tcp_tso_autosize(sk, mss_now,
+				sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
+	return min_t(u32, tso_segs, sk->sk_gso_max_segs);
 }
 
 /* Returns the portion of skb which can be sent right away */
@@ -2027,6 +2028,24 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk)
 	}
 }
 
+static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
+{
+	struct sk_buff *skb, *next;
+
+	skb = tcp_send_head(sk);
+	tcp_for_write_queue_from_safe(skb, next, sk) {
+		if (len <= skb->len)
+			break;
+
+		if (unlikely(TCP_SKB_CB(skb)->eor))
+			return false;
+
+		len -= skb->len;
+	}
+
+	return true;
+}
+
 /* Create a new MTU probe if we are ready.
  * MTU probe is regularly attempting to increase the path MTU by
  * deliberately sending larger packets.  This discovers routing
@@ -2099,6 +2118,9 @@ static int tcp_mtu_probe(struct sock *sk)
 		return 0;
 	}
 
+	if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
+		return -1;
+
 	/* We're allowed to probe.  Build it now. */
 	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
 	if (!nskb)
@@ -2134,6 +2156,10 @@
 			/* We've eaten all the data from this skb.
 			 * Throw it away. */
 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
+			/* If this is the last SKB we copy and eor is set
+			 * we need to propagate it to the new skb.
+			 */
+			TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
 			tcp_unlink_write_queue(skb, sk);
 			sk_wmem_free_skb(sk, skb);
 		} else {
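tcp_mtu_probe() must not merge skbs across an end-of-record boundary: the new helper walks the head of the send queue and refuses when a record end would fall inside the probe, and the last hunk propagates the eor bit onto the merged skb. A userspace sketch of the walk; the skb model is an illustrative stand-in.

/* Can `len` bytes be coalesced from the queue head without crossing an
 * end-of-record (EOR) boundary? Mirrors the loop structure above.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct skb {
	int len;
	bool eor;		/* TCP_SKB_CB(skb)->eor */
	struct skb *next;
};

static bool can_coalesce_head(const struct skb *head, int len)
{
	for (const struct skb *skb = head; skb; skb = skb->next) {
		if (len <= skb->len)
			break;		/* probe ends inside this skb */
		if (skb->eor)
			return false;	/* would merge across a record end */
		len -= skb->len;
	}
	return true;
}

int main(void)
{
	struct skb c = { 1000, false, NULL };
	struct skb b = {  500, true,  &c };	/* record ends after b */
	struct skb a = {  500, false, &b };

	printf("%d\n", can_coalesce_head(&a, 800));	/* 1: ends inside b */
	printf("%d\n", can_coalesce_head(&a, 1200));	/* 0: crosses b's EOR */
	return 0;
}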
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index bfaefe560b5c..e5ef7c38c934 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2024,6 +2024,11 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
 		err = udplite_checksum_init(skb, uh);
 		if (err)
 			return err;
+
+		if (UDP_SKB_CB(skb)->partial_cov) {
+			skb->csum = inet_compute_pseudo(skb, proto);
+			return 0;
+		}
 	}
 
 	/* Note, we are only interested in != 0 or == 0, thus the
diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c
index ec43d18b5ff9..547515e8450a 100644
--- a/net/ipv6/ip6_checksum.c
+++ b/net/ipv6/ip6_checksum.c
@@ -73,6 +73,11 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
 		err = udplite_checksum_init(skb, uh);
 		if (err)
 			return err;
+
+		if (UDP_SKB_CB(skb)->partial_cov) {
+			skb->csum = ip6_compute_pseudo(skb, proto);
+			return 0;
+		}
 	}
 
 	/* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels)
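Both the IPv4 and IPv6 hunks handle UDP-Lite partial coverage: when only a prefix of the datagram is covered, the full-length checksum shortcut below them does not apply, so skb->csum is seeded with just the pseudo-header sum and verification proceeds over the covered bytes. A simplified, host-order model of the ones'-complement sum such checksums fold into (not the kernel's csum_partial()):

/* Internet checksum over a covered prefix, starting from a pseudo-header
 * sum. Byte layout and the pseudo-header value are illustrative.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

static uint32_t csum_bytes(const uint8_t *p, size_t n, uint32_t sum)
{
	for (size_t i = 0; i + 1 < n; i += 2)
		sum += (uint32_t)((p[i] << 8) | p[i + 1]);
	if (n & 1)
		sum += (uint32_t)(p[n - 1] << 8);	/* pad odd tail */
	return sum;
}

int main(void)
{
	uint8_t pkt[] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc };
	uint32_t pseudo = 0x1a2b;	/* stand-in pseudo-header sum */
	size_t coverage = 4;		/* checksum only the first 4 bytes */

	printf("0x%04x\n", csum_fold(csum_bytes(pkt, coverage, pseudo)));
	return 0;
}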
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index d78d41fc4b1a..24535169663d 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -1367,10 +1367,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname,
 		if (get_user(len, optlen))
 			return -EFAULT;
 
-		lock_sock(sk);
-		err = nf_getsockopt(sk, PF_INET6, optname, optval,
-				    &len);
-		release_sock(sk);
+		err = nf_getsockopt(sk, PF_INET6, optname, optval, &len);
 		if (err >= 0)
 			err = put_user(len, optlen);
 	}
@@ -1409,10 +1406,7 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
 		if (get_user(len, optlen))
 			return -EFAULT;
 
-		lock_sock(sk);
-		err = compat_nf_getsockopt(sk, PF_INET6,
-					   optname, optval, &len);
-		release_sock(sk);
+		err = compat_nf_getsockopt(sk, PF_INET6, optname, optval, &len);
 		if (err >= 0)
 			err = put_user(len, optlen);
 	}
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index af4c917e0836..62358b93bbac 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -352,6 +352,10 @@ ip6t_do_table(struct sk_buff *skb,
 			}
 			if (table_base + v != ip6t_next_entry(e) &&
 			    !(e->ipv6.flags & IP6T_F_GOTO)) {
+				if (unlikely(stackidx >= private->stacksize)) {
+					verdict = NF_DROP;
+					break;
+				}
 				jumpstack[stackidx++] = e;
 			}
 
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index fa51a205918d..38dea8ff680f 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -85,14 +85,14 @@ static int reject_tg6_check(const struct xt_tgchk_param *par)
 	const struct ip6t_entry *e = par->entryinfo;
 
 	if (rejinfo->with == IP6T_ICMP6_ECHOREPLY) {
-		pr_info("ECHOREPLY is not supported.\n");
+		pr_info_ratelimited("ECHOREPLY is not supported\n");
 		return -EINVAL;
 	} else if (rejinfo->with == IP6T_TCP_RESET) {
 		/* Must specify that it's a TCP packet */
 		if (!(e->ipv6.flags & IP6T_F_PROTO) ||
 		    e->ipv6.proto != IPPROTO_TCP ||
 		    (e->ipv6.invflags & XT_INV_PROTO)) {
-			pr_info("TCP_RESET illegal for non-tcp\n");
+			pr_info_ratelimited("TCP_RESET illegal for non-tcp\n");
 			return -EINVAL;
 		}
 	}
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
index b12e61b7b16c..94deb69bbbda 100644
--- a/net/ipv6/netfilter/ip6t_rpfilter.c
+++ b/net/ipv6/netfilter/ip6t_rpfilter.c
@@ -103,14 +103,14 @@ static int rpfilter_check(const struct xt_mtchk_param *par)
 	unsigned int options = ~XT_RPFILTER_OPTION_MASK;
 
 	if (info->flags & options) {
-		pr_info("unknown options encountered");
+		pr_info_ratelimited("unknown options\n");
 		return -EINVAL;
 	}
 
 	if (strcmp(par->table, "mangle") != 0 &&
 	    strcmp(par->table, "raw") != 0) {
-		pr_info("match only valid in the \'raw\' "
-			"or \'mangle\' tables, not \'%s\'.\n", par->table);
+		pr_info_ratelimited("only valid in \'raw\' or \'mangle\' table, not \'%s\'\n",
+				    par->table);
 		return -EINVAL;
 	}
 
diff --git a/net/ipv6/netfilter/ip6t_srh.c b/net/ipv6/netfilter/ip6t_srh.c
index 9642164107ce..33719d5560c8 100644
--- a/net/ipv6/netfilter/ip6t_srh.c
+++ b/net/ipv6/netfilter/ip6t_srh.c
@@ -122,12 +122,14 @@ static int srh_mt6_check(const struct xt_mtchk_param *par)
 	const struct ip6t_srh *srhinfo = par->matchinfo;
 
 	if (srhinfo->mt_flags & ~IP6T_SRH_MASK) {
-		pr_err("unknown srh match flags %X\n", srhinfo->mt_flags);
+		pr_info_ratelimited("unknown srh match flags %X\n",
+				    srhinfo->mt_flags);
 		return -EINVAL;
 	}
 
 	if (srhinfo->mt_invflags & ~IP6T_SRH_INV_MASK) {
-		pr_err("unknown srh invflags %X\n", srhinfo->mt_invflags);
+		pr_info_ratelimited("unknown srh invflags %X\n",
+				    srhinfo->mt_invflags);
 		return -EINVAL;
 	}
 
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 3873d3877135..3a1775a62973 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -182,7 +182,7 @@ static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
 #ifdef CONFIG_IPV6_SIT_6RD
 	struct ip_tunnel *t = netdev_priv(dev);
 
-	if (t->dev == sitn->fb_tunnel_dev) {
+	if (dev == sitn->fb_tunnel_dev) {
 		ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0);
 		t->ip6rd.relay_prefix = 0;
 		t->ip6rd.prefixlen = 16;
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index a8b1616cec41..1f3188d03840 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -8,6 +8,7 @@
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
  * Copyright(c) 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -304,9 +305,6 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
 		 * driver so reject the timeout update.
 		 */
 		status = WLAN_STATUS_REQUEST_DECLINED;
-		ieee80211_send_addba_resp(sta->sdata, sta->sta.addr,
-					  tid, dialog_token, status,
-					  1, buf_size, timeout);
 		goto end;
 	}
 
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 46028e12e216..f4195a0f0279 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2892,7 +2892,7 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
 	}
 	if (beacon->probe_resp_len) {
 		new_beacon->probe_resp_len = beacon->probe_resp_len;
-		beacon->probe_resp = pos;
+		new_beacon->probe_resp = pos;
 		memcpy(pos, beacon->probe_resp, beacon->probe_resp_len);
 		pos += beacon->probe_resp_len;
 	}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 26900025de2f..ae9c33cd8ada 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1467,7 +1467,7 @@ struct ieee802_11_elems {
 	const struct ieee80211_timeout_interval_ie *timeout_int;
 	const u8 *opmode_notif;
 	const struct ieee80211_sec_chan_offs_ie *sec_chan_offs;
-	const struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie;
+	struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie;
 	const struct ieee80211_bss_max_idle_period_ie *max_idle_period_ie;
 
 	/* length of them, respectively */
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 73ac607beb5d..6a381cbe1e33 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -1255,13 +1255,12 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
 }
 
 static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata,
-			      struct ieee80211_mgmt *mgmt, size_t len)
+			      struct ieee80211_mgmt *mgmt, size_t len,
+			      struct ieee802_11_elems *elems)
 {
 	struct ieee80211_mgmt *mgmt_fwd;
 	struct sk_buff *skb;
 	struct ieee80211_local *local = sdata->local;
-	u8 *pos = mgmt->u.action.u.chan_switch.variable;
-	size_t offset_ttl;
 
 	skb = dev_alloc_skb(local->tx_headroom + len);
 	if (!skb)
@@ -1269,13 +1268,9 @@ static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata,
 	skb_reserve(skb, local->tx_headroom);
 	mgmt_fwd = skb_put(skb, len);
 
-	/* offset_ttl is based on whether the secondary channel
-	 * offset is available or not. Subtract 1 from the mesh TTL
-	 * and disable the initiator flag before forwarding.
-	 */
-	offset_ttl = (len < 42) ? 7 : 10;
-	*(pos + offset_ttl) -= 1;
-	*(pos + offset_ttl + 1) &= ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR;
+	elems->mesh_chansw_params_ie->mesh_ttl--;
+	elems->mesh_chansw_params_ie->mesh_flags &=
+		~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR;
 
 	memcpy(mgmt_fwd, mgmt, len);
 	eth_broadcast_addr(mgmt_fwd->da);
@@ -1323,7 +1318,7 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata,
 
 	/* forward or re-broadcast the CSA frame */
 	if (fwd_csa) {
-		if (mesh_fwd_csa_frame(sdata, mgmt, len) < 0)
+		if (mesh_fwd_csa_frame(sdata, mgmt, len, &elems) < 0)
 			mcsa_dbg(sdata, "Failed to forward the CSA frame");
 	}
 }
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index ee0181778a42..029334835747 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -8,6 +8,7 @@
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2008, Intel Corporation
  * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ * Copyright (C) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -27,7 +28,7 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
 				 u32 sta_flags, u8 *bssid,
 				 struct ieee80211_csa_ie *csa_ie)
 {
-	enum nl80211_band new_band;
+	enum nl80211_band new_band = current_band;
 	int new_freq;
 	u8 new_chan_no;
 	struct ieee80211_channel *new_chan;
@@ -55,15 +56,13 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
 				elems->ext_chansw_ie->new_operating_class,
 				&new_band)) {
 			sdata_info(sdata,
-				   "cannot understand ECSA IE operating class %d, disconnecting\n",
+				   "cannot understand ECSA IE operating class, %d, ignoring\n",
 				   elems->ext_chansw_ie->new_operating_class);
-			return -EINVAL;
 		}
 		new_chan_no = elems->ext_chansw_ie->new_ch_num;
 		csa_ie->count = elems->ext_chansw_ie->count;
 		csa_ie->mode = elems->ext_chansw_ie->mode;
 	} else if (elems->ch_switch_ie) {
-		new_band = current_band;
 		new_chan_no = elems->ch_switch_ie->new_ch_num;
 		csa_ie->count = elems->ch_switch_ie->count;
 		csa_ie->mode = elems->ch_switch_ie->mode;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 0c5627f8a104..af0b608ee8ed 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -314,7 +314,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
 
 	if (ieee80211_hw_check(hw, USES_RSS)) {
 		sta->pcpu_rx_stats =
-			alloc_percpu(struct ieee80211_sta_rx_stats);
+			alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp);
 		if (!sta->pcpu_rx_stats)
 			goto free;
 	}
@@ -433,6 +433,7 @@ free_txq:
 	if (sta->sta.txq[0])
 		kfree(to_txq_info(sta->sta.txq[0]));
 free:
+	free_percpu(sta->pcpu_rx_stats);
 #ifdef CONFIG_MAC80211_MESH
 	kfree(sta->mesh);
 #endif
diff --git a/net/netfilter/nf_nat_proto_common.c b/net/netfilter/nf_nat_proto_common.c
index fbce552a796e..7d7466dbf663 100644
--- a/net/netfilter/nf_nat_proto_common.c
+++ b/net/netfilter/nf_nat_proto_common.c
@@ -41,7 +41,7 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
 				 const struct nf_conn *ct,
 				 u16 *rover)
 {
-	unsigned int range_size, min, i;
+	unsigned int range_size, min, max, i;
 	__be16 *portptr;
 	u_int16_t off;
 
@@ -71,7 +71,10 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
 		}
 	} else {
 		min = ntohs(range->min_proto.all);
-		range_size = ntohs(range->max_proto.all) - min + 1;
+		max = ntohs(range->max_proto.all);
+		if (unlikely(max < min))
+			swap(max, min);
+		range_size = max - min + 1;
 	}
 
 	if (range->flags & NF_NAT_RANGE_PROTO_RANDOM) {
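A userspace-supplied NAT port range with max < min made the unsigned `max - min + 1` wrap to a huge range_size; the hunk swaps the bounds first. A tiny demonstration of the wraparound:

/* Unsigned underflow the swap above prevents. */
#include <stdio.h>

static void swap_uint(unsigned int *a, unsigned int *b)
{
	unsigned int t = *a; *a = *b; *b = t;
}

int main(void)
{
	unsigned int min = 2000, max = 1000;	/* hostile: max < min */

	printf("unchecked: %u\n", max - min + 1);	/* wraps: 4294966297 */
	if (max < min)
		swap_uint(&max, &min);
	printf("sanitised: %u\n", max - min + 1);	/* 1001 */
	return 0;
}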
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 2f685ee1f9c8..fa1655aff8d3 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -434,36 +434,35 @@ int xt_check_match(struct xt_mtchk_param *par,
 		 * ebt_among is exempt from centralized matchsize checking
 		 * because it uses a dynamic-size data set.
 		 */
-		pr_err("%s_tables: %s.%u match: invalid size "
-		       "%u (kernel) != (user) %u\n",
-		       xt_prefix[par->family], par->match->name,
-		       par->match->revision,
-		       XT_ALIGN(par->match->matchsize), size);
+		pr_err_ratelimited("%s_tables: %s.%u match: invalid size %u (kernel) != (user) %u\n",
+				   xt_prefix[par->family], par->match->name,
+				   par->match->revision,
+				   XT_ALIGN(par->match->matchsize), size);
 		return -EINVAL;
 	}
 	if (par->match->table != NULL &&
 	    strcmp(par->match->table, par->table) != 0) {
-		pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
+		pr_info_ratelimited("%s_tables: %s match: only valid in %s table, not %s\n",
 				    xt_prefix[par->family], par->match->name,
 				    par->match->table, par->table);
 		return -EINVAL;
 	}
 	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
 		char used[64], allow[64];
 
-		pr_err("%s_tables: %s match: used from hooks %s, but only "
-		       "valid from %s\n",
-		       xt_prefix[par->family], par->match->name,
-		       textify_hooks(used, sizeof(used), par->hook_mask,
-				     par->family),
-		       textify_hooks(allow, sizeof(allow), par->match->hooks,
-				     par->family));
+		pr_info_ratelimited("%s_tables: %s match: used from hooks %s, but only valid from %s\n",
+				    xt_prefix[par->family], par->match->name,
+				    textify_hooks(used, sizeof(used),
+						  par->hook_mask, par->family),
+				    textify_hooks(allow, sizeof(allow),
+						  par->match->hooks,
+						  par->family));
 		return -EINVAL;
 	}
 	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
-		pr_err("%s_tables: %s match: only valid for protocol %u\n",
+		pr_info_ratelimited("%s_tables: %s match: only valid for protocol %u\n",
 				    xt_prefix[par->family], par->match->name,
 				    par->match->proto);
 		return -EINVAL;
 	}
 	if (par->match->checkentry != NULL) {
@@ -814,36 +813,35 @@ int xt_check_target(struct xt_tgchk_param *par,
 	int ret;
 
 	if (XT_ALIGN(par->target->targetsize) != size) {
-		pr_err("%s_tables: %s.%u target: invalid size "
-		       "%u (kernel) != (user) %u\n",
-		       xt_prefix[par->family], par->target->name,
-		       par->target->revision,
-		       XT_ALIGN(par->target->targetsize), size);
+		pr_err_ratelimited("%s_tables: %s.%u target: invalid size %u (kernel) != (user) %u\n",
+				   xt_prefix[par->family], par->target->name,
+				   par->target->revision,
+				   XT_ALIGN(par->target->targetsize), size);
 		return -EINVAL;
 	}
 	if (par->target->table != NULL &&
 	    strcmp(par->target->table, par->table) != 0) {
-		pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
+		pr_info_ratelimited("%s_tables: %s target: only valid in %s table, not %s\n",
 				    xt_prefix[par->family], par->target->name,
 				    par->target->table, par->table);
 		return -EINVAL;
 	}
 	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
 		char used[64], allow[64];
 
-		pr_err("%s_tables: %s target: used from hooks %s, but only "
-		       "usable from %s\n",
-		       xt_prefix[par->family], par->target->name,
-		       textify_hooks(used, sizeof(used), par->hook_mask,
-				     par->family),
-		       textify_hooks(allow, sizeof(allow), par->target->hooks,
-				     par->family));
+		pr_info_ratelimited("%s_tables: %s target: used from hooks %s, but only usable from %s\n",
+				    xt_prefix[par->family], par->target->name,
+				    textify_hooks(used, sizeof(used),
						  par->hook_mask, par->family),
+				    textify_hooks(allow, sizeof(allow),
+						  par->target->hooks,
+						  par->family));
 		return -EINVAL;
 	}
 	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
-		pr_err("%s_tables: %s target: only valid for protocol %u\n",
+		pr_info_ratelimited("%s_tables: %s target: only valid for protocol %u\n",
 				    xt_prefix[par->family], par->target->name,
 				    par->target->proto);
 		return -EINVAL;
 	}
 	if (par->target->checkentry != NULL) {
@@ -1004,10 +1002,6 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
 	if (sz < sizeof(*info))
 		return NULL;
 
-	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
-	if ((size >> PAGE_SHIFT) + 2 > totalram_pages)
-		return NULL;
-
 	/* __GFP_NORETRY is not fully supported by kvmalloc but it should
 	 * work reasonably well if sz is too large and bail out rather
 	 * than shoot all processes down before realizing there is nothing
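The x_tables checks (and the many xt_*/ipt_*/ip6t_* checkentry hunks around them) run on user-controlled input, so every pr_err/pr_info becomes a ratelimited variant: a hostile ruleset must not be able to flood the kernel log. A minimal fixed-window limiter modeling the idea; this is not the kernel's ___ratelimit() implementation, and the interval/burst values are arbitrary.

/* Fixed-window ratelimited logger sketch. */
#include <stdarg.h>
#include <stdio.h>
#include <time.h>

#define RL_INTERVAL 5	/* seconds */
#define RL_BURST    3	/* messages allowed per interval */

static void pr_ratelimited(const char *fmt, ...)
{
	static time_t window_start;
	static int printed, suppressed;
	time_t now = time(NULL);
	va_list ap;

	if (now - window_start >= RL_INTERVAL) {
		if (suppressed)
			fprintf(stderr, "%d messages suppressed\n", suppressed);
		window_start = now;
		printed = suppressed = 0;
	}
	if (printed >= RL_BURST) {
		suppressed++;		/* drop, but keep count */
		return;
	}
	printed++;
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		pr_ratelimited("invalid size %d\n", i);	/* only 3 get out */
	return 0;
}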
diff --git a/net/netfilter/xt_AUDIT.c b/net/netfilter/xt_AUDIT.c
index c502419d6306..f368ee6741db 100644
--- a/net/netfilter/xt_AUDIT.c
+++ b/net/netfilter/xt_AUDIT.c
@@ -120,8 +120,8 @@ static int audit_tg_check(const struct xt_tgchk_param *par)
 	const struct xt_audit_info *info = par->targinfo;
 
 	if (info->type > XT_AUDIT_TYPE_MAX) {
-		pr_info("Audit type out of range (valid range: 0..%hhu)\n",
+		pr_info_ratelimited("Audit type out of range (valid range: 0..%hhu)\n",
 			XT_AUDIT_TYPE_MAX);
 		return -ERANGE;
 	}
 
diff --git a/net/netfilter/xt_CHECKSUM.c b/net/netfilter/xt_CHECKSUM.c
index 0f642ef8cd26..9f4151ec3e06 100644
--- a/net/netfilter/xt_CHECKSUM.c
+++ b/net/netfilter/xt_CHECKSUM.c
@@ -36,13 +36,13 @@ static int checksum_tg_check(const struct xt_tgchk_param *par)
 	const struct xt_CHECKSUM_info *einfo = par->targinfo;
 
 	if (einfo->operation & ~XT_CHECKSUM_OP_FILL) {
-		pr_info("unsupported CHECKSUM operation %x\n", einfo->operation);
+		pr_info_ratelimited("unsupported CHECKSUM operation %x\n",
+				    einfo->operation);
 		return -EINVAL;
 	}
-	if (!einfo->operation) {
-		pr_info("no CHECKSUM operation enabled\n");
+	if (!einfo->operation)
 		return -EINVAL;
-	}
+
 	return 0;
 }
 
diff --git a/net/netfilter/xt_CONNSECMARK.c b/net/netfilter/xt_CONNSECMARK.c
index da56c06a443c..f3f1caac949b 100644
--- a/net/netfilter/xt_CONNSECMARK.c
+++ b/net/netfilter/xt_CONNSECMARK.c
@@ -91,8 +91,8 @@ static int connsecmark_tg_check(const struct xt_tgchk_param *par)
 
 	if (strcmp(par->table, "mangle") != 0 &&
 	    strcmp(par->table, "security") != 0) {
-		pr_info("target only valid in the \'mangle\' "
-			"or \'security\' tables, not \'%s\'.\n", par->table);
+		pr_info_ratelimited("only valid in \'mangle\' or \'security\' table, not \'%s\'\n",
+				    par->table);
 		return -EINVAL;
 	}
 
@@ -102,14 +102,14 @@ static int connsecmark_tg_check(const struct xt_tgchk_param *par)
 		break;
 
 	default:
-		pr_info("invalid mode: %hu\n", info->mode);
+		pr_info_ratelimited("invalid mode: %hu\n", info->mode);
 		return -EINVAL;
 	}
 
 	ret = nf_ct_netns_get(par->net, par->family);
 	if (ret < 0)
-		pr_info("cannot load conntrack support for proto=%u\n",
+		pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
 			par->family);
 	return ret;
 }
 
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 5a152e2acfd5..8790190c6feb 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -82,15 +82,14 @@ xt_ct_set_helper(struct nf_conn *ct, const char *helper_name,
82 82
83 proto = xt_ct_find_proto(par); 83 proto = xt_ct_find_proto(par);
84 if (!proto) { 84 if (!proto) {
85 pr_info("You must specify a L4 protocol, and not use " 85 pr_info_ratelimited("You must specify a L4 protocol and not use inversions on it\n");
86 "inversions on it.\n");
87 return -ENOENT; 86 return -ENOENT;
88 } 87 }
89 88
90 helper = nf_conntrack_helper_try_module_get(helper_name, par->family, 89 helper = nf_conntrack_helper_try_module_get(helper_name, par->family,
91 proto); 90 proto);
92 if (helper == NULL) { 91 if (helper == NULL) {
93 pr_info("No such helper \"%s\"\n", helper_name); 92 pr_info_ratelimited("No such helper \"%s\"\n", helper_name);
94 return -ENOENT; 93 return -ENOENT;
95 } 94 }
96 95
@@ -124,6 +123,7 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
124 const struct nf_conntrack_l4proto *l4proto; 123 const struct nf_conntrack_l4proto *l4proto;
125 struct ctnl_timeout *timeout; 124 struct ctnl_timeout *timeout;
126 struct nf_conn_timeout *timeout_ext; 125 struct nf_conn_timeout *timeout_ext;
126 const char *errmsg = NULL;
127 int ret = 0; 127 int ret = 0;
128 u8 proto; 128 u8 proto;
129 129
@@ -131,29 +131,29 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
131 timeout_find_get = rcu_dereference(nf_ct_timeout_find_get_hook); 131 timeout_find_get = rcu_dereference(nf_ct_timeout_find_get_hook);
132 if (timeout_find_get == NULL) { 132 if (timeout_find_get == NULL) {
133 ret = -ENOENT; 133 ret = -ENOENT;
134 pr_info("Timeout policy base is empty\n"); 134 errmsg = "Timeout policy base is empty";
135 goto out; 135 goto out;
136 } 136 }
137 137
138 proto = xt_ct_find_proto(par); 138 proto = xt_ct_find_proto(par);
139 if (!proto) { 139 if (!proto) {
140 ret = -EINVAL; 140 ret = -EINVAL;
141 pr_info("You must specify a L4 protocol, and not use " 141 errmsg = "You must specify a L4 protocol and not use inversions on it";
142 "inversions on it.\n");
143 goto out; 142 goto out;
144 } 143 }
145 144
146 timeout = timeout_find_get(par->net, timeout_name); 145 timeout = timeout_find_get(par->net, timeout_name);
147 if (timeout == NULL) { 146 if (timeout == NULL) {
148 ret = -ENOENT; 147 ret = -ENOENT;
149 pr_info("No such timeout policy \"%s\"\n", timeout_name); 148 pr_info_ratelimited("No such timeout policy \"%s\"\n",
149 timeout_name);
150 goto out; 150 goto out;
151 } 151 }
152 152
153 if (timeout->l3num != par->family) { 153 if (timeout->l3num != par->family) {
154 ret = -EINVAL; 154 ret = -EINVAL;
155 pr_info("Timeout policy `%s' can only be used by L3 protocol " 155 pr_info_ratelimited("Timeout policy `%s' can only be used by L%d protocol number %d\n",
156 "number %d\n", timeout_name, timeout->l3num); 156 timeout_name, 3, timeout->l3num);
157 goto err_put_timeout; 157 goto err_put_timeout;
158 } 158 }
159 /* Make sure the timeout policy matches any existing protocol tracker, 159 /* Make sure the timeout policy matches any existing protocol tracker,
@@ -162,9 +162,8 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
162 l4proto = __nf_ct_l4proto_find(par->family, proto); 162 l4proto = __nf_ct_l4proto_find(par->family, proto);
163 if (timeout->l4proto->l4proto != l4proto->l4proto) { 163 if (timeout->l4proto->l4proto != l4proto->l4proto) {
164 ret = -EINVAL; 164 ret = -EINVAL;
165 pr_info("Timeout policy `%s' can only be used by L4 protocol " 165 pr_info_ratelimited("Timeout policy `%s' can only be used by L%d protocol number %d\n",
166 "number %d\n", 166 timeout_name, 4, timeout->l4proto->l4proto);
167 timeout_name, timeout->l4proto->l4proto);
168 goto err_put_timeout; 167 goto err_put_timeout;
169 } 168 }
170 timeout_ext = nf_ct_timeout_ext_add(ct, timeout, GFP_ATOMIC); 169 timeout_ext = nf_ct_timeout_ext_add(ct, timeout, GFP_ATOMIC);
@@ -180,6 +179,8 @@ err_put_timeout:
180 __xt_ct_tg_timeout_put(timeout); 179 __xt_ct_tg_timeout_put(timeout);
181out: 180out:
182 rcu_read_unlock(); 181 rcu_read_unlock();
182 if (errmsg)
183 pr_info_ratelimited("%s\n", errmsg);
183 return ret; 184 return ret;
184#else 185#else
185 return -EOPNOTSUPP; 186 return -EOPNOTSUPP;
diff --git a/net/netfilter/xt_DSCP.c b/net/netfilter/xt_DSCP.c
index 3f83d38c4e5b..098ed851b7a7 100644
--- a/net/netfilter/xt_DSCP.c
+++ b/net/netfilter/xt_DSCP.c
@@ -66,10 +66,8 @@ static int dscp_tg_check(const struct xt_tgchk_param *par)
66{ 66{
67 const struct xt_DSCP_info *info = par->targinfo; 67 const struct xt_DSCP_info *info = par->targinfo;
68 68
69 if (info->dscp > XT_DSCP_MAX) { 69 if (info->dscp > XT_DSCP_MAX)
70 pr_info("dscp %x out of range\n", info->dscp);
71 return -EDOM; 70 return -EDOM;
72 }
73 return 0; 71 return 0;
74} 72}
75 73
diff --git a/net/netfilter/xt_HL.c b/net/netfilter/xt_HL.c
index 1535e87ed9bd..4653b071bed4 100644
--- a/net/netfilter/xt_HL.c
+++ b/net/netfilter/xt_HL.c
@@ -105,10 +105,8 @@ static int ttl_tg_check(const struct xt_tgchk_param *par)
105{ 105{
106 const struct ipt_TTL_info *info = par->targinfo; 106 const struct ipt_TTL_info *info = par->targinfo;
107 107
108 if (info->mode > IPT_TTL_MAXMODE) { 108 if (info->mode > IPT_TTL_MAXMODE)
109 pr_info("TTL: invalid or unknown mode %u\n", info->mode);
110 return -EINVAL; 109 return -EINVAL;
111 }
112 if (info->mode != IPT_TTL_SET && info->ttl == 0) 110 if (info->mode != IPT_TTL_SET && info->ttl == 0)
113 return -EINVAL; 111 return -EINVAL;
114 return 0; 112 return 0;
@@ -118,15 +116,10 @@ static int hl_tg6_check(const struct xt_tgchk_param *par)
118{ 116{
119 const struct ip6t_HL_info *info = par->targinfo; 117 const struct ip6t_HL_info *info = par->targinfo;
120 118
121 if (info->mode > IP6T_HL_MAXMODE) { 119 if (info->mode > IP6T_HL_MAXMODE)
122 pr_info("invalid or unknown mode %u\n", info->mode);
123 return -EINVAL; 120 return -EINVAL;
124 } 121 if (info->mode != IP6T_HL_SET && info->hop_limit == 0)
125 if (info->mode != IP6T_HL_SET && info->hop_limit == 0) {
126 pr_info("increment/decrement does not "
127 "make sense with value 0\n");
128 return -EINVAL; 122 return -EINVAL;
129 }
130 return 0; 123 return 0;
131} 124}
132 125
diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c
index 60e6dbe12460..9c75f419cd80 100644
--- a/net/netfilter/xt_HMARK.c
+++ b/net/netfilter/xt_HMARK.c
@@ -9,6 +9,8 @@
9 * the Free Software Foundation. 9 * the Free Software Foundation.
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/module.h> 14#include <linux/module.h>
13#include <linux/skbuff.h> 15#include <linux/skbuff.h>
14#include <linux/icmp.h> 16#include <linux/icmp.h>
@@ -312,29 +314,30 @@ hmark_tg_v4(struct sk_buff *skb, const struct xt_action_param *par)
312static int hmark_tg_check(const struct xt_tgchk_param *par) 314static int hmark_tg_check(const struct xt_tgchk_param *par)
313{ 315{
314 const struct xt_hmark_info *info = par->targinfo; 316 const struct xt_hmark_info *info = par->targinfo;
317 const char *errmsg = "proto mask must be zero with L3 mode";
315 318
316 if (!info->hmodulus) { 319 if (!info->hmodulus)
317 pr_info("xt_HMARK: hash modulus can't be zero\n");
318 return -EINVAL; 320 return -EINVAL;
319 } 321
320 if (info->proto_mask && 322 if (info->proto_mask &&
321 (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))) { 323 (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)))
322 pr_info("xt_HMARK: proto mask must be zero with L3 mode\n"); 324 goto err;
323 return -EINVAL; 325
324 }
325 if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI_MASK) && 326 if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI_MASK) &&
326 (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT_MASK) | 327 (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT_MASK) |
327 XT_HMARK_FLAG(XT_HMARK_DPORT_MASK)))) { 328 XT_HMARK_FLAG(XT_HMARK_DPORT_MASK))))
328 pr_info("xt_HMARK: spi-mask and port-mask can't be combined\n");
329 return -EINVAL; 329 return -EINVAL;
330 } 330
331 if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI) && 331 if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI) &&
332 (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT) | 332 (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT) |
333 XT_HMARK_FLAG(XT_HMARK_DPORT)))) { 333 XT_HMARK_FLAG(XT_HMARK_DPORT)))) {
334 pr_info("xt_HMARK: spi-set and port-set can't be combined\n"); 334 errmsg = "spi-set and port-set can't be combined";
335 return -EINVAL; 335 goto err;
336 } 336 }
337 return 0; 337 return 0;
338err:
339 pr_info_ratelimited("%s\n", errmsg);
340 return -EINVAL;
338} 341}
339 342
340static struct xt_target hmark_tg_reg[] __read_mostly = { 343static struct xt_target hmark_tg_reg[] __read_mostly = {
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 6c2482b709b1..1ac6600bfafd 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -146,11 +146,11 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
146 timer_setup(&info->timer->timer, idletimer_tg_expired, 0); 146 timer_setup(&info->timer->timer, idletimer_tg_expired, 0);
147 info->timer->refcnt = 1; 147 info->timer->refcnt = 1;
148 148
149 INIT_WORK(&info->timer->work, idletimer_tg_work);
150
149 mod_timer(&info->timer->timer, 151 mod_timer(&info->timer->timer,
150 msecs_to_jiffies(info->timeout * 1000) + jiffies); 152 msecs_to_jiffies(info->timeout * 1000) + jiffies);
151 153
152 INIT_WORK(&info->timer->work, idletimer_tg_work);
153
154 return 0; 154 return 0;
155 155
156out_free_attr: 156out_free_attr:
@@ -191,7 +191,10 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
191 pr_debug("timeout value is zero\n"); 191 pr_debug("timeout value is zero\n");
192 return -EINVAL; 192 return -EINVAL;
193 } 193 }
194 194 if (info->timeout >= INT_MAX / 1000) {
195 pr_debug("timeout value is too big\n");
196 return -EINVAL;
197 }
195 if (info->label[0] == '\0' || 198 if (info->label[0] == '\0' ||
196 strnlen(info->label, 199 strnlen(info->label,
197 MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) { 200 MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) {
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
index 1dcad893df78..19846445504d 100644
--- a/net/netfilter/xt_LED.c
+++ b/net/netfilter/xt_LED.c
@@ -111,10 +111,8 @@ static int led_tg_check(const struct xt_tgchk_param *par)
111 struct xt_led_info_internal *ledinternal; 111 struct xt_led_info_internal *ledinternal;
112 int err; 112 int err;
113 113
114 if (ledinfo->id[0] == '\0') { 114 if (ledinfo->id[0] == '\0')
115 pr_info("No 'id' parameter given.\n");
116 return -EINVAL; 115 return -EINVAL;
117 }
118 116
119 mutex_lock(&xt_led_mutex); 117 mutex_lock(&xt_led_mutex);
120 118
@@ -138,13 +136,14 @@ static int led_tg_check(const struct xt_tgchk_param *par)
138 136
139 err = led_trigger_register(&ledinternal->netfilter_led_trigger); 137 err = led_trigger_register(&ledinternal->netfilter_led_trigger);
140 if (err) { 138 if (err) {
141 pr_err("Trigger name is already in use.\n"); 139 pr_info_ratelimited("Trigger name is already in use.\n");
142 goto exit_alloc; 140 goto exit_alloc;
143 } 141 }
144 142
145 /* See if we need to set up a timer */ 143 /* Since the ledinternal timer can be shared between multiple targets,
146 if (ledinfo->delay > 0) 144 * always set it up, even if the current target does not need it
147 timer_setup(&ledinternal->timer, led_timeout_callback, 0); 145 */
146 timer_setup(&ledinternal->timer, led_timeout_callback, 0);
148 147
149 list_add_tail(&ledinternal->list, &xt_led_triggers); 148 list_add_tail(&ledinternal->list, &xt_led_triggers);
150 149
@@ -181,8 +180,7 @@ static void led_tg_destroy(const struct xt_tgdtor_param *par)
181 180
182 list_del(&ledinternal->list); 181 list_del(&ledinternal->list);
183 182
184 if (ledinfo->delay > 0) 183 del_timer_sync(&ledinternal->timer);
185 del_timer_sync(&ledinternal->timer);
186 184
187 led_trigger_unregister(&ledinternal->netfilter_led_trigger); 185 led_trigger_unregister(&ledinternal->netfilter_led_trigger);
188 186
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index a360b99a958a..a9aca80a32ae 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -8,6 +8,8 @@
8 * 8 *
9 */ 9 */
10 10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
11#include <linux/module.h> 13#include <linux/module.h>
12#include <linux/skbuff.h> 14#include <linux/skbuff.h>
13 15
@@ -67,13 +69,13 @@ static int nfqueue_tg_check(const struct xt_tgchk_param *par)
67 init_hashrandom(&jhash_initval); 69 init_hashrandom(&jhash_initval);
68 70
69 if (info->queues_total == 0) { 71 if (info->queues_total == 0) {
70 pr_err("NFQUEUE: number of total queues is 0\n"); 72 pr_info_ratelimited("number of total queues is 0\n");
71 return -EINVAL; 73 return -EINVAL;
72 } 74 }
73 maxid = info->queues_total - 1 + info->queuenum; 75 maxid = info->queues_total - 1 + info->queuenum;
74 if (maxid > 0xffff) { 76 if (maxid > 0xffff) {
75 pr_err("NFQUEUE: number of queues (%u) out of range (got %u)\n", 77 pr_info_ratelimited("number of queues (%u) out of range (got %u)\n",
76 info->queues_total, maxid); 78 info->queues_total, maxid);
77 return -ERANGE; 79 return -ERANGE;
78 } 80 }
79 if (par->target->revision == 2 && info->flags > 1) 81 if (par->target->revision == 2 && info->flags > 1)
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
index 9faf5e050b79..4ad5fe27e08b 100644
--- a/net/netfilter/xt_SECMARK.c
+++ b/net/netfilter/xt_SECMARK.c
@@ -60,18 +60,20 @@ static int checkentry_lsm(struct xt_secmark_target_info *info)
60 &info->secid); 60 &info->secid);
61 if (err) { 61 if (err) {
62 if (err == -EINVAL) 62 if (err == -EINVAL)
63 pr_info("invalid security context \'%s\'\n", info->secctx); 63 pr_info_ratelimited("invalid security context \'%s\'\n",
64 info->secctx);
64 return err; 65 return err;
65 } 66 }
66 67
67 if (!info->secid) { 68 if (!info->secid) {
68 pr_info("unable to map security context \'%s\'\n", info->secctx); 69 pr_info_ratelimited("unable to map security context \'%s\'\n",
70 info->secctx);
69 return -ENOENT; 71 return -ENOENT;
70 } 72 }
71 73
72 err = security_secmark_relabel_packet(info->secid); 74 err = security_secmark_relabel_packet(info->secid);
73 if (err) { 75 if (err) {
74 pr_info("unable to obtain relabeling permission\n"); 76 pr_info_ratelimited("unable to obtain relabeling permission\n");
75 return err; 77 return err;
76 } 78 }
77 79
@@ -86,14 +88,14 @@ static int secmark_tg_check(const struct xt_tgchk_param *par)
86 88
87 if (strcmp(par->table, "mangle") != 0 && 89 if (strcmp(par->table, "mangle") != 0 &&
88 strcmp(par->table, "security") != 0) { 90 strcmp(par->table, "security") != 0) {
89 pr_info("target only valid in the \'mangle\' " 91 pr_info_ratelimited("only valid in \'mangle\' or \'security\' table, not \'%s\'\n",
90 "or \'security\' tables, not \'%s\'.\n", par->table); 92 par->table);
91 return -EINVAL; 93 return -EINVAL;
92 } 94 }
93 95
94 if (mode && mode != info->mode) { 96 if (mode && mode != info->mode) {
95 pr_info("mode already set to %hu cannot mix with " 97 pr_info_ratelimited("mode already set to %hu cannot mix with rules for mode %hu\n",
96 "rules for mode %hu\n", mode, info->mode); 98 mode, info->mode);
97 return -EINVAL; 99 return -EINVAL;
98 } 100 }
99 101
@@ -101,7 +103,7 @@ static int secmark_tg_check(const struct xt_tgchk_param *par)
101 case SECMARK_MODE_SEL: 103 case SECMARK_MODE_SEL:
102 break; 104 break;
103 default: 105 default:
104 pr_info("invalid mode: %hu\n", info->mode); 106 pr_info_ratelimited("invalid mode: %hu\n", info->mode);
105 return -EINVAL; 107 return -EINVAL;
106 } 108 }
107 109
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 99bb8e410f22..98efb202f8b4 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -273,8 +273,7 @@ static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
273 (par->hook_mask & ~((1 << NF_INET_FORWARD) | 273 (par->hook_mask & ~((1 << NF_INET_FORWARD) |
274 (1 << NF_INET_LOCAL_OUT) | 274 (1 << NF_INET_LOCAL_OUT) |
275 (1 << NF_INET_POST_ROUTING))) != 0) { 275 (1 << NF_INET_POST_ROUTING))) != 0) {
276 pr_info("path-MTU clamping only supported in " 276 pr_info_ratelimited("path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n");
277 "FORWARD, OUTPUT and POSTROUTING hooks\n");
278 return -EINVAL; 277 return -EINVAL;
279 } 278 }
280 if (par->nft_compat) 279 if (par->nft_compat)
@@ -283,7 +282,7 @@ static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
283 xt_ematch_foreach(ematch, e) 282 xt_ematch_foreach(ematch, e)
284 if (find_syn_match(ematch)) 283 if (find_syn_match(ematch))
285 return 0; 284 return 0;
286 pr_info("Only works on TCP SYN packets\n"); 285 pr_info_ratelimited("Only works on TCP SYN packets\n");
287 return -EINVAL; 286 return -EINVAL;
288} 287}
289 288
@@ -298,8 +297,7 @@ static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
298 (par->hook_mask & ~((1 << NF_INET_FORWARD) | 297 (par->hook_mask & ~((1 << NF_INET_FORWARD) |
299 (1 << NF_INET_LOCAL_OUT) | 298 (1 << NF_INET_LOCAL_OUT) |
300 (1 << NF_INET_POST_ROUTING))) != 0) { 299 (1 << NF_INET_POST_ROUTING))) != 0) {
301 pr_info("path-MTU clamping only supported in " 300 pr_info_ratelimited("path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n");
302 "FORWARD, OUTPUT and POSTROUTING hooks\n");
303 return -EINVAL; 301 return -EINVAL;
304 } 302 }
305 if (par->nft_compat) 303 if (par->nft_compat)
@@ -308,7 +306,7 @@ static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
308 xt_ematch_foreach(ematch, e) 306 xt_ematch_foreach(ematch, e)
309 if (find_syn_match(ematch)) 307 if (find_syn_match(ematch))
310 return 0; 308 return 0;
311 pr_info("Only works on TCP SYN packets\n"); 309 pr_info_ratelimited("Only works on TCP SYN packets\n");
312 return -EINVAL; 310 return -EINVAL;
313} 311}
314#endif 312#endif
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 17d7705e3bd4..8c89323c06af 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -540,8 +540,7 @@ static int tproxy_tg6_check(const struct xt_tgchk_param *par)
540 !(i->invflags & IP6T_INV_PROTO)) 540 !(i->invflags & IP6T_INV_PROTO))
541 return 0; 541 return 0;
542 542
543 pr_info("Can be used only in combination with " 543 pr_info_ratelimited("Can be used only with -p tcp or -p udp\n");
544 "either -p tcp or -p udp\n");
545 return -EINVAL; 544 return -EINVAL;
546} 545}
547#endif 546#endif
@@ -559,8 +558,7 @@ static int tproxy_tg4_check(const struct xt_tgchk_param *par)
559 && !(i->invflags & IPT_INV_PROTO)) 558 && !(i->invflags & IPT_INV_PROTO))
560 return 0; 559 return 0;
561 560
562 pr_info("Can be used only in combination with " 561 pr_info_ratelimited("Can be used only with -p tcp or -p udp\n");
563 "either -p tcp or -p udp\n");
564 return -EINVAL; 562 return -EINVAL;
565} 563}
566 564
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
index 911a7c0da504..89e281b3bfc2 100644
--- a/net/netfilter/xt_addrtype.c
+++ b/net/netfilter/xt_addrtype.c
@@ -164,48 +164,47 @@ addrtype_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
164 164
165static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par) 165static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
166{ 166{
167 const char *errmsg = "both incoming and outgoing interface limitation cannot be selected";
167 struct xt_addrtype_info_v1 *info = par->matchinfo; 168 struct xt_addrtype_info_v1 *info = par->matchinfo;
168 169
169 if (info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN && 170 if (info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN &&
170 info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT) { 171 info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT)
171 pr_info("both incoming and outgoing " 172 goto err;
172 "interface limitation cannot be selected\n");
173 return -EINVAL;
174 }
175 173
176 if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) | 174 if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) |
177 (1 << NF_INET_LOCAL_IN)) && 175 (1 << NF_INET_LOCAL_IN)) &&
178 info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT) { 176 info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT) {
179 pr_info("output interface limitation " 177 errmsg = "output interface limitation not valid in PREROUTING and INPUT";
180 "not valid in PREROUTING and INPUT\n"); 178 goto err;
181 return -EINVAL;
182 } 179 }
183 180
184 if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) | 181 if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) |
185 (1 << NF_INET_LOCAL_OUT)) && 182 (1 << NF_INET_LOCAL_OUT)) &&
186 info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN) { 183 info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN) {
187 pr_info("input interface limitation " 184 errmsg = "input interface limitation not valid in POSTROUTING and OUTPUT";
188 "not valid in POSTROUTING and OUTPUT\n"); 185 goto err;
189 return -EINVAL;
190 } 186 }
191 187
192#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) 188#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
193 if (par->family == NFPROTO_IPV6) { 189 if (par->family == NFPROTO_IPV6) {
194 if ((info->source | info->dest) & XT_ADDRTYPE_BLACKHOLE) { 190 if ((info->source | info->dest) & XT_ADDRTYPE_BLACKHOLE) {
195 pr_err("ipv6 BLACKHOLE matching not supported\n"); 191 errmsg = "ipv6 BLACKHOLE matching not supported";
196 return -EINVAL; 192 goto err;
197 } 193 }
198 if ((info->source | info->dest) >= XT_ADDRTYPE_PROHIBIT) { 194 if ((info->source | info->dest) >= XT_ADDRTYPE_PROHIBIT) {
199 pr_err("ipv6 PROHIBIT (THROW, NAT ..) matching not supported\n"); 195 errmsg = "ipv6 PROHIBIT (THROW, NAT ..) matching not supported";
200 return -EINVAL; 196 goto err;
201 } 197 }
202 if ((info->source | info->dest) & XT_ADDRTYPE_BROADCAST) { 198 if ((info->source | info->dest) & XT_ADDRTYPE_BROADCAST) {
203 pr_err("ipv6 does not support BROADCAST matching\n"); 199 errmsg = "ipv6 does not support BROADCAST matching";
204 return -EINVAL; 200 goto err;
205 } 201 }
206 } 202 }
207#endif 203#endif
208 return 0; 204 return 0;
205err:
206 pr_info_ratelimited("%s\n", errmsg);
207 return -EINVAL;
209} 208}
210 209
211static struct xt_match addrtype_mt_reg[] __read_mostly = { 210static struct xt_match addrtype_mt_reg[] __read_mostly = {
diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c
index 06b090d8e901..a2cf8a6236d6 100644
--- a/net/netfilter/xt_bpf.c
+++ b/net/netfilter/xt_bpf.c
@@ -7,6 +7,8 @@
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9 9
10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
10#include <linux/module.h> 12#include <linux/module.h>
11#include <linux/syscalls.h> 13#include <linux/syscalls.h>
12#include <linux/skbuff.h> 14#include <linux/skbuff.h>
@@ -34,7 +36,7 @@ static int __bpf_mt_check_bytecode(struct sock_filter *insns, __u16 len,
34 program.filter = insns; 36 program.filter = insns;
35 37
36 if (bpf_prog_create(ret, &program)) { 38 if (bpf_prog_create(ret, &program)) {
37 pr_info("bpf: check failed: parse error\n"); 39 pr_info_ratelimited("check failed: parse error\n");
38 return -EINVAL; 40 return -EINVAL;
39 } 41 }
40 42
diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c
index 891f4e7e8ea7..7df2dece57d3 100644
--- a/net/netfilter/xt_cgroup.c
+++ b/net/netfilter/xt_cgroup.c
@@ -12,6 +12,8 @@
12 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
13 */ 13 */
14 14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
15#include <linux/skbuff.h> 17#include <linux/skbuff.h>
16#include <linux/module.h> 18#include <linux/module.h>
17#include <linux/netfilter/x_tables.h> 19#include <linux/netfilter/x_tables.h>
@@ -48,7 +50,7 @@ static int cgroup_mt_check_v1(const struct xt_mtchk_param *par)
48 } 50 }
49 51
50 if (info->has_path && info->has_classid) { 52 if (info->has_path && info->has_classid) {
51 pr_info("xt_cgroup: both path and classid specified\n"); 53 pr_info_ratelimited("path and classid specified\n");
52 return -EINVAL; 54 return -EINVAL;
53 } 55 }
54 56
@@ -56,8 +58,8 @@ static int cgroup_mt_check_v1(const struct xt_mtchk_param *par)
56 if (info->has_path) { 58 if (info->has_path) {
57 cgrp = cgroup_get_from_path(info->path); 59 cgrp = cgroup_get_from_path(info->path);
58 if (IS_ERR(cgrp)) { 60 if (IS_ERR(cgrp)) {
59 pr_info("xt_cgroup: invalid path, errno=%ld\n", 61 pr_info_ratelimited("invalid path, errno=%ld\n",
60 PTR_ERR(cgrp)); 62 PTR_ERR(cgrp));
61 return -EINVAL; 63 return -EINVAL;
62 } 64 }
63 info->priv = cgrp; 65 info->priv = cgrp;
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c
index 57ef175dfbfa..0068688995c8 100644
--- a/net/netfilter/xt_cluster.c
+++ b/net/netfilter/xt_cluster.c
@@ -135,14 +135,12 @@ static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
135 struct xt_cluster_match_info *info = par->matchinfo; 135 struct xt_cluster_match_info *info = par->matchinfo;
136 136
137 if (info->total_nodes > XT_CLUSTER_NODES_MAX) { 137 if (info->total_nodes > XT_CLUSTER_NODES_MAX) {
138 pr_info("you have exceeded the maximum " 138 pr_info_ratelimited("you have exceeded the maximum number of cluster nodes (%u > %u)\n",
139 "number of cluster nodes (%u > %u)\n", 139 info->total_nodes, XT_CLUSTER_NODES_MAX);
140 info->total_nodes, XT_CLUSTER_NODES_MAX);
141 return -EINVAL; 140 return -EINVAL;
142 } 141 }
143 if (info->node_mask >= (1ULL << info->total_nodes)) { 142 if (info->node_mask >= (1ULL << info->total_nodes)) {
144 pr_info("this node mask cannot be " 143 pr_info_ratelimited("node mask cannot exceed total number of nodes\n");
145 "higher than the total number of nodes\n");
146 return -EDOM; 144 return -EDOM;
147 } 145 }
148 return 0; 146 return 0;
diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c
index cad0b7b5eb35..93cb018c3055 100644
--- a/net/netfilter/xt_connbytes.c
+++ b/net/netfilter/xt_connbytes.c
@@ -112,8 +112,8 @@ static int connbytes_mt_check(const struct xt_mtchk_param *par)
112 112
113 ret = nf_ct_netns_get(par->net, par->family); 113 ret = nf_ct_netns_get(par->net, par->family);
114 if (ret < 0) 114 if (ret < 0)
115 pr_info("cannot load conntrack support for proto=%u\n", 115 pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
116 par->family); 116 par->family);
117 117
118 /* 118 /*
119 * This filter cannot function correctly unless connection tracking 119 * This filter cannot function correctly unless connection tracking
diff --git a/net/netfilter/xt_connlabel.c b/net/netfilter/xt_connlabel.c
index 23372879e6e3..4fa4efd24353 100644
--- a/net/netfilter/xt_connlabel.c
+++ b/net/netfilter/xt_connlabel.c
@@ -57,14 +57,15 @@ static int connlabel_mt_check(const struct xt_mtchk_param *par)
57 int ret; 57 int ret;
58 58
59 if (info->options & ~options) { 59 if (info->options & ~options) {
60 pr_err("Unknown options in mask %x\n", info->options); 60 pr_info_ratelimited("Unknown options in mask %x\n",
61 info->options);
61 return -EINVAL; 62 return -EINVAL;
62 } 63 }
63 64
64 ret = nf_ct_netns_get(par->net, par->family); 65 ret = nf_ct_netns_get(par->net, par->family);
65 if (ret < 0) { 66 if (ret < 0) {
66 pr_info("cannot load conntrack support for proto=%u\n", 67 pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
67 par->family); 68 par->family);
68 return ret; 69 return ret;
69 } 70 }
70 71
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c
index ec377cc6a369..809639ce6f5a 100644
--- a/net/netfilter/xt_connmark.c
+++ b/net/netfilter/xt_connmark.c
@@ -79,8 +79,8 @@ static int connmark_tg_check(const struct xt_tgchk_param *par)
79 79
80 ret = nf_ct_netns_get(par->net, par->family); 80 ret = nf_ct_netns_get(par->net, par->family);
81 if (ret < 0) 81 if (ret < 0)
82 pr_info("cannot load conntrack support for proto=%u\n", 82 pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
83 par->family); 83 par->family);
84 return ret; 84 return ret;
85} 85}
86 86
@@ -109,8 +109,8 @@ static int connmark_mt_check(const struct xt_mtchk_param *par)
109 109
110 ret = nf_ct_netns_get(par->net, par->family); 110 ret = nf_ct_netns_get(par->net, par->family);
111 if (ret < 0) 111 if (ret < 0)
112 pr_info("cannot load conntrack support for proto=%u\n", 112 pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
113 par->family); 113 par->family);
114 return ret; 114 return ret;
115} 115}
116 116
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index 39cf1d019240..df80fe7d391c 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -272,8 +272,8 @@ static int conntrack_mt_check(const struct xt_mtchk_param *par)
272 272
273 ret = nf_ct_netns_get(par->net, par->family); 273 ret = nf_ct_netns_get(par->net, par->family);
274 if (ret < 0) 274 if (ret < 0)
275 pr_info("cannot load conntrack support for proto=%u\n", 275 pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
276 par->family); 276 par->family);
277 return ret; 277 return ret;
278} 278}
279 279
diff --git a/net/netfilter/xt_dscp.c b/net/netfilter/xt_dscp.c
index 236ac8008909..a4c2b862f820 100644
--- a/net/netfilter/xt_dscp.c
+++ b/net/netfilter/xt_dscp.c
@@ -46,10 +46,8 @@ static int dscp_mt_check(const struct xt_mtchk_param *par)
46{ 46{
47 const struct xt_dscp_info *info = par->matchinfo; 47 const struct xt_dscp_info *info = par->matchinfo;
48 48
49 if (info->dscp > XT_DSCP_MAX) { 49 if (info->dscp > XT_DSCP_MAX)
50 pr_info("dscp %x out of range\n", info->dscp);
51 return -EDOM; 50 return -EDOM;
52 }
53 51
54 return 0; 52 return 0;
55} 53}
diff --git a/net/netfilter/xt_ecn.c b/net/netfilter/xt_ecn.c
index 3c831a8efebc..c7ad4afa5fb8 100644
--- a/net/netfilter/xt_ecn.c
+++ b/net/netfilter/xt_ecn.c
@@ -97,7 +97,7 @@ static int ecn_mt_check4(const struct xt_mtchk_param *par)
97 97
98 if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) && 98 if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&
99 (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) { 99 (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) {
100 pr_info("cannot match TCP bits in rule for non-tcp packets\n"); 100 pr_info_ratelimited("cannot match TCP bits for non-tcp packets\n");
101 return -EINVAL; 101 return -EINVAL;
102 } 102 }
103 103
@@ -139,7 +139,7 @@ static int ecn_mt_check6(const struct xt_mtchk_param *par)
139 139
140 if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) && 140 if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&
141 (ip->proto != IPPROTO_TCP || ip->invflags & IP6T_INV_PROTO)) { 141 (ip->proto != IPPROTO_TCP || ip->invflags & IP6T_INV_PROTO)) {
142 pr_info("cannot match TCP bits in rule for non-tcp packets\n"); 142 pr_info_ratelimited("cannot match TCP bits for non-tcp packets\n");
143 return -EINVAL; 143 return -EINVAL;
144 } 144 }
145 145
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index ca6847403ca2..66f5aca62a08 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -523,7 +523,8 @@ static u64 user2rate(u64 user)
523 if (user != 0) { 523 if (user != 0) {
524 return div64_u64(XT_HASHLIMIT_SCALE_v2, user); 524 return div64_u64(XT_HASHLIMIT_SCALE_v2, user);
525 } else { 525 } else {
526 pr_warn("invalid rate from userspace: %llu\n", user); 526 pr_info_ratelimited("invalid rate from userspace: %llu\n",
527 user);
527 return 0; 528 return 0;
528 } 529 }
529} 530}
@@ -774,7 +775,7 @@ hashlimit_mt_common(const struct sk_buff *skb, struct xt_action_param *par,
774 if (!dh->rateinfo.prev_window && 775 if (!dh->rateinfo.prev_window &&
775 (dh->rateinfo.current_rate <= dh->rateinfo.burst)) { 776 (dh->rateinfo.current_rate <= dh->rateinfo.burst)) {
776 spin_unlock(&dh->lock); 777 spin_unlock(&dh->lock);
777 rcu_read_unlock_bh(); 778 local_bh_enable();
778 return !(cfg->mode & XT_HASHLIMIT_INVERT); 779 return !(cfg->mode & XT_HASHLIMIT_INVERT);
779 } else { 780 } else {
780 goto overlimit; 781 goto overlimit;
@@ -865,33 +866,34 @@ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
865 } 866 }
866 867
867 if (cfg->mode & ~XT_HASHLIMIT_ALL) { 868 if (cfg->mode & ~XT_HASHLIMIT_ALL) {
868 pr_info("Unknown mode mask %X, kernel too old?\n", 869 pr_info_ratelimited("Unknown mode mask %X, kernel too old?\n",
869 cfg->mode); 870 cfg->mode);
870 return -EINVAL; 871 return -EINVAL;
871 } 872 }
872 873
873 /* Check for overflow. */ 874 /* Check for overflow. */
874 if (revision >= 3 && cfg->mode & XT_HASHLIMIT_RATE_MATCH) { 875 if (revision >= 3 && cfg->mode & XT_HASHLIMIT_RATE_MATCH) {
875 if (cfg->avg == 0 || cfg->avg > U32_MAX) { 876 if (cfg->avg == 0 || cfg->avg > U32_MAX) {
876 pr_info("hashlimit invalid rate\n"); 877 pr_info_ratelimited("invalid rate\n");
877 return -ERANGE; 878 return -ERANGE;
878 } 879 }
879 880
880 if (cfg->interval == 0) { 881 if (cfg->interval == 0) {
881 pr_info("hashlimit invalid interval\n"); 882 pr_info_ratelimited("invalid interval\n");
882 return -EINVAL; 883 return -EINVAL;
883 } 884 }
884 } else if (cfg->mode & XT_HASHLIMIT_BYTES) { 885 } else if (cfg->mode & XT_HASHLIMIT_BYTES) {
885 if (user2credits_byte(cfg->avg) == 0) { 886 if (user2credits_byte(cfg->avg) == 0) {
886 pr_info("overflow, rate too high: %llu\n", cfg->avg); 887 pr_info_ratelimited("overflow, rate too high: %llu\n",
888 cfg->avg);
887 return -EINVAL; 889 return -EINVAL;
888 } 890 }
889 } else if (cfg->burst == 0 || 891 } else if (cfg->burst == 0 ||
890 user2credits(cfg->avg * cfg->burst, revision) < 892 user2credits(cfg->avg * cfg->burst, revision) <
891 user2credits(cfg->avg, revision)) { 893 user2credits(cfg->avg, revision)) {
892 pr_info("overflow, try lower: %llu/%llu\n", 894 pr_info_ratelimited("overflow, try lower: %llu/%llu\n",
893 cfg->avg, cfg->burst); 895 cfg->avg, cfg->burst);
894 return -ERANGE; 896 return -ERANGE;
895 } 897 }
896 898
897 mutex_lock(&hashlimit_mutex); 899 mutex_lock(&hashlimit_mutex);
diff --git a/net/netfilter/xt_helper.c b/net/netfilter/xt_helper.c
index 38a78151c0e9..fd077aeaaed9 100644
--- a/net/netfilter/xt_helper.c
+++ b/net/netfilter/xt_helper.c
@@ -61,8 +61,8 @@ static int helper_mt_check(const struct xt_mtchk_param *par)
61 61
62 ret = nf_ct_netns_get(par->net, par->family); 62 ret = nf_ct_netns_get(par->net, par->family);
63 if (ret < 0) { 63 if (ret < 0) {
64 pr_info("cannot load conntrack support for proto=%u\n", 64 pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
65 par->family); 65 par->family);
66 return ret; 66 return ret;
67 } 67 }
68 info->name[sizeof(info->name) - 1] = '\0'; 68 info->name[sizeof(info->name) - 1] = '\0';
diff --git a/net/netfilter/xt_ipcomp.c b/net/netfilter/xt_ipcomp.c
index 7ca64a50db04..57f1df575701 100644
--- a/net/netfilter/xt_ipcomp.c
+++ b/net/netfilter/xt_ipcomp.c
@@ -72,7 +72,7 @@ static int comp_mt_check(const struct xt_mtchk_param *par)
72 72
73 /* Must specify no unknown invflags */ 73 /* Must specify no unknown invflags */
74 if (compinfo->invflags & ~XT_IPCOMP_INV_MASK) { 74 if (compinfo->invflags & ~XT_IPCOMP_INV_MASK) {
75 pr_err("unknown flags %X\n", compinfo->invflags); 75 pr_info_ratelimited("unknown flags %X\n", compinfo->invflags);
76 return -EINVAL; 76 return -EINVAL;
77 } 77 }
78 return 0; 78 return 0;
diff --git a/net/netfilter/xt_ipvs.c b/net/netfilter/xt_ipvs.c
index 42540d26c2b8..1d950a6100af 100644
--- a/net/netfilter/xt_ipvs.c
+++ b/net/netfilter/xt_ipvs.c
@@ -158,7 +158,8 @@ static int ipvs_mt_check(const struct xt_mtchk_param *par)
158 && par->family != NFPROTO_IPV6 158 && par->family != NFPROTO_IPV6
159#endif 159#endif
160 ) { 160 ) {
161 pr_info("protocol family %u not supported\n", par->family); 161 pr_info_ratelimited("protocol family %u not supported\n",
162 par->family);
162 return -EINVAL; 163 return -EINVAL;
163 } 164 }
164 165
diff --git a/net/netfilter/xt_l2tp.c b/net/netfilter/xt_l2tp.c
index 8aee572771f2..c43482bf48e6 100644
--- a/net/netfilter/xt_l2tp.c
+++ b/net/netfilter/xt_l2tp.c
@@ -216,7 +216,7 @@ static int l2tp_mt_check(const struct xt_mtchk_param *par)
216 /* Check for invalid flags */ 216 /* Check for invalid flags */
217 if (info->flags & ~(XT_L2TP_TID | XT_L2TP_SID | XT_L2TP_VERSION | 217 if (info->flags & ~(XT_L2TP_TID | XT_L2TP_SID | XT_L2TP_VERSION |
218 XT_L2TP_TYPE)) { 218 XT_L2TP_TYPE)) {
219 pr_info("unknown flags: %x\n", info->flags); 219 pr_info_ratelimited("unknown flags: %x\n", info->flags);
220 return -EINVAL; 220 return -EINVAL;
221 } 221 }
222 222
@@ -225,7 +225,8 @@ static int l2tp_mt_check(const struct xt_mtchk_param *par)
225 (!(info->flags & XT_L2TP_SID)) && 225 (!(info->flags & XT_L2TP_SID)) &&
226 ((!(info->flags & XT_L2TP_TYPE)) || 226 ((!(info->flags & XT_L2TP_TYPE)) ||
227 (info->type != XT_L2TP_TYPE_CONTROL))) { 227 (info->type != XT_L2TP_TYPE_CONTROL))) {
228 pr_info("invalid flags combination: %x\n", info->flags); 228 pr_info_ratelimited("invalid flags combination: %x\n",
229 info->flags);
229 return -EINVAL; 230 return -EINVAL;
230 } 231 }
231 232
@@ -234,19 +235,22 @@ static int l2tp_mt_check(const struct xt_mtchk_param *par)
234 */ 235 */
235 if (info->flags & XT_L2TP_VERSION) { 236 if (info->flags & XT_L2TP_VERSION) {
236 if ((info->version < 2) || (info->version > 3)) { 237 if ((info->version < 2) || (info->version > 3)) {
237 pr_info("wrong L2TP version: %u\n", info->version); 238 pr_info_ratelimited("wrong L2TP version: %u\n",
239 info->version);
238 return -EINVAL; 240 return -EINVAL;
239 } 241 }
240 242
241 if (info->version == 2) { 243 if (info->version == 2) {
242 if ((info->flags & XT_L2TP_TID) && 244 if ((info->flags & XT_L2TP_TID) &&
243 (info->tid > 0xffff)) { 245 (info->tid > 0xffff)) {
244 pr_info("v2 tid > 0xffff: %u\n", info->tid); 246 pr_info_ratelimited("v2 tid > 0xffff: %u\n",
247 info->tid);
245 return -EINVAL; 248 return -EINVAL;
246 } 249 }
247 if ((info->flags & XT_L2TP_SID) && 250 if ((info->flags & XT_L2TP_SID) &&
248 (info->sid > 0xffff)) { 251 (info->sid > 0xffff)) {
249 pr_info("v2 sid > 0xffff: %u\n", info->sid); 252 pr_info_ratelimited("v2 sid > 0xffff: %u\n",
253 info->sid);
250 return -EINVAL; 254 return -EINVAL;
251 } 255 }
252 } 256 }
@@ -268,13 +272,13 @@ static int l2tp_mt_check4(const struct xt_mtchk_param *par)
268 272
269 if ((ip->proto != IPPROTO_UDP) && 273 if ((ip->proto != IPPROTO_UDP) &&
270 (ip->proto != IPPROTO_L2TP)) { 274 (ip->proto != IPPROTO_L2TP)) {
271 pr_info("missing protocol rule (udp|l2tpip)\n"); 275 pr_info_ratelimited("missing protocol rule (udp|l2tpip)\n");
272 return -EINVAL; 276 return -EINVAL;
273 } 277 }
274 278
275 if ((ip->proto == IPPROTO_L2TP) && 279 if ((ip->proto == IPPROTO_L2TP) &&
276 (info->version == 2)) { 280 (info->version == 2)) {
277 pr_info("v2 doesn't support IP mode\n"); 281 pr_info_ratelimited("v2 doesn't support IP mode\n");
278 return -EINVAL; 282 return -EINVAL;
279 } 283 }
280 284
@@ -295,13 +299,13 @@ static int l2tp_mt_check6(const struct xt_mtchk_param *par)
295 299
296 if ((ip->proto != IPPROTO_UDP) && 300 if ((ip->proto != IPPROTO_UDP) &&
297 (ip->proto != IPPROTO_L2TP)) { 301 (ip->proto != IPPROTO_L2TP)) {
298 pr_info("missing protocol rule (udp|l2tpip)\n"); 302 pr_info_ratelimited("missing protocol rule (udp|l2tpip)\n");
299 return -EINVAL; 303 return -EINVAL;
300 } 304 }
301 305
302 if ((ip->proto == IPPROTO_L2TP) && 306 if ((ip->proto == IPPROTO_L2TP) &&
303 (info->version == 2)) { 307 (info->version == 2)) {
304 pr_info("v2 doesn't support IP mode\n"); 308 pr_info_ratelimited("v2 doesn't support IP mode\n");
305 return -EINVAL; 309 return -EINVAL;
306 } 310 }
307 311
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
index 61403b77361c..55d18cd67635 100644
--- a/net/netfilter/xt_limit.c
+++ b/net/netfilter/xt_limit.c
@@ -106,8 +106,8 @@ static int limit_mt_check(const struct xt_mtchk_param *par)
106 /* Check for overflow. */ 106 /* Check for overflow. */
107 if (r->burst == 0 107 if (r->burst == 0
108 || user2credits(r->avg * r->burst) < user2credits(r->avg)) { 108 || user2credits(r->avg * r->burst) < user2credits(r->avg)) {
109 pr_info("Overflow, try lower: %u/%u\n", 109 pr_info_ratelimited("Overflow, try lower: %u/%u\n",
110 r->avg, r->burst); 110 r->avg, r->burst);
111 return -ERANGE; 111 return -ERANGE;
112 } 112 }
113 113
diff --git a/net/netfilter/xt_nat.c b/net/netfilter/xt_nat.c
index 0fd14d1eb09d..bdb689cdc829 100644
--- a/net/netfilter/xt_nat.c
+++ b/net/netfilter/xt_nat.c
@@ -8,6 +8,8 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
11#include <linux/module.h> 13#include <linux/module.h>
12#include <linux/skbuff.h> 14#include <linux/skbuff.h>
13#include <linux/netfilter.h> 15#include <linux/netfilter.h>
@@ -19,8 +21,7 @@ static int xt_nat_checkentry_v0(const struct xt_tgchk_param *par)
19 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; 21 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
20 22
21 if (mr->rangesize != 1) { 23 if (mr->rangesize != 1) {
22 pr_info("%s: multiple ranges no longer supported\n", 24 pr_info_ratelimited("multiple ranges no longer supported\n");
23 par->target->name);
24 return -EINVAL; 25 return -EINVAL;
25 } 26 }
26 return nf_ct_netns_get(par->net, par->family); 27 return nf_ct_netns_get(par->net, par->family);
diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
index 6f92d25590a8..c8674deed4eb 100644
--- a/net/netfilter/xt_nfacct.c
+++ b/net/netfilter/xt_nfacct.c
@@ -6,6 +6,8 @@
6 * it under the terms of the GNU General Public License version 2 (or any 6 * it under the terms of the GNU General Public License version 2 (or any
7 * later at your option) as published by the Free Software Foundation. 7 * later at your option) as published by the Free Software Foundation.
8 */ 8 */
9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
9#include <linux/module.h> 11#include <linux/module.h>
10#include <linux/skbuff.h> 12#include <linux/skbuff.h>
11 13
@@ -39,8 +41,8 @@ nfacct_mt_checkentry(const struct xt_mtchk_param *par)
39 41
40 nfacct = nfnl_acct_find_get(par->net, info->name); 42 nfacct = nfnl_acct_find_get(par->net, info->name);
41 if (nfacct == NULL) { 43 if (nfacct == NULL) {
42 pr_info("xt_nfacct: accounting object with name `%s' " 44 pr_info_ratelimited("accounting object `%s' does not exists\n",
43 "does not exists\n", info->name); 45 info->name);
44 return -ENOENT; 46 return -ENOENT;
45 } 47 }
46 info->nfacct = nfacct; 48 info->nfacct = nfacct;
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index bb33598e4530..9d6d67b953ac 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -107,9 +107,7 @@ static int physdev_mt_check(const struct xt_mtchk_param *par)
107 info->invert & XT_PHYSDEV_OP_BRIDGED) && 107 info->invert & XT_PHYSDEV_OP_BRIDGED) &&
108 par->hook_mask & ((1 << NF_INET_LOCAL_OUT) | 108 par->hook_mask & ((1 << NF_INET_LOCAL_OUT) |
109 (1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) { 109 (1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) {
110 pr_info("using --physdev-out and --physdev-is-out are only " 110 pr_info_ratelimited("--physdev-out and --physdev-is-out only supported in the FORWARD and POSTROUTING chains with bridged traffic\n");
111 "supported in the FORWARD and POSTROUTING chains with "
112 "bridged traffic.\n");
113 if (par->hook_mask & (1 << NF_INET_LOCAL_OUT)) 111 if (par->hook_mask & (1 << NF_INET_LOCAL_OUT))
114 return -EINVAL; 112 return -EINVAL;
115 } 113 }
diff --git a/net/netfilter/xt_policy.c b/net/netfilter/xt_policy.c
index 5639fb03bdd9..13f8ccf946d6 100644
--- a/net/netfilter/xt_policy.c
+++ b/net/netfilter/xt_policy.c
@@ -132,26 +132,29 @@ policy_mt(const struct sk_buff *skb, struct xt_action_param *par)
132static int policy_mt_check(const struct xt_mtchk_param *par) 132static int policy_mt_check(const struct xt_mtchk_param *par)
133{ 133{
134 const struct xt_policy_info *info = par->matchinfo; 134 const struct xt_policy_info *info = par->matchinfo;
135 const char *errmsg = "neither incoming nor outgoing policy selected";
136
137 if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT)))
138 goto err;
135 139
136 if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT))) {
137 pr_info("neither incoming nor outgoing policy selected\n");
138 return -EINVAL;
139 }
140 if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) | 140 if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) |
141 (1 << NF_INET_LOCAL_IN)) && info->flags & XT_POLICY_MATCH_OUT) { 141 (1 << NF_INET_LOCAL_IN)) && info->flags & XT_POLICY_MATCH_OUT) {
142 pr_info("output policy not valid in PREROUTING and INPUT\n"); 142 errmsg = "output policy not valid in PREROUTING and INPUT";
143 return -EINVAL; 143 goto err;
144 } 144 }
145 if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) | 145 if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) |
146 (1 << NF_INET_LOCAL_OUT)) && info->flags & XT_POLICY_MATCH_IN) { 146 (1 << NF_INET_LOCAL_OUT)) && info->flags & XT_POLICY_MATCH_IN) {
147 pr_info("input policy not valid in POSTROUTING and OUTPUT\n"); 147 errmsg = "input policy not valid in POSTROUTING and OUTPUT";
148 return -EINVAL; 148 goto err;
149 } 149 }
150 if (info->len > XT_POLICY_MAX_ELEM) { 150 if (info->len > XT_POLICY_MAX_ELEM) {
151 pr_info("too many policy elements\n"); 151 errmsg = "too many policy elements";
152 return -EINVAL; 152 goto err;
153 } 153 }
154 return 0; 154 return 0;
155err:
156 pr_info_ratelimited("%s\n", errmsg);
157 return -EINVAL;
155} 158}
156 159
157static struct xt_match policy_mt_reg[] __read_mostly = { 160static struct xt_match policy_mt_reg[] __read_mostly = {
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 245fa350a7a8..6d232d18faff 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -342,8 +342,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
342 net_get_random_once(&hash_rnd, sizeof(hash_rnd)); 342 net_get_random_once(&hash_rnd, sizeof(hash_rnd));
343 343
344 if (info->check_set & ~XT_RECENT_VALID_FLAGS) { 344 if (info->check_set & ~XT_RECENT_VALID_FLAGS) {
345 pr_info("Unsupported user space flags (%08x)\n", 345 pr_info_ratelimited("Unsupported userspace flags (%08x)\n",
346 info->check_set); 346 info->check_set);
347 return -EINVAL; 347 return -EINVAL;
348 } 348 }
349 if (hweight8(info->check_set & 349 if (hweight8(info->check_set &
@@ -357,8 +357,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
357 if ((info->check_set & XT_RECENT_REAP) && !info->seconds) 357 if ((info->check_set & XT_RECENT_REAP) && !info->seconds)
358 return -EINVAL; 358 return -EINVAL;
359 if (info->hit_count >= XT_RECENT_MAX_NSTAMPS) { 359 if (info->hit_count >= XT_RECENT_MAX_NSTAMPS) {
360 pr_info("hitcount (%u) is larger than allowed maximum (%u)\n", 360 pr_info_ratelimited("hitcount (%u) is larger than allowed maximum (%u)\n",
361 info->hit_count, XT_RECENT_MAX_NSTAMPS - 1); 361 info->hit_count, XT_RECENT_MAX_NSTAMPS - 1);
362 return -EINVAL; 362 return -EINVAL;
363 } 363 }
364 if (info->name[0] == '\0' || 364 if (info->name[0] == '\0' ||
@@ -587,7 +587,7 @@ recent_mt_proc_write(struct file *file, const char __user *input,
587 add = true; 587 add = true;
588 break; 588 break;
589 default: 589 default:
590 pr_info("Need \"+ip\", \"-ip\" or \"/\"\n"); 590 pr_info_ratelimited("Need \"+ip\", \"-ip\" or \"/\"\n");
591 return -EINVAL; 591 return -EINVAL;
592 } 592 }
593 593
@@ -601,10 +601,8 @@ recent_mt_proc_write(struct file *file, const char __user *input,
601 succ = in4_pton(c, size, (void *)&addr, '\n', NULL); 601 succ = in4_pton(c, size, (void *)&addr, '\n', NULL);
602 } 602 }
603 603
604 if (!succ) { 604 if (!succ)
605 pr_info("illegal address written to procfs\n");
606 return -EINVAL; 605 return -EINVAL;
607 }
608 606
609 spin_lock_bh(&recent_lock); 607 spin_lock_bh(&recent_lock);
610 e = recent_entry_lookup(t, &addr, family, 0); 608 e = recent_entry_lookup(t, &addr, family, 0);
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 16b6b11ee83f..6f4c5217d835 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -92,12 +92,12 @@ set_match_v0_checkentry(const struct xt_mtchk_param *par)
92 index = ip_set_nfnl_get_byindex(par->net, info->match_set.index); 92 index = ip_set_nfnl_get_byindex(par->net, info->match_set.index);
93 93
94 if (index == IPSET_INVALID_ID) { 94 if (index == IPSET_INVALID_ID) {
95 pr_warn("Cannot find set identified by id %u to match\n", 95 pr_info_ratelimited("Cannot find set identified by id %u to match\n",
96 info->match_set.index); 96 info->match_set.index);
97 return -ENOENT; 97 return -ENOENT;
98 } 98 }
99 if (info->match_set.u.flags[IPSET_DIM_MAX - 1] != 0) { 99 if (info->match_set.u.flags[IPSET_DIM_MAX - 1] != 0) {
100 pr_warn("Protocol error: set match dimension is over the limit!\n"); 100 pr_info_ratelimited("set match dimension is over the limit!\n");
101 ip_set_nfnl_put(par->net, info->match_set.index); 101 ip_set_nfnl_put(par->net, info->match_set.index);
102 return -ERANGE; 102 return -ERANGE;
103 } 103 }
@@ -143,12 +143,12 @@ set_match_v1_checkentry(const struct xt_mtchk_param *par)
143 index = ip_set_nfnl_get_byindex(par->net, info->match_set.index); 143 index = ip_set_nfnl_get_byindex(par->net, info->match_set.index);
144 144
145 if (index == IPSET_INVALID_ID) { 145 if (index == IPSET_INVALID_ID) {
146 pr_warn("Cannot find set identified by id %u to match\n", 146 pr_info_ratelimited("Cannot find set identified by id %u to match\n",
147 info->match_set.index); 147 info->match_set.index);
148 return -ENOENT; 148 return -ENOENT;
149 } 149 }
150 if (info->match_set.dim > IPSET_DIM_MAX) { 150 if (info->match_set.dim > IPSET_DIM_MAX) {
151 pr_warn("Protocol error: set match dimension is over the limit!\n"); 151 pr_info_ratelimited("set match dimension is over the limit!\n");
152 ip_set_nfnl_put(par->net, info->match_set.index); 152 ip_set_nfnl_put(par->net, info->match_set.index);
153 return -ERANGE; 153 return -ERANGE;
154 } 154 }
@@ -241,8 +241,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
241 if (info->add_set.index != IPSET_INVALID_ID) { 241 if (info->add_set.index != IPSET_INVALID_ID) {
242 index = ip_set_nfnl_get_byindex(par->net, info->add_set.index); 242 index = ip_set_nfnl_get_byindex(par->net, info->add_set.index);
243 if (index == IPSET_INVALID_ID) { 243 if (index == IPSET_INVALID_ID) {
244 pr_warn("Cannot find add_set index %u as target\n", 244 pr_info_ratelimited("Cannot find add_set index %u as target\n",
245 info->add_set.index); 245 info->add_set.index);
246 return -ENOENT; 246 return -ENOENT;
247 } 247 }
248 } 248 }
@@ -250,8 +250,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
250 if (info->del_set.index != IPSET_INVALID_ID) { 250 if (info->del_set.index != IPSET_INVALID_ID) {
251 index = ip_set_nfnl_get_byindex(par->net, info->del_set.index); 251 index = ip_set_nfnl_get_byindex(par->net, info->del_set.index);
252 if (index == IPSET_INVALID_ID) { 252 if (index == IPSET_INVALID_ID) {
253 pr_warn("Cannot find del_set index %u as target\n", 253 pr_info_ratelimited("Cannot find del_set index %u as target\n",
254 info->del_set.index); 254 info->del_set.index);
255 if (info->add_set.index != IPSET_INVALID_ID) 255 if (info->add_set.index != IPSET_INVALID_ID)
256 ip_set_nfnl_put(par->net, info->add_set.index); 256 ip_set_nfnl_put(par->net, info->add_set.index);
257 return -ENOENT; 257 return -ENOENT;
@@ -259,7 +259,7 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
259 } 259 }
260 if (info->add_set.u.flags[IPSET_DIM_MAX - 1] != 0 || 260 if (info->add_set.u.flags[IPSET_DIM_MAX - 1] != 0 ||
261 info->del_set.u.flags[IPSET_DIM_MAX - 1] != 0) { 261 info->del_set.u.flags[IPSET_DIM_MAX - 1] != 0) {
262 pr_warn("Protocol error: SET target dimension is over the limit!\n"); 262 pr_info_ratelimited("SET target dimension over the limit!\n");
263 if (info->add_set.index != IPSET_INVALID_ID) 263 if (info->add_set.index != IPSET_INVALID_ID)
264 ip_set_nfnl_put(par->net, info->add_set.index); 264 ip_set_nfnl_put(par->net, info->add_set.index);
265 if (info->del_set.index != IPSET_INVALID_ID) 265 if (info->del_set.index != IPSET_INVALID_ID)
@@ -316,8 +316,8 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par)
316 if (info->add_set.index != IPSET_INVALID_ID) { 316 if (info->add_set.index != IPSET_INVALID_ID) {
317 index = ip_set_nfnl_get_byindex(par->net, info->add_set.index); 317 index = ip_set_nfnl_get_byindex(par->net, info->add_set.index);
318 if (index == IPSET_INVALID_ID) { 318 if (index == IPSET_INVALID_ID) {
319 pr_warn("Cannot find add_set index %u as target\n", 319 pr_info_ratelimited("Cannot find add_set index %u as target\n",
320 info->add_set.index); 320 info->add_set.index);
321 return -ENOENT; 321 return -ENOENT;
322 } 322 }
323 } 323 }
@@ -325,8 +325,8 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par)
325 if (info->del_set.index != IPSET_INVALID_ID) { 325 if (info->del_set.index != IPSET_INVALID_ID) {
326 index = ip_set_nfnl_get_byindex(par->net, info->del_set.index); 326 index = ip_set_nfnl_get_byindex(par->net, info->del_set.index);
327 if (index == IPSET_INVALID_ID) { 327 if (index == IPSET_INVALID_ID) {
328 pr_warn("Cannot find del_set index %u as target\n", 328 pr_info_ratelimited("Cannot find del_set index %u as target\n",
329 info->del_set.index); 329 info->del_set.index);
330 if (info->add_set.index != IPSET_INVALID_ID) 330 if (info->add_set.index != IPSET_INVALID_ID)
331 ip_set_nfnl_put(par->net, info->add_set.index); 331 ip_set_nfnl_put(par->net, info->add_set.index);
332 return -ENOENT; 332 return -ENOENT;
@@ -334,7 +334,7 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par)
334 } 334 }
335 if (info->add_set.dim > IPSET_DIM_MAX || 335 if (info->add_set.dim > IPSET_DIM_MAX ||
336 info->del_set.dim > IPSET_DIM_MAX) { 336 info->del_set.dim > IPSET_DIM_MAX) {
337 pr_warn("Protocol error: SET target dimension is over the limit!\n"); 337 pr_info_ratelimited("SET target dimension over the limit!\n");
338 if (info->add_set.index != IPSET_INVALID_ID) 338 if (info->add_set.index != IPSET_INVALID_ID)
339 ip_set_nfnl_put(par->net, info->add_set.index); 339 ip_set_nfnl_put(par->net, info->add_set.index);
340 if (info->del_set.index != IPSET_INVALID_ID) 340 if (info->del_set.index != IPSET_INVALID_ID)
@@ -444,8 +444,8 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
444 index = ip_set_nfnl_get_byindex(par->net, 444 index = ip_set_nfnl_get_byindex(par->net,
445 info->add_set.index); 445 info->add_set.index);
446 if (index == IPSET_INVALID_ID) { 446 if (index == IPSET_INVALID_ID) {
447 pr_warn("Cannot find add_set index %u as target\n", 447 pr_info_ratelimited("Cannot find add_set index %u as target\n",
448 info->add_set.index); 448 info->add_set.index);
449 return -ENOENT; 449 return -ENOENT;
450 } 450 }
451 } 451 }
@@ -454,8 +454,8 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
454 index = ip_set_nfnl_get_byindex(par->net, 454 index = ip_set_nfnl_get_byindex(par->net,
455 info->del_set.index); 455 info->del_set.index);
456 if (index == IPSET_INVALID_ID) { 456 if (index == IPSET_INVALID_ID) {
457 pr_warn("Cannot find del_set index %u as target\n", 457 pr_info_ratelimited("Cannot find del_set index %u as target\n",
458 info->del_set.index); 458 info->del_set.index);
459 if (info->add_set.index != IPSET_INVALID_ID) 459 if (info->add_set.index != IPSET_INVALID_ID)
460 ip_set_nfnl_put(par->net, 460 ip_set_nfnl_put(par->net,
461 info->add_set.index); 461 info->add_set.index);
@@ -465,7 +465,7 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
465 465
466 if (info->map_set.index != IPSET_INVALID_ID) { 466 if (info->map_set.index != IPSET_INVALID_ID) {
467 if (strncmp(par->table, "mangle", 7)) { 467 if (strncmp(par->table, "mangle", 7)) {
468 pr_warn("--map-set only usable from mangle table\n"); 468 pr_info_ratelimited("--map-set only usable from mangle table\n");
469 return -EINVAL; 469 return -EINVAL;
470 } 470 }
471 if (((info->flags & IPSET_FLAG_MAP_SKBPRIO) | 471 if (((info->flags & IPSET_FLAG_MAP_SKBPRIO) |
@@ -473,14 +473,14 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
473 !(par->hook_mask & (1 << NF_INET_FORWARD | 473 !(par->hook_mask & (1 << NF_INET_FORWARD |
474 1 << NF_INET_LOCAL_OUT | 474 1 << NF_INET_LOCAL_OUT |
475 1 << NF_INET_POST_ROUTING))) { 475 1 << NF_INET_POST_ROUTING))) {
476 pr_warn("mapping of prio or/and queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n"); 476 pr_info_ratelimited("mapping of prio or/and queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n");
477 return -EINVAL; 477 return -EINVAL;
478 } 478 }
479 index = ip_set_nfnl_get_byindex(par->net, 479 index = ip_set_nfnl_get_byindex(par->net,
480 info->map_set.index); 480 info->map_set.index);
481 if (index == IPSET_INVALID_ID) { 481 if (index == IPSET_INVALID_ID) {
482 pr_warn("Cannot find map_set index %u as target\n", 482 pr_info_ratelimited("Cannot find map_set index %u as target\n",
483 info->map_set.index); 483 info->map_set.index);
484 if (info->add_set.index != IPSET_INVALID_ID) 484 if (info->add_set.index != IPSET_INVALID_ID)
485 ip_set_nfnl_put(par->net, 485 ip_set_nfnl_put(par->net,
486 info->add_set.index); 486 info->add_set.index);
@@ -494,7 +494,7 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
494 if (info->add_set.dim > IPSET_DIM_MAX || 494 if (info->add_set.dim > IPSET_DIM_MAX ||
495 info->del_set.dim > IPSET_DIM_MAX || 495 info->del_set.dim > IPSET_DIM_MAX ||
496 info->map_set.dim > IPSET_DIM_MAX) { 496 info->map_set.dim > IPSET_DIM_MAX) {
497 pr_warn("Protocol error: SET target dimension is over the limit!\n"); 497 pr_info_ratelimited("SET target dimension over the limit!\n");
498 if (info->add_set.index != IPSET_INVALID_ID) 498 if (info->add_set.index != IPSET_INVALID_ID)
499 ip_set_nfnl_put(par->net, info->add_set.index); 499 ip_set_nfnl_put(par->net, info->add_set.index);
500 if (info->del_set.index != IPSET_INVALID_ID) 500 if (info->del_set.index != IPSET_INVALID_ID)
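The xt_set hunks above all make the same change: checkentry diagnostics that userspace can trigger at will move from pr_warn()/pr_info() to pr_info_ratelimited(), so a malicious or buggy ruleset loader cannot flood the kernel log. A minimal userspace sketch of the interval/burst throttling idea behind printk_ratelimited() follows; struct rl_state and log_ratelimited() are invented names for illustration, not kernel API.

#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Toy analogue of struct ratelimit_state: allow at most "burst"
 * messages per "interval" seconds, silently drop the rest. */
struct rl_state {
    time_t window_start;
    int interval;   /* seconds */
    int burst;      /* messages allowed per interval */
    int printed;
};

static bool log_ratelimited(struct rl_state *rs, const char *fmt, ...)
{
    time_t now = time(NULL);

    if (now - rs->window_start >= rs->interval) {
        rs->window_start = now;   /* new window, reset the budget */
        rs->printed = 0;
    }
    if (rs->printed >= rs->burst)
        return false;             /* suppressed, as printk_ratelimited does */
    rs->printed++;

    va_list ap;
    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
    return true;
}

int main(void)
{
    struct rl_state rs = { .interval = 5, .burst = 3 };

    for (int i = 0; i < 10; i++)  /* only the first 3 lines appear */
        log_ratelimited(&rs, "Cannot find add_set index %d as target\n", i);
    return 0;
}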
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 575d2153e3b8..2ac7f674d19b 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -171,7 +171,8 @@ static int socket_mt_v1_check(const struct xt_mtchk_param *par)
171 return err; 171 return err;
172 172
173 if (info->flags & ~XT_SOCKET_FLAGS_V1) { 173 if (info->flags & ~XT_SOCKET_FLAGS_V1) {
174 pr_info("unknown flags 0x%x\n", info->flags & ~XT_SOCKET_FLAGS_V1); 174 pr_info_ratelimited("unknown flags 0x%x\n",
175 info->flags & ~XT_SOCKET_FLAGS_V1);
175 return -EINVAL; 176 return -EINVAL;
176 } 177 }
177 return 0; 178 return 0;
@@ -187,7 +188,8 @@ static int socket_mt_v2_check(const struct xt_mtchk_param *par)
187 return err; 188 return err;
188 189
189 if (info->flags & ~XT_SOCKET_FLAGS_V2) { 190 if (info->flags & ~XT_SOCKET_FLAGS_V2) {
190 pr_info("unknown flags 0x%x\n", info->flags & ~XT_SOCKET_FLAGS_V2); 191 pr_info_ratelimited("unknown flags 0x%x\n",
192 info->flags & ~XT_SOCKET_FLAGS_V2);
191 return -EINVAL; 193 return -EINVAL;
192 } 194 }
193 return 0; 195 return 0;
@@ -203,8 +205,8 @@ static int socket_mt_v3_check(const struct xt_mtchk_param *par)
203 if (err) 205 if (err)
204 return err; 206 return err;
205 if (info->flags & ~XT_SOCKET_FLAGS_V3) { 207 if (info->flags & ~XT_SOCKET_FLAGS_V3) {
206 pr_info("unknown flags 0x%x\n", 208 pr_info_ratelimited("unknown flags 0x%x\n",
207 info->flags & ~XT_SOCKET_FLAGS_V3); 209 info->flags & ~XT_SOCKET_FLAGS_V3);
208 return -EINVAL; 210 return -EINVAL;
209 } 211 }
210 return 0; 212 return 0;
diff --git a/net/netfilter/xt_state.c b/net/netfilter/xt_state.c
index 5fbd79194d21..0b41c0befe3c 100644
--- a/net/netfilter/xt_state.c
+++ b/net/netfilter/xt_state.c
@@ -44,8 +44,8 @@ static int state_mt_check(const struct xt_mtchk_param *par)
44 44
45 ret = nf_ct_netns_get(par->net, par->family); 45 ret = nf_ct_netns_get(par->net, par->family);
46 if (ret < 0) 46 if (ret < 0)
47 pr_info("cannot load conntrack support for proto=%u\n", 47 pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
48 par->family); 48 par->family);
49 return ret; 49 return ret;
50} 50}
51 51
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index 1b01eec1fbda..0160f505e337 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -235,13 +235,13 @@ static int time_mt_check(const struct xt_mtchk_param *par)
235 235
236 if (info->daytime_start > XT_TIME_MAX_DAYTIME || 236 if (info->daytime_start > XT_TIME_MAX_DAYTIME ||
237 info->daytime_stop > XT_TIME_MAX_DAYTIME) { 237 info->daytime_stop > XT_TIME_MAX_DAYTIME) {
238 pr_info("invalid argument - start or " 238 pr_info_ratelimited("invalid argument - start or stop time greater than 23:59:59\n");
239 "stop time greater than 23:59:59\n");
240 return -EDOM; 239 return -EDOM;
241 } 240 }
242 241
243 if (info->flags & ~XT_TIME_ALL_FLAGS) { 242 if (info->flags & ~XT_TIME_ALL_FLAGS) {
244 pr_info("unknown flags 0x%x\n", info->flags & ~XT_TIME_ALL_FLAGS); 243 pr_info_ratelimited("unknown flags 0x%x\n",
244 info->flags & ~XT_TIME_ALL_FLAGS);
245 return -EINVAL; 245 return -EINVAL;
246 } 246 }
247 247
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 2ad445c1d27c..07e8478068f0 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2308,7 +2308,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2308 if (cb->start) { 2308 if (cb->start) {
2309 ret = cb->start(cb); 2309 ret = cb->start(cb);
2310 if (ret) 2310 if (ret)
2311 goto error_unlock; 2311 goto error_put;
2312 } 2312 }
2313 2313
2314 nlk->cb_running = true; 2314 nlk->cb_running = true;
@@ -2328,6 +2328,8 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2328 */ 2328 */
2329 return -EINTR; 2329 return -EINTR;
2330 2330
2331error_put:
2332 module_put(control->module);
2331error_unlock: 2333error_unlock:
2332 sock_put(sk); 2334 sock_put(sk);
2333 mutex_unlock(nlk->cb_mutex); 2335 mutex_unlock(nlk->cb_mutex);
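The __netlink_dump_start() fix above is a classic error-unwind ordering problem: once cb->start() can fail after the module reference has been taken, the failure path needs its own label (error_put) that drops the reference and then falls through into the existing unlock path. A small sketch of the pattern, with malloc()/free() standing in for the module reference and the socket/mutex state:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: labels unwind in reverse order of acquisition, and
 * a later-acquired resource (the "module ref") gets its own label that
 * falls through into the older ones, mirroring error_put above. */
static int start_op(int fail_at)
{
    char *ref = NULL, *lock = NULL;
    int err = -1;

    lock = malloc(16);            /* stands in for sock/mutex state */
    if (!lock)
        goto out;
    ref = malloc(16);             /* stands in for module_get() */
    if (!ref)
        goto err_unlock;

    if (fail_at) {                /* stands in for cb->start() failing */
        err = fail_at;
        goto err_put;             /* must drop the ref too */
    }
    return 0;                     /* success: ownership transferred */

err_put:
    free(ref);                    /* module_put() */
err_unlock:
    free(lock);                   /* sock_put()/mutex_unlock() */
out:
    return err;
}

int main(void)
{
    printf("%d\n", start_op(-5));
    return 0;
}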
diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
index 367d8c027101..2ceefa183cee 100644
--- a/net/nfc/llcp_commands.c
+++ b/net/nfc/llcp_commands.c
@@ -149,6 +149,10 @@ struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, char *uri,
149 149
150 pr_debug("uri: %s, len: %zu\n", uri, uri_len); 150 pr_debug("uri: %s, len: %zu\n", uri, uri_len);
151 151
152 /* sdreq->tlv_len is u8, takes uri_len, + 3 for header, + 1 for NULL */
153 if (WARN_ON_ONCE(uri_len > U8_MAX - 4))
154 return NULL;
155
152 sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL); 156 sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL);
153 if (sdreq == NULL) 157 if (sdreq == NULL)
154 return NULL; 158 return NULL;
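The new check in nfc_llcp_build_sdreq_tlv() guards a u8 length field: tlv_len must hold uri_len plus 3 bytes of TLV header plus a NUL, so anything above U8_MAX - 4 is rejected up front (and the nfc_sdp_genl_policy change below caps the attribute at the same bound). A sketch of the overflow-safe comparison; sdreq_tlv_len() is an invented helper:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* The length byte is a u8 and must cover uri_len + 3 header bytes + 1
 * NUL, so the bound is written as uri_len > UINT8_MAX - 4 rather than
 * uri_len + 4 > UINT8_MAX, avoiding any wraparound in the addition. */
static int sdreq_tlv_len(size_t uri_len, uint8_t *tlv_len)
{
    if (uri_len > UINT8_MAX - 4)
        return -1;                  /* reject, like the WARN_ON_ONCE above */
    *tlv_len = (uint8_t)(uri_len + 3 + 1);
    return 0;
}

int main(void)
{
    uint8_t len = 0;

    printf("short uri: %d\n", sdreq_tlv_len(strlen("urn:nfc:sn:snep"), &len));
    printf("len byte : %u\n", (unsigned)len);
    printf("huge uri : %d\n", sdreq_tlv_len(5000, &len));   /* rejected */
    return 0;
}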
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index c0b83dc9d993..f018eafc2a0d 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -61,7 +61,8 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
61}; 61};
62 62
63static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = { 63static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = {
64 [NFC_SDP_ATTR_URI] = { .type = NLA_STRING }, 64 [NFC_SDP_ATTR_URI] = { .type = NLA_STRING,
65 .len = U8_MAX - 4 },
65 [NFC_SDP_ATTR_SAP] = { .type = NLA_U8 }, 66 [NFC_SDP_ATTR_SAP] = { .type = NLA_U8 },
66}; 67};
67 68
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 94e190febfdd..2da3176bf792 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -224,7 +224,7 @@ static struct rds_connection *__rds_conn_create(struct net *net,
224 if (rds_destroy_pending(conn)) 224 if (rds_destroy_pending(conn))
225 ret = -ENETDOWN; 225 ret = -ENETDOWN;
226 else 226 else
227 ret = trans->conn_alloc(conn, gfp); 227 ret = trans->conn_alloc(conn, GFP_ATOMIC);
228 if (ret) { 228 if (ret) {
229 rcu_read_unlock(); 229 rcu_read_unlock();
230 kfree(conn->c_path); 230 kfree(conn->c_path);
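The RDS change swaps the caller-supplied gfp for GFP_ATOMIC because, as the surrounding context shows, trans->conn_alloc() now runs inside an rcu_read_lock() section where sleeping is forbidden. A schematic kernel-side fragment of the constraint (kernel context assumed; alloc_under_rcu() is an invented name, not part of the patch):

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Inside an RCU read-side critical section the task must not sleep, so
 * allocations there have to use GFP_ATOMIC (may fail, never blocks)
 * rather than GFP_KERNEL (may block to reclaim memory). */
static void *alloc_under_rcu(size_t size)
{
	void *p;

	rcu_read_lock();
	p = kzalloc(size, GFP_ATOMIC);	/* NULL on failure, never sleeps */
	rcu_read_unlock();
	return p;
}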
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 42410e910aff..cf73dc006c3b 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -445,7 +445,7 @@ send_fragmentable:
445 (char *)&opt, sizeof(opt)); 445 (char *)&opt, sizeof(opt));
446 if (ret == 0) { 446 if (ret == 0) {
447 ret = kernel_sendmsg(conn->params.local->socket, &msg, 447 ret = kernel_sendmsg(conn->params.local->socket, &msg,
448 iov, 1, iov[0].iov_len); 448 iov, 2, len);
449 449
450 opt = IPV6_PMTUDISC_DO; 450 opt = IPV6_PMTUDISC_DO;
451 kernel_setsockopt(conn->params.local->socket, 451 kernel_setsockopt(conn->params.local->socket,
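The rxrpc fix above corrects a truncated transmit: the fragmentable path built a two-element iovec but passed a count of 1 and iov[0].iov_len, silently dropping the payload segment. With scatter-gather I/O, both the segment count and the byte total must cover every element. A userspace analogue using writev():

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

/* When sending from a scatter list, the vector count and the length
 * argument must cover every segment, not just iov[0] -- otherwise the
 * second fragment never goes out. */
int main(void)
{
    char hdr[] = "header:";
    char body[] = "payload\n";
    struct iovec iov[2] = {
        { .iov_base = hdr,  .iov_len = strlen(hdr)  },
        { .iov_base = body, .iov_len = strlen(body) },
    };
    size_t len = iov[0].iov_len + iov[1].iov_len;

    /* writev(fd, iov, 2) is the moral equivalent of
     * kernel_sendmsg(sock, &msg, iov, 2, len) in the hunk above. */
    ssize_t n = writev(STDOUT_FILENO, iov, 2);
    if (n < 0 || (size_t)n != len)
        perror("writev");
    return 0;
}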
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index cc21e8db25b0..9d45d8b56744 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -517,9 +517,10 @@ try_again:
517 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, 517 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
518 sizeof(unsigned int), &id32); 518 sizeof(unsigned int), &id32);
519 } else { 519 } else {
520 unsigned long idl = call->user_call_ID;
521
520 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, 522 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
521 sizeof(unsigned long), 523 sizeof(unsigned long), &idl);
522 &call->user_call_ID);
523 } 524 }
524 if (ret < 0) 525 if (ret < 0)
525 goto error_unlock_call; 526 goto error_unlock_call;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 2bc1bc23d42e..247b7cc20c13 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -376,17 +376,12 @@ struct tcf_net {
376static unsigned int tcf_net_id; 376static unsigned int tcf_net_id;
377 377
378static int tcf_block_insert(struct tcf_block *block, struct net *net, 378static int tcf_block_insert(struct tcf_block *block, struct net *net,
379 u32 block_index, struct netlink_ext_ack *extack) 379 struct netlink_ext_ack *extack)
380{ 380{
381 struct tcf_net *tn = net_generic(net, tcf_net_id); 381 struct tcf_net *tn = net_generic(net, tcf_net_id);
382 int err;
383 382
384 err = idr_alloc_u32(&tn->idr, block, &block_index, block_index, 383 return idr_alloc_u32(&tn->idr, block, &block->index, block->index,
385 GFP_KERNEL); 384 GFP_KERNEL);
386 if (err)
387 return err;
388 block->index = block_index;
389 return 0;
390} 385}
391 386
392static void tcf_block_remove(struct tcf_block *block, struct net *net) 387static void tcf_block_remove(struct tcf_block *block, struct net *net)
@@ -397,6 +392,7 @@ static void tcf_block_remove(struct tcf_block *block, struct net *net)
397} 392}
398 393
399static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q, 394static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
395 u32 block_index,
400 struct netlink_ext_ack *extack) 396 struct netlink_ext_ack *extack)
401{ 397{
402 struct tcf_block *block; 398 struct tcf_block *block;
@@ -419,10 +415,13 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
419 err = -ENOMEM; 415 err = -ENOMEM;
420 goto err_chain_create; 416 goto err_chain_create;
421 } 417 }
422 block->net = qdisc_net(q);
423 block->refcnt = 1; 418 block->refcnt = 1;
424 block->net = net; 419 block->net = net;
425 block->q = q; 420 block->index = block_index;
421
422 /* Don't store q pointer for blocks which are shared */
423 if (!tcf_block_shared(block))
424 block->q = q;
426 return block; 425 return block;
427 426
428err_chain_create: 427err_chain_create:
@@ -518,13 +517,12 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
518 } 517 }
519 518
520 if (!block) { 519 if (!block) {
521 block = tcf_block_create(net, q, extack); 520 block = tcf_block_create(net, q, ei->block_index, extack);
522 if (IS_ERR(block)) 521 if (IS_ERR(block))
523 return PTR_ERR(block); 522 return PTR_ERR(block);
524 created = true; 523 created = true;
525 if (ei->block_index) { 524 if (tcf_block_shared(block)) {
526 err = tcf_block_insert(block, net, 525 err = tcf_block_insert(block, net, extack);
527 ei->block_index, extack);
528 if (err) 526 if (err)
529 goto err_block_insert; 527 goto err_block_insert;
530 } 528 }
@@ -1399,13 +1397,18 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
1399 nla_get_u32(tca[TCA_CHAIN]) != chain->index) 1397 nla_get_u32(tca[TCA_CHAIN]) != chain->index)
1400 continue; 1398 continue;
1401 if (!tcf_chain_dump(chain, q, parent, skb, cb, 1399 if (!tcf_chain_dump(chain, q, parent, skb, cb,
1402 index_start, &index)) 1400 index_start, &index)) {
1401 err = -EMSGSIZE;
1403 break; 1402 break;
1403 }
1404 } 1404 }
1405 1405
1406 cb->args[0] = index; 1406 cb->args[0] = index;
1407 1407
1408out: 1408out:
1409	/* If we made no progress, the error (EMSGSIZE) is real */
1410 if (skb->len == 0 && err)
1411 return err;
1409 return skb->len; 1412 return skb->len;
1410} 1413}
1411 1414
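Two things happen in the cls_api hunks: tcf_block_insert() now lets idr_alloc_u32() place the block at block->index directly, and tc_dump_tfilter() adopts the usual netlink dump convention that a full-message error (-EMSGSIZE) is only reported when no progress was made at all; partial output is returned as-is and the dump resumes on the next call. A sketch of that pagination rule; dump_chunk() is an invented stand-in for the dump callback:

#include <stdio.h>

/* A dump callback returns the bytes it produced, and only reports an
 * error for a too-small buffer when it made no progress; otherwise the
 * caller simply comes back for the rest. */
static int dump_chunk(char *buf, int buflen, int *cursor, int total)
{
    int len = 0;

    while (*cursor < total) {
        int n = snprintf(buf + len, buflen - len, "item %d\n", *cursor);
        if (n >= buflen - len) {        /* would not fit */
            if (len == 0)
                return -1;              /* no progress: real error (EMSGSIZE) */
            break;                      /* progress made: let caller retry */
        }
        len += n;
        (*cursor)++;
    }
    return len;
}

int main(void)
{
    char buf[64];
    int cursor = 0, len;

    while ((len = dump_chunk(buf, sizeof(buf), &cursor, 10)) > 0)
        fwrite(buf, 1, len, stdout);
    return 0;
}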
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 6c7601a530e3..ed8b6a24b9e9 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -96,7 +96,7 @@ struct tc_u_hnode {
96 96
97struct tc_u_common { 97struct tc_u_common {
98 struct tc_u_hnode __rcu *hlist; 98 struct tc_u_hnode __rcu *hlist;
99 struct tcf_block *block; 99 void *ptr;
100 int refcnt; 100 int refcnt;
101 struct idr handle_idr; 101 struct idr handle_idr;
102 struct hlist_node hnode; 102 struct hlist_node hnode;
@@ -330,9 +330,25 @@ static struct hlist_head *tc_u_common_hash;
330#define U32_HASH_SHIFT 10 330#define U32_HASH_SHIFT 10
331#define U32_HASH_SIZE (1 << U32_HASH_SHIFT) 331#define U32_HASH_SIZE (1 << U32_HASH_SHIFT)
332 332
333static void *tc_u_common_ptr(const struct tcf_proto *tp)
334{
335 struct tcf_block *block = tp->chain->block;
336
337 /* The block sharing is currently supported only
338 * for classless qdiscs. In that case we use block
339 * for tc_u_common identification. In case the
340 * block is not shared, block->q is a valid pointer
341 * and we can use that. That works for classful qdiscs.
342 */
343 if (tcf_block_shared(block))
344 return block;
345 else
346 return block->q;
347}
348
333static unsigned int tc_u_hash(const struct tcf_proto *tp) 349static unsigned int tc_u_hash(const struct tcf_proto *tp)
334{ 350{
335 return hash_ptr(tp->chain->block, U32_HASH_SHIFT); 351 return hash_ptr(tc_u_common_ptr(tp), U32_HASH_SHIFT);
336} 352}
337 353
338static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp) 354static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp)
@@ -342,7 +358,7 @@ static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp)
342 358
343 h = tc_u_hash(tp); 359 h = tc_u_hash(tp);
344 hlist_for_each_entry(tc, &tc_u_common_hash[h], hnode) { 360 hlist_for_each_entry(tc, &tc_u_common_hash[h], hnode) {
345 if (tc->block == tp->chain->block) 361 if (tc->ptr == tc_u_common_ptr(tp))
346 return tc; 362 return tc;
347 } 363 }
348 return NULL; 364 return NULL;
@@ -371,7 +387,7 @@ static int u32_init(struct tcf_proto *tp)
371 kfree(root_ht); 387 kfree(root_ht);
372 return -ENOBUFS; 388 return -ENOBUFS;
373 } 389 }
374 tp_c->block = tp->chain->block; 390 tp_c->ptr = tc_u_common_ptr(tp);
375 INIT_HLIST_NODE(&tp_c->hnode); 391 INIT_HLIST_NODE(&tp_c->hnode);
376 idr_init(&tp_c->handle_idr); 392 idr_init(&tp_c->handle_idr);
377 393
diff --git a/net/sctp/debug.c b/net/sctp/debug.c
index 291c97b07058..8f6c2e8c0953 100644
--- a/net/sctp/debug.c
+++ b/net/sctp/debug.c
@@ -81,6 +81,12 @@ const char *sctp_cname(const union sctp_subtype cid)
81 case SCTP_CID_RECONF: 81 case SCTP_CID_RECONF:
82 return "RECONF"; 82 return "RECONF";
83 83
84 case SCTP_CID_I_DATA:
85 return "I_DATA";
86
87 case SCTP_CID_I_FWD_TSN:
88 return "I_FWD_TSN";
89
84 default: 90 default:
85 break; 91 break;
86 } 92 }
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 141c9c466ec1..0247cc432e02 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -897,15 +897,12 @@ int sctp_hash_transport(struct sctp_transport *t)
897 rhl_for_each_entry_rcu(transport, tmp, list, node) 897 rhl_for_each_entry_rcu(transport, tmp, list, node)
898 if (transport->asoc->ep == t->asoc->ep) { 898 if (transport->asoc->ep == t->asoc->ep) {
899 rcu_read_unlock(); 899 rcu_read_unlock();
900 err = -EEXIST; 900 return -EEXIST;
901 goto out;
902 } 901 }
903 rcu_read_unlock(); 902 rcu_read_unlock();
904 903
905 err = rhltable_insert_key(&sctp_transport_hashtable, &arg, 904 err = rhltable_insert_key(&sctp_transport_hashtable, &arg,
906 &t->node, sctp_hash_params); 905 &t->node, sctp_hash_params);
907
908out:
909 if (err) 906 if (err)
910 pr_err_once("insert transport fail, errno %d\n", err); 907 pr_err_once("insert transport fail, errno %d\n", err);
911 908
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index cedf672487f9..f799043abec9 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -6,7 +6,7 @@
6 * 6 *
7 * This file is part of the SCTP kernel implementation 7 * This file is part of the SCTP kernel implementation
8 * 8 *
9 * These functions manipulate sctp tsn mapping array. 9 * This file contains sctp stream manipulation primitives and helpers.
10 * 10 *
11 * This SCTP implementation is free software; 11 * This SCTP implementation is free software;
12 * you can redistribute it and/or modify it under the terms of 12 * you can redistribute it and/or modify it under the terms of
diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c
index 8c7cf8f08711..d3764c181299 100644
--- a/net/sctp/stream_interleave.c
+++ b/net/sctp/stream_interleave.c
@@ -3,7 +3,8 @@
3 * 3 *
4 * This file is part of the SCTP kernel implementation 4 * This file is part of the SCTP kernel implementation
5 * 5 *
6 * These functions manipulate sctp stream queue/scheduling. 6 * These functions implement sctp stream message interleaving, mostly
7 * covering the processing of I-DATA and I-FORWARD-TSN chunks.
7 * 8 *
8 * This SCTP implementation is free software; 9 * This SCTP implementation is free software;
9 * you can redistribute it and/or modify it under the terms of 10 * you can redistribute it and/or modify it under the terms of
@@ -954,12 +955,8 @@ static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
954 __u32 freed = 0; 955 __u32 freed = 0;
955 __u16 needed; 956 __u16 needed;
956 957
957 if (chunk) { 958 needed = ntohs(chunk->chunk_hdr->length) -
958 needed = ntohs(chunk->chunk_hdr->length); 959 sizeof(struct sctp_idata_chunk);
959 needed -= sizeof(struct sctp_idata_chunk);
960 } else {
961 needed = SCTP_DEFAULT_MAXWINDOW;
962 }
963 960
964 if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) { 961 if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
965 freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed); 962 freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
@@ -971,9 +968,8 @@ static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
971 needed); 968 needed);
972 } 969 }
973 970
974 if (chunk && freed >= needed) 971 if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
975 if (sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0) 972 sctp_intl_start_pd(ulpq, gfp);
976 sctp_intl_start_pd(ulpq, gfp);
977 973
978 sk_mem_reclaim(asoc->base.sk); 974 sk_mem_reclaim(asoc->base.sk);
979} 975}
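After the cleanup, sctp_renege_events() in the interleave path is only ever called with a valid chunk, so the space to free is simply the chunk's wire length minus the I-DATA header. Chunk lengths travel in network byte order and include the header, hence the ntohs() and the subtraction. A toy illustration (the struct below is illustrative, not the real sctp_idata_chunk layout):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in header; field names and the 16-byte size are assumptions
 * made for the example only. */
struct idata_hdr {
    uint8_t  type, flags;
    uint16_t length;          /* network byte order, includes header */
    uint32_t tsn, mid, ppid_fsn;
};

int main(void)
{
    struct idata_hdr hdr = { .length = htons(1200) };
    uint16_t needed = ntohs(hdr.length) - sizeof(hdr);

    printf("payload to renege: %u bytes\n", (unsigned)needed);  /* 1200 - 16 */
    return 0;
}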
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index c8001471da6c..3e3dce3d4c63 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -813,7 +813,7 @@ err_out:
813 return err; 813 return err;
814} 814}
815 815
816int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info) 816int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
817{ 817{
818 int err; 818 int err;
819 char *name; 819 char *name;
@@ -835,20 +835,27 @@ int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
835 835
836 name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); 836 name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
837 837
838 rtnl_lock();
839 bearer = tipc_bearer_find(net, name); 838 bearer = tipc_bearer_find(net, name);
840 if (!bearer) { 839 if (!bearer)
841 rtnl_unlock();
842 return -EINVAL; 840 return -EINVAL;
843 }
844 841
845 bearer_disable(net, bearer); 842 bearer_disable(net, bearer);
846 rtnl_unlock();
847 843
848 return 0; 844 return 0;
849} 845}
850 846
851int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info) 847int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
848{
849 int err;
850
851 rtnl_lock();
852 err = __tipc_nl_bearer_disable(skb, info);
853 rtnl_unlock();
854
855 return err;
856}
857
858int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
852{ 859{
853 int err; 860 int err;
854 char *bearer; 861 char *bearer;
@@ -890,15 +897,18 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
890 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); 897 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
891 } 898 }
892 899
900 return tipc_enable_bearer(net, bearer, domain, prio, attrs);
901}
902
903int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
904{
905 int err;
906
893 rtnl_lock(); 907 rtnl_lock();
894 err = tipc_enable_bearer(net, bearer, domain, prio, attrs); 908 err = __tipc_nl_bearer_enable(skb, info);
895 if (err) {
896 rtnl_unlock();
897 return err;
898 }
899 rtnl_unlock(); 909 rtnl_unlock();
900 910
901 return 0; 911 return err;
902} 912}
903 913
904int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info) 914int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
@@ -944,7 +954,7 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
944 return 0; 954 return 0;
945} 955}
946 956
947int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) 957int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
948{ 958{
949 int err; 959 int err;
950 char *name; 960 char *name;
@@ -965,22 +975,17 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
965 return -EINVAL; 975 return -EINVAL;
966 name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); 976 name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
967 977
968 rtnl_lock();
969 b = tipc_bearer_find(net, name); 978 b = tipc_bearer_find(net, name);
970 if (!b) { 979 if (!b)
971 rtnl_unlock();
972 return -EINVAL; 980 return -EINVAL;
973 }
974 981
975 if (attrs[TIPC_NLA_BEARER_PROP]) { 982 if (attrs[TIPC_NLA_BEARER_PROP]) {
976 struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; 983 struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
977 984
978 err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_BEARER_PROP], 985 err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_BEARER_PROP],
979 props); 986 props);
980 if (err) { 987 if (err)
981 rtnl_unlock();
982 return err; 988 return err;
983 }
984 989
985 if (props[TIPC_NLA_PROP_TOL]) 990 if (props[TIPC_NLA_PROP_TOL])
986 b->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]); 991 b->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
@@ -989,11 +994,21 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
989 if (props[TIPC_NLA_PROP_WIN]) 994 if (props[TIPC_NLA_PROP_WIN])
990 b->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]); 995 b->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
991 } 996 }
992 rtnl_unlock();
993 997
994 return 0; 998 return 0;
995} 999}
996 1000
1001int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
1002{
1003 int err;
1004
1005 rtnl_lock();
1006 err = __tipc_nl_bearer_set(skb, info);
1007 rtnl_unlock();
1008
1009 return err;
1010}
1011
997static int __tipc_nl_add_media(struct tipc_nl_msg *msg, 1012static int __tipc_nl_add_media(struct tipc_nl_msg *msg,
998 struct tipc_media *media, int nlflags) 1013 struct tipc_media *media, int nlflags)
999{ 1014{
@@ -1115,7 +1130,7 @@ err_out:
1115 return err; 1130 return err;
1116} 1131}
1117 1132
1118int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info) 1133int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
1119{ 1134{
1120 int err; 1135 int err;
1121 char *name; 1136 char *name;
@@ -1133,22 +1148,17 @@ int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
1133 return -EINVAL; 1148 return -EINVAL;
1134 name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]); 1149 name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]);
1135 1150
1136 rtnl_lock();
1137 m = tipc_media_find(name); 1151 m = tipc_media_find(name);
1138 if (!m) { 1152 if (!m)
1139 rtnl_unlock();
1140 return -EINVAL; 1153 return -EINVAL;
1141 }
1142 1154
1143 if (attrs[TIPC_NLA_MEDIA_PROP]) { 1155 if (attrs[TIPC_NLA_MEDIA_PROP]) {
1144 struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; 1156 struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
1145 1157
1146 err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_MEDIA_PROP], 1158 err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_MEDIA_PROP],
1147 props); 1159 props);
1148 if (err) { 1160 if (err)
1149 rtnl_unlock();
1150 return err; 1161 return err;
1151 }
1152 1162
1153 if (props[TIPC_NLA_PROP_TOL]) 1163 if (props[TIPC_NLA_PROP_TOL])
1154 m->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]); 1164 m->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
@@ -1157,7 +1167,17 @@ int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
1157 if (props[TIPC_NLA_PROP_WIN]) 1167 if (props[TIPC_NLA_PROP_WIN])
1158 m->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]); 1168 m->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1159 } 1169 }
1160 rtnl_unlock();
1161 1170
1162 return 0; 1171 return 0;
1163} 1172}
1173
1174int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
1175{
1176 int err;
1177
1178 rtnl_lock();
1179 err = __tipc_nl_media_set(skb, info);
1180 rtnl_unlock();
1181
1182 return err;
1183}
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 42d6eeeb646d..a53613d95bc9 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -188,15 +188,19 @@ extern struct tipc_media udp_media_info;
188#endif 188#endif
189 189
190int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info); 190int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info);
191int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info);
191int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info); 192int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info);
193int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info);
192int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb); 194int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb);
193int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info); 195int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info);
194int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info); 196int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info);
197int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info);
195int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info); 198int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info);
196 199
197int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb); 200int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb);
198int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info); 201int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info);
199int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info); 202int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info);
203int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info);
200 204
201int tipc_media_set_priority(const char *name, u32 new_value); 205int tipc_media_set_priority(const char *name, u32 new_value);
202int tipc_media_set_window(const char *name, u32 new_value); 206int tipc_media_set_window(const char *name, u32 new_value);
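The tipc changes above (and in net.c and netlink_compat.c below) follow one pattern: each doit handler is split into a lock-free __tipc_nl_*() core plus a thin tipc_nl_*() wrapper that takes rtnl_lock around it. The compat path, which now holds rtnl_lock across transcode, parse and doit, calls the double-underscore cores directly and so avoids taking the lock twice. A pthread sketch of the wrapper convention; all names here are invented:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;  /* plays rtnl_lock */
static int counter;

/* Core does the work and assumes the caller holds the lock, like
 * __tipc_nl_bearer_set(); the public wrapper adds the locking. */
static int __set_value(int v)
{
    counter = v;          /* lock must already be held here */
    return 0;
}

static int set_value(int v)
{
    int err;

    pthread_mutex_lock(&lock);
    err = __set_value(v);
    pthread_mutex_unlock(&lock);
    return err;
}

/* A caller that already holds the lock for a larger section -- the
 * analogue of __tipc_nl_compat_doit() -- uses the core directly. */
static int batch_set(int a, int b)
{
    int err;

    pthread_mutex_lock(&lock);
    err = __set_value(a);
    if (!err)
        err = __set_value(b);
    pthread_mutex_unlock(&lock);
    return err;
}

int main(void)
{
    set_value(1);
    batch_set(2, 3);
    printf("%d\n", counter);
    return 0;
}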
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 719c5924b638..1a2fde0d6f61 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -200,7 +200,7 @@ out:
200 return skb->len; 200 return skb->len;
201} 201}
202 202
203int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info) 203int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
204{ 204{
205 struct net *net = sock_net(skb->sk); 205 struct net *net = sock_net(skb->sk);
206 struct tipc_net *tn = net_generic(net, tipc_net_id); 206 struct tipc_net *tn = net_generic(net, tipc_net_id);
@@ -241,10 +241,19 @@ int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
241 if (!tipc_addr_node_valid(addr)) 241 if (!tipc_addr_node_valid(addr))
242 return -EINVAL; 242 return -EINVAL;
243 243
244 rtnl_lock();
245 tipc_net_start(net, addr); 244 tipc_net_start(net, addr);
246 rtnl_unlock();
247 } 245 }
248 246
249 return 0; 247 return 0;
250} 248}
249
250int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
251{
252 int err;
253
254 rtnl_lock();
255 err = __tipc_nl_net_set(skb, info);
256 rtnl_unlock();
257
258 return err;
259}
diff --git a/net/tipc/net.h b/net/tipc/net.h
index c7c254902873..c0306aa2374b 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -47,5 +47,6 @@ void tipc_net_stop(struct net *net);
47 47
48int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb); 48int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
49int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info); 49int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
50int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
50 51
51#endif 52#endif
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index e48f0b2c01b9..4492cda45566 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -285,10 +285,6 @@ static int __tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
285 if (!trans_buf) 285 if (!trans_buf)
286 return -ENOMEM; 286 return -ENOMEM;
287 287
288 err = (*cmd->transcode)(cmd, trans_buf, msg);
289 if (err)
290 goto trans_out;
291
292 attrbuf = kmalloc((tipc_genl_family.maxattr + 1) * 288 attrbuf = kmalloc((tipc_genl_family.maxattr + 1) *
293 sizeof(struct nlattr *), GFP_KERNEL); 289 sizeof(struct nlattr *), GFP_KERNEL);
294 if (!attrbuf) { 290 if (!attrbuf) {
@@ -296,27 +292,34 @@ static int __tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
296 goto trans_out; 292 goto trans_out;
297 } 293 }
298 294
299 err = nla_parse(attrbuf, tipc_genl_family.maxattr,
300 (const struct nlattr *)trans_buf->data,
301 trans_buf->len, NULL, NULL);
302 if (err)
303 goto parse_out;
304
305 doit_buf = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 295 doit_buf = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
306 if (!doit_buf) { 296 if (!doit_buf) {
307 err = -ENOMEM; 297 err = -ENOMEM;
308 goto parse_out; 298 goto attrbuf_out;
309 } 299 }
310 300
311 doit_buf->sk = msg->dst_sk;
312
313 memset(&info, 0, sizeof(info)); 301 memset(&info, 0, sizeof(info));
314 info.attrs = attrbuf; 302 info.attrs = attrbuf;
315 303
304 rtnl_lock();
305 err = (*cmd->transcode)(cmd, trans_buf, msg);
306 if (err)
307 goto doit_out;
308
309 err = nla_parse(attrbuf, tipc_genl_family.maxattr,
310 (const struct nlattr *)trans_buf->data,
311 trans_buf->len, NULL, NULL);
312 if (err)
313 goto doit_out;
314
315 doit_buf->sk = msg->dst_sk;
316
316 err = (*cmd->doit)(doit_buf, &info); 317 err = (*cmd->doit)(doit_buf, &info);
318doit_out:
319 rtnl_unlock();
317 320
318 kfree_skb(doit_buf); 321 kfree_skb(doit_buf);
319parse_out: 322attrbuf_out:
320 kfree(attrbuf); 323 kfree(attrbuf);
321trans_out: 324trans_out:
322 kfree_skb(trans_buf); 325 kfree_skb(trans_buf);
@@ -722,13 +725,13 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
722 725
723 media = tipc_media_find(lc->name); 726 media = tipc_media_find(lc->name);
724 if (media) { 727 if (media) {
725 cmd->doit = &tipc_nl_media_set; 728 cmd->doit = &__tipc_nl_media_set;
726 return tipc_nl_compat_media_set(skb, msg); 729 return tipc_nl_compat_media_set(skb, msg);
727 } 730 }
728 731
729 bearer = tipc_bearer_find(msg->net, lc->name); 732 bearer = tipc_bearer_find(msg->net, lc->name);
730 if (bearer) { 733 if (bearer) {
731 cmd->doit = &tipc_nl_bearer_set; 734 cmd->doit = &__tipc_nl_bearer_set;
732 return tipc_nl_compat_bearer_set(skb, msg); 735 return tipc_nl_compat_bearer_set(skb, msg);
733 } 736 }
734 737
@@ -1089,12 +1092,12 @@ static int tipc_nl_compat_handle(struct tipc_nl_compat_msg *msg)
1089 return tipc_nl_compat_dumpit(&dump, msg); 1092 return tipc_nl_compat_dumpit(&dump, msg);
1090 case TIPC_CMD_ENABLE_BEARER: 1093 case TIPC_CMD_ENABLE_BEARER:
1091 msg->req_type = TIPC_TLV_BEARER_CONFIG; 1094 msg->req_type = TIPC_TLV_BEARER_CONFIG;
1092 doit.doit = tipc_nl_bearer_enable; 1095 doit.doit = __tipc_nl_bearer_enable;
1093 doit.transcode = tipc_nl_compat_bearer_enable; 1096 doit.transcode = tipc_nl_compat_bearer_enable;
1094 return tipc_nl_compat_doit(&doit, msg); 1097 return tipc_nl_compat_doit(&doit, msg);
1095 case TIPC_CMD_DISABLE_BEARER: 1098 case TIPC_CMD_DISABLE_BEARER:
1096 msg->req_type = TIPC_TLV_BEARER_NAME; 1099 msg->req_type = TIPC_TLV_BEARER_NAME;
1097 doit.doit = tipc_nl_bearer_disable; 1100 doit.doit = __tipc_nl_bearer_disable;
1098 doit.transcode = tipc_nl_compat_bearer_disable; 1101 doit.transcode = tipc_nl_compat_bearer_disable;
1099 return tipc_nl_compat_doit(&doit, msg); 1102 return tipc_nl_compat_doit(&doit, msg);
1100 case TIPC_CMD_SHOW_LINK_STATS: 1103 case TIPC_CMD_SHOW_LINK_STATS:
@@ -1148,12 +1151,12 @@ static int tipc_nl_compat_handle(struct tipc_nl_compat_msg *msg)
1148 return tipc_nl_compat_dumpit(&dump, msg); 1151 return tipc_nl_compat_dumpit(&dump, msg);
1149 case TIPC_CMD_SET_NODE_ADDR: 1152 case TIPC_CMD_SET_NODE_ADDR:
1150 msg->req_type = TIPC_TLV_NET_ADDR; 1153 msg->req_type = TIPC_TLV_NET_ADDR;
1151 doit.doit = tipc_nl_net_set; 1154 doit.doit = __tipc_nl_net_set;
1152 doit.transcode = tipc_nl_compat_net_set; 1155 doit.transcode = tipc_nl_compat_net_set;
1153 return tipc_nl_compat_doit(&doit, msg); 1156 return tipc_nl_compat_doit(&doit, msg);
1154 case TIPC_CMD_SET_NETID: 1157 case TIPC_CMD_SET_NETID:
1155 msg->req_type = TIPC_TLV_UNSIGNED; 1158 msg->req_type = TIPC_TLV_UNSIGNED;
1156 doit.doit = tipc_nl_net_set; 1159 doit.doit = __tipc_nl_net_set;
1157 doit.transcode = tipc_nl_compat_net_set; 1160 doit.transcode = tipc_nl_compat_net_set;
1158 return tipc_nl_compat_doit(&doit, msg); 1161 return tipc_nl_compat_doit(&doit, msg);
1159 case TIPC_CMD_GET_NETID: 1162 case TIPC_CMD_GET_NETID:
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index b0d5fcea47e7..e9b4b53ab53e 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -308,8 +308,11 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
308 goto out; 308 goto out;
309 } 309 }
310 lock_sock(sk); 310 lock_sock(sk);
311 memcpy(crypto_info_aes_gcm_128->iv, ctx->iv, 311 memcpy(crypto_info_aes_gcm_128->iv,
312 ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
312 TLS_CIPHER_AES_GCM_128_IV_SIZE); 313 TLS_CIPHER_AES_GCM_128_IV_SIZE);
314 memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->rec_seq,
315 TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
313 release_sock(sk); 316 release_sock(sk);
314 if (copy_to_user(optval, 317 if (copy_to_user(optval,
315 crypto_info_aes_gcm_128, 318 crypto_info_aes_gcm_128,
@@ -375,7 +378,7 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
375 rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info)); 378 rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
376 if (rc) { 379 if (rc) {
377 rc = -EFAULT; 380 rc = -EFAULT;
378 goto out; 381 goto err_crypto_info;
379 } 382 }
380 383
381 /* check version */ 384 /* check version */
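The tls_main.c fix is about buffer layout: ctx->iv stores the 4-byte AES-GCM-128 salt followed by the 8-byte explicit IV, so do_tls_getsockopt_tx() must copy the IV from offset TLS_CIPHER_AES_GCM_128_SALT_SIZE (and also return rec_seq, which was missing). A minimal sketch of the salt||iv layout:

#include <stdio.h>
#include <string.h>

#define SALT_SIZE 4   /* TLS_CIPHER_AES_GCM_128_SALT_SIZE */
#define IV_SIZE   8   /* TLS_CIPHER_AES_GCM_128_IV_SIZE   */

/* The kernel keeps salt||iv contiguously, so returning "the IV" to
 * userspace means copying from offset SALT_SIZE, not from the start. */
int main(void)
{
    unsigned char ctx_iv[SALT_SIZE + IV_SIZE] =
        "SSSS"           /* 4-byte salt */
        "IIIIIIII";      /* 8-byte explicit IV */
    unsigned char out_iv[IV_SIZE];

    memcpy(out_iv, ctx_iv + SALT_SIZE, IV_SIZE);   /* skip the salt */
    printf("%.8s\n", out_iv);                      /* prints IIIIIIII */
    return 0;
}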
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index d545e1d0dea2..2d465bdeccbc 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1825,7 +1825,7 @@ out:
1825} 1825}
1826 1826
1827/* We use paged skbs for stream sockets, and limit occupancy to 32768 1827/* We use paged skbs for stream sockets, and limit occupancy to 32768
1828 * bytes, and a minimun of a full page. 1828 * bytes, and a minimum of a full page.
1829 */ 1829 */
1830#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768)) 1830#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
1831 1831
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 51aa55618ef7..b12da6ef3c12 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -170,9 +170,28 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
170 enum nl80211_bss_scan_width scan_width; 170 enum nl80211_bss_scan_width scan_width;
171 struct ieee80211_supported_band *sband = 171 struct ieee80211_supported_band *sband =
172 rdev->wiphy.bands[setup->chandef.chan->band]; 172 rdev->wiphy.bands[setup->chandef.chan->band];
173 scan_width = cfg80211_chandef_to_scan_width(&setup->chandef); 173
174 setup->basic_rates = ieee80211_mandatory_rates(sband, 174 if (setup->chandef.chan->band == NL80211_BAND_2GHZ) {
175 scan_width); 175 int i;
176
177 /*
178 * Older versions selected the mandatory rates for
179 * 2.4 GHz as well, but were broken in that only
180 * 1 Mbps was regarded as a mandatory rate. Keep
181 * using just 1 Mbps as the default basic rate for
182 * mesh to be interoperable with older versions.
183 */
184 for (i = 0; i < sband->n_bitrates; i++) {
185 if (sband->bitrates[i].bitrate == 10) {
186 setup->basic_rates = BIT(i);
187 break;
188 }
189 }
190 } else {
191 scan_width = cfg80211_chandef_to_scan_width(&setup->chandef);
192 setup->basic_rates = ieee80211_mandatory_rates(sband,
193 scan_width);
194 }
176 } 195 }
177 196
178 err = cfg80211_chandef_dfs_required(&rdev->wiphy, 197 err = cfg80211_chandef_dfs_required(&rdev->wiphy,
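The mesh change keys off the fact that cfg80211 stores bitrates in units of 100 kb/s: 1 Mbps is the entry with .bitrate == 10, and the basic-rate set is a bitmask indexed by the rate's position in the band table. A toy version of the lookup; struct rate stands in for ieee80211_rate:

#include <stdio.h>

struct rate { int bitrate; };   /* units of 100 kb/s */

int main(void)
{
    struct rate rates[] = { {10}, {20}, {55}, {110}, {60}, {90} };
    int n = sizeof(rates) / sizeof(rates[0]);
    unsigned int basic_rates = 0;

    for (int i = 0; i < n; i++) {
        if (rates[i].bitrate == 10) {  /* 1 Mbps */
            basic_rates = 1u << i;     /* BIT(i) */
            break;
        }
    }
    printf("basic_rates = 0x%x\n", basic_rates);   /* 0x1 */
    return 0;
}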
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index fdb3646274a5..701cfd7acc1b 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -1032,6 +1032,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
1032 wdev->current_bss = NULL; 1032 wdev->current_bss = NULL;
1033 wdev->ssid_len = 0; 1033 wdev->ssid_len = 0;
1034 wdev->conn_owner_nlportid = 0; 1034 wdev->conn_owner_nlportid = 0;
1035 kzfree(wdev->connect_keys);
1036 wdev->connect_keys = NULL;
1035 1037
1036 nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap); 1038 nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap);
1037 1039
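__cfg80211_disconnected() now kzfree()s wdev->connect_keys, both plugging a leak and ensuring key material is zeroed before the memory returns to the allocator. A userspace sketch of the wipe-before-free idea; secure_free() is an invented name, and the volatile walk is one common way to keep the wipe from being optimized away:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void secure_free(void *p, size_t len)
{
    if (!p)
        return;
    volatile unsigned char *v = p;   /* volatile resists dead-store elision */
    while (len--)
        *v++ = 0;
    free(p);
}

int main(void)
{
    char *keys = malloc(32);
    if (!keys)
        return 1;
    memcpy(keys, "supersecretwpakeys", 18);
    secure_free(keys, 32);           /* like kzfree(wdev->connect_keys) */
    puts("keys wiped and freed");
    return 0;
}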
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index 6f9e4ce568cd..9bb0a7f2863e 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -18,6 +18,7 @@
18#include <linux/cred.h> 18#include <linux/cred.h>
19#include <linux/key-type.h> 19#include <linux/key-type.h>
20#include <linux/digsig.h> 20#include <linux/digsig.h>
21#include <linux/vmalloc.h>
21#include <crypto/public_key.h> 22#include <crypto/public_key.h>
22#include <keys/system_keyring.h> 23#include <keys/system_keyring.h>
23 24
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
index 929e14978c42..fa728f662a6f 100644
--- a/security/keys/big_key.c
+++ b/security/keys/big_key.c
@@ -22,6 +22,13 @@
22#include <keys/big_key-type.h> 22#include <keys/big_key-type.h>
23#include <crypto/aead.h> 23#include <crypto/aead.h>
24 24
25struct big_key_buf {
26 unsigned int nr_pages;
27 void *virt;
28 struct scatterlist *sg;
29 struct page *pages[];
30};
31
25/* 32/*
26 * Layout of key payload words. 33 * Layout of key payload words.
27 */ 34 */
@@ -91,10 +98,9 @@ static DEFINE_MUTEX(big_key_aead_lock);
91/* 98/*
92 * Encrypt/decrypt big_key data 99 * Encrypt/decrypt big_key data
93 */ 100 */
94static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key) 101static int big_key_crypt(enum big_key_op op, struct big_key_buf *buf, size_t datalen, u8 *key)
95{ 102{
96 int ret; 103 int ret;
97 struct scatterlist sgio;
98 struct aead_request *aead_req; 104 struct aead_request *aead_req;
99 /* We always use a zero nonce. The reason we can get away with this is 105 /* We always use a zero nonce. The reason we can get away with this is
100 * because we're using a different randomly generated key for every 106 * because we're using a different randomly generated key for every
@@ -109,8 +115,7 @@ static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key)
109 return -ENOMEM; 115 return -ENOMEM;
110 116
111 memset(zero_nonce, 0, sizeof(zero_nonce)); 117 memset(zero_nonce, 0, sizeof(zero_nonce));
112 sg_init_one(&sgio, data, datalen + (op == BIG_KEY_ENC ? ENC_AUTHTAG_SIZE : 0)); 118 aead_request_set_crypt(aead_req, buf->sg, buf->sg, datalen, zero_nonce);
113 aead_request_set_crypt(aead_req, &sgio, &sgio, datalen, zero_nonce);
114 aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); 119 aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
115 aead_request_set_ad(aead_req, 0); 120 aead_request_set_ad(aead_req, 0);
116 121
@@ -130,21 +135,81 @@ error:
130} 135}
131 136
132/* 137/*
138 * Free up the buffer.
139 */
140static void big_key_free_buffer(struct big_key_buf *buf)
141{
142 unsigned int i;
143
144 if (buf->virt) {
145 memset(buf->virt, 0, buf->nr_pages * PAGE_SIZE);
146 vunmap(buf->virt);
147 }
148
149 for (i = 0; i < buf->nr_pages; i++)
150 if (buf->pages[i])
151 __free_page(buf->pages[i]);
152
153 kfree(buf);
154}
155
156/*
157 * Allocate a buffer consisting of a set of pages with a virtual mapping
158 * applied over them.
159 */
160static void *big_key_alloc_buffer(size_t len)
161{
162 struct big_key_buf *buf;
163 unsigned int npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
164 unsigned int i, l;
165
166 buf = kzalloc(sizeof(struct big_key_buf) +
167 sizeof(struct page) * npg +
168 sizeof(struct scatterlist) * npg,
169 GFP_KERNEL);
170 if (!buf)
171 return NULL;
172
173 buf->nr_pages = npg;
174 buf->sg = (void *)(buf->pages + npg);
175 sg_init_table(buf->sg, npg);
176
177 for (i = 0; i < buf->nr_pages; i++) {
178 buf->pages[i] = alloc_page(GFP_KERNEL);
179 if (!buf->pages[i])
180 goto nomem;
181
182 l = min_t(size_t, len, PAGE_SIZE);
183 sg_set_page(&buf->sg[i], buf->pages[i], l, 0);
184 len -= l;
185 }
186
187 buf->virt = vmap(buf->pages, buf->nr_pages, VM_MAP, PAGE_KERNEL);
188 if (!buf->virt)
189 goto nomem;
190
191 return buf;
192
193nomem:
194 big_key_free_buffer(buf);
195 return NULL;
196}
197
198/*
133 * Preparse a big key 199 * Preparse a big key
134 */ 200 */
135int big_key_preparse(struct key_preparsed_payload *prep) 201int big_key_preparse(struct key_preparsed_payload *prep)
136{ 202{
203 struct big_key_buf *buf;
137 struct path *path = (struct path *)&prep->payload.data[big_key_path]; 204 struct path *path = (struct path *)&prep->payload.data[big_key_path];
138 struct file *file; 205 struct file *file;
139 u8 *enckey; 206 u8 *enckey;
140 u8 *data = NULL;
141 ssize_t written; 207 ssize_t written;
142 size_t datalen = prep->datalen; 208 size_t datalen = prep->datalen, enclen = datalen + ENC_AUTHTAG_SIZE;
143 int ret; 209 int ret;
144 210
145 ret = -EINVAL;
146 if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data) 211 if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data)
147 goto error; 212 return -EINVAL;
148 213
149 /* Set an arbitrary quota */ 214 /* Set an arbitrary quota */
150 prep->quotalen = 16; 215 prep->quotalen = 16;
@@ -157,13 +222,12 @@ int big_key_preparse(struct key_preparsed_payload *prep)
157 * 222 *
158 * File content is stored encrypted with randomly generated key. 223 * File content is stored encrypted with randomly generated key.
159 */ 224 */
160 size_t enclen = datalen + ENC_AUTHTAG_SIZE;
161 loff_t pos = 0; 225 loff_t pos = 0;
162 226
163 data = kmalloc(enclen, GFP_KERNEL); 227 buf = big_key_alloc_buffer(enclen);
164 if (!data) 228 if (!buf)
165 return -ENOMEM; 229 return -ENOMEM;
166 memcpy(data, prep->data, datalen); 230 memcpy(buf->virt, prep->data, datalen);
167 231
168 /* generate random key */ 232 /* generate random key */
169 enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL); 233 enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL);
@@ -176,7 +240,7 @@ int big_key_preparse(struct key_preparsed_payload *prep)
176 goto err_enckey; 240 goto err_enckey;
177 241
178 /* encrypt aligned data */ 242 /* encrypt aligned data */
179 ret = big_key_crypt(BIG_KEY_ENC, data, datalen, enckey); 243 ret = big_key_crypt(BIG_KEY_ENC, buf, datalen, enckey);
180 if (ret) 244 if (ret)
181 goto err_enckey; 245 goto err_enckey;
182 246
@@ -187,7 +251,7 @@ int big_key_preparse(struct key_preparsed_payload *prep)
187 goto err_enckey; 251 goto err_enckey;
188 } 252 }
189 253
190 written = kernel_write(file, data, enclen, &pos); 254 written = kernel_write(file, buf->virt, enclen, &pos);
191 if (written != enclen) { 255 if (written != enclen) {
192 ret = written; 256 ret = written;
193 if (written >= 0) 257 if (written >= 0)
@@ -202,7 +266,7 @@ int big_key_preparse(struct key_preparsed_payload *prep)
202 *path = file->f_path; 266 *path = file->f_path;
203 path_get(path); 267 path_get(path);
204 fput(file); 268 fput(file);
205 kzfree(data); 269 big_key_free_buffer(buf);
206 } else { 270 } else {
207 /* Just store the data in a buffer */ 271 /* Just store the data in a buffer */
208 void *data = kmalloc(datalen, GFP_KERNEL); 272 void *data = kmalloc(datalen, GFP_KERNEL);
@@ -220,7 +284,7 @@ err_fput:
220err_enckey: 284err_enckey:
221 kzfree(enckey); 285 kzfree(enckey);
222error: 286error:
223 kzfree(data); 287 big_key_free_buffer(buf);
224 return ret; 288 return ret;
225} 289}
226 290
@@ -298,15 +362,15 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
298 return datalen; 362 return datalen;
299 363
300 if (datalen > BIG_KEY_FILE_THRESHOLD) { 364 if (datalen > BIG_KEY_FILE_THRESHOLD) {
365 struct big_key_buf *buf;
301 struct path *path = (struct path *)&key->payload.data[big_key_path]; 366 struct path *path = (struct path *)&key->payload.data[big_key_path];
302 struct file *file; 367 struct file *file;
303 u8 *data;
304 u8 *enckey = (u8 *)key->payload.data[big_key_data]; 368 u8 *enckey = (u8 *)key->payload.data[big_key_data];
305 size_t enclen = datalen + ENC_AUTHTAG_SIZE; 369 size_t enclen = datalen + ENC_AUTHTAG_SIZE;
306 loff_t pos = 0; 370 loff_t pos = 0;
307 371
308 data = kmalloc(enclen, GFP_KERNEL); 372 buf = big_key_alloc_buffer(enclen);
309 if (!data) 373 if (!buf)
310 return -ENOMEM; 374 return -ENOMEM;
311 375
312 file = dentry_open(path, O_RDONLY, current_cred()); 376 file = dentry_open(path, O_RDONLY, current_cred());
@@ -316,26 +380,26 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
316 } 380 }
317 381
318 /* read file to kernel and decrypt */ 382 /* read file to kernel and decrypt */
319 ret = kernel_read(file, data, enclen, &pos); 383 ret = kernel_read(file, buf->virt, enclen, &pos);
320 if (ret >= 0 && ret != enclen) { 384 if (ret >= 0 && ret != enclen) {
321 ret = -EIO; 385 ret = -EIO;
322 goto err_fput; 386 goto err_fput;
323 } 387 }
324 388
325 ret = big_key_crypt(BIG_KEY_DEC, data, enclen, enckey); 389 ret = big_key_crypt(BIG_KEY_DEC, buf, enclen, enckey);
326 if (ret) 390 if (ret)
327 goto err_fput; 391 goto err_fput;
328 392
329 ret = datalen; 393 ret = datalen;
330 394
331 /* copy decrypted data to user */ 395 /* copy decrypted data to user */
332 if (copy_to_user(buffer, data, datalen) != 0) 396 if (copy_to_user(buffer, buf->virt, datalen) != 0)
333 ret = -EFAULT; 397 ret = -EFAULT;
334 398
335err_fput: 399err_fput:
336 fput(file); 400 fput(file);
337error: 401error:
338 kzfree(data); 402 big_key_free_buffer(buf);
339 } else { 403 } else {
340 ret = datalen; 404 ret = datalen;
341 if (copy_to_user(buffer, key->payload.data[big_key_data], 405 if (copy_to_user(buffer, key->payload.data[big_key_data],
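The big_key rework replaces one large kmalloc with a set of individually allocated pages, described by a scatterlist for the AEAD and vmap()ed for a linear view. Note how big_key_alloc_buffer() rounds the length up to whole pages and carves the scatterlist out of the tail of the same allocation (buf->sg = (void *)(buf->pages + npg)). A userspace sketch of just that sizing and layout arithmetic; struct desc stands in for the scatterlist:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u

struct desc { void *page; unsigned int len; };   /* stand-in for scatterlist */

struct buf {
    unsigned int nr_pages;
    struct desc *sg;
    void *pages[];            /* flexible trailing array */
};

int main(void)
{
    size_t len = 10000;       /* needs 3 pages */
    unsigned int npg = (len + PAGE_SIZE - 1) / PAGE_SIZE;

    /* one block: header, page-pointer array, then the descriptors */
    struct buf *b = calloc(1, sizeof(*b) + npg * sizeof(void *)
                              + npg * sizeof(struct desc));
    if (!b)
        return 1;
    b->nr_pages = npg;
    b->sg = (struct desc *)(b->pages + npg);   /* carved from same block */

    for (unsigned int i = 0; i < npg; i++) {
        unsigned int l = len < PAGE_SIZE ? (unsigned int)len : PAGE_SIZE;
        b->sg[i].len = l;
        len -= l;
    }
    printf("%u pages, last chunk %u bytes\n", b->nr_pages, b->sg[npg - 1].len);
    free(b);
    return 0;
}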
diff --git a/sound/ac97/Kconfig b/sound/ac97/Kconfig
index f8a64e15e5bf..baa5f8ef89d2 100644
--- a/sound/ac97/Kconfig
+++ b/sound/ac97/Kconfig
@@ -5,7 +5,6 @@
5 5
6config AC97_BUS_NEW 6config AC97_BUS_NEW
7 tristate 7 tristate
8 select AC97
9 help 8 help
10 This is the new AC97 bus type, successor of AC97_BUS. The ported 9 This is the new AC97 bus type, successor of AC97_BUS. The ported
11 drivers which benefit from the AC97 automatic probing should "select" 10 drivers which benefit from the AC97 automatic probing should "select"
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 60db32785f62..04d4db44fae5 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1003,7 +1003,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
1003{ 1003{
1004 struct snd_seq_client *client = file->private_data; 1004 struct snd_seq_client *client = file->private_data;
1005 int written = 0, len; 1005 int written = 0, len;
1006 int err = -EINVAL; 1006 int err;
1007 struct snd_seq_event event; 1007 struct snd_seq_event event;
1008 1008
1009 if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT)) 1009 if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
@@ -1018,11 +1018,15 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
1018 1018
1019 /* allocate the pool now if the pool is not allocated yet */ 1019 /* allocate the pool now if the pool is not allocated yet */
1020 if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) { 1020 if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
1021 if (snd_seq_pool_init(client->pool) < 0) 1021 mutex_lock(&client->ioctl_mutex);
1022 err = snd_seq_pool_init(client->pool);
1023 mutex_unlock(&client->ioctl_mutex);
1024 if (err < 0)
1022 return -ENOMEM; 1025 return -ENOMEM;
1023 } 1026 }
1024 1027
1025 /* only process whole events */ 1028 /* only process whole events */
1029 err = -EINVAL;
1026 while (count >= sizeof(struct snd_seq_event)) { 1030 while (count >= sizeof(struct snd_seq_event)) {
1027 /* Read in the event header from the user */ 1031 /* Read in the event header from the user */
1028 len = sizeof(event); 1032 len = sizeof(event);
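The snd_seq_write() fix serializes the lazy pool allocation with the ioctl path by doing it under client->ioctl_mutex; without that, two paths could both observe the pool as unallocated and initialize it concurrently. A pthread sketch of check-then-init done safely under one mutex (names invented for the example):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t ioctl_mutex = PTHREAD_MUTEX_INITIALIZER;
static int *pool;    /* stands in for the client's event pool */

/* The "allocated yet?" test and the allocation must happen under the
 * same mutex the ioctl path uses, or both callers can see !pool. */
static int pool_init_once(size_t size)
{
    int err = 0;

    pthread_mutex_lock(&ioctl_mutex);
    if (!pool) {
        pool = calloc(size, sizeof(*pool));
        if (!pool)
            err = -1;
    }
    pthread_mutex_unlock(&ioctl_mutex);
    return err;
}

int main(void)
{
    if (pool_init_once(64) < 0)
        return 1;
    printf("pool ready\n");
    free(pool);
    return 0;
}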
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 23475888192b..ce28f7ce64e6 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3465,6 +3465,19 @@ static void alc269_fixup_pincfg_no_hp_to_lineout(struct hda_codec *codec,
3465 spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; 3465 spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
3466} 3466}
3467 3467
3468static void alc269_fixup_pincfg_U7x7_headset_mic(struct hda_codec *codec,
3469 const struct hda_fixup *fix,
3470 int action)
3471{
3472 unsigned int cfg_headphone = snd_hda_codec_get_pincfg(codec, 0x21);
3473 unsigned int cfg_headset_mic = snd_hda_codec_get_pincfg(codec, 0x19);
3474
3475 if (cfg_headphone && cfg_headset_mic == 0x411111f0)
3476 snd_hda_codec_set_pincfg(codec, 0x19,
3477 (cfg_headphone & ~AC_DEFCFG_DEVICE) |
3478 (AC_JACK_MIC_IN << AC_DEFCFG_DEVICE_SHIFT));
3479}
3480
3468static void alc269_fixup_hweq(struct hda_codec *codec, 3481static void alc269_fixup_hweq(struct hda_codec *codec,
3469 const struct hda_fixup *fix, int action) 3482 const struct hda_fixup *fix, int action)
3470{ 3483{
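alc269_fixup_pincfg_U7x7_headset_mic() above rewrites one field of the 32-bit pin default configuration: if the headset-mic pin still carries the "not connected" value 0x411111f0, its device-type field (bits 23:20 in the HDA layout) is replaced with mic-in via the usual mask-out/shift-in idiom. A standalone illustration of that read-modify-write:

#include <stdio.h>

#define DEVICE_MASK  (0xfu << 20)     /* AC_DEFCFG_DEVICE */
#define DEVICE_SHIFT 20               /* AC_DEFCFG_DEVICE_SHIFT */
#define JACK_MIC_IN  0xau             /* AC_JACK_MIC_IN */

int main(void)
{
    unsigned int cfg = 0x411111f0;    /* the "not present" headset-mic cfg */
    unsigned int new_cfg = (cfg & ~DEVICE_MASK) |
                           (JACK_MIC_IN << DEVICE_SHIFT);

    printf("0x%08x -> 0x%08x\n", cfg, new_cfg);   /* 0x411111f0 -> 0x41a111f0 */
    return 0;
}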
@@ -4972,6 +4985,28 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
4972 } 4985 }
4973} 4986}
4974 4987
4988static void alc_fixup_tpt470_dock(struct hda_codec *codec,
4989 const struct hda_fixup *fix, int action)
4990{
4991 static const struct hda_pintbl pincfgs[] = {
4992 { 0x17, 0x21211010 }, /* dock headphone */
4993 { 0x19, 0x21a11010 }, /* dock mic */
4994 { }
4995 };
4996 struct alc_spec *spec = codec->spec;
4997
4998 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
4999 spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
5000 /* Enable DOCK device */
5001 snd_hda_codec_write(codec, 0x17, 0,
5002 AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
5003 /* Enable DOCK device */
5004 snd_hda_codec_write(codec, 0x19, 0,
5005 AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
5006 snd_hda_apply_pincfgs(codec, pincfgs);
5007 }
5008}
5009
4975static void alc_shutup_dell_xps13(struct hda_codec *codec) 5010static void alc_shutup_dell_xps13(struct hda_codec *codec)
4976{ 5011{
4977 struct alc_spec *spec = codec->spec; 5012 struct alc_spec *spec = codec->spec;
@@ -5351,6 +5386,7 @@ enum {
5351 ALC269_FIXUP_LIFEBOOK_EXTMIC, 5386 ALC269_FIXUP_LIFEBOOK_EXTMIC,
5352 ALC269_FIXUP_LIFEBOOK_HP_PIN, 5387 ALC269_FIXUP_LIFEBOOK_HP_PIN,
5353 ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT, 5388 ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT,
5389 ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC,
5354 ALC269_FIXUP_AMIC, 5390 ALC269_FIXUP_AMIC,
5355 ALC269_FIXUP_DMIC, 5391 ALC269_FIXUP_DMIC,
5356 ALC269VB_FIXUP_AMIC, 5392 ALC269VB_FIXUP_AMIC,
@@ -5446,6 +5482,7 @@ enum {
5446 ALC700_FIXUP_INTEL_REFERENCE, 5482 ALC700_FIXUP_INTEL_REFERENCE,
5447 ALC274_FIXUP_DELL_BIND_DACS, 5483 ALC274_FIXUP_DELL_BIND_DACS,
5448 ALC274_FIXUP_DELL_AIO_LINEOUT_VERB, 5484 ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
5485 ALC298_FIXUP_TPT470_DOCK,
5449}; 5486};
5450 5487
5451static const struct hda_fixup alc269_fixups[] = { 5488static const struct hda_fixup alc269_fixups[] = {
@@ -5556,6 +5593,10 @@ static const struct hda_fixup alc269_fixups[] = {
5556 .type = HDA_FIXUP_FUNC, 5593 .type = HDA_FIXUP_FUNC,
5557 .v.func = alc269_fixup_pincfg_no_hp_to_lineout, 5594 .v.func = alc269_fixup_pincfg_no_hp_to_lineout,
5558 }, 5595 },
5596 [ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC] = {
5597 .type = HDA_FIXUP_FUNC,
5598 .v.func = alc269_fixup_pincfg_U7x7_headset_mic,
5599 },
5559 [ALC269_FIXUP_AMIC] = { 5600 [ALC269_FIXUP_AMIC] = {
5560 .type = HDA_FIXUP_PINS, 5601 .type = HDA_FIXUP_PINS,
5561 .v.pins = (const struct hda_pintbl[]) { 5602 .v.pins = (const struct hda_pintbl[]) {
@@ -6271,6 +6312,12 @@ static const struct hda_fixup alc269_fixups[] = {
6271 .chained = true, 6312 .chained = true,
6272 .chain_id = ALC274_FIXUP_DELL_BIND_DACS 6313 .chain_id = ALC274_FIXUP_DELL_BIND_DACS
6273 }, 6314 },
6315 [ALC298_FIXUP_TPT470_DOCK] = {
6316 .type = HDA_FIXUP_FUNC,
6317 .v.func = alc_fixup_tpt470_dock,
6318 .chained = true,
6319 .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE
6320 },
6274}; 6321};
6275 6322
6276static const struct snd_pci_quirk alc269_fixup_tbl[] = { 6323static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6321,6 +6368,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6321 SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME), 6368 SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
6322 SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), 6369 SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
6323 SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), 6370 SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
6371 SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
6372 SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
6324 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 6373 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
6325 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 6374 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
6326 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), 6375 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -6422,6 +6471,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6422 SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT), 6471 SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT),
6423 SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN), 6472 SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
6424 SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN), 6473 SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
6474 SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
6425 SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), 6475 SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
6426 SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE), 6476 SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
6427 SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), 6477 SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
@@ -6450,8 +6500,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6450 SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK), 6500 SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
6451 SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK), 6501 SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
6452 SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK), 6502 SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
6503 SND_PCI_QUIRK(0x17aa, 0x222d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6504 SND_PCI_QUIRK(0x17aa, 0x222e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6453 SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460), 6505 SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460),
6454 SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460), 6506 SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
6507 SND_PCI_QUIRK(0x17aa, 0x2245, "Thinkpad T470", ALC298_FIXUP_TPT470_DOCK),
6508 SND_PCI_QUIRK(0x17aa, 0x2246, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6509 SND_PCI_QUIRK(0x17aa, 0x2247, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6510 SND_PCI_QUIRK(0x17aa, 0x224b, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6511 SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6512 SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6455 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 6513 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6456 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 6514 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6457 SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 6515 SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
@@ -6472,7 +6530,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6472 SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460), 6530 SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
6473 SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460), 6531 SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460),
6474 SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460), 6532 SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
6533 SND_PCI_QUIRK(0x17aa, 0x505d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6534 SND_PCI_QUIRK(0x17aa, 0x505f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6535 SND_PCI_QUIRK(0x17aa, 0x5062, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6475 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 6536 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
6537 SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6538 SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6476 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), 6539 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
6477 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), 6540 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
6478 SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */ 6541 SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
@@ -6735,6 +6798,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
6735 {0x14, 0x90170110}, 6798 {0x14, 0x90170110},
6736 {0x21, 0x02211020}), 6799 {0x21, 0x02211020}),
6737 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 6800 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
6801 {0x12, 0x90a60130},
6802 {0x14, 0x90170110},
6803 {0x14, 0x01011020},
6804 {0x21, 0x0221101f}),
6805 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
6738 ALC256_STANDARD_PINS), 6806 ALC256_STANDARD_PINS),
6739 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC, 6807 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC,
6740 {0x14, 0x90170110}, 6808 {0x14, 0x90170110},
@@ -6803,6 +6871,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
6803 {0x12, 0x90a60120}, 6871 {0x12, 0x90a60120},
6804 {0x14, 0x90170110}, 6872 {0x14, 0x90170110},
6805 {0x21, 0x0321101f}), 6873 {0x21, 0x0321101f}),
6874 SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
6875 {0x12, 0xb7a60130},
6876 {0x14, 0x90170110},
6877 {0x21, 0x04211020}),
6806 SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1, 6878 SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
6807 ALC290_STANDARD_PINS, 6879 ALC290_STANDARD_PINS,
6808 {0x15, 0x04211040}, 6880 {0x15, 0x04211040},
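The SND_HDA_PIN_QUIRK entries above match a codec by the 32-bit default-configuration values programmed on its pins; the fields inside each value follow the HDA specification's pin-config layout. As a rough standalone sketch (not code from this patch; the decoder and its field names are illustrative), the values can be unpacked like this:

/* Hedged sketch: unpack an HDA pin default-config dword as used in the
 * SND_HDA_PIN_QUIRK entries above. Field layout per the HDA spec:
 * port connectivity [31:30], location [29:24], default device [23:20],
 * connection type [19:16], color [15:12], misc [11:8],
 * default association [7:4], sequence [3:0]. */
#include <stdio.h>
#include <stdint.h>

static void decode_pincfg(unsigned int nid, uint32_t cfg)
{
	printf("NID 0x%02x: conn=%u loc=0x%02x dev=0x%x type=0x%x "
	       "color=0x%x misc=0x%x assoc=0x%x seq=0x%x\n",
	       nid,
	       (cfg >> 30) & 0x3,	/* port connectivity */
	       (cfg >> 24) & 0x3f,	/* geometric location */
	       (cfg >> 20) & 0xf,	/* default device (0xa = mic in) */
	       (cfg >> 16) & 0xf,	/* connection type */
	       (cfg >> 12) & 0xf,	/* color */
	       (cfg >> 8) & 0xf,	/* misc */
	       (cfg >> 4) & 0xf,	/* default association */
	       cfg & 0xf);		/* sequence */
}

int main(void)
{
	decode_pincfg(0x12, 0xb7a60130);	/* digital-mic pin from above */
	decode_pincfg(0x21, 0x04211020);	/* headphone-jack pin from above */
	return 0;
}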
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 9afb8ab524c7..06b22624ab7a 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -347,17 +347,20 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request,
347 int validx, int *value_ret) 347 int validx, int *value_ret)
348{ 348{
349 struct snd_usb_audio *chip = cval->head.mixer->chip; 349 struct snd_usb_audio *chip = cval->head.mixer->chip;
350 unsigned char buf[4 + 3 * sizeof(__u32)]; /* enough space for one range */ 350 /* enough space for one range */
351 unsigned char buf[sizeof(__u16) + 3 * sizeof(__u32)];
351 unsigned char *val; 352 unsigned char *val;
352 int idx = 0, ret, size; 353 int idx = 0, ret, val_size, size;
353 __u8 bRequest; 354 __u8 bRequest;
354 355
356 val_size = uac2_ctl_value_size(cval->val_type);
357
355 if (request == UAC_GET_CUR) { 358 if (request == UAC_GET_CUR) {
356 bRequest = UAC2_CS_CUR; 359 bRequest = UAC2_CS_CUR;
357 size = uac2_ctl_value_size(cval->val_type); 360 size = val_size;
358 } else { 361 } else {
359 bRequest = UAC2_CS_RANGE; 362 bRequest = UAC2_CS_RANGE;
360 size = sizeof(buf); 363 size = sizeof(__u16) + 3 * val_size;
361 } 364 }
362 365
363 memset(buf, 0, sizeof(buf)); 366 memset(buf, 0, sizeof(buf));
@@ -390,16 +393,17 @@ error:
390 val = buf + sizeof(__u16); 393 val = buf + sizeof(__u16);
391 break; 394 break;
392 case UAC_GET_MAX: 395 case UAC_GET_MAX:
393 val = buf + sizeof(__u16) * 2; 396 val = buf + sizeof(__u16) + val_size;
394 break; 397 break;
395 case UAC_GET_RES: 398 case UAC_GET_RES:
396 val = buf + sizeof(__u16) * 3; 399 val = buf + sizeof(__u16) + val_size * 2;
397 break; 400 break;
398 default: 401 default:
399 return -EINVAL; 402 return -EINVAL;
400 } 403 }
401 404
402 *value_ret = convert_signed_value(cval, snd_usb_combine_bytes(val, sizeof(__u16))); 405 *value_ret = convert_signed_value(cval,
406 snd_usb_combine_bytes(val, val_size));
403 407
404 return 0; 408 return 0;
405} 409}
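The mixer change above stops assuming 16-bit control values: the RANGE reply is now sized as a 16-bit wNumSubRanges count followed by MIN/MAX/RES fields of val_size bytes each, and the reads index into the buffer accordingly. A small standalone sketch of the resulting offset arithmetic (illustration only, not the driver function):

/* Sketch of the UAC2 RANGE layout the patch above implements: the reply
 * starts with a 16-bit wNumSubRanges count, followed by MIN/MAX/RES
 * fields of val_size bytes each. Offsets mirror the new code. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

int main(void)
{
	size_t val_size = sizeof(uint32_t);		  /* e.g. a 32-bit control */
	size_t off_min = sizeof(uint16_t);		  /* UAC_GET_MIN */
	size_t off_max = sizeof(uint16_t) + val_size;	  /* UAC_GET_MAX */
	size_t off_res = sizeof(uint16_t) + val_size * 2; /* UAC_GET_RES */
	size_t total   = sizeof(uint16_t) + val_size * 3; /* one full range */

	printf("min@%zu max@%zu res@%zu, %zu bytes requested\n",
	       off_min, off_max, off_res, total);
	return 0;
}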
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index b9c9a19f9588..3cbfae6604f9 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -357,6 +357,15 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
357 357
358 alts = &iface->altsetting[1]; 358 alts = &iface->altsetting[1];
359 goto add_sync_ep; 359 goto add_sync_ep;
360 case USB_ID(0x1397, 0x0002):
361 ep = 0x81;
362 iface = usb_ifnum_to_if(dev, 1);
363
364 if (!iface || iface->num_altsetting == 0)
365 return -EINVAL;
366
367 alts = &iface->altsetting[1];
368 goto add_sync_ep;
360 369
361 } 370 }
362 if (attr == USB_ENDPOINT_SYNC_ASYNC && 371 if (attr == USB_ENDPOINT_SYNC_ASYNC &&
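For the new 1397:0002 entry above, the sync endpoint address 0x81 decodes as IN endpoint 1, i.e. most likely the capture endpoint doubling as the implicit feedback source. A trivial standalone sketch of that decoding (illustrative only):

/* Side note on the quirk above: USB endpoint addresses carry the
 * endpoint number in bits 3:0 and the direction in bit 7
 * (set = device-to-host, i.e. IN). */
#include <stdio.h>

int main(void)
{
	unsigned char ep = 0x81;

	printf("endpoint %u, direction %s\n",
	       ep & 0x0f,		      /* endpoint number */
	       (ep & 0x80) ? "IN" : "OUT");   /* USB_DIR_IN bit   */
	return 0;
}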
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index a66ef5777887..ea8f3de92fa4 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1363,8 +1363,11 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
1363 return SNDRV_PCM_FMTBIT_DSD_U32_BE; 1363 return SNDRV_PCM_FMTBIT_DSD_U32_BE;
1364 break; 1364 break;
1365 1365
1366 /* Amanero Combo384 USB interface with native DSD support */ 1366 /* Amanero Combo384 USB based DACs with native DSD support */
1367 case USB_ID(0x16d0, 0x071a): 1367 case USB_ID(0x16d0, 0x071a): /* Amanero - Combo384 */
1368 case USB_ID(0x2ab6, 0x0004): /* T+A DAC8DSD-V2.0, MP1000E-V2.0, MP2000R-V2.0, MP2500R-V2.0, MP3100HV-V2.0 */
1369 case USB_ID(0x2ab6, 0x0005): /* T+A USB HD Audio 1 */
1370 case USB_ID(0x2ab6, 0x0006): /* T+A USB HD Audio 2 */
1368 if (fp->altsetting == 2) { 1371 if (fp->altsetting == 2) {
1369 switch (le16_to_cpu(chip->dev->descriptor.bcdDevice)) { 1372 switch (le16_to_cpu(chip->dev->descriptor.bcdDevice)) {
1370 case 0x199: 1373 case 0x199:
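The new T+A IDs above are matched through USB_ID(), which packs vendor and product into one 32-bit key so a single switch can cover all the Amanero-based devices. A standalone sketch of that matching (macro definitions mirrored from sound/usb/usbaudio.h; the surrounding program is illustrative):

/* Sketch of the USB_ID matching used above. */
#include <stdio.h>
#include <stdint.h>

#define USB_ID(vendor, product) (((uint32_t)(vendor) << 16) | (product))
#define USB_ID_VENDOR(id)  ((uint16_t)((id) >> 16))
#define USB_ID_PRODUCT(id) ((uint16_t)(id))

int main(void)
{
	uint32_t id = USB_ID(0x2ab6, 0x0005); /* T+A USB HD Audio 1 */

	switch (id) {
	case USB_ID(0x16d0, 0x071a):
	case USB_ID(0x2ab6, 0x0004):
	case USB_ID(0x2ab6, 0x0005):
	case USB_ID(0x2ab6, 0x0006):
		printf("DSD-capable: %04x:%04x\n",
		       USB_ID_VENDOR(id), USB_ID_PRODUCT(id));
		break;
	default:
		printf("no DSD quirk\n");
	}
	return 0;
}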
diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h
index 637b7263cb86..833ed9a16adf 100644
--- a/tools/arch/powerpc/include/uapi/asm/kvm.h
+++ b/tools/arch/powerpc/include/uapi/asm/kvm.h
@@ -632,6 +632,8 @@ struct kvm_ppc_cpu_char {
632#define KVM_REG_PPC_TIDR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbc) 632#define KVM_REG_PPC_TIDR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbc)
633#define KVM_REG_PPC_PSSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd) 633#define KVM_REG_PPC_PSSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd)
634 634
635#define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe)
636
635/* Transactional Memory checkpointed state: 637/* Transactional Memory checkpointed state:
636 * This is all GPRs, all VSX regs and a subset of SPRs 638 * This is all GPRs, all VSX regs and a subset of SPRs
637 */ 639 */
diff --git a/tools/arch/s390/include/uapi/asm/unistd.h b/tools/arch/s390/include/uapi/asm/unistd.h
deleted file mode 100644
index 725120939051..000000000000
--- a/tools/arch/s390/include/uapi/asm/unistd.h
+++ /dev/null
@@ -1,412 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2/*
3 * S390 version
4 *
5 * Derived from "include/asm-i386/unistd.h"
6 */
7
8#ifndef _UAPI_ASM_S390_UNISTD_H_
9#define _UAPI_ASM_S390_UNISTD_H_
10
11/*
12 * This file contains the system call numbers.
13 */
14
15#define __NR_exit 1
16#define __NR_fork 2
17#define __NR_read 3
18#define __NR_write 4
19#define __NR_open 5
20#define __NR_close 6
21#define __NR_restart_syscall 7
22#define __NR_creat 8
23#define __NR_link 9
24#define __NR_unlink 10
25#define __NR_execve 11
26#define __NR_chdir 12
27#define __NR_mknod 14
28#define __NR_chmod 15
29#define __NR_lseek 19
30#define __NR_getpid 20
31#define __NR_mount 21
32#define __NR_umount 22
33#define __NR_ptrace 26
34#define __NR_alarm 27
35#define __NR_pause 29
36#define __NR_utime 30
37#define __NR_access 33
38#define __NR_nice 34
39#define __NR_sync 36
40#define __NR_kill 37
41#define __NR_rename 38
42#define __NR_mkdir 39
43#define __NR_rmdir 40
44#define __NR_dup 41
45#define __NR_pipe 42
46#define __NR_times 43
47#define __NR_brk 45
48#define __NR_signal 48
49#define __NR_acct 51
50#define __NR_umount2 52
51#define __NR_ioctl 54
52#define __NR_fcntl 55
53#define __NR_setpgid 57
54#define __NR_umask 60
55#define __NR_chroot 61
56#define __NR_ustat 62
57#define __NR_dup2 63
58#define __NR_getppid 64
59#define __NR_getpgrp 65
60#define __NR_setsid 66
61#define __NR_sigaction 67
62#define __NR_sigsuspend 72
63#define __NR_sigpending 73
64#define __NR_sethostname 74
65#define __NR_setrlimit 75
66#define __NR_getrusage 77
67#define __NR_gettimeofday 78
68#define __NR_settimeofday 79
69#define __NR_symlink 83
70#define __NR_readlink 85
71#define __NR_uselib 86
72#define __NR_swapon 87
73#define __NR_reboot 88
74#define __NR_readdir 89
75#define __NR_mmap 90
76#define __NR_munmap 91
77#define __NR_truncate 92
78#define __NR_ftruncate 93
79#define __NR_fchmod 94
80#define __NR_getpriority 96
81#define __NR_setpriority 97
82#define __NR_statfs 99
83#define __NR_fstatfs 100
84#define __NR_socketcall 102
85#define __NR_syslog 103
86#define __NR_setitimer 104
87#define __NR_getitimer 105
88#define __NR_stat 106
89#define __NR_lstat 107
90#define __NR_fstat 108
91#define __NR_lookup_dcookie 110
92#define __NR_vhangup 111
93#define __NR_idle 112
94#define __NR_wait4 114
95#define __NR_swapoff 115
96#define __NR_sysinfo 116
97#define __NR_ipc 117
98#define __NR_fsync 118
99#define __NR_sigreturn 119
100#define __NR_clone 120
101#define __NR_setdomainname 121
102#define __NR_uname 122
103#define __NR_adjtimex 124
104#define __NR_mprotect 125
105#define __NR_sigprocmask 126
106#define __NR_create_module 127
107#define __NR_init_module 128
108#define __NR_delete_module 129
109#define __NR_get_kernel_syms 130
110#define __NR_quotactl 131
111#define __NR_getpgid 132
112#define __NR_fchdir 133
113#define __NR_bdflush 134
114#define __NR_sysfs 135
115#define __NR_personality 136
116#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
117#define __NR_getdents 141
118#define __NR_flock 143
119#define __NR_msync 144
120#define __NR_readv 145
121#define __NR_writev 146
122#define __NR_getsid 147
123#define __NR_fdatasync 148
124#define __NR__sysctl 149
125#define __NR_mlock 150
126#define __NR_munlock 151
127#define __NR_mlockall 152
128#define __NR_munlockall 153
129#define __NR_sched_setparam 154
130#define __NR_sched_getparam 155
131#define __NR_sched_setscheduler 156
132#define __NR_sched_getscheduler 157
133#define __NR_sched_yield 158
134#define __NR_sched_get_priority_max 159
135#define __NR_sched_get_priority_min 160
136#define __NR_sched_rr_get_interval 161
137#define __NR_nanosleep 162
138#define __NR_mremap 163
139#define __NR_query_module 167
140#define __NR_poll 168
141#define __NR_nfsservctl 169
142#define __NR_prctl 172
143#define __NR_rt_sigreturn 173
144#define __NR_rt_sigaction 174
145#define __NR_rt_sigprocmask 175
146#define __NR_rt_sigpending 176
147#define __NR_rt_sigtimedwait 177
148#define __NR_rt_sigqueueinfo 178
149#define __NR_rt_sigsuspend 179
150#define __NR_pread64 180
151#define __NR_pwrite64 181
152#define __NR_getcwd 183
153#define __NR_capget 184
154#define __NR_capset 185
155#define __NR_sigaltstack 186
156#define __NR_sendfile 187
157#define __NR_getpmsg 188
158#define __NR_putpmsg 189
159#define __NR_vfork 190
160#define __NR_pivot_root 217
161#define __NR_mincore 218
162#define __NR_madvise 219
163#define __NR_getdents64 220
164#define __NR_readahead 222
165#define __NR_setxattr 224
166#define __NR_lsetxattr 225
167#define __NR_fsetxattr 226
168#define __NR_getxattr 227
169#define __NR_lgetxattr 228
170#define __NR_fgetxattr 229
171#define __NR_listxattr 230
172#define __NR_llistxattr 231
173#define __NR_flistxattr 232
174#define __NR_removexattr 233
175#define __NR_lremovexattr 234
176#define __NR_fremovexattr 235
177#define __NR_gettid 236
178#define __NR_tkill 237
179#define __NR_futex 238
180#define __NR_sched_setaffinity 239
181#define __NR_sched_getaffinity 240
182#define __NR_tgkill 241
183/* Number 242 is reserved for tux */
184#define __NR_io_setup 243
185#define __NR_io_destroy 244
186#define __NR_io_getevents 245
187#define __NR_io_submit 246
188#define __NR_io_cancel 247
189#define __NR_exit_group 248
190#define __NR_epoll_create 249
191#define __NR_epoll_ctl 250
192#define __NR_epoll_wait 251
193#define __NR_set_tid_address 252
194#define __NR_fadvise64 253
195#define __NR_timer_create 254
196#define __NR_timer_settime 255
197#define __NR_timer_gettime 256
198#define __NR_timer_getoverrun 257
199#define __NR_timer_delete 258
200#define __NR_clock_settime 259
201#define __NR_clock_gettime 260
202#define __NR_clock_getres 261
203#define __NR_clock_nanosleep 262
204/* Number 263 is reserved for vserver */
205#define __NR_statfs64 265
206#define __NR_fstatfs64 266
207#define __NR_remap_file_pages 267
208#define __NR_mbind 268
209#define __NR_get_mempolicy 269
210#define __NR_set_mempolicy 270
211#define __NR_mq_open 271
212#define __NR_mq_unlink 272
213#define __NR_mq_timedsend 273
214#define __NR_mq_timedreceive 274
215#define __NR_mq_notify 275
216#define __NR_mq_getsetattr 276
217#define __NR_kexec_load 277
218#define __NR_add_key 278
219#define __NR_request_key 279
220#define __NR_keyctl 280
221#define __NR_waitid 281
222#define __NR_ioprio_set 282
223#define __NR_ioprio_get 283
224#define __NR_inotify_init 284
225#define __NR_inotify_add_watch 285
226#define __NR_inotify_rm_watch 286
227#define __NR_migrate_pages 287
228#define __NR_openat 288
229#define __NR_mkdirat 289
230#define __NR_mknodat 290
231#define __NR_fchownat 291
232#define __NR_futimesat 292
233#define __NR_unlinkat 294
234#define __NR_renameat 295
235#define __NR_linkat 296
236#define __NR_symlinkat 297
237#define __NR_readlinkat 298
238#define __NR_fchmodat 299
239#define __NR_faccessat 300
240#define __NR_pselect6 301
241#define __NR_ppoll 302
242#define __NR_unshare 303
243#define __NR_set_robust_list 304
244#define __NR_get_robust_list 305
245#define __NR_splice 306
246#define __NR_sync_file_range 307
247#define __NR_tee 308
248#define __NR_vmsplice 309
249#define __NR_move_pages 310
250#define __NR_getcpu 311
251#define __NR_epoll_pwait 312
252#define __NR_utimes 313
253#define __NR_fallocate 314
254#define __NR_utimensat 315
255#define __NR_signalfd 316
256#define __NR_timerfd 317
257#define __NR_eventfd 318
258#define __NR_timerfd_create 319
259#define __NR_timerfd_settime 320
260#define __NR_timerfd_gettime 321
261#define __NR_signalfd4 322
262#define __NR_eventfd2 323
263#define __NR_inotify_init1 324
264#define __NR_pipe2 325
265#define __NR_dup3 326
266#define __NR_epoll_create1 327
267#define __NR_preadv 328
268#define __NR_pwritev 329
269#define __NR_rt_tgsigqueueinfo 330
270#define __NR_perf_event_open 331
271#define __NR_fanotify_init 332
272#define __NR_fanotify_mark 333
273#define __NR_prlimit64 334
274#define __NR_name_to_handle_at 335
275#define __NR_open_by_handle_at 336
276#define __NR_clock_adjtime 337
277#define __NR_syncfs 338
278#define __NR_setns 339
279#define __NR_process_vm_readv 340
280#define __NR_process_vm_writev 341
281#define __NR_s390_runtime_instr 342
282#define __NR_kcmp 343
283#define __NR_finit_module 344
284#define __NR_sched_setattr 345
285#define __NR_sched_getattr 346
286#define __NR_renameat2 347
287#define __NR_seccomp 348
288#define __NR_getrandom 349
289#define __NR_memfd_create 350
290#define __NR_bpf 351
291#define __NR_s390_pci_mmio_write 352
292#define __NR_s390_pci_mmio_read 353
293#define __NR_execveat 354
294#define __NR_userfaultfd 355
295#define __NR_membarrier 356
296#define __NR_recvmmsg 357
297#define __NR_sendmmsg 358
298#define __NR_socket 359
299#define __NR_socketpair 360
300#define __NR_bind 361
301#define __NR_connect 362
302#define __NR_listen 363
303#define __NR_accept4 364
304#define __NR_getsockopt 365
305#define __NR_setsockopt 366
306#define __NR_getsockname 367
307#define __NR_getpeername 368
308#define __NR_sendto 369
309#define __NR_sendmsg 370
310#define __NR_recvfrom 371
311#define __NR_recvmsg 372
312#define __NR_shutdown 373
313#define __NR_mlock2 374
314#define __NR_copy_file_range 375
315#define __NR_preadv2 376
316#define __NR_pwritev2 377
317#define __NR_s390_guarded_storage 378
318#define __NR_statx 379
319#define __NR_s390_sthyi 380
320#define NR_syscalls 381
321
322/*
323 * There are some system calls that are not present on 64 bit, some
324 * have a different name although they do the same (e.g. __NR_chown32
325 * is __NR_chown on 64 bit).
326 */
327#ifndef __s390x__
328
329#define __NR_time 13
330#define __NR_lchown 16
331#define __NR_setuid 23
332#define __NR_getuid 24
333#define __NR_stime 25
334#define __NR_setgid 46
335#define __NR_getgid 47
336#define __NR_geteuid 49
337#define __NR_getegid 50
338#define __NR_setreuid 70
339#define __NR_setregid 71
340#define __NR_getrlimit 76
341#define __NR_getgroups 80
342#define __NR_setgroups 81
343#define __NR_fchown 95
344#define __NR_ioperm 101
345#define __NR_setfsuid 138
346#define __NR_setfsgid 139
347#define __NR__llseek 140
348#define __NR__newselect 142
349#define __NR_setresuid 164
350#define __NR_getresuid 165
351#define __NR_setresgid 170
352#define __NR_getresgid 171
353#define __NR_chown 182
354#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
355#define __NR_mmap2 192
356#define __NR_truncate64 193
357#define __NR_ftruncate64 194
358#define __NR_stat64 195
359#define __NR_lstat64 196
360#define __NR_fstat64 197
361#define __NR_lchown32 198
362#define __NR_getuid32 199
363#define __NR_getgid32 200
364#define __NR_geteuid32 201
365#define __NR_getegid32 202
366#define __NR_setreuid32 203
367#define __NR_setregid32 204
368#define __NR_getgroups32 205
369#define __NR_setgroups32 206
370#define __NR_fchown32 207
371#define __NR_setresuid32 208
372#define __NR_getresuid32 209
373#define __NR_setresgid32 210
374#define __NR_getresgid32 211
375#define __NR_chown32 212
376#define __NR_setuid32 213
377#define __NR_setgid32 214
378#define __NR_setfsuid32 215
379#define __NR_setfsgid32 216
380#define __NR_fcntl64 221
381#define __NR_sendfile64 223
382#define __NR_fadvise64_64 264
383#define __NR_fstatat64 293
384
385#else
386
387#define __NR_select 142
388#define __NR_getrlimit 191 /* SuS compliant getrlimit */
389#define __NR_lchown 198
390#define __NR_getuid 199
391#define __NR_getgid 200
392#define __NR_geteuid 201
393#define __NR_getegid 202
394#define __NR_setreuid 203
395#define __NR_setregid 204
396#define __NR_getgroups 205
397#define __NR_setgroups 206
398#define __NR_fchown 207
399#define __NR_setresuid 208
400#define __NR_getresuid 209
401#define __NR_setresgid 210
402#define __NR_getresgid 211
403#define __NR_chown 212
404#define __NR_setuid 213
405#define __NR_setgid 214
406#define __NR_setfsuid 215
407#define __NR_setfsgid 216
408#define __NR_newfstatat 293
409
410#endif
411
412#endif /* _UAPI_ASM_S390_UNISTD_H_ */
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 1d9199e1c2ad..0dfe4d3f74e2 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -210,6 +210,7 @@
210 210
211#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ 211#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
212#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */ 212#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
213#define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */
213 214
214#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ 215#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
215 216
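X86_FEATURE_SEV above encodes as word 7, bit 20 of the kernel's capability bitmap, following the usual (word * 32 + bit) convention for these defines. A trivial standalone sketch of that arithmetic:

/* Illustrative arithmetic for the X86_FEATURE_SEV bit added above:
 * feature defines encode (word * 32 + bit), and capability words are
 * tested as x86_capability[word] & (1 << bit). Standalone sketch. */
#include <stdio.h>

#define X86_FEATURE_SEV (7 * 32 + 20)

int main(void)
{
	unsigned int word = X86_FEATURE_SEV / 32; /* -> 7  */
	unsigned int bit  = X86_FEATURE_SEV % 32; /* -> 20 */

	printf("SEV lives in capability word %u, bit %u (mask 0x%08x)\n",
	       word, bit, 1u << bit);
	return 0;
}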
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index 3a0396d87c42..185acfa229b5 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -244,7 +244,7 @@ static int do_batch(int argc, char **argv)
244 } 244 }
245 245
246 if (errno && errno != ENOENT) { 246 if (errno && errno != ENOENT) {
247 perror("reading batch file failed"); 247 p_err("reading batch file failed: %s", strerror(errno));
248 err = -1; 248 err = -1;
249 } else { 249 } else {
250 p_info("processed %d lines", lines); 250 p_info("processed %d lines", lines);
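The bpftool change above swaps perror() for p_err() because perror() always writes raw text to stderr, bypassing the tool's JSON output mode, while p_err() goes through the tool's output layer. A hedged standalone sketch of the difference (json_output and the JSON formatting here are simplified stand-ins, not bpftool's actual implementation):

/* Sketch: an error helper that captures errno text explicitly can emit
 * either plain text or JSON; perror() cannot. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int json_output; /* set by a -j/--json flag in the real tool */

static void p_err(const char *msg)
{
	if (json_output)
		fprintf(stderr, "{\"error\": \"%s: %s\"}\n",
			msg, strerror(errno));
	else
		fprintf(stderr, "Error: %s: %s\n", msg, strerror(errno));
}

int main(void)
{
	if (!fopen("/nonexistent/batch", "r"))
		p_err("reading batch file failed");
	return 0;
}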
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index e8e2baaf93c2..e549e329be82 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -774,6 +774,9 @@ static int do_dump(int argc, char **argv)
774 n < 0 ? strerror(errno) : "short write"); 774 n < 0 ? strerror(errno) : "short write");
775 goto err_free; 775 goto err_free;
776 } 776 }
777
778 if (json_output)
779 jsonw_null(json_wtr);
777 } else { 780 } else {
778 if (member_len == &info.jited_prog_len) { 781 if (member_len == &info.jited_prog_len) {
779 const char *name = NULL; 782 const char *name = NULL;
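The jsonw_null() call above keeps bpftool's JSON contract intact: in JSON mode every command should emit exactly one JSON value, and a dump whose payload goes to a file would otherwise print nothing. A minimal standalone sketch of that contract (the jsonw_null() here is a stand-in for the real json_writer API):

/* Sketch: in JSON mode, emit "null" on stdout when the real payload
 * was written elsewhere, so stdout remains one valid JSON value. */
#include <stdio.h>

static void jsonw_null(FILE *w) { fputs("null\n", w); }

int main(void)
{
	int json_output = 1; /* set by -j in the real tool */

	/* ... payload written to the output file here ... */
	if (json_output)
		jsonw_null(stdout);
	return 0;
}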
diff --git a/tools/cgroup/Makefile b/tools/cgroup/Makefile
index 860fa151640a..ffca068e4a76 100644
--- a/tools/cgroup/Makefile
+++ b/tools/cgroup/Makefile
@@ -1,7 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2# Makefile for cgroup tools 2# Makefile for cgroup tools
3 3
4CC = $(CROSS_COMPILE)gcc
5CFLAGS = -Wall -Wextra 4CFLAGS = -Wall -Wextra
6 5
7all: cgroup_event_listener 6all: cgroup_event_listener
diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile
index 805a2c0cf4cd..240eda014b37 100644
--- a/tools/gpio/Makefile
+++ b/tools/gpio/Makefile
@@ -12,8 +12,6 @@ endif
12# (this improves performance and avoids hard-to-debug behaviour); 12# (this improves performance and avoids hard-to-debug behaviour);
13MAKEFLAGS += -r 13MAKEFLAGS += -r
14 14
15CC = $(CROSS_COMPILE)gcc
16LD = $(CROSS_COMPILE)ld
17CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include 15CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
18 16
19ALL_TARGETS := lsgpio gpio-hammer gpio-event-mon 17ALL_TARGETS := lsgpio gpio-hammer gpio-event-mon
diff --git a/tools/hv/Makefile b/tools/hv/Makefile
index 1139d71fa0cf..5db5e62cebda 100644
--- a/tools/hv/Makefile
+++ b/tools/hv/Makefile
@@ -1,7 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2# Makefile for Hyper-V tools 2# Makefile for Hyper-V tools
3 3
4CC = $(CROSS_COMPILE)gcc
5WARNINGS = -Wall -Wextra 4WARNINGS = -Wall -Wextra
6CFLAGS = $(WARNINGS) -g $(shell getconf LFS_CFLAGS) 5CFLAGS = $(WARNINGS) -g $(shell getconf LFS_CFLAGS)
7 6
diff --git a/tools/iio/Makefile b/tools/iio/Makefile
index a08e7a47d6a3..332ed2f6c2c2 100644
--- a/tools/iio/Makefile
+++ b/tools/iio/Makefile
@@ -12,8 +12,6 @@ endif
12# (this improves performance and avoids hard-to-debug behaviour); 12# (this improves performance and avoids hard-to-debug behaviour);
13MAKEFLAGS += -r 13MAKEFLAGS += -r
14 14
15CC = $(CROSS_COMPILE)gcc
16LD = $(CROSS_COMPILE)ld
17CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include 15CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
18 16
19ALL_TARGETS := iio_event_monitor lsiio iio_generic_buffer 17ALL_TARGETS := iio_event_monitor lsiio iio_generic_buffer
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index ac3c6503ca27..536ee4febd74 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -86,6 +86,62 @@ enum i915_mocs_table_index {
86 I915_MOCS_CACHED, 86 I915_MOCS_CACHED,
87}; 87};
88 88
89/*
90 * Different engines serve different roles, and there may be more than one
91 * engine serving each role. enum drm_i915_gem_engine_class provides a
92 * classification of the role of the engine, which may be used when requesting
93 * operations to be performed on a certain subset of engines, or for providing
94 * information about that group.
95 */
96enum drm_i915_gem_engine_class {
97 I915_ENGINE_CLASS_RENDER = 0,
98 I915_ENGINE_CLASS_COPY = 1,
99 I915_ENGINE_CLASS_VIDEO = 2,
100 I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,
101
102 I915_ENGINE_CLASS_INVALID = -1
103};
104
105/**
106 * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
107 *
108 */
109
110enum drm_i915_pmu_engine_sample {
111 I915_SAMPLE_BUSY = 0,
112 I915_SAMPLE_WAIT = 1,
113 I915_SAMPLE_SEMA = 2
114};
115
116#define I915_PMU_SAMPLE_BITS (4)
117#define I915_PMU_SAMPLE_MASK (0xf)
118#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
119#define I915_PMU_CLASS_SHIFT \
120 (I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
121
122#define __I915_PMU_ENGINE(class, instance, sample) \
123 ((class) << I915_PMU_CLASS_SHIFT | \
124 (instance) << I915_PMU_SAMPLE_BITS | \
125 (sample))
126
127#define I915_PMU_ENGINE_BUSY(class, instance) \
128 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)
129
130#define I915_PMU_ENGINE_WAIT(class, instance) \
131 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)
132
133#define I915_PMU_ENGINE_SEMA(class, instance) \
134 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
135
136#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
137
138#define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0)
139#define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1)
140#define I915_PMU_INTERRUPTS __I915_PMU_OTHER(2)
141#define I915_PMU_RC6_RESIDENCY __I915_PMU_OTHER(3)
142
143#define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
144
89/* Each region is a minimum of 16k, and there are at most 255 of them. 145/* Each region is a minimum of 16k, and there are at most 255 of them.
90 */ 146 */
91#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use 147#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
@@ -450,6 +506,27 @@ typedef struct drm_i915_irq_wait {
450 */ 506 */
451#define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49 507#define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49
452 508
509/*
510 * Query whether every context (both per-file default and user created) is
511 * isolated (insofar as HW supports). If this parameter is not true, then
512 * freshly created contexts may inherit values from an existing context,
513 * rather than default HW values. If true, it also ensures (insofar as HW
514 * supports) that all state set by this context will not leak to any other
515 * context.
516 *
 517 * As not every engine across every gen supports contexts, the returned
518 * value reports the support of context isolation for individual engines by
519 * returning a bitmask of each engine class set to true if that class supports
520 * isolation.
521 */
522#define I915_PARAM_HAS_CONTEXT_ISOLATION 50
523
524/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
525 * registers. This used to be fixed per platform but from CNL onwards, this
526 * might vary depending on the parts.
527 */
528#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
529
453typedef struct drm_i915_getparam { 530typedef struct drm_i915_getparam {
454 __s32 param; 531 __s32 param;
455 /* 532 /*
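The PMU macros above compose a perf event config from three bit fields: the sample type in the low 4 bits, the engine instance in the next 8, and the engine class above those. A standalone sketch composing "busy ticks for render engine 0" (macros mirrored from the hunk; the program around them is illustrative):

/* Sketch of the i915 PMU event encoding added above. */
#include <stdio.h>
#include <stdint.h>

#define I915_PMU_SAMPLE_BITS		(4)
#define I915_PMU_SAMPLE_INSTANCE_BITS	(8)
#define I915_PMU_CLASS_SHIFT \
	(I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)

#define __I915_PMU_ENGINE(class, instance, sample) \
	((uint64_t)(class) << I915_PMU_CLASS_SHIFT | \
	 (uint64_t)(instance) << I915_PMU_SAMPLE_BITS | \
	 (sample))

enum { I915_ENGINE_CLASS_RENDER = 0, I915_SAMPLE_BUSY = 0 };

int main(void)
{
	/* perf_event_attr.config for "render engine 0 busy ticks" */
	uint64_t config = __I915_PMU_ENGINE(I915_ENGINE_CLASS_RENDER, 0,
					    I915_SAMPLE_BUSY);
	printf("config = 0x%llx\n", (unsigned long long)config);
	return 0;
}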
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index 8616131e2c61..6d9447700e18 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -163,6 +163,7 @@ enum {
163 IFLA_IF_NETNSID, 163 IFLA_IF_NETNSID,
164 IFLA_CARRIER_UP_COUNT, 164 IFLA_CARRIER_UP_COUNT,
165 IFLA_CARRIER_DOWN_COUNT, 165 IFLA_CARRIER_DOWN_COUNT,
166 IFLA_NEW_IFINDEX,
166 __IFLA_MAX 167 __IFLA_MAX
167}; 168};
168 169
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 8fb90a0819c3..0fb5ef939732 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -1362,6 +1362,96 @@ struct kvm_s390_ucas_mapping {
1362/* Available with KVM_CAP_S390_CMMA_MIGRATION */ 1362/* Available with KVM_CAP_S390_CMMA_MIGRATION */
1363#define KVM_S390_GET_CMMA_BITS _IOWR(KVMIO, 0xb8, struct kvm_s390_cmma_log) 1363#define KVM_S390_GET_CMMA_BITS _IOWR(KVMIO, 0xb8, struct kvm_s390_cmma_log)
1364#define KVM_S390_SET_CMMA_BITS _IOW(KVMIO, 0xb9, struct kvm_s390_cmma_log) 1364#define KVM_S390_SET_CMMA_BITS _IOW(KVMIO, 0xb9, struct kvm_s390_cmma_log)
1365/* Memory Encryption Commands */
1366#define KVM_MEMORY_ENCRYPT_OP _IOWR(KVMIO, 0xba, unsigned long)
1367
1368struct kvm_enc_region {
1369 __u64 addr;
1370 __u64 size;
1371};
1372
1373#define KVM_MEMORY_ENCRYPT_REG_REGION _IOR(KVMIO, 0xbb, struct kvm_enc_region)
1374#define KVM_MEMORY_ENCRYPT_UNREG_REGION _IOR(KVMIO, 0xbc, struct kvm_enc_region)
1375
1376/* Secure Encrypted Virtualization command */
1377enum sev_cmd_id {
1378 /* Guest initialization commands */
1379 KVM_SEV_INIT = 0,
1380 KVM_SEV_ES_INIT,
1381 /* Guest launch commands */
1382 KVM_SEV_LAUNCH_START,
1383 KVM_SEV_LAUNCH_UPDATE_DATA,
1384 KVM_SEV_LAUNCH_UPDATE_VMSA,
1385 KVM_SEV_LAUNCH_SECRET,
1386 KVM_SEV_LAUNCH_MEASURE,
1387 KVM_SEV_LAUNCH_FINISH,
1388 /* Guest migration commands (outgoing) */
1389 KVM_SEV_SEND_START,
1390 KVM_SEV_SEND_UPDATE_DATA,
1391 KVM_SEV_SEND_UPDATE_VMSA,
1392 KVM_SEV_SEND_FINISH,
1393 /* Guest migration commands (incoming) */
1394 KVM_SEV_RECEIVE_START,
1395 KVM_SEV_RECEIVE_UPDATE_DATA,
1396 KVM_SEV_RECEIVE_UPDATE_VMSA,
1397 KVM_SEV_RECEIVE_FINISH,
1398 /* Guest status and debug commands */
1399 KVM_SEV_GUEST_STATUS,
1400 KVM_SEV_DBG_DECRYPT,
1401 KVM_SEV_DBG_ENCRYPT,
1402 /* Guest certificates commands */
1403 KVM_SEV_CERT_EXPORT,
1404
1405 KVM_SEV_NR_MAX,
1406};
1407
1408struct kvm_sev_cmd {
1409 __u32 id;
1410 __u64 data;
1411 __u32 error;
1412 __u32 sev_fd;
1413};
1414
1415struct kvm_sev_launch_start {
1416 __u32 handle;
1417 __u32 policy;
1418 __u64 dh_uaddr;
1419 __u32 dh_len;
1420 __u64 session_uaddr;
1421 __u32 session_len;
1422};
1423
1424struct kvm_sev_launch_update_data {
1425 __u64 uaddr;
1426 __u32 len;
1427};
1428
1429
1430struct kvm_sev_launch_secret {
1431 __u64 hdr_uaddr;
1432 __u32 hdr_len;
1433 __u64 guest_uaddr;
1434 __u32 guest_len;
1435 __u64 trans_uaddr;
1436 __u32 trans_len;
1437};
1438
1439struct kvm_sev_launch_measure {
1440 __u64 uaddr;
1441 __u32 len;
1442};
1443
1444struct kvm_sev_guest_status {
1445 __u32 handle;
1446 __u32 policy;
1447 __u32 state;
1448};
1449
1450struct kvm_sev_dbg {
1451 __u64 src_uaddr;
1452 __u64 dst_uaddr;
1453 __u32 len;
1454};
1365 1455
1366#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) 1456#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
1367#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) 1457#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
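All the SEV commands above funnel through a single ioctl: userspace fills struct kvm_sev_cmd with a command id, an optional payload pointer, and the SEV firmware device fd, then issues KVM_MEMORY_ENCRYPT_OP on the VM fd. A hedged sketch of that calling pattern (error handling trimmed; assumes kernel headers that already carry these definitions):

/* Hedged sketch of driving the SEV uAPI declared above. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int sev_fd = open("/dev/sev", O_RDWR);
	if (kvm_fd < 0 || sev_fd < 0)
		return 1;

	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
	if (vm_fd < 0)
		return 1;

	struct kvm_sev_cmd cmd;
	memset(&cmd, 0, sizeof(cmd));
	cmd.id = KVM_SEV_INIT;	/* first step of the launch sequence */
	cmd.sev_fd = sev_fd;	/* handle to the SEV firmware device */

	if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd) < 0)
		fprintf(stderr, "SEV_INIT failed, fw error %u\n", cmd.error);
	return 0;
}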
diff --git a/tools/laptop/freefall/Makefile b/tools/laptop/freefall/Makefile
index 5f758c489a20..b572d94255f6 100644
--- a/tools/laptop/freefall/Makefile
+++ b/tools/laptop/freefall/Makefile
@@ -2,7 +2,6 @@
2PREFIX ?= /usr 2PREFIX ?= /usr
3SBINDIR ?= sbin 3SBINDIR ?= sbin
4INSTALL ?= install 4INSTALL ?= install
5CC = $(CROSS_COMPILE)gcc
6 5
7TARGET = freefall 6TARGET = freefall
8 7
diff --git a/tools/leds/Makefile b/tools/leds/Makefile
index c379af003807..7b6bed13daaa 100644
--- a/tools/leds/Makefile
+++ b/tools/leds/Makefile
@@ -1,7 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2# Makefile for LEDs tools 2# Makefile for LEDs tools
3 3
4CC = $(CROSS_COMPILE)gcc
5CFLAGS = -Wall -Wextra -g -I../../include/uapi 4CFLAGS = -Wall -Wextra -g -I../../include/uapi
6 5
7all: uledmon led_hw_brightness_mon 6all: uledmon led_hw_brightness_mon
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 97073d649c1a..5bbbf285af74 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -1060,11 +1060,12 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
1060 prog->insns = new_insn; 1060 prog->insns = new_insn;
1061 prog->main_prog_cnt = prog->insns_cnt; 1061 prog->main_prog_cnt = prog->insns_cnt;
1062 prog->insns_cnt = new_cnt; 1062 prog->insns_cnt = new_cnt;
1063 pr_debug("added %zd insn from %s to prog %s\n",
1064 text->insns_cnt, text->section_name,
1065 prog->section_name);
1063 } 1066 }
1064 insn = &prog->insns[relo->insn_idx]; 1067 insn = &prog->insns[relo->insn_idx];
1065 insn->imm += prog->main_prog_cnt - relo->insn_idx; 1068 insn->imm += prog->main_prog_cnt - relo->insn_idx;
1066 pr_debug("added %zd insn from %s to prog %s\n",
1067 text->insns_cnt, text->section_name, prog->section_name);
1068 return 0; 1069 return 0;
1069} 1070}
1070 1071
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index b00b1896547e..a8cb69a26576 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -852,8 +852,14 @@ static int add_switch_table(struct objtool_file *file, struct symbol *func,
852 * This is a fairly uncommon pattern which is new for GCC 6. As of this 852 * This is a fairly uncommon pattern which is new for GCC 6. As of this
853 * writing, there are 11 occurrences of it in the allmodconfig kernel. 853 * writing, there are 11 occurrences of it in the allmodconfig kernel.
854 * 854 *
855 * As of GCC 7 there are quite a few more of these and the 'in between' code
 856 * is significant. Especially with KASAN enabled, some of the code between the mov
857 * and jmpq uses .rodata itself, which can confuse things.
858 *
855 * TODO: Once we have DWARF CFI and smarter instruction decoding logic, 859 * TODO: Once we have DWARF CFI and smarter instruction decoding logic,
856 * ensure the same register is used in the mov and jump instructions. 860 * ensure the same register is used in the mov and jump instructions.
861 *
862 * NOTE: RETPOLINE made it harder still to decode dynamic jumps.
857 */ 863 */
858static struct rela *find_switch_table(struct objtool_file *file, 864static struct rela *find_switch_table(struct objtool_file *file,
859 struct symbol *func, 865 struct symbol *func,
@@ -875,12 +881,25 @@ static struct rela *find_switch_table(struct objtool_file *file,
875 text_rela->addend + 4); 881 text_rela->addend + 4);
876 if (!rodata_rela) 882 if (!rodata_rela)
877 return NULL; 883 return NULL;
884
878 file->ignore_unreachables = true; 885 file->ignore_unreachables = true;
879 return rodata_rela; 886 return rodata_rela;
880 } 887 }
881 888
882 /* case 3 */ 889 /* case 3 */
883 func_for_each_insn_continue_reverse(file, func, insn) { 890 /*
891 * Backward search using the @first_jump_src links, these help avoid
892 * much of the 'in between' code. Which avoids us getting confused by
893 * it.
894 */
895 for (insn = list_prev_entry(insn, list);
896
897 &insn->list != &file->insn_list &&
898 insn->sec == func->sec &&
899 insn->offset >= func->offset;
900
901 insn = insn->first_jump_src ?: list_prev_entry(insn, list)) {
902
884 if (insn->type == INSN_JUMP_DYNAMIC) 903 if (insn->type == INSN_JUMP_DYNAMIC)
885 break; 904 break;
886 905
@@ -910,14 +929,32 @@ static struct rela *find_switch_table(struct objtool_file *file,
910 return NULL; 929 return NULL;
911} 930}
912 931
932
913static int add_func_switch_tables(struct objtool_file *file, 933static int add_func_switch_tables(struct objtool_file *file,
914 struct symbol *func) 934 struct symbol *func)
915{ 935{
916 struct instruction *insn, *prev_jump = NULL; 936 struct instruction *insn, *last = NULL, *prev_jump = NULL;
917 struct rela *rela, *prev_rela = NULL; 937 struct rela *rela, *prev_rela = NULL;
918 int ret; 938 int ret;
919 939
920 func_for_each_insn(file, func, insn) { 940 func_for_each_insn(file, func, insn) {
941 if (!last)
942 last = insn;
943
944 /*
945 * Store back-pointers for unconditional forward jumps such
946 * that find_switch_table() can back-track using those and
947 * avoid some potentially confusing code.
948 */
949 if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
950 insn->offset > last->offset &&
951 insn->jump_dest->offset > insn->offset &&
952 !insn->jump_dest->first_jump_src) {
953
954 insn->jump_dest->first_jump_src = insn;
955 last = insn->jump_dest;
956 }
957
921 if (insn->type != INSN_JUMP_DYNAMIC) 958 if (insn->type != INSN_JUMP_DYNAMIC)
922 continue; 959 continue;
923 960
@@ -1899,13 +1936,19 @@ static bool ignore_unreachable_insn(struct instruction *insn)
1899 if (is_kasan_insn(insn) || is_ubsan_insn(insn)) 1936 if (is_kasan_insn(insn) || is_ubsan_insn(insn))
1900 return true; 1937 return true;
1901 1938
1902 if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest) { 1939 if (insn->type == INSN_JUMP_UNCONDITIONAL) {
1903 insn = insn->jump_dest; 1940 if (insn->jump_dest &&
1904 continue; 1941 insn->jump_dest->func == insn->func) {
1942 insn = insn->jump_dest;
1943 continue;
1944 }
1945
1946 break;
1905 } 1947 }
1906 1948
1907 if (insn->offset + insn->len >= insn->func->offset + insn->func->len) 1949 if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
1908 break; 1950 break;
1951
1909 insn = list_next_entry(insn, list); 1952 insn = list_next_entry(insn, list);
1910 } 1953 }
1911 1954
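The rewritten loop above walks instructions in reverse but hops straight to a recorded first_jump_src when one exists, skipping the 'in between' code that used to confuse the search. A minimal standalone sketch of that back-tracking rule (struct and list handling are simplified stand-ins for objtool's types):

/* Minimal sketch of the first_jump_src back-tracking added above. */
#include <stdio.h>

struct insn {
	int offset;
	struct insn *prev;		/* previous instruction in layout */
	struct insn *first_jump_src;	/* first unconditional jump here  */
};

static struct insn *step_back(struct insn *insn)
{
	/* Same choice as the patch: prefer the jump source, else prev. */
	return insn->first_jump_src ? insn->first_jump_src : insn->prev;
}

int main(void)
{
	struct insn a = { 0x00, NULL, NULL };
	struct insn b = { 0x04, &a, NULL };	/* 'in between' code */
	struct insn c = { 0x10, &b, &a };	/* a jumps directly to c */

	for (struct insn *i = &c; i; i = step_back(i))
		printf("visit 0x%02x\n", i->offset); /* 0x10 then 0x00 */
	return 0;
}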
diff --git a/tools/objtool/check.h b/tools/objtool/check.h
index dbadb304a410..23a1d065cae1 100644
--- a/tools/objtool/check.h
+++ b/tools/objtool/check.h
@@ -47,6 +47,7 @@ struct instruction {
47 bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts; 47 bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;
48 struct symbol *call_dest; 48 struct symbol *call_dest;
49 struct instruction *jump_dest; 49 struct instruction *jump_dest;
50 struct instruction *first_jump_src;
50 struct list_head alts; 51 struct list_head alts;
51 struct symbol *func; 52 struct symbol *func;
52 struct stack_op stack_op; 53 struct stack_op stack_op;
diff --git a/tools/perf/Documentation/perf-data.txt b/tools/perf/Documentation/perf-data.txt
index f0796a47dfa3..90bb4aabe4f8 100644
--- a/tools/perf/Documentation/perf-data.txt
+++ b/tools/perf/Documentation/perf-data.txt
@@ -30,6 +30,10 @@ OPTIONS for 'convert'
30-i:: 30-i::
31 Specify input perf data file path. 31 Specify input perf data file path.
32 32
33-f::
34--force::
35 Don't complain, do it.
36
33-v:: 37-v::
34--verbose:: 38--verbose::
35 Be more verbose (show counter open errors, etc). 39 Be more verbose (show counter open errors, etc).
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 9b0351d3ce34..012328038594 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -146,12 +146,6 @@ define allow-override
146 $(eval $(1) = $(2))) 146 $(eval $(1) = $(2)))
147endef 147endef
148 148
149# Allow setting CC and AR and LD, or setting CROSS_COMPILE as a prefix.
150$(call allow-override,CC,$(CROSS_COMPILE)gcc)
151$(call allow-override,AR,$(CROSS_COMPILE)ar)
152$(call allow-override,LD,$(CROSS_COMPILE)ld)
153$(call allow-override,CXX,$(CROSS_COMPILE)g++)
154
155LD += $(EXTRA_LDFLAGS) 149LD += $(EXTRA_LDFLAGS)
156 150
157HOSTCC ?= gcc 151HOSTCC ?= gcc
diff --git a/tools/perf/arch/s390/Makefile b/tools/perf/arch/s390/Makefile
index 48228de415d0..dfa6e3103437 100644
--- a/tools/perf/arch/s390/Makefile
+++ b/tools/perf/arch/s390/Makefile
@@ -10,15 +10,19 @@ PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
10 10
11out := $(OUTPUT)arch/s390/include/generated/asm 11out := $(OUTPUT)arch/s390/include/generated/asm
12header := $(out)/syscalls_64.c 12header := $(out)/syscalls_64.c
13sysdef := $(srctree)/tools/arch/s390/include/uapi/asm/unistd.h 13syskrn := $(srctree)/arch/s390/kernel/syscalls/syscall.tbl
14sysprf := $(srctree)/tools/perf/arch/s390/entry/syscalls/ 14sysprf := $(srctree)/tools/perf/arch/s390/entry/syscalls
15sysdef := $(sysprf)/syscall.tbl
15systbl := $(sysprf)/mksyscalltbl 16systbl := $(sysprf)/mksyscalltbl
16 17
17# Create output directory if not already present 18# Create output directory if not already present
18_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') 19_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
19 20
20$(header): $(sysdef) $(systbl) 21$(header): $(sysdef) $(systbl)
21 $(Q)$(SHELL) '$(systbl)' '$(CC)' $(sysdef) > $@ 22 @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \
23 (diff -B $(sysdef) $(syskrn) >/dev/null) \
24 || echo "Warning: Kernel ABI header at '$(sysdef)' differs from latest version at '$(syskrn)'" >&2 )) || true
25 $(Q)$(SHELL) '$(systbl)' $(sysdef) > $@
22 26
23clean:: 27clean::
24 $(call QUIET_CLEAN, s390) $(RM) $(header) 28 $(call QUIET_CLEAN, s390) $(RM) $(header)
diff --git a/tools/perf/arch/s390/entry/syscalls/mksyscalltbl b/tools/perf/arch/s390/entry/syscalls/mksyscalltbl
index 7fa0d0abd419..72ecbb676370 100755
--- a/tools/perf/arch/s390/entry/syscalls/mksyscalltbl
+++ b/tools/perf/arch/s390/entry/syscalls/mksyscalltbl
@@ -3,25 +3,23 @@
3# 3#
4# Generate system call table for perf 4# Generate system call table for perf
5# 5#
6# 6# Copyright IBM Corp. 2017, 2018
7# Copyright IBM Corp. 2017
8# Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> 7# Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
9# 8#
10 9
11gcc=$1 10SYSCALL_TBL=$1
12input=$2
13 11
14if ! test -r $input; then 12if ! test -r $SYSCALL_TBL; then
15 echo "Could not read input file" >&2 13 echo "Could not read input file" >&2
16 exit 1 14 exit 1
17fi 15fi
18 16
19create_table() 17create_table()
20{ 18{
21 local max_nr 19 local max_nr nr abi sc discard
22 20
23 echo 'static const char *syscalltbl_s390_64[] = {' 21 echo 'static const char *syscalltbl_s390_64[] = {'
24 while read sc nr; do 22 while read nr abi sc discard; do
25 printf '\t[%d] = "%s",\n' $nr $sc 23 printf '\t[%d] = "%s",\n' $nr $sc
26 max_nr=$nr 24 max_nr=$nr
27 done 25 done
@@ -29,8 +27,6 @@ create_table()
29 echo "#define SYSCALLTBL_S390_64_MAX_ID $max_nr" 27 echo "#define SYSCALLTBL_S390_64_MAX_ID $max_nr"
30} 28}
31 29
32 30grep -E "^[[:digit:]]+[[:space:]]+(common|64)" $SYSCALL_TBL \
33$gcc -m64 -E -dM -x c $input \ 31 |sort -k1 -n \
34 |sed -ne 's/^#define __NR_//p' \
35 |sort -t' ' -k2 -nu \
36 |create_table 32 |create_table
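The rewritten script above no longer preprocesses unistd.h with gcc; it greps the common/64-bit rows out of syscall.tbl and prints a C array indexed by syscall number. Fed the first rows of the table added below, the generated header would look roughly like this (abbreviated illustration; the real header covers every matching entry):

/* Roughly what the rewritten mksyscalltbl emits for the first rows of
 * the syscall.tbl below. */
static const char *syscalltbl_s390_64[] = {
	[1] = "exit",
	[2] = "fork",
	[3] = "read",
	[4] = "write",
	[5] = "open",
};
#define SYSCALLTBL_S390_64_MAX_ID 5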
diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
new file mode 100644
index 000000000000..b38d48464368
--- /dev/null
+++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
@@ -0,0 +1,390 @@
1# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
2#
3# System call table for s390
4#
5# Format:
6#
7# <nr> <abi> <syscall> <entry-64bit> <compat-entry>
8#
9# where <abi> can be common, 64, or 32
10
111 common exit sys_exit sys_exit
122 common fork sys_fork sys_fork
133 common read sys_read compat_sys_s390_read
144 common write sys_write compat_sys_s390_write
155 common open sys_open compat_sys_open
166 common close sys_close sys_close
177 common restart_syscall sys_restart_syscall sys_restart_syscall
188 common creat sys_creat compat_sys_creat
199 common link sys_link compat_sys_link
2010 common unlink sys_unlink compat_sys_unlink
2111 common execve sys_execve compat_sys_execve
2212 common chdir sys_chdir compat_sys_chdir
2313 32 time - compat_sys_time
2414 common mknod sys_mknod compat_sys_mknod
2515 common chmod sys_chmod compat_sys_chmod
2616 32 lchown - compat_sys_s390_lchown16
2719 common lseek sys_lseek compat_sys_lseek
2820 common getpid sys_getpid sys_getpid
2921 common mount sys_mount compat_sys_mount
3022 common umount sys_oldumount compat_sys_oldumount
3123 32 setuid - compat_sys_s390_setuid16
3224 32 getuid - compat_sys_s390_getuid16
3325 32 stime - compat_sys_stime
3426 common ptrace sys_ptrace compat_sys_ptrace
3527 common alarm sys_alarm sys_alarm
3629 common pause sys_pause sys_pause
3730 common utime sys_utime compat_sys_utime
3833 common access sys_access compat_sys_access
3934 common nice sys_nice sys_nice
4036 common sync sys_sync sys_sync
4137 common kill sys_kill sys_kill
4238 common rename sys_rename compat_sys_rename
4339 common mkdir sys_mkdir compat_sys_mkdir
4440 common rmdir sys_rmdir compat_sys_rmdir
4541 common dup sys_dup sys_dup
4642 common pipe sys_pipe compat_sys_pipe
4743 common times sys_times compat_sys_times
4845 common brk sys_brk compat_sys_brk
4946 32 setgid - compat_sys_s390_setgid16
5047 32 getgid - compat_sys_s390_getgid16
5148 common signal sys_signal compat_sys_signal
5249 32 geteuid - compat_sys_s390_geteuid16
5350 32 getegid - compat_sys_s390_getegid16
5451 common acct sys_acct compat_sys_acct
5552 common umount2 sys_umount compat_sys_umount
5654 common ioctl sys_ioctl compat_sys_ioctl
5755 common fcntl sys_fcntl compat_sys_fcntl
5857 common setpgid sys_setpgid sys_setpgid
5960 common umask sys_umask sys_umask
6061 common chroot sys_chroot compat_sys_chroot
6162 common ustat sys_ustat compat_sys_ustat
6263 common dup2 sys_dup2 sys_dup2
6364 common getppid sys_getppid sys_getppid
6465 common getpgrp sys_getpgrp sys_getpgrp
6566 common setsid sys_setsid sys_setsid
6667 common sigaction sys_sigaction compat_sys_sigaction
6770 32 setreuid - compat_sys_s390_setreuid16
6871 32 setregid - compat_sys_s390_setregid16
6972 common sigsuspend sys_sigsuspend compat_sys_sigsuspend
7073 common sigpending sys_sigpending compat_sys_sigpending
7174 common sethostname sys_sethostname compat_sys_sethostname
7275 common setrlimit sys_setrlimit compat_sys_setrlimit
7376 32 getrlimit - compat_sys_old_getrlimit
7477 common getrusage sys_getrusage compat_sys_getrusage
7578 common gettimeofday sys_gettimeofday compat_sys_gettimeofday
7679 common settimeofday sys_settimeofday compat_sys_settimeofday
7780 32 getgroups - compat_sys_s390_getgroups16
7881 32 setgroups - compat_sys_s390_setgroups16
7983 common symlink sys_symlink compat_sys_symlink
8085 common readlink sys_readlink compat_sys_readlink
8186 common uselib sys_uselib compat_sys_uselib
8287 common swapon sys_swapon compat_sys_swapon
8388 common reboot sys_reboot compat_sys_reboot
8489 common readdir - compat_sys_old_readdir
8590 common mmap sys_old_mmap compat_sys_s390_old_mmap
8691 common munmap sys_munmap compat_sys_munmap
8792 common truncate sys_truncate compat_sys_truncate
8893 common ftruncate sys_ftruncate compat_sys_ftruncate
8994 common fchmod sys_fchmod sys_fchmod
9095 32 fchown - compat_sys_s390_fchown16
9196 common getpriority sys_getpriority sys_getpriority
9297 common setpriority sys_setpriority sys_setpriority
9399 common statfs sys_statfs compat_sys_statfs
94100 common fstatfs sys_fstatfs compat_sys_fstatfs
95101 32 ioperm - -
96102 common socketcall sys_socketcall compat_sys_socketcall
97103 common syslog sys_syslog compat_sys_syslog
98104 common setitimer sys_setitimer compat_sys_setitimer
99105 common getitimer sys_getitimer compat_sys_getitimer
100106 common stat sys_newstat compat_sys_newstat
101107 common lstat sys_newlstat compat_sys_newlstat
102108 common fstat sys_newfstat compat_sys_newfstat
103110 common lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
104111 common vhangup sys_vhangup sys_vhangup
105112 common idle - -
106114 common wait4 sys_wait4 compat_sys_wait4
107115 common swapoff sys_swapoff compat_sys_swapoff
108116 common sysinfo sys_sysinfo compat_sys_sysinfo
109117 common ipc sys_s390_ipc compat_sys_s390_ipc
110118 common fsync sys_fsync sys_fsync
111119 common sigreturn sys_sigreturn compat_sys_sigreturn
112120 common clone sys_clone compat_sys_clone
113121 common setdomainname sys_setdomainname compat_sys_setdomainname
114122 common uname sys_newuname compat_sys_newuname
115124 common adjtimex sys_adjtimex compat_sys_adjtimex
116125 common mprotect sys_mprotect compat_sys_mprotect
117126 common sigprocmask sys_sigprocmask compat_sys_sigprocmask
118127 common create_module - -
119128 common init_module sys_init_module compat_sys_init_module
120129 common delete_module sys_delete_module compat_sys_delete_module
121130 common get_kernel_syms - -
122131 common quotactl sys_quotactl compat_sys_quotactl
123132 common getpgid sys_getpgid sys_getpgid
124133 common fchdir sys_fchdir sys_fchdir
125134 common bdflush sys_bdflush compat_sys_bdflush
126135 common sysfs sys_sysfs compat_sys_sysfs
127136 common personality sys_s390_personality sys_s390_personality
128137 common afs_syscall - -
129138 32 setfsuid - compat_sys_s390_setfsuid16
130139 32 setfsgid - compat_sys_s390_setfsgid16
131140 32 _llseek - compat_sys_llseek
132141 common getdents sys_getdents compat_sys_getdents
133142 32 _newselect - compat_sys_select
134142 64 select sys_select -
135143 common flock sys_flock sys_flock
136144 common msync sys_msync compat_sys_msync
137145 common readv sys_readv compat_sys_readv
138146 common writev sys_writev compat_sys_writev
139147 common getsid sys_getsid sys_getsid
140148 common fdatasync sys_fdatasync sys_fdatasync
141149 common _sysctl sys_sysctl compat_sys_sysctl
142150 common mlock sys_mlock compat_sys_mlock
143151 common munlock sys_munlock compat_sys_munlock
144152 common mlockall sys_mlockall sys_mlockall
145153 common munlockall sys_munlockall sys_munlockall
146154 common sched_setparam sys_sched_setparam compat_sys_sched_setparam
147155 common sched_getparam sys_sched_getparam compat_sys_sched_getparam
148156 common sched_setscheduler sys_sched_setscheduler compat_sys_sched_setscheduler
149157 common sched_getscheduler sys_sched_getscheduler sys_sched_getscheduler
150158 common sched_yield sys_sched_yield sys_sched_yield
151159 common sched_get_priority_max sys_sched_get_priority_max sys_sched_get_priority_max
152160 common sched_get_priority_min sys_sched_get_priority_min sys_sched_get_priority_min
153161 common sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval
154162 common nanosleep sys_nanosleep compat_sys_nanosleep
155163 common mremap sys_mremap compat_sys_mremap
156164 32 setresuid - compat_sys_s390_setresuid16
157165 32 getresuid - compat_sys_s390_getresuid16
158167 common query_module - -
159168 common poll sys_poll compat_sys_poll
160169 common nfsservctl - -
161170 32 setresgid - compat_sys_s390_setresgid16
162171 32 getresgid - compat_sys_s390_getresgid16
163172 common prctl sys_prctl compat_sys_prctl
164173 common rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn
165174 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
166175 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
167176 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
168177 common rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait
169178 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
170179 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
171180 common pread64 sys_pread64 compat_sys_s390_pread64
172181 common pwrite64 sys_pwrite64 compat_sys_s390_pwrite64
173182 32 chown - compat_sys_s390_chown16
174183 common getcwd sys_getcwd compat_sys_getcwd
175184 common capget sys_capget compat_sys_capget
176185 common capset sys_capset compat_sys_capset
177186 common sigaltstack sys_sigaltstack compat_sys_sigaltstack
178187 common sendfile sys_sendfile64 compat_sys_sendfile
179188 common getpmsg - -
180189 common putpmsg - -
181190 common vfork sys_vfork sys_vfork
182191 32 ugetrlimit - compat_sys_getrlimit
183191 64 getrlimit sys_getrlimit -
184192 32 mmap2 - compat_sys_s390_mmap2
185193 32 truncate64 - compat_sys_s390_truncate64
186194 32 ftruncate64 - compat_sys_s390_ftruncate64
187195 32 stat64 - compat_sys_s390_stat64
188196 32 lstat64 - compat_sys_s390_lstat64
189197 32 fstat64 - compat_sys_s390_fstat64
190198 32 lchown32 - compat_sys_lchown
191198 64 lchown sys_lchown -
192199 32 getuid32 - sys_getuid
193199 64 getuid sys_getuid -
194200 32 getgid32 - sys_getgid
195200 64 getgid sys_getgid -
196201 32 geteuid32 - sys_geteuid
197201 64 geteuid sys_geteuid -
198202 32 getegid32 - sys_getegid
199202 64 getegid sys_getegid -
200203 32 setreuid32 - sys_setreuid
201203 64 setreuid sys_setreuid -
202204 32 setregid32 - sys_setregid
203204 64 setregid sys_setregid -
204205 32 getgroups32 - compat_sys_getgroups
205205 64 getgroups sys_getgroups -
206206 32 setgroups32 - compat_sys_setgroups
207206 64 setgroups sys_setgroups -
208207 32 fchown32 - sys_fchown
209207 64 fchown sys_fchown -
210208 32 setresuid32 - sys_setresuid
211208 64 setresuid sys_setresuid -
212209 32 getresuid32 - compat_sys_getresuid
213209 64 getresuid sys_getresuid -
214210 32 setresgid32 - sys_setresgid
215210 64 setresgid sys_setresgid -
216211 32 getresgid32 - compat_sys_getresgid
211 64 getresgid sys_getresgid -
212 32 chown32 - compat_sys_chown
212 64 chown sys_chown -
213 32 setuid32 - sys_setuid
213 64 setuid sys_setuid -
214 32 setgid32 - sys_setgid
214 64 setgid sys_setgid -
215 32 setfsuid32 - sys_setfsuid
215 64 setfsuid sys_setfsuid -
216 32 setfsgid32 - sys_setfsgid
216 64 setfsgid sys_setfsgid -
217 common pivot_root sys_pivot_root compat_sys_pivot_root
218 common mincore sys_mincore compat_sys_mincore
219 common madvise sys_madvise compat_sys_madvise
220 common getdents64 sys_getdents64 compat_sys_getdents64
221 32 fcntl64 - compat_sys_fcntl64
222 common readahead sys_readahead compat_sys_s390_readahead
223 32 sendfile64 - compat_sys_sendfile64
224 common setxattr sys_setxattr compat_sys_setxattr
225 common lsetxattr sys_lsetxattr compat_sys_lsetxattr
226 common fsetxattr sys_fsetxattr compat_sys_fsetxattr
227 common getxattr sys_getxattr compat_sys_getxattr
228 common lgetxattr sys_lgetxattr compat_sys_lgetxattr
229 common fgetxattr sys_fgetxattr compat_sys_fgetxattr
230 common listxattr sys_listxattr compat_sys_listxattr
231 common llistxattr sys_llistxattr compat_sys_llistxattr
232 common flistxattr sys_flistxattr compat_sys_flistxattr
233 common removexattr sys_removexattr compat_sys_removexattr
234 common lremovexattr sys_lremovexattr compat_sys_lremovexattr
235 common fremovexattr sys_fremovexattr compat_sys_fremovexattr
236 common gettid sys_gettid sys_gettid
237 common tkill sys_tkill sys_tkill
238 common futex sys_futex compat_sys_futex
239 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
240 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
241 common tgkill sys_tgkill sys_tgkill
243 common io_setup sys_io_setup compat_sys_io_setup
244 common io_destroy sys_io_destroy compat_sys_io_destroy
245 common io_getevents sys_io_getevents compat_sys_io_getevents
246 common io_submit sys_io_submit compat_sys_io_submit
247 common io_cancel sys_io_cancel compat_sys_io_cancel
248 common exit_group sys_exit_group sys_exit_group
249 common epoll_create sys_epoll_create sys_epoll_create
250 common epoll_ctl sys_epoll_ctl compat_sys_epoll_ctl
251 common epoll_wait sys_epoll_wait compat_sys_epoll_wait
252 common set_tid_address sys_set_tid_address compat_sys_set_tid_address
253 common fadvise64 sys_fadvise64_64 compat_sys_s390_fadvise64
254 common timer_create sys_timer_create compat_sys_timer_create
255 common timer_settime sys_timer_settime compat_sys_timer_settime
256 common timer_gettime sys_timer_gettime compat_sys_timer_gettime
257 common timer_getoverrun sys_timer_getoverrun sys_timer_getoverrun
258 common timer_delete sys_timer_delete sys_timer_delete
259 common clock_settime sys_clock_settime compat_sys_clock_settime
260 common clock_gettime sys_clock_gettime compat_sys_clock_gettime
261 common clock_getres sys_clock_getres compat_sys_clock_getres
262 common clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep
264 32 fadvise64_64 - compat_sys_s390_fadvise64_64
265 common statfs64 sys_statfs64 compat_sys_statfs64
266 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
267 common remap_file_pages sys_remap_file_pages compat_sys_remap_file_pages
268 common mbind sys_mbind compat_sys_mbind
269 common get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
270 common set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
271 common mq_open sys_mq_open compat_sys_mq_open
272 common mq_unlink sys_mq_unlink compat_sys_mq_unlink
273 common mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend
274 common mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive
275 common mq_notify sys_mq_notify compat_sys_mq_notify
276 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
277 common kexec_load sys_kexec_load compat_sys_kexec_load
278 common add_key sys_add_key compat_sys_add_key
279 common request_key sys_request_key compat_sys_request_key
280 common keyctl sys_keyctl compat_sys_keyctl
281 common waitid sys_waitid compat_sys_waitid
282 common ioprio_set sys_ioprio_set sys_ioprio_set
283 common ioprio_get sys_ioprio_get sys_ioprio_get
284 common inotify_init sys_inotify_init sys_inotify_init
285 common inotify_add_watch sys_inotify_add_watch compat_sys_inotify_add_watch
286 common inotify_rm_watch sys_inotify_rm_watch sys_inotify_rm_watch
287 common migrate_pages sys_migrate_pages compat_sys_migrate_pages
288 common openat sys_openat compat_sys_openat
289 common mkdirat sys_mkdirat compat_sys_mkdirat
290 common mknodat sys_mknodat compat_sys_mknodat
291 common fchownat sys_fchownat compat_sys_fchownat
292 common futimesat sys_futimesat compat_sys_futimesat
293 32 fstatat64 - compat_sys_s390_fstatat64
293 64 newfstatat sys_newfstatat -
294 common unlinkat sys_unlinkat compat_sys_unlinkat
295 common renameat sys_renameat compat_sys_renameat
296 common linkat sys_linkat compat_sys_linkat
297 common symlinkat sys_symlinkat compat_sys_symlinkat
298 common readlinkat sys_readlinkat compat_sys_readlinkat
299 common fchmodat sys_fchmodat compat_sys_fchmodat
300 common faccessat sys_faccessat compat_sys_faccessat
301 common pselect6 sys_pselect6 compat_sys_pselect6
302 common ppoll sys_ppoll compat_sys_ppoll
303 common unshare sys_unshare compat_sys_unshare
304 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
305 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
306 common splice sys_splice compat_sys_splice
307 common sync_file_range sys_sync_file_range compat_sys_s390_sync_file_range
308 common tee sys_tee compat_sys_tee
309 common vmsplice sys_vmsplice compat_sys_vmsplice
310 common move_pages sys_move_pages compat_sys_move_pages
311 common getcpu sys_getcpu compat_sys_getcpu
312 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
313 common utimes sys_utimes compat_sys_utimes
314 common fallocate sys_fallocate compat_sys_s390_fallocate
315 common utimensat sys_utimensat compat_sys_utimensat
316 common signalfd sys_signalfd compat_sys_signalfd
317 common timerfd - -
318 common eventfd sys_eventfd sys_eventfd
319 common timerfd_create sys_timerfd_create sys_timerfd_create
320 common timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime
321 common timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime
322 common signalfd4 sys_signalfd4 compat_sys_signalfd4
323 common eventfd2 sys_eventfd2 sys_eventfd2
324 common inotify_init1 sys_inotify_init1 sys_inotify_init1
325 common pipe2 sys_pipe2 compat_sys_pipe2
326 common dup3 sys_dup3 sys_dup3
327 common epoll_create1 sys_epoll_create1 sys_epoll_create1
328 common preadv sys_preadv compat_sys_preadv
329 common pwritev sys_pwritev compat_sys_pwritev
330 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
331 common perf_event_open sys_perf_event_open compat_sys_perf_event_open
332 common fanotify_init sys_fanotify_init sys_fanotify_init
333 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
334 common prlimit64 sys_prlimit64 compat_sys_prlimit64
335 common name_to_handle_at sys_name_to_handle_at compat_sys_name_to_handle_at
336 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
337 common clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime
338 common syncfs sys_syncfs sys_syncfs
339 common setns sys_setns sys_setns
340 common process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
341 common process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
342 common s390_runtime_instr sys_s390_runtime_instr sys_s390_runtime_instr
343 common kcmp sys_kcmp compat_sys_kcmp
344 common finit_module sys_finit_module compat_sys_finit_module
345 common sched_setattr sys_sched_setattr compat_sys_sched_setattr
346 common sched_getattr sys_sched_getattr compat_sys_sched_getattr
347 common renameat2 sys_renameat2 compat_sys_renameat2
348 common seccomp sys_seccomp compat_sys_seccomp
349 common getrandom sys_getrandom compat_sys_getrandom
350 common memfd_create sys_memfd_create compat_sys_memfd_create
351 common bpf sys_bpf compat_sys_bpf
352 common s390_pci_mmio_write sys_s390_pci_mmio_write compat_sys_s390_pci_mmio_write
353 common s390_pci_mmio_read sys_s390_pci_mmio_read compat_sys_s390_pci_mmio_read
354 common execveat sys_execveat compat_sys_execveat
355 common userfaultfd sys_userfaultfd sys_userfaultfd
356 common membarrier sys_membarrier sys_membarrier
357 common recvmmsg sys_recvmmsg compat_sys_recvmmsg
358 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
359 common socket sys_socket sys_socket
360 common socketpair sys_socketpair compat_sys_socketpair
361 common bind sys_bind compat_sys_bind
362 common connect sys_connect compat_sys_connect
363 common listen sys_listen sys_listen
364 common accept4 sys_accept4 compat_sys_accept4
365 common getsockopt sys_getsockopt compat_sys_getsockopt
366 common setsockopt sys_setsockopt compat_sys_setsockopt
367 common getsockname sys_getsockname compat_sys_getsockname
368 common getpeername sys_getpeername compat_sys_getpeername
369 common sendto sys_sendto compat_sys_sendto
370 common sendmsg sys_sendmsg compat_sys_sendmsg
371 common recvfrom sys_recvfrom compat_sys_recvfrom
372 common recvmsg sys_recvmsg compat_sys_recvmsg
373 common shutdown sys_shutdown sys_shutdown
374 common mlock2 sys_mlock2 compat_sys_mlock2
375 common copy_file_range sys_copy_file_range compat_sys_copy_file_range
376 common preadv2 sys_preadv2 compat_sys_preadv2
377 common pwritev2 sys_pwritev2 compat_sys_pwritev2
378 common s390_guarded_storage sys_s390_guarded_storage compat_sys_s390_guarded_storage
379 common statx sys_statx compat_sys_statx
380 common s390_sthyi sys_s390_sthyi compat_sys_s390_sthyi
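For reference, the rows follow the standard syscall.tbl layout: <number> <abi> <name> <64-bit entry point> <compat entry point>, where "-" means the call is not wired up for that ABI and the 32/64 pairs share one number. Reading the last row (annotation mine):

    380  common  s390_sthyi  sys_s390_sthyi  compat_sys_s390_sthyi
     |     |         |             |                   |
     |     |         |             |                   `-- 31-bit compat entry point
     |     |         |             `-- 64-bit entry point
     |     |         `-- syscall name
     |     `-- ABI column: common, 64, or 32
     `-- system call number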
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index c0815a37fdb5..539c3d460158 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -2245,7 +2245,7 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he)
2245 c2c_browser__update_nr_entries(browser); 2245 c2c_browser__update_nr_entries(browser);
2246 2246
2247 while (1) { 2247 while (1) {
2248 key = hist_browser__run(browser, "? - help"); 2248 key = hist_browser__run(browser, "? - help", true);
2249 2249
2250 switch (key) { 2250 switch (key) {
2251 case 's': 2251 case 's':
@@ -2314,7 +2314,7 @@ static int perf_c2c__hists_browse(struct hists *hists)
2314 c2c_browser__update_nr_entries(browser); 2314 c2c_browser__update_nr_entries(browser);
2315 2315
2316 while (1) { 2316 while (1) {
2317 key = hist_browser__run(browser, "? - help"); 2317 key = hist_browser__run(browser, "? - help", true);
2318 2318
2319 switch (key) { 2319 switch (key) {
2320 case 'q': 2320 case 'q':
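Note the pattern introduced here: hist_browser__run() grew a warn_lost_event flag (plumbed through in the hists.c/hists.h hunks further down). c2c and report always pass true, while perf top passes !overwrite, since dropped records are expected rather than a symptom of falling behind when sampling through an overwrite ring buffer.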
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 42a52dcc41cd..4ad5dc649716 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -530,7 +530,8 @@ static int report__browse_hists(struct report *rep)
530 case 1: 530 case 1:
531 ret = perf_evlist__tui_browse_hists(evlist, help, NULL, 531 ret = perf_evlist__tui_browse_hists(evlist, help, NULL,
532 rep->min_percent, 532 rep->min_percent,
533 &session->header.env); 533 &session->header.env,
534 true);
534 /* 535 /*
535 * Usually "ret" is the last pressed key, and we only 536 * Usually "ret" is the last pressed key, and we only
536 * care if the key notifies us to switch data file. 537 * care if the key notifies us to switch data file.
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index c6ccda52117d..b7c823ba8374 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -283,8 +283,9 @@ static void perf_top__print_sym_table(struct perf_top *top)
283 283
284 printf("%-*.*s\n", win_width, win_width, graph_dotted_line); 284 printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
285 285
286 if (hists->stats.nr_lost_warned != 286 if (!top->record_opts.overwrite &&
287 hists->stats.nr_events[PERF_RECORD_LOST]) { 287 (hists->stats.nr_lost_warned !=
288 hists->stats.nr_events[PERF_RECORD_LOST])) {
288 hists->stats.nr_lost_warned = 289 hists->stats.nr_lost_warned =
289 hists->stats.nr_events[PERF_RECORD_LOST]; 290 hists->stats.nr_events[PERF_RECORD_LOST];
290 color_fprintf(stdout, PERF_COLOR_RED, 291 color_fprintf(stdout, PERF_COLOR_RED,
@@ -611,7 +612,8 @@ static void *display_thread_tui(void *arg)
611 612
612 perf_evlist__tui_browse_hists(top->evlist, help, &hbt, 613 perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
613 top->min_percent, 614 top->min_percent,
614 &top->session->header.env); 615 &top->session->header.env,
616 !top->record_opts.overwrite);
615 617
616 done = 1; 618 done = 1;
617 return NULL; 619 return NULL;
@@ -807,15 +809,23 @@ static void perf_event__process_sample(struct perf_tool *tool,
807 809
808static void perf_top__mmap_read_idx(struct perf_top *top, int idx) 810static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
809{ 811{
812 struct record_opts *opts = &top->record_opts;
813 struct perf_evlist *evlist = top->evlist;
810 struct perf_sample sample; 814 struct perf_sample sample;
811 struct perf_evsel *evsel; 815 struct perf_evsel *evsel;
816 struct perf_mmap *md;
812 struct perf_session *session = top->session; 817 struct perf_session *session = top->session;
813 union perf_event *event; 818 union perf_event *event;
814 struct machine *machine; 819 struct machine *machine;
820 u64 end, start;
815 int ret; 821 int ret;
816 822
817 while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) { 823 md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
818 ret = perf_evlist__parse_sample(top->evlist, event, &sample); 824 if (perf_mmap__read_init(md, opts->overwrite, &start, &end) < 0)
825 return;
826
827 while ((event = perf_mmap__read_event(md, opts->overwrite, &start, end)) != NULL) {
828 ret = perf_evlist__parse_sample(evlist, event, &sample);
819 if (ret) { 829 if (ret) {
820 pr_err("Can't parse sample, err = %d\n", ret); 830 pr_err("Can't parse sample, err = %d\n", ret);
821 goto next_event; 831 goto next_event;
@@ -869,16 +879,120 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
869 } else 879 } else
870 ++session->evlist->stats.nr_unknown_events; 880 ++session->evlist->stats.nr_unknown_events;
871next_event: 881next_event:
872 perf_evlist__mmap_consume(top->evlist, idx); 882 perf_mmap__consume(md, opts->overwrite);
873 } 883 }
884
885 perf_mmap__read_done(md);
874} 886}
875 887
876static void perf_top__mmap_read(struct perf_top *top) 888static void perf_top__mmap_read(struct perf_top *top)
877{ 889{
890 bool overwrite = top->record_opts.overwrite;
891 struct perf_evlist *evlist = top->evlist;
892 unsigned long long start, end;
878 int i; 893 int i;
879 894
895 start = rdclock();
896 if (overwrite)
897 perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);
898
880 for (i = 0; i < top->evlist->nr_mmaps; i++) 899 for (i = 0; i < top->evlist->nr_mmaps; i++)
881 perf_top__mmap_read_idx(top, i); 900 perf_top__mmap_read_idx(top, i);
901
902 if (overwrite) {
903 perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
904 perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
905 }
906 end = rdclock();
907
908 if ((end - start) > (unsigned long long)top->delay_secs * NSEC_PER_SEC)
909 ui__warning("Too slow to read ring buffer.\n"
910 "Please try increasing the period (-c) or\n"
911 "decreasing the freq (-F) or\n"
912 "limiting the number of CPUs (-C)\n");
913}
914
915/*
916 * Check the per-event overwrite term.
917 * perf top requires a consistent term for all events.
918 * - No event has a per-event term
919 * E.g. "cpu/cpu-cycles/,cpu/instructions/"
920 * Nothing changes; return 0.
921 * - All events have the same per-event term
922 * E.g. "cpu/cpu-cycles,no-overwrite/,cpu/instructions,no-overwrite/"
923 * Use the per-event setting to replace opts->overwrite if
924 * they differ, then return 0.
925 * - Events have different per-event terms
926 * E.g. "cpu/cpu-cycles,overwrite/,cpu/instructions,no-overwrite/"
927 * Return -1.
928 * - Some events set a per-event term, but others do not.
929 * E.g. "cpu/cpu-cycles/,cpu/instructions,no-overwrite/"
930 * Return -1.
931 */
932static int perf_top__overwrite_check(struct perf_top *top)
933{
934 struct record_opts *opts = &top->record_opts;
935 struct perf_evlist *evlist = top->evlist;
936 struct perf_evsel_config_term *term;
937 struct list_head *config_terms;
938 struct perf_evsel *evsel;
939 int set, overwrite = -1;
940
941 evlist__for_each_entry(evlist, evsel) {
942 set = -1;
943 config_terms = &evsel->config_terms;
944 list_for_each_entry(term, config_terms, list) {
945 if (term->type == PERF_EVSEL__CONFIG_TERM_OVERWRITE)
946 set = term->val.overwrite ? 1 : 0;
947 }
948
949 /* no term for current and previous event (likely) */
950 if ((overwrite < 0) && (set < 0))
951 continue;
952
953 /* has term for both current and previous event, compare */
954 if ((overwrite >= 0) && (set >= 0) && (overwrite != set))
955 return -1;
956
957 /* no term for current event but has term for previous one */
958 if ((overwrite >= 0) && (set < 0))
959 return -1;
960
961 /* has term for current event */
962 if ((overwrite < 0) && (set >= 0)) {
963 /* if it's first event, set overwrite */
964 if (evsel == perf_evlist__first(evlist))
965 overwrite = set;
966 else
967 return -1;
968 }
969 }
970
971 if ((overwrite >= 0) && (opts->overwrite != overwrite))
972 opts->overwrite = overwrite;
973
974 return 0;
975}
976
977static int perf_top_overwrite_fallback(struct perf_top *top,
978 struct perf_evsel *evsel)
979{
980 struct record_opts *opts = &top->record_opts;
981 struct perf_evlist *evlist = top->evlist;
982 struct perf_evsel *counter;
983
984 if (!opts->overwrite)
985 return 0;
986
987 /* only fall back when first event fails */
988 if (evsel != perf_evlist__first(evlist))
989 return 0;
990
991 evlist__for_each_entry(evlist, counter)
992 counter->attr.write_backward = false;
993 opts->overwrite = false;
994 ui__warning("fall back to non-overwrite mode\n");
995 return 1;
882} 996}
883 997
884static int perf_top__start_counters(struct perf_top *top) 998static int perf_top__start_counters(struct perf_top *top)
@@ -888,12 +1002,33 @@ static int perf_top__start_counters(struct perf_top *top)
888 struct perf_evlist *evlist = top->evlist; 1002 struct perf_evlist *evlist = top->evlist;
889 struct record_opts *opts = &top->record_opts; 1003 struct record_opts *opts = &top->record_opts;
890 1004
1005 if (perf_top__overwrite_check(top)) {
1006 ui__error("perf top only supports a consistent per-event "
1007 "overwrite setting for all events\n");
1008 goto out_err;
1009 }
1010
891 perf_evlist__config(evlist, opts, &callchain_param); 1011 perf_evlist__config(evlist, opts, &callchain_param);
892 1012
893 evlist__for_each_entry(evlist, counter) { 1013 evlist__for_each_entry(evlist, counter) {
894try_again: 1014try_again:
895 if (perf_evsel__open(counter, top->evlist->cpus, 1015 if (perf_evsel__open(counter, top->evlist->cpus,
896 top->evlist->threads) < 0) { 1016 top->evlist->threads) < 0) {
1017
1018 /*
1019 * Specially handle overwrite fall back.
1020 * Because perf top is the only tool which has
1021 * overwrite mode by default, support
1022 * both overwrite and non-overwrite mode, and
1023 * require consistent mode for all events.
1024 *
1025 * May move it to generic code with more tools
1026 * have similar attribute.
1027 */
1028 if (perf_missing_features.write_backward &&
1029 perf_top_overwrite_fallback(top, counter))
1030 goto try_again;
1031
897 if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) { 1032 if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
898 if (verbose > 0) 1033 if (verbose > 0)
899 ui__warning("%s\n", msg); 1034 ui__warning("%s\n", msg);
@@ -1033,7 +1168,7 @@ static int __cmd_top(struct perf_top *top)
1033 1168
1034 perf_top__mmap_read(top); 1169 perf_top__mmap_read(top);
1035 1170
1036 if (hits == top->samples) 1171 if (opts->overwrite || (hits == top->samples))
1037 ret = perf_evlist__poll(top->evlist, 100); 1172 ret = perf_evlist__poll(top->evlist, 100);
1038 1173
1039 if (resize) { 1174 if (resize) {
@@ -1127,6 +1262,7 @@ int cmd_top(int argc, const char **argv)
1127 .uses_mmap = true, 1262 .uses_mmap = true,
1128 }, 1263 },
1129 .proc_map_timeout = 500, 1264 .proc_map_timeout = 500,
1265 .overwrite = 1,
1130 }, 1266 },
1131 .max_stack = sysctl_perf_event_max_stack, 1267 .max_stack = sysctl_perf_event_max_stack,
1132 .sym_pcnt_filter = 5, 1268 .sym_pcnt_filter = 5,
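Taken together, the builtin-top.c changes amount to the read cycle below -- a condensed sketch of the patch's logic with error handling and sample parsing trimmed (the function name is mine; everything else is as in the patch):

    static void read_all_mmaps(struct perf_top *top)
    {
            struct perf_evlist *evlist = top->evlist;
            bool overwrite = top->record_opts.overwrite;
            int i;

            /* pause the kernel's writes so a backward buffer stays stable */
            if (overwrite)
                    perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);

            for (i = 0; i < evlist->nr_mmaps; i++) {
                    struct perf_mmap *md = overwrite ? &evlist->overwrite_mmap[i] :
                                                       &evlist->mmap[i];
                    union perf_event *event;
                    u64 start, end;

                    if (perf_mmap__read_init(md, overwrite, &start, &end) < 0)
                            continue;       /* -EAGAIN: nothing new in this map */
                    while ((event = perf_mmap__read_event(md, overwrite, &start, end))) {
                            /* parse the sample and feed the TUI here */
                            perf_mmap__consume(md, overwrite);
                    }
                    perf_mmap__read_done(md);       /* rewind md->prev to head */
            }

            /* re-arm the backward buffer for the next refresh cycle */
            if (overwrite) {
                    perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
                    perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
            }
    }

Pausing via BKW_MMAP_DATA_PENDING keeps the kernel from overwriting records mid-read; the EMPTY/RUNNING toggles resume writing for the next cycle.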
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 51abdb0a4047..790ec25919a0 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -33,7 +33,6 @@ arch/s390/include/uapi/asm/kvm.h
33arch/s390/include/uapi/asm/kvm_perf.h 33arch/s390/include/uapi/asm/kvm_perf.h
34arch/s390/include/uapi/asm/ptrace.h 34arch/s390/include/uapi/asm/ptrace.h
35arch/s390/include/uapi/asm/sie.h 35arch/s390/include/uapi/asm/sie.h
36arch/s390/include/uapi/asm/unistd.h
37arch/arm/include/uapi/asm/kvm.h 36arch/arm/include/uapi/asm/kvm.h
38arch/arm64/include/uapi/asm/kvm.h 37arch/arm64/include/uapi/asm/kvm.h
39arch/alpha/include/uapi/asm/errno.h 38arch/alpha/include/uapi/asm/errno.h
diff --git a/tools/perf/pmu-events/arch/arm64/cortex-a53/branch.json b/tools/perf/pmu-events/arch/arm64/cortex-a53/branch.json
new file mode 100644
index 000000000000..3b6208763e50
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/cortex-a53/branch.json
@@ -0,0 +1,27 @@
1[
2 {
3 "EventCode": "0x7A",
4 "EventName": "BR_INDIRECT_SPEC",
5 "BriefDescription": "Branch speculatively executed - Indirect branch"
6 },
7 {
8 "EventCode": "0xC9",
9 "EventName": "BR_COND",
10 "BriefDescription": "Conditional branch executed"
11 },
12 {
13 "EventCode": "0xCA",
14 "EventName": "BR_INDIRECT_MISPRED",
15 "BriefDescription": "Indirect branch mispredicted"
16 },
17 {
18 "EventCode": "0xCB",
19 "EventName": "BR_INDIRECT_MISPRED_ADDR",
20 "BriefDescription": "Indirect branch mispredicted because of address miscompare"
21 },
22 {
23 "EventCode": "0xCC",
24 "EventName": "BR_COND_MISPRED",
25 "BriefDescription": "Conditional branch mispredicted"
26 }
27]
diff --git a/tools/perf/pmu-events/arch/arm64/cortex-a53/bus.json b/tools/perf/pmu-events/arch/arm64/cortex-a53/bus.json
new file mode 100644
index 000000000000..480d9f7460ab
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/cortex-a53/bus.json
@@ -0,0 +1,22 @@
1[
2 {
3 "EventCode": "0x60",
4 "EventName": "BUS_ACCESS_LD",
5 "BriefDescription": "Bus access - Read"
6 },
7 {
8 "EventCode": "0x61",
9 "EventName": "BUS_ACCESS_ST",
10 "BriefDescription": "Bus access - Write"
11 },
12 {
13 "EventCode": "0xC0",
14 "EventName": "EXT_MEM_REQ",
15 "BriefDescription": "External memory request"
16 },
17 {
18 "EventCode": "0xC1",
19 "EventName": "EXT_MEM_REQ_NC",
20 "BriefDescription": "Non-cacheable external memory request"
21 }
22]
diff --git a/tools/perf/pmu-events/arch/arm64/cortex-a53/cache.json b/tools/perf/pmu-events/arch/arm64/cortex-a53/cache.json
new file mode 100644
index 000000000000..11baad6344b9
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/cortex-a53/cache.json
@@ -0,0 +1,27 @@
1[
2 {
3 "EventCode": "0xC2",
4 "EventName": "PREFETCH_LINEFILL",
5 "BriefDescription": "Linefill because of prefetch"
6 },
7 {
8 "EventCode": "0xC3",
9 "EventName": "PREFETCH_LINEFILL_DROP",
10 "BriefDescription": "Instruction Cache Throttle occurred"
11 },
12 {
13 "EventCode": "0xC4",
14 "EventName": "READ_ALLOC_ENTER",
15 "BriefDescription": "Entering read allocate mode"
16 },
17 {
18 "EventCode": "0xC5",
19 "EventName": "READ_ALLOC",
20 "BriefDescription": "Read allocate mode"
21 },
22 {
23 "EventCode": "0xC8",
24 "EventName": "EXT_SNOOP",
25 "BriefDescription": "SCU Snooped data from another CPU for this CPU"
26 }
27]
diff --git a/tools/perf/pmu-events/arch/arm64/cortex-a53/memory.json b/tools/perf/pmu-events/arch/arm64/cortex-a53/memory.json
new file mode 100644
index 000000000000..480d9f7460ab
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/cortex-a53/memory.json
@@ -0,0 +1,22 @@
1[
2 {
3 "EventCode": "0x60",
4 "EventName": "BUS_ACCESS_LD",
5 "BriefDescription": "Bus access - Read"
6 },
7 {
8 "EventCode": "0x61",
9 "EventName": "BUS_ACCESS_ST",
10 "BriefDescription": "Bus access - Write"
11 },
12 {
13 "EventCode": "0xC0",
14 "EventName": "EXT_MEM_REQ",
15 "BriefDescription": "External memory request"
16 },
17 {
18 "EventCode": "0xC1",
19 "EventName": "EXT_MEM_REQ_NC",
20 "BriefDescription": "Non-cacheable external memory request"
21 }
22]
diff --git a/tools/perf/pmu-events/arch/arm64/cortex-a53/other.json b/tools/perf/pmu-events/arch/arm64/cortex-a53/other.json
new file mode 100644
index 000000000000..73a22402d003
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/cortex-a53/other.json
@@ -0,0 +1,32 @@
1[
2 {
3 "EventCode": "0x86",
4 "EventName": "EXC_IRQ",
5 "BriefDescription": "Exception taken, IRQ"
6 },
7 {
8 "EventCode": "0x87",
9 "EventName": "EXC_FIQ",
10 "BriefDescription": "Exception taken, FIQ"
11 },
12 {
13 "EventCode": "0xC6",
14 "EventName": "PRE_DECODE_ERR",
15 "BriefDescription": "Pre-decode error"
16 },
17 {
18 "EventCode": "0xD0",
19 "EventName": "L1I_CACHE_ERR",
20 "BriefDescription": "L1 Instruction Cache (data or tag) memory error"
21 },
22 {
23 "EventCode": "0xD1",
24 "EventName": "L1D_CACHE_ERR",
25 "BriefDescription": "L1 Data Cache (data, tag or dirty) memory error, correctable or non-correctable"
26 },
27 {
28 "EventCode": "0xD2",
29 "EventName": "TLB_ERR",
30 "BriefDescription": "TLB memory error"
31 }
32]
diff --git a/tools/perf/pmu-events/arch/arm64/cortex-a53/pipeline.json b/tools/perf/pmu-events/arch/arm64/cortex-a53/pipeline.json
new file mode 100644
index 000000000000..3149fb90555a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/cortex-a53/pipeline.json
@@ -0,0 +1,52 @@
1[
2 {
3 "EventCode": "0xC7",
4 "EventName": "STALL_SB_FULL",
5 "BriefDescription": "Data Write operation that stalls the pipeline because the store buffer is full"
6 },
7 {
8 "EventCode": "0xE0",
9 "EventName": "OTHER_IQ_DEP_STALL",
10 "BriefDescription": "Cycles that the DPU IQ is empty and that is not because of a recent micro-TLB miss, instruction cache miss or pre-decode error"
11 },
12 {
13 "EventCode": "0xE1",
14 "EventName": "IC_DEP_STALL",
15 "BriefDescription": "Cycles the DPU IQ is empty and there is an instruction cache miss being processed"
16 },
17 {
18 "EventCode": "0xE2",
19 "EventName": "IUTLB_DEP_STALL",
20 "BriefDescription": "Cycles the DPU IQ is empty and there is an instruction micro-TLB miss being processed"
21 },
22 {
23 "EventCode": "0xE3",
24 "EventName": "DECODE_DEP_STALL",
25 "BriefDescription": "Cycles the DPU IQ is empty and there is a pre-decode error being processed"
26 },
27 {
28 "EventCode": "0xE4",
29 "EventName": "OTHER_INTERLOCK_STALL",
30 "BriefDescription": "Cycles there is an interlock other than Advanced SIMD/Floating-point instructions or load/store instruction"
31 },
32 {
33 "EventCode": "0xE5",
34 "EventName": "AGU_DEP_STALL",
35 "BriefDescription": "Cycles there is an interlock for a load/store instruction waiting for data to calculate the address in the AGU"
36 },
37 {
38 "EventCode": "0xE6",
39 "EventName": "SIMD_DEP_STALL",
40 "BriefDescription": "Cycles there is an interlock for an Advanced SIMD/Floating-point operation."
41 },
42 {
43 "EventCode": "0xE7",
44 "EventName": "LD_DEP_STALL",
45 "BriefDescription": "Cycles there is a stall in the Wr stage because of a load miss"
46 },
47 {
48 "EventCode": "0xE8",
49 "EventName": "ST_DEP_STALL",
50 "BriefDescription": "Cycles there is a stall in the Wr stage because of a store"
51 }
52]
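Each JSON object pairs a raw PMU event code with a symbolic name; at build time the jevents tool compiles these files into C tables so perf can resolve the names at runtime. Roughly what one entry becomes (field names from pmu-events.h; treat the exact output as illustrative):

    {
            .name = "br_indirect_spec",
            .event = "event=0x7a",
            .desc = "Branch speculatively executed - Indirect branch",
    },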
diff --git a/tools/perf/pmu-events/arch/arm64/mapfile.csv b/tools/perf/pmu-events/arch/arm64/mapfile.csv
index 219d6756134e..e61c9ca6cf9e 100644
--- a/tools/perf/pmu-events/arch/arm64/mapfile.csv
+++ b/tools/perf/pmu-events/arch/arm64/mapfile.csv
@@ -13,3 +13,4 @@
13# 13#
14#Family-model,Version,Filename,EventType 14#Family-model,Version,Filename,EventType
150x00000000420f5160,v1,cavium,core 150x00000000420f5160,v1,cavium,core
160x00000000410fd03[[:xdigit:]],v1,cortex-a53,core
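The new mapfile row is a POSIX regex matched against the CPU identifier perf derives from MIDR_EL1, decoded roughly as (annotation mine):

    0x00000000410fd03[[:xdigit:]]
      `- implementer 0x41 (Arm Ltd.), variant 0x0, architecture 0xf,
         part number 0xd03 (Cortex-A53), revision nibble wildcarded

so any Cortex-A53 revision selects the cortex-a53 event directory added above.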
diff --git a/tools/perf/tests/backward-ring-buffer.c b/tools/perf/tests/backward-ring-buffer.c
index 4035d43523c3..e0b1b414d466 100644
--- a/tools/perf/tests/backward-ring-buffer.c
+++ b/tools/perf/tests/backward-ring-buffer.c
@@ -31,10 +31,12 @@ static int count_samples(struct perf_evlist *evlist, int *sample_count,
31 int i; 31 int i;
32 32
33 for (i = 0; i < evlist->nr_mmaps; i++) { 33 for (i = 0; i < evlist->nr_mmaps; i++) {
34 struct perf_mmap *map = &evlist->overwrite_mmap[i];
34 union perf_event *event; 35 union perf_event *event;
36 u64 start, end;
35 37
36 perf_mmap__read_catchup(&evlist->overwrite_mmap[i]); 38 perf_mmap__read_init(map, true, &start, &end);
37 while ((event = perf_mmap__read_backward(&evlist->overwrite_mmap[i])) != NULL) { 39 while ((event = perf_mmap__read_event(map, true, &start, end)) != NULL) {
38 const u32 type = event->header.type; 40 const u32 type = event->header.type;
39 41
40 switch (type) { 42 switch (type) {
@@ -49,6 +51,7 @@ static int count_samples(struct perf_evlist *evlist, int *sample_count,
49 return TEST_FAIL; 51 return TEST_FAIL;
50 } 52 }
51 } 53 }
54 perf_mmap__read_done(map);
52 } 55 }
53 return TEST_OK; 56 return TEST_OK;
54} 57}
diff --git a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
index 8b3da21a08f1..c446c894b297 100755
--- a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
@@ -22,10 +22,23 @@ trace_libc_inet_pton_backtrace() {
22 expected[4]="rtt min.*" 22 expected[4]="rtt min.*"
23 expected[5]="[0-9]+\.[0-9]+[[:space:]]+probe_libc:inet_pton:\([[:xdigit:]]+\)" 23 expected[5]="[0-9]+\.[0-9]+[[:space:]]+probe_libc:inet_pton:\([[:xdigit:]]+\)"
24 expected[6]=".*inet_pton[[:space:]]\($libc\)$" 24 expected[6]=".*inet_pton[[:space:]]\($libc\)$"
25 expected[7]="getaddrinfo[[:space:]]\($libc\)$" 25 case "$(uname -m)" in
26 expected[8]=".*\(.*/bin/ping.*\)$" 26 s390x)
27 27 eventattr='call-graph=dwarf'
28 perf trace --no-syscalls -e probe_libc:inet_pton/max-stack=3/ ping -6 -c 1 ::1 2>&1 | grep -v ^$ | while read line ; do 28 expected[7]="gaih_inet[[:space:]]\(inlined\)$"
29 expected[8]="__GI_getaddrinfo[[:space:]]\(inlined\)$"
30 expected[9]="main[[:space:]]\(.*/bin/ping.*\)$"
31 expected[10]="__libc_start_main[[:space:]]\($libc\)$"
32 expected[11]="_start[[:space:]]\(.*/bin/ping.*\)$"
33 ;;
34 *)
35 eventattr='max-stack=3'
36 expected[7]="getaddrinfo[[:space:]]\($libc\)$"
37 expected[8]=".*\(.*/bin/ping.*\)$"
38 ;;
39 esac
40
41 perf trace --no-syscalls -e probe_libc:inet_pton/$eventattr/ ping -6 -c 1 ::1 2>&1 | grep -v ^$ | while read line ; do
29 echo $line 42 echo $line
30 echo "$line" | egrep -q "${expected[$idx]}" 43 echo "$line" | egrep -q "${expected[$idx]}"
31 if [ $? -ne 0 ] ; then 44 if [ $? -ne 0 ] ; then
@@ -33,7 +46,7 @@ trace_libc_inet_pton_backtrace() {
33 exit 1 46 exit 1
34 fi 47 fi
35 let idx+=1 48 let idx+=1
36 [ $idx -eq 9 ] && break 49 [ -z "${expected[$idx]}" ] && break
37 done 50 done
38} 51}
39 52
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 68146f4620a5..6495ee55d9c3 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -608,7 +608,8 @@ static int hist_browser__title(struct hist_browser *browser, char *bf, size_t si
608 return browser->title ? browser->title(browser, bf, size) : 0; 608 return browser->title ? browser->title(browser, bf, size) : 0;
609} 609}
610 610
611int hist_browser__run(struct hist_browser *browser, const char *help) 611int hist_browser__run(struct hist_browser *browser, const char *help,
612 bool warn_lost_event)
612{ 613{
613 int key; 614 int key;
614 char title[160]; 615 char title[160];
@@ -638,8 +639,9 @@ int hist_browser__run(struct hist_browser *browser, const char *help)
638 nr_entries = hist_browser__nr_entries(browser); 639 nr_entries = hist_browser__nr_entries(browser);
639 ui_browser__update_nr_entries(&browser->b, nr_entries); 640 ui_browser__update_nr_entries(&browser->b, nr_entries);
640 641
641 if (browser->hists->stats.nr_lost_warned != 642 if (warn_lost_event &&
642 browser->hists->stats.nr_events[PERF_RECORD_LOST]) { 643 (browser->hists->stats.nr_lost_warned !=
644 browser->hists->stats.nr_events[PERF_RECORD_LOST])) {
643 browser->hists->stats.nr_lost_warned = 645 browser->hists->stats.nr_lost_warned =
644 browser->hists->stats.nr_events[PERF_RECORD_LOST]; 646 browser->hists->stats.nr_events[PERF_RECORD_LOST];
645 ui_browser__warn_lost_events(&browser->b); 647 ui_browser__warn_lost_events(&browser->b);
@@ -2763,7 +2765,8 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
2763 bool left_exits, 2765 bool left_exits,
2764 struct hist_browser_timer *hbt, 2766 struct hist_browser_timer *hbt,
2765 float min_pcnt, 2767 float min_pcnt,
2766 struct perf_env *env) 2768 struct perf_env *env,
2769 bool warn_lost_event)
2767{ 2770{
2768 struct hists *hists = evsel__hists(evsel); 2771 struct hists *hists = evsel__hists(evsel);
2769 struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env); 2772 struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env);
@@ -2844,7 +2847,8 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
2844 2847
2845 nr_options = 0; 2848 nr_options = 0;
2846 2849
2847 key = hist_browser__run(browser, helpline); 2850 key = hist_browser__run(browser, helpline,
2851 warn_lost_event);
2848 2852
2849 if (browser->he_selection != NULL) { 2853 if (browser->he_selection != NULL) {
2850 thread = hist_browser__selected_thread(browser); 2854 thread = hist_browser__selected_thread(browser);
@@ -3184,7 +3188,8 @@ static void perf_evsel_menu__write(struct ui_browser *browser,
3184 3188
3185static int perf_evsel_menu__run(struct perf_evsel_menu *menu, 3189static int perf_evsel_menu__run(struct perf_evsel_menu *menu,
3186 int nr_events, const char *help, 3190 int nr_events, const char *help,
3187 struct hist_browser_timer *hbt) 3191 struct hist_browser_timer *hbt,
3192 bool warn_lost_event)
3188{ 3193{
3189 struct perf_evlist *evlist = menu->b.priv; 3194 struct perf_evlist *evlist = menu->b.priv;
3190 struct perf_evsel *pos; 3195 struct perf_evsel *pos;
@@ -3203,7 +3208,9 @@ static int perf_evsel_menu__run(struct perf_evsel_menu *menu,
3203 case K_TIMER: 3208 case K_TIMER:
3204 hbt->timer(hbt->arg); 3209 hbt->timer(hbt->arg);
3205 3210
3206 if (!menu->lost_events_warned && menu->lost_events) { 3211 if (!menu->lost_events_warned &&
3212 menu->lost_events &&
3213 warn_lost_event) {
3207 ui_browser__warn_lost_events(&menu->b); 3214 ui_browser__warn_lost_events(&menu->b);
3208 menu->lost_events_warned = true; 3215 menu->lost_events_warned = true;
3209 } 3216 }
@@ -3224,7 +3231,8 @@ browse_hists:
3224 key = perf_evsel__hists_browse(pos, nr_events, help, 3231 key = perf_evsel__hists_browse(pos, nr_events, help,
3225 true, hbt, 3232 true, hbt,
3226 menu->min_pcnt, 3233 menu->min_pcnt,
3227 menu->env); 3234 menu->env,
3235 warn_lost_event);
3228 ui_browser__show_title(&menu->b, title); 3236 ui_browser__show_title(&menu->b, title);
3229 switch (key) { 3237 switch (key) {
3230 case K_TAB: 3238 case K_TAB:
@@ -3282,7 +3290,8 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
3282 int nr_entries, const char *help, 3290 int nr_entries, const char *help,
3283 struct hist_browser_timer *hbt, 3291 struct hist_browser_timer *hbt,
3284 float min_pcnt, 3292 float min_pcnt,
3285 struct perf_env *env) 3293 struct perf_env *env,
3294 bool warn_lost_event)
3286{ 3295{
3287 struct perf_evsel *pos; 3296 struct perf_evsel *pos;
3288 struct perf_evsel_menu menu = { 3297 struct perf_evsel_menu menu = {
@@ -3309,13 +3318,15 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
3309 menu.b.width = line_len; 3318 menu.b.width = line_len;
3310 } 3319 }
3311 3320
3312 return perf_evsel_menu__run(&menu, nr_entries, help, hbt); 3321 return perf_evsel_menu__run(&menu, nr_entries, help,
3322 hbt, warn_lost_event);
3313} 3323}
3314 3324
3315int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help, 3325int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
3316 struct hist_browser_timer *hbt, 3326 struct hist_browser_timer *hbt,
3317 float min_pcnt, 3327 float min_pcnt,
3318 struct perf_env *env) 3328 struct perf_env *env,
3329 bool warn_lost_event)
3319{ 3330{
3320 int nr_entries = evlist->nr_entries; 3331 int nr_entries = evlist->nr_entries;
3321 3332
@@ -3325,7 +3336,7 @@ single_entry:
3325 3336
3326 return perf_evsel__hists_browse(first, nr_entries, help, 3337 return perf_evsel__hists_browse(first, nr_entries, help,
3327 false, hbt, min_pcnt, 3338 false, hbt, min_pcnt,
3328 env); 3339 env, warn_lost_event);
3329 } 3340 }
3330 3341
3331 if (symbol_conf.event_group) { 3342 if (symbol_conf.event_group) {
@@ -3342,5 +3353,6 @@ single_entry:
3342 } 3353 }
3343 3354
3344 return __perf_evlist__tui_browse_hists(evlist, nr_entries, help, 3355 return __perf_evlist__tui_browse_hists(evlist, nr_entries, help,
3345 hbt, min_pcnt, env); 3356 hbt, min_pcnt, env,
3357 warn_lost_event);
3346} 3358}
diff --git a/tools/perf/ui/browsers/hists.h b/tools/perf/ui/browsers/hists.h
index ba431777f559..9428bee076f2 100644
--- a/tools/perf/ui/browsers/hists.h
+++ b/tools/perf/ui/browsers/hists.h
@@ -28,7 +28,8 @@ struct hist_browser {
28 28
29struct hist_browser *hist_browser__new(struct hists *hists); 29struct hist_browser *hist_browser__new(struct hists *hists);
30void hist_browser__delete(struct hist_browser *browser); 30void hist_browser__delete(struct hist_browser *browser);
31int hist_browser__run(struct hist_browser *browser, const char *help); 31int hist_browser__run(struct hist_browser *browser, const char *help,
32 bool warn_lost_event);
32void hist_browser__init(struct hist_browser *browser, 33void hist_browser__init(struct hist_browser *browser,
33 struct hists *hists); 34 struct hists *hists);
34#endif /* _PERF_UI_BROWSER_HISTS_H_ */ 35#endif /* _PERF_UI_BROWSER_HISTS_H_ */
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index ac35cd214feb..e5fc14e53c05 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -715,28 +715,11 @@ union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int
715 return perf_mmap__read_forward(md); 715 return perf_mmap__read_forward(md);
716} 716}
717 717
718union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
719{
720 struct perf_mmap *md = &evlist->mmap[idx];
721
722 /*
723 * No need to check messup for backward ring buffer:
724 * We can always read arbitrary long data from a backward
725 * ring buffer unless we forget to pause it before reading.
726 */
727 return perf_mmap__read_backward(md);
728}
729
730union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) 718union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
731{ 719{
732 return perf_evlist__mmap_read_forward(evlist, idx); 720 return perf_evlist__mmap_read_forward(evlist, idx);
733} 721}
734 722
735void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
736{
737 perf_mmap__read_catchup(&evlist->mmap[idx]);
738}
739
740void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx) 723void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
741{ 724{
742 perf_mmap__consume(&evlist->mmap[idx], false); 725 perf_mmap__consume(&evlist->mmap[idx], false);
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 75f8e0ad5d76..336b838e6957 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -133,10 +133,6 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx);
133 133
134union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, 134union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist,
135 int idx); 135 int idx);
136union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist,
137 int idx);
138void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx);
139
140void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx); 136void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
141 137
142int perf_evlist__open(struct perf_evlist *evlist); 138int perf_evlist__open(struct perf_evlist *evlist);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index ff359c9ece2e..ef351688b797 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -41,17 +41,7 @@
41 41
42#include "sane_ctype.h" 42#include "sane_ctype.h"
43 43
44static struct { 44struct perf_missing_features perf_missing_features;
45 bool sample_id_all;
46 bool exclude_guest;
47 bool mmap2;
48 bool cloexec;
49 bool clockid;
50 bool clockid_wrong;
51 bool lbr_flags;
52 bool write_backward;
53 bool group_read;
54} perf_missing_features;
55 45
56static clockid_t clockid; 46static clockid_t clockid;
57 47
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 846e41644525..a7487c6d1866 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -149,6 +149,20 @@ union u64_swap {
149 u32 val32[2]; 149 u32 val32[2];
150}; 150};
151 151
152struct perf_missing_features {
153 bool sample_id_all;
154 bool exclude_guest;
155 bool mmap2;
156 bool cloexec;
157 bool clockid;
158 bool clockid_wrong;
159 bool lbr_flags;
160 bool write_backward;
161 bool group_read;
162};
163
164extern struct perf_missing_features perf_missing_features;
165
152struct cpu_map; 166struct cpu_map;
153struct target; 167struct target;
154struct thread_map; 168struct thread_map;
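Exporting perf_missing_features lets individual tools react to the feature probing done during evsel open. A minimal sketch of the kind of check this enables, mirroring the fallback added in builtin-top.c (variable names assumed):

    /* after a failed perf_evsel__open() */
    if (perf_missing_features.write_backward && opts->overwrite) {
            /* kernel has no backward ring buffer: degrade gracefully */
            opts->overwrite = false;
    }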
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index f6630cb95eff..02721b579746 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -430,7 +430,8 @@ int hist_entry__tui_annotate(struct hist_entry *he, struct perf_evsel *evsel,
430int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help, 430int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
431 struct hist_browser_timer *hbt, 431 struct hist_browser_timer *hbt,
432 float min_pcnt, 432 float min_pcnt,
433 struct perf_env *env); 433 struct perf_env *env,
434 bool warn_lost_event);
434int script_browse(const char *script_opt); 435int script_browse(const char *script_opt);
435#else 436#else
436static inline 437static inline
@@ -438,7 +439,8 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused,
438 const char *help __maybe_unused, 439 const char *help __maybe_unused,
439 struct hist_browser_timer *hbt __maybe_unused, 440 struct hist_browser_timer *hbt __maybe_unused,
440 float min_pcnt __maybe_unused, 441 float min_pcnt __maybe_unused,
441 struct perf_env *env __maybe_unused) 442 struct perf_env *env __maybe_unused,
443 bool warn_lost_event __maybe_unused)
442{ 444{
443 return 0; 445 return 0;
444} 446}
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 05076e683938..91531a7c8fbf 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -22,29 +22,27 @@ size_t perf_mmap__mmap_len(struct perf_mmap *map)
22 22
23/* When check_messup is true, 'end' must point to a good entry */ 23/* When check_messup is true, 'end' must point to a good entry */
24static union perf_event *perf_mmap__read(struct perf_mmap *map, 24static union perf_event *perf_mmap__read(struct perf_mmap *map,
25 u64 start, u64 end, u64 *prev) 25 u64 *startp, u64 end)
26{ 26{
27 unsigned char *data = map->base + page_size; 27 unsigned char *data = map->base + page_size;
28 union perf_event *event = NULL; 28 union perf_event *event = NULL;
29 int diff = end - start; 29 int diff = end - *startp;
30 30
31 if (diff >= (int)sizeof(event->header)) { 31 if (diff >= (int)sizeof(event->header)) {
32 size_t size; 32 size_t size;
33 33
34 event = (union perf_event *)&data[start & map->mask]; 34 event = (union perf_event *)&data[*startp & map->mask];
35 size = event->header.size; 35 size = event->header.size;
36 36
37 if (size < sizeof(event->header) || diff < (int)size) { 37 if (size < sizeof(event->header) || diff < (int)size)
38 event = NULL; 38 return NULL;
39 goto broken_event;
40 }
41 39
42 /* 40 /*
43 * Event straddles the mmap boundary -- header should always 41 * Event straddles the mmap boundary -- header should always
44 * be inside due to u64 alignment of output. 42 * be inside due to u64 alignment of output.
45 */ 43 */
46 if ((start & map->mask) + size != ((start + size) & map->mask)) { 44 if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
47 unsigned int offset = start; 45 unsigned int offset = *startp;
48 unsigned int len = min(sizeof(*event), size), cpy; 46 unsigned int len = min(sizeof(*event), size), cpy;
49 void *dst = map->event_copy; 47 void *dst = map->event_copy;
50 48
@@ -59,20 +57,19 @@ static union perf_event *perf_mmap__read(struct perf_mmap *map,
59 event = (union perf_event *)map->event_copy; 57 event = (union perf_event *)map->event_copy;
60 } 58 }
61 59
62 start += size; 60 *startp += size;
63 } 61 }
64 62
65broken_event:
66 if (prev)
67 *prev = start;
68
69 return event; 63 return event;
70} 64}
71 65
66/*
67 * legacy interface for mmap read.
68 * Don't use it. Use perf_mmap__read_event().
69 */
72union perf_event *perf_mmap__read_forward(struct perf_mmap *map) 70union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
73{ 71{
74 u64 head; 72 u64 head;
75 u64 old = map->prev;
76 73
77 /* 74 /*
78 * Check if event was unmapped due to a POLLHUP/POLLERR. 75 * Check if event was unmapped due to a POLLHUP/POLLERR.
@@ -82,13 +79,26 @@ union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
82 79
83 head = perf_mmap__read_head(map); 80 head = perf_mmap__read_head(map);
84 81
85 return perf_mmap__read(map, old, head, &map->prev); 82 return perf_mmap__read(map, &map->prev, head);
86} 83}
87 84
88union perf_event *perf_mmap__read_backward(struct perf_mmap *map) 85/*
86 * Read event from ring buffer one by one.
87 * Return one event for each call.
88 *
89 * Usage:
90 * perf_mmap__read_init()
91 * while(event = perf_mmap__read_event()) {
92 * //process the event
93 * perf_mmap__consume()
94 * }
95 * perf_mmap__read_done()
96 */
97union perf_event *perf_mmap__read_event(struct perf_mmap *map,
98 bool overwrite,
99 u64 *startp, u64 end)
89{ 100{
90 u64 head, end; 101 union perf_event *event;
91 u64 start = map->prev;
92 102
93 /* 103 /*
94 * Check if event was unmapped due to a POLLHUP/POLLERR. 104 * Check if event was unmapped due to a POLLHUP/POLLERR.
@@ -96,40 +106,19 @@ union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
96 if (!refcount_read(&map->refcnt)) 106 if (!refcount_read(&map->refcnt))
97 return NULL; 107 return NULL;
98 108
99 head = perf_mmap__read_head(map); 109 if (startp == NULL)
100 if (!head)
101 return NULL; 110 return NULL;
102 111
103 /* 112 /* non-overwrite doesn't pause the ring buffer */
104 * 'head' pointer starts from 0. Kernel minus sizeof(record) form 113 if (!overwrite)
105 * it each time when kernel writes to it, so in fact 'head' is 114 end = perf_mmap__read_head(map);
106 * negative. 'end' pointer is made manually by adding the size of
107 * the ring buffer to 'head' pointer, means the validate data can
108 * read is the whole ring buffer. If 'end' is positive, the ring
109 * buffer has not fully filled, so we must adjust 'end' to 0.
110 *
111 * However, since both 'head' and 'end' is unsigned, we can't
112 * simply compare 'end' against 0. Here we compare '-head' and
113 * the size of the ring buffer, where -head is the number of bytes
114 * kernel write to the ring buffer.
115 */
116 if (-head < (u64)(map->mask + 1))
117 end = 0;
118 else
119 end = head + map->mask + 1;
120
121 return perf_mmap__read(map, start, end, &map->prev);
122}
123 115
124void perf_mmap__read_catchup(struct perf_mmap *map) 116 event = perf_mmap__read(map, startp, end);
125{
126 u64 head;
127 117
128 if (!refcount_read(&map->refcnt)) 118 if (!overwrite)
129 return; 119 map->prev = *startp;
130 120
131 head = perf_mmap__read_head(map); 121 return event;
132 map->prev = head;
133} 122}
134 123
135static bool perf_mmap__empty(struct perf_mmap *map) 124static bool perf_mmap__empty(struct perf_mmap *map)
@@ -267,41 +256,60 @@ static int overwrite_rb_find_range(void *buf, int mask, u64 head, u64 *start, u6
267 return -1; 256 return -1;
268} 257}
269 258
270int perf_mmap__push(struct perf_mmap *md, bool overwrite, 259/*
271 void *to, int push(void *to, void *buf, size_t size)) 260 * Report the start and end of the available data in the ring buffer
261 */
262int perf_mmap__read_init(struct perf_mmap *md, bool overwrite,
263 u64 *startp, u64 *endp)
272{ 264{
273 u64 head = perf_mmap__read_head(md); 265 u64 head = perf_mmap__read_head(md);
274 u64 old = md->prev; 266 u64 old = md->prev;
275 u64 end = head, start = old;
276 unsigned char *data = md->base + page_size; 267 unsigned char *data = md->base + page_size;
277 unsigned long size; 268 unsigned long size;
278 void *buf;
279 int rc = 0;
280 269
281 start = overwrite ? head : old; 270 *startp = overwrite ? head : old;
282 end = overwrite ? old : head; 271 *endp = overwrite ? old : head;
283 272
284 if (start == end) 273 if (*startp == *endp)
285 return 0; 274 return -EAGAIN;
286 275
287 size = end - start; 276 size = *endp - *startp;
288 if (size > (unsigned long)(md->mask) + 1) { 277 if (size > (unsigned long)(md->mask) + 1) {
289 if (!overwrite) { 278 if (!overwrite) {
290 WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n"); 279 WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
291 280
292 md->prev = head; 281 md->prev = head;
293 perf_mmap__consume(md, overwrite); 282 perf_mmap__consume(md, overwrite);
294 return 0; 283 return -EAGAIN;
295 } 284 }
296 285
297 /* 286 /*
298 * Backward ring buffer is full. We still have a chance to read 287 * Backward ring buffer is full. We still have a chance to read
299 * most of data from it. 288 * most of data from it.
300 */ 289 */
301 if (overwrite_rb_find_range(data, md->mask, head, &start, &end)) 290 if (overwrite_rb_find_range(data, md->mask, head, startp, endp))
302 return -1; 291 return -EINVAL;
303 } 292 }
304 293
294 return 0;
295}
296
297int perf_mmap__push(struct perf_mmap *md, bool overwrite,
298 void *to, int push(void *to, void *buf, size_t size))
299{
300 u64 head = perf_mmap__read_head(md);
301 u64 end, start;
302 unsigned char *data = md->base + page_size;
303 unsigned long size;
304 void *buf;
305 int rc = 0;
306
307 rc = perf_mmap__read_init(md, overwrite, &start, &end);
308 if (rc < 0)
309 return (rc == -EAGAIN) ? 0 : -1;
310
311 size = end - start;
312
305 if ((start & md->mask) + size != (end & md->mask)) { 313 if ((start & md->mask) + size != (end & md->mask)) {
306 buf = &data[start & md->mask]; 314 buf = &data[start & md->mask];
307 size = md->mask + 1 - (start & md->mask); 315 size = md->mask + 1 - (start & md->mask);
@@ -327,3 +335,14 @@ int perf_mmap__push(struct perf_mmap *md, bool overwrite,
327out: 335out:
328 return rc; 336 return rc;
329} 337}
338
339/*
340 * Mandatory for overwrite mode.
341 * The direction of overwrite mode is backward;
342 * the last perf_mmap__read() will set the tail to map->prev.
343 * Correct map->prev to head, which is the end of the next read.
344 */
345void perf_mmap__read_done(struct perf_mmap *map)
346{
347 map->prev = perf_mmap__read_head(map);
348}
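A caller-facing detail worth noting: perf_mmap__read_init() now distinguishes an empty buffer (-EAGAIN) from an unrecoverable one (-EINVAL, when overwrite_rb_find_range() fails), and perf_mmap__push() maps that back onto its old 0/-1 contract:

    rc = perf_mmap__read_init(md, overwrite, &start, &end);
    if (rc < 0)
            return (rc == -EAGAIN) ? 0 : -1;   /* empty is not an error */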
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index e43d7b55a55f..ec7d3a24e276 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -65,8 +65,6 @@ void perf_mmap__put(struct perf_mmap *map);
65 65
66void perf_mmap__consume(struct perf_mmap *map, bool overwrite); 66void perf_mmap__consume(struct perf_mmap *map, bool overwrite);
67 67
68void perf_mmap__read_catchup(struct perf_mmap *md);
69
70static inline u64 perf_mmap__read_head(struct perf_mmap *mm) 68static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
71{ 69{
72 struct perf_event_mmap_page *pc = mm->base; 70 struct perf_event_mmap_page *pc = mm->base;
@@ -87,11 +85,17 @@ static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
87} 85}
88 86
89union perf_event *perf_mmap__read_forward(struct perf_mmap *map); 87union perf_event *perf_mmap__read_forward(struct perf_mmap *map);
90union perf_event *perf_mmap__read_backward(struct perf_mmap *map); 88
89union perf_event *perf_mmap__read_event(struct perf_mmap *map,
90 bool overwrite,
91 u64 *startp, u64 end);
91 92
92int perf_mmap__push(struct perf_mmap *md, bool backward, 93int perf_mmap__push(struct perf_mmap *md, bool backward,
93 void *to, int push(void *to, void *buf, size_t size)); 94 void *to, int push(void *to, void *buf, size_t size));
94 95
95size_t perf_mmap__mmap_len(struct perf_mmap *map); 96size_t perf_mmap__mmap_len(struct perf_mmap *map);
96 97
98int perf_mmap__read_init(struct perf_mmap *md, bool overwrite,
99 u64 *startp, u64 *endp);
100void perf_mmap__read_done(struct perf_mmap *map);
97#endif /*__PERF_MMAP_H */ 101#endif /*__PERF_MMAP_H */
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 443892dabedb..1019bbc5dbd8 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -340,35 +340,15 @@ size_t hex_width(u64 v)
340 return n; 340 return n;
341} 341}
342 342
343static int hex(char ch)
344{
345 if ((ch >= '0') && (ch <= '9'))
346 return ch - '0';
347 if ((ch >= 'a') && (ch <= 'f'))
348 return ch - 'a' + 10;
349 if ((ch >= 'A') && (ch <= 'F'))
350 return ch - 'A' + 10;
351 return -1;
352}
353
354/* 343/*
355 * While we find nice hex chars, build a long_val. 344 * While we find nice hex chars, build a long_val.
356 * Return number of chars processed. 345 * Return number of chars processed.
357 */ 346 */
358int hex2u64(const char *ptr, u64 *long_val) 347int hex2u64(const char *ptr, u64 *long_val)
359{ 348{
360 const char *p = ptr; 349 char *p;
361 *long_val = 0;
362
363 while (*p) {
364 const int hex_val = hex(*p);
365 350
366 if (hex_val < 0) 351 *long_val = strtoull(ptr, &p, 16);
367 break;
368
369 *long_val = (*long_val << 4) | hex_val;
370 p++;
371 }
372 352
373 return p - ptr; 353 return p - ptr;
374} 354}
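The strtoull()-based hex2u64() keeps the old return convention (number of characters consumed) but inherits libc semantics: leading whitespace and an optional "0x" prefix are now accepted, which the old hand-rolled loop rejected. A standalone sketch of the resulting behavior (my example, not from the tree):

    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            const char *line = "deadbeef schedule";
            char *end;
            uint64_t v = strtoull(line, &end, 16);

            /* prints: v=0xdeadbeef consumed=8 */
            printf("v=0x%" PRIx64 " consumed=%td\n", v, end - line);
            return 0;
    }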
diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config
index a1883bbb0144..2cccbba64418 100644
--- a/tools/power/acpi/Makefile.config
+++ b/tools/power/acpi/Makefile.config
@@ -56,9 +56,6 @@ INSTALL_SCRIPT = ${INSTALL_PROGRAM}
56# to compile vs uClibc, that can be done here as well. 56# to compile vs uClibc, that can be done here as well.
57CROSS = #/usr/i386-linux-uclibc/usr/bin/i386-uclibc- 57CROSS = #/usr/i386-linux-uclibc/usr/bin/i386-uclibc-
58CROSS_COMPILE ?= $(CROSS) 58CROSS_COMPILE ?= $(CROSS)
59CC = $(CROSS_COMPILE)gcc
60LD = $(CROSS_COMPILE)gcc
61STRIP = $(CROSS_COMPILE)strip
62HOSTCC = gcc 59HOSTCC = gcc
63 60
64# check if compiler option is supported 61# check if compiler option is supported
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index fcb3ed0be5f8..dd614463d4d6 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -42,6 +42,24 @@ EXTRA_WARNINGS += -Wformat
42 42
43CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?) 43CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?)
44 44
45# Makefiles suck: This macro sets a default value of $(2) for the
46# variable named by $(1), unless the variable has been set by
47# environment or command line. This is necessary for CC and AR
48# because make sets default values, so the simpler ?= approach
49# won't work as expected.
50define allow-override
51 $(if $(or $(findstring environment,$(origin $(1))),\
52 $(findstring command line,$(origin $(1)))),,\
53 $(eval $(1) = $(2)))
54endef
55
56# Allow setting various cross-compile vars or setting CROSS_COMPILE as a prefix.
57$(call allow-override,CC,$(CROSS_COMPILE)gcc)
58$(call allow-override,AR,$(CROSS_COMPILE)ar)
59$(call allow-override,LD,$(CROSS_COMPILE)ld)
60$(call allow-override,CXX,$(CROSS_COMPILE)g++)
61$(call allow-override,STRIP,$(CROSS_COMPILE)strip)
62
45ifeq ($(CC_NO_CLANG), 1) 63ifeq ($(CC_NO_CLANG), 1)
46EXTRA_WARNINGS += -Wstrict-aliasing=3 64EXTRA_WARNINGS += -Wstrict-aliasing=3
47endif 65endif
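The $(origin) checks are the point of the macro: values set on the command line (make CC=clang) or in the environment are left untouched, while make's built-in defaults (origin "default") are replaced with the CROSS_COMPILE-prefixed tools. That is exactly the case the simpler ?= cannot handle, since CC and AR are always defined by make's database.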
diff --git a/tools/spi/Makefile b/tools/spi/Makefile
index 90615e10c79a..815d15589177 100644
--- a/tools/spi/Makefile
+++ b/tools/spi/Makefile
@@ -11,8 +11,6 @@ endif
11# (this improves performance and avoids hard-to-debug behaviour); 11# (this improves performance and avoids hard-to-debug behaviour);
12MAKEFLAGS += -r 12MAKEFLAGS += -r
13 13
14CC = $(CROSS_COMPILE)gcc
15LD = $(CROSS_COMPILE)ld
16CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include 14CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
17 15
18ALL_TARGETS := spidev_test spidev_fdx 16ALL_TARGETS := spidev_test spidev_fdx
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index cc15af2e54fe..9cf83f895d98 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -11,3 +11,4 @@ test_progs
11test_tcpbpf_user 11test_tcpbpf_user
12test_verifier_log 12test_verifier_log
13feature 13feature
14test_libbpf_open
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 436c4c72414f..9e03a4c356a4 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -126,6 +126,8 @@ static void test_hashmap_sizes(int task, void *data)
126 fd = bpf_create_map(BPF_MAP_TYPE_HASH, i, j, 126 fd = bpf_create_map(BPF_MAP_TYPE_HASH, i, j,
127 2, map_flags); 127 2, map_flags);
128 if (fd < 0) { 128 if (fd < 0) {
129 if (errno == ENOMEM)
130 return;
129 printf("Failed to create hashmap key=%d value=%d '%s'\n", 131 printf("Failed to create hashmap key=%d value=%d '%s'\n",
130 i, j, strerror(errno)); 132 i, j, strerror(errno));
131 exit(1); 133 exit(1);
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/test_tcpbpf_kern.c
index 57119ad57a3f..3e645ee41ed5 100644
--- a/tools/testing/selftests/bpf/test_tcpbpf_kern.c
+++ b/tools/testing/selftests/bpf/test_tcpbpf_kern.c
@@ -5,7 +5,6 @@
5#include <linux/if_ether.h> 5#include <linux/if_ether.h>
6#include <linux/if_packet.h> 6#include <linux/if_packet.h>
7#include <linux/ip.h> 7#include <linux/ip.h>
8#include <linux/in6.h>
9#include <linux/types.h> 8#include <linux/types.h>
10#include <linux/socket.h> 9#include <linux/socket.h>
11#include <linux/tcp.h> 10#include <linux/tcp.h>
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index c0f16e93f9bd..c73592fa3d41 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -2587,6 +2587,32 @@ static struct bpf_test tests[] = {
 		.result = ACCEPT,
 	},
 	{
+		"runtime/jit: pass negative index to tail_call",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_3, -1),
+			BPF_LD_MAP_FD(BPF_REG_2, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_tail_call),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_prog = { 1 },
+		.result = ACCEPT,
+	},
+	{
+		"runtime/jit: pass > 32bit index to tail_call",
+		.insns = {
+			BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
+			BPF_LD_MAP_FD(BPF_REG_2, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_tail_call),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_prog = { 2 },
+		.result = ACCEPT,
+	},
+	{
 		"stack pointer arithmetic",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_1, 4),
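
The two cases added above feed bpf_tail_call() an index of -1 and 2^32 and still expect ACCEPT: the verifier admits the program, and the bounds check happens at run time. A minimal user-space model of that check, assuming the post-fix semantics in which the index register is truncated to 32 bits before being compared against the program array's capacity (an illustrative sketch, not the kernel's code):

#include <stdint.h>
#include <stdio.h>

/* Illustrative model of the bpf_tail_call() bounds check, not kernel code. */
static int tail_call_taken(uint64_t index_reg, uint32_t max_entries)
{
	uint32_t index = (uint32_t)index_reg;	/* upper 32 bits are ignored */

	return index < max_entries;	/* out of range: fall through, no jump */
}

int main(void)
{
	/* -1 wraps to 0xffffffff and misses; 2^32 truncates to slot 0 */
	printf("-1   -> %d\n", tail_call_taken((uint64_t)-1, 4));
	printf("2^32 -> %d\n", tail_call_taken(1ULL << 32, 4));
	return 0;
}
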
diff --git a/tools/testing/selftests/memfd/Makefile b/tools/testing/selftests/memfd/Makefile
index a5276a91dfbf..0862e6f47a38 100644
--- a/tools/testing/selftests/memfd/Makefile
+++ b/tools/testing/selftests/memfd/Makefile
@@ -5,6 +5,7 @@ CFLAGS += -I../../../../include/
 CFLAGS += -I../../../../usr/include/
 
 TEST_PROGS := run_tests.sh
+TEST_FILES := run_fuse_test.sh
 TEST_GEN_FILES := memfd_test fuse_mnt fuse_test
 
 fuse_mnt.o: CFLAGS += $(shell pkg-config fuse --cflags)
diff --git a/tools/testing/selftests/powerpc/alignment/alignment_handler.c b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
index 39fd362415cf..0f2698f9fd6d 100644
--- a/tools/testing/selftests/powerpc/alignment/alignment_handler.c
+++ b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
@@ -57,7 +57,7 @@ volatile int gotsig;
 
 void sighandler(int sig, siginfo_t *info, void *ctx)
 {
-	struct ucontext *ucp = ctx;
+	ucontext_t *ucp = ctx;
 
 	if (!testing) {
 		signal(sig, SIG_DFL);
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 0b457e8e0f0c..5df609950a66 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -141,6 +141,15 @@ struct seccomp_data {
 #define SECCOMP_FILTER_FLAG_LOG 2
 #endif
 
+#ifndef PTRACE_SECCOMP_GET_METADATA
+#define PTRACE_SECCOMP_GET_METADATA	0x420d
+
+struct seccomp_metadata {
+	__u64 filter_off;	/* Input: which filter */
+	__u64 flags;		/* Output: filter's flags */
+};
+#endif
+
 #ifndef seccomp
 int seccomp(unsigned int op, unsigned int flags, void *args)
 {
@@ -2845,6 +2854,58 @@ TEST(get_action_avail)
 	EXPECT_EQ(errno, EOPNOTSUPP);
 }
 
+TEST(get_metadata)
+{
+	pid_t pid;
+	int pipefd[2];
+	char buf;
+	struct seccomp_metadata md;
+
+	ASSERT_EQ(0, pipe(pipefd));
+
+	pid = fork();
+	ASSERT_GE(pid, 0);
+	if (pid == 0) {
+		struct sock_filter filter[] = {
+			BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
+		};
+		struct sock_fprog prog = {
+			.len = (unsigned short)ARRAY_SIZE(filter),
+			.filter = filter,
+		};
+
+		/* one with log, one without */
+		ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER,
+				     SECCOMP_FILTER_FLAG_LOG, &prog));
+		ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog));
+
+		ASSERT_EQ(0, close(pipefd[0]));
+		ASSERT_EQ(1, write(pipefd[1], "1", 1));
+		ASSERT_EQ(0, close(pipefd[1]));
+
+		while (1)
+			sleep(100);
+	}
+
+	ASSERT_EQ(0, close(pipefd[1]));
+	ASSERT_EQ(1, read(pipefd[0], &buf, 1));
+
+	ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid));
+	ASSERT_EQ(pid, waitpid(pid, NULL, 0));
+
+	md.filter_off = 0;
+	ASSERT_EQ(sizeof(md), ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md));
+	EXPECT_EQ(md.flags, SECCOMP_FILTER_FLAG_LOG);
+	EXPECT_EQ(md.filter_off, 0);
+
+	md.filter_off = 1;
+	ASSERT_EQ(sizeof(md), ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md));
+	EXPECT_EQ(md.flags, 0);
+	EXPECT_EQ(md.filter_off, 1);
+
+	ASSERT_EQ(0, kill(pid, SIGKILL));
+}
+
 /*
  * TODO:
  * - add microbenchmarks
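
Beyond the selftest, the same call is usable from any tracer. A minimal sketch of querying a stopped tracee's seccomp filters with PTRACE_SECCOMP_GET_METADATA, mirroring the selftest's compat shim for pre-4.16 headers; dump_seccomp_filters is an illustrative helper, not part of the kernel or the selftest:

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <linux/types.h>

#ifndef PTRACE_SECCOMP_GET_METADATA	/* compat shim, as in the selftest */
#define PTRACE_SECCOMP_GET_METADATA	0x420d

struct seccomp_metadata {
	__u64 filter_off;	/* Input: which filter */
	__u64 flags;		/* Output: filter's flags */
};
#endif

/* Tracee must already be ptrace-stopped (PTRACE_ATTACH + waitpid). */
static void dump_seccomp_filters(pid_t pid)
{
	struct seccomp_metadata md;

	for (md.filter_off = 0; ; md.filter_off++) {
		/* returns sizeof(md) on success, -1/ENOENT past the last filter */
		if (ptrace(PTRACE_SECCOMP_GET_METADATA, pid,
			   sizeof(md), &md) < 0)
			break;
		printf("filter %llu: flags %#llx\n",
		       (unsigned long long)md.filter_off,
		       (unsigned long long)md.flags);
	}
}
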
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 10ca46df1449..d744991c0f4f 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -5,16 +5,26 @@ include ../lib.mk
 
 .PHONY: all all_32 all_64 warn_32bit_failure clean
 
-TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \
-			check_initial_reg_state sigreturn ldt_gdt iopl mpx-mini-test ioperm \
+UNAME_M := $(shell uname -m)
+CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32)
+CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c)
+
+TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \
+			check_initial_reg_state sigreturn iopl mpx-mini-test ioperm \
 			protection_keys test_vdso test_vsyscall
 TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
 			test_FCMOV test_FCOMI test_FISTTP \
 			vdso_restorer
-TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip 5lvl
+TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip
+# Some selftests require 32bit support enabled also on 64bit systems
+TARGETS_C_32BIT_NEEDED := ldt_gdt ptrace_syscall
 
-TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY)
+TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY) $(TARGETS_C_32BIT_NEEDED)
 TARGETS_C_64BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_64BIT_ONLY)
+ifeq ($(CAN_BUILD_I386)$(CAN_BUILD_X86_64),11)
+TARGETS_C_64BIT_ALL += $(TARGETS_C_32BIT_NEEDED)
+endif
+
 BINARIES_32 := $(TARGETS_C_32BIT_ALL:%=%_32)
 BINARIES_64 := $(TARGETS_C_64BIT_ALL:%=%_64)
 
@@ -23,10 +33,6 @@ BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64))
 
 CFLAGS := -O2 -g -std=gnu99 -pthread -Wall -no-pie
 
-UNAME_M := $(shell uname -m)
-CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32)
-CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c)
-
 define gen-target-rule-32
 $(1) $(1)_32: $(OUTPUT)/$(1)_32
 .PHONY: $(1) $(1)_32
@@ -40,12 +46,14 @@ endef
 ifeq ($(CAN_BUILD_I386),1)
 all: all_32
 TEST_PROGS += $(BINARIES_32)
+EXTRA_CFLAGS += -DCAN_BUILD_32
 $(foreach t,$(TARGETS_C_32BIT_ALL),$(eval $(call gen-target-rule-32,$(t))))
 endif
 
 ifeq ($(CAN_BUILD_X86_64),1)
 all: all_64
 TEST_PROGS += $(BINARIES_64)
+EXTRA_CFLAGS += -DCAN_BUILD_64
 $(foreach t,$(TARGETS_C_64BIT_ALL),$(eval $(call gen-target-rule-64,$(t))))
 endif
 
diff --git a/tools/testing/selftests/x86/mpx-mini-test.c b/tools/testing/selftests/x86/mpx-mini-test.c
index ec0f6b45ce8b..9c0325e1ea68 100644
--- a/tools/testing/selftests/x86/mpx-mini-test.c
+++ b/tools/testing/selftests/x86/mpx-mini-test.c
@@ -315,11 +315,39 @@ static inline void *__si_bounds_upper(siginfo_t *si)
 	return si->si_upper;
 }
 #else
+
+/*
+ * This deals with old version of _sigfault in some distros:
+ *
+
+old _sigfault:
+	struct {
+		void *si_addr;
+	} _sigfault;
+
+new _sigfault:
+	struct {
+		void __user *_addr;
+		int _trapno;
+		short _addr_lsb;
+		union {
+			struct {
+				void __user *_lower;
+				void __user *_upper;
+			} _addr_bnd;
+			__u32 _pkey;
+		};
+	} _sigfault;
+ *
+ */
+
 static inline void **__si_bounds_hack(siginfo_t *si)
 {
 	void *sigfault = &si->_sifields._sigfault;
 	void *end_sigfault = sigfault + sizeof(si->_sifields._sigfault);
-	void **__si_lower = end_sigfault;
+	int *trapno = (int*)end_sigfault;
+	/* skip _trapno and _addr_lsb */
+	void **__si_lower = (void**)(trapno + 2);
 
 	return __si_lower;
 }
@@ -331,7 +359,7 @@ static inline void *__si_bounds_lower(siginfo_t *si)
 
 static inline void *__si_bounds_upper(siginfo_t *si)
 {
-	return (*__si_bounds_hack(si)) + sizeof(void *);
+	return *(__si_bounds_hack(si) + 1);
 }
 #endif
 
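
The "trapno + 2" above skips two int-sized slots: _trapno, and _addr_lsb plus its alignment padding. A stand-alone check of that arithmetic against a user-space mirror of the new layout; struct new_sigfault below is illustrative, not the kernel's definition:

#include <stdio.h>
#include <stddef.h>

/* Mirror of the new _sigfault layout described in the comment above. */
struct new_sigfault {
	void *addr;
	int trapno;
	short addr_lsb;		/* padded up to pointer alignment */
	struct {
		void *lower;
		void *upper;
	} addr_bnd;
};

int main(void)
{
	/* both print the same value: old end (one pointer) + two ints */
	printf("offsetof(addr_bnd)    = %zu\n",
	       offsetof(struct new_sigfault, addr_bnd));
	printf("pointer + two ints    = %zu\n",
	       sizeof(void *) + 2 * sizeof(int));
	return 0;
}
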
diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
index bc1b0735bb50..f15aa5a76fe3 100644
--- a/tools/testing/selftests/x86/protection_keys.c
+++ b/tools/testing/selftests/x86/protection_keys.c
@@ -393,34 +393,6 @@ pid_t fork_lazy_child(void)
 	return forkret;
 }
 
-void davecmp(void *_a, void *_b, int len)
-{
-	int i;
-	unsigned long *a = _a;
-	unsigned long *b = _b;
-
-	for (i = 0; i < len / sizeof(*a); i++) {
-		if (a[i] == b[i])
-			continue;
-
-		dprintf3("[%3d]: a: %016lx b: %016lx\n", i, a[i], b[i]);
-	}
-}
-
-void dumpit(char *f)
-{
-	int fd = open(f, O_RDONLY);
-	char buf[100];
-	int nr_read;
-
-	dprintf2("maps fd: %d\n", fd);
-	do {
-		nr_read = read(fd, &buf[0], sizeof(buf));
-		write(1, buf, nr_read);
-	} while (nr_read > 0);
-	close(fd);
-}
-
 #define PKEY_DISABLE_ACCESS	0x1
 #define PKEY_DISABLE_WRITE	0x2
 
diff --git a/tools/testing/selftests/x86/single_step_syscall.c b/tools/testing/selftests/x86/single_step_syscall.c
index a48da95c18fd..ddfdd635de16 100644
--- a/tools/testing/selftests/x86/single_step_syscall.c
+++ b/tools/testing/selftests/x86/single_step_syscall.c
@@ -119,7 +119,9 @@ static void check_result(void)
 
 int main()
 {
+#ifdef CAN_BUILD_32
 	int tmp;
+#endif
 
 	sethandler(SIGTRAP, sigtrap, 0);
 
@@ -139,12 +141,13 @@ int main()
139 : : "c" (post_nop) : "r11"); 141 : : "c" (post_nop) : "r11");
140 check_result(); 142 check_result();
141#endif 143#endif
142 144#ifdef CAN_BUILD_32
143 printf("[RUN]\tSet TF and check int80\n"); 145 printf("[RUN]\tSet TF and check int80\n");
144 set_eflags(get_eflags() | X86_EFLAGS_TF); 146 set_eflags(get_eflags() | X86_EFLAGS_TF);
145 asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid) 147 asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid)
146 : INT80_CLOBBERS); 148 : INT80_CLOBBERS);
147 check_result(); 149 check_result();
150#endif
148 151
149 /* 152 /*
150 * This test is particularly interesting if fast syscalls use 153 * This test is particularly interesting if fast syscalls use
diff --git a/tools/testing/selftests/x86/test_mremap_vdso.c b/tools/testing/selftests/x86/test_mremap_vdso.c
index bf0d687c7db7..64f11c8d9b76 100644
--- a/tools/testing/selftests/x86/test_mremap_vdso.c
+++ b/tools/testing/selftests/x86/test_mremap_vdso.c
@@ -90,8 +90,12 @@ int main(int argc, char **argv, char **envp)
 			vdso_size += PAGE_SIZE;
 		}
 
+#ifdef __i386__
 		/* Glibc is likely to explode now - exit with raw syscall */
 		asm volatile ("int $0x80" : : "a" (__NR_exit), "b" (!!ret));
+#else /* __x86_64__ */
+		syscall(SYS_exit, ret);
+#endif
 	} else {
 		int status;
 
diff --git a/tools/testing/selftests/x86/test_vdso.c b/tools/testing/selftests/x86/test_vdso.c
index 29973cde06d3..235259011704 100644
--- a/tools/testing/selftests/x86/test_vdso.c
+++ b/tools/testing/selftests/x86/test_vdso.c
@@ -26,20 +26,59 @@
 # endif
 #endif
 
+/* max length of lines in /proc/self/maps - anything longer is skipped here */
+#define MAPS_LINE_LEN 128
+
 int nerrs = 0;
 
+typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
+
+getcpu_t vgetcpu;
+getcpu_t vdso_getcpu;
+
+static void *vsyscall_getcpu(void)
+{
 #ifdef __x86_64__
-# define VSYS(x) (x)
+	FILE *maps;
+	char line[MAPS_LINE_LEN];
+	bool found = false;
+
+	maps = fopen("/proc/self/maps", "r");
+	if (!maps) /* might still be present, but ignore it here, as we test vDSO not vsyscall */
+		return NULL;
+
+	while (fgets(line, MAPS_LINE_LEN, maps)) {
+		char r, x;
+		void *start, *end;
+		char name[MAPS_LINE_LEN];
+
+		/* sscanf() is safe here as strlen(name) >= strlen(line) */
+		if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
+			   &start, &end, &r, &x, name) != 5)
+			continue;
+
+		if (strcmp(name, "[vsyscall]"))
+			continue;
+
+		/* assume entries are OK, as we test vDSO here not vsyscall */
+		found = true;
+		break;
+	}
+
+	fclose(maps);
+
+	if (!found) {
+		printf("Warning: failed to find vsyscall getcpu\n");
+		return NULL;
+	}
+	return (void *) (0xffffffffff600800);
 #else
-# define VSYS(x) 0
+	return NULL;
 #endif
+}
 
-typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
-
-const getcpu_t vgetcpu = (getcpu_t)VSYS(0xffffffffff600800);
-getcpu_t vdso_getcpu;
 
-void fill_function_pointers()
+static void fill_function_pointers()
 {
 	void *vdso = dlopen("linux-vdso.so.1",
 			    RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
@@ -54,6 +93,8 @@ void fill_function_pointers()
 	vdso_getcpu = (getcpu_t)dlsym(vdso, "__vdso_getcpu");
 	if (!vdso_getcpu)
 		printf("Warning: failed to find getcpu in vDSO\n");
+
+	vgetcpu = (getcpu_t) vsyscall_getcpu();
 }
 
 static long sys_getcpu(unsigned * cpu, unsigned * node,
diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
index 7a744fa7b786..be81621446f0 100644
--- a/tools/testing/selftests/x86/test_vsyscall.c
+++ b/tools/testing/selftests/x86/test_vsyscall.c
@@ -33,6 +33,9 @@
 # endif
 #endif
 
+/* max length of lines in /proc/self/maps - anything longer is skipped here */
+#define MAPS_LINE_LEN 128
+
 static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
 		       int flags)
 {
@@ -98,7 +101,7 @@ static int init_vsys(void)
 #ifdef __x86_64__
 	int nerrs = 0;
 	FILE *maps;
-	char line[128];
+	char line[MAPS_LINE_LEN];
 	bool found = false;
 
 	maps = fopen("/proc/self/maps", "r");
@@ -108,10 +111,12 @@ static int init_vsys(void)
 		return 0;
 	}
 
-	while (fgets(line, sizeof(line), maps)) {
+	while (fgets(line, MAPS_LINE_LEN, maps)) {
 		char r, x;
 		void *start, *end;
-		char name[128];
+		char name[MAPS_LINE_LEN];
+
+		/* sscanf() is safe here as strlen(name) >= strlen(line) */
 		if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
 			   &start, &end, &r, &x, name) != 5)
 			continue;
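
test_vdso.c and test_vsyscall.c now share this /proc/self/maps pattern: a bounded line buffer, a %s conversion that cannot overflow because name is as large as line, and a permissions field that only matches ?-?p entries. A stand-alone sketch of the same pattern; find_vsyscall_base and its main are illustrative, not the selftests' code:

#include <stdio.h>
#include <string.h>

#define MAPS_LINE_LEN 128

/* Scan /proc/self/maps for the [vsyscall] mapping; NULL if absent. */
static void *find_vsyscall_base(void)
{
	FILE *maps = fopen("/proc/self/maps", "r");
	char line[MAPS_LINE_LEN];
	void *base = NULL;

	if (!maps)
		return NULL;

	while (fgets(line, MAPS_LINE_LEN, maps)) {
		char r, x;
		void *start, *end;
		char name[MAPS_LINE_LEN];

		/* same format string as the selftests; overlong lines skipped */
		if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
			   &start, &end, &r, &x, name) != 5)
			continue;
		if (!strcmp(name, "[vsyscall]")) {
			base = start;
			break;
		}
	}
	fclose(maps);
	return base;
}

int main(void)
{
	void *base = find_vsyscall_base();

	if (base)
		printf("[vsyscall] mapped at %p\n", base);
	else
		printf("[vsyscall] not mapped (vsyscall=none?)\n");
	return 0;
}
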
diff --git a/tools/usb/Makefile b/tools/usb/Makefile
index 4e6506078494..01d758d73b6d 100644
--- a/tools/usb/Makefile
+++ b/tools/usb/Makefile
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for USB tools
 
-CC = $(CROSS_COMPILE)gcc
 PTHREAD_LIBS = -lpthread
 WARNINGS = -Wall -Wextra
 CFLAGS = $(WARNINGS) -g -I../include
diff --git a/tools/vm/Makefile b/tools/vm/Makefile
index be320b905ea7..20f6cf04377f 100644
--- a/tools/vm/Makefile
+++ b/tools/vm/Makefile
@@ -6,7 +6,6 @@ TARGETS=page-types slabinfo page_owner_sort
 LIB_DIR = ../lib/api
 LIBS = $(LIB_DIR)/libapi.a
 
-CC = $(CROSS_COMPILE)gcc
 CFLAGS = -Wall -Wextra -I../lib/
 LDFLAGS = $(LIBS)
 
diff --git a/tools/wmi/Makefile b/tools/wmi/Makefile
index e664f1167388..e0e87239126b 100644
--- a/tools/wmi/Makefile
+++ b/tools/wmi/Makefile
@@ -2,7 +2,6 @@ PREFIX ?= /usr
 SBINDIR ?= sbin
 INSTALL ?= install
 CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
-CC = $(CROSS_COMPILE)gcc
 
 TARGET = dell-smbios-example
 